text stringlengths 8 4.13M |
|---|
//! # ArcGuard
//!
//! A Guard around `Arc<Mutex<T>>` allowing you to write less boilerplate code.
//!
//! # Example
//!
//! Before:
//! ```ignore
//! use std::sync::{Arc, Mutex};
//!
//! let indicator = Arc::new(Mutex::new(Indicator::new()));
//! let indicator_clone = indicator.clone();
//! let indicator_clone = indicator_clone.lock().expect("Unable to lock indicator.");
//!
//! indicator_clone.do_something();
//!
//! drop(indicator_clone);
//! ```
//!
//! After:
//!
//! ```ignore
//! use arc_guard::ArcGuard;
//!
//! let indicator = ArcGuard::new(Indicator::new());
//!
//! indicator.execute(|indicator| {
//! let indicator = indicator.lock().expect("Unable to lock indicator.");
//! indicator.do_something();
//! });
//! ```
//!
use std::sync::{Arc, Mutex};
/// A guard around `Arc<Mutex<T>>` that removes locking boilerplate.
pub struct ArcGuard<T> {
    // Shared, mutex-protected value; `execute`/`arc` hand out clones of this handle.
    arc: Arc<Mutex<T>>,
}

impl<T> ArcGuard<T> {
    /// Constructs a new `ArcGuard<T>` owning `t` behind a fresh `Arc<Mutex<T>>`.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use arc_guard::ArcGuard;
    ///
    /// let indicator = ArcGuard::new(Indicator::new());
    /// ```
    pub fn new(t: T) -> Self {
        ArcGuard { arc: Arc::new(Mutex::new(t)) }
    }

    /// Executes a closure, passing it a clone of the inner `Arc<Mutex<T>>`.
    ///
    /// This is exactly what helps us avoid the boilerplate code: the `Arc`
    /// clone handed to the closure is dropped automatically when the closure
    /// returns, and the closure's return value becomes the return value of
    /// `execute`.
    ///
    /// The bound is `FnOnce` (a backward-compatible generalization of the
    /// previous `FnMut` bound), so closures that consume captured values are
    /// accepted as well.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use arc_guard::ArcGuard;
    ///
    /// let indicator = ArcGuard::new(Indicator::new());
    ///
    /// indicator.execute(|indicator| {
    ///     let indicator = indicator.lock().expect("Unable to lock indicator.");
    ///     indicator.do_something();
    /// });
    /// ```
    ///
    /// Returning a value from the closure:
    ///
    /// ```ignore
    /// let some_string: String = indicator.execute(|indicator| -> String {
    ///     let indicator = indicator.lock().expect("Unable to lock indicator.");
    ///     return indicator.something();
    /// });
    /// ```
    pub fn execute<R>(&self, callback: impl FnOnce(Arc<Mutex<T>>) -> R) -> R {
        callback(self.arc.clone())
    }

    /// Returns a clone of the inner `Arc<Mutex<T>>`, for the cases where
    /// working with the raw handle is more convenient than with `ArcGuard<T>`.
    ///
    /// # Example
    ///
    /// ```ignore
    /// use arc_guard::ArcGuard;
    ///
    /// let indicator = ArcGuard::new(Indicator::new());
    ///
    /// let inner_arc = indicator.arc();
    /// ```
    pub fn arc(&self) -> Arc<Mutex<T>> {
        Arc::clone(&self.arc)
    }
}

// `Clone` is implemented as the standard trait (previously an inherent
// `clone` method) so `ArcGuard<T>` also works with generic code that
// requires `Clone`. Existing `guard.clone()` call sites resolve to this
// impl unchanged. Note that `T` itself does not need to be `Clone`:
// only the `Arc` handle is cloned, so both guards share the same value.
impl<T> Clone for ArcGuard<T> {
    fn clone(&self) -> Self {
        ArcGuard { arc: Arc::clone(&self.arc) }
    }
}
#[cfg(test)]
mod tests {
    use super::ArcGuard;

    // Minimal stand-in type used only to exercise the guard.
    struct Indicator;
    impl Indicator {
        pub fn new() -> Self { Indicator }
    }

    #[test]
    fn it_works() {
        let indicator = ArcGuard::new(Indicator::new());
        // Fix: the closure argument was previously unused (compiler warning);
        // it is now used to lock the mutex, which also proves that `execute`
        // hands out a usable `Arc<Mutex<_>>` handle.
        let string = indicator.execute(|indicator| -> String {
            let _locked = indicator.lock().expect("Unable to lock indicator.");
            String::from("5")
        });
        assert_eq!(string, "5");
    }
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//! This crate contains Winterfell STARK prover and verifier.
//!
//! A STARK is a novel proof-of-computation scheme to create efficiently verifiable proofs of the
//! correct execution of a computation. The scheme was developed by Eli Ben-Sasson, Michael Riabzev
//! et al. at Technion - Israel Institute of Technology. STARKs do not require an initial trusted
//! setup, and rely on very few cryptographic assumptions. See [references](#references) for more
//! info.
//!
//! ## Proof generation
//! To generate a proof that a computation was executed correctly, you'll need to do the
//! following:
//!
//! 1. Define an *algebraic intermediate representation* (AIR) for your computation. This can
//! be done by implementing [Air] trait.
//! 2. Execute your computation and record its execution trace in [ExecutionTrace] struct.
//! 3. Execute [prove()] function and supply the AIR of your computation together with its
//! execution trace as input parameters. The function will produce an instance of [StarkProof]
//! as an output.
//!
//! This `StarkProof` can be serialized and sent to a STARK verifier for verification. The size
//! of proof depends on the specifics of a given computation, but for most computations it should
//! be in the range between 15 KB (for very small computations) and 300 KB (for very large
//! computations).
//!
//! Proof generation time is also highly dependent on the specifics of a given computation, but
//! also depends on the capabilities of the machine used to generate the proofs (i.e. on number
//! of CPU cores and memory bandwidth).
//!
//! When the crate is compiled with `concurrent` feature enabled, proof generation will be
//! performed in multiple threads (usually, as many threads as there are logical cores on the
//! machine). The number of threads can be configured via `RAYON_NUM_THREADS` environment
//! variable.
//!
//! ## Proof verification
//! To verify a [StarkProof] generated as described in the previous sections, you'll need to
//! do the following:
//!
//! 1. Define an *algebraic intermediate representation* (AIR) for your computation. This AIR
//! must be the same as the one used during proof generation process.
//! 2. Execute [verify()] function and supply the AIR of your computation together with the
//! [StarkProof] and related public inputs as parameters.
//!
//! Proof verification is extremely fast and is nearly independent of the complexity of the
//! computation being verified. In vast majority of cases proofs can be verified in 3 - 5 ms
//! on a modern mid-range laptop CPU (using a single core).
//!
//! There is one exception, however: if a computation requires a lot of `sequence` assertions
//! (see [Assertion] for more info), the verification time will grow linearly in the number of
//! asserted values. But for the impact to be noticeable, the number of asserted values would
//! need to be in tens of thousands. And even for hundreds of thousands of asserted values, the
//! verification time should not exceed 50 ms.
//!
//! # Examples
//! The best way to understand the STARK proof generation and verification process is to go
//! through a trivial example from start to finish. First, we'll need to pick a computation for
//! which we'll be generating and verifying STARK proofs. To keep things simple, we'll use the
//! following:
//!
//! ```no_run
//! use winterfell::math::{fields::f128::BaseElement, FieldElement};
//!
//! fn do_work(start: BaseElement, n: usize) -> BaseElement {
//! let mut result = start;
//! for _ in 1..n {
//! result = result.exp(3) + BaseElement::new(42);
//! }
//! result
//! }
//! ```
//!
//! This computation starts with an element in a finite field and then, for the specified number
//! of steps, cubes the element and adds value `42` to it.
//!
//! Suppose, we run this computation for a million steps and get some result. Using STARKs we can
//! prove that we did the work correctly without requiring any verifying party to re-execute the
//! computation. Here is how to do it:
//!
//! First, we need to define an *execution trace* for our computation. This trace should capture
//! the state of the computation at every step of its execution. In our case, the trace is just a
//! single column of intermediate values after each execution of the loop. For example, if we start
//! with value `3` and run the computation for 1,048,576 (same as 2<sup>20</sup>) steps, the
//! execution trace will look like this:
//!
//! | Step | State |
//! | :-------: | :----- |
//! | 0 | 3 |
//! | 1 | 69 |
//! | 2 | 328551 |
//! | 3 | 35465687262668193 |
//! | 4 | 237280320818395402166933071684267763523 |
//! | ... |
//! | 1,048,575 | 247770943907079986105389697876176586605 |
//!
//! To record the trace, we'll use the [ExecutionTrace] struct. The function below, is just a
//! modified version of the `do_work()` function which records every intermediate state of the
//! computation in the [ExecutionTrace] struct:
//!
//! ```no_run
//! use winterfell::{
//! math::{fields::f128::BaseElement, FieldElement},
//! ExecutionTrace,
//! };
//!
//! pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> {
//! // Instantiate the trace with a given width and length; this will allocate all
//! // required memory for the trace
//! let trace_width = 1;
//! let mut trace = ExecutionTrace::new(trace_width, n);
//!
//! // Fill the trace with data; the first closure initializes the first state of the
//! // computation; the second closure computes the next state of the computation based
//! // on its current state.
//! trace.fill(
//! |state| {
//! state[0] = start;
//! },
//! |_, state| {
//! state[0] = state[0].exp(3u32.into()) + BaseElement::new(42);
//! },
//! );
//!
//! trace
//! }
//! ```
//!
//! Next, we need to define *algebraic intermediate representation* (AIR) for our computation.
//! This process is usually called *arithmetization*. We do this by implementing the [Air] trait.
//! At the high level, the code below does three things:
//!
//! 1. Defines what the public inputs for our computation should look like. These inputs are
//! called "public" because they must be known to both, the prover and the verifier.
//! 2. Defines a transition function with a single transition constraint. This transition
//! constraint must evaluate to zero for all valid state transitions, and to non-zero for any
//! invalid state transition. The degree of this constraint is 3 (see more about constraint
//! degrees in the "Constraint degrees" section of [Air] trait documentation).
//! 3. Defines two assertions against an execution trace of our computation. These assertions tie
//! a specific set of public inputs to a specific execution trace (see more about assertions
//! in the "Trace assertions" section of [Air] trait documentation).
//!
//! Here is the actual code:
//!
//! ```no_run
//! use winterfell::{
//! math::{fields::f128::BaseElement, FieldElement},
//! Air, AirContext, Assertion, ByteWriter, EvaluationFrame, ProofOptions, Serializable,
//! TraceInfo, TransitionConstraintDegree,
//! };
//!
//! // Public inputs for our computation will consist of the starting value and the end result.
//! pub struct PublicInputs {
//! start: BaseElement,
//! result: BaseElement,
//! }
//!
//! // We need to describe how public inputs can be converted to bytes.
//! impl Serializable for PublicInputs {
//! fn write_into<W: ByteWriter>(&self, target: &mut W) {
//! target.write(self.start);
//! target.write(self.result);
//! }
//! }
//!
//! // For a specific instance of our computation, we'll keep track of the public inputs and
//! // the computation's context which we'll build in the constructor. The context is used
//! // internally by the Winterfell prover/verifier when interpreting this AIR.
//! pub struct WorkAir {
//! context: AirContext<BaseElement>,
//! start: BaseElement,
//! result: BaseElement,
//! }
//!
//! impl Air for WorkAir {
//! // First, we'll specify which finite field to use for our computation, and also how
//! // the public inputs must look like.
//! type BaseElement = BaseElement;
//! type PublicInputs = PublicInputs;
//!
//! // Here, we'll construct a new instance of our computation which is defined by 3
//! // parameters: starting value, number of steps, and the end result. Another way to
//! // think about it is that an instance of our computation is a specific invocation of
//! // the do_work() function.
//! fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
//! // our execution trace should have only one column.
//! assert_eq!(1, trace_info.width());
//!
//! // Our computation requires a single transition constraint. The constraint itself
//! // is defined in the evaluate_transition() method below, but here we need to specify
//! // the expected degree of the constraint. If the expected and actual degrees of the
//! // constraints don't match, an error will be thrown in the debug mode, but in release
//! // mode, an invalid proof will be generated which will not be accepted by any
//! // verifier.
//! let degrees = vec![TransitionConstraintDegree::new(3)];
//! WorkAir {
//! context: AirContext::new(trace_info, degrees, options),
//! start: pub_inputs.start,
//! result: pub_inputs.result,
//! }
//! }
//!
//! // In this method we'll define our transition constraints; a computation is considered to
//! // be valid, if for all valid state transitions, transition constraints evaluate to all
//! // zeros, and for any invalid transition, at least one constraint evaluates to a non-zero
//! // value. The `frame` parameter will contain current and next states of the computation.
//! fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>(
//! &self,
//! frame: &EvaluationFrame<E>,
//! _periodic_values: &[E],
//! result: &mut [E],
//! ) {
//! // First, we'll read the current state, and use it to compute the expected next state
//! let current_state = &frame.current()[0];
//! let next_state = current_state.exp(3u32.into()) + E::from(42u32);
//!
//! // Then, we'll subtract the expected next state from the actual next state; this will
//! // evaluate to zero if and only if the expected and actual states are the same.
//! result[0] = frame.next()[0] - next_state;
//! }
//!
//! // Here, we'll define a set of assertions about the execution trace which must be
//! // satisfied for the computation to be valid. Essentially, this ties computation's
//! // execution trace to the public inputs.
//! fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> {
//! // for our computation to be valid, value in column 0 at step 0 must be equal to the
//! // starting value, and at the last step it must be equal to the result.
//! let last_step = self.trace_length() - 1;
//! vec![
//! Assertion::single(0, 0, self.start),
//! Assertion::single(0, last_step, self.result),
//! ]
//! }
//!
//! // This is just boilerplate which is used by the Winterfell prover/verifier to retrieve
//! // the context of the computation.
//! fn context(&self) -> &AirContext<Self::BaseElement> {
//! &self.context
//! }
//! }
//! ```
//!
//! Now, we are finally ready to generate and verify STARK proofs.
//!
//! In the code below, we will execute our computation and get the result together with the proof
//! that the computation was executed correctly. Then, we will use this proof (together with the
//! public inputs) to verify that we did in fact execute the computation and got the claimed
//! result.
//!
//! ```
//! # use winterfell::{
//! # math::{fields::f128::BaseElement, FieldElement},
//! # Air, AirContext, Assertion, ByteWriter, EvaluationFrame, Serializable,
//! # TraceInfo, TransitionConstraintDegree,
//! # ExecutionTrace, FieldExtension, HashFunction, ProofOptions, StarkProof,
//! # };
//! #
//! # pub fn build_do_work_trace(start: BaseElement, n: usize) -> ExecutionTrace<BaseElement> {
//! # let trace_width = 1;
//! # let mut trace = ExecutionTrace::new(trace_width, n);
//! # trace.fill(
//! # |state| {
//! # state[0] = start;
//! # },
//! # |_, state| {
//! # state[0] = state[0].exp(3u32.into()) + BaseElement::new(42);
//! # },
//! # );
//! # trace
//! # }
//! #
//! #
//! # pub struct PublicInputs {
//! # start: BaseElement,
//! # result: BaseElement,
//! # }
//! #
//! # impl Serializable for PublicInputs {
//! # fn write_into<W: ByteWriter>(&self, target: &mut W) {
//! # target.write(self.start);
//! # target.write(self.result);
//! # }
//! # }
//! #
//! # pub struct WorkAir {
//! # context: AirContext<BaseElement>,
//! # start: BaseElement,
//! # result: BaseElement,
//! # }
//! #
//! # impl Air for WorkAir {
//! # type BaseElement = BaseElement;
//! # type PublicInputs = PublicInputs;
//! #
//! # fn new(trace_info: TraceInfo, pub_inputs: PublicInputs, options: ProofOptions) -> Self {
//! # assert_eq!(1, trace_info.width());
//! # let degrees = vec![TransitionConstraintDegree::new(3)];
//! # WorkAir {
//! # context: AirContext::new(trace_info, degrees, options),
//! # start: pub_inputs.start,
//! # result: pub_inputs.result,
//! # }
//! # }
//! #
//! # fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>(
//! # &self,
//! # frame: &EvaluationFrame<E>,
//! # _periodic_values: &[E],
//! # result: &mut [E],
//! # ) {
//! # let current_state = &frame.current()[0];
//! # let next_state = current_state.exp(3u32.into()) + E::from(42u32);
//! # result[0] = frame.next()[0] - next_state;
//! # }
//! #
//! # fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> {
//! # let last_step = self.trace_length() - 1;
//! # vec![
//! # Assertion::single(0, 0, self.start),
//! # Assertion::single(0, last_step, self.result),
//! # ]
//! # }
//! #
//! # fn context(&self) -> &AirContext<Self::BaseElement> {
//! # &self.context
//! # }
//! # }
//! #
//! // We'll just hard-code the parameters here for this example. We'll also just run the
//! // computation just for 1024 steps to save time during testing.
//! let start = BaseElement::new(3);
//! let n = 1024;
//!
//! // Build the execution trace and get the result from the last step.
//! let trace = build_do_work_trace(start, n);
//! let result = trace.get(0, n - 1);
//!
//! // Define proof options; these will be enough for ~96-bit security level.
//! let options = ProofOptions::new(
//! 32, // number of queries
//! 8, // blowup factor
//! 0, // grinding factor
//! HashFunction::Blake3_256,
//! FieldExtension::None,
//! 8, // FRI folding factor
//! 128, // FRI max remainder length
//! );
//!
//! // Generate the proof.
//! let pub_inputs = PublicInputs { start, result };
//! let proof = winterfell::prove::<WorkAir>(trace, pub_inputs, options).unwrap();
//!
//! // Verify the proof. The number of steps and options are encoded in the proof itself,
//! // so we don't need to pass them explicitly to the verifier.
//! let pub_inputs = PublicInputs { start, result };
//! assert!(winterfell::verify::<WorkAir>(proof, pub_inputs).is_ok());
//! ```
//!
//! That's all there is to it!
//!
//! # References
//!
//! If you are interested in learning how STARKs work under the hood, here are a few links to get
//! you started. From the standpoint of this library, *arithmetization* is by far the most
//! important concept to understand.
//!
//! * STARKs whitepaper: [Scalable, transparent, and post-quantum secure computational integrity](https://eprint.iacr.org/2018/046)
//! * STARKs vs. SNARKs: [A Cambrian Explosion of Crypto Proofs](https://nakamoto.com/cambrian-explosion-of-crypto-proofs/)
//!
//! Vitalik Buterin's blog series on zk-STARKs:
//! * [STARKs, part 1: Proofs with Polynomials](https://vitalik.ca/general/2017/11/09/starks_part_1.html)
//! * [STARKs, part 2: Thank Goodness it's FRI-day](https://vitalik.ca/general/2017/11/22/starks_part_2.html)
//! * [STARKs, part 3: Into the Weeds](https://vitalik.ca/general/2018/07/21/starks_part_3.html)
//!
//! StarkWare's STARK Math blog series:
//! * [STARK Math: The Journey Begins](https://medium.com/starkware/stark-math-the-journey-begins-51bd2b063c71)
//! * [Arithmetization I](https://medium.com/starkware/arithmetization-i-15c046390862)
//! * [Arithmetization II](https://medium.com/starkware/arithmetization-ii-403c3b3f4355)
//! * [Low Degree Testing](https://medium.com/starkware/low-degree-testing-f7614f5172db)
//! * [A Framework for Efficient STARKs](https://medium.com/starkware/a-framework-for-efficient-starks-19608ba06fbe)
#![no_std]
pub use prover::{
crypto, iterators, math, prove, Air, AirContext, Assertion, BoundaryConstraint,
BoundaryConstraintGroup, ByteReader, ByteWriter, ConstraintCompositionCoefficients,
ConstraintDivisor, DeepCompositionCoefficients, Deserializable, DeserializationError,
EvaluationFrame, ExecutionTrace, ExecutionTraceFragment, FieldExtension, HashFunction,
ProofOptions, ProverError, Serializable, StarkProof, TraceInfo, TransitionConstraintDegree,
TransitionConstraintGroup,
};
pub use verifier::{verify, VerifierError};
|
use chrono;
use fern;
/// Configures the global `fern` logger: colored, timestamped lines on stdout,
/// `Warn` for dependencies and `Trace` for this crate's own target.
pub(crate) fn set_up_logging() {
    use fern::colors::{Color, ColoredLevelConfig};

    // Color used for the bracketed line prefix: errors bright red, warnings yellow.
    let line_colors = ColoredLevelConfig::new()
        .error(Color::BrightRed)
        .warn(Color::Yellow);
    // Same palette, plus distinct colors for the level name itself.
    let level_colors = line_colors.clone().info(Color::Green).debug(Color::Cyan);

    fern::Dispatch::new()
        // Default level filters dependencies down to Warn; our own crate's
        // target is overridden to Trace so nothing of ours is lost.
        .level(log::LevelFilter::Warn)
        .level_for("monitor_websites", log::LevelFilter::Trace)
        .format(move |out, message, record| {
            let level = record.level();
            out.finish(format_args!(
                "{color_line}[{date}][{target}][{level}{color_line}] {message}\x1B[0m",
                color_line = format_args!(
                    "\x1B[{}m",
                    line_colors.get_color(&level).to_fg_str()
                ),
                date = chrono::Local::now().format("%Y-%m-%d %H:%M:%S"),
                target = record.target(),
                level = level_colors.color(level),
                message = message,
            ));
        })
        .chain(std::io::stdout())
        .apply()
        .expect("logging setup should be OK");
}
|
#[doc = "Register `CCIPR2` reader"]
pub type R = crate::R<CCIPR2_SPEC>;
#[doc = "Register `CCIPR2` writer"]
pub type W = crate::W<CCIPR2_SPEC>;
#[doc = "Field `I2S1SEL` reader - I2S1SEL"]
pub type I2S1SEL_R = crate::FieldReader;
#[doc = "Field `I2S1SEL` writer - I2S1SEL"]
pub type I2S1SEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `I2S2SEL` reader - I2S2SEL"]
pub type I2S2SEL_R = crate::FieldReader;
#[doc = "Field `I2S2SEL` writer - I2S2SEL"]
pub type I2S2SEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `FDCANSEL` reader - FDCANSEL"]
pub type FDCANSEL_R = crate::FieldReader;
#[doc = "Field `FDCANSEL` writer - FDCANSEL"]
pub type FDCANSEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `USBSEL` reader - USBSEL"]
pub type USBSEL_R = crate::BitReader;
#[doc = "Field `USBSEL` writer - USBSEL"]
pub type USBSEL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each method decodes its field from the raw register
// value captured in `self.bits`.
impl R {
    #[doc = "Bits 0:1 - I2S1SEL"]
    #[inline(always)]
    pub fn i2s1sel(&self) -> I2S1SEL_R {
        I2S1SEL_R::new((self.bits & 3) as u8)
    }
    #[doc = "Bits 2:3 - I2S2SEL"]
    #[inline(always)]
    pub fn i2s2sel(&self) -> I2S2SEL_R {
        I2S2SEL_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 8:9 - FDCANSEL"]
    #[inline(always)]
    pub fn fdcansel(&self) -> FDCANSEL_R {
        FDCANSEL_R::new(((self.bits >> 8) & 3) as u8)
    }
    #[doc = "Bit 12 - USBSEL"]
    #[inline(always)]
    pub fn usbsel(&self) -> USBSEL_R {
        USBSEL_R::new(((self.bits >> 12) & 1) != 0)
    }
}
// Write accessors: each method returns a field-writer proxy positioned at
// the field's bit offset (the const generic parameter).
impl W {
    #[doc = "Bits 0:1 - I2S1SEL"]
    #[inline(always)]
    #[must_use]
    pub fn i2s1sel(&mut self) -> I2S1SEL_W<CCIPR2_SPEC, 0> {
        I2S1SEL_W::new(self)
    }
    #[doc = "Bits 2:3 - I2S2SEL"]
    #[inline(always)]
    #[must_use]
    pub fn i2s2sel(&mut self) -> I2S2SEL_W<CCIPR2_SPEC, 2> {
        I2S2SEL_W::new(self)
    }
    #[doc = "Bits 8:9 - FDCANSEL"]
    #[inline(always)]
    #[must_use]
    pub fn fdcansel(&mut self) -> FDCANSEL_W<CCIPR2_SPEC, 8> {
        FDCANSEL_W::new(self)
    }
    #[doc = "Bit 12 - USBSEL"]
    #[inline(always)]
    #[must_use]
    pub fn usbsel(&mut self) -> USBSEL_W<CCIPR2_SPEC, 12> {
        USBSEL_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Peripherals independent clock configuration register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ccipr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ccipr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CCIPR2_SPEC;
impl crate::RegisterSpec for CCIPR2_SPEC {
    // CCIPR2 is a 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`ccipr2::R`](R) reader structure"]
impl crate::Readable for CCIPR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ccipr2::W`](W) writer structure"]
impl crate::Writable for CCIPR2_SPEC {
    // No bits in this register are cleared/set merely by writing 0/1 during
    // a modify; both special-behavior bitmaps are empty.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CCIPR2 to value 0"]
impl crate::Resettable for CCIPR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
mod common;
use std::str::FromStr;
/// Counts passports that contain every required field (part 1 rules).
fn part1(passports: &[Passport]) -> usize {
    let mut valid = 0;
    for passport in passports {
        if passport.is_valid() {
            valid += 1;
        }
    }
    valid
}
/// Counts passports whose required fields are present *and* well-formed
/// (part 2 rules).
fn part2(passports: &[Passport]) -> usize {
    passports.iter().fold(0, |count, passport| {
        if passport.is_strictly_valid() {
            count + 1
        } else {
            count
        }
    })
}
/// One passport record; each field holds the raw string value when the key
/// appeared in the input, or `None` otherwise. Range/format rules below are
/// the ones enforced by `is_strictly_valid`.
#[derive(Debug)]
struct Passport {
    byr: Option<String>, // birth year, 1920-2002
    iyr: Option<String>, // issue year, 2010-2020
    eyr: Option<String>, // expiration year, 2020-2030
    hgt: Option<String>, // height, 150-193cm or 59-76in
    hcl: Option<String>, // hair color, "#" + 6 lowercase hex digits
    ecl: Option<String>, // eye color, one of a fixed set
    pid: Option<String>, // passport id, exactly 9 digits
    cid: Option<String>, // country id; optional, never validated
}
impl Passport {
    /// Creates an empty passport with no fields set.
    fn new() -> Passport {
        Passport {
            byr: None,
            iyr: None,
            eyr: None,
            hgt: None,
            hcl: None,
            ecl: None,
            pid: None,
            cid: None,
        }
    }

    /// Stores `value` under the field named by `key`; errors on unknown keys.
    fn set_field(&mut self, key: &str, value: &str) -> Result<(), String> {
        let slot = match key {
            "byr" => &mut self.byr,
            "iyr" => &mut self.iyr,
            "eyr" => &mut self.eyr,
            "hgt" => &mut self.hgt,
            "hcl" => &mut self.hcl,
            "ecl" => &mut self.ecl,
            "pid" => &mut self.pid,
            "cid" => &mut self.cid,
            _ => return Err("invalid key: ".to_string() + key),
        };
        *slot = Some(value.to_string());
        Ok(())
    }

    /// A passport is valid when every required field (all but `cid`) is present.
    fn is_valid(&self) -> bool {
        [
            &self.byr, &self.iyr, &self.eyr, &self.hgt, &self.hcl, &self.ecl, &self.pid,
        ]
        .iter()
        .all(|field| field.is_some())
    }

    /// Returns true when `field` is present and matches `pattern`.
    fn validate_field(field: &Option<String>, pattern: &str) -> bool {
        field
            .as_deref()
            .map_or(false, |value| regex::Regex::new(pattern).unwrap().is_match(value))
    }

    /// Strict validation: each required field must be present and satisfy its
    /// anchored format/range pattern. Short-circuits on the first failure.
    fn is_strictly_valid(&self) -> bool {
        let rules: [(&Option<String>, &str); 7] = [
            (&self.byr, "^((19[2-9][0-9])|(200[0-2]))$"),
            (&self.iyr, "^20(1[0-9]|20)$"),
            (&self.eyr, "^20(2[0-9]|30)$"),
            (
                &self.hgt,
                "^(((1[5-8][0-9]|19[0-3])cm)|((59|6[0-9]|7[0-6])in))$",
            ),
            (&self.hcl, "^#[0-9a-f]{6}$"),
            (&self.ecl, "^(amb|blu|brn|gry|grn|hzl|oth)$"),
            (&self.pid, "^[0-9]{9}$"),
        ];
        rules
            .iter()
            .all(|&(field, pattern)| Self::validate_field(field, pattern))
    }
}
impl FromStr for Passport {
    type Err = String;

    /// Parses one whitespace-separated record of `key:value` tokens.
    /// A token without a `:` yields an error ("no value"), as does an
    /// unknown key (propagated from `set_field`).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut passport = Passport::new();
        for token in s.split_whitespace() {
            let (key, value) = token.split_once(':').ok_or("no value")?;
            passport.set_field(key, value)?;
        }
        Ok(passport)
    }
}
/// Splits the input into blank-line-separated chunks and parses each chunk
/// as one `Passport`.
///
/// Fix: consecutive (or leading) blank lines previously produced empty
/// chunks, each parsed into a spurious all-`None` passport; empty chunks
/// are now skipped.
fn parse_input(input: &str) -> Result<Vec<Passport>, String> {
    let mut passports = Vec::new();
    let mut chunk = String::new();
    for line in input.lines() {
        if line.is_empty() {
            // End of a record; ignore repeated blank separators.
            if !chunk.is_empty() {
                passports.push(chunk.parse::<Passport>()?);
                chunk.clear();
            }
        } else {
            chunk.push_str(line);
            chunk.push('\n');
        }
    }
    // The final record is not followed by a blank line.
    if !chunk.is_empty() {
        passports.push(chunk.parse::<Passport>()?);
    }
    Ok(passports)
}
/// Reads the puzzle input, parses it, and prints both answers.
fn main() {
    let content = common::get_content();
    let passports = parse_input(&content).expect("could not parse input");
    println!("Part1: {}", part1(&passports));
    println!("Part2: {}", part2(&passports));
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE: the fixtures below rely on `\`-continued string literals, which
    // skip the newline and any leading whitespace of the following line, so
    // each source line contributes exactly its visible text plus the `\n`.

    /// Mixed example: four records, two of which have all required fields.
    fn test_input() -> Vec<Passport> {
        let input = "ecl:gry pid:860033327 eyr:2020 hcl:#fffffd\n\
                     byr:1937 iyr:2017 cid:147 hgt:183cm\n\
                     \n\
                     iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884\n\
                     hcl:#cfa07d byr:1929\n\
                     \n\
                     hcl:#ae17e1 iyr:2013\n\
                     eyr:2024\n\
                     ecl:brn pid:760753108 byr:1931\n\
                     hgt:179cm\n\
                     \n\
                     hcl:#cfa07d eyr:2025 pid:166559648\n\
                     iyr:2011 ecl:brn hgt:59in";
        parse_input(&input).unwrap()
    }

    /// Records whose fields are present but fail strict validation.
    fn test_invalid() -> Vec<Passport> {
        let input = "eyr:1972 cid:100\n\
                     hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926\n\
                     \n\
                     iyr:2019\n\
                     hcl:#602927 eyr:1967 hgt:170cm\n\
                     ecl:grn pid:012533040 byr:1946\n\
                     \n\
                     hcl:dab227 iyr:2012\n\
                     ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277\n\
                     \n\
                     hgt:59cm ecl:zzz\n\
                     eyr:2038 hcl:74454a iyr:2023\n\
                     pid:3556412378 byr:2007";
        parse_input(&input).unwrap()
    }

    /// Records that pass strict validation.
    fn test_valid() -> Vec<Passport> {
        let input = "pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980\n\
                     hcl:#623a2f\n\
                     \n\
                     eyr:2029 ecl:blu cid:129 byr:1989\n\
                     iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm\n\
                     \n\
                     hcl:#888785\n\
                     hgt:164cm byr:2001 iyr:2015 cid:88\n\
                     pid:545766238 ecl:hzl\n\
                     eyr:2022\n\
                     \n\
                     iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719";
        parse_input(&input).unwrap()
    }

    #[test]
    fn test_part1() {
        assert_eq!(part1(&test_input()), 2);
    }

    #[test]
    fn test_part2() {
        assert_eq!(part2(&test_invalid()), 0);
        assert_eq!(part2(&test_valid()), 4);
    }
}
|
use std::io::{Write, Result};
use util::join;
use parser::html::Comparator;
use super::Generator;
use super::ast::{Code, Statement, Expression};
/// Serialization entry point implemented by code generators.
pub trait Emit {
    /// Writes the generated source for the whole `Code` unit to the
    /// underlying output, returning any I/O error encountered.
    fn emit(&mut self, code: &Code) -> Result<()>;
}
/// Writes `val` as a double-quoted JavaScript string literal, escaping
/// characters that would break or corrupt the literal.
///
/// Fixes two escaping bugs in the previous version:
/// * control characters were formatted with `{:02}` (decimal), so e.g.
///   0x1F was emitted as `\x31` (which is `'1'`); now `{:02x}` emits `\x1f`;
/// * a literal backslash was passed through unescaped, producing an invalid
///   or wrong literal; it is now emitted as `\\`.
fn write_str<W: Write, S: AsRef<str>>(w: &mut W, val: S) -> Result<()> {
    w.write_all(b"\"")?;
    for ch in val.as_ref().chars() {
        match ch {
            '\\' => write!(w, "\\\\")?,
            '\r' => write!(w, "\\r")?,
            '\n' => write!(w, "\\n")?,
            '\t' => write!(w, "\\t")?,
            '\"' => write!(w, "\\\"")?,
            '\'' => write!(w, "\\\'")?,
            // Remaining C0 control characters get a hex escape.
            '\x00'..='\x1f' => write!(w, "\\x{:02x}", ch as u8)?,
            _ => write!(w, "{}", ch)?,
        }
    }
    w.write_all(b"\"")?;
    Ok(())
}
/// Returns true when `s` is a valid ASCII identifier: a letter or `_`
/// followed by letters, digits, or `_`. The empty string is not an
/// identifier.
///
/// Rewritten from the deprecated `...` range-pattern syntax to `..=`
/// equivalents (`matches!`), with the redundant explicit length check
/// removed (an empty string simply has no first char).
fn is_ident<S: AsRef<str>>(s: S) -> bool {
    let s = s.as_ref();
    let mut chars = s.chars();
    match chars.next() {
        Some('a'..='z') | Some('A'..='Z') | Some('_') => {}
        _ => return false,
    }
    chars.all(|ch| matches!(ch, 'a'..='z' | 'A'..='Z' | '0'..='9' | '_'))
}
impl<'a, W:Write+'a> Generator<'a, W> {
fn write_indent(&mut self, indent: u32) -> Result<()> {
// TODO(tailhook) Is there a beter way ?
for _ in 0..indent {
try!(self.buf.write_all(b" "));
}
Ok(())
}
fn emit_expression(&mut self, expr: &Expression, indent: u32)
-> Result<()>
{
let nindent = self.indent + indent;
match expr {
&Expression::Str(ref s) => {
try!(write_str(self.buf, &s[..]));
}
&Expression::Num(ref s) => {
try!(write!(self.buf, "{}", s));
}
&Expression::Object(ref pairs) => {
try!(self.buf.write_all(b"{"));
if pairs.len() == 0 {
} else if pairs.len() == 1 {
if is_ident(&pairs[0].0[..]) {
try!(write!(self.buf, "{}: ", pairs[0].0));
} else {
try!(write_str(self.buf, &pairs[0].0));
try!(write!(self.buf, ": "));
}
try!(self.emit_expression(&pairs[0].1, indent));
} else {
try!(self.buf.write_all(b"\n"));
for &(ref key, ref value) in pairs.iter() {
try!(self.write_indent(nindent));
if is_ident(&key[..]) {
try!(write!(self.buf, "{}: ", key));
} else {
try!(write_str(self.buf, key));
try!(write!(self.buf, ": "));
}
try!(self.emit_expression(value, nindent));
try!(self.buf.write_all(b",\n"));
}
try!(self.write_indent(indent));
}
try!(self.buf.write_all(b"}"));
}
&Expression::List(ref lst) => {
try!(self.buf.write_all(b"["));
if lst.len() == 0 {
} else if lst.len() == 1 {
try!(self.emit_expression(&lst[0], indent));
} else {
try!(self.buf.write_all(b"\n"));
for item in lst.iter() {
try!(self.write_indent(nindent));
try!(self.emit_expression(item, nindent));
try!(self.buf.write_all(b",\n"));
}
try!(self.write_indent(indent));
}
try!(self.buf.write_all(b"]"));
}
&Expression::Name(ref s) => {
try!(write!(self.buf, "{}", s));
}
&Expression::Attr(ref parent, ref attr) => {
try!(self.emit_expression(parent, indent));
try!(write!(self.buf, ".{}", attr));
}
&Expression::Item(ref parent, ref item) => {
try!(self.emit_expression(parent, indent));
try!(write!(self.buf, "["));
try!(self.emit_expression(item, indent));
try!(write!(self.buf, "]"));
}
&Expression::Call(ref parent, ref args) => {
try!(self.emit_expression(parent, indent));
try!(self.buf.write_all(b"("));
if args.len() > 0 {
try!(self.emit_expression(&args[0], indent));
for i in args[1..].iter() {
try!(self.buf.write_all(b", "));
try!(self.emit_expression(i, indent));
}
}
try!(self.buf.write_all(b")"));
}
&Expression::New(ref val) => {
try!(write!(self.buf, "new "));
try!(self.emit_expression(val, indent));
}
&Expression::Not(ref val) => {
try!(write!(self.buf, "!"));
try!(self.emit_expression(val, indent));
}
&Expression::Or(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " || "));
try!(self.emit_expression(right, indent));
}
&Expression::And(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " && "));
try!(self.emit_expression(right, indent));
}
&Expression::Add(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " + "));
try!(self.emit_expression(right, indent));
}
&Expression::Sub(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " - "));
try!(self.emit_expression(right, indent));
}
&Expression::Mul(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " * "));
try!(self.emit_expression(right, indent));
}
&Expression::Div(ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " / "));
try!(self.emit_expression(right, indent));
}
&Expression::Comparison(op, ref left, ref right) => {
try!(self.emit_expression(left, indent));
try!(write!(self.buf, " {} ", match op {
Comparator::Eq => "===",
Comparator::NotEq => "!==",
Comparator::Less => "<",
Comparator::LessEq => "<=",
Comparator::Greater => ">",
Comparator::GreaterEq => ">=",
}));
try!(self.emit_expression(right, indent));
}
&Expression::Function(ref name, ref params, ref body) => {
try!(write!(self.buf, "function {name}({params}) {{\n",
name=name.as_ref().unwrap_or(&String::from("")),
params=join(params.iter().map(|x| &x.name), ", ")));
// TODO(tailhook) default values
try!(self.emit_statements(&body, nindent));
try!(self.write_indent(indent));
try!(self.buf.write_all(b"}"));
}
&Expression::AssignAttr(ref expr, ref attr, ref value) => {
try!(self.emit_expression(expr, indent));
try!(write!(self.buf, ".{} = ", attr));
try!(self.emit_expression(value, indent));
}
&Expression::Ternary(ref cond, ref left, ref right) => {
try!(write!(self.buf, "(("));
try!(self.emit_expression(cond, indent));
try!(write!(self.buf, ")?("));
try!(self.emit_expression(left, indent));
try!(write!(self.buf, "):("));
try!(self.emit_expression(right, indent));
try!(write!(self.buf, "))"));
}
}
Ok(())
}
/// Writes every statement in `stmts` at indentation level `indent`.
///
/// Nested bodies (function statements) are emitted one configured
/// indentation step deeper.
fn emit_statements(&mut self, stmts: &Vec<Statement>, indent: u32)
    -> Result<()>
{
    // Indentation level used for anything nested inside a statement.
    let deeper = indent + self.indent;
    for stmt in stmts {
        match *stmt {
            Statement::Expr(ref expr) => {
                try!(self.write_indent(indent));
                try!(self.emit_expression(expr, deeper));
                try!(self.buf.write_all(b"\n"));
            }
            Statement::Return(ref expr) => {
                try!(self.write_indent(indent));
                try!(self.buf.write_all(b"return "));
                try!(self.emit_expression(expr, deeper));
                try!(self.buf.write_all(b";\n"));
            }
            Statement::Var(ref name, ref expr) => {
                try!(self.write_indent(indent));
                try!(write!(self.buf, "var {} = ", name));
                try!(self.emit_expression(expr, deeper));
                try!(self.buf.write_all(b";\n"));
            }
            Statement::Function(ref name, ref params, ref body) => {
                try!(self.write_indent(indent));
                try!(write!(self.buf, "function {name}({params}) {{\n",
                    name=name,
                    params=join(params.iter().map(|x| &x.name), ", ")));
                // TODO(tailhook) default values
                try!(self.emit_statements(body, deeper));
                try!(self.write_indent(indent));
                try!(self.buf.write_all(b"}\n"));
            }
        }
    }
    Ok(())
}
}
impl<'a, W: Write + 'a> Emit for Generator<'a, W> {
    /// Emits the whole code unit, starting at indentation level zero.
    fn emit(&mut self, code: &Code) -> Result<()> {
        // The result of the recursive walk is already `Result<()>`,
        // so it can be returned directly.
        self.emit_statements(&code.statements, 0)
    }
}
|
// file: stochastic.rs
//
// Copyright 2015-2017 The RsGenetic Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use pheno::{Fitness, Phenotype};
use super::*;
use rand::Rng;
/// *Stochastic Universal Sampling* selector.
///
/// Picks phenotypes starting from a random index and then takes
/// equidistant jumps through the population, yielding parents across the
/// low, medium and high fitness ranges.
#[derive(Clone, Copy, Debug)]
pub struct StochasticSelector {
    // Number of parents to select; validated inside `select`.
    count: usize,
}

impl StochasticSelector {
    /// Builds a selector that picks `count` parents per invocation.
    ///
    /// `count` must be larger than zero, a multiple of 2 and less than the
    /// population size; `select` returns `Err` otherwise.
    pub fn new(count: usize) -> StochasticSelector {
        StochasticSelector { count }
    }
}
impl<T, F> Selector<T, F> for StochasticSelector
where
    T: Phenotype<F>,
    F: Fitness,
{
    /// Selects `count` parents via stochastic universal sampling: a random
    /// starting index followed by equidistant jumps through the population.
    fn select<'a>(&self, population: &'a [T]) -> Result<Parents<&'a T>, String> {
        // Reject counts that cannot form valid parent pairs.
        let count_invalid =
            self.count == 0 || self.count % 2 != 0 || self.count >= population.len();
        if count_invalid {
            return Err(format!(
                "Invalid parameter `count`: {}. Should be larger than zero, a \
                 multiple of two and less than the population size.",
                self.count
            ));
        }
        // Jump distance between consecutive picks.
        let step = population.len() / self.count;
        let mut pairs: Parents<&T> = Vec::new();
        // Random starting point within the population.
        let mut idx = ::rand::thread_rng().gen_range::<usize>(0, population.len());
        let mut picked = 0;
        while picked < self.count {
            // The partner index is also where the next jump lands.
            let partner = (idx + step - 1) % population.len();
            pairs.push((&population[idx], &population[partner]));
            idx = partner;
            picked += 2;
        }
        Ok(pairs)
    }
}
#[cfg(test)]
mod tests {
    use sim::select::*;
    use test::Test;

    // Shared fixture: 100 phenotypes with fitness values 0..99.
    fn sample_population() -> Vec<Test> {
        (0..100).map(|i| Test { f: i }).collect()
    }

    #[test]
    fn test_count_zero() {
        let population = sample_population();
        assert!(StochasticSelector::new(0).select(&population).is_err());
    }

    #[test]
    fn test_count_odd() {
        let population = sample_population();
        assert!(StochasticSelector::new(5).select(&population).is_err());
    }

    #[test]
    fn test_count_too_large() {
        let population = sample_population();
        assert!(StochasticSelector::new(100).select(&population).is_err());
    }

    #[test]
    fn test_result_size() {
        let population = sample_population();
        let parents = StochasticSelector::new(20).select(&population).unwrap();
        // Each entry is a pair, so 10 pairs == 20 selected parents.
        assert_eq!(20, parents.len() * 2);
    }
}
|
// Test compression algorithms for numbers / data
// Written 2016.04.05 by Willi Kappler (grandor@gmx.de)
// External crates:
// none
// Internal crates:
extern crate compression1;
// System modules:
// none
// External modules:
// none
// Internal modules:
use compression1::*;
/// Demo driver: packs a sample date/time with each compression helper and
/// prints the packed words in hex and binary, then round-trips the
/// combined date-time value.
fn main() {
    let y1 = compress_year(2016);
    let d1 = compress_date(2016, 4, 5); // 2016.04.05
    let t1 = compress_time(15, 21); // 15:21 h (comment corrected; previously said "10:21")
    let dt1 = compress_date_time(2016, 4, 5, 15, 21); // 2016.04.05, 15:21
    // {:0>8x} = zero-padded 8-digit hex, {:0>32b} = zero-padded 32-bit binary.
    println!("Year: {:0>8x}, {:0>32b}", y1, y1);
    println!("Date: {:0>8x}, {:0>32b}", d1, d1);
    println!("Time: {:0>8x}, {:0>32b}", t1, t1);
    println!("DT : {:0>8x}, {:0>32b}", dt1, dt1);
    // Verify the packed value decodes back to its components.
    println!("Decoded: {:?}", decode_date_time(dt1));
}
|
#![windows_subsystem = "windows"]
use std::thread;
use std::time::{
Duration,
Instant
};
#[cfg(windows)]
mod windows;
#[cfg(windows)]
use windows::{get_idle_time, start};
use crossbeam::channel::{
unbounded,
Sender
};
// Idle for longer than this (1 min) pauses the work timer: no updates sent.
const IDLE_PAUSE_TIME: Duration = Duration::from_secs(60);
// Idle for longer than this (5 min) resets the work timer entirely.
const IDLE_RESET_TIME: Duration = Duration::from_secs(300);
// Continuous activity for this long (45 min) triggers a break notification.
const BREAK_TIME: Duration = Duration::from_secs(2700);

/// Messages sent from the idle-monitor thread to the consumer (`start`).
#[derive(Debug)]
pub enum Event {
    /// Elapsed active (non-idle) time since the last reset.
    UpdateTime(Duration),
    /// The user has been continuously active for at least `BREAK_TIME`.
    NotifyBreak,
    /// The user was idle long enough that the timer was reset.
    NotifyReset
}
/// Polls the system idle time once per second and reports work/idle events
/// on `s`. Never returns; panics (ending the thread) if the receiver is
/// dropped, since every `send` is `unwrap`ped.
///
/// State machine per tick:
/// * idle > IDLE_RESET_TIME -> send `NotifyReset` once, zero the timer;
/// * idle > IDLE_PAUSE_TIME -> pause: no updates, timer keeps its anchor;
/// * otherwise              -> send elapsed active time, plus `NotifyBreak`
///                             once after `BREAK_TIME` of activity.
fn monitor_idle_time(s: Sender<Event>) {
    // Anchor of the current activity period.
    let mut start = Instant::now();
    // Latches so NotifyReset/NotifyBreak fire only once per episode.
    let mut has_reset: bool = false;
    let mut has_break: bool = false;
    // Initial update so the consumer displays 0 immediately.
    s.send(Event::UpdateTime(start.elapsed())).unwrap();
    loop {
        thread::sleep(Duration::from_secs(1));
        match get_idle_time() {
            Ok(idle_time) if idle_time > IDLE_RESET_TIME => {
                if !has_reset {
                    s.send(Event::NotifyReset).unwrap();
                    s.send(Event::UpdateTime(Duration::from_secs(0))).unwrap();
                    has_reset = true;
                }
                // Keep re-anchoring while idle so elapsed() restarts from
                // the moment activity resumes.
                start = Instant::now();
            },
            // Short idle: pause the timer without resetting it.
            Ok(idle_time) if idle_time > IDLE_PAUSE_TIME => {},
            Ok(_idle_time) => {
                if has_reset {
                    // First active tick after a reset: begin a fresh period.
                    start = Instant::now();
                    has_reset = false;
                    has_break = false;
                }
                s.send(Event::UpdateTime(start.elapsed())).unwrap();
                if start.elapsed() >= BREAK_TIME {
                    if !has_break {
                        s.send(Event::NotifyBreak).unwrap();
                        has_break = true;
                    }
                }
            },
            Err(_errno) => {
                // NOTE(review): failures of the idle query are silently
                // ignored — presumably acceptable for this utility; confirm.
            }
        }
    }
}
/// Entry point: spawns the idle-monitor thread and hands the receiving end
/// of the channel to the platform UI loop.
fn main() {
    let (sender, receiver) = unbounded();
    thread::spawn(move || monitor_idle_time(sender));
    start(receiver);
}
|
use crate::map::triangle::Triangle;
use crate::math::vector::Vector2;
/// A polygonal map region with floor/ceiling heights and nested sub-sectors.
pub struct Sector {
    pub index: usize,
    // Vertical extents — presumably bottom <= floor <= ceiling <= top;
    // not enforced here, confirm at construction sites.
    pub bottom: f32,
    pub floor: f32,
    pub ceiling: f32,
    pub top: f32,
    // A negative texture index means "no surface" (see has_floor/has_ceiling).
    pub floor_texture: i32,
    pub ceiling_texture: i32,
    // Polygon outline vertices used by `contains`.
    pub vecs: Vec<Vector2>,
    pub lines: Vec<usize>,
    pub triangles: Vec<Triangle>,
    // Indices of sectors nested inside this one (used by `find`).
    pub inside: Vec<usize>,
    pub outside: Option<usize>,
}

impl Sector {
    /// Creates a sector with index 0, no triangulation and no inner sectors.
    pub fn new(bottom: f32, floor: f32, ceiling: f32, top: f32, floor_texture: i32, ceiling_texture: i32, vecs: Vec<Vector2>, lines: Vec<usize>) -> Self {
        Sector {
            index: 0,
            bottom,
            floor,
            ceiling,
            top,
            floor_texture,
            ceiling_texture,
            vecs,
            lines,
            triangles: Vec::new(),
            inside: Vec::new(),
            outside: None,
        }
    }

    /// Replaces the cached triangulation of this sector.
    pub fn update_triangles(&mut self, triangles: Vec<Triangle>) {
        self.triangles = triangles;
    }

    /// Point-in-polygon test (even-odd ray casting) against the outline.
    pub fn contains(&self, x: f32, y: f32) -> bool {
        let vecs: &Vec<Vector2> = &self.vecs;
        let count: usize = vecs.len();
        // Guard: an empty outline contains nothing — and `count - 1` below
        // would underflow a usize and panic.
        if count == 0 {
            return false;
        }
        let mut odd: bool = false;
        let mut k: usize = count - 1;
        for i in 0..count {
            let a: &Vector2 = &vecs[i];
            let b: &Vector2 = &vecs[k];
            // Edge (a, b) straddles the horizontal through `y`: toggle the
            // parity when the crossing point lies to the right of `x`.
            if (a.y > y) != (b.y > y) {
                let crossing: f32 = (b.x - a.x) * (y - a.y) / (b.y - a.y) + a.x;
                if x < crossing {
                    odd = !odd;
                }
            }
            k = i;
        }
        odd
    }

    /// True when this sector has a floor surface (non-negative texture).
    pub fn has_floor(&self) -> bool {
        self.floor_texture >= 0
    }

    /// True when this sector has a ceiling surface (non-negative texture).
    pub fn has_ceiling(&self) -> bool {
        self.ceiling_texture >= 0
    }
}
pub fn find<'s>(sectors: &'s Vec<Sector>, sector: &'s Sector, x: f32, y: f32) -> &'s Sector {
for i in sector.inside.iter().copied() {
let sector = §ors[i];
if sector.contains(x, y) {
return find(sectors, sector, x, y);
}
}
return sector;
}
|
use crate::mcc::agent::mcc_agent::MCCAgent;
/// A bounded FIFO of MCC agents.
///
/// New agents are appended at the back; once the queue grows past
/// `max_items_limit`, the oldest entries at the front are discarded.
#[derive(Clone)]
pub struct AgentQueue {
    agents: Vec<MCCAgent>,
    // Round-robin cursor used by `get_children` to pick parents.
    current_agent_index: usize,
    pub max_items_limit: u32,
    // Monotonic counter assigning unique ids to offspring.
    total_individuals_added: u32,
}

impl AgentQueue {
    /// Creates a queue seeded with `mcc_agents`, capped at `max_items_limit`.
    pub fn new(mcc_agents: Vec<MCCAgent>, max_items_limit: u32) -> AgentQueue {
        let total_individuals_added = mcc_agents.len() as u32;
        AgentQueue {
            agents: mcc_agents,
            current_agent_index: 0,
            max_items_limit,
            total_individuals_added,
        }
    }

    /// Iterates over the queued agents, oldest first.
    pub fn iter(&self) -> impl Iterator<Item = &MCCAgent> {
        self.agents.iter()
    }

    /// Number of agents currently queued.
    pub fn len(&self) -> usize {
        self.agents.len()
    }

    /// True when the queue holds no agents (companion to `len`).
    pub fn is_empty(&self) -> bool {
        self.agents.is_empty()
    }

    /// Appends `agent`, evicting the oldest entries when the cap is exceeded.
    pub fn push(&mut self, agent: MCCAgent) {
        self.agents.push(agent);
        if self.agents.len() as u32 >= self.max_items_limit {
            self.remove_oldest(self.agents.len() - self.max_items_limit as usize);
        }
    }

    /// Drops the `amount` oldest agents and re-anchors the cursor.
    fn remove_oldest(&mut self, amount: usize) {
        // One O(n) drain instead of `remove(0)` in a loop, which is O(n^2).
        self.agents.drain(0..amount);
        if amount > self.current_agent_index {
            self.current_agent_index = 0;
        } else {
            self.current_agent_index -= amount;
        }
    }

    /// Clones `amount` agents round-robin from the queue and turns the
    /// clones into fresh, mutated, not-yet-viable children with new ids.
    pub fn get_children(&mut self, amount: usize) -> Vec<MCCAgent> {
        let mut children: Vec<MCCAgent> = vec![];
        for _ in 0..amount {
            if self.current_agent_index >= self.agents.len() {
                self.current_agent_index = 0;
            }
            children.push(self.agents[self.current_agent_index].clone());
            // NOTE(review): wraps on `max_items_limit`, not `agents.len()`;
            // the bounds reset above keeps the index valid either way.
            self.current_agent_index =
                (self.current_agent_index + 1) % self.max_items_limit as usize;
        }
        for child in children.iter_mut() {
            child.id = self.total_individuals_added;
            self.total_individuals_added += 1;
            child.mutate();
            child.viable = false;
        }
        children
    }

    /// Link count of the largest genome in the queue.
    ///
    /// # Panics
    /// Panics if the queue is empty.
    pub fn get_largest_size(&self) -> u32 {
        self.agents
            .iter()
            .map(|a| a.genome.links.len())
            .max()
            .expect("get_largest_size called on an empty AgentQueue") as u32
    }

    /// Link count of the smallest genome in the queue.
    ///
    /// # Panics
    /// Panics if the queue is empty.
    pub fn get_smallest_size(&self) -> u32 {
        self.agents
            .iter()
            .map(|a| a.genome.links.len())
            .min()
            .expect("get_smallest_size called on an empty AgentQueue") as u32
    }

    /// Mean genome link count across all queued agents (NaN when empty,
    /// matching the original 0/0 division).
    pub fn get_average_size(&self) -> f64 {
        let sum: usize = self.agents.iter().map(|a| a.genome.links.len()).sum();
        sum as f64 / self.agents.len() as f64
    }
}
|
extern crate bindgen;
extern crate cc;
use std::env;
use std::path::PathBuf;
/// Build script: compiles the bundled `snes_spc` C++ sources into a static
/// library and generates Rust FFI bindings from its public C header.
fn main() {
    // Rebuild when the public C header changes.
    println!("cargo:rerun-if-changed=src/c/snes_spc/spc.h");
    // Compile the emulator core; exceptions/RTTI disabled for a C-style ABI.
    cc::Build::new()
        .include("src/c/")
        .flag("-fPIC")
        .flag("-fno-exceptions")
        // .flag("-Wno-implicit-fallthrough")
        .flag("-fno-rtti")
        .flag("-Wall")
        // .flag("-Wextra")
        .flag("-std=c++11")
        // NOTE(review): -D defines are passed via .flag(); cc's .define()
        // would be the portable alternative — confirm MSVC is not a target.
        .flag("-DNDEBUG")
        .flag("-DSPC_ISOLATED_ECHO_BUFFER")
        .flag("-DBLARGG_BUILD_DLL")
        .opt_level(3)
        .cpp(true)
        .file("src/c/snes_spc/SNES_SPC.cpp")
        .file("src/c/snes_spc/SNES_SPC_misc.cpp")
        .file("src/c/snes_spc/SNES_SPC_state.cpp")
        .file("src/c/snes_spc/SPC_DSP.cpp")
        .file("src/c/snes_spc/SPC_Filter.cpp")
        .file("src/c/snes_spc/dsp.cpp")
        .file("src/c/snes_spc/spc.cpp")
        .compile("libspc.a");
    // Generate Rust bindings with value-type derives enabled.
    let bindings = bindgen::Builder::default()
        .header("src/c/snes_spc/spc.h")
        .derive_copy(true)
        .derive_debug(true)
        .derive_eq(true)
        .derive_hash(true)
        .derive_ord(true)
        .generate()
        .expect("Unable to generate bindings!");
    // Write the bindings into OUT_DIR for `include!` by the crate.
    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_path.join("bindings.rs"))
        .expect("Couldn't write bindings");
}
|
/// Returns `true` when every space-separated word in `input` is unique.
///
/// Duplicate words (including repeated empty strings produced by
/// consecutive spaces) make the passphrase invalid. Uses a hash set for an
/// O(n) scan instead of the previous O(n^2) pairwise comparison.
pub fn high_entropy_pass(input: &str) -> bool {
    let mut seen = std::collections::HashSet::new();
    // `insert` returns false when the word was already present,
    // which short-circuits `all` exactly like the old early return.
    input.split(' ').all(|word| seen.insert(word))
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn check_valid_passphrase() {
        // (passphrase, expected validity) pairs covering single words,
        // exact duplicates, and near-duplicates ("aa" vs "aaa").
        let cases = [
            ("a", true),
            ("a a", false),
            ("a b", true),
            ("aa bb cc dd ee", true),
            ("aa bb cc dd aa", false),
            ("aa bb cc dd aaa", true),
        ];
        for &(phrase, expected) in cases.iter() {
            assert_eq!(high_entropy_pass(phrase), expected, "phrase: {:?}", phrase);
        }
    }
}
|
// Waveform lookup tables — NOTE(review): presumably update waveforms for an
// e-paper display controller (VCOM plus the four pixel transitions
// white->white, black->white, black->black, white->black, going by the
// names). Each line after any header bytes looks like a 6-byte phase entry;
// confirm the exact meaning against the panel/controller datasheet.
#[rustfmt::skip]
pub(crate) const LUT_VCOM_DC: [u8; 44] = [
    0x00, 0x00,
    0x00, 0x1A, 0x1A, 0x00, 0x00, 0x01,
    0x00, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x00, 0x0E, 0x01, 0x0E, 0x01, 0x10,
    0x00, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x00, 0x04, 0x10, 0x00, 0x00, 0x05,
    0x00, 0x03, 0x0E, 0x00, 0x00, 0x0A,
    0x00, 0x23, 0x00, 0x00, 0x00, 0x01,
];
// White-to-white transition waveform.
#[rustfmt::skip]
pub(crate) const LUT_WW: [u8; 42] =[
    0x90, 0x1A, 0x1A, 0x00, 0x00, 0x01,
    0x40, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x84, 0x0E, 0x01, 0x0E, 0x01, 0x10,
    0x80, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x00, 0x04, 0x10, 0x00, 0x00, 0x05,
    0x00, 0x03, 0x0E, 0x00, 0x00, 0x0A,
    0x00, 0x23, 0x00, 0x00, 0x00, 0x01,
];
// Black-to-white transition waveform.
#[rustfmt::skip]
pub(crate) const LUT_BW: [u8; 42] =[
    0xA0, 0x1A, 0x1A, 0x00, 0x00, 0x01,
    0x00, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x84, 0x0E, 0x01, 0x0E, 0x01, 0x10,
    0x90, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0xB0, 0x04, 0x10, 0x00, 0x00, 0x05,
    0xB0, 0x03, 0x0E, 0x00, 0x00, 0x0A,
    0xC0, 0x23, 0x00, 0x00, 0x00, 0x01,
];
// Black-to-black transition waveform (identical bytes to LUT_WW here).
#[rustfmt::skip]
pub(crate) const LUT_BB: [u8; 42] =[
    0x90, 0x1A, 0x1A, 0x00, 0x00, 0x01,
    0x40, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x84, 0x0E, 0x01, 0x0E, 0x01, 0x10,
    0x80, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x00, 0x04, 0x10, 0x00, 0x00, 0x05,
    0x00, 0x03, 0x0E, 0x00, 0x00, 0x0A,
    0x00, 0x23, 0x00, 0x00, 0x00, 0x01,
];
// White-to-black transition waveform.
#[rustfmt::skip]
pub(crate) const LUT_WB: [u8; 42] =[
    0x90, 0x1A, 0x1A, 0x00, 0x00, 0x01,
    0x20, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x84, 0x0E, 0x01, 0x0E, 0x01, 0x10,
    0x10, 0x0A, 0x0A, 0x00, 0x00, 0x08,
    0x00, 0x04, 0x10, 0x00, 0x00, 0x05,
    0x00, 0x03, 0x0E, 0x00, 0x00, 0x0A,
    0x00, 0x23, 0x00, 0x00, 0x00, 0x01,
];
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Generated (AutoRust) wire models. The doc comments below are inferred
// from field names and shapes — confirm details against the service's
// OpenAPI specification before relying on them.

/// One detailed error entry (appears in `ApiError::details`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiErrorBase {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
}

/// Service-internal error information (exception type and detail text).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InnerError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub exceptiontype: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub errordetail: Option<String>,
}

/// Top-level API error: summary fields plus a list of detailed entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiError {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ApiErrorBase>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub innererror: Option<InnerError>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
}

/// Error envelope: the wire format nests the error under an `error` key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ApiError>,
}

/// SKU (name/tier) reported for a role instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceSku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
}

/// Reference to another resource by its id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}

/// Network interfaces attached to a role instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceNetworkProfile {
    #[serde(rename = "networkInterfaces", default, skip_serializing_if = "Vec::is_empty")]
    pub network_interfaces: Vec<SubResource>,
}

/// A single status record: code, display text, message, time and severity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceInstanceViewStatus {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(rename = "displayStatus", default, skip_serializing_if = "Option::is_none")]
    pub display_status: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub level: Option<resource_instance_view_status::Level>,
}

pub mod resource_instance_view_status {
    use super::*;
    /// Severity level of a status record.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Level {
        Info,
        Warning,
        Error,
    }
}

/// Runtime view of a role instance: placement domains, private id, statuses.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceInstanceView {
    #[serde(rename = "platformUpdateDomain", default, skip_serializing_if = "Option::is_none")]
    pub platform_update_domain: Option<i32>,
    #[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
    pub platform_fault_domain: Option<i32>,
    #[serde(rename = "privateId", default, skip_serializing_if = "Option::is_none")]
    pub private_id: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub statuses: Vec<ResourceInstanceViewStatus>,
}
/// Properties of a role instance: network profile and runtime view.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceProperties {
    #[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
    pub network_profile: Option<RoleInstanceNetworkProfile>,
    #[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
    pub instance_view: Option<RoleInstanceInstanceView>,
}

/// A cloud-service role instance resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstance {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<InstanceSku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RoleInstanceProperties>,
}

/// One page of role instances plus an optional continuation link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceListResult {
    pub value: Vec<RoleInstance>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// SKU of a cloud-service role: name, tier and instance capacity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleSku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<i64>,
}

/// Properties of a cloud-service role (currently only its unique id).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProperties {
    #[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
    pub unique_id: Option<String>,
}

/// A cloud-service role resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRole {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<CloudServiceRoleSku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CloudServiceRoleProperties>,
}

/// One page of cloud-service roles plus an optional continuation link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleListResult {
    pub value: Vec<CloudServiceRole>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// How role instances are brought up to date when the service is updated.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CloudServiceUpgradeMode {
    Auto,
    Manual,
    Simultaneous,
}

/// Name and SKU of one role inside a role profile.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProfileProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<CloudServiceRoleSku>,
}

/// The set of roles making up the cloud service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProfile {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub roles: Vec<CloudServiceRoleProfileProperties>,
}

/// A certificate referenced by its key-vault URL.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceVaultCertificate {
    #[serde(rename = "certificateUrl", default, skip_serializing_if = "Option::is_none")]
    pub certificate_url: Option<String>,
}

/// A source vault together with the certificates taken from it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceVaultSecretGroup {
    #[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
    pub source_vault: Option<SubResource>,
    #[serde(rename = "vaultCertificates", default, skip_serializing_if = "Vec::is_empty")]
    pub vault_certificates: Vec<CloudServiceVaultCertificate>,
}

/// OS profile: the vault secret groups installed on the instances.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceOsProfile {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub secrets: Vec<CloudServiceVaultSecretGroup>,
}

/// Frontend IP settings: public IP, subnet, or a private address.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerFrontendIpConfigurationProperties {
    #[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<SubResource>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<SubResource>,
    #[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
}

/// Named frontend IP configuration of a load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerFrontendIpConfiguration {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoadBalancerFrontendIpConfigurationProperties>,
}

/// Frontend IP configurations of one load balancer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerConfigurationProperties {
    #[serde(rename = "frontendIPConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub frontend_ip_configurations: Vec<LoadBalancerFrontendIpConfiguration>,
}

/// Named load-balancer configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerConfiguration {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoadBalancerConfigurationProperties>,
}

/// Network profile: load balancers plus an optional swap target service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceNetworkProfile {
    #[serde(rename = "loadBalancerConfigurations", default, skip_serializing_if = "Vec::is_empty")]
    pub load_balancer_configurations: Vec<LoadBalancerConfiguration>,
    #[serde(rename = "swappableCloudService", default, skip_serializing_if = "Option::is_none")]
    pub swappable_cloud_service: Option<SubResource>,
}

/// A key-vault reference: the vault plus the URL of one secret in it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceVaultAndSecretReference {
    #[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
    pub source_vault: Option<SubResource>,
    #[serde(rename = "secretUrl", default, skip_serializing_if = "Option::is_none")]
    pub secret_url: Option<String>,
}
/// Settings of a cloud-service extension: publisher/type/version, settings
/// payloads (plain and protected), and the roles it applies to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceExtensionProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub publisher: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
    pub type_handler_version: Option<String>,
    #[serde(rename = "autoUpgradeMinorVersion", default, skip_serializing_if = "Option::is_none")]
    pub auto_upgrade_minor_version: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub settings: Option<String>,
    #[serde(rename = "protectedSettings", default, skip_serializing_if = "Option::is_none")]
    pub protected_settings: Option<String>,
    #[serde(rename = "protectedSettingsFromKeyVault", default, skip_serializing_if = "Option::is_none")]
    pub protected_settings_from_key_vault: Option<CloudServiceVaultAndSecretReference>,
    #[serde(rename = "forceUpdateTag", default, skip_serializing_if = "Option::is_none")]
    pub force_update_tag: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "rolesAppliedTo", default, skip_serializing_if = "Vec::is_empty")]
    pub roles_applied_to: Vec<String>,
}

/// A named extension attached to the cloud service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Extension {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CloudServiceExtensionProperties>,
}

/// The set of extensions installed on the cloud service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceExtensionProfile {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub extensions: Vec<Extension>,
}

/// Full configuration of a cloud service: package, configuration,
/// upgrade mode and the role/os/network/extension profiles.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceProperties {
    #[serde(rename = "packageUrl", default, skip_serializing_if = "Option::is_none")]
    pub package_url: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub configuration: Option<String>,
    #[serde(rename = "configurationUrl", default, skip_serializing_if = "Option::is_none")]
    pub configuration_url: Option<String>,
    #[serde(rename = "startCloudService", default, skip_serializing_if = "Option::is_none")]
    pub start_cloud_service: Option<bool>,
    #[serde(rename = "upgradeMode", default, skip_serializing_if = "Option::is_none")]
    pub upgrade_mode: Option<CloudServiceUpgradeMode>,
    #[serde(rename = "roleProfile", default, skip_serializing_if = "Option::is_none")]
    pub role_profile: Option<CloudServiceRoleProfile>,
    #[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
    pub os_profile: Option<CloudServiceOsProfile>,
    #[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
    pub network_profile: Option<CloudServiceNetworkProfile>,
    #[serde(rename = "extensionProfile", default, skip_serializing_if = "Option::is_none")]
    pub extension_profile: Option<CloudServiceExtensionProfile>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<String>,
    #[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
    pub unique_id: Option<String>,
}

/// The cloud-service resource itself; `location` is required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudService {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    pub location: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CloudServiceProperties>,
}

/// Patch body for a cloud service (only tags can be updated).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}

/// A status code and the number of instances reporting it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StatusCodeCount {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i32>,
}

/// Aggregated status counts across instances.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceViewStatusesSummary {
    #[serde(rename = "statusesSummary", default, skip_serializing_if = "Vec::is_empty")]
    pub statuses_summary: Vec<StatusCodeCount>,
}

/// Runtime view of the whole cloud service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceInstanceView {
    #[serde(rename = "roleInstance", default, skip_serializing_if = "Option::is_none")]
    pub role_instance: Option<InstanceViewStatusesSummary>,
    #[serde(rename = "sdkVersion", default, skip_serializing_if = "Option::is_none")]
    pub sdk_version: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub statuses: Vec<ResourceInstanceViewStatus>,
}

/// One page of cloud services plus an optional continuation link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceListResult {
    pub value: Vec<CloudService>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Request body naming a set of role instances to operate on.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstances {
    #[serde(rename = "roleInstances")]
    pub role_instances: Vec<String>,
}

/// An update domain resource (id and name only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateDomain {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

/// One page of update domains plus an optional continuation link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateDomainListResult {
    pub value: Vec<UpdateDomain>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
|
use std::{
future::Future,
mem,
pin::Pin,
task::{Context, Poll},
};
use futures_core::ready;
use futures_util::{future, FutureExt, Sink, SinkExt};
use tokio::{task::JoinHandle, time::timeout};
/// Returns a future that will flush the sink, even if flushing is temporarily completed.
/// Finishes only if the sink throws an error.
pub(crate) fn keep_flushing<'a, T, S: Sink<T> + Unpin + 'a>(
    mut s: S,
) -> impl Future<Output = S::Error> + 'a {
    future::poll_fn(move |cx| match s.poll_flush_unpin(cx) {
        // The first flush error becomes the future's output.
        Poll::Ready(Err(e)) => Poll::Ready(e),
        // Stay pending on success: Pending without a registered waker means
        // this future relies on being re-polled externally (e.g. inside a
        // select! loop) — NOTE(review): confirm that is how callers drive it.
        _ => Poll::Pending,
    })
}
/// Wrapper around a `JoinHandle` that aborts the task when dropped,
/// instead of detaching it like a bare handle would.
pub struct CancelOnDrop<T>(pub JoinHandle<T>);
impl<T> Future for CancelOnDrop<T> {
    type Output = <JoinHandle<T> as Future>::Output;
    // Delegates directly to the underlying join handle.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.0.poll_unpin(cx)
    }
}
impl<T> Drop for CancelOnDrop<T> {
    fn drop(&mut self) {
        // Request cancellation of the spawned task.
        self.0.abort();
    }
}
/// A `JoinHandle` wrapper that, when dropped, gives the task `timeout` to
/// finish before cancelling it (see the `Drop` impl below).
pub struct TimeoutOnDrop<T: Send + 'static> {
    // `None` once the task completed or the caller took the handle back.
    handle: Option<JoinHandle<T>>,
    timeout: tokio::time::Duration,
}
impl<T: Send + 'static> TimeoutOnDrop<T> {
    /// Wraps `handle`, allowing it `timeout` to finish after this value drops.
    pub fn new(handle: JoinHandle<T>, timeout: tokio::time::Duration) -> Self {
        Self {
            handle: Some(handle),
            timeout,
        }
    }
    /// Takes back the raw handle, disabling the drop-timeout behavior.
    pub fn take(&mut self) -> Option<JoinHandle<T>> {
        self.handle.take()
    }
}
impl<T: Send + 'static> Future for TimeoutOnDrop<T> {
    type Output = <JoinHandle<T> as Future>::Output;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Panics if polled again after completion (handle is `None` then).
        let r = ready!(self
            .handle
            .as_mut()
            .expect("Polled after ready")
            .poll_unpin(cx));
        // Mark completed so Drop does not try to time the task out.
        self.handle = None;
        Poll::Ready(r)
    }
}
impl<T: Send + 'static> Drop for TimeoutOnDrop<T> {
    fn drop(&mut self) {
        let mut handle = if let Some(handle) = self.handle.take() {
            handle
        } else {
            return;
        };
        if (&mut handle).now_or_never().is_some() {
            // Already finished
            return;
        }
        match tokio::runtime::Handle::try_current() {
            Ok(h) => {
                // Give the task `timeout` to finish on its own; when the
                // timeout fires (or the spawned future is dropped),
                // CancelOnDrop aborts it.
                h.spawn(timeout(self.timeout, CancelOnDrop(handle)));
            }
            Err(_) => {
                // Not in tokio context, can't spawn
                handle.abort();
            }
        }
    }
}
/// Abstraction over values that have a well-defined successor.
pub trait Seq {
    fn next(&self) -> Self;
}

// Implements `Seq` for unsigned integers, wrapping around on overflow.
macro_rules! impl_seq {
    ($($ty:ty)*) => { $(
        impl Seq for $ty {
            fn next(&self) -> Self { (*self).wrapping_add(1) }
        }
    )* }
}

impl_seq!(u8 u16 u32 u64 usize);

/// Hands out consecutive sequence numbers, starting from a seed value.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct SeqGenerator<T: Seq>(T);

impl<T: Seq> SeqGenerator<T> {
    /// Creates a generator whose first emitted value is `value`.
    pub fn new(value: T) -> Self {
        SeqGenerator(value)
    }

    /// Returns the current value and advances the internal state by one.
    pub fn get(&mut self) -> T {
        let successor = self.0.next();
        std::mem::replace(&mut self.0, successor)
    }
}
|
// Auto-generated (svd2rust-style) register accessors; do not hand-edit logic.
#[doc = "Reader of register RCC_MC_APB2LPENCLRR"]
pub type R = crate::R<u32, super::RCC_MC_APB2LPENCLRR>;
#[doc = "Writer for register RCC_MC_APB2LPENCLRR"]
pub type W = crate::W<u32, super::RCC_MC_APB2LPENCLRR>;
#[doc = "Register RCC_MC_APB2LPENCLRR `reset()`'s with value 0x0137_271f"]
impl crate::ResetValue for super::RCC_MC_APB2LPENCLRR {
    type Type = u32;
    // Value the hardware loads into this register on reset.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x0137_271f
    }
}
/// TIM1LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TIM1LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<TIM1LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: TIM1LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `TIM1LPEN`
pub type TIM1LPEN_R = crate::R<bool, TIM1LPEN_A>;
impl TIM1LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> TIM1LPEN_A {
        if self.bits {
            TIM1LPEN_A::B_0X1
        } else {
            TIM1LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TIM1LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TIM1LPEN_A::B_0X1
    }
}
/// Write proxy for field `TIM1LPEN`
pub struct TIM1LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TIM1LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: TIM1LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(TIM1LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(TIM1LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 0)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
/// TIM8LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TIM8LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<TIM8LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: TIM8LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `TIM8LPEN`
pub type TIM8LPEN_R = crate::R<bool, TIM8LPEN_A>;
impl TIM8LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> TIM8LPEN_A {
        if self.bits {
            TIM8LPEN_A::B_0X1
        } else {
            TIM8LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TIM8LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TIM8LPEN_A::B_0X1
    }
}
/// Write proxy for field `TIM8LPEN`
pub struct TIM8LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TIM8LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: TIM8LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(TIM8LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(TIM8LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 1)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
/// TIM15LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TIM15LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<TIM15LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: TIM15LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `TIM15LPEN`
pub type TIM15LPEN_R = crate::R<bool, TIM15LPEN_A>;
impl TIM15LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> TIM15LPEN_A {
        if self.bits {
            TIM15LPEN_A::B_0X1
        } else {
            TIM15LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TIM15LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TIM15LPEN_A::B_0X1
    }
}
/// Write proxy for field `TIM15LPEN`
pub struct TIM15LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TIM15LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: TIM15LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(TIM15LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(TIM15LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 2)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
/// TIM16LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TIM16LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<TIM16LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: TIM16LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `TIM16LPEN`
pub type TIM16LPEN_R = crate::R<bool, TIM16LPEN_A>;
impl TIM16LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> TIM16LPEN_A {
        if self.bits {
            TIM16LPEN_A::B_0X1
        } else {
            TIM16LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TIM16LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TIM16LPEN_A::B_0X1
    }
}
/// Write proxy for field `TIM16LPEN`
pub struct TIM16LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TIM16LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: TIM16LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(TIM16LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(TIM16LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 3)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
/// TIM17LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TIM17LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<TIM17LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: TIM17LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `TIM17LPEN`
pub type TIM17LPEN_R = crate::R<bool, TIM17LPEN_A>;
impl TIM17LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> TIM17LPEN_A {
        if self.bits {
            TIM17LPEN_A::B_0X1
        } else {
            TIM17LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TIM17LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TIM17LPEN_A::B_0X1
    }
}
/// Write proxy for field `TIM17LPEN`
pub struct TIM17LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> TIM17LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: TIM17LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(TIM17LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(TIM17LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 4)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
/// SPI1LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SPI1LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SPI1LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SPI1LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SPI1LPEN`
pub type SPI1LPEN_R = crate::R<bool, SPI1LPEN_A>;
impl SPI1LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SPI1LPEN_A {
        if self.bits {
            SPI1LPEN_A::B_0X1
        } else {
            SPI1LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SPI1LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SPI1LPEN_A::B_0X1
    }
}
/// Write proxy for field `SPI1LPEN`
pub struct SPI1LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SPI1LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SPI1LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SPI1LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SPI1LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 8)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
/// SPI4LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SPI4LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SPI4LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SPI4LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SPI4LPEN`
pub type SPI4LPEN_R = crate::R<bool, SPI4LPEN_A>;
impl SPI4LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SPI4LPEN_A {
        if self.bits {
            SPI4LPEN_A::B_0X1
        } else {
            SPI4LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SPI4LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SPI4LPEN_A::B_0X1
    }
}
/// Write proxy for field `SPI4LPEN`
pub struct SPI4LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SPI4LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SPI4LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SPI4LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SPI4LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 9)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
/// SPI5LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SPI5LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SPI5LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SPI5LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SPI5LPEN`
pub type SPI5LPEN_R = crate::R<bool, SPI5LPEN_A>;
impl SPI5LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SPI5LPEN_A {
        if self.bits {
            SPI5LPEN_A::B_0X1
        } else {
            SPI5LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SPI5LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SPI5LPEN_A::B_0X1
    }
}
/// Write proxy for field `SPI5LPEN`
pub struct SPI5LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SPI5LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SPI5LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SPI5LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SPI5LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 10)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
/// USART6LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum USART6LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<USART6LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: USART6LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `USART6LPEN`
pub type USART6LPEN_R = crate::R<bool, USART6LPEN_A>;
impl USART6LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> USART6LPEN_A {
        if self.bits {
            USART6LPEN_A::B_0X1
        } else {
            USART6LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == USART6LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == USART6LPEN_A::B_0X1
    }
}
/// Write proxy for field `USART6LPEN`
pub struct USART6LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> USART6LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: USART6LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(USART6LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(USART6LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 13)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
        self.w
    }
}
/// SAI1LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SAI1LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SAI1LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SAI1LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SAI1LPEN`
pub type SAI1LPEN_R = crate::R<bool, SAI1LPEN_A>;
impl SAI1LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SAI1LPEN_A {
        if self.bits {
            SAI1LPEN_A::B_0X1
        } else {
            SAI1LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SAI1LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SAI1LPEN_A::B_0X1
    }
}
/// Write proxy for field `SAI1LPEN`
pub struct SAI1LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SAI1LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SAI1LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SAI1LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SAI1LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 16)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
/// SAI2LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SAI2LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SAI2LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SAI2LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SAI2LPEN`
pub type SAI2LPEN_R = crate::R<bool, SAI2LPEN_A>;
impl SAI2LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SAI2LPEN_A {
        if self.bits {
            SAI2LPEN_A::B_0X1
        } else {
            SAI2LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SAI2LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SAI2LPEN_A::B_0X1
    }
}
/// Write proxy for field `SAI2LPEN`
pub struct SAI2LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SAI2LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SAI2LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SAI2LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SAI2LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 17)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
        self.w
    }
}
/// SAI3LPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SAI3LPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<SAI3LPEN_A> for bool {
    #[inline(always)]
    fn from(variant: SAI3LPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `SAI3LPEN`
pub type SAI3LPEN_R = crate::R<bool, SAI3LPEN_A>;
impl SAI3LPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> SAI3LPEN_A {
        if self.bits {
            SAI3LPEN_A::B_0X1
        } else {
            SAI3LPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == SAI3LPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == SAI3LPEN_A::B_0X1
    }
}
/// Write proxy for field `SAI3LPEN`
pub struct SAI3LPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> SAI3LPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: SAI3LPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(SAI3LPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(SAI3LPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 18)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
        self.w
    }
}
/// DFSDMLPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DFSDMLPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<DFSDMLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: DFSDMLPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `DFSDMLPEN`
pub type DFSDMLPEN_R = crate::R<bool, DFSDMLPEN_A>;
impl DFSDMLPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> DFSDMLPEN_A {
        if self.bits {
            DFSDMLPEN_A::B_0X1
        } else {
            DFSDMLPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == DFSDMLPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == DFSDMLPEN_A::B_0X1
    }
}
/// Write proxy for field `DFSDMLPEN`
pub struct DFSDMLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> DFSDMLPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: DFSDMLPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(DFSDMLPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(DFSDMLPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 20)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
        self.w
    }
}
/// ADFSDMLPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ADFSDMLPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<ADFSDMLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: ADFSDMLPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `ADFSDMLPEN`
pub type ADFSDMLPEN_R = crate::R<bool, ADFSDMLPEN_A>;
impl ADFSDMLPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> ADFSDMLPEN_A {
        if self.bits {
            ADFSDMLPEN_A::B_0X1
        } else {
            ADFSDMLPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == ADFSDMLPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == ADFSDMLPEN_A::B_0X1
    }
}
/// Write proxy for field `ADFSDMLPEN`
pub struct ADFSDMLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> ADFSDMLPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: ADFSDMLPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(ADFSDMLPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(ADFSDMLPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 21)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 21)) | (((value as u32) & 0x01) << 21);
        self.w
    }
}
/// FDCANLPEN
///
/// Value on reset: 1
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FDCANLPEN_A {
    /// 0: Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    B_0X0 = 0,
    /// 1: Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    B_0X1 = 1,
}
impl From<FDCANLPEN_A> for bool {
    #[inline(always)]
    fn from(variant: FDCANLPEN_A) -> Self {
        variant as u8 != 0
    }
}
/// Reader of field `FDCANLPEN`
pub type FDCANLPEN_R = crate::R<bool, FDCANLPEN_A>;
impl FDCANLPEN_R {
    /// Get enumerated values variant
    #[inline(always)]
    pub fn variant(&self) -> FDCANLPEN_A {
        if self.bits {
            FDCANLPEN_A::B_0X1
        } else {
            FDCANLPEN_A::B_0X0
        }
    }
    /// Checks if the value of the field is `B_0X0`
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == FDCANLPEN_A::B_0X0
    }
    /// Checks if the value of the field is `B_0X1`
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == FDCANLPEN_A::B_0X1
    }
}
/// Write proxy for field `FDCANLPEN`
pub struct FDCANLPEN_W<'a> {
    w: &'a mut W,
}
impl<'a> FDCANLPEN_W<'a> {
    /// Writes `variant` to the field
    #[inline(always)]
    pub fn variant(self, variant: FDCANLPEN_A) -> &'a mut W {
        self.bit(variant.into())
    }
    /// Writing has no effect; reading means the peripheral clocks are disabled in CSLEEP
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(FDCANLPEN_A::B_0X0)
    }
    /// Writing disables the peripheral clocks in CSLEEP; reading means the peripheral clocks are enabled in CSLEEP
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(FDCANLPEN_A::B_0X1)
    }
    /// Sets the field bit
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    /// Writes raw bits to the field (bit 24)
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
        self.w
    }
}
impl R {
    /// Bit 0 - TIM1LPEN
    #[inline(always)]
    pub fn tim1lpen(&self) -> TIM1LPEN_R {
        TIM1LPEN_R::new((self.bits & (1 << 0)) != 0)
    }
    /// Bit 1 - TIM8LPEN
    #[inline(always)]
    pub fn tim8lpen(&self) -> TIM8LPEN_R {
        TIM8LPEN_R::new((self.bits & (1 << 1)) != 0)
    }
    /// Bit 2 - TIM15LPEN
    #[inline(always)]
    pub fn tim15lpen(&self) -> TIM15LPEN_R {
        TIM15LPEN_R::new((self.bits & (1 << 2)) != 0)
    }
    /// Bit 3 - TIM16LPEN
    #[inline(always)]
    pub fn tim16lpen(&self) -> TIM16LPEN_R {
        TIM16LPEN_R::new((self.bits & (1 << 3)) != 0)
    }
    /// Bit 4 - TIM17LPEN
    #[inline(always)]
    pub fn tim17lpen(&self) -> TIM17LPEN_R {
        TIM17LPEN_R::new((self.bits & (1 << 4)) != 0)
    }
    /// Bit 8 - SPI1LPEN
    #[inline(always)]
    pub fn spi1lpen(&self) -> SPI1LPEN_R {
        SPI1LPEN_R::new((self.bits & (1 << 8)) != 0)
    }
    /// Bit 9 - SPI4LPEN
    #[inline(always)]
    pub fn spi4lpen(&self) -> SPI4LPEN_R {
        SPI4LPEN_R::new((self.bits & (1 << 9)) != 0)
    }
    /// Bit 10 - SPI5LPEN
    #[inline(always)]
    pub fn spi5lpen(&self) -> SPI5LPEN_R {
        SPI5LPEN_R::new((self.bits & (1 << 10)) != 0)
    }
    /// Bit 13 - USART6LPEN
    #[inline(always)]
    pub fn usart6lpen(&self) -> USART6LPEN_R {
        USART6LPEN_R::new((self.bits & (1 << 13)) != 0)
    }
    /// Bit 16 - SAI1LPEN
    #[inline(always)]
    pub fn sai1lpen(&self) -> SAI1LPEN_R {
        SAI1LPEN_R::new((self.bits & (1 << 16)) != 0)
    }
    /// Bit 17 - SAI2LPEN
    #[inline(always)]
    pub fn sai2lpen(&self) -> SAI2LPEN_R {
        SAI2LPEN_R::new((self.bits & (1 << 17)) != 0)
    }
    /// Bit 18 - SAI3LPEN
    #[inline(always)]
    pub fn sai3lpen(&self) -> SAI3LPEN_R {
        SAI3LPEN_R::new((self.bits & (1 << 18)) != 0)
    }
    /// Bit 20 - DFSDMLPEN
    #[inline(always)]
    pub fn dfsdmlpen(&self) -> DFSDMLPEN_R {
        DFSDMLPEN_R::new((self.bits & (1 << 20)) != 0)
    }
    /// Bit 21 - ADFSDMLPEN
    #[inline(always)]
    pub fn adfsdmlpen(&self) -> ADFSDMLPEN_R {
        ADFSDMLPEN_R::new((self.bits & (1 << 21)) != 0)
    }
    /// Bit 24 - FDCANLPEN
    #[inline(always)]
    pub fn fdcanlpen(&self) -> FDCANLPEN_R {
        FDCANLPEN_R::new((self.bits & (1 << 24)) != 0)
    }
}
impl W {
    // Write-proxy accessors (svd2rust-generated): each returns a proxy that
    // borrows this writer and mutates the corresponding bit via its `bit()`.
    #[doc = "Bit 0 - TIM1LPEN"]
    #[inline(always)]
    pub fn tim1lpen(&mut self) -> TIM1LPEN_W {
        TIM1LPEN_W { w: self }
    }
    #[doc = "Bit 1 - TIM8LPEN"]
    #[inline(always)]
    pub fn tim8lpen(&mut self) -> TIM8LPEN_W {
        TIM8LPEN_W { w: self }
    }
    #[doc = "Bit 2 - TIM15LPEN"]
    #[inline(always)]
    pub fn tim15lpen(&mut self) -> TIM15LPEN_W {
        TIM15LPEN_W { w: self }
    }
    #[doc = "Bit 3 - TIM16LPEN"]
    #[inline(always)]
    pub fn tim16lpen(&mut self) -> TIM16LPEN_W {
        TIM16LPEN_W { w: self }
    }
    #[doc = "Bit 4 - TIM17LPEN"]
    #[inline(always)]
    pub fn tim17lpen(&mut self) -> TIM17LPEN_W {
        TIM17LPEN_W { w: self }
    }
    #[doc = "Bit 8 - SPI1LPEN"]
    #[inline(always)]
    pub fn spi1lpen(&mut self) -> SPI1LPEN_W {
        SPI1LPEN_W { w: self }
    }
    #[doc = "Bit 9 - SPI4LPEN"]
    #[inline(always)]
    pub fn spi4lpen(&mut self) -> SPI4LPEN_W {
        SPI4LPEN_W { w: self }
    }
    #[doc = "Bit 10 - SPI5LPEN"]
    #[inline(always)]
    pub fn spi5lpen(&mut self) -> SPI5LPEN_W {
        SPI5LPEN_W { w: self }
    }
    #[doc = "Bit 13 - USART6LPEN"]
    #[inline(always)]
    pub fn usart6lpen(&mut self) -> USART6LPEN_W {
        USART6LPEN_W { w: self }
    }
    #[doc = "Bit 16 - SAI1LPEN"]
    #[inline(always)]
    pub fn sai1lpen(&mut self) -> SAI1LPEN_W {
        SAI1LPEN_W { w: self }
    }
    #[doc = "Bit 17 - SAI2LPEN"]
    #[inline(always)]
    pub fn sai2lpen(&mut self) -> SAI2LPEN_W {
        SAI2LPEN_W { w: self }
    }
    #[doc = "Bit 18 - SAI3LPEN"]
    #[inline(always)]
    pub fn sai3lpen(&mut self) -> SAI3LPEN_W {
        SAI3LPEN_W { w: self }
    }
    #[doc = "Bit 20 - DFSDMLPEN"]
    #[inline(always)]
    pub fn dfsdmlpen(&mut self) -> DFSDMLPEN_W {
        DFSDMLPEN_W { w: self }
    }
    #[doc = "Bit 21 - ADFSDMLPEN"]
    #[inline(always)]
    pub fn adfsdmlpen(&mut self) -> ADFSDMLPEN_W {
        ADFSDMLPEN_W { w: self }
    }
    #[doc = "Bit 24 - FDCANLPEN"]
    #[inline(always)]
    pub fn fdcanlpen(&mut self) -> FDCANLPEN_W {
        FDCANLPEN_W { w: self }
    }
}
|
use std::cmp;
#[derive(Debug, PartialEq)]
/// Cartesian coordinates of a square on the spiral; square 1 sits at (0, 0).
pub struct SquareLocation {
    pub x: i32,
    pub y: i32,
}
#[derive(Debug, Clone)]
/// One cell of the spiral, identified by its 1-based spiral index.
pub struct Square {
    pub index: usize,
    // Optional payload; nothing in this file ever sets it to Some.
    pub content: Option<u32>,
}
#[derive(Debug, Clone)]
/// The n-th concentric ring of the spiral: ring 0 is the single center
/// square, and ring n (n > 0) holds 8 * n squares.
pub struct Ring {
    pub n: u32,
    pub squares: Vec<Square>,
}
#[derive(Debug)]
/// Iterator that yields the rings of a number spiral, starting with ring 0.
pub struct SpiralRingIterator {
    // Most recently yielded ring; the next ring's indices are derived from it.
    curr_ring: Ring,
    // The `n` that the next call to `next()` will yield.
    next_ring_n: u32,
}
impl SpiralRingIterator {
    /// Creates an iterator positioned before ring 0, which consists of the
    /// single center square with index 1.
    pub fn new() -> SpiralRingIterator {
        let center = Square {
            index: 1,
            content: None,
        };
        SpiralRingIterator {
            curr_ring: Ring {
                n: 0,
                squares: vec![center],
            },
            next_ring_n: 0,
        }
    }
}
impl Iterator for SpiralRingIterator {
    type Item = Ring;
    /// Yields ring 0 (the pre-built center square) on the first call, then
    /// constructs each following ring from the last index of the current one.
    fn next(&mut self) -> Option<Ring> {
        // Ring 0 already exists from `new()`; just hand out a copy once.
        // (Clone is needed because we keep `curr_ring` as the seed for ring 1.)
        if self.next_ring_n == 0 {
            self.next_ring_n += 1;
            return Some(self.curr_ring.clone());
        }
        // The next ring starts right after the current ring's last index.
        let last_index_curr = self.curr_ring.squares.last().unwrap().index;
        let first_index_next = last_index_curr + 1;
        // Ring n has 8 * n squares; curr_ring.n + 1 is the n being built.
        // (Removed a redundant `as usize` cast — this is already usize.)
        let length_next = 8 * (self.curr_ring.n as usize + 1);
        let next_squares = (first_index_next..)
            .take(length_next)
            .map(|index| Square {
                index,
                content: None,
            })
            .collect();
        let next_ring = Ring {
            n: self.next_ring_n,
            squares: next_squares,
        };
        self.next_ring_n += 1;
        // Keep a copy as the seed for the ring after this one.
        self.curr_ring = next_ring.clone();
        Some(next_ring)
    }
}
/// Computes the (x, y) location of `square_index`, given the ring that
/// contains it. Returns `None` when the index is outside the ring's range.
///
/// Walks the ring counter-clockwise from its first square, which sits at
/// (n, 1 - n) — the bottom of the right-hand edge (or (0, 0) for ring 0).
pub fn location_of_square_in_ring(ring: &Ring, square_index: usize) -> Option<SquareLocation> {
    if !(ring.squares.first().unwrap().index <= square_index
        && square_index <= ring.squares.last().unwrap().index)
    {
        return None;
    }
    // start with location of first square in this ring:
    let location_first_square = SquareLocation {
        x: ring.n as i32,
        y: if ring.n == 0 { 0 } else { 1 - (ring.n as i32) },
    };
    // step through the ring until the square
    let spiral_size = 2 * ring.n + 1;
    assert!(spiral_size > 0);
    // Underflow-safe: the range check above guarantees square_index >= first.
    let mut steps_to_take: u32 = square_index as u32 - ring.squares.first().unwrap().index as u32;
    let mut location_square = location_first_square;
    // follow spiral up the right-hand edge (the first leg is 2 short because
    // the start square is one above the corner; saturating_sub handles ring 0)
    let steps = cmp::min(spiral_size.saturating_sub(2), steps_to_take);
    location_square.y += steps as i32;
    steps_to_take -= steps;
    // follow spiral along the top edge, moving left
    let steps = cmp::min(spiral_size - 1, steps_to_take);
    location_square.x -= steps as i32;
    steps_to_take -= steps;
    // follow spiral down the left-hand edge
    let steps = cmp::min(spiral_size - 1, steps_to_take);
    location_square.y -= steps as i32;
    steps_to_take -= steps;
    // follow spiral along the bottom edge, moving right
    let steps = cmp::min(spiral_size - 1, steps_to_take);
    location_square.x += steps as i32;
    steps_to_take -= steps;
    assert!(steps_to_take == 0);
    Some(location_square)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Checks the center square and the first three squares of ring 1, which
    // trace the counter-clockwise walk: right of center, then up, then left.
    #[test]
    fn test_location_of_square_in_ring() {
        let mut ring_iter = SpiralRingIterator::new();
        let ring = ring_iter.next().unwrap();
        assert_eq!(
            location_of_square_in_ring(&ring, 1),
            Some(SquareLocation { x: 0, y: 0 })
        );
        let ring = ring_iter.next().unwrap();
        assert_eq!(
            location_of_square_in_ring(&ring, 2),
            Some(SquareLocation { x: 1, y: 0 })
        );
        assert_eq!(
            location_of_square_in_ring(&ring, 3),
            Some(SquareLocation { x: 1, y: 1 })
        );
        assert_eq!(
            location_of_square_in_ring(&ring, 4),
            Some(SquareLocation { x: 0, y: 1 })
        );
    }
}
|
use env_logger::{Builder, Env};
use log::*;
use std::cmp::max;
use std::error::Error;
use std::io;
use std::net::UdpSocket;
use std::sync::{Arc, Mutex};
use std::thread;
use stepper::*;
/// Listens for UDP telegrams of the form "<l>:<r>" on port 6789 and drives
/// two stepper motors by the given step counts, blocking until both finish.
/// Invalid telegrams are logged and ignored; socket errors abort the process.
fn main() -> Result<(), Box<dyn Error>> {
    Builder::from_env(Env::default().default_filter_or("info"))
        .default_format_timestamp_nanos(true)
        .init();
    // listen on socket
    let socket = {
        let addr = "0.0.0.0:6789";
        info!("listen on {}", addr);
        UdpSocket::bind(addr)?
    };
    // setup stepper
    // NOTE(review): pin numbers look like Raspberry Pi BCM GPIO numbers —
    // confirm against the wiring / Stepper implementation.
    let stepper_l = Arc::new(Mutex::new(Stepper::new(
        "L",
        EnablePin(23),
        StepPin(27),
        DirectionPin(22),
    )?));
    let stepper_r = Arc::new(Mutex::new(Stepper::new(
        "R",
        EnablePin(24),
        StepPin(13),
        DirectionPin(17),
    )?));
    loop {
        // read from network socket
        let mut buf = [0; 1024];
        info!("waiting for data ...");
        let (len, _src) = socket.recv_from(&mut buf)?;
        let buf = String::from_utf8_lossy(&buf[..len]);
        // try to parse the telegram
        match split_fields(&buf) {
            Ok((l, r)) => {
                // calculate the delay for each stepper
                // this synchronizes both steppers if they have
                // different step counts: the stepper with fewer steps gets a
                // proportionally larger per-step delay.
                // 600 is the base per-step delay unit (units defined by
                // Stepper::step_n — presumably microseconds; confirm).
                let delay = max(l.abs(), r.abs()) * 600;
                let delay_l = delay / max(1, l.abs());
                let delay_r = delay / max(1, r.abs());
                info!("trigger stepper actions");
                let thread_hndl_l = make_steps_async(stepper_l.clone(), l, delay_l);
                let thread_hdnl_r = make_steps_async(stepper_r.clone(), r, delay_r);
                // wait for stepper actions
                info!("wait for steppers");
                thread_hndl_l.join().unwrap();
                thread_hdnl_r.join().unwrap();
                info!("done");
            }
            Err(err) => error!("invalid telegram: '{}' - {} - ignore telegram", buf, err),
        }
    }
}
/// Parses a telegram of the form "<l>:<r>" into two step counts.
///
/// Surrounding whitespace (e.g. a trailing newline) is trimmed first.
/// Returns an error when there are not exactly two ':'-separated fields or
/// when either field fails to parse as `i32`.
fn split_fields(s: &str) -> Result<(i32, i32), Box<dyn Error>> {
    let s = s.trim();
    match s.split_terminator(':').collect::<Vec<_>>().as_slice() {
        [l, r] => {
            let l = l.parse()?;
            let r = r.parse()?;
            Ok((l, r))
        }
        // Anything other than exactly two fields is rejected.
        // (Fixed typo in the error message: "to many" -> "too many".)
        _ => Err(Box::new(io::Error::new(
            io::ErrorKind::Other,
            "no / too many split terminator(s) ':' found",
        ))),
    }
}
/// Spawns a thread that performs `|x|` steps on `stepper` — turning left for
/// negative `x`, right otherwise — passing `delay` through to `step_n`.
fn make_steps_async(stepper: Arc<Mutex<Stepper>>, x: i32, delay: i32) -> thread::JoinHandle<()> {
    let steps = x.abs() as u32;
    let direction = match x.is_negative() {
        true => Direction::Left,
        false => Direction::Right,
    };
    thread::spawn(move || {
        stepper.lock().unwrap().step_n(direction, steps, delay);
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    // Telegram parsing: plain, sign-prefixed, newline-terminated, malformed.
    #[test]
    fn split_fields_normal_numbers() {
        let (l, r) = split_fields("4:5").unwrap();
        assert_eq!(l, 4);
        assert_eq!(r, 5);
    }
    #[test]
    fn split_fields_prefixed_numbers() {
        let (l, r) = split_fields("+3:-6").unwrap();
        assert_eq!(l, 3);
        assert_eq!(r, -6);
    }
    #[test]
    fn split_fields_with_newline() {
        let (l, r) = split_fields("2:7\n").unwrap();
        assert_eq!(l, 2);
        assert_eq!(r, 7);
    }
    #[test]
    fn split_fields_with_invalid_content() {
        assert!(split_fields("3").is_err());
    }
}
|
// Compatibility macros/typedefs needed for Solaris -> Linux port
/// Port of Solaris `P2ALIGN`: rounds `x` down to a multiple of `align`
/// (`align` must be a power of two, per the original macro's contract).
pub fn p2_align(x: u64, align: u64) -> u64 {
    // Two's-complement negation of a power of two produces the mask that
    // clears the low bits, e.g. align = 8 -> 0xFFFF_FFFF_FFFF_FFF8.
    let mask = -(align as i64) as u64;
    x & mask
}
/// Port of Solaris `P2CROSS`: true when `x` and `y` fall into different
/// `align`-sized blocks (power-of-two `align`).
fn p2_cross(x: u64, y: u64, align: u64) -> bool {
    // Explicit parens; same parse as the original (`^` binds tighter than `>`).
    (x ^ y) > (align - 1)
}
/// Port of Solaris `P2ROUNDUP`: rounds `x` up to a multiple of power-of-two
/// `align`. Like the C macro, `x == 0` is out of contract (`x - 1` underflows,
/// panicking in debug builds).
fn p2_round_up(x: u64, align: u64) -> u64 {
    let low_mask = align - 1;
    ((x - 1) | low_mask) + 1
}
/// Port of Solaris `P2BOUNDARY`: true when the byte range `[off, off + len)`
/// crosses an `align`-sized boundary (power-of-two `align`).
fn p2_boundary(off: u64, len: u64, align: u64) -> bool {
    let first = off;
    let last = off + len - 1;
    (first ^ last) > (align - 1)
}
/// Port of Solaris `P2PHASE`: `x`'s offset within its `align`-sized block
/// (power-of-two `align`).
fn p2_phase(x: u64, align: u64) -> u64 {
    let low_mask = align - 1;
    x & low_mask
}
/// Port of Solaris `P2NPHASE`: bytes needed to advance `x` to the next
/// `align`-sized boundary, 0 when already aligned (power-of-two `align`).
fn p2_nphase(x: u64, align: u64) -> u64 {
    let neg_x = -(x as i64) as u64;
    neg_x & (align - 1)
}
fn p2_nphase_typed(x: u64, align: u64) -> u64 {
-(x as i64) as u64 & (align - 1)
}
/// Port of Solaris `ISP2`: true when `x` is a power of two. Like the C
/// macro it also reports true for 0 — and `x - 1` underflows in debug
/// builds when `x == 0`, so callers should pass non-zero values.
fn is_p2(x: u64) -> bool {
    let cleared_low_bit = x & (x - 1);
    cleared_low_bit == 0
}
/// Port of Solaris `IS_P2ALIGNED`: true when `v` is a multiple of
/// power-of-two `a`.
fn is_p2_aligned(v: u64, a: u64) -> bool {
    let low_mask = a - 1;
    v & low_mask == 0
}
/// Index (0-based, counting from the least significant bit) of the highest
/// set bit of `u`.
///
/// `u` must be non-zero: the subtraction underflows for `u == 0` (the
/// original code already panicked there in debug builds and silently
/// wrapped in release). The `debug_assert` makes the precondition explicit
/// without changing release behavior.
pub fn highbit64(u: u64) -> u32 {
    debug_assert!(u != 0, "highbit64 is undefined for 0");
    63 - u.leading_zeros()
}
// Typed version of the P2* macros. These macros should be used to ensure
// that the result is correctly calculated based on the data type of (x),
// which is passed in as the last argument, regardless of the data
// type of the alignment. For example, if (x) is of type uint64_t,
// and we want to round it up to a page boundary using "PAGESIZE" as
// the alignment, we can do either
// P2ROUNDUP(x, (uint64_t)PAGESIZE)
// or
// P2ROUNDUP_TYPED(x, PAGESIZE, uint64_t)
//
// #define P2ALIGN_TYPED(x, align, type) \
// ((type)(x) & -(type)(align))
// #define P2PHASE_TYPED(x, align, type) \
// ((type)(x) & ((type)(align) - 1))
// #define P2NPHASE_TYPED(x, align, type) \
// (-(type)(x) & ((type)(align) - 1))
// #define P2ROUNDUP_TYPED(x, align, type) \
// ((((type)(x) - 1) | ((type)(align) - 1)) + 1)
// #define P2END_TYPED(x, align, type) \
// (-(~(type)(x) & -(type)(align)))
// #define P2PHASEUP_TYPED(x, align, phase, type) \
// ((type)(phase) - (((type)(phase) - (type)(x)) & -(type)(align)))
// #define P2CROSS_TYPED(x, y, align, type) \
// (((type)(x) ^ (type)(y)) > (type)(align) - 1)
// #define P2SAMEHIGHBIT_TYPED(x, y, type) \
// (((type)(x) ^ (type)(y)) < ((type)(x) & (type)(y)))
//
//
// avoid any possibility of clashing with <stddef.h> version
// #if defined(_KERNEL) && !defined(_KMEMUSER) && !defined(offsetof)
// #define offsetof(s, m) ((size_t)(&(((s *)0)->m)))
// #endif
|
/// Reads one line from stdin and parses it as `T`, panicking on failure.
pub fn read<T: std::str::FromStr>() -> T {
    let mut line = String::new();
    std::io::stdin().read_line(&mut line).ok();
    line.trim().parse().ok().unwrap()
}
/// Reads one line from stdin and parses its whitespace-separated tokens
/// into a vector of `T`, panicking on failure.
pub fn read_vec<T: std::str::FromStr>() -> Vec<T> {
    let line: String = read();
    line.split_whitespace()
        .map(|token| token.parse().ok().unwrap())
        .collect()
}
/// Reads `n` lines, each parsed as a whitespace-separated vector of `T`.
pub fn read_vec2<T: std::str::FromStr>(n: u32) -> Vec<Vec<T>> {
    let mut rows = Vec::with_capacity(n as usize);
    for _ in 0..n {
        rows.push(read_vec());
    }
    rows
}
/// Reads `n` lines, parsing each whole line as a single `T`.
pub fn read_col<T: std::str::FromStr>(n: u32) -> Vec<T> {
    let mut column = Vec::with_capacity(n as usize);
    for _ in 0..n {
        column.push(read());
    }
    column
}
/// Euclid's algorithm; `gcd(0, b) == b`. At least one argument must be
/// non-zero.
fn gcd(a: u32, b: u32) -> u32 {
    assert!(a != 0 || b != 0);
    if a == 0 {
        b
    } else {
        gcd(b % a, a)
    }
}
/// Least common multiple.
///
/// Dividing before multiplying (`a / gcd * b` instead of `a * b / gcd`)
/// avoids overflowing `u32` in the intermediate product for results that
/// still fit in `u32`.
fn lcm(a: u32, b: u32) -> u32 {
    a / gcd(a, b) * b
}
/// Reads a count (unused) and a list of numbers from stdin, then prints the
/// LCM of the list. Prints nothing for an empty list.
fn main() {
    let _n: u8 = read();
    let nums: Vec<u32> = read_vec();
    // Note the `&head` pattern: copies the first element out of the slice.
    if let Some((&head, tail)) = nums.split_first() {
        // Note the `&b`: the fold closure receives references to elements.
        println!("{}", tail.iter().fold(head, |a, &b| lcm(a, b)));
    }
}
|
use crate::Number;
use super::Term;
///An enum defining the different operations supported by sequence terms
pub enum SequenceOperations
{
    ///Combine terms by summing them (identity element: zero)
    Addition,
    ///Combine terms by multiplying them (identity element: one)
    Multiplication
}
///A term which takes in a vector of terms and combines them together with an operator (i.e. +, *)
pub struct SequenceTerm<T: Number>
{
    //The sub-terms to combine; evaluated in order during `evaluate`
    terms: Vec<Box<dyn Term<T> + Send + Sync>>,
    //Whether the terms are summed or multiplied
    operation: SequenceOperations
}
impl<T: Number> SequenceTerm<T>
{
    ///Creates a sequence term from the list of terms and the given operation
    ///
    /// # Examples
    ///
    /// ```
    /// use crate::parametrizer::term::constantterm::ConstantTerm;
    /// use crate::parametrizer::term::variableterm::VariableTerm;
    /// use crate::parametrizer::term::sequenceterm::SequenceOperations;
    /// use crate::parametrizer::term::sequenceterm::SequenceTerm;
    /// use crate::parametrizer::term::Term;
    ///
    /// let const1 = Box::new(ConstantTerm::new(13));
    /// let const2 = Box::new(ConstantTerm::new(5));
    /// let variable = Box::new(VariableTerm::new());
    ///
    /// let terms : Vec<Box<dyn Term<i32> + Send + Sync>> = vec![const1, const2, variable];
    ///
    /// let addition = SequenceTerm::new(terms, SequenceOperations::Addition);
    ///
    /// assert_eq!(19, addition.evaluate(1));
    /// assert_eq!(24, addition.evaluate(6));
    /// ```
    ///
    /// ```
    /// use crate::parametrizer::term::constantterm::ConstantTerm;
    /// use crate::parametrizer::term::variableterm::VariableTerm;
    /// use crate::parametrizer::term::sequenceterm::SequenceOperations;
    /// use crate::parametrizer::term::sequenceterm::SequenceTerm;
    /// use crate::parametrizer::term::Term;
    ///
    /// let const1 = Box::new(ConstantTerm::new(13));
    /// let const2 = Box::new(ConstantTerm::new(5));
    /// let variable = Box::new(VariableTerm::new());
    ///
    /// let terms : Vec<Box<dyn Term<i32> + Send + Sync>> = vec![const1, const2, variable];
    ///
    /// let multiplication = SequenceTerm::new(terms, SequenceOperations::Multiplication);
    ///
    /// assert_eq!(65, multiplication.evaluate(1));
    /// assert_eq!(390, multiplication.evaluate(6));
    /// ```
    pub fn new(terms: Vec<Box<dyn Term<T> + Send + Sync>>, operation: SequenceOperations) -> SequenceTerm<T>
    {
        //Idiomatic tail expression instead of an explicit `return`.
        //(Also fixed the second doc example above, which named its
        //multiplication term `addition`.)
        SequenceTerm {terms, operation}
    }
    ///The identity element of the configured operation (zero for +, one for *)
    fn unit(&self) -> T
    {
        match self.operation
        {
            SequenceOperations::Addition => T::zero(),
            SequenceOperations::Multiplication => T::one()
        }
    }
    ///Applies the configured operation to two partial results
    fn compound(&self, l: T, r: T) -> T
    {
        match self.operation
        {
            SequenceOperations::Addition => l + r,
            SequenceOperations::Multiplication => l * r
        }
    }
}
impl<T: Number> Term<T> for SequenceTerm<T>
{
    ///Evaluates every sub-term at `t` and folds the results together with
    ///the configured operation, starting from that operation's identity
    fn evaluate(&self, t: T) -> T
    {
        self.terms
            .iter()
            .fold(self.unit(), |acc, term| self.compound(acc, term.evaluate(t)))
    }
}
|
use libc::c_int;
use ffi::{core, LLVMPassManager};
use ffi::prelude::LLVMPassManagerRef;
use cbox::CSemiBox;
use std::marker::PhantomData;
use module::Module;
use ffi::transforms::scalar::*;
use ffi::transforms::vectorize::*;
use ffi::transforms::ipo::*;
use value::Value;
use ffi::LLVMPassRegistry;
use ffi::core::LLVMGetGlobalPassRegistry;
use ffi::initialization::*;
/// The struct responsible for setting up optimization sequences
pub struct PassManager(PhantomData<[u8]>);
// Conversions between &PassManager and the raw LLVMPassManagerRef.
native_ref!{&PassManager = LLVMPassManagerRef}
// Disposes the underlying LLVM pass manager when the CSemiBox is dropped.
dispose!{PassManager,LLVMPassManager,core::LLVMDisposePassManager}
impl<'a> PassManager {
    /// Create a new pass manager
    pub fn new() -> CSemiBox<'a, PassManager> {
        unsafe { core::LLVMCreatePassManager() }.into()
    }
    /// Create a new function pass manager for a given module. It runs the optimizations
    /// on each function immediately as it is generated
    pub fn new_func_pass(module: &'a Module) -> CSemiBox<'a, PassManager> {
        unsafe { core::LLVMCreateFunctionPassManagerForModule(module.into()) }.into()
    }
    /// Run the function pass manager on `f`; returns `true` when LLVM
    /// reports that any pass modified the function.
    pub fn run_func_pass(&self, f: &'a Value) -> bool {
        unsafe { core::LLVMRunFunctionPassManager(self.into(), f.into()) != 0 }
    }
    /// Runs the pass initializers; call before the first `run_func_pass`.
    pub fn init_func_pass(&self) {
        unsafe { core::LLVMInitializeFunctionPassManager(self.into()) };
    }
    /// Runs the pass finalizers; call after the last `run_func_pass`.
    pub fn finalize_func_pass(&self) {
        unsafe { core::LLVMFinalizeFunctionPassManager(self.into()) };
    }
}
// Generates a chainable `PassManager::$name()` method that registers the
// LLVM pass `$passname` on this pass manager and returns `self`.
macro_rules! add_pass {
    ($name:ident, $passname:expr) => {
        impl <'a> PassManager {
            pub fn $name(&self) -> &PassManager {
                unsafe {$passname(self.into())};
                self
            }
        }
    };
}
// Scalar transformations
// NOTE(review): several of the generated method names below are misspelled
// (`add_agressive_dce`, `add_alingmnet_from_assum`, `add_bit_tacking_dce`,
// `add_loop_nswitch`, `add_lower_swithc`, `add_type_based_alias_nalysis`,
// `add_global_pptimizer`). They are public API, so renaming them here would
// break callers; consider adding correctly-spelled aliases and deprecating
// these instead.
add_pass!{add_agressive_dce,LLVMAddAggressiveDCEPass}
add_pass!{add_alingmnet_from_assum,LLVMAddAlignmentFromAssumptionsPass}
add_pass!{add_basic_alias_analysis,LLVMAddBasicAliasAnalysisPass}
add_pass!{add_bit_tacking_dce,LLVMAddBitTrackingDCEPass}
add_pass!{add_cfg,LLVMAddCFGSimplificationPass}
add_pass!{add_constant_propagation,LLVMAddConstantPropagationPass}
add_pass!{add_dead_store_elimination,LLVMAddDeadStoreEliminationPass}
add_pass!{add_demote_memory_to_register,LLVMAddDemoteMemoryToRegisterPass}
add_pass!{add_early_cse,LLVMAddEarlyCSEPass}
add_pass!{add_correlated_value_propagation,LLVMAddCorrelatedValuePropagationPass}
add_pass!{add_gvn,LLVMAddGVNPass}
add_pass!{add_ind_var_simplify,LLVMAddIndVarSimplifyPass}
add_pass!{add_instruction_combining,LLVMAddInstructionCombiningPass}
add_pass!{add_licm,LLVMAddLICMPass}
add_pass!{add_loop_deletion,LLVMAddLoopDeletionPass}
add_pass!{add_loop_idiom,LLVMAddLoopIdiomPass}
add_pass!{add_loop_reroll,LLVMAddLoopRerollPass}
add_pass!{add_loop_rotate,LLVMAddLoopRotatePass}
add_pass!{add_loop_unroll,LLVMAddLoopUnrollPass}
add_pass!{add_loop_nswitch,LLVMAddLoopUnswitchPass}
add_pass!{add_lower_expect_intrinsic,LLVMAddLowerExpectIntrinsicPass}
add_pass!{add_lower_swithc,LLVMAddLowerSwitchPass}
add_pass!{add_mem_cpy,LLVMAddMemCpyOptPass}
add_pass!{add_merged_load_store_motion,LLVMAddMergedLoadStoreMotionPass}
add_pass!{add_partially_inline_lib_calls,LLVMAddPartiallyInlineLibCallsPass}
add_pass!{add_promote_memory_to_register,LLVMAddPromoteMemoryToRegisterPass}
add_pass!{add_reassociate,LLVMAddReassociatePass}
add_pass!{add_sccp,LLVMAddSCCPPass}
add_pass!{add_scalar_repl_aggregates,LLVMAddScalarReplAggregatesPass}
add_pass!{add_scalar_repl_aggregates_ssa,LLVMAddScalarReplAggregatesPassSSA}
add_pass!{add_scalarizer,LLVMAddScalarizerPass}
add_pass!{add_scoped_no_alias_aa,LLVMAddScopedNoAliasAAPass}
add_pass!{add_simplify_lib_calls,LLVMAddSimplifyLibCallsPass}
add_pass!{add_tail_call_elimination,LLVMAddTailCallEliminationPass}
add_pass!{add_type_based_alias_nalysis,LLVMAddTypeBasedAliasAnalysisPass}
add_pass!{add_verifier,LLVMAddVerifierPass}
// Vectorization transformations
add_pass!{add_bb_vectorize,LLVMAddBBVectorizePass}
add_pass!{add_loop_vectorize,LLVMAddLoopVectorizePass}
add_pass!{add_slp_vectorize,LLVMAddSLPVectorizePass}
// Interprocedural transformations
add_pass!{add_always_inline,LLVMAddAlwaysInlinerPass}
add_pass!{add_argument_promotion,LLVMAddArgumentPromotionPass}
add_pass!{add_constant_merge,LLVMAddConstantMergePass}
add_pass!{add_dead_arg_elimination,LLVMAddDeadArgEliminationPass}
add_pass!{add_function_attrs,LLVMAddFunctionAttrsPass}
add_pass!{add_function_inlining,LLVMAddFunctionInliningPass}
add_pass!{add_global_dce,LLVMAddGlobalDCEPass}
add_pass!{add_global_pptimizer,LLVMAddGlobalOptimizerPass}
add_pass!{add_ip_constant_propagation,LLVMAddIPConstantPropagationPass}
add_pass!{add_ipsccp,LLVMAddIPSCCPPass}
add_pass!{add_prune_eh,LLVMAddPruneEHPass}
add_pass!{add_strip_dead_prototypes,LLVMAddStripDeadPrototypesPass}
add_pass!{add_strip_symbols,LLVMAddStripSymbolsPass}
use ffi::transforms::pass_manager_builder::*;
/// Used to customize a pass sequence in various ways
/// For more information go to [llvm](http://llvm.org/doxygen/classllvm_1_1PassManagerBuilder.html)
pub struct PassManagerBuilder(PhantomData<[u8]>);
// Conversions between &PassManagerBuilder and the raw builder ref.
native_ref!{&PassManagerBuilder = LLVMPassManagerBuilderRef}
// Disposes the underlying builder when the CSemiBox is dropped.
dispose!{PassManagerBuilder,LLVMOpaquePassManagerBuilder,LLVMPassManagerBuilderDispose}
impl<'a> PassManagerBuilder {
/// Create a new `PassManagerBuilder`
pub fn new() -> CSemiBox<'a, PassManagerBuilder> {
unsafe { LLVMPassManagerBuilderCreate() }.into()
}
/// Specify the basic optimization level
pub fn set_opt(&self, level: u32) {
unsafe { LLVMPassManagerBuilderSetOptLevel(self.into(), level.into()) };
}
/// How much we're optimizing for size
pub fn set_size(&self, level: u32) {
unsafe { LLVMPassManagerBuilderSetOptLevel(self.into(), level.into()) };
}
///
pub fn set_disable_simplify_lib_calls(&self, opt: bool) {
unsafe { LLVMPassManagerBuilderSetDisableSimplifyLibCalls(self.into(), opt as c_int) }
}
pub fn disable_unit_at_a_time(&self, opt: bool) {
unsafe { LLVMPassManagerBuilderSetDisableUnitAtATime(self.into(), opt as c_int) }
}
pub fn disable_unroll_loop(&self, opt: bool) {
unsafe { LLVMPassManagerBuilderSetDisableUnrollLoops(self.into(), opt as c_int) }
}
pub fn populate_lto_pass_manger(
&self,
pass_manager: &PassManager,
internalize: bool,
run_inliner: bool,
) {
unsafe {
LLVMPassManagerBuilderPopulateLTOPassManager(
self.into(),
pass_manager.into(),
internalize as c_int,
run_inliner as c_int,
)
}
}
pub fn inline_with_threshold(&self, threshold: u32) {
unsafe { LLVMPassManagerBuilderUseInlinerWithThreshold(self.into(), threshold) }
}
pub fn populate_module_pass_manager(&self, pass_manager: &PassManager) {
unsafe { LLVMPassManagerBuilderPopulateModulePassManager(self.into(), pass_manager.into()) }
}
pub fn populate_function_pass_manager(&self, pass_manager: &PassManager) {
unsafe {
LLVMPassManagerBuilderPopulateFunctionPassManager(self.into(), pass_manager.into())
}
}
}
/// This class manages the registration and initialization of
/// the pass subsystem at application startup, and assists the PassManager
/// in resolving pass dependencies
pub struct PassRegistry(*mut LLVMPassRegistry);
// Waiting on stabilisation of #13231
//impl !Send for PassRegistry {}
//impl !Sync for PassRegistry{}
impl PassRegistry {
    /// Fetches the process-global pass registry.
    pub fn new() -> Self {
        PassRegistry(unsafe { LLVMGetGlobalPassRegistry() })
    }
    /// Initializes every pass family below in a single call.
    pub fn init_all(&self) {
        self.init_analyis()
            .init_codegen()
            .init_core()
            .init_ipa()
            .init_ipo()
            .init_inst_combine()
            .init_instrumentation()
            .init_scalar_opts()
            .init_target()
            .init_transfrom_utils()
            .init_vectorization();
    }
    // Raw registry pointer for the FFI calls below.
    fn get(&self) -> *mut LLVMPassRegistry {
        self.0
    }
    // NOTE(review): `init_analyis` and `init_transfrom_utils` are misspelled
    // but public; renaming would break callers.
    pub fn init_analyis(&self) -> &PassRegistry {
        unsafe { LLVMInitializeAnalysis(self.get()) }
        self
    }
    pub fn init_codegen(&self) -> &PassRegistry {
        unsafe { LLVMInitializeCodeGen(self.get()) }
        self
    }
    pub fn init_core(&self) -> &PassRegistry {
        unsafe { LLVMInitializeCore(self.get()) }
        self
    }
    pub fn init_ipa(&self) -> &PassRegistry {
        unsafe { LLVMInitializeIPA(self.get()) }
        self
    }
    pub fn init_ipo(&self) -> &PassRegistry {
        unsafe { LLVMInitializeIPO(self.get()) }
        self
    }
    pub fn init_inst_combine(&self) -> &PassRegistry {
        unsafe { LLVMInitializeInstCombine(self.get()) }
        self
    }
    pub fn init_instrumentation(&self) -> &PassRegistry {
        unsafe { LLVMInitializeInstrumentation(self.get()) }
        self
    }
    pub fn init_scalar_opts(&self) -> &PassRegistry {
        unsafe { LLVMInitializeScalarOpts(self.get()) }
        self
    }
    pub fn init_target(&self) -> &PassRegistry {
        unsafe { LLVMInitializeTarget(self.get()) }
        self
    }
    pub fn init_transfrom_utils(&self) -> &PassRegistry {
        unsafe { LLVMInitializeTransformUtils(self.get()) }
        self
    }
    pub fn init_vectorization(&self) -> &PassRegistry {
        unsafe { LLVMInitializeVectorization(self.get()) }
        self
    }
}
|
use std::fmt;
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
// Version tag; the Display impl below renders the variant name lowercased.
pub enum Version {
    /// Version 1 (displayed as "v1").
    V1,
}
impl fmt::Display for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let value = format!("{:?}", self).to_lowercase();
f.write_str(&value)
}
}
|
use super::components::*;
use super::map::Map;
use super::string_writer::StringWriter;
use serde::{Deserialize, Serialize};
use specs::error::NoError;
use specs::prelude::*;
use specs::saveload::{
DeserializeComponents, SerializeComponents, SimpleMarker, SimpleMarkerAllocator,
};
#[derive(Default, Serialize, Deserialize, Clone)]
// Wire format for a save-state package sent to clients.
struct OptimisticGameSave {
    // JSON produced by `serialize_ecs`; always present.
    serialized_ecs: String,
    // JSON map from `serialize_map`; only included when the client's map
    // needs updating (see `package_save_state`).
    maybe_serialized_map: Option<String>,
}
/// Magic stolen from "Roguelike Tutorial - In Rust" (See README.md)
/// Macro to serialize ECS components and entities
// Serializes each listed component storage, in order, into the same
// serializer. `$data` is the (entities, marker-storage) pair built by the
// caller.
macro_rules! serialize_individually {
    ($ecs:expr, $ser:expr, $data:expr, $( $type:ty),*) => {
        $(
        SerializeComponents::<NoError, SimpleMarker<EntityMarker>>::serialize(
            &( $ecs.read_storage::<$type>(), ),
            &$data.0,
            &$data.1,
            &mut $ser,
        )
        .unwrap();
        )*
    };
}
/// Convert our ECS into a JSON string
///
/// NOTE(review): the component list here appears to be consumed positionally
/// by `load_game` — keep it in the same order as the list there.
pub fn serialize_ecs(ecs: &mut World) -> String {
    let data = (
        ecs.entities(),
        ecs.read_storage::<SimpleMarker<EntityMarker>>(),
    );
    let mut writer = StringWriter::new();
    let mut serializer = serde_json::Serializer::new(&mut writer);
    serialize_individually!(
        ecs,
        serializer,
        data,
        FPSTracker,
        Location,
        PlayerInfo,
        Renderable,
        TextRenderable,
        ChatRenderable,
        GraphicRenderable,
        GraphicAnimatable,
        WantsToMoveTo,
        Disappearing,
        CarriedBy,
        CrabAI,
        WantsToBePickedUp,
        WantsToStab
    );
    // TODO: Compress serde?
    writer.to_string()
}
/// Serializes the map to a JSON string, panicking if serialization fails.
pub fn serialize_map(map: &Map) -> String {
    // `map` is already a reference; the previous `&map` created a needless
    // double reference (serde serializes through either, this is just cleaner).
    serde_json::to_string(map).unwrap()
}
/// Package our save state into a struct that optionally includes the map.
/// This allows us to only send the large map data when the client's map needs to be updated
pub fn package_save_state(serialized_ecs: String, maybe_serialized_map: Option<String>) -> String {
    let package = OptimisticGameSave {
        serialized_ecs,
        maybe_serialized_map,
    };
    serde_json::to_string(&package).unwrap()
}
/// Magic stolen from "Roguelike Tutorial - In Rust" (See README.md)
/// Macro to deserialize ECS components and entities
// Mirror of `serialize_individually`: reads each listed component storage,
// in order, from the same deserializer.
macro_rules! deserialize_individually {
    ($ecs:expr, $de:expr, $data:expr, $( $type:ty),*) => {
        $(
        DeserializeComponents::<NoError, _>::deserialize(
            &mut ( &mut $ecs.write_storage::<$type>(), ),
            &$data.0, // entities
            &mut $data.1, // marker
            &mut $data.2, // allocater
            &mut $de,
        )
        .unwrap();
        )*
    };
}
/// Update the ECS to reflect the world deserialized from the OptimisticGameSave JSON string
///
/// Deletes every existing entity first, then repopulates the world from the
/// package. NOTE(review): the component list must stay in the same order as
/// the one in `serialize_ecs`, since the streams appear to be positional.
pub fn load_game(ecs: &mut World, package_save_str: String) {
    {
        // Delete everything
        // (collected first because we cannot delete while iterating the join)
        let mut to_delete = Vec::new();
        for e in ecs.entities().join() {
            to_delete.push(e);
        }
        for del in to_delete.iter() {
            ecs.delete_entity(*del).expect("Deletion failed");
        }
    }
    // Extract our save package from the string
    let package_save_state: OptimisticGameSave = serde_json::from_str(&package_save_str).unwrap();
    // Deserialize the ECS because we know that will be there
    let mut de = serde_json::Deserializer::from_str(&package_save_state.serialized_ecs);
    {
        let mut d = (
            &mut ecs.entities(),
            &mut ecs.write_storage::<SimpleMarker<EntityMarker>>(),
            &mut ecs.write_resource::<SimpleMarkerAllocator<EntityMarker>>(),
        );
        deserialize_individually!(
            ecs,
            de,
            d,
            FPSTracker,
            Location,
            PlayerInfo,
            Renderable,
            TextRenderable,
            ChatRenderable,
            GraphicRenderable,
            GraphicAnimatable,
            WantsToMoveTo,
            Disappearing,
            CarriedBy,
            CrabAI,
            WantsToBePickedUp,
            WantsToStab
        );
    }
    // If the map was in this package, copy the map over to our instance
    if let Some(serialized_map) = package_save_state.maybe_serialized_map {
        let mut map_ref = ecs.write_resource::<Map>();
        let new_map: Map = serde_json::from_str(&serialized_map).unwrap();
        *map_ref = new_map.clone();
    }
}
#[derive(Serialize, Deserialize)]
/// Input events originating from a player, transported as JSON
/// (see `serialize_player_input` / `deserialize_player_input`).
pub enum PlayerInput {
    CreatePlayer { id: String, name: String },
    DeletePlayer { id: String },
    ChangeName { id: String, name: String },
    SpecialInput { id: String, input: String },
    Click { id: String, x: i32, y: i32 },
    Chat { id: String, message: String },
}
/// Encodes a `PlayerInput` as a JSON string, panicking if encoding fails.
pub fn serialize_player_input(player_input: PlayerInput) -> String {
    let encoded = serde_json::to_string(&player_input);
    encoded.unwrap()
}
/// Decodes a JSON string into a `PlayerInput`, panicking on malformed input.
pub fn deserialize_player_input(player_input_str: String) -> PlayerInput {
    let decoded = serde_json::from_str(&player_input_str);
    decoded.unwrap()
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
use std::cell::RefCell;
use std::cmp::max;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use crate::block::Header;
use crate::Allocator;
use crate::BlockBuilder;
use crate::MemoizationCache;
use crate::ToOcamlRep;
use crate::Value;
// One bump-allocated segment of arena memory.
struct Chunk {
    // Backing storage; fixed size once allocated.
    data: Box<[Value<'static>]>,
    // Next free slot in `data` (the bump pointer).
    index: usize,
    /// Pointer to the prev arena segment.
    prev: Option<Box<Chunk>>,
}
impl Chunk {
    /// Creates a chunk of `capacity` words, initialized with `Value::int(0)`.
    fn with_capacity(capacity: usize) -> Self {
        let data = vec![Value::int(0); capacity].into_boxed_slice();
        Self {
            data,
            index: 0,
            prev: None,
        }
    }
    /// Total number of words this chunk can hold.
    fn capacity(&self) -> usize {
        self.data.len()
    }
    /// Whether `requested_size` more words fit past the bump pointer.
    fn can_fit(&self, requested_size: usize) -> bool {
        self.index + requested_size <= self.data.len()
    }
    /// Bump-allocates `requested_size` words. Callers must check `can_fit`
    /// first; the slice index panics otherwise.
    #[inline]
    pub fn alloc(&mut self, requested_size: usize) -> &mut [Value<'static>] {
        let start = self.index;
        let end = start + requested_size;
        self.index = end;
        &mut self.data[start..end]
    }
}
// The generation number is used solely to identify which arena a cached value
// belongs to in `RcOc`.
//
// We use usize::max_value() / 2 here to avoid colliding with ocamlpool and
// SlabAllocator generation numbers (ocamlpool starts at 0, and SlabAllocator
// starts at usize::max_value() / 4). This generation trick isn't sound with the
// use of multiple generation counters, but this mitigation should make it
// extremely difficult to mix up values allocated with ocamlpool, Arena, and
// SlabAllocator in practice (one would have to serialize the same value with
// multiple Allocators, and only after increasing the generation of one by an
// absurd amount).
//
// If we add more allocators, we might want to rethink this strategy.
// See the comment block above for why this counter starts at half of usize.
static NEXT_GENERATION: AtomicUsize = AtomicUsize::new(usize::max_value() / 2);
/// An [`Allocator`](trait.Allocator.html) which builds values in Rust-managed
/// memory. The memory is freed when the Arena is dropped.
pub struct Arena {
    // Identifies which arena a cached value belongs to (see NEXT_GENERATION).
    generation: usize,
    // Head of the chunk list; exhausted chunks are kept alive via `prev`.
    current_chunk: RefCell<Chunk>,
    // Caches already-converted values during `add_root` (see `memoized`).
    cache: MemoizationCache,
}
impl Arena {
    /// Create a new Arena with 4KB of capacity preallocated.
    pub fn new() -> Self {
        Self::with_capacity(1024 * 4)
    }
    /// Create a new Arena with `capacity_in_bytes` preallocated.
    pub fn with_capacity(capacity_in_bytes: usize) -> Self {
        let generation = NEXT_GENERATION.fetch_add(1, Ordering::SeqCst);
        // Minimum of 2 words — presumably so the smallest block (header plus
        // one field) always fits; TODO confirm.
        let capacity_in_words = max(2, capacity_in_bytes / std::mem::size_of::<Value<'_>>());
        Self {
            generation,
            current_chunk: RefCell::new(Chunk::with_capacity(capacity_in_words)),
            cache: MemoizationCache::new(),
        }
    }
    /// Bump-allocates `requested_size` words, chaining a fresh chunk onto the
    /// list when the current one cannot fit the request.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    fn alloc<'a>(&'a self, requested_size: usize) -> &'a mut [Value<'a>] {
        if !self.current_chunk.borrow().can_fit(requested_size) {
            // Grow: the new chunk is at least twice the request and no
            // smaller than the previous chunk's capacity.
            let prev_chunk_capacity = self.current_chunk.borrow().capacity();
            let prev_chunk = self.current_chunk.replace(Chunk::with_capacity(max(
                requested_size * 2,
                prev_chunk_capacity,
            )));
            self.current_chunk.borrow_mut().prev = Some(Box::new(prev_chunk));
        }
        let mut chunk = self.current_chunk.borrow_mut();
        let slice = chunk.alloc(requested_size);
        // Transmute the 'static lifetime to 'a, to allow Values which point to
        // blocks allocated using this Arena to be stored in other such blocks.
        // The lifetime ensures that callers cannot allow such Values to outlive
        // the arena (and therefore outlive the block they point to). This
        // transmute violates the 'static lifetime in Chunk, so it is critical
        // for safety that we never expose a view of those Values to code
        // outside this module (using the type `Value<'static>`).
        // Also transmute the unnamed lifetime referring to the mutable borrow
        // of `chunk` to 'a. This allows callers to hold multiple mutable blocks
        // at once. This is safe because the blocks handed out by Chunk::alloc
        // are non-overlapping, so there is no aliasing.
        unsafe { std::mem::transmute::<&'_ mut [Value<'static>], &'a mut [Value<'a>]>(slice) }
    }
    /// Converts `value` into an arena-allocated OCaml representation.
    #[inline(always)]
    pub fn add<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        value.to_ocamlrep(self)
    }
    /// Like `add`, but with memoization enabled (see `Allocator::add_root`).
    #[inline(always)]
    pub fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        Allocator::add_root(self, value)
    }
}
impl Allocator for Arena {
    /// The unique generation number assigned to this Arena at construction.
    #[inline(always)]
    fn generation(&self) -> usize {
        self.generation
    }

    /// Allocate a block with the given size and tag: one header word followed
    /// by `size` field words, returned as a `BlockBuilder` over the fields.
    fn block_with_size_and_tag(&self, size: usize, tag: u8) -> BlockBuilder<'_> {
        // One extra word for the header that precedes the fields.
        let block = self.alloc(size + 1);
        let header = Header::new(size, tag);
        // Safety: We need to make sure that the Header written to index 0 of
        // this slice is never observed as a Value. We guarantee that by not
        // exposing raw Chunk memory--only allocated Values.
        block[0] = unsafe { Value::from_bits(header.to_bits()) };
        let slice = &mut block[1..];
        BlockBuilder::new(slice)
    }

    /// Write `value` into field `index` of `block`.
    // NOTE(review): no bounds check here — presumably the Allocator trait
    // requires `index` to be within the block's size; confirm its contract.
    #[inline(always)]
    fn set_field<'a>(&self, block: &mut BlockBuilder<'a>, index: usize, value: Value<'a>) {
        unsafe { *self.block_ptr_mut(block).add(index) = value }
    }

    /// Raw pointer to the first field of `block`.
    unsafe fn block_ptr_mut<'a>(&self, block: &mut BlockBuilder<'a>) -> *mut Value<'a> {
        block.address() as *mut _
    }

    /// Compute a value at most once per (ptr, size) key, caching its bits.
    fn memoized<'a>(
        &'a self,
        ptr: usize,
        size: usize,
        f: impl FnOnce(&'a Self) -> Value<'a>,
    ) -> Value<'a> {
        let bits = self.cache.memoized(ptr, size, || f(self).to_bits());
        // SAFETY: The only memoized values in the cache are those computed in
        // the closure on the previous line. Since f returns Value<'a>, any
        // cached bits must represent a valid Value<'a>,
        unsafe { Value::from_bits(bits) }
    }

    /// Convert `value` with the memoization cache active for the duration of
    /// the conversion.
    fn add_root<'a, T: ToOcamlRep + ?Sized>(&'a self, value: &'a T) -> Value<'a> {
        self.cache.with_cache(|| value.to_ocamlrep(self))
    }
}
#[cfg(test)]
mod tests {
    use std::time::Instant;

    use super::*;

    // A block built field-by-field should read back exactly what was set.
    #[test]
    fn test_alloc_block_of_three_fields() {
        let arena = Arena::with_capacity(1000);
        let mut block = arena.block_with_size(3);
        arena.set_field(&mut block, 0, Value::int(1));
        arena.set_field(&mut block, 1, Value::int(2));
        arena.set_field(&mut block, 2, Value::int(3));
        let block = block.build().as_block().unwrap();

        assert_eq!(block.size(), 3);
        assert_eq!(block[0].as_int().unwrap(), 1);
        assert_eq!(block[1].as_int().unwrap(), 2);
        assert_eq!(block[2].as_int().unwrap(), 3);
    }

    // Requests larger than the initial capacity must trigger chunk growth
    // without invalidating earlier blocks.
    #[test]
    fn test_large_allocs() {
        let arena = Arena::with_capacity(1000);
        let alloc_block = |size| arena.block_with_size(size).build().as_block().unwrap();

        let max = alloc_block(1000);
        assert_eq!(max.size(), 1000);

        let two_thousand = alloc_block(2000);
        assert_eq!(two_thousand.size(), 2000);

        let four_thousand = alloc_block(4000);
        assert_eq!(four_thousand.size(), 4000);
    }

    // Not a correctness test: prints timings comparing boxed-slice heap
    // allocation against Arena allocation for a few block sizes.
    #[test]
    fn perf_test() {
        let arena = Arena::with_capacity(10_000);
        let alloc_block = |size| arena.block_with_size(size).build().as_block().unwrap();

        println!("Benchmarks for allocating [1] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 1].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());
        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(1);
        }
        println!("Arena: {:?}", now.elapsed());

        println!("Benchmarks for allocating [5] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 5].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());
        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(5);
        }
        println!("Arena: {:?}", now.elapsed());

        println!("Benchmarks for allocating [10] 200,000 times");
        let now = Instant::now();
        for _ in 0..200_000 {
            vec![0; 10].into_boxed_slice();
        }
        println!("Alloc: {:?}", now.elapsed());
        let now = Instant::now();
        for _ in 0..200_000 {
            alloc_block(10);
        }
        println!("Arena: {:?}", now.elapsed());
    }
}
|
use crate::{
chat_server::{ChatServer, MessagePayload},
pb::teddy::{
teddy_service_server::{TeddyService, TeddyServiceServer},
SendMessageRep, SendMessageReq,
},
};
use actix::Addr;
use tonic::{Request, Response, Status};
/// gRPC service implementation backed by the actix `ChatServer` actor.
pub struct Teddy {
    // Address of the chat server actor; messages are meant to be forwarded
    // here (see the todo in `send_message`).
    server: Addr<ChatServer>,
}
impl Teddy {
    /// Builds a `Teddy` around the given chat-server address and wraps it in
    /// a tonic-servable `TeddyServiceServer`.
    pub fn new_srv(server: Addr<ChatServer>) -> TeddyServiceServer<Teddy> {
        let service = Teddy { server };
        TeddyServiceServer::new(service)
    }
}
#[tonic::async_trait]
impl TeddyService for Teddy {
    /// Stub: forwarding the request to the chat server is not implemented
    /// yet, so this currently always answers with an `internal` status.
    async fn send_message(
        &self,
        req: Request<SendMessageReq>,
    ) -> Result<Response<SendMessageRep>, Status> {
        // todo:
        // self.server
        //     .do_send(req.into_inner().message.unwrap() as PushMessage);
        Err(Status::internal("internal"))
    }
}
|
use std::fmt::Debug;
/// A stack is represented as a `Vec`; `push`/`pop` operate on the `Vec`'s end.
pub type Stack<T> = Vec<T>;

/// Minimal LIFO stack interface.
pub trait BasicStack<T> {
    /// Creates an empty stack.
    fn empty_stack() -> Self;
    /// Pushes `item` onto the top of the stack.
    fn push(&mut self, item: T);
    /// Removes and returns the top item, or `None` when empty.
    fn pop(&mut self) -> Option<T>;
    /// Returns a reference to the top item without removing it.
    fn peek(&mut self) -> Option<&T>;
    /// Returns `true` when the stack holds no items.
    fn is_empty(&self) -> bool;
}
/// An environment mapping `String` keys to values of type `T`.
pub trait Env<T> {
    /// Creates an empty environment.
    fn empty_env(&self) -> Self;
    /// Returns a new environment with `key` bound to `value`; later bindings
    /// shadow earlier ones for the same key.
    fn extend_env(&self, key: String, value: T) -> Self;
    /// Applies `func` to the value bound to `target` (the most recent binding
    /// wins — see the `Stack` impl) or returns `None` when unbound.
    fn apply_env(&self, func: fn(T) -> T, target: String) -> Option<T>;
}
impl<T: Clone + Debug> Env<T> for Stack<(String, T)> {
    /// Creates an empty environment.
    fn empty_env(&self) -> Self {
        Stack::empty_stack()
    }

    /// Returns a copy of this environment with `(key, value)` pushed on top.
    fn extend_env(&self, key: String, value: T) -> Self {
        let mut extended: Stack<(String, T)> = Vec::with_capacity(self.capacity());
        extended.extend(self.iter().cloned());
        extended.push((key, value));
        extended
    }

    /// Finds the most recent binding of `target` (scanning from the top of
    /// the stack) and applies `func` to a clone of its value.
    fn apply_env(&self, func: fn(T) -> T, target: String) -> Option<T> {
        self.iter()
            .rev()
            .find(|(key, _)| *key == target)
            .map(|(_, value)| func(value.clone()))
    }
}
impl<T> BasicStack<T> for Stack<T> {
    /// Creates an empty stack.
    fn empty_stack() -> Self {
        vec![]
    }

    /// Pushes `item` onto the top of the stack (the end of the `Vec`).
    // Not recursive: `self.push` resolves to Vec's inherent method, which
    // takes precedence over the trait method during method resolution.
    fn push(&mut self, item: T) {
        self.push(item);
    }

    /// Removes and returns the most recently pushed item.
    fn pop(&mut self) -> Option<T> {
        self.pop()
    }

    /// Returns `true` when the stack holds no items.
    fn is_empty(&self) -> bool {
        self.is_empty()
    }

    /// Returns the top of the stack without removing it.
    // Bug fix: this previously returned `self.first()`, i.e. the *bottom* of
    // the stack, which is inconsistent with `push`/`pop` operating on the end
    // of the `Vec`. `last()` is the element `pop` would return.
    fn peek(&mut self) -> Option<&T> {
        self.last()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Extending never mutates the source environment, and the most recent
    // binding of a shadowed key ("y") is the one `apply_env` uses.
    #[test]
    fn test_env_functions() {
        let env: Stack<(String, u32)> = Stack::empty_stack();
        let env_1 = env.extend_env("d".to_string(), 6);
        let env_2 = env_1.extend_env("y".to_string(), 8);
        let env_3 = env_2.extend_env("x".to_string(), 7);
        let env_4 = env_3.extend_env("y".to_string(), 14);
        assert_eq!(env, vec![]);
        assert_eq!(env_1, vec![("d".to_string(), 6)]);
        assert_eq!(env_2, vec![("d".to_string(), 6), ("y".to_string(), 8)]);
        assert_eq!(
            env_3,
            vec![
                ("d".to_string(), 6),
                ("y".to_string(), 8),
                ("x".to_string(), 7)
            ]
        );
        assert_eq!(
            env_4,
            vec![
                ("d".to_string(), 6),
                ("y".to_string(), 8),
                ("x".to_string(), 7),
                ("y".to_string(), 14)
            ]
        );
        // "y" was rebound to 14, so the doubled result is 28, not 16.
        assert_eq!(env_4.apply_env(|x| x * 2, "y".to_string()), Some(28 as u32));
    }
}
|
// svd2rust-generated register declarations for the interrupt register group
// (INTR / INTR_SET / INTR_MASK / INTR_MASKED). Doc strings only: fixed the
// generated "avaliable" typo.
#[doc = "Interrupt\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intr](intr) module"]
pub type INTR = crate::Reg<u32, _INTR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTR;
#[doc = "`read()` method returns [intr::R](intr::R) reader structure"]
impl crate::Readable for INTR {}
#[doc = "`write(|w| ..)` method takes [intr::W](intr::W) writer structure"]
impl crate::Writable for INTR {}
#[doc = "Interrupt"]
pub mod intr;
#[doc = "Interrupt set\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intr_set](intr_set) module"]
pub type INTR_SET = crate::Reg<u32, _INTR_SET>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTR_SET;
#[doc = "`read()` method returns [intr_set::R](intr_set::R) reader structure"]
impl crate::Readable for INTR_SET {}
#[doc = "`write(|w| ..)` method takes [intr_set::W](intr_set::W) writer structure"]
impl crate::Writable for INTR_SET {}
#[doc = "Interrupt set"]
pub mod intr_set;
#[doc = "Interrupt mask\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intr_mask](intr_mask) module"]
pub type INTR_MASK = crate::Reg<u32, _INTR_MASK>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTR_MASK;
#[doc = "`read()` method returns [intr_mask::R](intr_mask::R) reader structure"]
impl crate::Readable for INTR_MASK {}
#[doc = "`write(|w| ..)` method takes [intr_mask::W](intr_mask::W) writer structure"]
impl crate::Writable for INTR_MASK {}
#[doc = "Interrupt mask"]
pub mod intr_mask;
#[doc = "Interrupt masked\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [intr_masked](intr_masked) module"]
pub type INTR_MASKED = crate::Reg<u32, _INTR_MASKED>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _INTR_MASKED;
#[doc = "`read()` method returns [intr_masked::R](intr_masked::R) reader structure"]
impl crate::Readable for INTR_MASKED {}
#[doc = "Interrupt masked"]
pub mod intr_masked;
|
use super::super::atom::{
btn::Btn,
common::Common,
fa,
heading::{self, Heading},
slider::{self, Slider},
text::Text,
};
use super::super::organism::{
popup_color_pallet::{self, PopupColorPallet},
room_modeless::RoomModeless,
};
use super::ShowingModal;
use crate::arena::{block, BlockMut};
use crate::libs::color::Pallet;
use isaribi::{
style,
styled::{Style, Styled},
};
use kagura::prelude::*;
use nusa::prelude::*;
/// Properties passed in from the parent component.
pub struct Props {
    // The craftboard block this tab edits.
    pub craftboard: block::craftboard::Block,
}
/// Internal messages handled by `Update::update`.
pub enum Msg {
    // Do nothing.
    NoOp,
    // Forward an `On` event to the parent component.
    Sub(On),
}
/// Events emitted to the parent component (which owns the actual mutation of
/// the craftboard block).
pub enum On {
    OpenModal(ShowingModal),
    SetName(String),
    SetDisplayName0(String),
    SetDisplayName1(String),
    SetXSize(f64),
    SetYSize(f64),
    SetZSize(f64),
    SetGridColor(Pallet),
    SetVoxelDensityX(f64),
    SetVoxelDensityY(f64),
    SetVoxelDensityZ(f64),
}
/// Editor tab for a craftboard: name, size, grid color, voxel density and
/// face textures.
pub struct Tab0 {
    craftboard: block::craftboard::Block,
    // DOM element ids used to pair labels with their inputs.
    element_id: ElementId,
}
// Element ids referenced by `render_header` to associate each <label for=..>
// with its input field.
ElementId! {
    input_craftboard_name,
    input_craftboard_display_name
}
// Wire up the kagura component types: props in, internal messages, events out.
impl Component for Tab0 {
    type Props = Props;
    type Msg = Msg;
    type Event = On;
}
// Marker impl: Tab0 renders to Html.
impl HtmlComponent for Tab0 {}
impl Constructor for Tab0 {
    /// Builds the tab from its props, generating fresh element ids.
    fn constructor(props: Self::Props) -> Self {
        Self {
            craftboard: props.craftboard,
            element_id: ElementId::new(),
        }
    }
}
impl Update for Tab0 {
    /// Refreshes the craftboard reference whenever new props arrive.
    fn on_load(mut self: Pin<&mut Self>, props: Self::Props) -> Cmd<Self> {
        self.craftboard = props.craftboard;
        Cmd::none()
    }

    /// `NoOp` is swallowed; `Sub` events are submitted to the parent.
    fn update(self: Pin<&mut Self>, msg: Self::Msg) -> Cmd<Self> {
        match msg {
            Msg::NoOp => Cmd::none(),
            Msg::Sub(event) => Cmd::submit(event),
        }
    }
}
impl Render<Html> for Tab0 {
    type Children = ();
    /// Renders header and main sections; when the craftboard block cannot be
    /// mapped (e.g. not loaded), each section falls back to an empty node.
    fn render(&self, _: Self::Children) -> Html {
        Self::styled(Html::div(
            Attributes::new()
                .class(RoomModeless::class("common-base"))
                .class("pure-form"),
            Events::new(),
            vec![
                self.craftboard
                    .map(|data| self.render_header(data))
                    .unwrap_or(Common::none()),
                self.craftboard
                    .map(|data| self.render_main(data))
                    .unwrap_or(Common::none()),
            ],
        ))
    }
}
impl Tab0 {
    /// Renders the header row: a labelled name input plus the two
    /// display-name inputs (parts 1 and 0 of `display_name()`).
    fn render_header(&self, craftboard: &block::Craftboard) -> Html {
        Html::div(
            Attributes::new().class(RoomModeless::class("common-header")),
            Events::new(),
            vec![
                Html::label(
                    Attributes::new()
                        .class(RoomModeless::class("common-label"))
                        .string("for", &self.element_id.input_craftboard_name),
                    Events::new(),
                    vec![fa::fas_i("fa-user")],
                ),
                Html::input(
                    Attributes::new()
                        .id(&self.element_id.input_craftboard_name)
                        .value(craftboard.name()),
                    Events::new().on_input(self, |name| Msg::Sub(On::SetName(name))),
                    vec![],
                ),
                Html::label(
                    Attributes::new()
                        .class(RoomModeless::class("common-label"))
                        .string("for", &self.element_id.input_craftboard_display_name),
                    Events::new(),
                    vec![Html::text("表示名")],
                ),
                // Secondary display name (index 1 of the pair).
                Html::input(
                    Attributes::new().value(&craftboard.display_name().1),
                    Events::new().on_input(self, |name| Msg::Sub(On::SetDisplayName1(name))),
                    vec![],
                ),
                Text::span(""),
                // Primary display name (index 0 of the pair).
                Html::input(
                    Attributes::new()
                        .id(&self.element_id.input_craftboard_display_name)
                        .value(&craftboard.display_name().0),
                    Events::new().on_input(self, |name| Msg::Sub(On::SetDisplayName0(name))),
                    vec![],
                ),
            ],
        )
    }

    /// Renders the main area: the property sliders followed by a heading and
    /// the texture pickers.
    fn render_main(&self, craftboard: &block::Craftboard) -> Html {
        Html::div(
            Attributes::new().class(Self::class("main")),
            Events::new(),
            vec![
                self.render_props(craftboard),
                Heading::h3(
                    heading::Variant::Light,
                    Attributes::new(),
                    Events::new(),
                    vec![Html::text("テクスチャ")],
                ),
                self.render_textures(craftboard),
            ],
        )
    }

    /// Renders the property controls: X/Y/Z size sliders, grid-color picker,
    /// and X/Y/Z voxel-density sliders.
    fn render_props(&self, craftboard: &block::Craftboard) -> Html {
        Html::div(
            Attributes::new().class(Self::class("content")),
            Events::new(),
            vec![
                Html::div(
                    Attributes::new().class(Common::keyvalue()),
                    Events::new(),
                    vec![
                        Text::span("X幅(横幅)"),
                        Slider::new(
                            self,
                            None,
                            slider::Position::Linear {
                                min: 1.0,
                                max: 100.0,
                                val: craftboard.size()[0],
                                step: 1.0,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(x) => Msg::Sub(On::SetXSize(x)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                        Text::span("Y幅(奥行き)"),
                        Slider::new(
                            self,
                            None,
                            slider::Position::Linear {
                                min: 1.0,
                                max: 100.0,
                                val: craftboard.size()[1],
                                step: 1.0,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(y) => Msg::Sub(On::SetYSize(y)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                        Text::span("Z幅(高さ)"),
                        Slider::new(
                            self,
                            None,
                            // NOTE(review): Z allows min 0.0 while X/Y start at
                            // 1.0 — presumably intentional (flat boards); confirm.
                            slider::Position::Linear {
                                min: 0.0,
                                max: 100.0,
                                val: craftboard.size()[2],
                                step: 1.0,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(z) => Msg::Sub(On::SetZSize(z)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                        Text::span("色"),
                        PopupColorPallet::empty(
                            self,
                            None,
                            popup_color_pallet::Props {
                                direction: popup_color_pallet::Direction::Bottom,
                                default_selected: craftboard.grid_color().clone(),
                            },
                            Sub::map(|sub| match sub {
                                popup_color_pallet::On::SelectColor(color) => {
                                    Msg::Sub(On::SetGridColor(color))
                                }
                            }),
                        ),
                    ],
                ),
                Html::div(
                    Attributes::new().class(Common::keyvalue()),
                    Events::new(),
                    vec![
                        Text::span("ボクセル密度(X)"),
                        Slider::new(
                            self,
                            None,
                            slider::Position::Linear {
                                min: 0.5,
                                max: 10.0,
                                val: craftboard.voxel_density()[0],
                                step: 0.5,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(x) => Msg::Sub(On::SetVoxelDensityX(x)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                        Text::span("ボクセル密度(Y)"),
                        Slider::new(
                            self,
                            None,
                            slider::Position::Linear {
                                min: 0.5,
                                max: 10.0,
                                val: craftboard.voxel_density()[1],
                                step: 0.5,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(y) => Msg::Sub(On::SetVoxelDensityY(y)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                        Text::span("ボクセル密度(Z)"),
                        Slider::new(
                            self,
                            None,
                            slider::Position::Linear {
                                min: 0.5,
                                max: 10.0,
                                val: craftboard.voxel_density()[2],
                                step: 0.5,
                            },
                            Sub::map(move |sub| match sub {
                                slider::On::Input(z) => Msg::Sub(On::SetVoxelDensityZ(z)),
                                _ => Msg::NoOp,
                            }),
                            slider::Props {
                                range_is_editable: false,
                                theme: slider::Theme::Light,
                            },
                        ),
                    ],
                ),
            ],
        )
    }

    /// Renders one texture picker per board face, in display order
    /// top/bottom, back/front, right/left (texture indices 2,5,1,4,0,3).
    fn render_textures(&self, craftboard: &block::Craftboard) -> Html {
        Html::div(
            Attributes::new().class(Self::class("content")),
            Events::new(),
            vec![
                self.render_texture_block(craftboard, "PZ(上)", 2),
                self.render_texture_block(craftboard, "NZ(下)", 5),
                self.render_texture_block(craftboard, "PY(奥)", 1),
                self.render_texture_block(craftboard, "NY(前)", 4),
                self.render_texture_block(craftboard, "PX(右)", 0),
                self.render_texture_block(craftboard, "NX(左)", 3),
            ],
        )
    }

    /// Renders one labelled texture slot.
    fn render_texture_block(
        &self,
        craftboard: &block::Craftboard,
        name: impl Into<String>,
        tex_idx: usize,
    ) -> Html {
        Html::div(
            Attributes::new(),
            Events::new(),
            vec![
                Heading::h5(
                    heading::Variant::Light,
                    Attributes::new(),
                    Events::new(),
                    vec![Html::text(name)],
                ),
                self.render_texture(craftboard, tex_idx),
            ],
        )
    }

    /// Shows the texture image when one is set; otherwise a button. Either
    /// way, clicking opens the texture-selection modal for `tex_idx`.
    fn render_texture(&self, craftboard: &block::Craftboard, tex_idx: usize) -> Html {
        craftboard.textures()[tex_idx]
            .as_ref()
            .map(|texture| {
                texture.map(|texture| {
                    Html::img(
                        Attributes::new()
                            .draggable("false")
                            .src(texture.url().to_string())
                            .class(Common::bg_transparent()),
                        Events::new().on_click(self, move |_| {
                            Msg::Sub(On::OpenModal(ShowingModal::SelectTexture(tex_idx)))
                        }),
                        vec![],
                    )
                })
            })
            .unwrap_or(None)
            .unwrap_or_else(|| {
                Btn::secondary(
                    Attributes::new(),
                    Events::new().on_click(self, move |_| {
                        Msg::Sub(On::OpenModal(ShowingModal::SelectTexture(tex_idx)))
                    }),
                    vec![Html::text("画像を選択")],
                )
            })
    }
}
impl Styled for Tab0 {
    fn style() -> Style {
        style! {
            // Single-column scrolling container for the tab body.
            ".main" {
                "display": "grid";
                "grid-template-columns": "1fr";
                "grid-auto-rows": "max-content";
                "overflow-y": "scroll";
            }
            // Responsive grid used by both the props and the texture panels.
            ".content" {
                "display": "grid";
                "column-gap": ".65rem";
                "row-gap": ".65rem";
                "align-items": "start";
                "padding-left": ".65rem";
                "padding-right": ".65rem";
                "grid-template-columns": "repeat(auto-fit, minmax(20rem, 1fr))";
                "grid-auto-rows": "max-content";
            }
            // Texture previews scale to fit without cropping.
            ".content img" {
                "width": "100%";
                "max-height": "20rem";
                "object-fit": "contain";
            }
        }
    }
}
|
use crate::errors::*;
use crate::game::Game;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
const VERSION: usize = 1;
/// On-disk database of known games, serialized as pretty-printed JSON.
#[derive(Deserialize, Debug, Serialize)]
pub struct Database {
    // Schema version of the serialized file; checked against `VERSION` on load.
    version: usize,
    pub games: Vec<Game>,
    // Where this database is saved; not serialized, filled in when loading.
    #[serde(skip)]
    path: PathBuf,
}
impl Database {
    /// Opens the database at `storage_path/windows.json`. When the file does
    /// not exist yet, it is created from the bundled default database and
    /// saved immediately.
    pub fn new<T: AsRef<Path>>(storage_path: T) -> Result<Database> {
        let windows_path = storage_path.as_ref().join("windows.json");
        let db = if windows_path.exists() {
            Database::load_from(&windows_path)?
        } else {
            let mut fresh = Database::load(include_str!("../res/windows.json"))?;
            fresh.path = windows_path;
            fresh.save()?;
            fresh
        };
        Ok(db)
    }

    /// Writes the database back to its path as pretty-printed JSON.
    pub fn save(&self) -> Result<()> {
        println!("Saving {}", self.path.display());
        let file = std::fs::File::create(&self.path)?;
        serde_json::to_writer_pretty(file, self)?;
        Ok(())
    }

    /// Reads and parses the database file at `path`, remembering `path` for
    /// subsequent saves.
    fn load_from<T: AsRef<Path>>(path: T) -> Result<Database> {
        let contents = std::fs::read_to_string(&path)?;
        let mut db = Database::load(contents)?;
        db.path = path.as_ref().to_path_buf();
        println!(
            "Loaded {} game entries from {}",
            db.games.len(),
            path.as_ref().display()
        );
        Ok(db)
    }

    /// Parses a database from a JSON string, rejecting files written by a
    /// newer schema version, then normalizes the game list.
    fn load<T: AsRef<str>>(data: T) -> Result<Database> {
        let mut db: Database = serde_json::from_str(data.as_ref())?;
        if db.version > VERSION {
            bail!(ErrorKind::DatabaseTooNew(db.version, VERSION));
        }
        // The sorting of Game prioritises customisations.
        db.games.sort();
        db.games.dedup();
        // Convert path variables to expanded paths
        for game in db.games.iter_mut() {
            for save in game.saves.iter_mut() {
                save.update_path()?;
            }
        }
        Ok(db)
    }

    /// Prints every game whose id or title contains `keyword`, or a miss
    /// message when nothing matches.
    pub fn search(&self, keyword: &str) {
        if keyword.is_empty() {
            eprintln!("The keyword must not be empty");
            return;
        }
        let mut found_any = false;
        for game in self
            .games
            .iter()
            .filter(|g| g.id.contains(keyword) || g.title.contains(keyword))
        {
            println!("Found {} ({})", game.title, game.id);
            found_any = true;
        }
        if !found_any {
            println!("Couldn't find any matching games");
        }
    }

    /// Adds `game`, dropping any equal custom entry first, and saves
    /// immediately.
    pub fn add(&mut self, game: Game) -> Result<()> {
        self.games
            .retain(|existing| !(*existing == game && existing.custom));
        self.games.push(game);
        self.save()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    // Older schema versions must still load.
    #[test]
    fn test_load_older_version_succeeds() {
        let json = json!({ "version": VERSION - 1, "games": [] });
        Database::load(json.to_string()).unwrap();
    }

    // The current schema version must load.
    #[test]
    fn test_load_current_version_succeeds() {
        let json = json!({ "version": VERSION, "games": [] });
        Database::load(json.to_string()).unwrap();
    }

    // Files written by a newer binary must be rejected.
    #[test]
    fn test_load_newer_version_fails() {
        let json = json!({ "version": VERSION + 1, "games": [] });
        Database::load(json.to_string()).unwrap_err();
    }
}
|
#[macro_use]
extern crate vulkano;
#[macro_use]
extern crate vulkano_shader_derive;
use std::sync::Arc;
use vulkano::buffer::BufferUsage;
use vulkano::buffer::CpuAccessibleBuffer;
use vulkano::command_buffer::AutoCommandBufferBuilder;
use vulkano::command_buffer::CommandBuffer;
use vulkano::descriptor::descriptor_set::PersistentDescriptorSet;
use vulkano::device::Device;
use vulkano::device::DeviceExtensions;
use vulkano::device::QueuesIter;
use vulkano::instance::Features;
use vulkano::instance::Instance;
use vulkano::instance::InstanceExtensions;
use vulkano::instance::PhysicalDevice;
use vulkano::pipeline::ComputePipeline;
use vulkano::sync::GpuFuture;
/// Picks the physical device at a fixed index (0) from the Vulkan instance,
/// panicking when no device exists at that index.
fn select_physical_device<'a>(instance: &'a Arc<Instance>) -> PhysicalDevice<'a> {
    let device_index = 0;
    match PhysicalDevice::from_index(&instance, device_index) {
        Some(device) => device,
        None => panic!("Failed to get the specified physical device."),
    }
}
/// Creates a logical device (plus its queue iterator) on the first
/// graphics-capable queue family of `physical_device`.
fn get_device(physical_device: PhysicalDevice) -> (Arc<Device>, QueuesIter) {
    let queue_family = physical_device
        .queue_families()
        .find(|&q| q.supports_graphics())
        .expect("Couldn't find a graphical queue family.");
    let queue_request = [(queue_family, 0.5)];
    Device::new(
        physical_device,
        &Features::none(),
        &DeviceExtensions::none(),
        queue_request.iter().cloned(),
    )
    .expect("failed to create device")
}
// Compute-shader module generated at compile time by vulkano_shader_derive
// from the GLSL source at src/shader/compute.glsl.
mod cs {
    #[derive(VulkanoShader)]
    #[ty = "compute"]
    #[path = "src/shader/compute.glsl"]
    struct Dummy;
}
/// Multiplies two matrices on the GPU via a compute shader, then verifies
/// every element of the result against a CPU computation.
fn main() {
    // Dimensions: a is M×K, b is K×N, c = a·b is M×N (all row-major).
    const K: usize = 128;
    const M: usize = 256;
    const N: usize = 512;
    // Workgroup size; the dispatch below divides N and M by these.
    const LOCAL_SIZE_X : usize = 32;
    const LOCAL_SIZE_Y : usize = 32;

    // let mut a = vec![0f32; K * M];
    // let mut b = vec![0f32; N * K];
    // let mut c = vec![0f32; N * M];
    let mut a = [0f32; K * M];
    let mut b = [0f32; N * K];
    let mut c = [0f32; N * M];

    // TODO compare performance and generated assembly (bound checks) for array initialization
    // - normal index access
    // - unsafe index access
    // - iterators
    // Fill a and b with deterministic values (row index + column index).
    for (i, v) in a.iter_mut().enumerate() {
        *v = (i / K + i % K) as f32;
    }
    for (i, v) in b.iter_mut().enumerate() {
        *v = (i / N + i % N) as f32;
    }

    // Vulkan initialization
    let app_infos = app_info_from_cargo_toml!();
    let instance = Instance::new(Some(&app_infos), &InstanceExtensions::none(), None)
        .expect("Failed to create instance - no Vulkan implementations available.");
    let (device, mut queues) = get_device(select_physical_device(&instance));
    let queue = queues.next().unwrap();

    // Upload the matrices into CPU-accessible buffers.
    let buffer_a = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
        a).expect("failed to create buffer");
    let buffer_b = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
        b).expect("failed to create buffer");
    let buffer_c = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(),
        c).expect("failed to create buffer");

    let shader = cs::Shader::load(device.clone())
        .expect("Failed to create shader module.");
    let compute_pipeline = Arc::new(ComputePipeline::new(device.clone(), &shader.main_entry_point(), &())
        .expect("Failed to create compute pipeline."));
    // Bind the three buffers to descriptor set 0 in shader binding order.
    let descriptor_set = Arc::new(PersistentDescriptorSet::start(compute_pipeline.clone(), 0)
        .add_buffer(buffer_a.clone()).unwrap()
        .add_buffer(buffer_b.clone()).unwrap()
        .add_buffer(buffer_c.clone()).unwrap()
        .build().unwrap()
    );
    // One workgroup per LOCAL_SIZE_X × LOCAL_SIZE_Y tile of c; the matrix
    // dimensions are passed as push constants.
    let command_buffer = AutoCommandBufferBuilder::new(device.clone(), queue.family()).unwrap()
        .dispatch([(N / LOCAL_SIZE_X) as u32, (M / LOCAL_SIZE_Y) as u32, 1],
            compute_pipeline.clone(),
            descriptor_set.clone(),
            [K as u32, M as u32, N as u32]).unwrap()
        .build().unwrap();
    println!("Created command buffer");

    let finished = command_buffer.execute(queue.clone()).unwrap();
    println!("Submitted command buffer to queue");
    // Block until the GPU signals completion before reading buffer_c.
    finished.then_signal_fence_and_flush().unwrap()
        .wait(None).unwrap();
    println!("Command buffer finished executing");

    // CPU verification: recompute each c[y][x] as the dot product of row y
    // of a with column x of b and compare exactly.
    let mat_c_results = buffer_c.read().unwrap();
    for (n, val) in mat_c_results.iter().enumerate() {
        let x = n % N;
        let y = n / N;
        let mut expected_value = 0f32;
        for i in 0..K {
            expected_value += a[y * K + i] * b[x + i * N];
        }
        assert_eq!(*val, expected_value);
    }
    println!("Success");
}
|
/// Newtype around an `i32`, demonstrating the derived
/// `Copy`/`Clone`/`Debug`/`PartialEq` implementations.
#[derive(Copy, Clone, Debug, PartialEq)]
struct Number (i32);
/// Prints a `Number`, an explicit clone of it, and whether the two compare
/// equal (the clone call is deliberate — it demonstrates the derived traits).
fn main() {
    let value = Number(8i32);
    println!("{:?}", value);

    let duplicate = value.clone();
    println!("{:?}", duplicate);

    println!("num == num_clone ? {}", value == duplicate);
}
|
use crate::applied_channel_txn::AppliedChannelTxn;
use crate::proof::signed_channel_transaction_proof::SignedChannelTransactionProof;
use anyhow::{ensure, Result};
use libra_types::account_address::AccountAddress;
use libra_types::contract_event::ContractEvent;
use libra_types::ledger_info::LedgerInfo;
use libra_types::transaction::Version;
/// A signed channel transaction together with the proof tying it to the
/// ledger.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct SignedChannelTransactionWithProof {
    // Ledger version at which the transaction was committed.
    pub version: Version,
    // The applied channel transaction itself.
    pub signed_transaction: AppliedChannelTxn,
    // Events emitted by the transaction, when available.
    pub events: Option<Vec<ContractEvent>>,
    // Inclusion proof for the transaction.
    pub proof: SignedChannelTransactionProof,
}
impl SignedChannelTransactionWithProof {
    /// Verifies the signed transaction with the proof, both carried by `self`.
    ///
    /// Two things are ensured if no error is raised:
    /// 1. This signed transaction exists in the ledger represented by `ledger_info`.
    /// 2. And this signed transaction has the same `version`, `sender`, and `sequence_number` as
    /// indicated by the parameter list. If any of these parameter is unknown to the call site that
    /// is supposed to be informed via this struct, get it from the struct itself, such as:
    /// `signed_txn_with_proof.version`, `signed_txn_with_proof.signed_transaction.sender()`, etc.
    ///
    /// NOTE(review): only the version/proposer/channel-sequence-number field
    /// checks are implemented so far; the cryptographic proof verification
    /// below is still a TODO, so guarantee (1) does not hold yet.
    pub fn verify(
        &self,
        _ledger_info: &LedgerInfo,
        version: Version,
        proposer: AccountAddress,
        _sequence_number: u64,
        channel_sequence_number: u64,
    ) -> Result<()> {
        ensure!(
            self.version == version,
            "Version ({}) is not expected ({}).",
            self.version,
            version,
        );
        ensure!(
            self.signed_transaction.proposer() == proposer,
            "Sender ({}) not expected ({}).",
            self.signed_transaction.proposer(),
            proposer,
        );
        // ensure!(
        //     self.signed_transaction.sequence_number() == sequence_number,
        //     "Sequence number ({}) not expected ({}).",
        //     self.signed_transaction.sequence_number(),
        //     sequence_number,
        // );
        ensure!(
            self.signed_transaction.channel_sequence_number() == channel_sequence_number,
            "Channel sequence number ({}) not expected ({}).",
            self.signed_transaction.channel_sequence_number(),
            channel_sequence_number,
        );
        // TODO(caojiafeng): impl the verification logic
        // let events_root_hash = self.events.as_ref().map(|events| {
        //     let event_hashes: Vec<_> = events.iter().map(ContractEvent::hash).collect();
        //     get_accumulator_root_hash::<EventAccumulatorHasher>(&event_hashes)
        // });
        // verify_signed_transaction(
        //     ledger_info,
        //     self.signed_transaction.hash(),
        //     events_root_hash,
        //     version,
        //     &self.proof,
        // )
        Ok(())
    }
}
|
/*
SPDX-License-Identifier: Apache-2.0 OR MIT
Copyright 2020 The arboard contributors
The project to which this file belongs is licensed under either of
the Apache 2.0 or the MIT license at the licensee's choice. The terms
and conditions of the chosen license apply to this file.
*/
//!
//!
//! This implementation is a port of https://github.com/dacap/clip to Rust
//! The structure of the original is more or less maintained.
//!
//! Disclaimer: The original C++ code is well organized and feels clean but it relies on C++
//! allowing a liberal data sharing between threads and it is painfully obvious from certain parts
//! of this port that this code was not designed for Rust. It should probably be reworked because
//! the absolute plague that the Arc<Mutex<>> objects are in this code is horrible just to look at
//! and will forever haunt me in my nightmares.
//!
//! Most changes are to conform with Rust's rules. For example, there are multiple overloads of
//! the `get_atom` function in the original, but there's no function overloading in Rust, so
//! those are split apart into functions with different names (`get_atom_by_id` and the other one,
//! which at the time of writing I haven't needed to use).
//!
//! More notably, the `Manager` class had to be split into multiple `structs`, and some member
//! functions were made into global functions to conform to Rust's aliasing rules.
//! Furthermore the signature of many functions was changed to follow a simple locking philosophy;
//! namely that the mutex gets locked at the topmost level possible and then most functions don't
//! need to attempt to lock, instead they just use the direct object references passed on as arguments.
//!
//!
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::rc::Rc;
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
use std::time::Duration;
use lazy_static::lazy_static;
use x11rb::protocol::xproto;
use x11rb::{
connection::Connection,
protocol::{
xproto::{
Atom, AtomEnum, ConnectionExt as _, CreateWindowAux, EventMask, GetPropertyReply,
PropMode, Property, PropertyNotifyEvent, SelectionClearEvent, SelectionNotifyEvent,
SelectionRequestEvent, Time, Window, WindowClass,
},
Event,
},
rust_connection::RustConnection,
wrapper::ConnectionExt as _,
};
use super::common::{Error, ImageData};
// Atoms used by the clipboard machinery itself (selection handling, the INCR
// transfer protocol, and the clipboard manager).
x11rb::atom_manager! {
    pub CommonAtoms: CommonAtomCookies {
        ATOM,
        INCR,
        TARGETS,
        CLIPBOARD,
        MIME_IMAGE_PNG: b"image/png",
        ATOM_PAIR,
        SAVE_TARGETS,
        MULTIPLE,
        CLIPBOARD_MANAGER,
    }
}
// Atoms naming the text formats we can offer to or accept from the clipboard.
x11rb::atom_manager! {
    pub TextAtoms: TextAtomCookies {
        UTF8_STRING,
        TEXT_PLAIN_1: b"text/plain;charset=utf-8",
        TEXT_PLAIN_2: b"text/plain;charset=UTF-8",
        // ANSI C strings?
        STRING,
        TEXT,
        TEXT_PLAIN_0: b"text/plain",
    }
}
// Shared, lockable byte buffer holding clipboard payload data; None = no data.
type BufferPtr = Option<Arc<Mutex<Vec<u8>>>>;
type Atoms = Vec<Atom>;
// Callback invoked on the received selection data; its bool result ends up in
// `Manager::callback_result` (success/failure of processing the data).
type NotifyCallback = Option<Arc<dyn (Fn(&BufferPtr) -> bool) + Send + Sync + 'static>>;
lazy_static! {
    // Global manager + shared state, created lazily (None until first use)
    // and guarded by a single mutex.
    static ref LOCKED_OBJECTS: Arc<Mutex<Option<LockedObjects>>> = Arc::new(Mutex::new(None));
    // Used to wait/notify the arrival of the SelectionNotify event when
    // we requested the clipboard content from other selection owner.
    static ref CONDVAR: Condvar = Condvar::new();
}
/// Everything that lives behind the global `LOCKED_OBJECTS` mutex.
struct LockedObjects {
    shared: SharedState,
    manager: Manager,
}
impl LockedObjects {
fn new() -> Result<LockedObjects, Error> {
let (connection, screen) = RustConnection::connect(None).unwrap();
match Manager::new(&connection, screen) {
Ok(manager) => {
//unsafe { libc::atexit(Manager::destruct); }
Ok(LockedObjects {
shared: SharedState {
conn: Some(Arc::new(connection)),
common_atoms: Default::default(),
text_atoms: Default::default(),
},
manager,
})
}
Err(e) => Err(e),
}
}
}
/// The name indicates that objects in this struct are shared between
/// the event processing thread and the user thread. However it's important
/// that the `Manager` itself is also shared. So the real reason for splitting these
/// apart from the `Manager` is to conform to Rust's aliasing rules, but that is hard to
/// convey in a short name.
struct SharedState {
    conn: Option<Arc<RustConnection>>,
    // Cache of commonly used atoms interned by us
    common_atoms: Option<CommonAtoms>,
    // Cache of atoms related to text or image content
    text_atoms: Option<TextAtoms>,
    //image_atoms: Atoms,
}
impl SharedState {
    /// Returns the common clipboard atoms, interning them on the server and
    /// caching the result on first use.
    // Bug fix: the `common_atoms`/`text_atoms` fields are documented as
    // caches, but the previous `unwrap_or_else` never stored the fetched
    // atoms back, so every call performed a fresh server round-trip.
    fn common_atoms(&mut self) -> CommonAtoms {
        if self.common_atoms.is_none() {
            let atoms = CommonAtoms::new(self.conn.as_ref().unwrap().as_ref())
                .unwrap()
                .reply()
                .unwrap();
            self.common_atoms = Some(atoms);
        }
        self.common_atoms.unwrap()
    }

    /// Returns the text-format atoms in preference order, interning and
    /// caching them on first use (see `common_atoms` for the cache fix).
    fn text_atoms(&mut self) -> Atoms {
        if self.text_atoms.is_none() {
            let atoms = TextAtoms::new(self.conn.as_ref().unwrap().as_ref())
                .unwrap()
                .reply()
                .unwrap();
            self.text_atoms = Some(atoms);
        }
        let atoms = self.text_atoms.as_ref().unwrap();
        vec![
            atoms.UTF8_STRING,
            atoms.TEXT_PLAIN_1,
            atoms.TEXT_PLAIN_2,
            atoms.STRING,
            atoms.TEXT,
            atoms.TEXT_PLAIN_0,
        ]
    }
}
struct Manager {
    // Temporary background window used to own the clipboard and process
    // all events related to the clipboard in a background thread
    window: Window,

    // Thread used to run a background message loop to wait for X11 events
    // about the clipboard. The X11 selection owner will be a hidden window
    // created by us just for the clipboard purpose/communication.
    thread_handle: Option<std::thread::JoinHandle<()>>,

    // WARNING: The callback must not attempt to lock the manager or the shared state.
    // (Otherwise the code needs to be restructured slightly)
    //
    // Internal callback used when a SelectionNotify is received (or the
    // whole data content is received by the INCR method). This
    // callback can be used for different purposes (e.g. get
    // the data length only, or get/process the data content, etc.).
    callback: NotifyCallback,

    // Result returned by the m_callback. Used as return value in the
    // get_data_from_selection_owner() function. For example, if the
    // callback must read a "image/png" file from the clipboard data and
    // fails, the callback can return false and finally the get_image()
    // will return false (i.e. there is data, but it's not a valid image
    // format).
    callback_result: bool,

    // Actual clipboard data generated by us (when we "copy" content to
    // the clipboard, it means that we own the X11 "CLIPBOARD"
    // selection, and in case of SelectionRequest events, we have to
    // return the data stored in this "m_data" field)
    data: BTreeMap<Atom, BufferPtr>,

    // Copied image in the clipboard. As we have to transfer the image
    // in some specific format (e.g. image/png) we want to keep a copy
    // of the image and make the conversion when the clipboard data is
    // requested by other process.
    image: super::common::ImageData<'static>,

    // True if we have received an INCR notification so we're going to
    // process several PropertyNotify to concatenate all data chunks.
    incr_process: bool,

    /// Variable used to wait more time if we've received an INCR
    /// notification, which means that we're going to receive large
    /// amounts of data from the selection owner.
    ///mutable bool m_incr_received;
    incr_received: bool,

    // Target/selection format used in the SelectionNotify. Used in the
    // INCR method to get data from the same property in the same format
    // (target) on each PropertyNotify.
    target_atom: Atom,

    // Each time we receive data from the selection owner, we put that
    // data in this buffer. If we get the data with the INCR method,
    // we'll concatenate chunks of data in this buffer to complete the
    // whole clipboard content.
    reply_data: BufferPtr,

    // Used to concatenate chunks of data in "m_reply_data" from several
    // PropertyNotify when we are getting the selection owner data with
    // the INCR method.
    reply_offset: usize,

    // List of user-defined formats/atoms.
    //custom_formats: Vec<xcb::xproto::Atom>,
}
impl Manager {
    /// Creates the hidden 1x1 window used to own the CLIPBOARD selection and
    /// spawns the background thread running the X11 message loop.
    ///
    /// The window subscribes to PROPERTY_CHANGE (needed for the INCR data
    /// transfer protocol) and STRUCTURE_NOTIFY (to receive the DestroyNotify
    /// that stops the message loop).
    fn new(connection: &RustConnection, screen: usize) -> Result<Self, Error> {
        let setup = connection.setup();
        let screen = setup.roots.get(screen).ok_or(Error::Unknown {
            description: String::from("Could not get screen from setup"),
        })?;
        let event_mask =
            // Just in case that some program reports SelectionNotify events
            // with XCB_EVENT_MASK_PROPERTY_CHANGE mask.
            EventMask::PROPERTY_CHANGE |
            // To receive DestroyNotify event and stop the message loop.
            EventMask::STRUCTURE_NOTIFY;
        let window = connection
            .generate_id()
            .map_err(|e| Error::Unknown { description: format!("{}", e) })?;
        connection
            .create_window(
                0, // depth: 0 = copy from parent
                window,
                screen.root,
                0, // x
                0, // y
                1, // width: 1x1, the window is never mapped/visible
                1, // height
                0, // border width
                WindowClass::INPUT_OUTPUT,
                screen.root_visual,
                &CreateWindowAux::new().event_mask(event_mask),
            )
            .map_err(|e| Error::Unknown { description: format!("{}", e) })?;
        let thread_handle = std::thread::spawn(process_x11_events);
        Ok(Manager {
            //mutex: Mutex::new(()),
            window,
            thread_handle: Some(thread_handle),
            callback: None,
            callback_result: false,
            data: Default::default(),
            image: super::common::ImageData {
                width: 0,
                height: 0,
                bytes: std::borrow::Cow::from(vec![]),
            },
            incr_process: false,
            incr_received: false,
            target_atom: 0,
            reply_data: Default::default(),
            reply_offset: 0,
        })
    }
    /// Claims ownership of the X11 CLIPBOARD selection for our hidden window.
    ///
    /// NOTE(review): `cookie.is_ok()` only confirms the request could be sent,
    /// not that the server actually granted ownership — confirm whether a
    /// GetSelectionOwner round trip is wanted here.
    fn set_x11_selection_owner(&self, shared: &mut SharedState) -> bool {
        let clipboard_atom = shared.common_atoms().CLIPBOARD;
        let cookie = shared.conn.as_ref().unwrap().set_selection_owner(
            self.window,
            clipboard_atom,
            Time::CURRENT_TIME,
        );
        cookie.is_ok()
    }
    /// Stores `image` as the clipboard content and takes selection ownership.
    ///
    /// The PNG encoding itself is deferred: only a `None` placeholder is
    /// inserted for the image/png target, and `encode_data_on_demand`
    /// produces the actual bytes when another client requests that format.
    fn set_image(&mut self, shared: &mut SharedState, image: ImageData) -> Result<(), Error> {
        if !self.set_x11_selection_owner(shared) {
            return Err(Error::Unknown {
                description: "Failed to set x11 selection owner.".into(),
            });
        }
        self.image.width = image.width;
        self.image.height = image.height;
        self.image.bytes = image.bytes.into_owned().into();
        // Put a ~nullptr~ (None) in the m_data for image/png format and then we'll
        // encode the png data when the image is requested in this format.
        self.data.insert(shared.common_atoms().MIME_IMAGE_PNG, None);
        Ok(())
    }
    /// Stores `bytes` as the clipboard text under every supported text
    /// target/atom, sharing one buffer between all of them.
    ///
    /// Rust impl: instead of this function there's a more generic `set_data` which I believe can
    /// also set user formats, but arboard doesn't support that for now.
    fn set_text(&mut self, shared: &mut SharedState, bytes: Vec<u8>) -> Result<(), Error> {
        if !self.set_x11_selection_owner(shared) {
            return Err(Error::Unknown {
                description: "Could not take ownership of the x11 selection".into(),
            });
        }
        let atoms = shared.text_atoms();
        if atoms.is_empty() {
            return Err(Error::Unknown { description:
                "Couldn't get the atoms that identify supported text formats for the x11 clipboard"
                    .into(),
            });
        }
        // One shared buffer registered under every text atom, so each
        // requested target serves the same bytes without copies.
        let arc_data = Arc::new(Mutex::new(bytes));
        for atom in atoms {
            self.data.insert(atom, Some(arc_data.clone()));
        }
        Ok(())
    }
    /// Discards all clipboard data we own (raw buffers and the cached image).
    fn clear_data(&mut self) {
        self.data.clear();
        self.image.width = 0;
        self.image.height = 0;
        self.image.bytes = Vec::new().into();
    }
    /// Writes our clipboard content for `target` into `property` on the
    /// `requestor` window, lazily encoding placeholder entries (e.g. PNG)
    /// first. Returns `false` when we have no data for `target` or encoding
    /// failed, in which case the property is left untouched.
    fn set_requestor_property_with_clipboard_content(
        &mut self,
        shared: &mut SharedState,
        requestor: Window,
        property: Atom,
        target: Atom,
    ) -> bool {
        let item = {
            if let Some(item) = self.data.get_mut(&target) {
                item
            } else {
                // Nothing to do (unsupported target)
                return false;
            }
        };
        // This can be null if the data was set from an image but we
        // didn't encode the image yet (e.g. to image/png format).
        if item.is_none() {
            encode_data_on_demand(shared, &mut self.image, target, item);
            // Return nothing, the given "target" cannot be constructed
            // (maybe by some encoding error).
            if item.is_none() {
                return false;
            }
        }
        let item = item.as_ref().unwrap().lock().unwrap();
        // Set the "property" of "requestor" with the
        // clipboard content in the requested format ("target").
        if let Err(e) = shared.conn.as_ref().unwrap().change_property8(
            PropMode::REPLACE,
            requestor,
            property,
            target,
            item.as_slice(),
        ) {
            log::error!("{}", e)
        }
        true
    }
    /// Appends the bytes of `reply` to `reply_data` at `reply_offset`,
    /// creating or growing the buffer as needed. Used both for a single-shot
    /// SelectionNotify reply and for each INCR chunk.
    fn copy_reply_data(&mut self, reply: &GetPropertyReply) {
        let src = &reply.value;
        // n = length of "src" in bytes
        let n = reply.value_len;
        let req = self.reply_offset + n as usize;
        match &mut self.reply_data {
            None => {
                self.reply_offset = 0; // Rust impl: I added this just to be extra sure.
                self.reply_data = Some(Arc::new(Mutex::new(vec![0; req])));
            }
            // The "m_reply_data" size can be smaller because the size
            // specified in INCR property is just a lower bound.
            Some(reply_data) => {
                let mut reply_data = reply_data.lock().unwrap();
                if req > reply_data.len() {
                    reply_data.resize(req, 0);
                }
            }
        }
        let src_slice = src.as_slice();
        let mut reply_data_locked = self.reply_data.as_mut().unwrap().lock().unwrap();
        reply_data_locked[self.reply_offset..req].copy_from_slice(src_slice);
        self.reply_offset += n as usize;
    }
    /// Invokes the registered notification callback (if any) on the
    /// accumulated reply data, records its result, wakes the thread blocked
    /// in `get_data_from_selection_owner`, and releases the buffer.
    ///
    /// Rust impl: It's strange, the reply attribute is also unused in the original code.
    fn call_callback(&mut self, _reply: GetPropertyReply) {
        self.callback_result = false;
        if let Some(callback) = &self.callback {
            self.callback_result = callback(&self.reply_data);
        }
        // Wake up the requesting thread waiting on the condition variable.
        CONDVAR.notify_one();
        self.reply_data = None;
    }
    /// Rust impl: This function was added instead of the destructor because the drop
    /// does not get called on lazy static objects. This function is registered for `libc::atexit`
    /// on a successful initialization.
    ///
    /// If we still own the selection and hold data, it first hands the data
    /// to the X11 CLIPBOARD_MANAGER (SAVE_TARGETS), then destroys the hidden
    /// window (which makes the event thread exit) and joins that thread.
    fn destruct() {
        let join_handle;
        // The following scope is to ensure that we release the lock
        // before attempting to join the thread.
        {
            let mut guard = LOCKED_OBJECTS.lock().unwrap();
            if guard.is_none() {
                return;
            }
            // Shorthand accessors for the two halves of the global; written
            // as macros so each use re-borrows `guard` afresh.
            macro_rules! manager {
                () => {
                    guard.as_mut().unwrap().manager
                };
            }
            macro_rules! shared {
                () => {
                    guard.as_mut().unwrap().shared
                };
            }
            if !manager!().data.is_empty()
                && manager!().window != 0
                && manager!().window == get_x11_selection_owner(&mut shared!())
            {
                let atoms = vec![shared!().common_atoms().SAVE_TARGETS];
                let selection = shared!().common_atoms().CLIPBOARD_MANAGER;
                // Start the SAVE_TARGETS mechanism so the X11
                // CLIPBOARD_MANAGER will save our clipboard data
                // from now on.
                guard = get_data_from_selection_owner(
                    guard,
                    &atoms,
                    Some(Arc::new(|_| true)),
                    selection,
                )
                .1;
            }
            if manager!().window != 0 {
                let window = manager!().window;
                let _ = shared!().conn.as_ref().unwrap().destroy_window(window);
                let _ = shared!().conn.as_ref().unwrap().flush();
                manager!().window = 0;
            }
            join_handle = manager!().thread_handle.take();
        }
        if let Some(handle) = join_handle {
            handle.join().ok();
        }
        // This is not needed because the connection is automatically disconnected when dropped
        // if (m_connection)
        //     xcb_disconnect(m_connection);
    }
}
/// Message loop run on the background clipboard thread.
///
/// Polls the X11 connection and dispatches clipboard-related events to
/// their handlers until a `DestroyNotify` arrives (our hidden window was
/// destroyed, the shutdown signal) or the connection reports an error.
fn process_x11_events() {
    // Clone the connection handle up front so the global lock is not held
    // while polling for events.
    let connection = {
        let objects = LOCKED_OBJECTS.lock().unwrap();
        objects.as_ref().unwrap().shared.conn.clone()
    };
    loop {
        // Poll (with a short sleep to avoid spinning) instead of blocking,
        // so other threads can freely take the global lock between events.
        std::thread::sleep(Duration::from_millis(5));
        let event = match connection.as_ref().unwrap().poll_for_event() {
            Ok(Some(event)) => event,
            Ok(None) => continue,
            Err(_) => break,
        };
        match event {
            // Our hidden window was destroyed: stop the message loop.
            Event::DestroyNotify(_) => break,
            // Someone else has new content in the clipboard, so is
            // notifying us that we should delete our data now.
            Event::SelectionClear(e) => handle_selection_clear_event(e),
            // Someone is requesting the clipboard content from us.
            Event::SelectionRequest(e) => handle_selection_request_event(e),
            // We've requested the clipboard content and this is the answer.
            Event::SelectionNotify(e) => handle_selection_notify_event(e),
            // A chunk notification for an in-progress INCR transfer.
            Event::PropertyNotify(e) => handle_property_notify_event(e),
            _ => {}
        }
        // The event values own their data, so they are freed automatically.
    }
}
/// Handles a `SelectionClear` event: another client took ownership of the
/// CLIPBOARD selection, so the data we were offering is stale and must be
/// dropped.
fn handle_selection_clear_event(event: SelectionClearEvent) {
    let mut objects = LOCKED_OBJECTS.lock().unwrap();
    let objects = objects.as_mut().unwrap();
    // Only the CLIPBOARD selection is managed by us; ignore clears of any
    // other selection.
    if event.selection == objects.shared.common_atoms().CLIPBOARD {
        objects.manager.clear_data();
    }
}
/// Handles a `SelectionRequest` event: another client is asking us (the
/// selection owner) for the clipboard content.
///
/// Supported targets:
/// - TARGETS: report the list of formats we can provide;
/// - SAVE_TARGETS: acknowledged with no work (clipboard-manager protocol);
/// - MULTIPLE: serve several (target, property) pairs from one request;
/// - anything else: treated as a concrete data format.
///
/// A SelectionNotify is sent back to the requestor at the end in all cases
/// except a failed single-target request.
///
/// NOTE(review): the global lock is released and re-acquired several times
/// within this function; verify that no other thread can mutate the
/// clipboard state between those sections in a harmful way.
fn handle_selection_request_event(event: SelectionRequestEvent) {
    let target = event.target;
    let requestor = event.requestor;
    let property = event.property;
    let time = event.time;
    let selection = event.selection;
    // Resolve the protocol atoms first, in a short-lived lock scope.
    let targets_atom;
    let save_targets_atom;
    let multiple_atom;
    let atom_atom;
    {
        let mut guard = LOCKED_OBJECTS.lock().unwrap();
        let locked = guard.as_mut().unwrap();
        let shared = &mut locked.shared;
        targets_atom = shared.common_atoms().TARGETS;
        save_targets_atom = shared.common_atoms().SAVE_TARGETS;
        multiple_atom = shared.common_atoms().MULTIPLE;
        atom_atom = shared.common_atoms().ATOM;
    }
    if target == targets_atom {
        let mut targets = Atoms::with_capacity(4);
        targets.push(targets_atom);
        targets.push(save_targets_atom);
        targets.push(multiple_atom);
        let mut guard = LOCKED_OBJECTS.lock().unwrap();
        let locked = guard.as_mut().unwrap();
        let manager = &locked.manager;
        // Advertise every data format we currently hold.
        for atom in manager.data.keys() {
            targets.push(*atom);
        }
        let shared = &locked.shared;
        // Set the "property" of "requestor" with the clipboard
        // formats ("targets", atoms) that we provide.
        if let Err(e) = shared.conn.as_ref().unwrap().change_property32(
            PropMode::REPLACE,
            requestor,
            property,
            atom_atom,
            targets.as_slice(),
        ) {
            log::error!("{}", e);
        };
    } else if target == save_targets_atom {
        // Do nothing
    } else if target == multiple_atom {
        let mut guard = LOCKED_OBJECTS.lock().unwrap();
        let locked = guard.as_mut().unwrap();
        // The requestor listed the (target, property) pairs it wants in an
        // ATOM_PAIR property on itself; read and delete it.
        let reply = {
            let atom_pair_atom = locked.shared.common_atoms().ATOM_PAIR;
            get_and_delete_property(
                locked.shared.conn.as_ref().unwrap(),
                requestor,
                property,
                atom_pair_atom,
                false,
            )
        };
        if let Some(reply) = reply {
            let atoms = reply.value32();
            for atom in atoms.into_iter().flatten() {
                let target = atom;
                let property = atom;
                let property_set = locked.manager.set_requestor_property_with_clipboard_content(
                    &mut locked.shared,
                    requestor,
                    property,
                    target,
                );
                if !property_set {
                    // Signal "cannot serve this target" by clearing the
                    // property, per the MULTIPLE convention.
                    if let Err(e) = locked.shared.conn.as_ref().unwrap().change_property(
                        PropMode::REPLACE,
                        requestor,
                        property,
                        AtomEnum::NONE,
                        0,
                        0,
                        &[],
                    ) {
                        log::error!("{}", e)
                    }
                }
            }
        }
    } else {
        let mut guard = LOCKED_OBJECTS.lock().unwrap();
        let locked = guard.as_mut().unwrap();
        let property_set = locked.manager.set_requestor_property_with_clipboard_content(
            &mut locked.shared,
            requestor,
            property,
            target,
        );
        if !property_set {
            // We could not serve the requested format; send no notification.
            return;
        }
    }
    let mut guard = LOCKED_OBJECTS.lock().unwrap();
    let locked = guard.as_mut().unwrap();
    let shared = &mut locked.shared;
    // Notify the "requestor" that we've already updated the property.
    let notify = SelectionNotifyEvent {
        response_type: xproto::SELECTION_NOTIFY_EVENT,
        sequence: 0,
        time,
        requestor,
        selection,
        target,
        property,
    };
    if let Err(e) =
        shared.conn.as_ref().unwrap().send_event(false, requestor, EventMask::NO_EVENT, notify)
    {
        log::error!("{}", e)
    }
    if let Err(e) = shared.conn.as_ref().unwrap().flush() {
        log::error!("{}", e)
    }
}
/// Handles a `SelectionNotify` event: the selection owner has answered one
/// of our content requests by writing the data into a property on our
/// window.
///
/// Two cases:
/// - The property type is INCR: the content will arrive chunked through
///   subsequent PropertyNotify events, so allocate the reply buffer with
///   the advertised lower-bound size and raise the INCR flags.
/// - Otherwise the whole content is in this single reply: copy it into the
///   buffer and invoke the callback immediately.
fn handle_selection_notify_event(event: SelectionNotifyEvent) {
    let target = event.target;
    let requestor = event.requestor;
    let property = event.property;
    let mut guard = LOCKED_OBJECTS.lock().unwrap();
    let mut locked = guard.as_mut().unwrap();
    // The requestor must be our own hidden window, since we issued the
    // ConvertSelection request.
    assert_eq!(requestor, locked.manager.window);
    // A TARGETS reply contains a list of atoms, so the property is read
    // with type ATOM; otherwise read it as the requested target type.
    if target == locked.shared.common_atoms().TARGETS {
        locked.manager.target_atom = locked.shared.common_atoms().ATOM;
    } else {
        locked.manager.target_atom = target;
    }
    let target_atom = locked.manager.target_atom;
    let reply = get_and_delete_property(
        locked.shared.conn.as_ref().unwrap(),
        requestor,
        property,
        target_atom,
        true,
    );
    if let Some(reply) = reply {
        let reply_type = reply.type_;
        // In this case, We're going to receive the clipboard content in
        // chunks of data with several PropertyNotify events.
        let incr_atom = locked.shared.common_atoms().INCR;
        if reply_type == incr_atom {
            // Re-read the property as INCR to get the announced total size
            // (a 32-bit lower bound on the content length).
            let reply = get_and_delete_property(
                locked.shared.conn.as_ref().unwrap(),
                requestor,
                property,
                incr_atom,
                true,
            );
            if let Some(reply) = reply {
                if reply.value_len == 4 {
                    let n = reply.value32().and_then(|mut values| values.next()).unwrap_or(0);
                    locked.manager.reply_data = Some(Arc::new(Mutex::new(vec![0u8; n as usize])));
                    locked.manager.reply_offset = 0;
                    locked.manager.incr_process = true;
                    locked.manager.incr_received = true;
                }
            }
        } else {
            // Simple case, the whole clipboard content in just one reply
            // (without the INCR method).
            locked.manager.reply_data = None;
            locked.manager.reply_offset = 0;
            locked.manager.copy_reply_data(&reply);
            locked.manager.call_callback(reply);
        }
    }
}
/// Handles a `PropertyNotify` event, which during an INCR transfer carries
/// the next chunk of clipboard data from the selection owner.
///
/// A zero-length chunk marks the end of the transfer, at which point the
/// assembled buffer is handed to the callback.
fn handle_property_notify_event(event: PropertyNotifyEvent) {
    let mut guard = LOCKED_OBJECTS.lock().unwrap();
    let locked = guard.as_mut().unwrap();
    // Only relevant while an INCR transfer is running, and only for new
    // values of the CLIPBOARD property. (Keep the short-circuit order: the
    // atom lookup only happens when the first two checks pass.)
    if !(locked.manager.incr_process
        && event.state == Property::NEW_VALUE
        && event.atom == locked.shared.common_atoms().CLIPBOARD)
    {
        return;
    }
    let target_atom = locked.manager.target_atom;
    let chunk = get_and_delete_property(
        locked.shared.conn.as_ref().unwrap(),
        event.window,
        event.atom,
        target_atom,
        true,
    );
    if let Some(chunk) = chunk {
        locked.manager.incr_received = true;
        if chunk.value_len > 0 {
            // Another non-empty chunk: append it to the reply buffer.
            locked.manager.copy_reply_data(&chunk);
        } else {
            // Zero length: the owner finished sending. The reply buffer is
            // complete, so run the callback and leave INCR mode.
            locked.manager.call_callback(chunk);
            locked.manager.incr_process = false;
        }
    }
}
/// Fetches the given `property` of `window`, interpreting its contents as
/// type `atom`, and asks the server to delete it when `delete_prop` is set.
///
/// Returns `None` if either sending the request or receiving the reply
/// fails.
fn get_and_delete_property(
    conn: &RustConnection,
    window: Window,
    property: Atom,
    atom: Atom,
    delete_prop: bool,
) -> Option<GetPropertyReply> {
    // 0x1fffffff = INT32_MAX / 4 — request the entire property value.
    let cookie = conn.get_property(delete_prop, window, property, atom, 0, 0x1fffffff).ok()?;
    cookie.reply().ok()
}
/// Requests the clipboard content from the current selection owner, trying
/// each target atom in `atoms` in turn, and waits for `callback` to be run
/// by the background event thread.
///
/// The mutex guard is taken by value and returned because the condition
/// variable must atomically release and re-acquire the very lock the event
/// handlers use. Returns the callback's result together with the
/// (re-locked) guard.
fn get_data_from_selection_owner<'a>(
    mut guard: MutexGuard<'a, Option<LockedObjects>>,
    atoms: &[Atom],
    callback: NotifyCallback,
    mut selection: xproto::Atom,
) -> (bool, MutexGuard<'a, Option<LockedObjects>>) {
    // Wait a response for 100 milliseconds
    const CV_TIMEOUT: std::time::Duration = std::time::Duration::from_millis(100);
    {
        let locked = guard.as_mut().unwrap();
        // A selection of 0 means "default to the CLIPBOARD selection".
        if selection == 0 {
            selection = locked.shared.common_atoms().CLIPBOARD;
        }
        locked.manager.callback = callback;
        // Clear data if we are not the selection owner.
        if locked.manager.window != get_x11_selection_owner(&mut locked.shared) {
            locked.manager.data.clear();
        }
    }
    // Ask to the selection owner for its content on each known
    // text format/atom.
    for atom in atoms.iter() {
        {
            let locked = guard.as_mut().unwrap();
            let clipboard_atom = locked.shared.common_atoms().CLIPBOARD;
            if let Err(e) = locked.shared.conn.as_ref().unwrap().convert_selection(
                locked.manager.window,
                selection,
                *atom,
                clipboard_atom,
                Time::CURRENT_TIME,
            ) {
                log::error!("{}", e)
            }
            if let Err(e) = locked.shared.conn.as_ref().unwrap().flush() {
                log::error!("{}", e)
            }
        }
        // We use the "m_incr_received" to wait several timeouts in case
        // that we've received the INCR SelectionNotify or
        // PropertyNotify events.
        'incr_loop: loop {
            guard.as_mut().unwrap().manager.incr_received = false;
            match CONDVAR.wait_timeout(guard, CV_TIMEOUT) {
                Ok((new_guard, status)) => {
                    guard = new_guard;
                    if !status.timed_out() {
                        // If the condition variable was notified, it means that the
                        // callback was called correctly.
                        return (guard.as_ref().unwrap().manager.callback_result, guard);
                    }
                    // Timed out with no INCR traffic in the meantime: give
                    // up on this atom and try the next one.
                    if !guard.as_ref().unwrap().manager.incr_received {
                        break 'incr_loop;
                    }
                }
                Err(err) => {
                    // A poisoned lock means another thread panicked while
                    // holding it; there is no sane way to continue.
                    panic!(
                        "A critical error occured while working with the x11 clipboard. {}",
                        err
                    );
                }
            }
        }
    }
    // No atom produced an answer: clear the callback and report failure.
    guard.as_mut().unwrap().manager.callback = None;
    (false, guard)
}
/// Asks the X server which window currently owns the CLIPBOARD selection.
///
/// Returns 0 when the query fails or when no window owns the selection.
fn get_x11_selection_owner(shared: &mut SharedState) -> Window {
    let clipboard_atom = shared.common_atoms().CLIPBOARD;
    shared
        .conn
        .as_ref()
        .unwrap()
        .get_selection_owner(clipboard_atom)
        .ok()
        .and_then(|cookie| cookie.reply().ok())
        .map(|reply| reply.owner)
        .unwrap_or(0)
}
/// Reads the clipboard content as UTF-8 text.
///
/// Two paths:
/// - We own the selection: return our stored bytes directly for the first
///   text atom present in `manager.data`.
/// - Someone else owns it: request the data via
///   `get_data_from_selection_owner` and convert the received bytes.
///
/// Errors: `ContentNotAvailable` when there is no owner or no text data,
/// `ConversionFailure` when the bytes are not valid UTF-8.
fn get_text(mut guard: MutexGuard<Option<LockedObjects>>) -> Result<String, Error> {
    // Rust impl: This function is probably the ugliest Rust code I've ever written
    // Make no mistake, the original, C++ code was perfectly fine (which I didn't write)
    let owner = get_x11_selection_owner(&mut guard.as_mut().unwrap().shared);
    if owner == guard.as_mut().unwrap().manager.window {
        let atoms = guard.as_mut().unwrap().shared.text_atoms();
        for atom in atoms.iter() {
            // Clone the Arc out of the map first so the guard borrow ends
            // before we lock the buffer.
            let mut item = None;
            if let Some(Some(i)) = guard.as_mut().unwrap().manager.data.get(atom) {
                item = Some(i.clone());
            }
            if let Some(item) = item {
                // Unwrapping the item because we always initialize text with `Some`
                let locked = item.lock().unwrap();
                let result = String::from_utf8(locked.clone());
                return result.map_err(|_| Error::ConversionFailure);
            }
        }
    } else if owner != 0 {
        let atoms = guard.as_mut().unwrap().shared.text_atoms();
        // The callback runs on the event-loop thread, so it writes its
        // result into this shared slot for us to pick up afterwards.
        let result = Arc::new(Mutex::new(Ok(String::new())));
        let callback = {
            let result = result.clone();
            Arc::new(move |data: &BufferPtr| {
                if let Some(reply_data) = data {
                    let locked_data = reply_data.lock().unwrap();
                    let mut locked_result = result.lock().unwrap();
                    *locked_result = String::from_utf8(locked_data.clone());
                }
                true
            })
        };
        let (success, _) = get_data_from_selection_owner(guard, &atoms, Some(callback as _), 0);
        if success {
            // Swap the value out of the shared slot instead of cloning it.
            let mut taken = Ok(String::new());
            let mut locked = result.lock().unwrap();
            std::mem::swap(&mut taken, &mut locked);
            return taken.map_err(|_| Error::ConversionFailure);
        }
    }
    Err(Error::ContentNotAvailable)
}
/// Reads the clipboard content as an image.
///
/// When we own the selection, our cached image is returned directly. When
/// another client owns it, the data is requested as `image/png`, decoded on
/// the event-loop thread, and converted to raw RGBA8 pixels.
///
/// Errors: `ContentNotAvailable` when there is no owner or no image,
/// `ConversionFailure` when the received bytes cannot be decoded as PNG.
fn get_image(mut guard: MutexGuard<Option<LockedObjects>>) -> Result<ImageData, Error> {
    let owner = get_x11_selection_owner(&mut guard.as_mut().unwrap().shared);
    //let mut result_img;
    if owner == guard.as_ref().unwrap().manager.window {
        let image = &guard.as_ref().unwrap().manager.image;
        if image.width > 0 && image.height > 0 && !image.bytes.is_empty() {
            return Ok(image.to_owned_img());
        }
    } else if owner != 0 {
        let atoms = vec![guard.as_mut().unwrap().shared.common_atoms().MIME_IMAGE_PNG];
        // The callback runs on the event-loop thread, so it writes its
        // result into this shared slot for us to pick up afterwards.
        let result: Arc<Mutex<Result<ImageData, Error>>> =
            Arc::new(Mutex::new(Err(Error::ContentNotAvailable)));
        let callback = {
            let result = result.clone();
            Arc::new(move |data: &BufferPtr| {
                if let Some(reply_data) = data {
                    let locked_data = reply_data.lock().unwrap();
                    let cursor = std::io::Cursor::new(&*locked_data);
                    let mut reader = image::io::Reader::new(cursor);
                    reader.set_format(image::ImageFormat::Png);
                    let image;
                    match reader.decode() {
                        Ok(img) => image = img.into_rgba8(),
                        Err(_e) => {
                            let mut locked_result = result.lock().unwrap();
                            *locked_result = Err(Error::ConversionFailure);
                            // Report failure to get_data_from_selection_owner.
                            return false;
                        }
                    }
                    let (w, h) = image.dimensions();
                    let mut locked_result = result.lock().unwrap();
                    let image_data = ImageData {
                        width: w as usize,
                        height: h as usize,
                        bytes: image.into_raw().into(),
                    };
                    *locked_result = Ok(image_data);
                }
                true
            })
        };
        let _success = get_data_from_selection_owner(guard, &atoms, Some(callback as _), 0).0;
        // Rust impl: We return the result here no matter if it succeeded, because the result will
        // tell us if it hasn't
        let mut taken = Err(Error::Unknown {
            description: format!("Implementation error at {}:{}", file!(), line!()),
        });
        let mut locked = result.lock().unwrap();
        std::mem::swap(&mut taken, &mut locked);
        return taken;
    }
    Err(Error::ContentNotAvailable)
}
/// Lazily encodes the stored clipboard image into the representation named
/// by `atom`, writing the encoded bytes into `buffer`.
///
/// Only `image/png` is currently handled. On any failure (no image stored,
/// or the encoder errors) `buffer` is left untouched (`None`), which the
/// caller treats as "this target cannot be served".
fn encode_data_on_demand(
    shared: &mut SharedState,
    image: &mut ImageData,
    atom: xproto::Atom,
    buffer: &mut Option<Arc<Mutex<Vec<u8>>>>,
) {
    /// This is a workaround for the PNGEncoder not having a `into_inner` like function
    /// which would allow us to take back our Vec after the encoder finished encoding.
    /// So instead we create this wrapper around an Rc Vec which implements `io::Write`
    #[derive(Clone)]
    struct RcBuffer {
        inner: Rc<RefCell<Vec<u8>>>,
    }
    impl std::io::Write for RcBuffer {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            self.inner.borrow_mut().extend_from_slice(buf);
            Ok(buf.len())
        }
        fn flush(&mut self) -> std::io::Result<()> {
            // Noop
            Ok(())
        }
    }
    if atom == shared.common_atoms().MIME_IMAGE_PNG {
        if image.bytes.is_empty() || image.width == 0 || image.height == 0 {
            return;
        }
        let output = RcBuffer { inner: Rc::new(RefCell::new(Vec::new())) };
        let encoding_result;
        {
            let encoder = image::png::PngEncoder::new(output.clone());
            encoding_result = encoder.encode(
                image.bytes.as_ref(),
                image.width as u32,
                image.height as u32,
                image::ColorType::Rgba8,
            );
        }
        // Rust impl: The encoder must be destroyed so that it lets go of its reference to the
        // `output` before we `try_unwrap()` — after the scope above, `output`
        // holds the only remaining Rc, so the unwrap cannot fail.
        if encoding_result.is_ok() {
            *buffer =
                Some(Arc::new(Mutex::new(Rc::try_unwrap(output.inner).unwrap().into_inner())));
        }
    }
}
fn ensure_lo_initialized() -> Result<MutexGuard<'static, Option<LockedObjects>>, Error> {
let mut locked = LOCKED_OBJECTS.lock().unwrap();
if locked.is_none() {
*locked = Some(LockedObjects::new().map_err(|e| Error::Unknown {
description: format!(
"Could not initialize the x11 clipboard handling facilities. Cause: {}",
e
),
})?);
}
Ok(locked)
}
/// Runs `action` with exclusive access to the (lazily initialized) global
/// clipboard state.
///
/// The global may not exist yet, or may have been destroyed when the last
/// clipboard context went out of scope; it is (re)created on demand before
/// `action` is invoked.
fn with_locked_objects<F, T>(action: F) -> Result<T, Error>
where
    F: FnOnce(&mut LockedObjects) -> Result<T, Error>,
{
    let mut guard = ensure_lo_initialized()?;
    // Safe to unwrap: `ensure_lo_initialized` guarantees the Option is
    // populated while we hold the guard.
    action(guard.as_mut().unwrap())
}
// Public clipboard handle. It carries no state of its own — it only holds a
// shared reference to the global state so `Drop` can detect when the last
// context disappears and tear the global down.
pub struct X11ClipboardContext {
    // Keeps the global alive; inspected via `Arc::strong_count` in `Drop`.
    _owned: Arc<Mutex<Option<LockedObjects>>>,
}
impl Drop for X11ClipboardContext {
    fn drop(&mut self) {
        // If there's no other owner than us and the global,
        // then destruct the manager.
        //
        // NOTE(review): the strong_count check and the teardown are not one
        // atomic step — a context created on another thread between the two
        // could observe a destroyed global. Confirm whether contexts are
        // only created/dropped from one thread, or guard this sequence.
        if Arc::strong_count(&LOCKED_OBJECTS) == 2 {
            Manager::destruct();
            let mut locked = LOCKED_OBJECTS.lock().unwrap();
            *locked = None;
        }
    }
}
impl X11ClipboardContext {
pub(crate) fn new() -> Result<Self, Error> {
Ok(X11ClipboardContext { _owned: LOCKED_OBJECTS.clone() })
}
pub(crate) fn get_text(&mut self) -> Result<String, Error> {
let locked = ensure_lo_initialized()?;
get_text(locked)
}
pub(crate) fn set_text(&mut self, text: String) -> Result<(), Error> {
with_locked_objects(|locked| {
let manager = &mut locked.manager;
let shared = &mut locked.shared;
manager.set_text(shared, text.into_bytes())
})
}
pub(crate) fn get_image(&mut self) -> Result<ImageData, Error> {
let locked = ensure_lo_initialized()?;
get_image(locked)
}
pub(crate) fn set_image(&mut self, image: ImageData) -> Result<(), Error> {
with_locked_objects(|locked| {
let manager = &mut locked.manager;
let shared = &mut locked.shared;
manager.set_image(shared, image)
})
}
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Terms properties of a Datadog marketplace agreement (AutoRust-generated
/// Azure REST model; camelCase wire names are mapped via `serde(rename)`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogAgreementProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub publisher: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub product: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub plan: Option<String>,
    #[serde(rename = "licenseTextLink", default, skip_serializing_if = "Option::is_none")]
    pub license_text_link: Option<String>,
    #[serde(rename = "privacyPolicyLink", default, skip_serializing_if = "Option::is_none")]
    pub privacy_policy_link: Option<String>,
    #[serde(rename = "retrieveDatetime", default, skip_serializing_if = "Option::is_none")]
    pub retrieve_datetime: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub signature: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub accepted: Option<bool>,
}
/// ARM resource envelope (`id`/`name`/`type`) around `DatadogAgreementProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogAgreementResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    // `type` is a Rust keyword, hence the `type_` field name.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatadogAgreementProperties>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// One page of agreement resources; `next_link` points at the next page, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogAgreementResourceListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatadogAgreementResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A Datadog API key; `key` is the only required field on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogApiKey {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    pub key: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub created: Option<String>,
}
/// One page of API keys.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogApiKeyListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatadogApiKey>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// How the Datadog agent was installed on a host.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogInstallMethod {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool: Option<String>,
    #[serde(rename = "toolVersion", default, skip_serializing_if = "Option::is_none")]
    pub tool_version: Option<String>,
    #[serde(rename = "installerVersion", default, skip_serializing_if = "Option::is_none")]
    pub installer_version: Option<String>,
}
/// Transport used by the Datadog logs agent.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogLogsAgent {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub transport: Option<String>,
}
/// Agent metadata attached to a monitored host.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogHostMetadata {
    #[serde(rename = "agentVersion", default, skip_serializing_if = "Option::is_none")]
    pub agent_version: Option<String>,
    #[serde(rename = "installMethod", default, skip_serializing_if = "Option::is_none")]
    pub install_method: Option<DatadogInstallMethod>,
    #[serde(rename = "logsAgent", default, skip_serializing_if = "Option::is_none")]
    pub logs_agent: Option<DatadogLogsAgent>,
}
/// A host reporting to Datadog, with its aliases, apps, and agent metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogHost {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub aliases: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub apps: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub meta: Option<DatadogHostMetadata>,
}
/// One page of hosts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogHostListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatadogHost>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A linked Azure resource; carries only the ARM resource `id`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// One page of linked resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinkedResourceListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<LinkedResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A monitored resource with its metrics/logs sending state and the
/// corresponding reason strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitoredResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "sendingMetrics", default, skip_serializing_if = "Option::is_none")]
    pub sending_metrics: Option<bool>,
    #[serde(rename = "reasonForMetricsStatus", default, skip_serializing_if = "Option::is_none")]
    pub reason_for_metrics_status: Option<String>,
    #[serde(rename = "sendingLogs", default, skip_serializing_if = "Option::is_none")]
    pub sending_logs: Option<bool>,
    #[serde(rename = "reasonForLogsStatus", default, skip_serializing_if = "Option::is_none")]
    pub reason_for_logs_status: Option<String>,
}
/// One page of monitored resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitoredResourceListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<MonitoredResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Human-readable description of an ARM operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationDisplay {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// An ARM operation exposed by the resource provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<OperationDisplay>,
    #[serde(rename = "isDataAction", default, skip_serializing_if = "Option::is_none")]
    pub is_data_action: Option<bool>,
}
/// One page of operations.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<OperationResult>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// SKU of a resource; `name` is required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSku {
    pub name: String,
}
/// Provisioning state of a resource. Unit variants (de)serialize as their
/// variant names (serde's default for unit enums).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
    Accepted,
    Creating,
    Updating,
    Deleting,
    Succeeded,
    Failed,
    Canceled,
    Deleted,
    NotSpecified,
}
/// Whether monitoring is enabled for the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MonitoringStatus {
    Enabled,
    Disabled,
}
/// State of the associated marketplace subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum MarketplaceSubscriptionStatus {
    Provisioning,
    Active,
    Suspended,
    Unsubscribed,
}
/// Datadog organization linking/creation properties (auth code, keys, etc.).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogOrganizationProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "linkingAuthCode", default, skip_serializing_if = "Option::is_none")]
    pub linking_auth_code: Option<String>,
    #[serde(rename = "linkingClientId", default, skip_serializing_if = "Option::is_none")]
    pub linking_client_id: Option<String>,
    #[serde(rename = "redirectUri", default, skip_serializing_if = "Option::is_none")]
    pub redirect_uri: Option<String>,
    #[serde(rename = "apiKey", default, skip_serializing_if = "Option::is_none")]
    pub api_key: Option<String>,
    #[serde(rename = "applicationKey", default, skip_serializing_if = "Option::is_none")]
    pub application_key: Option<String>,
    #[serde(rename = "enterpriseAppId", default, skip_serializing_if = "Option::is_none")]
    pub enterprise_app_id: Option<String>,
}
/// Contact details of the user creating/managing the monitor.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "emailAddress", default, skip_serializing_if = "Option::is_none")]
    pub email_address: Option<String>,
    #[serde(rename = "phoneNumber", default, skip_serializing_if = "Option::is_none")]
    pub phone_number: Option<String>,
}
/// Liftr resource category classification.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LiftrResourceCategories {
    Unknown,
    MonitorLogs,
}
/// Properties of a Datadog monitor resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitorProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ProvisioningState>,
    #[serde(rename = "monitoringStatus", default, skip_serializing_if = "Option::is_none")]
    pub monitoring_status: Option<MonitoringStatus>,
    #[serde(rename = "marketplaceSubscriptionStatus", default, skip_serializing_if = "Option::is_none")]
    pub marketplace_subscription_status: Option<MarketplaceSubscriptionStatus>,
    #[serde(rename = "datadogOrganizationProperties", default, skip_serializing_if = "Option::is_none")]
    pub datadog_organization_properties: Option<DatadogOrganizationProperties>,
    #[serde(rename = "userInfo", default, skip_serializing_if = "Option::is_none")]
    pub user_info: Option<UserInfo>,
    #[serde(rename = "liftrResourceCategory", default, skip_serializing_if = "Option::is_none")]
    pub liftr_resource_category: Option<LiftrResourceCategories>,
    #[serde(rename = "liftrResourcePreference", default, skip_serializing_if = "Option::is_none")]
    pub liftr_resource_preference: Option<i32>,
}
/// Kind of managed identity attached to the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ManagedIdentityTypes {
    SystemAssigned,
    UserAssigned,
}
/// Managed identity details of the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProperties {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<ManagedIdentityTypes>,
}
/// The full Datadog monitor ARM resource; `location` is required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogMonitorResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<ResourceSku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<MonitorProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<IdentityProperties>,
    // Free-form tag map, kept as raw JSON.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// One page of monitor resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogMonitorResourceListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatadogMonitorResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Patchable subset of monitor properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitorUpdateProperties {
    #[serde(rename = "monitoringStatus", default, skip_serializing_if = "Option::is_none")]
    pub monitoring_status: Option<MonitoringStatus>,
}
/// PATCH body for updating a monitor resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogMonitorResourceUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<MonitorUpdateProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<ResourceSku>,
}
/// Response carrying a set-password link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogSetPasswordLink {
    #[serde(rename = "setPasswordLink", default, skip_serializing_if = "Option::is_none")]
    pub set_password_link: Option<String>,
}
/// Whether a filtering tag includes or excludes matching resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TagAction {
    Include,
    Exclude,
}
/// A name/value tag plus the include/exclude action applied to matches.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilteringTag {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub action: Option<TagAction>,
}
/// Rules controlling which logs are forwarded to Datadog.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogRules {
    #[serde(rename = "sendAadLogs", default, skip_serializing_if = "Option::is_none")]
    pub send_aad_logs: Option<bool>,
    #[serde(rename = "sendSubscriptionLogs", default, skip_serializing_if = "Option::is_none")]
    pub send_subscription_logs: Option<bool>,
    #[serde(rename = "sendResourceLogs", default, skip_serializing_if = "Option::is_none")]
    pub send_resource_logs: Option<bool>,
    #[serde(rename = "filteringTags", default, skip_serializing_if = "Vec::is_empty")]
    pub filtering_tags: Vec<FilteringTag>,
}
/// Rules controlling which metrics are forwarded to Datadog.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MetricRules {
    #[serde(rename = "filteringTags", default, skip_serializing_if = "Vec::is_empty")]
    pub filtering_tags: Vec<FilteringTag>,
}
/// Properties of a tag-rules resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitoringTagRulesProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ProvisioningState>,
    #[serde(rename = "logRules", default, skip_serializing_if = "Option::is_none")]
    pub log_rules: Option<LogRules>,
    #[serde(rename = "metricRules", default, skip_serializing_if = "Option::is_none")]
    pub metric_rules: Option<MetricRules>,
}
/// ARM envelope around `MonitoringTagRulesProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitoringTagRules {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<MonitoringTagRulesProperties>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// One page of tag-rule resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MonitoringTagRulesListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<MonitoringTagRules>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Single sign-on configuration state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SingleSignOnStates {
    Initial,
    Enable,
    Disable,
    Existing,
}
/// Properties of a Datadog single sign-on configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogSingleSignOnProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ProvisioningState>,
    #[serde(rename = "singleSignOnState", default, skip_serializing_if = "Option::is_none")]
    pub single_sign_on_state: Option<SingleSignOnStates>,
    #[serde(rename = "enterpriseAppId", default, skip_serializing_if = "Option::is_none")]
    pub enterprise_app_id: Option<String>,
    #[serde(rename = "singleSignOnUrl", default, skip_serializing_if = "Option::is_none")]
    pub single_sign_on_url: Option<String>,
}
/// ARM envelope around `DatadogSingleSignOnProperties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogSingleSignOnResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DatadogSingleSignOnProperties>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// One page of single sign-on resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatadogSingleSignOnResourceListResponse {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DatadogSingleSignOnResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Standard ARM error response wrapper.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}
/// ARM error detail; `details` nests further `ErrorDetail` values recursively.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Extra, typed error payload; `info` is kept as raw JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub info: Option<serde_json::Value>,
}
/// ARM system metadata: who created/last modified the resource and when.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}
/// Enums scoped to `SystemData` (kept in a module to mirror the API spec).
pub mod system_data {
    use super::*;
    /// Kind of principal that created the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    /// Kind of principal that last modified the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}
|
use std::fs::read_to_string;
/// Logic for handling out-of-memory situations.
///
/// Implementations supply the numbers `OutOfMemoryEstimator` bases its
/// decisions on; see `RealMemoryInfo` for the real system and
/// `InfiniteMemory` for a stub that effectively disables OOM detection.
pub trait MemoryInfo {
    /// Return how much memory the computer has, as bytes.
    fn total_memory(&self) -> usize;
    /// Return how much memory is currently available to us, as bytes.
    fn get_available_memory(&self) -> usize;
    /// Return how much process memory is resident, as bytes.
    fn get_resident_process_memory(&self) -> usize;
    /// Print some debug info.
    fn print_info(&self);
}
/// Estimate whether we're about to run out of memory.
///
/// First, we need to define what "running out of memory" means. As a first
/// pass, 100MB or less of non-swap memory availability, minimum of OS in
/// general and current cgroup. Don't count swap because the goal is
/// to avoid slowness; if someone wants to fall back to disk they should use
/// mmap().
///
/// This will break on over-committing, but... we can live with that.
///
/// macOS is very aggressive about swapping, so we add a second heuristic: swap
/// for the process is bigger than available memory. This suggests large
/// pressure to swap, since the process wouldn't fit in memory on its own.
///
/// Second, we probably don't want to check every time, that's expensive. So
/// check every 1% of allocations remaining until we run out of available memory
/// (we don't even check for free()s, which just means more frequent checks).
pub struct OutOfMemoryEstimator {
    // How many bytes it takes until we check again: whenever it's reset, it
    // starts as 1% of available memory.
    check_threshold_bytes: usize,
    // Minimum number of bytes we want to be available at any time.
    minimal_required_available_bytes: usize,
    // Pluggable way to get memory usage of the system and process.
    pub memory_info: Box<dyn MemoryInfo + Sync + Send>,
}
impl OutOfMemoryEstimator {
    /// Build an estimator on top of the given memory-info source.
    pub fn new(memory_info: Box<dyn MemoryInfo + Sync + Send>) -> Self {
        // Either 100MB or 2% of total memory, whichever is bigger.
        let minimal = std::cmp::max(100 * 1024 * 1024, memory_info.total_memory() / 50);
        Self {
            check_threshold_bytes: 0,
            minimal_required_available_bytes: minimal,
            memory_info,
        }
    }

    /// Check if we're (close to being) out of memory.
    pub fn are_we_oom(&mut self, total_allocated_bytes: usize) -> bool {
        let available_bytes = self.memory_info.get_available_memory();

        // Danger zone: very low available memory.
        if available_bytes < self.minimal_required_available_bytes {
            eprintln!(
                "=fil-profile= WARNING: Available bytes {} less than minimal required {}",
                available_bytes, self.minimal_required_available_bytes
            );
            return true;
        }

        // Excessive-swapping heuristic. On macOS in particular there is a
        // strong tendency to go to swap (coupled with difficulty getting swap
        // numbers for a process), so if the presumed swap — what we know we
        // allocated minus what is resident — exceeds available bytes, we
        // treat it as effectively OOM. Because we don't track all
        // allocations, resident memory might exceed what we think we
        // allocated; the saturating subtraction yields 0 in that case, which
        // can never trip the comparison (matching the original guard).
        let resident = self.memory_info.get_resident_process_memory();
        let presumed_swap = total_allocated_bytes.saturating_sub(resident);
        if presumed_swap > available_bytes {
            eprintln!(
                concat!(
                    "=fil-profile= WARNING: Excessive swapping. Program itself ",
                    "allocated {} bytes, {} are resident, the difference (presumably swap) is {}, ",
                    "which is more than available system bytes {}"
                ),
                total_allocated_bytes,
                resident,
                presumed_swap,
                available_bytes
            );
            return true;
        }

        // Still have headroom, so schedule the next check at 1% of what
        // remains. If we're at 101MB free, this checks basically at the
        // boundary; anything higher checks even farther away, so it's still
        // safe, and it prevents over-frequent checks when we're close.
        //
        // What if someone allocates 80MB when we're 120MB from running out?
        // See add_allocation() in memorytracking.rs, which will immediately
        // free that memory again since we're going to exit anyway.
        self.check_threshold_bytes = available_bytes / 100;
        false
    }

    /// Given new allocation size and total allocated bytes for the process,
    /// return whether we're out-of-memory. Only checks actual memory
    /// availability intermittently, as an optimization.
    pub fn too_big_allocation(
        &mut self,
        allocated_bytes: usize,
        total_allocated_bytes: usize,
    ) -> bool {
        let remaining = self.check_threshold_bytes;
        if allocated_bytes <= remaining {
            // Not time to check yet; just consume part of the budget.
            self.check_threshold_bytes = remaining - allocated_bytes;
            debug_assert!(self.check_threshold_bytes < remaining);
            false
        } else {
            // Budget exhausted: do a real check for a potential OOM condition.
            self.are_we_oom(total_allocated_bytes)
        }
    }

    /// Delegate debug printing to the underlying memory-info source.
    pub fn print_info(&self) {
        self.memory_info.print_info();
    }
}
#[cfg(target_os = "linux")]
/// Parse `/proc/self/cgroup` contents, returning the (leading-slash-stripped)
/// cgroup paths whose controller list is empty (cgroups v2) or contains
/// "memory" (cgroups v1). Returns `None` on any malformed line.
fn get_cgroup_paths(proc_cgroups: &str) -> Option<Vec<&str>> {
    let mut paths = Vec::new();
    // Each line looks like "<hierarchy-id>:<controllers>:<path>".
    for line in proc_cgroups.lines() {
        // TODO better error handling?
        let mut fields = line.splitn(3, ':');
        let controllers = fields.nth(1)?;
        let relevant = controllers.is_empty() || controllers.split(',').any(|c| c == "memory");
        if relevant {
            paths.push(fields.next()?.strip_prefix('/')?);
        }
    }
    Some(paths)
}
/// Real system information, sourced from psutil (and cgroups on Linux).
pub struct RealMemoryInfo {
    // The current process; `None` if psutil couldn't resolve it.
    process: Option<psutil::process::Process>,
    // On Linux, the current cgroup _at startup_. If it changes after startup,
    // we'll be wrong, but that's unlikely.
    #[cfg(target_os = "linux")]
    cgroup: Option<cgroups_rs::Cgroup>,
}
impl Default for RealMemoryInfo {
    #[cfg(target_os = "linux")]
    fn default() -> Self {
        // Resolve the memory cgroup this process belongs to, if any.
        let get_cgroup = || {
            let contents = match read_to_string("/proc/self/cgroup") {
                Ok(contents) => contents,
                Err(err) => {
                    eprintln!("=fil-profile= Couldn't read /proc/self/cgroup ({:})", err);
                    return None;
                }
            };
            let cgroup_paths = get_cgroup_paths(&contents)?;
            // Only the first matching cgroup path is used.
            if let Some(path) = cgroup_paths.into_iter().next() {
                let h = cgroups_rs::hierarchies::auto();
                let cgroup = cgroups_rs::Cgroup::load(h, path);
                // Make sure memory_stat() works. Sometimes it doesn't
                // (https://github.com/pythonspeed/filprofiler/issues/147). If
                // it doesn't, this'll panic.
                let mem: &cgroups_rs::memory::MemController = cgroup.controller_of()?;
                let _mem = mem.memory_stat();
                return Some(cgroup);
            }
            None
        };
        // The memory_stat() probe above may panic (e.g. on old kernels);
        // treat a panic as "no cgroup support" rather than crashing.
        let cgroup_result = std::panic::catch_unwind(get_cgroup);
        let cgroup = match cgroup_result {
            Ok(c) => c,
            Err(err) => {
                eprintln!(
                    "=fil-profile= Error retrieving cgroup memory, per-container/per-cgroup memory limits won't be respected (error: {:?}). This is expected behavior on old versions of Linux, e.g. RHEL 7. If you're on a newer version, please file a bug at https://github.com/pythonspeed/filprofiler/issues/new/choose.", err);
                None
            }
        };
        Self {
            cgroup,
            process: psutil::process::Process::current().ok(),
        }
    }
    #[cfg(target_os = "macos")]
    fn default() -> Self {
        // No cgroups on macOS; just grab the current process handle.
        Self {
            process: psutil::process::Process::current().ok(),
        }
    }
}
impl RealMemoryInfo {
    /// Available memory in bytes according to the current cgroup's memory
    /// limit, or `usize::MAX` when there is no cgroup, no memory controller,
    /// or no meaningful limit.
    #[cfg(target_os = "linux")]
    pub fn get_cgroup_available_memory(&self) -> usize {
        let mut result = std::usize::MAX;
        if let Some(cgroup) = &self.cgroup {
            if let Some(mem) = cgroup.controller_of::<cgroups_rs::memory::MemController>() {
                let mem = mem.memory_stat();
                if mem.limit_in_bytes == 0 {
                    // A limit of 0 is nonsensical. Seen on Docker with cgroups v1
                    // with no limit set, and the usage was also 0. So just assume
                    // there is no limit.
                    return result;
                }
                // Usage can momentarily exceed the limit (accounting lag),
                // which previously made `limit - usage` negative; casting
                // that to usize wrapped to a huge value, i.e. "plenty of
                // memory" — exactly wrong. Clamp at zero instead so an
                // over-limit cgroup reads as 0 bytes available.
                let available = (mem.limit_in_bytes - mem.usage_in_bytes as i64).max(0);
                result = std::cmp::min(result, available as usize);
            }
        }
        result
    }
    /// No cgroups on macOS: report effectively unlimited memory.
    #[cfg(target_os = "macos")]
    pub fn get_cgroup_available_memory(&self) -> usize {
        std::usize::MAX
    }
}
impl MemoryInfo for RealMemoryInfo {
    /// Total physical memory in bytes; 0 if psutil fails.
    fn total_memory(&self) -> usize {
        psutil::memory::virtual_memory()
            .map(|vm| vm.total() as usize)
            .unwrap_or(0)
    }
    /// Return how much free memory we have, as bytes: the minimum of host
    /// availability and the cgroup limit (Linux).
    fn get_available_memory(&self) -> usize {
        // This will include memory that can become available by syncing
        // filesystem buffers to disk, which is probably what we want.
        // On psutil failure we report MAX ("plenty"), erring on the side of
        // not triggering false OOM reports.
        let available = psutil::memory::virtual_memory()
            .map(|vm| vm.available() as usize)
            .unwrap_or(std::usize::MAX);
        let cgroup_available = self.get_cgroup_available_memory();
        std::cmp::min(available, cgroup_available)
    }
    /// Resident set size of the current process in bytes; 0 if unavailable.
    fn get_resident_process_memory(&self) -> usize {
        self.process
            .as_ref()
            .and_then(|p| p.memory_info().map(|mi| mi.rss()).ok())
            .unwrap_or(0) as usize
    }
    /// Print debugging info to stderr.
    fn print_info(&self) {
        eprintln!(
            "=fil-profile= Host memory info: {:?} {:?}",
            psutil::memory::virtual_memory(),
            psutil::memory::swap_memory()
        );
        #[cfg(target_os = "linux")]
        eprintln!(
            "=fil-profile= cgroup (e.g. container) memory info: {:?}",
            if let Some(cgroup) = &self.cgroup {
                cgroup
                    .controller_of::<cgroups_rs::memory::MemController>()
                    .as_ref()
                    .map(|mem| mem.memory_stat())
            } else {
                None
            }
        );
        eprintln!(
            "=fil-profile= Process memory info: {:?}",
            self.process.as_ref().map(|p| p.memory_info())
        );
    }
}
/// Used to disable the out-of-memory heuristic: reports an effectively
/// unlimited 2^48 bytes of total/available memory and zero resident memory,
/// so the estimator never trips.
pub struct InfiniteMemory {}
impl MemoryInfo for InfiniteMemory {
    fn total_memory(&self) -> usize {
        // 2^48 bytes — far beyond any real machine.
        1usize << 48
    }
    fn get_available_memory(&self) -> usize {
        1usize << 48
    }
    fn get_resident_process_memory(&self) -> usize {
        0
    }
    /// Print debugging info to stderr.
    fn print_info(&self) {
        eprintln!("=fil-profile= Out of memory detection is disabled.");
    }
}
#[cfg(test)]
mod tests {
    use super::{MemoryInfo, OutOfMemoryEstimator};
    use proptest::prelude::*;
    use std::cell::Ref;
    use std::cell::RefCell;
    use std::sync::Arc;
    // A fake MemoryInfo that starts with 1GB "available", lets tests consume
    // it and add swap, and records every availability check the estimator
    // performs (so check spacing can be asserted on).
    struct FakeMemory {
        available_memory: RefCell<usize>,
        swap: RefCell<usize>,
        checks: RefCell<Vec<usize>>,
    }
    impl FakeMemory {
        fn new() -> Arc<Self> {
            Arc::new(FakeMemory {
                available_memory: RefCell::new(1_000_000_000),
                checks: RefCell::new(vec![]),
                swap: RefCell::new(0),
            })
        }
        // Consume `size` bytes of the fake available memory.
        fn allocate(&self, size: usize) {
            let mut mem = self.available_memory.borrow_mut();
            *mem -= size;
        }
        // Pretend `size` additional bytes went to swap.
        fn add_swap(&self, size: usize) {
            *self.swap.borrow_mut() += size;
        }
        // The availability values observed at each estimator check, in order.
        fn get_checks(&self) -> Ref<Vec<usize>> {
            self.checks.borrow()
        }
        // Total bytes "allocated" so far (initial 1GB minus what's left).
        fn get_allocated(&self) -> usize {
            1_000_000_000 - *self.available_memory.borrow()
        }
    }
    // Implemented on Arc<FakeMemory> so tests can keep a handle to the same
    // fake after boxing it into the estimator.
    impl MemoryInfo for Arc<FakeMemory> {
        fn total_memory(&self) -> usize {
            1_000_000_000
        }
        fn get_available_memory(&self) -> usize {
            // Record the observed value so tests can inspect check spacing.
            self.checks
                .borrow_mut()
                .push(*self.available_memory.borrow());
            *self.available_memory.borrow()
        }
        fn get_resident_process_memory(&self) -> usize {
            self.get_allocated() - *self.swap.borrow()
        }
        fn print_info(&self) {}
    }
    // SAFETY(review): RefCell is not Sync; this is a test-only shortcut to
    // satisfy the `MemoryInfo + Sync + Send` bound, sound only because these
    // tests are single-threaded — confirm if tests ever run concurrently.
    unsafe impl Sync for FakeMemory {}
    // Build an estimator over a fresh FakeMemory, returning both.
    fn setup_estimator() -> (OutOfMemoryEstimator, Arc<FakeMemory>) {
        let fake_memory = FakeMemory::new();
        (
            OutOfMemoryEstimator::new(Box::new(fake_memory.clone())),
            fake_memory,
        )
    }
    proptest! {
        // Random allocations don't break invariants
        #[test]
        fn not_oom(allocated_sizes in prop::collection::vec(1..1000usize, 10..2000)) {
            let (mut estimator, memory_info) = setup_estimator();
            let mut allocated = 0;
            for size in allocated_sizes {
                memory_info.allocate(size);
                allocated += size;
                let too_big = estimator.too_big_allocation(size, allocated);
                prop_assert_eq!(too_big, estimator.memory_info.get_available_memory() <= estimator.minimal_required_available_bytes);
                if too_big {
                    break;
                }
            }
        }
    }
    // We're out of memory if we're below the threshold.
    #[test]
    fn oom_threshold() {
        let (mut estimator, memory_info) = setup_estimator();
        assert!(!estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.allocate(500_000_000);
        assert!(!estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.allocate(350_000_000);
        assert!(!estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.allocate(50_000_000);
        // Now that we're below the maximum, we've gone too far:
        assert!(estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.allocate(40_000_000);
        assert!(estimator.are_we_oom(memory_info.get_allocated()));
    }
    // We're out of memory if swap > available.
    #[test]
    fn oom_swap() {
        let (mut estimator, memory_info) = setup_estimator();
        memory_info.allocate(500_000_001);
        assert!(!estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.add_swap(499_999_999);
        assert!(!estimator.are_we_oom(memory_info.get_allocated()));
        memory_info.add_swap(2);
        assert!(estimator.are_we_oom(memory_info.get_allocated()));
    }
    // The intervals between checking if out-of-memory shrink as we get closer
    // to running out of memory
    #[test]
    fn oom_estimator_shrinking_intervals() {
        let (mut estimator, memory_info) = setup_estimator();
        loop {
            memory_info.allocate(10_000);
            if estimator.too_big_allocation(10_000, memory_info.get_allocated()) {
                break;
            }
            // by 100MB we should have detected OOM.
            assert!(*memory_info.available_memory.borrow() >= 99_000_000);
        }
        let checks = memory_info.get_checks();
        // Each check should come closer than the next:
        for pair in checks.windows(2) {
            assert!(pair[0] >= pair[1], "{} vs {}", pair[0], pair[1]);
        }
        // In the beginning we check infrequently:
        assert!((checks[0] - checks[1]) > 9_000_000);
        // By the end we should be checking more frequently:
        let final_difference = checks[checks.len() - 2] - checks[checks.len() - 1];
        assert!(
            final_difference < 1_100_000,
            "final difference: {}",
            final_difference,
        );
    }
}
|
// Copyright 2017 rust-ipfs-api Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
extern crate futures;
extern crate ipfs_api;
extern crate tokio_core;
extern crate tokio_timer;
use futures::stream::Stream;
use ipfs_api::{response, IpfsClient};
use std::thread;
use std::time::Duration;
use tokio_core::reactor::{Core, Handle};
use tokio_timer::Timer;
// Pubsub topic shared by the publisher thread and the subscriber loop below.
static TOPIC: &'static str = "test";
/// Build an IPFS API client bound to the given reactor handle, targeting
/// the daemon's default address (localhost:5001).
fn get_client(handle: &Handle) -> IpfsClient {
    println!("connecting to localhost:5001...");
    IpfsClient::default(handle)
}
// Creates an Ipfs client, and simultaneously publishes and reads from a pubsub
// topic.
//
fn main() {
    // This block will execute a repeating function that sends
    // a message to the "test" topic.
    //
    thread::spawn(move || {
        // The spawned thread needs its own reactor core and client.
        let mut event_loop = Core::new().expect("expected event loop");
        let client = get_client(&event_loop.handle());
        let timer = Timer::default();
        // Publish one message per second, forever.
        let publish = timer
            .interval(Duration::from_secs(1))
            .map_err(|_| response::Error::from("timeout error"))
            .for_each(move |_| {
                println!("");
                println!("publishing message...");
                client.pubsub_pub(TOPIC, "Hello World!")
            });
        println!("");
        println!("starting task to publish messages to ({})...", TOPIC);
        event_loop.run(publish).expect(
            "expected the publish task to start",
        );
    });
    // This block will execute a future that subscribes to a topic,
    // and reads any incoming messages.
    //
    {
        let mut event_loop = Core::new().expect("expected event loop");
        let client = get_client(&event_loop.handle());
        let req = client.pubsub_sub(TOPIC, false);
        println!("");
        println!("waiting for messages on ({})...", TOPIC);
        // Stop after five received messages so the example terminates.
        event_loop
            .run(req.take(5).for_each(|msg| {
                println!("");
                println!("received ({:?})", msg);
                Ok(())
            }))
            .expect("expected a valid response");
    }
}
|
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]
use littlefs2::consts;
// TODO: this needs to be overridable.
// Should we use the "config crate that can have a replacement patched in" idea?
// Limits appear in two forms: `type` aliases of littlefs2 typenum constants
// (presumably used as compile-time capacities — confirm at use sites) and
// plain `usize` consts for runtime checks.
pub type MAX_APPLICATION_NAME_LENGTH = consts::U256;
pub const MAX_LONG_DATA_LENGTH: usize = 1024;
pub const MAX_MESSAGE_LENGTH: usize = 1024;
pub type MAX_OBJECT_HANDLES = consts::U16;
pub type MAX_LABEL_LENGTH = consts::U256;
pub const MAX_MEDIUM_DATA_LENGTH: usize = 256;
pub type MAX_PATH_LENGTH = consts::U256;
pub const MAX_KEY_MATERIAL_LENGTH: usize = 128;
// Must stay exactly MAX_KEY_MATERIAL_LENGTH + 4 (the original comment read
// "must be above + 4"); defining it in terms of the other constant keeps the
// invariant intact if the material length ever changes.
pub const MAX_SERIALIZED_KEY_LENGTH: usize = MAX_KEY_MATERIAL_LENGTH + 4;
pub type MAX_SERVICE_CLIENTS = consts::U5;
pub const MAX_SHORT_DATA_LENGTH: usize = 128;
pub const MAX_SIGNATURE_LENGTH: usize = 72;
pub const MAX_USER_ATTRIBUTE_LENGTH: usize = 256;
// NOTE(review): the meaning of this magic number is not documented in this
// file — confirm its origin and document it.
pub const USER_ATTRIBUTE_NUMBER: u8 = 37;
|
// Each server implementation lives in a private module and is re-exported
// at this level as the crate's public surface.
mod ssdp;
pub use self::ssdp::SSDPServer;
mod mediaserver;
pub use self::mediaserver::MediaServer;
|
//! # RISC Emulator Core
//! The core library for the rust RISC emulator crate.
// NOTE(review): `tests` is exposed as a regular public module rather than
// being gated behind #[cfg(test)] — confirm this is intentional.
pub mod tests;
|
extern crate piston_snake;
/// Entry point: launch the snake game on a 160x160 board.
fn main() {
    let (width, height) = (160, 160);
    piston_snake::run(width, height);
}
|
use crate::rtb_type;
// Generate the AgentType enumeration via the project's rtb_type macro.
// NOTE(review): the meaning of `500` is defined by the macro (presumably a
// numeric type id or upper bound) — confirm against `rtb_type!`'s definition.
rtb_type! {
    AgentType,
    500,
    Webbrowser=1;
    InApp=2;
    PersonBasedId=3
}
|
// Copyright (c) 2016-2017 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
extern crate habitat_core as hab_core;
extern crate habitat_http_client as hab_http;
extern crate hyper;
extern crate hyper_openssl;
#[macro_use]
extern crate log;
extern crate regex;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
extern crate url;
pub mod error;
pub use error::{Error, Result};
use std::io::Read;
use std::path::Path;
use hab_core::package::PackageIdent;
use hab_http::ApiClient;
use hyper::client::{IntoUrl, RequestBuilder, Response};
use hyper::header::{Accept, Authorization, Bearer, ContentType};
use hyper::status::StatusCode;
// Default base path appended to endpoint URLs that specify none.
// (`'static` is implied on const string slices, so it is omitted.)
const DEFAULT_API_PATH: &str = "/v1";
/// Thin wrapper over the shared `ApiClient`.
pub struct Client(ApiClient);
/// Payload deserialized from the `rdeps/{ident}` endpoint.
#[derive(Deserialize)]
pub struct ReverseDependencies {
    pub origin: String,
    pub name: String,
    pub rdeps: Vec<String>,
}
/// Result of a job-group promotion request.
// NOTE(review): not referenced in this file — presumably deserialized by
// callers; confirm.
#[derive(Default, Deserialize)]
pub struct JobGroupPromoteResponse {
    pub group_id: String,
    pub not_promoted: Vec<PackageIdent>,
}
impl Client {
pub fn new<U>(
endpoint: U,
product: &str,
version: &str,
fs_root_path: Option<&Path>,
) -> Result<Self>
where
U: IntoUrl,
{
let mut endpoint = endpoint.into_url().map_err(Error::URL)?;
if !endpoint.cannot_be_a_base() && endpoint.path() == "/" {
endpoint.set_path(DEFAULT_API_PATH);
}
Ok(Client(
ApiClient::new(endpoint, product, version, fs_root_path)
.map_err(Error::HabitatHttpClient)?,
))
}
/// Create a job.
///
/// # Failures
///
/// * Remote API Server is not available
///
/// # Panics
///
/// * Authorization token was not set on client
pub fn create_job(&self, ident: &PackageIdent, token: &str) -> Result<(String)> {
debug!("Creating a job for {}", ident);
let body = json!({ "project_id": format!("{}", ident) });
let sbody = serde_json::to_string(&body).unwrap();
let result = self
.add_authz(self.0.post("jobs"), token)
.body(&sbody)
.header(Accept::json())
.header(ContentType::json())
.send();
match result {
Ok(mut response) => match response.status {
StatusCode::Created => {
let mut encoded = String::new();
response.read_to_string(&mut encoded).map_err(Error::IO)?;
debug!("Body: {:?}", encoded);
let v: serde_json::Value = serde_json::from_str(&encoded).map_err(Error::Json)?;
let id = v["id"].as_str().unwrap();
Ok(id.to_string())
}
StatusCode::Unauthorized => Err(Error::APIError(
response.status,
"Your GitHub token requires both user:email and read:org \
permissions."
.to_string(),
)),
_ => Err(err_from_response(response)),
},
Err(e) => Err(Error::HyperError(e)),
}
}
/// Fetch the reverse dependencies for a package
///
/// # Failures
///
/// * Remote API Server is not available
pub fn fetch_rdeps(&self, ident: &PackageIdent) -> Result<Vec<String>> {
debug!("Fetching the reverse dependencies for {}", ident);
let url = format!("rdeps/{}", ident);
let mut res = self.0.get(&url).send().map_err(Error::HyperError)?;
if res.status != StatusCode::Ok {
return Err(err_from_response(res));
}
let mut encoded = String::new();
res.read_to_string(&mut encoded).map_err(Error::IO)?;
debug!("Body: {:?}", encoded);
let rd: ReverseDependencies = serde_json::from_str(&encoded).map_err(Error::Json)?;
Ok(rd.rdeps.to_vec())
}
/// Promote/Demote a job group to/from a channel
///
/// # Failures
///
/// * Remote API Server is not available
pub fn job_group_promote_or_demote<T: AsRef<str> + serde::Serialize>(
&self,
group_id: u64,
idents: &[T],
channel: &str,
token: &str,
promote: bool,
) -> Result<()> {
let json_idents = json!(idents);
let body = json!({ "idents": json_idents });
let sbody = serde_json::to_string(&body).unwrap();
let url = format!(
"jobs/group/{}/{}/{}",
group_id,
if promote { "promote" } else { "demote" },
channel
);
let res = self
.add_authz(self.0.post(&url), token)
.body(&sbody)
.header(Accept::json())
.header(ContentType::json())
.send()
.map_err(Error::HyperError)?;
if res.status != StatusCode::NoContent {
debug!(
"Failed to {} group, status: {:?}",
if promote { "promote" } else { "demote" },
res.status
);
return Err(err_from_response(res));
}
Ok(())
}
/// Cancel a job group
///
/// # Failures
///
/// * Remote API Server is not available
pub fn job_group_cancel(&self, group_id: u64, token: &str) -> Result<()> {
let url = format!("jobs/group/{}/cancel", group_id);
let res = self
.add_authz(self.0.post(&url), token)
.send()
.map_err(Error::HyperError)?;
if res.status != StatusCode::NoContent {
debug!("Failed to cancel group, status: {:?}", res.status);
return Err(err_from_response(res));
}
Ok(())
}
fn add_authz<'a>(&'a self, rb: RequestBuilder<'a>, token: &str) -> RequestBuilder {
rb.header(Authorization(Bearer {
token: token.to_string(),
}))
}
}
fn err_from_response(mut response: Response) -> Error {
let mut s = String::new();
response.read_to_string(&mut s).map_err(Error::IO).unwrap();
Error::APIError(response.status, s)
}
|
use std::{io::Write, net::SocketAddr};
use mio::net::TcpStream;
use serde::Serialize;
use crate::common::message_type::{MsgEncryption, MsgType, UdpPacket};
use super::RendezvousServer;
impl RendezvousServer {
    /// Serialize `msg` and send it over `sock`, framed as:
    /// [1-byte message type][bincode-encoded payload length][payload].
    ///
    /// Panics if serialization or the socket write fails.
    pub fn send_tcp_message<T: ?Sized>(sock: &mut TcpStream, t: MsgType, msg: &T) where T: Serialize {
        let t: u8 = num::ToPrimitive::to_u8(&t).unwrap();
        let msg = &bincode::serialize(msg).unwrap()[..];
        // Length prefix lets the receiver delimit messages on the stream.
        let msg_size = bincode::serialize(&msg.len()).unwrap();
        let chained: &[u8] = &[&[t], &msg_size[..], &msg].concat()[..];
        sock.write_all(chained).unwrap();
    }
    /// Wrap `msg` in a `UdpPacket` (type byte + payload) and send it to
    /// `addr`, consuming one message id.
    ///
    /// Panics if serialization or the UDP send fails.
    pub fn send_udp_message<T: ?Sized>(&mut self, addr: SocketAddr, t: MsgType, msg: &T) where T: Serialize {
        let t: u8 = num::ToPrimitive::to_u8(&t).unwrap();
        let msg = &bincode::serialize(msg).unwrap()[..];
        // No length prefix here: UDP datagrams are already delimited.
        let chained: &[u8] = &[&[t], msg].concat()[..];
        let packet = UdpPacket {
            data: chained.to_vec(),
            reliable: false, //FIXME
            msg_id: self.next_msg_id,
            upgraded: MsgEncryption::Unencrypted
        };
        self.next_msg_id += 1;
        let wrapped_data = &bincode::serialize(&packet).unwrap()[..];
        self.udp_listener.send_to(wrapped_data, addr).unwrap();
    }
}
use super::types::Only;
use crate::data::Entry;
use crate::data::{Item, Status};
use crate::index::Indexer;
use anyhow::Result;
use crossterm::style::Stylize;
use std::path::PathBuf;
/// Handles the `status` command: indexes the configured items and prints
/// the state of each entry.
pub struct StatusHandler {
    indexer: Indexer,
    items: Vec<Item>,
}
// Public methods.
impl StatusHandler {
    /// Build a handler that indexes `items` under `home`/`repository`,
    /// optionally restricted by `only`.
    pub fn new(home: PathBuf, repository: PathBuf, items: Vec<Item>, only: Option<Only>) -> Self {
        let indexer = Indexer::new(home, repository, only);
        Self { indexer, items }
    }

    /// Print the status of every indexed item.
    ///
    /// With `brief`, OK entries are hidden and items without remaining
    /// entries are dropped; otherwise everything is shown followed by a
    /// legend line.
    pub fn status(&self, brief: bool) -> Result<()> {
        log::debug!("Showing status with brief={}", brief);
        let mut indexed = self.indexer.index(&self.items)?;
        // `usize` is totally ordered, so sort by key directly instead of
        // `partial_cmp(..).unwrap()`.
        indexed.sort_by_key(|(_, entries)| entries.len());
        if brief {
            // Keep only problematic entries, and only items that still have
            // at least one such entry.
            let filtered: Vec<(String, Vec<Entry>)> = indexed
                .into_iter()
                .filter_map(|(name, entries)| {
                    let entries: Vec<Entry> = entries
                        .into_iter()
                        .filter(|entry| !entry.is_status_ok())
                        .collect();
                    if entries.is_empty() {
                        None
                    } else {
                        Some((name, entries))
                    }
                })
                .collect();
            self.display(&filtered);
        } else {
            self.display(&indexed);
            // NOTE(review): the "invalid" legend entry styles an empty
            // string, so it renders nothing — it looks like a glyph was
            // lost here; confirm intent.
            println!(
                "\n{} ok | {} diff | {} invalid | {} missing home | {} missing repository",
                Status::Ok,
                Status::Diff,
                "".red(),
                Status::MissingHome,
                Status::MissingRepo,
            );
        }
        Ok(())
    }

    /// Render one `(name, entries)` group per item, using a compact
    /// single-line form when an item has exactly one entry.
    fn display(&self, indexed: &[(String, Vec<Entry>)]) {
        for (name, entries) in indexed {
            if entries.is_empty() {
                continue;
            }
            if entries.len() == 1 {
                println!(" {}: {}", name, entries.first().unwrap());
            } else {
                println!("\n {}", name);
                for entry in entries {
                    println!(" {}", entry);
                }
            }
        }
    }
}
|
//! Cluster Amazon ratings.
use crate::prelude::*;
use polars::prelude::*;
/// Group Amazon ratings into clusters.
///
/// Ratings from `INPUT` are joined to the precomputed ISBN-cluster map and
/// aggregated per (user, cluster); the result is written as Parquet.
#[derive(Args, Debug)]
#[command(name = "cluster-ratings")]
pub struct ClusterRatings {
    /// Rating output file
    #[arg(short = 'o', long = "output", name = "FILE")]
    ratings_out: PathBuf,
    /// Input file to cluster
    #[arg(name = "INPUT")]
    infile: PathBuf,
}
impl Command for ClusterRatings {
    /// Join ratings to ISBN clusters and aggregate per (user, cluster).
    fn exec(&self) -> Result<()> {
        // ISBN -> cluster id mapping.
        let isbns = LazyFrame::scan_parquet("book-links/isbn-clusters.parquet", default())?;
        let isbns = isbns.select(&[col("isbn"), col("cluster")]);
        let ratings = LazyFrame::scan_parquet(&self.infile, default())?;
        // Inner join matches Amazon `asin` values against ISBNs, dropping
        // ratings that have no cluster.
        let joined = ratings.join(isbns, &[col("asin")], &[col("isbn")], JoinType::Inner);
        let joined = joined
            .select(&[
                col("user"),
                col("cluster").alias("item"),
                col("rating"),
                col("timestamp"),
            ])
            .sort("timestamp", default());
        // One row per (user, item): median rating plus last rating,
        // first/last timestamps, and the rating count.
        let actions = joined.groupby(&[col("user"), col("item")]).agg(&[
            col("rating").median().alias("rating"),
            col("rating").last().alias("last_rating"),
            col("timestamp").min().alias("first_time"),
            col("timestamp").max().alias("last_time"),
            col("item").count().alias("nratings"),
        ]);
        info!("collecting results");
        let actions = actions.collect()?;
        info!("saving {} records", actions.height());
        save_df_parquet(actions, &self.ratings_out)?;
        Ok(())
    }
}
|
pub use {Trie, TrieLayer, TrieIter};
pub use tuple_utils::Merge;
/// Intersection ("and") of two tries: only rows present in both inputs
/// survive, with their values merged at the leaf layer.
pub struct TrieAnd<A, B>(A, B);
impl<A, B> TrieAnd<A, B> {
    /// Pair two tries for intersection.
    pub fn new(a: A, b: B) -> TrieAnd<A, B> {
        TrieAnd(a, b)
    }
}
/// Top (level-2) layer of the intersection.
pub struct TrieAndL2<A, B>(A, B);
/// Middle (level-1) layer of the intersection.
pub struct TrieAndL1<A, B>(A, B);
/// Leaf (level-0) layer of the intersection; merges the two values.
pub struct TrieAndL0<A, B>(A, B);
impl<A, B> TrieLayer for TrieAndL2<A, B>
    where A: TrieLayer,
          B: TrieLayer
{
    type Value = TrieAndL1<A::Value, B::Value>;
    // A slot is present in the intersection only if present in both inputs.
    #[inline(always)]
    fn mask(&self) -> u64 {
        self.0.mask() & self.1.mask()
    }
    // SAFETY contract (inherited from `TrieLayer::get`): `idx` must denote a
    // set bit of `mask()` — confirm against the trait's documentation.
    #[inline(always)]
    unsafe fn get(&self, idx: usize) -> Self::Value {
        TrieAndL1(self.0.get(idx), self.1.get(idx))
    }
}
impl<A, B> TrieLayer for TrieAndL1<A, B>
    where A: TrieLayer,
          B: TrieLayer
{
    type Value = TrieAndL0<A::Value, B::Value>;
    // Same intersection rule as the layer above.
    #[inline(always)]
    fn mask(&self) -> u64 {
        self.0.mask() & self.1.mask()
    }
    // SAFETY contract (inherited from `TrieLayer::get`): `idx` must denote a
    // set bit of `mask()` — confirm against the trait's documentation.
    #[inline(always)]
    unsafe fn get(&self, idx: usize) -> Self::Value {
        TrieAndL0(self.0.get(idx), self.1.get(idx))
    }
}
impl<A, B, AV, BV, V> TrieLayer for TrieAndL0<A, B>
    where A: TrieLayer<Value = AV>,
          B: TrieLayer<Value = BV>,
          AV: Merge<BV, Output = V>
{
    type Value = V;
    #[inline(always)]
    fn mask(&self) -> u64 {
        self.0.mask() & self.1.mask()
    }
    // At the leaf layer the two stored values are combined via `Merge`.
    // SAFETY contract (inherited from `TrieLayer::get`): `idx` must denote a
    // set bit of `mask()`.
    #[inline(always)]
    unsafe fn get(&self, idx: usize) -> Self::Value {
        self.0.get(idx).merge(self.1.get(idx))
    }
}
impl<A, B, AV, BV, V, R> Trie for TrieAnd<A, B>
    where A: Trie<Row = R, Value = AV>,
          B: Trie<Row = R, Value = BV>,
          AV: Merge<BV, Output = V>
{
    type L2 = TrieAndL2<A::L2, B::L2>;
    type L1 = TrieAndL1<A::L1, B::L1>;
    type L0 = TrieAndL0<A::L0, B::L0>;
    type Value = V;
    type Row = R;
    // Descend into the paired top layers of both tries.
    fn top(self) -> TrieAndL2<A::L2, B::L2> {
        let TrieAnd(a, b) = self;
        TrieAndL2(a.top(), b.top())
    }
}
impl<A, B, AV, BV, V> IntoIterator for TrieAnd<A, B>
    where A: Trie<Row = u32, Value = AV>,
          B: Trie<Row = u32, Value = BV>,
          AV: Merge<BV, Output = V>
{
    type Item = (u32, V);
    type IntoIter = TrieIter<TrieAnd<A, B>>;
    // Delegate to the Trie-provided iterator over (row, merged value).
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
|
use serde::{Deserialize, Serialize};
use typed_builder::TypedBuilder;
// A tarball package artifact: where to fetch it and how big it is.
// (Plain `//` comments on purpose: doc comments would feed the structopt /
// poem-openapi derives and change generated help/schema text.)
#[derive(
    Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TypedBuilder,
)]
#[cfg_attr(feature = "structopt", derive(structopt::StructOpt))]
#[cfg_attr(feature = "poem-openapi", derive(poem_openapi::Object))]
#[cfg_attr(feature = "poem-openapi", oai(rename = "TarballPackage"))]
pub struct Package {
    // Download location of the tarball.
    #[cfg_attr(feature = "structopt", structopt(short, long))]
    pub url: url::Url,
    // Size of the tarball, in bytes.
    #[cfg_attr(feature = "structopt", structopt(short, long))]
    pub size: u64,
    // Size after installation, in bytes.
    #[cfg_attr(feature = "structopt", structopt(short, long))]
    pub installed_size: u64,
}
impl super::AsDownloadUrl for Package {
    // The download location is simply the package's `url` field.
    fn as_download_url(&self) -> &url::Url {
        &self.url
    }
}
|
extern crate lanyout;
/// Run the library's self-test suites and return the total error count
/// (0 means every suite passed).
///
/// Exported with C linkage so external harnesses can call it.
#[no_mangle]
pub extern "C" fn test() -> i32 {
    // Sum the error counts directly — no mutable accumulator or trailing
    // `return` needed.
    lanyout::frame::test::test() + lanyout::canvas::test::test()
}
/// Initialize the library, run the self-tests once, then enter the main
/// event loop.
fn main() {
    lanyout::init();
    // NOTE(review): the error count returned by test() is discarded here —
    // presumably acceptable for this harness; confirm.
    test();
    lanyout::main_loop();
}
|
use aoc2019::intcode::IntCodeCpu;
use std::collections::HashMap;
use std::fs;
use std::io;
/// Panel colour as painted by the robot.
#[derive(PartialEq, Clone, Copy, Debug)]
enum Color {
    Black,
    White,
}
impl From<i64> for Color {
    /// Decode an intcode output value (0 = black, 1 = white).
    ///
    /// Panics on any other value.
    fn from(val: i64) -> Self {
        match val {
            0 => Color::Black,
            1 => Color::White,
            _ => panic!("unknown color {}", val),
        }
    }
}
/// The robot's current heading on the grid.
#[derive(Debug)]
enum Direction {
    Up,
    Right,
    Down,
    Left,
}
/// A 90-degree rotation command from the intcode brain.
#[derive(Debug)]
enum Turn {
    Left,
    Right,
}
impl From<i64> for Turn {
    /// Decode an intcode output value (0 = turn left, 1 = turn right).
    ///
    /// Panics on any other value.
    fn from(val: i64) -> Self {
        match val {
            0 => Turn::Left,
            1 => Turn::Right,
            _ => panic!("unknown turn direction {}", val),
        }
    }
}
/// The painting robot: a position, a heading, every panel it has painted,
/// and the intcode CPU driving it.
#[derive(Debug)]
struct Robot {
    position: (i32, i32),
    direction: Direction,
    // Painted (or pre-seeded) panels keyed by (x, y) coordinates.
    visited_positions: HashMap<(i32, i32), Color>,
    brain: IntCodeCpu,
}
impl Robot {
fn new(brain: IntCodeCpu) -> Self {
Robot {
position: (0, 0),
direction: Direction::Up,
visited_positions: [((0, 0), BASIC_PANEL_COLOR)].iter().cloned().collect(),
brain,
}
}
fn turn(&mut self, turn: Turn) {
match turn {
Turn::Left => {
self.direction = match self.direction {
Direction::Up => Direction::Left,
Direction::Left => Direction::Down,
Direction::Down => Direction::Right,
Direction::Right => Direction::Up,
}
}
Turn::Right => {
self.direction = match self.direction {
Direction::Up => Direction::Right,
Direction::Right => Direction::Down,
Direction::Down => Direction::Left,
Direction::Left => Direction::Up,
}
}
}
}
fn move_forward(&mut self) {
match self.direction {
// Up has to be decreased and down has to be increased,
// otherwise the result will be upside-down!
Direction::Up => self.position.1 -= 1,
Direction::Right => self.position.0 += 1,
Direction::Down => self.position.1 += 1,
Direction::Left => self.position.0 -= 1,
}
}
fn paint(&mut self, color: Color) {
self.visited_positions.insert(self.position, color);
}
}
/// Render the painted panels as ASCII art: white cells become '█', every
/// other cell a space.
///
/// Panics if `visited_positions` is empty (the min/max lookups unwrap).
fn show_panel(visited_positions: &HashMap<(i32, i32), Color>) {
    let xmin = visited_positions.keys().min_by_key(|xs| xs.0).unwrap().0;
    let xmax = visited_positions.keys().max_by_key(|xs| xs.0).unwrap().0;
    let ymin = visited_positions.keys().min_by_key(|ys| ys.1).unwrap().1;
    let ymax = visited_positions.keys().max_by_key(|ys| ys.1).unwrap().1;
    // max >= min, so the extents are non-negative without `abs()`.
    let mut panel =
        vec![vec![' '; (xmax - xmin) as usize + 1]; (ymax - ymin) as usize + 1];
    for (&(x, y), color) in visited_positions {
        if *color == Color::White {
            // Shift by the minimum so the smallest coordinate maps to
            // index 0. (The previous `+ min.abs()` form was only correct
            // when min <= 0 and would index out of bounds otherwise.)
            panel[(y - ymin) as usize][(x - xmin) as usize] = '█';
        }
    }
    for row in panel {
        for c in row {
            print!("{}", c);
        }
        println!();
    }
}
// Colour of the panel the robot starts on. NOTE(review): starting on white
// corresponds to the part-2 run of the puzzle — confirm.
const BASIC_PANEL_COLOR: Color = Color::White;
/// Drive the painting robot until its intcode brain halts, then print the
/// painted-panel count and render the panel.
fn main() -> io::Result<()> {
    let code = fs::read_to_string("./input/day11.in")?;
    let mut robot = Robot::new(IntCodeCpu::from_code(&code));
    loop {
        // Tell the brain the colour of the current panel (0 black, 1 white);
        // unpainted panels report the default colour.
        match robot
            .visited_positions
            .get(&robot.position)
            .unwrap_or(&BASIC_PANEL_COLOR)
        {
            Color::Black => robot.brain.input.push_back(0),
            Color::White => robot.brain.input.push_back(1),
        }
        // The brain answers with two outputs: the colour to paint, then the
        // direction to turn. A missing output means the program halted.
        if let Some(new_color) = robot.brain.run_until_output() {
            robot.paint(Color::from(new_color));
            if let Some(new_direction) = robot.brain.run_until_output() {
                robot.turn(Turn::from(new_direction));
                robot.move_forward();
            } else {
                break;
            }
        } else {
            break;
        }
    }
    // `len()` gives the count directly; no need to iterate the whole map.
    println!("p1: {}", robot.visited_positions.len());
    println!("p2: ");
    show_panel(&robot.visited_positions);
    Ok(())
}
|
// svd2rust-generated read-only accessors for the ETH MAC Rx/Tx status register.
#[doc = "Register `ETH_MACRxTxSR` reader"]
pub type R = crate::R<ETH_MACRX_TX_SR_SPEC>;
#[doc = "Field `TJT` reader - TJT"]
pub type TJT_R = crate::BitReader;
#[doc = "Field `NCARR` reader - NCARR"]
pub type NCARR_R = crate::BitReader;
#[doc = "Field `LCARR` reader - LCARR"]
pub type LCARR_R = crate::BitReader;
#[doc = "Field `EXDEF` reader - EXDEF"]
pub type EXDEF_R = crate::BitReader;
#[doc = "Field `LCOL` reader - LCOL"]
pub type LCOL_R = crate::BitReader;
#[doc = "Field `EXCOL` reader - EXCOL"]
pub type EXCOL_R = crate::BitReader;
#[doc = "Field `RWT` reader - RWT"]
pub type RWT_R = crate::BitReader;
impl R {
    #[doc = "Bit 0 - TJT"]
    #[inline(always)]
    pub fn tjt(&self) -> TJT_R {
        TJT_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - NCARR"]
    #[inline(always)]
    pub fn ncarr(&self) -> NCARR_R {
        NCARR_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - LCARR"]
    #[inline(always)]
    pub fn lcarr(&self) -> LCARR_R {
        LCARR_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - EXDEF"]
    #[inline(always)]
    pub fn exdef(&self) -> EXDEF_R {
        EXDEF_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - LCOL"]
    #[inline(always)]
    pub fn lcol(&self) -> LCOL_R {
        LCOL_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - EXCOL"]
    #[inline(always)]
    pub fn excol(&self) -> EXCOL_R {
        EXCOL_R::new(((self.bits >> 5) & 1) != 0)
    }
    // Bits 6 and 7 are not exposed by this register definition.
    #[doc = "Bit 8 - RWT"]
    #[inline(always)]
    pub fn rwt(&self) -> RWT_R {
        RWT_R::new(((self.bits >> 8) & 1) != 0)
    }
}
#[doc = "The Receive Transmit Status register contains the Receive and Transmit Error status.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_macrx_tx_sr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MACRX_TX_SR_SPEC;
impl crate::RegisterSpec for ETH_MACRX_TX_SR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`eth_macrx_tx_sr::R`](R) reader structure"]
impl crate::Readable for ETH_MACRX_TX_SR_SPEC {}
#[doc = "`reset()` method sets ETH_MACRxTxSR to value 0"]
impl crate::Resettable for ETH_MACRX_TX_SR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
extern crate advent_of_code_2017_day_13;
use advent_of_code_2017_day_13::*;
#[test]
fn part_1_example() {
    // Example input and expected answer from the puzzle statement.
    let input = "\
0: 3
1: 2
4: 4
6: 4";
    assert_eq!(solve_puzzle_part_1(input), "24");
}
#[test]
fn part_2_example() {
    // Same example input; expected answer for part 2.
    let input = "\
0: 3
1: 2
4: 4
6: 4";
    assert_eq!(solve_puzzle_part_2(input), "10");
}
|
/// A fixed-capacity FIFO buffer.
///
/// Backed by a `Vec`: reads pop from the front, writes push to the back,
/// and `overwrite` evicts the oldest element when the buffer is full.
#[derive(Debug, PartialEq, Clone)]
pub struct CircularBuffer<T> {
    capacity: usize,
    buffer: Vec<T>,
}
/// Errors returned by buffer operations.
#[derive(Debug, PartialEq)]
pub enum Error {
    /// `read` was called on an empty buffer.
    EmptyBuffer,
    /// `write` was called on a full buffer.
    FullBuffer,
}
impl<T: std::clone::Clone + std::fmt::Debug> CircularBuffer<T> {
    /// Create an empty buffer holding at most `capacity` elements.
    pub fn new(capacity: usize) -> Self {
        CircularBuffer {
            buffer: Vec::with_capacity(capacity),
            // Field-init shorthand instead of `capacity: capacity`.
            capacity,
        }
    }
    /// Remove and return the oldest element, or `Err(EmptyBuffer)`.
    pub fn read(&mut self) -> Result<T, Error> {
        if self.buffer.is_empty() {
            Err(Error::EmptyBuffer)
        } else {
            Ok(self.buffer.remove(0))
        }
    }
    /// Append an element, or `Err(FullBuffer)` when at capacity.
    // Fixed: the parameter was named `_element` despite being used; the
    // leading underscore wrongly signalled "unused".
    pub fn write(&mut self, element: T) -> Result<(), Error> {
        if self.buffer.len() >= self.capacity {
            Err(Error::FullBuffer)
        } else {
            self.buffer.push(element);
            Ok(())
        }
    }
    /// Remove every element, keeping the capacity.
    pub fn clear(&mut self) {
        self.buffer.clear()
    }
    /// Append an element, evicting the oldest one first if the buffer is
    /// already full.
    pub fn overwrite(&mut self, element: T) {
        if self.buffer.len() >= self.capacity {
            // Drop the oldest element to make room (no-op when capacity is 0).
            let _ = self.read();
        }
        // Best effort: with capacity 0 this still fails, matching the
        // original behaviour of silently writing nothing.
        let _ = self.write(element);
    }
}
|
use std::fmt;
use std::fs::OpenOptions;
use std::io::{self, LineWriter, Write};
use std::net::IpAddr;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::time::SystemTime;
use anyhow::{anyhow, Error};
use log::debug;
use serde::Serialize;
use structopt::StructOpt;
use hassh::{live, packet::KeyExchange, pcap, Hassh};
// Command-line options, parsed by structopt. (Field doc comments double as
// the CLI help text, so they are kept terse.)
#[derive(Debug, StructOpt)]
#[structopt(
    name = "hassh",
    about = "Extract fingerprinting to identify specific Client and Server SSH implementations."
)]
struct Opt {
    /// pcap file to process
    #[structopt(short, long, parse(from_os_str))]
    file: Vec<PathBuf>,
    /// directory of pcap files to process
    #[structopt(short, long, parse(from_os_str))]
    directory: Vec<PathBuf>,
    /// listen on interface
    #[structopt(short, long)]
    interface: Option<String>,
    /// client or server fingerprint.
    #[structopt(short = "p", long, default_value = "all")]
    fingerprint: Fingerprint,
    /// BPF capture filter to use (for live capture only).
    #[structopt(short, long, default_value = "tcp port 22 or tcp port 2222")]
    bpf_filter: String,
    /// specify the output log format: json, csv
    #[structopt(short, long)]
    log_format: Option<LogFormat>,
    /// specify the output log file
    #[structopt(short, long, parse(from_os_str))]
    output_file: Option<PathBuf>,
    /// save the live captured packets to this file
    // NOTE(review): write_pcap is not referenced anywhere in this file —
    // confirm it is consumed elsewhere.
    #[structopt(short, long, parse(from_os_str))]
    write_pcap: Option<PathBuf>,
}
impl Opt {
    /// Resolve the output path: the user-supplied `--output-file` when
    /// present, otherwise a default filename matching the log format.
    pub fn output_file(&self) -> &Path {
        match self.output_file.as_deref() {
            Some(path) => path,
            None => {
                let default = match self.log_format {
                    Some(LogFormat::CSV) => "hassh.csv",
                    Some(LogFormat::JSON) => "hassh.json",
                    None => "hassh.log",
                };
                Path::new(default)
            }
        }
    }
}
/// Which side(s) of the SSH connection to fingerprint.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Fingerprint {
    All,
    Server,
    Client,
}
impl FromStr for Fingerprint {
    type Err = Error;
    /// Parse the `--fingerprint` CLI value ("all", "server" or "client").
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "all" => Ok(Self::All),
            "server" => Ok(Self::Server),
            "client" => Ok(Self::Client),
            // Fixed typo in the error message ("finterprint" -> "fingerprint").
            _ => Err(anyhow!("unexpected fingerprint: {}", s)),
        }
    }
}
/// Structured output formats selectable via `--log-format`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum LogFormat {
    JSON,
    CSV,
}
impl FromStr for LogFormat {
    type Err = Error;
    /// Parse the `--log-format` CLI value ("json" or "csv").
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "json" => Ok(LogFormat::JSON),
            "csv" => Ok(LogFormat::CSV),
            _ => Err(anyhow!("unexpected log format: {}", s)),
        }
    }
}
// ANSI 256-colour foreground escape sequences used for terminal output.
const CL1: &str = "\u{001b}[38;5;81m";
const CL2: &str = "\u{001b}[38;5;220m";
const CL3: &str = "\u{001b}[38;5;181m";
const CL4: &str = "\u{001b}[38;5;208m";
// Reset sequence restoring the default terminal style.
const END: &str = "\x1b[0m";
// Classify which side of the connection a Hassh record describes.
trait IsServer {
    fn is_server(&self) -> bool;
}
impl IsServer for Hassh {
    // Heuristic: the SSH server listens on the lower (well-known) port
    // while the client uses a higher ephemeral one.
    fn is_server(&self) -> bool {
        self.src.port() < self.dest.port()
    }
}
// Zero-cost wrapper giving `Hassh` a coloured, human-readable Display.
#[repr(transparent)]
struct HasshFmt(Hassh);
impl fmt::Display for HasshFmt {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Server and client records differ in colour and in which hash /
        // algorithm set is reported.
        if self.0.is_server() {
            write!(
                f,
                "
[+] Server SSH_MSG_KEXINIT detected
{cl1}[ {sip}:{sport} -> {dip}:{dport} ]{end}
[-] Identification String: {cl4}{proto}{end}
[-] hasshServer: {cl4}{hassh:x}{end}
[-] hasshServer Algorithms: {cl3}{algo}{end}",
                sip = self.0.src.ip(),
                sport = self.0.src.port(),
                dip = self.0.dest.ip(),
                dport = self.0.dest.port(),
                proto = self.0.version,
                hassh = self.0.server_hash(),
                algo = self.0.server_algo(),
                cl1 = CL1,
                cl3 = CL3,
                cl4 = CL4,
                end = END
            )
        } else {
            write!(
                f,
                "
[+] Client SSH_MSG_KEXINIT detected
{cl1}[ {sip}:{sport} -> {dip}:{dport} ]{end}
[-] Identification String: {cl2}{proto}{end}
[-] hassh: {cl2}{hassh:x}{end}
[-] hassh Algorithms: {cl3}{algo}{end}",
                sip = self.0.src.ip(),
                sport = self.0.src.port(),
                dip = self.0.dest.ip(),
                dport = self.0.dest.port(),
                proto = self.0.version,
                hassh = self.0.client_hash(),
                algo = self.0.client_algo(),
                cl1 = CL1,
                cl2 = CL2,
                cl3 = CL3,
                end = END
            )
        }
    }
}
/// One flattened output record, serialized as JSON or CSV.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct Row {
    pub timestamp: String,
    pub source_ip: IpAddr,
    pub destination_ip: IpAddr,
    pub source_port: u16,
    pub destination_port: u16,
    // "server" or "client".
    pub hassh_type: &'static str,
    pub identification_string: String,
    pub hassh: String,
    pub hassh_version: &'static str,
    pub hassh_algorithms: String,
    pub kex_algs: String,
    pub enc_algs: String,
    pub mac_algs: String,
    pub cmp_algs: String,
}
// Version of the hassh fingerprint format reported in each row.
const HASSH_VERSION: &str = "1.0";
impl From<Hassh> for Row {
    /// Flatten a `Hassh` record into a serializable row, choosing the
    /// server- or client-side hash/algorithm fields via the port heuristic.
    fn from(hassh: Hassh) -> Row {
        let is_server = hassh.is_server();
        // RFC 3339 timestamp with millisecond precision; empty string when
        // the record carries no timestamp.
        let timestamp = hassh
            .ts
            .map(|ts| humantime::format_rfc3339_millis(SystemTime::UNIX_EPOCH + ts).to_string())
            .unwrap_or_default();
        let source_ip = hassh.src.ip();
        let destination_ip = hassh.dest.ip();
        let source_port = hassh.src.port();
        let destination_port = hassh.dest.port();
        let identification_string = hassh.version.to_string();
        let hash = format!(
            "{:x}",
            if is_server {
                hassh.server_hash()
            } else {
                hassh.client_hash()
            }
        );
        let hassh_algorithms = if is_server {
            hassh.server_algo()
        } else {
            hassh.client_algo()
        };
        // Pull the directional algorithm lists out of the key exchange.
        let KeyExchange {
            kex_algs,
            encr_algs_server_to_client,
            encr_algs_client_to_server,
            mac_algs_server_to_client,
            mac_algs_client_to_server,
            comp_algs_server_to_client,
            comp_algs_client_to_server,
            ..
        } = hassh.kex;
        Row {
            timestamp,
            source_ip,
            destination_ip,
            source_port,
            destination_port,
            hassh_type: if is_server { "server" } else { "client" },
            identification_string,
            hassh: hash,
            hassh_version: HASSH_VERSION,
            hassh_algorithms,
            kex_algs,
            enc_algs: if is_server {
                encr_algs_server_to_client
            } else {
                encr_algs_client_to_server
            },
            mac_algs: if is_server {
                mac_algs_server_to_client
            } else {
                mac_algs_client_to_server
            },
            cmp_algs: if is_server {
                comp_algs_server_to_client
            } else {
                comp_algs_client_to_server
            },
        }
    }
}
// Output sink: newline-delimited JSON, CSV, or coloured text on stdout.
enum LogWriter<W: Write> {
    JSON(W),
    CSV(csv::Writer<W>),
    Text(io::Stdout),
}
impl<W: Write> LogWriter<W> {
    /// Write one `Hassh` record in the selected output format.
    fn write(&mut self, hassh: Hassh) -> Result<(), Error> {
        match self {
            LogWriter::JSON(w) => {
                let mut w = LineWriter::new(w);
                serde_json::to_writer(&mut w, &Row::from(hassh))?;
                // `write_all` instead of `write`: a plain `write` may write
                // fewer bytes than requested and silently drop the newline.
                w.write_all(b"\n")?;
            }
            LogWriter::CSV(w) => {
                w.serialize(&Row::from(hassh))?;
                w.flush()?;
            }
            LogWriter::Text(w) => {
                // `writeln!` expresses the trailing newline idiomatically.
                writeln!(w, "{}", HasshFmt(hassh))?;
                w.flush()?;
            }
        }
        Ok(())
    }
}
/// Log a single hassh record, honouring the fingerprint filter: client-only
/// mode drops server records and server-only mode drops client records.
fn process_hassh<W: Write>(
    out: &mut LogWriter<W>,
    hassh: Hassh,
    fingerprint: Fingerprint,
) -> Result<(), Error> {
    let is_server = hassh.is_server();
    let suppress = match fingerprint {
        Fingerprint::Client => is_server,
        Fingerprint::Server => !is_server,
        Fingerprint::All => false,
    };
    if suppress {
        return Ok(());
    }
    out.write(hassh)
}
/// Entry point: build the selected log writer, then feed it hassh records
/// from explicit pcap files, directories of pcaps, and/or a live capture.
pub fn main() -> Result<(), Error> {
    pretty_env_logger::init_timed();
    let opt = Opt::from_args();
    debug!("{:#?}", opt);
    let mut out = {
        let filename = opt.output_file();
        // Shared constructor: append to the chosen output file, creating
        // it on first use.
        let output_file = move || OpenOptions::new().create(true).append(true).open(filename);
        match opt.log_format {
            Some(LogFormat::CSV) => {
                debug!("write to CSV file: {}", filename.display());
                let f = output_file()?;
                // Emit the CSV header only when the file is still empty.
                let w = csv::WriterBuilder::new()
                    .has_headers(f.metadata()?.len() == 0)
                    .from_writer(f);
                LogWriter::CSV(w)
            }
            Some(LogFormat::JSON) => {
                debug!("write to JSON file: {}", filename.display());
                LogWriter::JSON(output_file()?)
            }
            _ => LogWriter::Text(io::stdout()),
        }
    };
    let fingerprint = opt.fingerprint;
    let mut log_hassh = move |hassh| process_hassh(&mut out, hassh, fingerprint);
    // Explicitly listed pcap files.
    for path in opt.file {
        for hassh in pcap::open(path).map(Box::new)? {
            log_hassh(hassh)?;
        }
    }
    // Every file in each given directory.
    for dir in opt.directory {
        for path in glob::glob(&dir.join("*").to_string_lossy())? {
            for hassh in pcap::open(path?)? {
                log_hassh(hassh)?;
            }
        }
    }
    // Live capture on the requested interface, filtered by the BPF expression.
    if let Some(intf) = opt.interface {
        for hassh in live::capture(&intf)?.with_filter(&opt.bpf_filter)? {
            log_hassh(hassh)?;
        }
    }
    Ok(())
}
|
// svd2rust-generated read-only accessors for the power status register SR2.
#[doc = "Register `SR2` reader"]
pub type R = crate::R<SR2_SPEC>;
#[doc = "Field `SDBF` reader - Step Down converter Bypass mode flag"]
pub type SDBF_R = crate::BitReader;
#[doc = "Field `SDSMPSF` reader - Step Down converter SMPS mode flag"]
pub type SDSMPSF_R = crate::BitReader;
#[doc = "Field `REGLPS` reader - Low-power regulator started"]
pub type REGLPS_R = crate::BitReader;
#[doc = "Field `REGLPF` reader - Low-power regulator flag"]
pub type REGLPF_R = crate::BitReader;
#[doc = "Field `VOSF` reader - Voltage scaling flag"]
pub type VOSF_R = crate::BitReader;
#[doc = "Field `PVDO` reader - Power voltage detector output"]
pub type PVDO_R = crate::BitReader;
#[doc = "Field `PVMO1` reader - Peripheral voltage monitoring output: VDDUSB vs. 1.2 V"]
pub type PVMO1_R = crate::BitReader;
#[doc = "Field `PVMO3` reader - Peripheral voltage monitoring output: VDDA vs. 1.62 V"]
pub type PVMO3_R = crate::BitReader;
impl R {
    #[doc = "Bit 0 - Step Down converter Bypass mode flag"]
    #[inline(always)]
    pub fn sdbf(&self) -> SDBF_R {
        SDBF_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Step Down converter SMPS mode flag"]
    #[inline(always)]
    pub fn sdsmpsf(&self) -> SDSMPSF_R {
        SDSMPSF_R::new(((self.bits >> 1) & 1) != 0)
    }
    // Bits 2..=7 are not exposed by this register definition.
    #[doc = "Bit 8 - Low-power regulator started"]
    #[inline(always)]
    pub fn reglps(&self) -> REGLPS_R {
        REGLPS_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - Low-power regulator flag"]
    #[inline(always)]
    pub fn reglpf(&self) -> REGLPF_R {
        REGLPF_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - Voltage scaling flag"]
    #[inline(always)]
    pub fn vosf(&self) -> VOSF_R {
        VOSF_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - Power voltage detector output"]
    #[inline(always)]
    pub fn pvdo(&self) -> PVDO_R {
        PVDO_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - Peripheral voltage monitoring output: VDDUSB vs. 1.2 V"]
    #[inline(always)]
    pub fn pvmo1(&self) -> PVMO1_R {
        PVMO1_R::new(((self.bits >> 12) & 1) != 0)
    }
    // Bit 13 is not exposed by this register definition.
    #[doc = "Bit 14 - Peripheral voltage monitoring output: VDDA vs. 1.62 V"]
    #[inline(always)]
    pub fn pvmo3(&self) -> PVMO3_R {
        PVMO3_R::new(((self.bits >> 14) & 1) != 0)
    }
}
#[doc = "Power status register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr2::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SR2_SPEC;
impl crate::RegisterSpec for SR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`sr2::R`](R) reader structure"]
impl crate::Readable for SR2_SPEC {}
#[doc = "`reset()` method sets SR2 to value 0x02"]
impl crate::Resettable for SR2_SPEC {
    const RESET_VALUE: Self::Ux = 0x02;
}
|
// Reader/writer proxy types for the SCAR_PRG register (svd2rust-style
// generated code). Field writers carry the bit offset `O` as a const generic.
#[doc = "Register `SCAR_PRG` reader"]
pub type R = crate::R<SCAR_PRG_SPEC>;
#[doc = "Register `SCAR_PRG` writer"]
pub type W = crate::W<SCAR_PRG_SPEC>;
#[doc = "Field `SEC_AREA_START` reader - Bank 1 lowest secure protected address configuration"]
pub type SEC_AREA_START_R = crate::FieldReader<u16>;
#[doc = "Field `SEC_AREA_START` writer - Bank 1 lowest secure protected address configuration"]
pub type SEC_AREA_START_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 12, O, u16>;
#[doc = "Field `SEC_AREA_END` reader - Bank 1 highest secure protected address configuration"]
pub type SEC_AREA_END_R = crate::FieldReader<u16>;
#[doc = "Field `SEC_AREA_END` writer - Bank 1 highest secure protected address configuration"]
pub type SEC_AREA_END_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 12, O, u16>;
#[doc = "Field `DMES` reader - Bank 1 secure protected erase enable option configuration bit"]
pub type DMES_R = crate::BitReader;
#[doc = "Field `DMES` writer - Bank 1 secure protected erase enable option configuration bit"]
pub type DMES_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    /// Bits 0:11 - Bank 1 lowest secure protected address configuration
    #[inline(always)]
    pub fn sec_area_start(&self) -> SEC_AREA_START_R {
        let raw = (self.bits & 0x0fff) as u16;
        SEC_AREA_START_R::new(raw)
    }
    /// Bits 16:27 - Bank 1 highest secure protected address configuration
    #[inline(always)]
    pub fn sec_area_end(&self) -> SEC_AREA_END_R {
        let raw = ((self.bits >> 16) & 0x0fff) as u16;
        SEC_AREA_END_R::new(raw)
    }
    /// Bit 31 - Bank 1 secure protected erase enable option configuration bit
    #[inline(always)]
    pub fn dmes(&self) -> DMES_R {
        DMES_R::new(((self.bits >> 31) & 1) == 1)
    }
}
impl W {
    #[doc = "Bits 0:11 - Bank 1 lowest secure protected address configuration"]
    #[inline(always)]
    #[must_use]
    pub fn sec_area_start(&mut self) -> SEC_AREA_START_W<SCAR_PRG_SPEC, 0> {
        SEC_AREA_START_W::new(self)
    }
    #[doc = "Bits 16:27 - Bank 1 highest secure protected address configuration"]
    #[inline(always)]
    #[must_use]
    pub fn sec_area_end(&mut self) -> SEC_AREA_END_W<SCAR_PRG_SPEC, 16> {
        SEC_AREA_END_W::new(self)
    }
    #[doc = "Bit 31 - Bank 1 secure protected erase enable option configuration bit"]
    #[inline(always)]
    #[must_use]
    pub fn dmes(&mut self) -> DMES_W<SCAR_PRG_SPEC, 31> {
        DMES_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    /// # Safety
    ///
    /// No field-level checking is performed here; the caller must ensure
    /// `bits` is a value that is valid for this register.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "FLASH secure address for bank 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`scar_prg::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`scar_prg::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SCAR_PRG_SPEC;
impl crate::RegisterSpec for SCAR_PRG_SPEC {
    // The register is accessed as a 32-bit word.
    type Ux = u32;
}
#[doc = "`read()` method returns [`scar_prg::R`](R) reader structure"]
impl crate::Readable for SCAR_PRG_SPEC {}
#[doc = "`write(|w| ..)` method takes [`scar_prg::W`](W) writer structure"]
impl crate::Writable for SCAR_PRG_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SCAR_PRG to value 0"]
impl crate::Resettable for SCAR_PRG_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
pub fn solve_puzzle_part_1(input: &str) -> String {
input
.lines()
.map(is_valid_passphrase_part_1)
.filter(|&bool| bool)
.count()
.to_string()
}
pub fn solve_puzzle_part_2(input: &str) -> String {
input
.lines()
.map(is_valid_passphrase_part_2)
.filter(|&bool| bool)
.count()
.to_string()
}
/// Part 1: a passphrase is valid when it contains no duplicate word.
///
/// Uses a `HashSet` for a single O(n) pass instead of the previous
/// O(n²) pairwise comparison.
fn is_valid_passphrase_part_1(passphrase: &str) -> bool {
    use std::collections::HashSet;
    let mut seen = HashSet::new();
    // `insert` returns false when the word was already present.
    passphrase.split_whitespace().all(|word| seen.insert(word))
}
/// True when `s1` and `s2` contain exactly the same characters
/// (i.e. their sorted character sequences are equal).
fn are_anagrams(s1: &str, s2: &str) -> bool {
    let sorted_chars = |s: &str| {
        let mut chars: Vec<char> = s.chars().collect();
        chars.sort_unstable();
        chars
    };
    sorted_chars(s1) == sorted_chars(s2)
}
/// Part 2: a passphrase is valid when no two words are anagrams of each other.
///
/// Reduces each word to its sorted character sequence and requires all such
/// keys to be distinct — O(n·k log k) instead of the previous pairwise
/// O(n²) scan.
fn is_valid_passphrase_part_2(passphrase: &str) -> bool {
    use std::collections::HashSet;
    let mut seen = HashSet::new();
    passphrase.split_whitespace().all(|word| {
        let mut key: Vec<char> = word.chars().collect();
        key.sort_unstable();
        // `insert` returns false when an equal key (an anagram) was already seen.
        seen.insert(key)
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    // Examples from the puzzle description.
    #[test]
    fn part_1_examples() {
        assert!(is_valid_passphrase_part_1("aa bb cc dd ee"));
        assert!(!is_valid_passphrase_part_1("aa bb cc dd aa"));
        assert!(is_valid_passphrase_part_1("aa bb cc dd aaa"));
    }
    #[test]
    fn part_2_examples() {
        assert!(is_valid_passphrase_part_2("abcde fghij"));
        assert!(!is_valid_passphrase_part_2("abcde xyz ecdab"));
        assert!(is_valid_passphrase_part_2("a ab abc abd abf abj"));
        assert!(is_valid_passphrase_part_2("iiii oiii ooii oooi oooo"));
        assert!(!is_valid_passphrase_part_2("oiii ioii iioi iiio"));
    }
}
|
use necsim_core::{
cogs::{Habitat, HabitatPrimeableRng, PrimeableRng, TurnoverRate},
intrinsics::floor,
landscape::IndexedLocation,
};
use necsim_core_bond::NonNegativeF64;
use super::EventTimeSampler;
/// Event-time sampler with a fixed, deterministic event schedule
/// (see the `EventTimeSampler` impl below in the original module).
///
/// The hand-written `Default` impl was redundant for a unit-payload tuple
/// struct; it is now derived.
#[allow(clippy::module_name_repetitions)]
#[derive(Clone, Debug, Default)]
#[cfg_attr(feature = "cuda", derive(rust_cuda::common::RustToCuda))]
pub struct FixedEventTimeSampler(());
#[contract_trait]
impl<H: Habitat, G: PrimeableRng, T: TurnoverRate<H>> EventTimeSampler<H, G, T>
    for FixedEventTimeSampler
{
    // Deterministic sampler: events fall on the fixed time grid of multiples
    // of 1/lambda. The RNG is primed here but its output does not influence
    // the returned time (only `time_step / lambda` is returned).
    #[inline]
    fn next_event_time_at_indexed_location_weakly_after(
        &self,
        indexed_location: &IndexedLocation,
        time: NonNegativeF64,
        habitat: &H,
        rng: &mut G,
        turnover_rate: &T,
    ) -> NonNegativeF64 {
        // Per-location event rate lambda.
        let lambda =
            turnover_rate.get_turnover_rate_at_location(indexed_location.location(), habitat);
        // Index of the first grid point after `time`: floor(time * lambda) + 1.
        #[allow(clippy::cast_possible_truncation)]
        #[allow(clippy::cast_sign_loss)]
        let time_step = floor(time.get() * lambda.get()) as u64 + 1;
        // Re-key the RNG deterministically to (habitat, location, time step)
        // so downstream draws are reproducible for this event.
        rng.prime_with_habitat(habitat, indexed_location, time_step);
        NonNegativeF64::from(time_step) / lambda
    }
}
|
use frame_support::weights::{constants::RocksDbWeight as DbWeight, Weight};
/// Per-extrinsic weight estimates for this pallet; one method per dispatchable.
/// Implemented for `()` below with hard-coded estimates.
pub trait WeightInfo {
    fn on_finalize() -> Weight;
    // Account management.
    fn account_disable() -> Weight;
    fn account_add_with_role_and_data() -> Weight;
    fn account_set_with_role_and_data() -> Weight;
    // EverUSD mint/burn request lifecycle.
    fn token_mint_request_create_everusd() -> Weight;
    fn token_mint_request_revoke_everusd() -> Weight;
    fn token_mint_request_confirm_everusd() -> Weight;
    fn token_mint_request_decline_everusd() -> Weight;
    fn token_burn_request_create_everusd() -> Weight;
    fn token_burn_request_revoke_everusd() -> Weight;
    fn token_burn_request_confirm_everusd() -> Weight;
    fn token_burn_request_decline_everusd() -> Weight;
    // Bond lifecycle and trading.
    fn bond_add_new() -> Weight;
    fn bond_set() -> Weight;
    fn bond_update() -> Weight;
    fn bond_release() -> Weight;
    fn bond_unit_package_buy() -> Weight;
    fn bond_unit_package_return() -> Weight;
    fn bond_withdraw() -> Weight;
    fn bond_activate() -> Weight;
    fn bond_impact_report_send() -> Weight;
    fn bond_impact_report_approve() -> Weight;
    fn bond_redeem() -> Weight;
    fn bond_declare_bankrupt() -> Weight;
    fn bond_accrue_coupon_yield() -> Weight;
    fn bond_revoke() -> Weight;
    fn bond_withdraw_everusd() -> Weight;
    fn bond_deposit_everusd() -> Weight;
    fn bond_unit_lot_bid() -> Weight;
    fn bond_unit_lot_settle() -> Weight;
}
// Fallback weights: a flat base cost per extrinsic plus RocksDB read/write
// costs. NOTE(review): these look hand-estimated rather than benchmarked —
// confirm against `frame-benchmarking` output before relying on them.
impl WeightInfo for () {
    fn on_finalize() -> Weight {
        10000 as Weight
    }
    fn account_disable() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn account_add_with_role_and_data() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn account_set_with_role_and_data() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_mint_request_create_everusd() -> Weight {
        (20000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_mint_request_revoke_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_mint_request_confirm_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(5 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_mint_request_decline_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_burn_request_create_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_burn_request_revoke_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_burn_request_confirm_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn token_burn_request_decline_everusd() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_add_new() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_set() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_update() -> Weight {
        (50000 as Weight)
            .saturating_add(DbWeight::get().reads(1 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_release() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_unit_package_buy() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn bond_unit_package_return() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn bond_withdraw() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(10 as Weight))
    }
    fn bond_activate() -> Weight {
        (100000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn bond_impact_report_send() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_impact_report_approve() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_redeem() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn bond_declare_bankrupt() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    // Heaviest call: touches per-holder storage (14 reads / 13 writes).
    fn bond_accrue_coupon_yield() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(14 as Weight))
            .saturating_add(DbWeight::get().writes(13 as Weight))
    }
    fn bond_revoke() -> Weight {
        (10000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_withdraw_everusd() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn bond_deposit_everusd() -> Weight {
        (1000000 as Weight)
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn bond_unit_lot_bid() -> Weight {
        (20000 as Weight)
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn bond_unit_lot_settle() -> Weight {
        (20000 as Weight)
            .saturating_add(DbWeight::get().reads(9 as Weight))
            .saturating_add(DbWeight::get().writes(5 as Weight))
    }
}
|
#![deny(clippy::all)]
#![warn(clippy::pedantic)]
#![allow(clippy::single_match)]
#![allow(clippy::cast_possible_truncation)]
use std::fmt::{Display, Formatter, Result};
use std::fs;
use std::vec::Vec;
use svg2polylines::{parse, CoordinatePair};
static FILLER: char = '#';
/// Rectangular character grid produced by rasterising an SVG.
#[derive(Debug)]
pub struct AsciiResult {
    // Row-major grid of output characters.
    data: Vec<Vec<char>>,
}
impl Display for AsciiResult {
    /// Writes each row as one line; every row — including the last — is
    /// terminated by `'\n'`. Streams straight into the formatter instead of
    /// building an intermediate buffer (whose old capacity estimate also
    /// forgot to count the newlines).
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        for row in &self.data {
            let line: String = row.iter().collect();
            writeln!(f, "{}", line)?;
        }
        Ok(())
    }
}
/// Rasterises the polylines of the SVG at `svg_file_path` onto a fixed
/// 46x24 character grid, marking covered cells with `FILLER`.
///
/// Unreadable or unparsable files yield a blank grid. Each polyline segment
/// is drawn twice, exactly as before: once with rounded endpoints and once
/// with truncated endpoints (the original drew both passes; preserved here
/// with the duplicated loop bodies factored into one helper).
pub fn svg_to_ascii(svg_file_path: &str) -> AsciiResult {
    // Marks every cell in the axis-aligned bounding rectangle of
    // (x1, y1)-(x2, y2); out-of-grid cells are silently skipped.
    fn fill_span(grid: &mut [Vec<char>], x1: u64, x2: u64, y1: u64, y2: u64) {
        for x in x1.min(x2)..=x1.max(x2) {
            for y in y1.min(y2)..=y1.max(y2) {
                if let Some(row) = grid.get_mut(y as usize) {
                    if let Some(cell) = row.get_mut(x as usize) {
                        *cell = FILLER;
                    }
                }
            }
        }
    }
    let mut ascii_array = vec![vec![' '; 46]; 24];
    if let Ok(contents) = fs::read_to_string(svg_file_path) {
        if let Ok(polylines) = parse(&contents) {
            for polyline in polylines {
                // Walk consecutive coordinate pairs of the polyline.
                for segment in polyline.windows(2) {
                    let current: CoordinatePair = segment[0];
                    let next: CoordinatePair = segment[1];
                    // Pass 1: rounded endpoints.
                    fill_span(
                        &mut ascii_array,
                        current.x.round() as u64,
                        next.x.round() as u64,
                        current.y.round() as u64,
                        next.y.round() as u64,
                    );
                    // Pass 2: truncated endpoints.
                    fill_span(
                        &mut ascii_array,
                        current.x as u64,
                        next.x as u64,
                        current.y as u64,
                        next.y as u64,
                    );
                }
            }
        }
    }
    AsciiResult { data: ascii_array }
}
|
/// Parses the puzzle input: one module mass per line.
#[aoc_generator(day01)]
pub fn day01_gen(input: &str) -> Vec<i32> {
    let mut masses = Vec::new();
    for line in input.lines() {
        masses.push(line.parse().unwrap());
    }
    masses
}
/// Part 1: total fuel, where each module needs `floor(mass / 3) - 2`.
///
/// `div_euclid` is floored integer division, matching the previous
/// `(n as f32 / 3.0).floor()` exactly while avoiding the f32 precision
/// loss for masses above 2^24.
#[aoc(day01, part1)]
pub fn day01_part1(input: &[i32]) -> i32 {
    input.iter().map(|&mass| mass.div_euclid(3) - 2).sum()
}
/// Fuel required for mass `n`, including the fuel needed to lift that fuel,
/// iterated until the marginal requirement drops to zero or below.
///
/// Uses floored integer division (`div_euclid`) — exact for every `i32`,
/// unlike the previous `f32` round-trip — and a loop instead of recursion,
/// so there is no call-stack growth.
fn calculate_fuel(n: i32) -> i32 {
    let mut total = 0;
    let mut mass = n;
    loop {
        let fuel = mass.div_euclid(3) - 2;
        if fuel <= 0 {
            return total;
        }
        total += fuel;
        mass = fuel;
    }
}
#[aoc(day01, part2)]
pub fn day01_part2(input: &[i32]) -> i32 {
input.iter().map(|&n| calculate_fuel(n)).sum()
}
|
use std::cell::RefCell;
use std::rc::Rc;
use web_sys;
use web_sys::{WebGl2RenderingContext, WebGlBuffer, WebGlProgram};
use nalgebra_glm as glm;
use regmach::dsp::types as rdt;
use std::collections::{HashMap, HashSet};
// #[derive(PartialEq, Eq, Hash)]
// pub struct MeshId(u32);
/// GPU-side data for one renderable mesh.
pub struct Mesh {
    pub vertices: Vec<f32>,
    // pub indices: Vec<u16>,
    pub shader_program: WebGlProgram,
    pub vertex_buffer: WebGlBuffer,
    // Position offset — presumably kept in sync with `translation_matrix`;
    // TODO confirm against the render code.
    pub x: f32,
    pub y: f32,
    pub translation_matrix: glm::Mat4,
}
/// Newtype wrapper owning the font used for text rendering.
pub(crate) struct FontMgr<'a>(pub rusttype::Font<'a>);
/// Integer bucket coordinate used as the key of the spatial hash.
#[derive(PartialEq, Eq, Hash)]
pub(crate) struct BucketLoc {
    pub x: i32,
    pub y: i32,
}
/// Spatial hash: entities by id, plus per-bucket sets of entity ids.
pub(crate) struct SpaceHash {
    pub(crate) store: HashMap<rdt::EntityId, Box<rdt::Entity>>,
    pub(crate) space: HashMap<BucketLoc, HashSet<rdt::EntityId>>,
}
/// Like `Mesh`, with an additional per-vertex colour buffer for glyphs.
pub struct FontMesh {
    pub vertices: Vec<f32>,
    pub colors: Vec<f32>,
    // pub indices: Vec<u16>,
    pub shader_program: WebGlProgram,
    pub vertex_buffer: WebGlBuffer,
    pub color_buffer: WebGlBuffer,
    pub x: f32,
    pub y: f32,
    pub translation_matrix: glm::Mat4,
}
/// Top-level browser rendering context: DOM handles, WebGL2 context,
/// pending input events, camera, and the entity spatial hash.
pub struct BrowserDisplay<'a> {
    pub window: web_sys::Window,
    pub canvas: web_sys::HtmlCanvasElement,
    pub wrapper: web_sys::HtmlDivElement,
    pub ctx: WebGl2RenderingContext,
    // Shared queue of input events (filled by JS callbacks — TODO confirm).
    pub events: Rc<RefCell<Vec<rdt::Event>>>,
    pub props: rdt::DisplayProperties,
    pub camera: Camera,
    pub(crate) space_hash: SpaceHash,
    pub(crate) font_mgr: FontMgr<'a>,
}
pub type V3 = glm::Vec3;
/// Perspective camera state; `perspective` is presumably derived from
/// `fov`/`aspect`/`z_near`/`z_far` — TODO confirm where it is rebuilt.
pub struct Camera {
    pub pos: V3,
    pub fov: f32,
    pub aspect: f32,
    pub z_near: f32,
    pub z_far: f32,
    pub perspective: glm::Mat4x4,
    pub forward: V3,
    pub up: V3,
}
/// Background grid, pre-tessellated into meshes.
pub struct Grid {
    pub meshes: Vec<Mesh>,
}
// pub struct CompoundMesh {
//     pub meshes: Vec<Mesh>,
// }
/// A rendered text string with its glyph mesh.
pub struct Text {
    pub color: rdt::Color,
    pub text: String,
    pub font_mesh: FontMesh,
}
// some small numbers
// NOTE(review): 1e-12 is far below f32::EPSILON (~1.19e-7), so for f32
// values near 1.0 this tolerance behaves like exact equality — confirm
// the intended magnitude.
pub const EPSILON32: f32 = 1e-12;
pub const EPSILON64: f64 = 1e-12;
|
use permutohedron::Heap;
use std::collections::HashMap;
/// Machine word of the Intcode VM.
pub type Word = i64;
/// An Intcode machine: program counter, sparse memory, pending input,
/// collected outputs, and the relative-base register.
pub struct Computer {
    input: Option<Word>,
    pc: Word,
    pub memory: Memory,
    pub outputs: Vec<Word>,
    halted: bool,
    relative_base: Word,
}
/// Sparse Intcode memory: addresses never written read as zero.
#[derive(Debug)]
pub struct Memory {
    mem: HashMap<Word, Word>,
}
impl Memory {
    /// Parses a comma-separated program; cell `i` holds the `i`-th value.
    pub fn load(text: &str) -> Memory {
        let mem = text
            .split(',')
            .enumerate()
            .map(|(i, word)| {
                let value = word.parse::<Word>().expect("memory cell must be an integer");
                (i as Word, value)
            })
            .collect();
        Self { mem }
    }
    /// Reads the value at `ptr`; unwritten cells default to 0.
    ///
    /// # Panics
    /// Panics on negative addresses.
    pub fn read(&mut self, ptr: Word) -> Word {
        if ptr < 0 {
            panic!("out of bounds");
        }
        // Entry API: one lookup instead of a get-then-insert pair.
        *self.mem.entry(ptr).or_insert(0)
    }
    /// Writes `value` at `ptr`.
    ///
    /// # Panics
    /// Panics on negative addresses.
    pub fn write(&mut self, ptr: Word, value: Word) {
        if ptr < 0 {
            panic!("out of bounds");
        }
        self.mem.insert(ptr, value);
    }
    /// Renders memory as comma-separated values in address order
    /// (used by the tests to compare final machine state).
    fn print(&self) -> String {
        let mut keys: Vec<&Word> = self.mem.keys().collect();
        keys.sort();
        keys.iter()
            .map(|i| self.mem[*i].to_string())
            .collect::<Vec<String>>()
            .join(",")
    }
}
/// Parameter addressing mode encoded in an instruction's mode digits.
#[derive(Debug)]
enum Mode {
    Position,
    Immediate,
    Relative,
}
impl Mode {
    /// Decodes a single mode digit (0, 1 or 2).
    fn new(num: Word) -> Self {
        match num {
            0 => Mode::Position,
            1 => Mode::Immediate,
            2 => Mode::Relative,
            _ => panic!("unknown mode"),
        }
    }
}
/// A decoded Intcode instruction with the addressing mode of each operand.
#[derive(Debug)]
enum Opcode {
    Add(Mode, Mode, Mode),
    Mult(Mode, Mode, Mode),
    Input(Mode),
    Output(Mode),
    // Jump-if-true / jump-if-false.
    JIT(Mode, Mode),
    JIF(Mode, Mode),
    LT(Mode, Mode, Mode),
    Eq(Mode, Mode, Mode),
    // Adjust relative base.
    ARB(Mode),
    Halt,
}
impl Opcode {
    /// Decodes a raw instruction word: the two low decimal digits are the
    /// operation; the third, fourth and fifth digits are the modes of
    /// operands 1, 2 and 3 respectively.
    fn new(opcode: Word) -> Self {
        let op = opcode % 100;
        let mode1 = Mode::new((opcode / 100) % 10);
        let mode2 = Mode::new((opcode / 1000) % 10);
        let mode3 = Mode::new((opcode / 10000) % 10);
        match op {
            1 => Opcode::Add(mode1, mode2, mode3),
            2 => Opcode::Mult(mode1, mode2, mode3),
            3 => Opcode::Input(mode1),
            4 => Opcode::Output(mode1),
            5 => Opcode::JIT(mode1, mode2),
            6 => Opcode::JIF(mode1, mode2),
            7 => Opcode::LT(mode1, mode2, mode3),
            8 => Opcode::Eq(mode1, mode2, mode3),
            9 => Opcode::ARB(mode1), //adjust relative base
            99 => Opcode::Halt,
            x => panic!("unknown opcode: {}", x),
        }
    }
}
impl Computer {
    /// Parses `text` as a comma-separated Intcode program and returns a
    /// machine positioned at the first instruction.
    /// (The previous temporary binding was needlessly `mut`; it is gone.)
    pub fn load(text: &str) -> Computer {
        Computer {
            input: None,
            pc: 0,
            memory: Memory::load(text),
            outputs: vec![],
            halted: false,
            relative_base: 0,
        }
    }
    /// Queues `input` for the next `Input` instruction to consume.
    ///
    /// # Panics
    /// Panics if a previously queued input has not been consumed yet.
    pub fn input(&mut self, input: Word) {
        match self.input {
            None => self.input = Some(input),
            Some(_) => panic!("already have input!"),
        }
    }
    /// Convenience wrapper: queue `input`, then run until the machine halts
    /// or blocks waiting for more input.
    pub fn run_with_input(&mut self, input: Word) {
        self.input(input);
        self.run();
    }
    /// Executes instructions until `step` reports it cannot continue.
    pub fn run(&mut self) {
        while self.step() {}
    }
    /// True once a `Halt` (99) instruction has executed.
    pub fn halted(&self) -> bool {
        self.halted
    }
    /// Executes one instruction. Returns `false` when execution cannot
    /// proceed: either the machine halted, or an `Input` instruction found
    /// no queued input (the pc is rewound so the instruction retries on the
    /// next `run`).
    fn step(&mut self) -> bool {
        let opcode = Opcode::new(self.read_and_advance());
        match opcode {
            Opcode::Add(mode1, mode2, mode3) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                let output_addr = self.read_and_advance();
                let result = inputs[0] + inputs[1];
                self.write(output_addr, result, mode3);
            }
            Opcode::Mult(mode1, mode2, mode3) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                let output_addr = self.read_and_advance();
                let result = inputs[0] * inputs[1];
                self.write(output_addr, result, mode3);
            }
            Opcode::Input(mode1) => {
                match self.input {
                    Some(input) => {
                        let output_addr = self.read_and_advance();
                        self.write(output_addr, input, mode1);
                        self.input = None;
                    }
                    None => {
                        // move pc back and wait for more input
                        self.pc -= 1;
                        return false;
                    }
                }
            }
            Opcode::Output(mode1) => {
                let inputs = self.get_operands(vec![mode1]);
                let result = inputs[0];
                self.outputs.push(result);
            }
            Opcode::JIT(mode1, mode2) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                if inputs[0] != 0 {
                    self.pc = inputs[1];
                }
            }
            Opcode::JIF(mode1, mode2) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                if inputs[0] == 0 {
                    self.pc = inputs[1];
                }
            }
            Opcode::LT(mode1, mode2, mode3) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                let output_addr = self.read_and_advance();
                if inputs[0] < inputs[1] {
                    self.write(output_addr, 1, mode3);
                } else {
                    self.write(output_addr, 0, mode3);
                }
            }
            Opcode::Eq(mode1, mode2, mode3) => {
                let inputs = self.get_operands(vec![mode1, mode2]);
                let output_addr = self.read_and_advance();
                if inputs[0] == inputs[1] {
                    self.write(output_addr, 1, mode3);
                } else {
                    self.write(output_addr, 0, mode3);
                }
            }
            // adjust relative base
            Opcode::ARB(mode1) => {
                let inputs = self.get_operands(vec![mode1]);
                self.relative_base += inputs[0];
            }
            Opcode::Halt => {
                self.halted = true;
                return false;
            }
        }
        true
    }
    /// Reads the cell at the program counter and advances the counter.
    fn read_and_advance(&mut self) -> Word {
        let out = self.memory.read(self.pc);
        self.pc += 1;
        out
    }
    /// Stores `value` at the destination `addr`, resolved per `mode`.
    /// Immediate mode is invalid for writes.
    fn write(&mut self, addr: Word, value: Word, mode: Mode) {
        match mode {
            Mode::Position => self.memory.write(addr, value),
            Mode::Immediate => panic!("can't write in immediate mode"),
            Mode::Relative => self.memory.write(self.relative_base + addr, value),
        }
    }
    /// Fetches one operand value per mode, advancing the program counter.
    fn get_operands(&mut self, modes: Vec<Mode>) -> Vec<Word> {
        let mut output = vec![];
        for mode in modes {
            let value = match mode {
                Mode::Position => {
                    let pointer = self.read_and_advance();
                    self.memory.read(pointer)
                }
                Mode::Immediate => self.read_and_advance(),
                Mode::Relative => {
                    let pointer = self.relative_base + self.read_and_advance();
                    self.memory.read(pointer)
                }
            };
            output.push(value);
        }
        output
    }
}
/// AoC 2019 day 7 part 1: tries every permutation of phase settings 0..=4
/// across five amplifiers run in series and returns the largest final output.
pub fn day7(input: &str) -> Word {
    let mut phase_settings = vec![0, 1, 2, 3, 4];
    let heap = Heap::new(&mut phase_settings);
    let mut max_output = 0;
    for permutation in heap {
        let mut last_output = 0;
        for i in permutation {
            // Fresh machine per amplifier: first input is its phase setting,
            // second is the previous amplifier's output.
            let mut computer = Computer::load(input);
            computer.run_with_input(i);
            if !computer.halted {
                computer.run_with_input(last_output);
            }
            // NOTE(review): reads outputs[0] — assumes each amplifier emits
            // exactly one output per run; confirm for the programs used.
            last_output = computer.outputs[0]
        }
        if last_output > max_output {
            max_output = last_output;
        }
    }
    max_output
}
/// Index of the element before `i` in a ring of `max` elements
/// (wraps from 0 back to `max - 1`).
fn prev_index(i: usize, max: usize) -> usize {
    match i {
        0 => max - 1,
        _ => i - 1,
    }
}
/// AoC 2019 day 7 part 2: feedback loop — five amplifiers with phase
/// settings 5..=9, each feeding the next, cycling until all halt; returns
/// the maximum final output of the last amplifier over all permutations.
pub fn day7_2(input: &str) -> Word {
    let mut phase_settings = vec![5, 6, 7, 8, 9];
    let heap = Heap::new(&mut phase_settings);
    let mut max_output = 0;
    for permutation in heap {
        // Boot each amplifier with its phase setting; each then blocks on
        // its Input instruction awaiting a signal.
        let mut computers = vec![];
        for i in permutation {
            let mut computer = Computer::load(input);
            computer.run_with_input(i);
            computers.push(computer);
        }
        let mut num_halted = 0;
        let mut i = 1;
        // Kick off the loop: amplifier 0 receives the initial signal 0.
        computers[0].run_with_input(0);
        while num_halted < computers.len() {
            i = i % computers.len();
            if computers[i].halted {
                // NOTE(review): incremented on EVERY visit to an
                // already-halted machine, not once per machine — if one
                // machine halts much earlier than the rest this can reach
                // computers.len() while others are still runnable. The
                // original comment ("this may be a bug") stands; confirm
                // against real puzzle inputs.
                num_halted += 1; // this may be a bug
            } else {
                // Feed the newest output of the previous amplifier in.
                let output = *computers[prev_index(i, computers.len())]
                    .outputs
                    .last()
                    .unwrap();
                computers[i].run_with_input(output);
            }
            i += 1;
        }
        let last_output = *computers[computers.len() - 1].outputs.last().unwrap();
        if last_output > max_output {
            max_output = last_output;
        }
    }
    max_output
}
#[cfg(test)]
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    // Day 2 / day 5 example programs: run and compare final memory state.
    #[test]
    fn test_inputer() {
        let t = vec![
            ["1,0,0,0,99", "2,0,0,0,99"],
            ["2,3,0,3,99", "2,3,0,6,99"],
            ["2,4,4,5,99,0", "2,4,4,5,99,9801"],
            ["1,1,1,4,99,5,6,0,99", "30,1,1,4,2,5,6,0,99"],
            ["1002,4,3,4,33", "1002,4,3,4,99"],
        ];
        for [input, output] in t.into_iter() {
            let mut computer = Computer::load(input);
            computer.run();
            assert_eq!(computer.memory.print(), output);
        }
    }
    // Day 5 part 2 example: outputs 999/1000/1001 for input <8 / ==8 / >8.
    #[test]
    fn test_part2() {
        let input = "3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99";
        let mut computer = Computer::load(input);
        computer.run_with_input(1);
        assert_eq!(vec![999], computer.outputs);
        let mut computer = Computer::load(input);
        computer.run_with_input(8);
        assert_eq!(vec![1000], computer.outputs);
        let mut computer = Computer::load(input);
        computer.run_with_input(9);
        assert_eq!(vec![1001], computer.outputs);
    }
    // Day 7 part 1 examples with their known maximum thruster signals.
    #[test]
    fn test_day7() {
        assert_eq!(
            43210,
            day7("3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0")
        );
        assert_eq!(
            54321,
            day7("3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0")
        );
        assert_eq!(65210,day7("3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0"))
    }
    // Day 7 part 2 (feedback loop) examples.
    #[test]
    fn test_day7_2() {
        assert_eq!(139629729, day7_2("3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5"));
        assert_eq!(18216, day7_2("3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10"));
    }
    // Day 9 examples: quine, 16-digit multiply, large immediate output.
    #[test]
    fn test_day9_1() {
        let input = "109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99";
        let mut computer = Computer::load(input);
        computer.run();
        assert_eq!(
            vec![109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99],
            computer.outputs
        );
        let input = "1102,34915192,34915192,7,4,7,99,0";
        let mut computer = Computer::load(input);
        computer.run();
        assert_eq!(vec![1219070632396864], computer.outputs);
        let input = "104,1125899906842624,99";
        let mut computer = Computer::load(input);
        computer.run();
        assert_eq!(vec![1125899906842624], computer.outputs);
    }
}
|
///
/// Wraps `s` at each `width`-th character adding `wrapstr` as a kind of line ending.
///
/// Existing newlines in `s` always force a wrap; runs of whitespace collapse
/// to a single space. A word longer than `width` is emitted on a line of its
/// own. NOTE: the overflow check does not count the joining space, so a line
/// may reach `width + 1` characters — preserved from the original behaviour
/// (the module's tests depend on it).
pub fn wrap_words(s: &str, width: u32, wrapstr: &str) -> String {
    let limit = width as usize;
    let mut wrapped = Vec::<String>::new();
    for input_line in s.lines() {
        let mut current = String::new();
        for word in input_line.split_whitespace() {
            if current.len() + word.len() > limit {
                // Flush the full line; `take` leaves `current` empty for the
                // overflowing word, which starts the next line.
                if !current.is_empty() {
                    wrapped.push(std::mem::take(&mut current));
                }
            } else if !current.is_empty() {
                current.push(' ');
            }
            current.push_str(word);
        }
        if !current.is_empty() {
            wrapped.push(current);
        }
    }
    wrapped.join(wrapstr)
}
#[cfg(test)]
mod test {
    use super::*;
    // Long paragraph wrapped at 50 columns with plain newlines.
    #[test]
    fn simple() {
        let input = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.";
        let expected = concat!(
            "Lorem ipsum dolor sit amet, consetetur sadipscing\n",
            "elitr, sed diam nonumy eirmod tempor invidunt ut\n",
            "labore et dolore magna aliquyam erat, sed diam\n",
            "voluptua. At vero eos et accusam et justo duo\n",
            "dolores et ea rebum. Stet clita kasd gubergren, no\n",
            "sea takimata sanctus est Lorem ipsum dolor sit\n",
            "amet. Lorem ipsum dolor sit amet, consetetur\n",
            "sadipscing elitr, sed diam nonumy eirmod tempor\n",
            "invidunt ut labore et dolore magna aliquyam erat,\n",
            "sed diam voluptua. At vero eos et accusam et justo\n",
            "duo dolores et ea rebum. Stet clita kasd gubergren,\n",
            "no sea takimata sanctus est Lorem ipsum dolor sit\n",
            "amet."
        );
        let out = wrap_words(input, 50, "\n");
        assert_eq!(out, expected);
    }
    // Input shorter than the width passes through unchanged.
    #[test]
    fn shorter() {
        let input = "Lorem ipsum dolor sit amet, consetetur";
        let expected = "Lorem ipsum dolor sit amet, consetetur".to_owned();
        let out = wrap_words(input, 50, "\n");
        assert_eq!(out, expected);
    }
    // Whitespace-only input collapses to the empty string.
    #[test]
    fn empty_line() {
        let input = " ";
        let expected = "".to_owned();
        let out = wrap_words(input, 50, "\n");
        assert_eq!(out, expected);
    }
    // A non-newline separator (HTML line break) is inserted between lines.
    #[test]
    fn wrap_string() {
        let input = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt";
        let expected = concat!(
            "Lorem ipsum dolor sit amet, consetetur sadipscing<br align=\"left\"/>",
            "elitr, sed diam nonumy eirmod tempor invidunt",
        );
        let out = wrap_words(input, 50, "<br align=\"left\"/>");
        assert_eq!(out, expected);
    }
    // Existing newlines in the input force wraps even under the width.
    #[test]
    fn with_newlines() {
        let input = "Lorem ipsum dolor sit amet,\nconsetetur sadipscing\nelitr, sed diam nonumy eirmod tempor invidunt";
        let expected = concat!(
            "Lorem ipsum dolor sit amet,<br align=\"left\"/>",
            "consetetur sadipscing<br align=\"left\"/>",
            "elitr, sed diam nonumy eirmod tempor invidunt",
        );
        let out = wrap_words(input, 50, "<br align=\"left\"/>");
        assert_eq!(out, expected);
    }
    // A single word longer than the width is not split.
    #[test]
    fn even_shorter() {
        let input = "Devide";
        let out = wrap_words(input, 5, "\n");
        assert_eq!(input, out);
    }
}
|
use crate::{Scene, point, GRID_HORZ_COUNT, GRID_VERT_COUNT, SceneParams};
use ggez::{Context, GameError};
use ggez::event::KeyCode;
use crate::data::maps::{Map, read_map_file};
use crate::graphics::renderer::*;
use std::rc::Rc;
use crate::graphics::map_rendering::{draw_map_with_costs, draw_map_with_costs_start_end};
use std::collections::HashMap;
// Keys used to remember cursor positions between visits to this screen.
// (`'static` is implied on `const` references and was redundant.)
const MAP_CURSOR_ID: &str = "map_highlighted";
const VARIANT_CURSOR_ID: &str = "variant_highlighted";
/// Scene that lets the player browse the available maps and their variants.
pub struct MapPicker {
    // All loadable maps (filled by `setup`).
    maps: Vec<Rc<Map>>,
    // Index of the map the player has confirmed, if any.
    selected: Option<usize>,
    // Index of the map currently under the cursor.
    highlighted: usize,
    // Index of the highlighted map's variant under the cursor.
    variant_highlighted: usize,
}
impl MapPicker {
    /// Creates a picker with no maps loaded, restoring any cursor positions
    /// remembered from a previous visit (defaulting to the first entry).
    pub fn new(cursor_mem: &HashMap<&str, usize>) -> MapPicker {
        let map_cursor = cursor_mem.get(MAP_CURSOR_ID).copied().unwrap_or(0);
        let variant_cursor = cursor_mem.get(VARIANT_CURSOR_ID).copied().unwrap_or(0);
        MapPicker {
            maps: Vec::new(),
            selected: None,
            highlighted: map_cursor,
            variant_highlighted: variant_cursor,
        }
    }
}
impl MapPicker {
    /// Loads the ten map files this picker lets the player choose from.
    pub fn setup(&mut self, ctx: &mut Context, _renderer: &mut Renderer) -> Result<(), GameError> {
        for map_idx in 0..10 {
            let map = read_map_file(ctx, map_idx);
            self.maps.push(Rc::new(map));
        }
        Ok(())
    }
    /// Picks a map cell size for the given screen resolution: the smaller of
    /// the width-derived and height-derived candidates, with floors of 5 for
    /// very narrow and 7 for very short screens.
    fn get_cell_size_for_screen(size: (f32, f32)) -> f32 {
        let (screen_w, screen_h) = size;
        let by_width = if screen_w >= 3840. {
            24.
        } else if screen_w >= 2560. {
            16.
        } else if screen_w >= 1920. {
            12.
        } else if screen_w >= 1600. {
            10.
        } else if screen_w >= 1280. {
            8.
        } else if screen_w >= 1024. {
            6.
        } else {
            return 5.;
        };
        let by_height = if screen_h >= 2160. {
            25.
        } else if screen_h >= 1440. {
            16.
        } else if screen_h >= 1080. {
            12.
        } else if screen_h >= 768. {
            9.
        } else {
            return 7.;
        };
        if by_width < by_height { by_width } else { by_height }
    }
}
impl Scene for MapPicker {
fn update(&mut self, _ctx: &mut Context) -> Result<(), GameError> {
Ok(())
}
    /// Draws the picker UI:
    ///  - left column: vertically scrolling map list centred on the
    ///    highlighted map, with up to two neighbours above/below,
    ///  - a small list indicator between the columns,
    ///  - right area: horizontally scrolling variant previews of the
    ///    highlighted map, current variant outlined in cyan.
    fn render(&mut self, ctx: &mut Context, renderer: &mut Renderer) -> Result<(), GameError> {
        // Cell size adapts to the resolution so one whole map grid fits.
        let screen_size = Renderer::get_screen_size(ctx);
        let cell_size = MapPicker::get_cell_size_for_screen(screen_size);
        let grid_size = (cell_size * GRID_HORZ_COUNT as f32, cell_size * GRID_VERT_COUNT as f32);
        let grid_spacing = screen_size.1 * 0.05;
        let indicator_size = renderer.calc_height(0.03);
        let indicator_spacing = renderer.calc_height(0.03);
        // Horizontal gap between variant previews, taken from the width left
        // over after four grids plus the indicator strip.
        let variant_spacing = (screen_size.0 - (grid_size.0 * 4.) - (indicator_spacing * 2.) - indicator_size) * 0.3;
        let grid_offset = (grid_spacing, (screen_size.1 * 0.5) - (grid_size.1 * 0.5));
        let indicator_pos = (grid_spacing + grid_size.0 + indicator_spacing, (screen_size.1 * 0.5) - (indicator_size * 0.5));
        let variant_offset = (indicator_pos.0 + indicator_size + indicator_spacing + grid_size.0 * 0.5 + variant_spacing, (screen_size.1 * 0.5) - (grid_size.1 * 0.5));
        // Variants: previous one (if any), the current one, up to two ahead.
        // NOTE(review): assumes every map has at least one variant —
        // `variants.len() - 1` underflows otherwise; confirm map data.
        if self.variant_highlighted > 0 {
            draw_map_with_costs_start_end(ctx, renderer, (variant_offset.0 - grid_size.0 - variant_spacing, variant_offset.1), cell_size, self.maps[self.highlighted].as_ref(), self.variant_highlighted - 1)?;
        }
        draw_map_with_costs_start_end(ctx, renderer, variant_offset, cell_size, self.maps[self.highlighted].as_ref(), self.variant_highlighted)?;
        if self.variant_highlighted < (self.maps[self.highlighted].variants.len() - 1) {
            draw_map_with_costs_start_end(ctx, renderer, (variant_offset.0 + grid_size.0 + variant_spacing, variant_offset.1), cell_size, self.maps[self.highlighted].as_ref(), self.variant_highlighted + 1)?;
            if self.variant_highlighted < (self.maps[self.highlighted].variants.len() - 2) {
                draw_map_with_costs_start_end(ctx, renderer, (variant_offset.0 + (grid_size.0 + variant_spacing) * 2., variant_offset.1), cell_size, self.maps[self.highlighted].as_ref(), self.variant_highlighted + 2)?;
            }
        }
        // Opaque backdrop behind the map column hides variant previews that
        // scroll off to the left (drawn after the variants, before the maps).
        let grid_background = renderer.make_rect_mesh(ctx, indicator_pos.0 + indicator_spacing * 2., screen_size.1, true, 0.)?;
        renderer.draw_coloured_mesh(ctx, grid_background.as_ref(), point(0., 0.), (0, 0, 0, 255).into());
        // Map list: highlighted map centred, up to two neighbours each way.
        if self.highlighted > 0 {
            draw_map_with_costs(ctx, renderer, (grid_offset.0, grid_offset.1 - grid_spacing - grid_size.1), cell_size, self.maps[self.highlighted - 1].as_ref())?;
            if self.highlighted > 1 {
                draw_map_with_costs(ctx, renderer, (grid_offset.0, grid_offset.1 - ((grid_spacing + grid_size.1) * 2.)), cell_size, self.maps[self.highlighted - 2].as_ref())?;
            }
        }
        draw_map_with_costs(ctx, renderer, grid_offset, cell_size, self.maps[self.highlighted].as_ref())?;
        if self.highlighted < (self.maps.len() - 1) {
            draw_map_with_costs(ctx, renderer, (grid_offset.0, grid_offset.1 + grid_spacing + grid_size.1), cell_size, self.maps[self.highlighted + 1].as_ref())?;
            if self.highlighted < (self.maps.len() - 2) {
                draw_map_with_costs(ctx, renderer, (grid_offset.0, grid_offset.1 + (grid_spacing + grid_size.1) * 2.), cell_size, self.maps[self.highlighted + 2].as_ref())?;
            }
        }
        // Cyan outline (6px stroke, 4px inset) around the current variant.
        let highlight_mesh = renderer.make_rect_mesh(ctx, grid_size.0 + 8., grid_size.1 + 8., false, 6.)?;
        renderer.draw_coloured_mesh(ctx, highlight_mesh.as_ref(), point(variant_offset.0 - 4., variant_offset.1 - 4.), (0., 1., 1., 1.).into());
        let indicator = renderer.make_list_indicator_mesh(ctx, indicator_size)?;
        renderer.draw_mesh(ctx, indicator.as_ref(), point(indicator_pos.0, indicator_pos.1));
        // One translucent mesh drawn four times fades the off-centre rows:
        // strong fade at the extremes, light fade for the direct neighbours.
        let grid_shader = renderer.make_rect_mesh(ctx, grid_size.0 * 1.1, grid_size.1, true, 0.)?;
        renderer.draw_coloured_mesh(ctx, grid_shader.as_ref(), point(grid_offset.0 - 10., -grid_spacing), (0., 0., 0., 0.75).into());
        renderer.draw_coloured_mesh(ctx, grid_shader.as_ref(), point(grid_offset.0 - 10., grid_offset.1 - grid_spacing - grid_size.1), (0., 0., 0., 0.4).into());
        renderer.draw_coloured_mesh(ctx, grid_shader.as_ref(), point(grid_offset.0 - 10., grid_offset.1 + grid_spacing + grid_size.1), (0., 0., 0., 0.4).into());
        renderer.draw_coloured_mesh(ctx, grid_shader.as_ref(), point(grid_offset.0 - 10., screen_size.1 - grid_size.1 * 0.8), (0., 0., 0., 0.75).into());
        renderer.draw_white_text(ctx, String::from("Choose map and variant"), point(screen_size.0 / 2., 50.), renderer.calc_height(0.04), true);
        Ok(())
    }
fn on_button_down(&mut self, keycode: KeyCode) {
match keycode {
KeyCode::Up => {
if self.highlighted > 0 {
self.highlighted -= 1;
self.variant_highlighted = 0;
}
}
KeyCode::Down => {
if self.highlighted < 9 {
self.highlighted += 1;
self.variant_highlighted = 0;
}
}
KeyCode::Left => {
if self.variant_highlighted > 0 {
self.variant_highlighted -= 1;
}
}
KeyCode::Right => {
if self.variant_highlighted < self.maps[self.highlighted].variants.len() - 1 {
self.variant_highlighted += 1;
}
}
_ => {}
}
}
fn on_button_up(&mut self, keycode: KeyCode) {
match keycode {
KeyCode::Return => {
self.selected = Some(self.highlighted);
}
_ => {}
}
}
fn is_complete(&self) -> bool {
return self.selected.is_some();
}
fn get_next_stage_params(&self, cursor_mem: &mut HashMap<&str, usize>) -> SceneParams {
cursor_mem.insert(MAP_CURSOR_ID, self.highlighted);
cursor_mem.insert(VARIANT_CURSOR_ID, self.variant_highlighted);
return SceneParams::AlgoSelection {
map: self.maps[self.selected.unwrap()].clone(),
variant: self.variant_highlighted,
};
}
} |
/*!
```rudra-poc
[target]
crate = "algorithmica"
version = "0.1.8"
[report]
issue_url = "https://github.com/AbrarNitk/algorithmica/issues/1"
issue_date = 2021-03-07
rustsec_url = "https://github.com/RustSec/advisory-db/pull/872"
rustsec_id = "RUSTSEC-2021-0053"
[[bugs]]
analyzer = "UnsafeDataflow"
bug_class = "PanicSafety"
rudra_report_locations = ["src/sort/merge_sort.rs:9:1: 55:2"]
```
!*/
#![forbid(unsafe_code)]
use algorithmica::sort::merge_sort::sort;
fn main() {
    // Proof-of-concept for RUSTSEC-2021-0053 (see the header metadata):
    // `algorithmica`'s merge sort duplicates elements internally, so sorting
    // values that implement `Drop` (here, `String`s) runs both copies'
    // destructors — a double free. This code is *intentionally* broken and
    // must not be "fixed"; it exists to reproduce the advisory.
    let mut arr = vec![
        String::from("Hello"),
        String::from("World"),
        String::from("Rust"),
    ];
    // Calling `merge_sort::sort` on an array of `T: Drop` triggers double drop
    algorithmica::sort::merge_sort::sort(&mut arr);
    dbg!(arr);
}
|
use std::fmt;
use std::iter::Peekable;
use std::str::CharIndices;
/// The kinds of lexical token produced by [`Tokenizer`].
#[derive(Debug)]
pub enum TokenKind {
    LParen,
    RParen,
    LBracket,
    RBracket,
    /// The `'` quoting prefix.
    Quote,
    /// A bare identifier / symbol.
    Name(String),
    /// A decimal or `0x`-prefixed hexadecimal integer (optional leading `-`).
    Integer(i64),
    /// A double-quoted string literal with escapes already resolved.
    String(String)
}
/// One token together with the byte offset where it started.
#[derive(Debug)]
pub struct Token {
    pub kind: TokenKind,
    /// Byte offset of the token's first character in the source text.
    pub pos: usize
}
impl Token {
    /// Builds one of the single-character punctuation tokens.
    /// Callers must only pass one of `( ) [ ] '`.
    fn new_simple(ch: char, pos: usize) -> Token {
        Token {
            kind: match ch {
                '(' => TokenKind::LParen,
                ')' => TokenKind::RParen,
                '[' => TokenKind::LBracket,
                ']' => TokenKind::RBracket,
                '\'' => TokenKind::Quote,
                _ => unreachable!()
            },
            pos
        }
    }
    fn new_name(name: String, pos: usize) -> Token {
        Token {
            kind: TokenKind::Name(name),
            pos
        }
    }
    fn new_integer(value: i64, pos: usize) -> Token {
        Token {
            kind: TokenKind::Integer(value),
            pos
        }
    }
    fn new_string(value: String, pos: usize) -> Token {
        Token {
            kind: TokenKind::String(value),
            pos
        }
    }
}
impl fmt::Display for Token {
    /// Human-readable name of the token's kind, used in diagnostics.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let readable_name = match self.kind {
            TokenKind::LParen => "opening parenthesis",
            TokenKind::RParen => "closing parenthesis",
            TokenKind::LBracket => "opening bracket",
            TokenKind::RBracket => "closing bracket",
            TokenKind::Quote => "quote",
            TokenKind::Name(_) => "name",
            TokenKind::Integer(_) => "integer",
            TokenKind::String(_) => "string"
        };
        write!(f, "{}", readable_name)
    }
}
/// Error raised by [`Tokenizer::tokenize`], carrying the offending position.
#[derive(Debug)]
pub struct TokenizeError {
    pub message: String,
    pub pos: usize
}
/// One consumed character plus a one-character lookahead.
struct Consumed {
    this: char,
    next: Option<char>,
    pos: usize
}
pub struct Tokenizer<'a> {
    it: Peekable<CharIndices<'a>>
}
impl<'a> Tokenizer<'a> {
    pub fn new_from_source(source: &'a str) -> Tokenizer {
        Tokenizer {
            it: source.char_indices().peekable(),
        }
    }
    /// True for characters that terminate a name or integer token:
    /// whitespace and the bracket/string delimiters (which are tokens in
    /// their own right).
    ///
    /// Fix: names previously only stopped at whitespace, `(`, `)` and `"`,
    /// so `[a]` lexed as the single name `a]` and the closing bracket was
    /// swallowed. Integers previously errored out on `(`, `[` and `"`.
    /// Both scanners now share this delimiter set. `'` is deliberately NOT
    /// a delimiter, matching the old behaviour for names containing quotes.
    fn is_delimiter(ch: char) -> bool {
        ch.is_whitespace() || matches!(ch, '(' | ')' | '[' | ']' | '"')
    }
    /// Advances one character, returning it with its position and a peek at
    /// the following character.
    fn consume_next(&mut self) -> Option<Consumed> {
        let (pos, this) = self.it.next()?;
        let next = self.it.peek().map(|v| v.1);
        Some(Consumed{this, next, pos})
    }
    /// Lexes the whole source into tokens.
    ///
    /// Supported forms: punctuation `( ) [ ] '`, double-quoted strings with
    /// `\"`, `\t`, `\n` escapes, decimal and `0x` hexadecimal integers with
    /// an optional leading `-`, `#`-to-end-of-line comments, and names
    /// (anything else, up to the next delimiter).
    pub fn tokenize(&mut self) -> Result<Vec<Token>, TokenizeError> {
        let mut tokens = Vec::<Token>::new();
        while let Some(c) = self.consume_next() {
            match (c.this, c.next) {
                ('('|')'|'['|']'|'\'', _) => tokens.push(Token::new_simple(c.this, c.pos)),
                ('"', _) => {
                    let mut content = String::new();
                    loop {
                        let s = self.consume_next();
                        if let Some(c) = s {
                            let v = if c.this == '\\' {
                                if let Some(next) = c.next {
                                    // Consume the character after the backslash.
                                    self.consume_next();
                                    match next {
                                        '"' => '"',
                                        't' => '\t',
                                        'n' => '\n',
                                        _ => return Err(TokenizeError{
                                            message: format!("Unknown escape sequence '\\{}'", next),
                                            pos: c.pos
                                        })
                                    }
                                } else {
                                    return Err(TokenizeError{
                                        message: "Unexpected end of file".to_string(),
                                        pos: c.pos
                                    });
                                }
                            } else if c.this == '"' {
                                // Closing quote: leave the loop without
                                // pushing anything.
                                break;
                            } else {
                                c.this
                            };
                            content.push(v);
                        } else {
                            return Err(TokenizeError{
                                message: "Unterminated string".to_string(),
                                pos: c.pos
                            });
                        }
                    }
                    // Position is that of the opening quote (outer `c`).
                    tokens.push(Token::new_string(content, c.pos));
                },
                ('0'..='9', _)|('-', Some('0'..='9')) => {
                    let sign: i64 = if c.this == '-' { -1 } else { 1 };
                    let mut value: i64 = if c.this == '-' { 0 } else { c.this.to_digit(10).unwrap() as i64 };
                    let base = if c.this == '0' && c.next == Some('x') {
                        self.consume_next();
                        16
                    } else {
                        10
                    };
                    while let Some((_, ch)) = self.it.peek() {
                        // Any delimiter ends the literal. (Previously only
                        // whitespace, `)` and `]` did; `1(` was an error.)
                        if Self::is_delimiter(*ch) {
                            break;
                        }
                        let s = self.consume_next().unwrap();
                        if !s.this.is_digit(base) {
                            return Err(TokenizeError{
                                message: "Unexpected character in integer literal".to_string(),
                                pos: s.pos
                            });
                        }
                        value *= base as i64;
                        value += s.this.to_digit(base).unwrap() as i64;
                    }
                    value *= sign;
                    tokens.push(Token::new_integer(value, c.pos));
                },
                ('#', _) => {
                    // Line comment: discard everything up to the newline.
                    while let Some(c) = self.consume_next() {
                        if c.this == '\n' {
                            break;
                        }
                    }
                },
                _ if !c.this.is_whitespace() => {
                    let mut name = String::new();
                    name.push(c.this);
                    while let Some((_, ch)) = self.it.peek() {
                        // Fix: brackets now terminate names too, so `[a]`
                        // lexes as LBracket, Name("a"), RBracket.
                        if Self::is_delimiter(*ch) {
                            break;
                        }
                        let s = self.consume_next().unwrap();
                        name.push(s.this);
                    }
                    tokens.push(Token::new_name(name, c.pos));
                },
                _ => {}
            }
        }
        Ok(tokens)
    }
}
|
use typesense::Document;
use serde::{Serialize, Deserialize};
// Exercises the `Document` derive on a simple collection schema.
#[derive(Document, Serialize, Deserialize)]
struct Company {
    company_name: String,
    num_employees: i32,
    // NOTE(review): `facet` is applied twice to the same field. This looks
    // like a compile-fail fixture exercising the derive's duplicate-attribute
    // diagnostics — confirm; if it is not intentional, one of the two
    // attributes should be removed.
    #[typesense(facet)]
    #[typesense(facet)]
    country_code: String,
}
fn main() {}
|
//! Provides Result type and Error type commonly used in apllodb workspace.
mod from;
pub(crate) mod session_error;
pub(crate) mod sqlstate;
use sqlstate::SqlState;
use std::{error::Error, fmt::Display};
use serde::{Deserialize, Serialize};
/// Result type commonly used in apllodb workspace.
pub type ApllodbResult<T> = Result<T, ApllodbError>;
/// Error type commonly used in apllodb workspace.
///
/// Note that `source` parameter is always serialized into `None`, so that, for example, a client cannot know what's the cause of a server's error.
#[derive(Debug, Serialize, Deserialize)]
pub struct ApllodbError {
    /// Machine-readable error type.
    kind: SqlState,
    /// Human-readable description of each error instance.
    desc: String,
    /// Source of this error if any.
    /// `impl From<FooError> for ApllodbError` is supposed to set this as `Some(foo_err)`
    ///
    /// Skipped by serde in both directions, so a round-tripped error always
    /// carries `source == None`; `desc` must therefore carry enough context
    /// on its own.
    #[serde(skip_serializing)]
    #[serde(skip_deserializing)]
    source: Option<Box<dyn Error + Sync + Send + 'static>>,
}
impl ApllodbError {
    /// General constructor.
    ///
    /// Pass `Some(SourceError)` if you have one.
    ///
    /// Kept private: callers go through the named constructors below so the
    /// `SqlState` for each situation is chosen in exactly one place.
    fn new(
        kind: SqlState,
        desc: impl ToString,
        source: Option<Box<dyn Error + Sync + Send + 'static>>,
    ) -> Self {
        Self {
            kind,
            desc: desc.to_string(),
            source,
        }
    }
    // Each constructor below maps one situation to its SQLSTATE. Only
    // `syntax_error` and `system_error` attach an underlying cause.
    /// Constructor of [SqlState::FeatureNotSupported](crate::SqlState::FeatureNotSupported).
    pub fn feature_not_supported(desc: impl ToString) -> Self {
        Self::new(SqlState::FeatureNotSupported, desc, None)
    }
    /// Constructor of [SqlState::ConnectionExceptionDatabaseNotOpen](crate::SqlState::ConnectionExceptionDatabaseNotOpen).
    pub fn connection_exception_database_not_open(desc: impl ToString) -> Self {
        Self::new(SqlState::ConnectionExceptionDatabaseNotOpen, desc, None)
    }
    /// Constructor of [SqlState::ConnectionExceptionDatabaseAlreadyOpen](crate::SqlState::ConnectionExceptionDatabaseAlreadyOpen).
    pub fn connection_exception_database_already_open(desc: impl ToString) -> Self {
        Self::new(SqlState::ConnectionExceptionDatabaseAlreadyOpen, desc, None)
    }
    /// Constructor of [SqlState::DataException](crate::SqlState::DataException).
    pub fn data_exception(desc: impl ToString) -> Self {
        Self::new(SqlState::DataException, desc, None)
    }
    /// Constructor of [SqlState::SyntaxErrorOrAccessRuleViolationSyntaxError](crate::SqlState::SyntaxErrorOrAccessRuleViolationSyntaxError).
    pub fn syntax_error(
        desc: impl ToString,
        source: Box<dyn Error + Sync + Send + 'static>,
    ) -> Self {
        Self::new(
            SqlState::SyntaxErrorOrAccessRuleViolationSyntaxError,
            desc,
            Some(source),
        )
    }
    /// Constructor of [SqlState::DataExceptionNumericValueOutOfRange](crate::SqlState::DataExceptionNumericValueOutOfRange).
    pub fn data_exception_numeric_value_out_of_range(desc: impl ToString) -> Self {
        Self::new(SqlState::DataExceptionNumericValueOutOfRange, desc, None)
    }
    /// Constructor of [SqlState::DataExceptionIllegalConversion](crate::SqlState::DataExceptionIllegalConversion).
    pub fn data_exception_illegal_conversion(desc: impl ToString) -> Self {
        Self::new(SqlState::DataExceptionIllegalConversion, desc, None)
    }
    /// Constructor of [SqlState::DataExceptionIllegalComparison](crate::SqlState::DataExceptionIllegalComparison).
    pub fn data_exception_illegal_comparison(desc: impl ToString) -> Self {
        Self::new(SqlState::DataExceptionIllegalComparison, desc, None)
    }
    /// Constructor of [SqlState::DataExceptionIllegalOperation](crate::SqlState::DataExceptionIllegalOperation).
    pub fn data_exception_illegal_operation(desc: impl ToString) -> Self {
        Self::new(SqlState::DataExceptionIllegalOperation, desc, None)
    }
    /// Constructor of [SqlState::IntegrityConstraintNotNullViolation](crate::SqlState::IntegrityConstraintNotNullViolation).
    pub fn integrity_constraint_not_null_violation(desc: impl ToString) -> Self {
        Self::new(SqlState::IntegrityConstraintNotNullViolation, desc, None)
    }
    /// Constructor of [SqlState::IntegrityConstraintUniqueViolation](crate::SqlState::IntegrityConstraintUniqueViolation).
    pub fn integrity_constraint_unique_violation(desc: impl ToString) -> Self {
        Self::new(SqlState::IntegrityConstraintUniqueViolation, desc, None)
    }
    /// Constructor of [SqlState::NameErrorNotFound](crate::SqlState::NameErrorNotFound).
    pub fn name_error_not_found(desc: impl ToString) -> Self {
        Self::new(SqlState::NameErrorNotFound, desc, None)
    }
    /// Constructor of [SqlState::NameErrorAmbiguous](crate::SqlState::NameErrorAmbiguous).
    pub fn name_error_ambiguous(desc: impl ToString) -> Self {
        Self::new(SqlState::NameErrorAmbiguous, desc, None)
    }
    /// Constructor of [SqlState::NameErrorDuplicate](crate::SqlState::NameErrorDuplicate).
    pub fn name_error_duplicate(desc: impl ToString) -> Self {
        Self::new(SqlState::NameErrorDuplicate, desc, None)
    }
    /// Constructor of [SqlState::NameErrorTooLong](crate::SqlState::NameErrorTooLong).
    pub fn name_error_too_long(desc: impl ToString) -> Self {
        Self::new(SqlState::NameErrorTooLong, desc, None)
    }
    /// Constructor of [SqlState::InvalidTransactionState](crate::SqlState::InvalidTransactionState).
    pub fn invalid_transaction_state(desc: impl ToString) -> Self {
        Self::new(SqlState::InvalidTransactionState, desc, None)
    }
    /// Constructor of [SqlState::TransactionRollbackDeadlock](crate::SqlState::TransactionRollbackDeadlock).
    pub fn transaction_rollback_deadlock(desc: impl ToString) -> Self {
        Self::new(SqlState::TransactionRollbackDeadlock, desc, None)
    }
    /// Constructor of [SqlState::DdlError](crate::SqlState::DdlError).
    pub fn ddl_error(desc: impl ToString) -> Self {
        Self::new(SqlState::DdlError, desc, None)
    }
    /// Constructor of [SqlState::SystemError](crate::SqlState::SystemError).
    pub fn system_error(
        desc: impl ToString,
        source: Box<dyn Error + Sync + Send + 'static>,
    ) -> Self {
        Self::new(SqlState::SystemError, desc, Some(source))
    }
}
impl Error for ApllodbError {
    /// Returns the underlying cause attached at construction, if any.
    ///
    /// The stored box is `dyn Error + Sync + Send`; the explicit match
    /// performs the coercion to plain `&dyn Error` that the closure form
    /// (per the FIXME below) fails to infer.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // FIXME `self.source.as_ref().map(|s| s.as_ref())` produces compile error
        #[allow(clippy::manual_map)]
        match &self.source {
            Some(s) => Some(s.as_ref()),
            None => None,
        }
    }
}
impl Display for ApllodbError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
r#"{description} ({sqlstate}) `{}` ; caused by: `{source}`"#,
description = self.desc,
sqlstate = self.kind(),
source = self
.source()
.map_or_else(|| "none".to_string(), |e| format!("{}", e))
)
}
}
impl ApllodbError {
    /// Use this for error handling with pattern match.
    pub fn kind(&self) -> &SqlState {
        &self.kind
    }
    /// Human-readable error description.
    pub fn desc(&self) -> &str {
        &self.desc
    }
}
|
use std::{
io,
time::Instant
};
use fibonacci_series::{fibonacci_non_recursive, fibonacci_recursive};
/// Interactive benchmark loop: reads a number from stdin, times both
/// Fibonacci implementations for it, and repeats until the user types
/// `exit`. Non-numeric input (other than `exit`) restarts the prompt.
fn main() {
    println!("Fibonacci Series");
    loop {
        println!("please input a positive number:");
        let mut input = String::new();
        io::stdin()
            .read_line(&mut input)
            .expect("failed to read line");
        let trimmed = input.trim();
        if trimmed == "exit" {
            break;
        }
        let target: u32 = match trimmed.parse() {
            Ok(number) => number,
            Err(_) => continue,
        };
        let timer = Instant::now();
        let result = fibonacci_non_recursive(target);
        let elapsed_time = timer.elapsed().as_micros();
        println!("Non-recursive took {:.3e} µs and the result is {}", elapsed_time, result);
        let timer = Instant::now();
        let result = fibonacci_recursive(target);
        let elapsed_time = timer.elapsed().as_micros();
        println!("Recursive took {:.3e} µs and the result is {}", elapsed_time, result);
    }
}
|
use nom::number::complete::{le_u8, le_u16, le_u32, le_u64, le_f32};
use serde_derive::{Serialize};
// Common header prepended to every telemetry packet. Field names keep the
// game's m_ prefix convention so they line up with the official spec.
#[derive(Debug, PartialEq, Serialize)]
pub struct PacketHeader {
    pub m_packetFormat: u16,
    pub m_gameMajorVersion: u8,
    pub m_gameMinorVersion: u8,
    pub m_packetVersion: u8,
    // Discriminator telling which packet body follows this header.
    pub m_packetId: u8,
    pub m_sessionUID: u64,
    pub m_sessionTime: f32,
    pub m_frameIdentifier: u32,
    pub m_playerCarIndex: u8
}
// nom 4-style parser: consumes the little-endian fields in wire order and
// assembles the struct. Field order here must match the on-wire layout
// exactly — do not reorder.
named!(pub parse_header<&[u8], PacketHeader>,
    do_parse!(
        m_packetFormat: le_u16 >>
        m_gameMajorVersion: le_u8 >>
        m_gameMinorVersion: le_u8 >>
        m_packetVersion: le_u8 >>
        m_packetId: le_u8 >>
        m_sessionUID: le_u64 >>
        m_sessionTime: le_f32 >>
        m_frameIdentifier: le_u32 >>
        m_playerCarIndex : le_u8 >>
        (PacketHeader {
            m_packetFormat: m_packetFormat,
            m_gameMajorVersion: m_gameMajorVersion,
            m_gameMinorVersion: m_gameMinorVersion,
            m_packetVersion: m_packetVersion,
            m_packetId: m_packetId,
            m_sessionUID: m_sessionUID,
            m_sessionTime: m_sessionTime,
            m_frameIdentifier: m_frameIdentifier,
            m_playerCarIndex: m_playerCarIndex
        })
    )
);
|
//! MIPS CP0 EntryLo register
bitflags! {
    /// Flag bits in the low 6 bits of EntryLo, plus the cache-attribute
    /// field and its mask.
    pub struct Flags : u32 {
        const DIRTY = 0b000100;
        const VALID = 0b000010;
        const GLOBAL = 0b000001;
        const UNCACHED = 0b010000;
        const CACHEABLE = 0b011000;
        const FLAG_MASK = 0b111111;
        const CACHE_MASK = 0b111000;
    }
}
/// Raw EntryLo register value; typed accessors are generated by the
/// `register_*` macros below.
#[derive(Clone, Copy, Debug)]
pub struct EntryLo {
    pub bits: u32,
}
impl EntryLo {
    register_flags!();
    // PFN accessors — macro args presumably (getter, setter, offset 6,
    // width 24); confirm against the macro definition.
    register_field!(get_pfn, set_pfn, 6, 24);
    register_struct_bit_accessor!(dirty, set_dirty, reset_dirty, 2);
    register_struct_bit_accessor!(valid, set_valid, reset_valid, 1);
    register_struct_bit_accessor!(global, set_global, reset_global, 0);
    // Cache-attribute setters: write the given value under CACHE_MASK.
    register_struct_block_setter!(
        set_uncached,
        Flags::UNCACHED.bits(),
        Flags::CACHE_MASK.bits()
    );
    register_struct_block_setter!(
        set_cacheable,
        Flags::CACHEABLE.bits(),
        Flags::CACHE_MASK.bits()
    );
}
// Per the MIPS CP0 layout, EntryLo0/EntryLo1 are registers 2 and 3
// (select 0) — macro args presumably (register, select); confirm.
pub mod __entry_lo0 {
    register_rw!(2, 0);
}
pub mod __entry_lo1 {
    register_rw!(3, 0);
}
/// Reads EntryLo0 as a raw u32.
#[inline]
pub fn read0_u32() -> u32 {
    __entry_lo0::read_u32()
}
/// Reads EntryLo1 as a raw u32.
#[inline]
pub fn read1_u32() -> u32 {
    __entry_lo1::read_u32()
}
/// Writes a raw u32 to EntryLo0.
#[inline]
pub fn write0_u32(v: u32) {
    __entry_lo0::write_u32(v);
}
/// Writes a raw u32 to EntryLo1.
#[inline]
pub fn write1_u32(v: u32) {
    __entry_lo1::write_u32(v);
}
/// Reads EntryLo0 as a typed value.
#[inline]
pub fn read0() -> EntryLo {
    EntryLo { bits: read0_u32() }
}
/// Reads EntryLo1 as a typed value.
#[inline]
pub fn read1() -> EntryLo {
    EntryLo { bits: read1_u32() }
}
/// Writes a typed value to EntryLo0.
#[inline]
pub fn write0(reg: EntryLo) {
    write0_u32(reg.bits);
}
/// Writes a typed value to EntryLo1.
#[inline]
pub fn write1(reg: EntryLo) {
    write1_u32(reg.bits);
}
|
//! Pallet's business-logic public interface
use crate::proposal::{InputProposalBatch, DeipProposal, ProposalId};
use crate::storage::StorageWrite;
use super::{Config, Error};
/// Create proposal
///
/// Commits a new proposal authored by `author` from the given `batch`,
/// optionally under a caller-supplied `external_id`.
pub fn propose<T: Config>(
    author: T::AccountId,
    batch: InputProposalBatch<T>,
    external_id: Option<ProposalId>,
)
    -> Result<(), Error<T>>
{
    let storage = StorageWrite::<T>::new();
    storage.commit(move |ops| {
        // The timestamp is read inside the commit closure so the proposal
        // is stamped at write time.
        DeipProposal::<T>::create(
            batch,
            author,
            external_id,
            ops,
            pallet_timestamp::Module::<T>::get(),
        )
    })
}
|
use std::marker::PhantomData;
use mopa;
use render::RenderBuilder;
use event::{EventHandler, EventArgs};
use geometry::Rect;
/// Implemented by widget states that can paint themselves within `bounds`,
/// cropped to `crop_to`. `mopa::Any` makes boxed `Draw` trait objects
/// downcastable back to their concrete state type.
pub trait Draw: ::std::fmt::Debug + mopa::Any {
    fn draw(&mut self, bounds: Rect, crop_to: Rect, renderer: &mut RenderBuilder);
}
// Generates the is/downcast_ref/downcast_mut helpers for `Draw`.
mopafy!(Draw);
/// Event handler that mutates a widget's draw state `T` via a stored
/// callback whenever an event of type `E` fires.
pub struct DrawEventHandler<T, E> {
    // Boxed so handlers built from different closures share one type.
    draw_callback: Box<Fn(&mut T)>,
    // Zero-sized tag pinning the event type this handler reacts to.
    phantom: PhantomData<E>,
}
impl<T: 'static, E> DrawEventHandler<T, E> {
pub fn new<F: Fn(&mut T) + 'static>(_: E, draw_callback: F) -> Self {
DrawEventHandler {
draw_callback: Box::new(draw_callback),
phantom: PhantomData,
}
}
}
impl<T: Draw + 'static, E> EventHandler<E> for DrawEventHandler<T, E> {
    /// On any `E` event, runs the stored callback against the widget's
    /// current `T` state through `widget.update`.
    fn handle(&mut self, _: &E, mut args: EventArgs) {
        let callback: &Fn(&mut T) = &*self.draw_callback;
        args.widget.update(|state: &mut T| callback(state));
    }
}
|
//! Contains marker types used to implement type state for encrypted and signed
//! GBLs.
use self::private::*;
use super::*;
use crate::utils::Blob;
use either::Either;
use std::borrow::Cow;
use std::fmt;
use std::marker::PhantomData;
mod sealed {
    use super::*;
    /// Seals the state traits: only the six marker types below may
    /// implement `EncryptionState`/`SignatureState`.
    pub trait Sealed {}
    impl<'a> Sealed for Encrypted<'a> {}
    impl<'a> Sealed for NotEncrypted<'a> {}
    impl<'a> Sealed for MaybeEncrypted<'a> {}
    impl<'a> Sealed for Signed<'a> {}
    impl<'a> Sealed for NotSigned<'a> {}
    impl<'a> Sealed for MaybeSigned<'a> {}
}
pub(crate) mod private {
    use super::*;
    /// Trait implemented by marker types specifying the encryption state of a
    /// GBL.
    ///
    /// This is an internal trait that should not be publicly reachable.
    pub trait EncryptionState<'a>: sealed::Sealed {
        /// The `Self` type, but with all lifetime parameters set to `'static`.
        type StaticSelf: EncryptionState<'static> + 'static;
        /// Converts into a value that owns all of its data.
        fn into_owned(self) -> Self::StaticSelf;
        /// Borrow-aware duplicate. Deliberately shadows `Clone::clone`:
        /// the result may borrow from `self` for `'a`.
        fn clone(&'a self) -> Self;
        /// Collapses the state into the concrete encrypted/plain sides.
        fn into_either(self) -> Either<Encrypted<'a>, NotEncrypted<'a>>;
        fn as_either_ref(&self) -> Either<&Encrypted<'a>, &NotEncrypted<'a>>;
    }
    /// Trait implemented by marker types specifying the signature state of a
    /// GBL.
    ///
    /// This is an internal trait that should not be publicly reachable.
    pub trait SignatureState<'a>: sealed::Sealed {
        /// The `Self` type, but with all lifetime parameters set to `'static`.
        type StaticSelf: SignatureState<'static> + 'static;
        /// Converts into a value that owns all of its data.
        fn into_owned(self) -> Self::StaticSelf;
        /// Borrow-aware duplicate (see `EncryptionState::clone`).
        fn clone(&'a self) -> Self;
        /// Collapses the state into the concrete signed/unsigned sides.
        fn into_either(self) -> Either<Signed<'a>, NotSigned<'a>>;
        fn as_either_ref(&self) -> Either<&Signed<'a>, &NotSigned<'a>>;
    }
}
/// The GBL is encrypted.
#[derive(Debug)]
pub struct Encrypted<'a> {
    pub(crate) enc_header: EncryptionHeader,
    // Still-encrypted section payloads; `Cow` lets parsed files borrow
    // from the input buffer until ownership is needed.
    pub(crate) enc_sections: Vec<Blob<Cow<'a, [u8]>>>,
}
/// The GBL is not encrypted.
#[derive(Debug)]
pub struct NotEncrypted<'a> {
    pub(crate) app_info: AppInfo,
    pub(crate) sections: Vec<ProgramData<'a>>,
}
/// The GBL may or may not be encrypted.
// Debug is implemented manually below, forwarding to whichever side holds.
pub struct MaybeEncrypted<'a> {
    pub(crate) inner: Either<Encrypted<'a>, NotEncrypted<'a>>,
}
impl<'a> EncryptionState<'a> for Encrypted<'a> {
    type StaticSelf = Encrypted<'static>;

    /// Copies every encrypted section into owned storage so the result no
    /// longer borrows from the source buffer.
    fn into_owned(self) -> Self::StaticSelf {
        let enc_sections = self
            .enc_sections
            .into_iter()
            .map(|section| Blob(section.0.into_owned().into()))
            .collect();
        Encrypted {
            enc_header: self.enc_header,
            enc_sections,
        }
    }
    /// Duplicate whose sections borrow from `self` for `'a`.
    fn clone(&'a self) -> Self {
        let enc_sections = self
            .enc_sections
            .iter()
            .map(|section| Blob(Cow::Borrowed(&**section)))
            .collect();
        Self {
            enc_header: self.enc_header,
            enc_sections,
        }
    }
    fn into_either(self) -> Either<Encrypted<'a>, NotEncrypted<'a>> {
        Either::Left(self)
    }
    fn as_either_ref(&self) -> Either<&Encrypted<'a>, &NotEncrypted<'a>> {
        Either::Left(self)
    }
}
impl<'a> EncryptionState<'a> for NotEncrypted<'a> {
    type StaticSelf = NotEncrypted<'static>;

    /// Deep-copies every section so nothing borrows the source buffer.
    fn into_owned(self) -> Self::StaticSelf {
        let sections = self
            .sections
            .into_iter()
            .map(|section| section.into_owned())
            .collect();
        NotEncrypted {
            app_info: self.app_info,
            sections,
        }
    }
    /// Duplicate whose sections may still borrow from the original buffer.
    fn clone(&'a self) -> Self {
        let sections = self.sections.iter().map(ProgramData::clone).collect();
        Self {
            app_info: self.app_info,
            sections,
        }
    }
    fn into_either(self) -> Either<Encrypted<'a>, NotEncrypted<'a>> {
        Either::Right(self)
    }
    fn as_either_ref(&self) -> Either<&Encrypted<'a>, &NotEncrypted<'a>> {
        Either::Right(self)
    }
}
impl<'a> EncryptionState<'a> for MaybeEncrypted<'a> {
    type StaticSelf = MaybeEncrypted<'static>;

    /// Owns whichever side is present, preserving the encrypted/plain state.
    fn into_owned(self) -> Self::StaticSelf {
        MaybeEncrypted {
            inner: self
                .inner
                .map_left(Encrypted::into_owned)
                .map_right(NotEncrypted::into_owned),
        }
    }
    /// Borrow-aware duplicate of whichever side is present.
    fn clone(&'a self) -> Self {
        let inner = match self.inner.as_ref() {
            Either::Left(enc) => Either::Left(enc.clone()),
            Either::Right(plain) => Either::Right(plain.clone()),
        };
        Self { inner }
    }
    fn into_either(self) -> Either<Encrypted<'a>, NotEncrypted<'a>> {
        self.inner
    }
    fn as_either_ref(&self) -> Either<&Encrypted<'a>, &NotEncrypted<'a>> {
        self.inner.as_ref()
    }
}
impl<'a> fmt::Debug for MaybeEncrypted<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.inner {
Either::Left(l) => l.fmt(f),
Either::Right(r) => r.fmt(f),
}
}
}
/// The GBL is signed with an ECDSA signature.
#[derive(Debug)]
pub struct Signed<'a> {
    pub(crate) signature: Signature<'a>,
}
/// The GBL is not signed.
pub struct NotSigned<'a> {
    // Carries the lifetime parameter so all marker types are uniformly
    // generic over 'a; no data is stored.
    pub(crate) _p: PhantomData<&'a ()>,
}
impl<'a> NotSigned<'a> {
    /// Cheap constructor for the zero-sized marker.
    pub(crate) fn new() -> Self {
        Self { _p: PhantomData }
    }
}
/// The GBL may or may not contain a signature.
pub struct MaybeSigned<'a> {
    pub(crate) inner: Either<Signed<'a>, NotSigned<'a>>,
}
impl<'a> SignatureState<'a> for Signed<'a> {
    type StaticSelf = Signed<'static>;

    /// Copies the signature into owned storage.
    fn into_owned(self) -> Self::StaticSelf {
        let signature = self.signature.into_owned();
        Signed { signature }
    }
    /// Duplicate that may still borrow from `self` for `'a`.
    fn clone(&'a self) -> Self {
        let signature = self.signature.clone();
        Signed { signature }
    }
    fn into_either(self) -> Either<Signed<'a>, NotSigned<'a>> {
        Either::Left(self)
    }
    fn as_either_ref(&self) -> Either<&Signed<'a>, &NotSigned<'a>> {
        Either::Left(self)
    }
}
impl<'a> SignatureState<'a> for NotSigned<'a> {
type StaticSelf = NotSigned<'static>;
fn into_owned(self) -> Self::StaticSelf {
NotSigned::new()
}
fn clone(&'a self) -> Self {
NotSigned::new()
}
fn into_either(self) -> Either<Signed<'a>, NotSigned<'a>> {
Either::Right(self)
}
fn as_either_ref(&self) -> Either<&Signed<'a>, &NotSigned<'a>> {
Either::Right(self)
}
}
impl<'a> SignatureState<'a> for MaybeSigned<'a> {
type StaticSelf = MaybeSigned<'static>;
fn into_owned(self) -> Self::StaticSelf {
MaybeSigned {
inner: self
.inner
.map_left(Signed::into_owned)
.map_right(NotSigned::into_owned),
}
}
fn clone(&'a self) -> Self {
Self {
inner: self
.inner
.as_ref()
.map_left(Signed::clone)
.map_right(NotSigned::clone),
}
}
fn into_either(self) -> Either<Signed<'a>, NotSigned<'a>> {
self.inner
}
fn as_either_ref(&self) -> Either<&Signed<'a>, &NotSigned<'a>> {
self.inner.as_ref()
}
}
impl<'a> fmt::Debug for NotSigned<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("NotSigned")
}
}
impl<'a> fmt::Debug for MaybeSigned<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.inner {
Either::Left(l) => l.fmt(f),
Either::Right(r) => r.fmt(f),
}
}
}
|
use crate::db::CONN;
use crate::model::{RegisterRequest, Rights, User};
use rusqlite::{params, Result};
use std::str::FromStr;
/// Data-access layer for the `users` table, backed by the global `CONN`.
pub struct UserRepository {}
impl UserRepository {
    /// Creates the `users` table if it does not exist yet.
    pub fn init_tables() -> Result<()> {
        CONN.lock().unwrap().execute(
            "create table if not exists users (
            id integer primary key,
            login text not null,
            password text not null,
            name text not null,
            rights text not null,
            unique (login)
        )",
            [],
        )?;
        Ok(())
    }
    /// Returns every user row.
    ///
    /// Columns are read positionally, so the order here must match the
    /// table definition in `init_tables`: id, login, password, name, rights.
    pub fn get_all() -> Result<Vec<User>> {
        let conn = CONN.lock().unwrap();
        let mut stmt = conn.prepare("select * from users")?;
        let rows = stmt.query_map([], |row| {
            let rights: String = row.get(4)?;
            Ok(User {
                id: row.get(0)?,
                login: row.get(1)?,
                password: row.get(2)?,
                name: row.get(3)?,
                // NOTE(review): panics on an unrecognized rights string;
                // assumes only values produced by `create` ('USER') or other
                // trusted writers are ever stored — confirm.
                rights: Rights::from_str(&rights).unwrap(),
            })
        })?;
        // Fix: propagate row-level errors to the caller instead of
        // panicking (previously `.map(|x| x.unwrap())` turned any row
        // error into a panic).
        rows.collect()
    }
    /// Inserts a new user with default 'USER' rights.
    ///
    /// NOTE(review): the password is stored as-is; hashing is presumably
    /// expected to happen upstream — confirm.
    pub fn create(request: RegisterRequest) -> Result<()> {
        let conn = CONN.lock().unwrap();
        conn.execute(
            "insert into users (name, login, password, rights)
            values (?1, ?2, ?3, 'USER')",
            params![request.name, request.login, request.password],
        )?;
        Ok(())
    }
}
|
//! This crate is a wrapper around [PulseAudio's repackaging of WebRTC's AudioProcessing module](https://www.freedesktop.org/software/pulseaudio/webrtc-audio-processing/).
//!
//! See `examples/simple.rs` for an example of how to use the library.
#![warn(clippy::all)]
#![warn(missing_docs)]
mod config;
use std::{error, fmt, sync::Arc};
use webrtc_audio_processing_sys as ffi;
pub use config::*;
pub use ffi::NUM_SAMPLES_PER_FRAME;
/// Represents an error inside webrtc::AudioProcessing.
/// See the documentation of [`webrtc::AudioProcessing::Error`](https://cgit.freedesktop.org/pulseaudio/webrtc-audio-processing/tree/webrtc/modules/audio_processing/include/audio_processing.h?id=9def8cf10d3c97640d32f1328535e881288f700f)
/// for further details.
#[derive(Debug)]
pub struct Error {
    /// webrtc::AudioProcessing::Error
    // Raw error code from the C++ layer, surfaced verbatim by Display.
    code: i32,
}
impl fmt::Display for Error {
    /// Prints the raw ffi error code; there is no message table on this
    /// side of the ffi boundary.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_fmt(format_args!("ffi::AudioProcessing::Error code: {}", self.code))
    }
}
// Marker impl: Debug + Display above satisfy the supertrait requirements.
impl error::Error for Error {}
/// `Processor` provides an access to webrtc's audio processing e.g. echo
/// cancellation and automatic gain control. It can be cloned, and cloned
/// instances share the same underlying processor module. It's the recommended
/// way to run the `Processor` in multi-threaded application.
#[derive(Clone)]
pub struct Processor {
    // Shared by all clones, so every clone processes through the same
    // underlying webrtc module.
    inner: Arc<AudioProcessing>,
    // TODO: Refactor. It's not necessary to have two frame buffers as
    // `Processor`s are cloned for each thread.
    // Scratch buffers (one inner Vec per channel) reused by the interleaved
    // process_* entry points to avoid per-call allocation.
    deinterleaved_capture_frame: Vec<Vec<f32>>,
    deinterleaved_render_frame: Vec<Vec<f32>>,
}
impl Processor {
/// Creates a new `Processor`. `InitializationConfig` is only used on
/// instantiation, however new configs can be be passed to `set_config()`
/// at any time during processing.
pub fn new(config: &ffi::InitializationConfig) -> Result<Self, Error> {
Ok(Self {
inner: Arc::new(AudioProcessing::new(config)?),
deinterleaved_capture_frame: vec![
vec![0f32; NUM_SAMPLES_PER_FRAME as usize];
config.num_capture_channels as usize
],
deinterleaved_render_frame: vec![
vec![0f32; NUM_SAMPLES_PER_FRAME as usize];
config.num_render_channels as usize
],
})
}
/// Processes and modifies the audio frame from a capture device by applying
/// signal processing as specified in the config. `frame` should hold an
/// interleaved f32 audio frame, with NUM_SAMPLES_PER_FRAME samples.
pub fn process_capture_frame(&mut self, frame: &mut [f32]) -> Result<(), Error> {
Self::deinterleave(frame, &mut self.deinterleaved_capture_frame);
self.inner.process_capture_frame(&mut self.deinterleaved_capture_frame)?;
Self::interleave(&self.deinterleaved_capture_frame, frame);
Ok(())
}
/// Processes and modifies the audio frame from a capture device by applying
/// signal processing as specified in the config. `frame` should be a Vec of
/// length 'num_capture_channels', with each inner Vec representing a channel
/// with NUM_SAMPLES_PER_FRAME samples.
pub fn process_capture_frame_noninterleaved(
&mut self,
frame: &mut Vec<Vec<f32>>,
) -> Result<(), Error> {
self.inner.process_capture_frame(frame)
}
/// Processes and optionally modifies the audio frame from a playback device.
/// `frame` should hold an interleaved `f32` audio frame, with
/// `NUM_SAMPLES_PER_FRAME` samples.
pub fn process_render_frame(&mut self, frame: &mut [f32]) -> Result<(), Error> {
Self::deinterleave(frame, &mut self.deinterleaved_render_frame);
self.inner.process_render_frame(&mut self.deinterleaved_render_frame)?;
Self::interleave(&self.deinterleaved_render_frame, frame);
Ok(())
}
/// Processes and optionally modifies the audio frame from a playback device.
/// `frame` should be a Vec of length 'num_render_channels', with each inner Vec
/// representing a channel with NUM_SAMPLES_PER_FRAME samples.
pub fn process_render_frame_noninterleaved(
&mut self,
frame: &mut Vec<Vec<f32>>,
) -> Result<(), Error> {
self.inner.process_render_frame(frame)
}
/// Returns statistics from the last `process_capture_frame()` call.
pub fn get_stats(&self) -> Stats {
self.inner.get_stats()
}
/// Immediately updates the configurations of the internal signal processor.
/// May be called multiple times after the initialization and during
/// processing.
pub fn set_config(&mut self, config: Config) {
self.inner.set_config(config);
}
/// Signals the AEC and AGC that the audio output will be / is muted.
/// They may use the hint to improve their parameter adaptation.
pub fn set_output_will_be_muted(&self, muted: bool) {
self.inner.set_output_will_be_muted(muted);
}
/// De-interleaves multi-channel frame `src` into `dst`.
///
/// Each `dst[c]` receives the samples of channel `c`. All channel buffers
/// must have the same length, and `src.len()` must equal
/// `num_channels * num_samples`.
///
/// ```text
/// e.g. A stereo frame with 3 samples:
///
/// Interleaved
/// +---+---+---+---+---+---+
/// |L0 |R0 |L1 |R1 |L2 |R2 |
/// +---+---+---+---+---+---+
///
/// Non-interleaved
/// +---+---+---+
/// |L0 |L1 |L2 |
/// +---+---+---+
/// |R0 |R1 |R2 |
/// +---+---+---+
/// ```
fn deinterleave<T: AsMut<[f32]>>(src: &[f32], dst: &mut [T]) {
    let num_channels = dst.len();
    // The previous version indexed `dst[0]` unconditionally, panicking on an
    // empty `dst` even when `src` was empty as well. Treat that degenerate
    // case as a no-op (but still reject a non-empty `src`).
    if num_channels == 0 {
        assert!(src.is_empty(), "non-empty src with zero output channels");
        return;
    }
    let num_samples = dst[0].as_mut().len();
    assert_eq!(src.len(), num_channels * num_samples);
    for (channel_index, channel) in dst.iter_mut().enumerate() {
        let channel = channel.as_mut();
        // Validate every channel buffer, not just channel 0.
        assert_eq!(channel.len(), num_samples);
        for (sample_index, sample) in channel.iter_mut().enumerate() {
            *sample = src[num_channels * sample_index + channel_index];
        }
    }
}
/// Reverts the `deinterleave` operation: writes the channel-major samples
/// in `src` back into `dst` in interleaved order. `dst.len()` must equal
/// `src.len()` times the per-channel sample count.
fn interleave<T: AsRef<[f32]>>(src: &[T], dst: &mut [f32]) {
    let num_channels = src.len();
    let num_samples = src[0].as_ref().len();
    assert_eq!(dst.len(), num_channels * num_samples);
    for (channel_index, channel) in src.iter().enumerate() {
        let samples = channel.as_ref();
        for sample_index in 0..num_samples {
            dst[num_channels * sample_index + channel_index] = samples[sample_index];
        }
    }
}
}
/// Minimal wrapper for safe and synchronized ffi.
struct AudioProcessing {
    // Raw pointer to the C++ AudioProcessing instance created in `new`
    // and released in `Drop`.
    inner: *mut ffi::AudioProcessing,
}
impl AudioProcessing {
    /// Creates the underlying C++ `AudioProcessing` instance via ffi.
    /// A null pointer signals failure; the error code written through
    /// `code` is then returned to the caller.
    fn new(config: &ffi::InitializationConfig) -> Result<Self, Error> {
        let mut code = 0;
        let inner = unsafe { ffi::audio_processing_create(config, &mut code) };
        if !inner.is_null() {
            Ok(Self { inner })
        } else {
            Err(Error { code })
        }
    }

    /// Runs capture-side (microphone) processing on a non-interleaved frame.
    /// A temporary array of per-channel raw pointers is built for the C API;
    /// the per-channel sample buffers are modified in place.
    fn process_capture_frame(&self, frame: &mut Vec<Vec<f32>>) -> Result<(), Error> {
        let mut frame_ptr = frame.iter_mut().map(|v| v.as_mut_ptr()).collect::<Vec<*mut f32>>();
        unsafe {
            let code = ffi::process_capture_frame(self.inner, frame_ptr.as_mut_ptr());
            if ffi::is_success(code) {
                Ok(())
            } else {
                Err(Error { code })
            }
        }
    }

    /// Runs render-side (playback) processing on a non-interleaved frame.
    /// Mirrors `process_capture_frame`.
    fn process_render_frame(&self, frame: &mut Vec<Vec<f32>>) -> Result<(), Error> {
        let mut frame_ptr = frame.iter_mut().map(|v| v.as_mut_ptr()).collect::<Vec<*mut f32>>();
        unsafe {
            let code = ffi::process_render_frame(self.inner, frame_ptr.as_mut_ptr());
            if ffi::is_success(code) {
                Ok(())
            } else {
                Err(Error { code })
            }
        }
    }

    /// Fetches the processor statistics and converts them to the public type.
    fn get_stats(&self) -> Stats {
        unsafe { ffi::get_stats(self.inner).into() }
    }

    /// Applies a new runtime configuration to the processor.
    fn set_config(&self, config: Config) {
        unsafe {
            ffi::set_config(self.inner, &config.into());
        }
    }

    /// Forwards the output-muted hint to the processor (used by AEC/AGC).
    fn set_output_will_be_muted(&self, muted: bool) {
        unsafe {
            ffi::set_output_will_be_muted(self.inner, muted);
        }
    }
}
impl Drop for AudioProcessing {
    fn drop(&mut self) {
        // Release the C++ instance allocated in `AudioProcessing::new`.
        unsafe {
            ffi::audio_processing_delete(self.inner);
        }
    }
}
// SAFETY: ffi::AudioProcessing provides thread safety with a few exceptions
// around the concurrent usage of its getters and setters e.g.
// `set_stream_delay_ms()`. The raw pointer is created in `new` and freed
// exactly once in `Drop`.
unsafe impl Sync for AudioProcessing {}
unsafe impl Send for AudioProcessing {}
#[cfg(test)]
mod tests {
    use super::*;
    use std::{thread, time::Duration};

    // Zero capture channels is invalid, so construction must fail.
    #[test]
    fn test_create_failure() {
        let config =
            InitializationConfig { num_capture_channels: 0, ..InitializationConfig::default() };
        assert!(Processor::new(&config).is_err());
    }

    // A valid processor can be created and dropped without leaking or crashing.
    #[test]
    fn test_create_drop() {
        let config = InitializationConfig {
            num_capture_channels: 1,
            num_render_channels: 1,
            ..InitializationConfig::default()
        };
        let _p = Processor::new(&config).unwrap();
    }

    // `interleave` must exactly invert `deinterleave`.
    #[test]
    fn test_deinterleave_interleave() {
        let num_channels = 2usize;
        let num_samples = 3usize;
        let interleaved = (0..num_channels * num_samples).map(|v| v as f32).collect::<Vec<f32>>();
        let mut deinterleaved = vec![vec![-1f32; num_samples]; num_channels];
        Processor::deinterleave(&interleaved, &mut deinterleaved);
        assert_eq!(vec![vec![0f32, 2f32, 4f32], vec![1f32, 3f32, 5f32]], deinterleaved);
        let mut interleaved_out = vec![-1f32; num_samples * num_channels];
        Processor::interleave(&deinterleaved, &mut interleaved_out);
        assert_eq!(interleaved, interleaved_out);
    }

    /// Builds an interleaved stereo `(render, capture)` frame pair in which
    /// the capture frame contains a scaled copy of the render frame,
    /// emulating acoustic echo for the AEC tests below.
    fn sample_stereo_frames() -> (Vec<f32>, Vec<f32>) {
        let num_samples_per_frame = NUM_SAMPLES_PER_FRAME as usize;
        // Stereo frame with a lower frequency cosine wave.
        let mut render_frame = Vec::with_capacity(num_samples_per_frame * 2);
        for i in 0..num_samples_per_frame {
            render_frame.push((i as f32 / 40.0).cos() * 0.4);
            render_frame.push((i as f32 / 40.0).cos() * 0.2);
        }
        // Stereo frame with a higher frequency sine wave, mixed with the cosine
        // wave from render frame.
        let mut capture_frame = Vec::with_capacity(num_samples_per_frame * 2);
        for i in 0..num_samples_per_frame {
            capture_frame.push((i as f32 / 20.0).sin() * 0.4 + render_frame[i * 2] * 0.2);
            capture_frame.push((i as f32 / 20.0).sin() * 0.2 + render_frame[i * 2 + 1] * 0.2);
        }
        (render_frame, capture_frame)
    }

    // End-to-end: echo cancellation should alter the capture frame while
    // leaving the render frame untouched.
    #[test]
    fn test_nominal() {
        let config = InitializationConfig {
            num_capture_channels: 2,
            num_render_channels: 2,
            ..InitializationConfig::default()
        };
        let mut ap = Processor::new(&config).unwrap();
        let config = Config {
            echo_cancellation: Some(EchoCancellation {
                suppression_level: EchoCancellationSuppressionLevel::High,
                stream_delay_ms: None,
                enable_delay_agnostic: false,
                enable_extended_filter: false,
            }),
            ..Config::default()
        };
        ap.set_config(config);
        let (render_frame, capture_frame) = sample_stereo_frames();
        let mut render_frame_output = render_frame.clone();
        ap.process_render_frame(&mut render_frame_output).unwrap();
        // Render frame should not be modified.
        assert_eq!(render_frame, render_frame_output);
        let mut capture_frame_output = capture_frame.clone();
        ap.process_capture_frame(&mut capture_frame_output).unwrap();
        // Echo cancellation should have modified the capture frame.
        // We don't validate how it's modified. Out of scope for this unit test.
        assert_ne!(capture_frame, capture_frame_output);
        let stats = ap.get_stats();
        assert!(stats.echo_return_loss.is_some());
        println!("{:#?}", stats);
    }

    // Exercises concurrent configuration/render/capture usage from three
    // threads. Ignored by default since it is timing-sensitive.
    #[test]
    #[ignore]
    fn test_nominal_threaded() {
        let config = InitializationConfig {
            num_capture_channels: 2,
            num_render_channels: 2,
            ..InitializationConfig::default()
        };
        let ap = Processor::new(&config).unwrap();
        let (render_frame, capture_frame) = sample_stereo_frames();
        let mut config_ap = ap.clone();
        let config_thread = thread::spawn(move || {
            thread::sleep(Duration::from_millis(100));
            let config = Config {
                echo_cancellation: Some(EchoCancellation {
                    suppression_level: EchoCancellationSuppressionLevel::High,
                    stream_delay_ms: None,
                    enable_delay_agnostic: false,
                    enable_extended_filter: false,
                }),
                ..Config::default()
            };
            config_ap.set_config(config);
        });
        let mut render_ap = ap.clone();
        let render_thread = thread::spawn(move || {
            for _ in 0..100 {
                let mut render_frame_output = render_frame.clone();
                render_ap.process_render_frame(&mut render_frame_output).unwrap();
                thread::sleep(Duration::from_millis(10));
            }
        });
        let mut capture_ap = ap.clone();
        let capture_thread = thread::spawn(move || {
            for i in 0..100 {
                let mut capture_frame_output = capture_frame.clone();
                capture_ap.process_capture_frame(&mut capture_frame_output).unwrap();
                let stats = capture_ap.get_stats();
                if i < 5 {
                    // first 50ms
                    assert!(stats.echo_return_loss.is_none());
                } else if i >= 95 {
                    // last 50ms
                    assert!(stats.echo_return_loss.is_some());
                }
                thread::sleep(Duration::from_millis(10));
            }
        });
        config_thread.join().unwrap();
        render_thread.join().unwrap();
        capture_thread.join().unwrap();
    }
}
|
use serde_scan::scan;
use std::cmp::Reverse;
use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
type Task = char;
type Result<T> = std::result::Result<T, std::boxed::Box<dyn std::error::Error>>;
/// A task currently being executed by a worker, together with the amount of
/// work time still required.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
struct InProgressTask {
    task: Task,
    remaining_time: usize,
}

impl InProgressTask {
    /// Begins work on `task`. Its duration is 60 seconds plus the task's
    /// 1-based alphabetical position (A = 61, B = 62, ...).
    fn new(task: Task) -> Self {
        let remaining_time = (task as u8 - b'A' + 61) as usize;
        Self { task, remaining_time }
    }

    /// Whether all required work time has been logged.
    fn is_complete(&self) -> bool {
        self.remaining_time == 0
    }

    /// Records one second of work on this task.
    fn log_work(&mut self) {
        self.remaining_time -= 1;
    }
}
/// An ordering constraint: `dependent` cannot start until `dependency` is done.
#[derive(Debug)]
struct Constraint {
    // The task that must finish first.
    dependency: Task,
    // The task that is blocked on `dependency`.
    dependent: Task,
}
/// Reads the puzzle input from the file `input`, builds the dependency
/// graph and prints the answers to both parts.
fn main() -> Result<()> {
    let input = std::fs::read_to_string("input")?;
    // TODO: don't build the actual vector here, build graph from an iterator
    let constraints = input
        .lines()
        .map(parse_constraint)
        .collect::<Result<Vec<Constraint>>>()?;
    let task_to_dependents = dependency_map_from_constraints(&constraints);
    println!("{:?}", part1(&task_to_dependents));
    println!("{:?}", part2(&task_to_dependents));
    Ok(())
}
/// Part 1: the order in which a single worker completes every task,
/// breaking ties alphabetically (via `topo_sort`'s min-heap).
fn part1(task_to_dependents: &HashMap<Task, Vec<Task>>) -> String {
    // `task_to_dependents` is already a reference; the previous `&` created
    // a needless double borrow (clippy::needless_borrow).
    topo_sort(
        task_to_dependents,
        make_task_to_num_dependencies_map(task_to_dependents),
    )
    .iter()
    .collect()
}
/// Part 2: total time for five parallel workers to finish every task, where
/// task `X` takes `60 + (alphabetical position of X)` seconds (see
/// `InProgressTask::new`).
fn part2(task_to_dependents: &HashMap<Task, Vec<Task>>) -> usize {
    let mut task_to_num_dependencies = make_task_to_num_dependencies_map(task_to_dependents);
    let mut num_idle_workers = 5;
    let mut work_in_progress = Vec::with_capacity(5);
    let mut time = 0;
    // Seed the queue with every task that has no dependencies.
    // NOTE(review): the seed order follows HashMap iteration order rather
    // than alphabetical order; confirm that assignment order cannot change
    // the total time for the given input.
    let mut task_queue: VecDeque<Task> = task_to_num_dependencies
        .iter()
        .filter(|(_task, &num_dependencies)| num_dependencies == 0)
        .map(|(&task, _num_dependencies)| task)
        .collect();
    while !task_queue.is_empty() || !work_in_progress.is_empty() {
        // Assign as many queued tasks as there are idle workers.
        let num_tasks_to_assign = std::cmp::min(num_idle_workers, task_queue.len());
        for _ in 0..num_tasks_to_assign {
            let task = task_queue.pop_front().unwrap();
            work_in_progress.push(InProgressTask::new(task));
        }
        num_idle_workers -= num_tasks_to_assign;
        // Every in-flight task receives one unit of work this tick.
        for work in work_in_progress.iter_mut() {
            work.log_work();
        }
        let completed_tasks: HashSet<InProgressTask> = work_in_progress
            .iter()
            .filter(|task| task.is_complete())
            .cloned()
            .collect();
        // Completed tasks release their dependents; a dependent whose last
        // dependency just finished becomes available for assignment.
        for completed_task in &completed_tasks {
            for dependent in &task_to_dependents[&completed_task.task] {
                *task_to_num_dependencies.get_mut(dependent).unwrap() -= 1;
                if task_to_num_dependencies[dependent] == 0 {
                    task_queue.push_back(*dependent);
                }
            }
        }
        work_in_progress.retain(|task| !completed_tasks.contains(task));
        num_idle_workers += completed_tasks.len();
        time += 1;
    }
    time
}
/// Builds a map from each task to the number of tasks it depends on.
/// Tasks with no dependencies are present with a count of zero.
fn make_task_to_num_dependencies_map(
    task_to_dependents: &HashMap<Task, Vec<Task>>,
) -> HashMap<Task, usize> {
    let mut counts: HashMap<Task, usize> = HashMap::with_capacity(task_to_dependents.len());
    for (&dependency, dependents) in task_to_dependents {
        // Even a task nothing depends on must appear in the map.
        counts.entry(dependency).or_default();
        for &dependent in dependents {
            *counts.entry(dependent).or_default() += 1;
        }
    }
    counts
}
/// Topologically sorts the tasks (Kahn's algorithm). The ready set is a
/// min-heap of `Reverse<Task>` so ties resolve alphabetically.
fn topo_sort(
    task_to_dependents: &HashMap<Task, Vec<Task>>,
    mut task_to_num_dependencies: HashMap<Task, usize>,
) -> Vec<Task> {
    // Start from every task with zero unmet dependencies.
    let mut ready: BinaryHeap<Reverse<Task>> = task_to_num_dependencies
        .iter()
        .filter(|&(_task, &num_dependencies)| num_dependencies == 0)
        .map(|(&task, _)| Reverse(task))
        .collect();
    let mut ordering = Vec::with_capacity(task_to_dependents.len());
    while let Some(Reverse(task)) = ready.pop() {
        ordering.push(task);
        for dependent in task_to_dependents.get(&task).unwrap() {
            let num_dependencies = task_to_num_dependencies.get_mut(dependent).unwrap();
            *num_dependencies -= 1;
            if *num_dependencies == 0 {
                ready.push(Reverse(*dependent));
            }
        }
    }
    ordering
}
/// Builds the dependency -> dependents adjacency map, ensuring every task
/// mentioned in any constraint (including pure leaves) has an entry.
fn dependency_map_from_constraints(constraints: &[Constraint]) -> HashMap<Task, Vec<Task>> {
    let mut graph: HashMap<Task, Vec<Task>> = HashMap::with_capacity(26);
    for constraint in constraints {
        graph
            .entry(constraint.dependency)
            .or_default()
            .push(constraint.dependent);
        // Also make sure the dependent appears in the map.
        graph.entry(constraint.dependent).or_default();
    }
    graph
}
/// Parses one input line of the form
/// "Step A must be finished before step B can begin." into a `Constraint`.
fn parse_constraint(s: &str) -> Result<Constraint> {
    let (dependency, dependent): (Task, Task) =
        scan!("Step {} must be finished before step {} can begin." <- s)?;
    Ok(Constraint {
        dependency,
        dependent,
    })
}
|
extern crate clap;
use clap::{App, Arg, ErrorKind, Shell, SubCommand};
use serde_json::Value;
use std::fs::File;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::exit;
use std::{env, fs, io};
use workon_rs::init;
#[allow(unused)]
const RETURN_OK: i32 = 0;
const RETURN_ERROR: i32 = 1;
/// Entry point: builds the clap application and dispatches on the chosen
/// subcommand.
fn main() {
    let app = init_app();
    // `dispatch` takes ownership, so cloning the whole App here was an
    // unnecessary deep copy.
    dispatch(app);
}
/// Builds the clap `App` for workon with all of its subcommands
/// (--init, --completions, --set, --get, --remove, --clean, --show).
/// All user-facing help strings are intentionally in Chinese.
fn init_app() -> App<'static, 'static> {
    let shell_arg = Arg::with_name("shell")
        .value_name("SHELL")
        .help("当前运行的shell,支持: Fish")
        .required(true);
    let env_arg = Arg::with_name("env_name")
        .value_name("ENV_NAME")
        .help("虚拟环境名称")
        .required(true);
    // Initialize the App and return the instance.
    App::new("workon")
        .version("0.0.1")
        .author("im.zhaolei@foxmail.com")
        .about("workon可以用来激活Python虚拟环境,设置路径和虚拟环境的映射,在使用workon激活虚拟环境是将自动进入配置的项目目录。")
        .subcommand(
            SubCommand::with_name("--init")
                .about("打印用于执行workon的shell函数")
                .arg(&shell_arg)
        )
        .subcommand(
            SubCommand::with_name("--completions")
                .about("在标准输出中打印生成的workon补全信息")
                .arg(
                    Arg::with_name("shell")
                        .takes_value(true)
                        .possible_values(&Shell::variants())
                        .help("生成补全")
                        .value_name("SHELL")
                        .required(true)
                ),
        )
        .subcommand(
            SubCommand::with_name("--set")
                .about("为当前目录设置虚拟环境,激活环境或进入当前目录自动激活环境")
                .arg(&env_arg)
        )
        .subcommand(
            SubCommand::with_name("--get")
                .about("获取虚拟环境配置的路径,如果未配置则未空")
                .arg(&env_arg)
        )
        .subcommand(
            SubCommand::with_name("--remove")
                .about("删除指定环境配置,可通过 show 子命令查看已经添加的全部配置信息")
                .arg(&env_arg)
        )
        .subcommand(
            SubCommand::with_name("--clean")
                .about("清除所有的配置信息")
        )
        .subcommand(
            SubCommand::with_name("--show")
                .about("显示所有的配置")
        )
        .help_message("打印帮助信息")
        .version_message("显示版本信息")
}
/// Returns `(config directory, config file path)`:
/// `$HOME/.config/workon` and `$HOME/.config/workon/config`.
fn get_config_file_and_dir() -> (PathBuf, String) {
    let work_dir = dirs::home_dir().unwrap().join(".config").join("workon");
    let config_file = work_dir.join("config").to_str().unwrap().to_owned();
    (work_dir, config_file)
}
/// Reads the raw config file contents, creating the config directory and an
/// empty file on first use. Exits the process on unrecoverable I/O errors.
/// Returns "{}" for an empty file so callers always receive valid JSON.
fn get_config() -> String {
    let (work_dir, config_file) = get_config_file_and_dir();
    let config_file = config_file.as_str();
    let mut file = match File::open(config_file) {
        Ok(f) => f,
        Err(_) => {
            // First run: create ~/.config/workon and an empty config file.
            if !work_dir.exists() {
                let _ = fs::create_dir(work_dir.to_str().unwrap());
            }
            let c = File::create(config_file);
            match c {
                Ok(f) => f,
                Err(e) => {
                    println!("创建配置文件失败: {}", e);
                    exit(RETURN_ERROR);
                }
            }
        }
    };
    let mut content = String::new();
    if let Err(e) = file.read_to_string(&mut content) {
        println!("读取文件异常: {}", e);
        exit(RETURN_ERROR);
    }
    // An empty file would fail JSON parsing; substitute an empty object.
    if content.is_empty() {
        content = "{}".to_owned();
    }
    content
}
/// Parses the config file into a JSON `Value` (an object mapping env names
/// to directories). Exits the process if the file contains invalid JSON.
fn parse_config() -> Value {
    let config = get_config();
    match serde_json::from_str(&config) {
        Ok(v) => v,
        Err(e) => {
            println!("解析配置失败: {}", e);
            exit(RETURN_ERROR);
        }
    }
}
/// Parses CLI arguments and routes to the matching subcommand handler.
/// Exits with `RETURN_ERROR` on argument errors and prints help for
/// unknown subcommands.
fn dispatch(mut app: App) {
    let matches_safe = app.clone().get_matches_safe();
    let matches = match matches_safe {
        Ok(m) => m,
        Err(e) => {
            match e.kind {
                ErrorKind::MissingRequiredArgument => {
                    println!("必需参数未指定,尝试使用--help获取帮助");
                }
                _ => {
                    println!("{}", e);
                }
            }
            exit(RETURN_ERROR);
        }
    };
    match matches.subcommand() {
        ("--init", Some(sub_m)) => {
            let shell_name = sub_m.value_of("shell").expect("指定shell");
            init::init_main(shell_name);
        }
        ("--completions", Some(sub_m)) => {
            let shell: Shell = sub_m
                .value_of("shell")
                .expect("shell名称未指定")
                .parse()
                .expect("shell不可用");
            app.gen_completions_to("workon", shell, &mut io::stdout().lock());
            // Fish additionally gets a completion listing virtualenv names.
            match shell {
                Shell::Fish => {
                    let complete = r#"complete -c workon -x -a "(ls -D $HOME/{.virtualenvs,Library/Caches/pypoetry/virtualenvs}/ 2> /dev/null | grep -v ':')""#;
                    println!("{}", complete);
                }
                Shell::Zsh => {}
                _ => {}
            }
        }
        ("--set", Some(sub_m)) => {
            let env_name = sub_m.value_of("env_name").expect("请指定虚拟环境名称");
            set(env_name);
        }
        ("--get", Some(sub_m)) => {
            let env_name = sub_m.value_of("env_name").expect("请指定虚拟环境名称");
            get(env_name);
        }
        ("--remove", Some(sub_m)) => {
            let env_name = sub_m.value_of("env_name").expect("请指定虚拟环境名称");
            remove(env_name);
        }
        ("--clean", Some(_)) => {
            clean();
        }
        ("--show", Some(_)) => {
            show();
        }
        (_command, _) => {
            // unreachable!("Invalid subcommand: {}", command)
            let _ = app.print_help();
        }
    }
}
/// Maps `env` to the current working directory and persists the config.
fn set(env: &str) {
    let mut config = parse_config();
    match env::current_dir() {
        Ok(path) => {
            config
                .as_object_mut()
                .unwrap()
                .insert(env.to_owned(), Value::from(path.to_str().unwrap()));
        }
        Err(e) => {
            println!("无法为当前目录设置虚拟环境({}): {}", env, e);
            exit(RETURN_ERROR);
        }
    }
    let json_string = serde_json::to_string_pretty(&config).expect("期望接收一个JSON类型的数据");
    let (_, config_file) = get_config_file_and_dir();
    match File::create(config_file) {
        Ok(mut f) => {
            // `write_all` retries partial writes; the previous bare `write`
            // could silently persist a truncated config.
            let _ = f.write_all(json_string.as_bytes());
        }
        Err(_) => return,
    }
    println!("配置 {} 已添加", env);
}
/// Prints the directory configured for `env`, unless it is already the
/// current working directory (in which case nothing is printed).
fn get(env: &str) {
    let config = parse_config();
    let value = match config.get(env) {
        Some(v) => v,
        None => return,
    };
    let current_dir = match env::current_dir() {
        Ok(d) => d,
        Err(_) => return,
    };
    let configured = value.as_str().unwrap();
    if configured != current_dir.to_str().unwrap() {
        println!("{}", configured);
    }
}
fn remove(env: &str) {
let mut config = parse_config();
if config.get(env).is_none() {
println!("配置 {} 不存在!", env);
exit(1);
}
config.as_object_mut().unwrap().remove(env);
let json_string = serde_json::to_string_pretty(&config).expect("期望接收一个JSON类型的数据");
let (_, config_file) = get_config_file_and_dir();
match File::create(config_file) {
Ok(mut f) => {
let _ = f.write(json_string.as_bytes());
}
Err(_) => return,
}
println!("配置 {} 已删除", env);
}
/// Resets the config file to an empty JSON object (if it exists).
fn clean() {
    let (_, config_file) = get_config_file_and_dir();
    let config_file = config_file.as_str();
    if Path::new(config_file).exists() {
        match File::create(config_file) {
            Ok(mut f) => {
                // `write_all` guarantees the full "{}" is written; a bare
                // `write` may perform a partial write.
                let _ = f.write_all("{}".as_bytes());
            }
            Err(e) => {
                println!("异常: {}", e);
                return;
            }
        }
    }
    println!("配置已清除");
}
/// Pretty-prints every env -> directory mapping in the config.
fn show() {
    let config = parse_config();
    println!("配置:");
    println!("{{");
    config.as_object().unwrap().iter().for_each(|(key, value)| {
        println!(" \"{}\": \"{}\",", key, value.as_str().unwrap());
    });
    println!("}}");
}
|
use crate::errors::FormatError;
use crate::try_continue;
/// One parsed piece of a format string.
#[derive(Debug, PartialEq)]
enum Segment {
    /// A `{index:padding}` placeholder: `index` selects the variable to
    /// substitute; `padding` optionally zero-pads numeric values.
    PlaceHolder {
        padding: Option<usize>,
        index: usize,
    },
    /// A literal run of text copied through verbatim.
    String(String),
}
/// A compiled format string: the ordered literal/placeholder segments
/// produced by `Formatter::new` and consumed by `Formatter::format`.
#[derive(Debug, PartialEq)]
pub struct Formatter(Vec<Segment>);
impl Formatter {
    /// Parses `format` into literal and placeholder segments.
    ///
    /// Placeholders have the form `{index:padding}` with both parts
    /// optional: `{}` auto-assigns the next 1-based index, `{2}` refers to
    /// argument 2, and `{:3}` pads numeric arguments to width 3. `\{`,
    /// `\}` and `\\` are literal escapes; escaping any other character is
    /// an error.
    pub fn new(format: &str) -> Result<Self, FormatError> {
        let mut segments = Vec::new();
        // Parser state: `should_escape` is set right after a bare backslash;
        // `is_parsing_index`/`is_parsing_padding` track which half of a
        // placeholder `current_segment` is currently accumulating.
        let mut should_escape = false;
        let mut is_parsing_index = false;
        let mut is_parsing_padding = false;
        let mut current_segment = String::new();
        let mut current_index: usize = 0;
        let mut current_padding: Option<usize> = None;
        // Next index to hand out for `{}` placeholders (1-based).
        let mut incremental_index = 1;
        for (i, ch) in format.chars().enumerate() {
            if !should_escape && ch == '\\' {
                should_escape = true;
                continue;
            }
            // Only '{', '}' and '\\' may follow a backslash.
            if should_escape && ch != '{' && ch != '}' && ch != '\\' {
                return Err(FormatError::InvalidEscapeCharacter(i, ch));
            }
            match ch {
                // Unescaped '{' outside a placeholder: flush the pending
                // literal and start collecting an index.
                '{' if !should_escape && !is_parsing_index && !is_parsing_padding => {
                    if !current_segment.is_empty() {
                        segments.push(Segment::String(current_segment));
                        current_segment = String::new();
                    }
                    is_parsing_index = true;
                }
                // Unescaped '}': close the current placeholder.
                '}' if !should_escape => {
                    if !is_parsing_index && !is_parsing_padding {
                        return Err(FormatError::UnopenedPlaceholder);
                    }
                    if current_segment.is_empty() {
                        // `{}` auto-assigns the next index; `{n:}` means
                        // "no padding".
                        if is_parsing_index {
                            current_index = incremental_index;
                            incremental_index += 1;
                        } else if is_parsing_padding {
                            current_padding = None;
                        }
                    } else if is_parsing_index {
                        current_index = current_segment
                            .as_str()
                            .parse()
                            .map_err(|_| FormatError::InvalidIndex(current_segment.clone()))?;
                        current_padding = None;
                    } else if is_parsing_padding {
                        current_padding =
                            Some(current_segment.as_str().parse().map_err(|_| {
                                FormatError::InvalidPadding(current_segment.clone())
                            })?);
                    }
                    segments.push(Segment::PlaceHolder {
                        padding: current_padding,
                        index: current_index,
                    });
                    // Reset all placeholder state for the next segment.
                    current_segment.clear();
                    current_padding = None;
                    current_index = 0;
                    is_parsing_index = false;
                    is_parsing_padding = false;
                }
                // ':' switches from the index part to the padding part.
                ':' if is_parsing_index => {
                    is_parsing_index = false;
                    is_parsing_padding = true;
                    if current_segment.is_empty() {
                        current_index = incremental_index;
                        incremental_index += 1;
                    } else {
                        current_index = current_segment
                            .as_str()
                            .parse()
                            .map_err(|_| FormatError::InvalidIndex(current_segment.clone()))?;
                        current_segment.clear();
                    }
                }
                // Any other character is literal text (or placeholder digits).
                _ => {
                    current_segment.push(ch);
                    should_escape = false;
                }
            }
        }
        // Reaching the end while inside `{...}` is an error.
        if is_parsing_index || is_parsing_padding {
            return Err(FormatError::UnclosedPlaceholder);
        }
        if !current_segment.is_empty() {
            segments.push(Segment::String(current_segment));
        }
        Ok(Self(segments))
    }

    /// Renders the compiled format against `vars`.
    ///
    /// Placeholders whose index is out of range are skipped. A placeholder
    /// with padding zero-pads the value only when it parses as an unsigned
    /// integer; otherwise the value is emitted as-is.
    pub fn format(&self, vars: &[&str]) -> String {
        let mut formatted = String::new();
        for segment in self.0.as_slice() {
            match segment {
                Segment::PlaceHolder { padding, index } => {
                    // Missing index: skip this placeholder entirely.
                    let var = *try_continue!(vars.get(*index));
                    if let Some((padding, digits)) =
                        padding.zip(var.parse().map(|n: usize| n.to_string()).ok())
                    {
                        if digits.len() < padding {
                            let diff = padding - digits.len();
                            (0..diff).for_each(|_| formatted.push('0'));
                        }
                        formatted.push_str(digits.as_str());
                        continue;
                    }
                    formatted.push_str(var);
                }
                Segment::String(ref string) => formatted.push_str(string),
            }
        }
        formatted
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Each tuple is (format string, variables, expected output).
    #[test]
    fn test_valid_formats() {
        let mut format_vars_expected = vec![
            ("{}", vec!["first", "second"], "second"),
            (r"{1}\\{0}", vec!["first", "second"], r"second\first"),
            (r"{1}\\\{{0}\}", vec!["first", "second"], r"second\{first}"),
            ("{}{}{3}", vec!["first", "second"], "second"),
            ("{1}", vec!["first", "second"], "second"),
            (
                "{1}:{1}.{1}",
                vec!["first", "second"],
                "second:second.second",
            ),
            ("{:3}", vec!["0", "1"], "001"),
            ("{:3}", vec!["0", "-1"], "-1"),
            ("{:3}", vec!["0", "a"], "a"),
            ("{:2}{:1}", vec!["0", "1", "2"], "012"),
            ("{1:3}", vec!["1", "2"], "002"),
            ("{}.{}", vec!["first", "second", "third"], "second.third"),
            ("{1}.{0}", vec!["first", "second"], "second.first"),
            ("{1}.{}", vec!["first", "second"], "second.second"),
            (
                "{2} - {} - {} - {}",
                vec!["first", "second", "third", "fourth"],
                "third - second - third - fourth",
            ),
            (
                "init {}{} end",
                vec!["first", "second", "third"],
                "init secondthird end",
            ),
            (
                r"init \{{}\} end",
                vec!["first", "second"],
                "init {second} end",
            ),
            (
                r"init \{{1:2}:{0:2}\} end",
                vec!["1", "2"],
                "init {02:01} end",
            ),
            (
                r"init \{{1:2}\{\}\{:\}{0:2}\} end",
                vec!["1", "2"],
                "init {02{}{:}01} end",
            ),
            (
                r"init {:5}\{\}{:2} end",
                vec!["0", "1", "2"],
                "init 00001{}02 end",
            ),
        ];
        while let Some((format, vars, expected)) = format_vars_expected.pop() {
            // `unwrap_or_else` avoids building the panic message on the
            // success path (clippy::expect_fun_call).
            let output = Formatter::new(format)
                .unwrap_or_else(|_| panic!("unable to parse format '{}'", format));
            let actual = output.format(vars.as_slice());
            assert_eq!(actual, expected);
        }
    }

    // Each tuple is (format string, expected parse error).
    #[test]
    fn test_invalid_formats() {
        let mut format_error = vec![
            ("}", FormatError::UnopenedPlaceholder),
            (r"\a", FormatError::InvalidEscapeCharacter(1, 'a')),
            ("2:5}", FormatError::UnopenedPlaceholder),
            (r"\{2:5}", FormatError::UnopenedPlaceholder),
            (r"{2:5\}", FormatError::UnclosedPlaceholder),
            ("{{2:5}}", FormatError::InvalidIndex("{2".to_string())),
            ("{a}", FormatError::InvalidIndex("a".to_string())),
            ("{2:5a}", FormatError::InvalidPadding("5a".to_string())),
            ("init {2:5", FormatError::UnclosedPlaceholder),
            ("init {2:5 end", FormatError::UnclosedPlaceholder),
        ];
        while let Some((format, err)) = format_error.pop() {
            assert_eq!(Formatter::new(format), Err(err));
        }
    }
}
|
fn main() {
    /*
    Formatted print
    `format!`: returns formatted text as a String
    `print!`: same as `format!` but prints the text to the console (`io::stdout`)
    `println!`: `print!` plus a trailing newline
    `eprint!`: same as `format!` but prints the text to standard error (`io::stderr`)
    `eprintln!`: `eprint!` plus a trailing newline
    */
    let days_of_month = format!("{} days", 31);
    print!("this month has {}.", days_of_month);
    println!("this month has {}.", days_of_month);
    eprint!("Is this error?");
    eprintln!("Is this error?");
}
|
//! # The zone/world (`*.luz`) file format
//!
//! This module can be used to read the zone/world file format
//! used in the game LEGO Universe.
/// Data definitions (structs/enums) for zone files
pub mod core;
/// Reading of zone files from byte streams
pub mod io;
/// Parser functions for zone file data
pub mod parser;
/// Module for reading the path data in a zone file
pub mod paths;
|
// Copyright (C) 2021 Subspace Labs, Inc.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Data structures related to objects (useful data) stored on Subspace Network.
//!
//! Mappings provided are of 3 kinds:
//! * for objects within a block
//! * for objects within a piece
//! * for global objects in the global history of the blockchain
#[cfg(not(feature = "std"))]
extern crate alloc;
use crate::{Blake2b256Hash, PieceIndex};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use parity_scale_codec::{Decode, Encode};
use scale_info::TypeInfo;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Object stored inside of the block.
///
/// Versioned enum so the on-chain encoding can evolve; `#[codec(index)]`
/// pins the SCALE discriminant for each variant.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum BlockObject {
    /// V0 of object mapping data structure
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Object hash
        hash: Blake2b256Hash,
        /// Offset of object in the encoded block.
        offset: u32,
    },
}
impl BlockObject {
/// Object hash
pub fn hash(&self) -> Blake2b256Hash {
match self {
Self::V0 { hash, .. } => *hash,
}
}
/// Offset of object in the encoded block.
pub fn offset(&self) -> u32 {
match self {
Self::V0 { offset, .. } => *offset,
}
}
/// Sets new offset.
pub fn set_offset(&mut self, new_offset: u32) {
match self {
Self::V0 { offset, .. } => {
*offset = new_offset;
}
}
}
}
/// Mapping of objects stored inside of the block.
#[derive(Debug, Default, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct BlockObjectMapping {
    /// Objects stored inside of the block
    pub objects: Vec<BlockObject>,
}
/// Object stored inside of a piece (the per-piece counterpart of
/// [`BlockObject`]).
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum PieceObject {
    /// V0 of object mapping data structure
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Object hash
        hash: Blake2b256Hash,
        // TODO: This is a raw record offset, not a regular one
        /// Offset of the object
        offset: u32,
    },
}
impl PieceObject {
/// Object hash
pub fn hash(&self) -> Blake2b256Hash {
match self {
Self::V0 { hash, .. } => *hash,
}
}
/// Offset of the object
pub fn offset(&self) -> u32 {
match self {
Self::V0 { offset, .. } => *offset,
}
}
}
/// Mapping of objects stored inside of the piece.
#[derive(Debug, Default, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct PieceObjectMapping {
    /// Objects stored inside of the piece
    pub objects: Vec<PieceObject>,
}
/// Object stored inside in the history of the blockchain, addressed by the
/// piece that contains (the beginning of) it.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum GlobalObject {
    /// V0 of object mapping data structure
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Piece index where object is contained (at least its beginning, might not fit fully)
        piece_index: PieceIndex,
        /// Offset of the object
        offset: u32,
    },
}
impl GlobalObject {
/// Piece index where object is contained (at least its beginning, might not fit fully)
pub fn piece_index(&self) -> PieceIndex {
match self {
Self::V0 { piece_index, .. } => *piece_index,
}
}
/// Offset of the object
pub fn offset(&self) -> u32 {
match self {
Self::V0 { offset, .. } => *offset,
}
}
}
|
#[macro_use]
extern crate tantivy;
#[macro_use]
extern crate serde_derive;
pub mod search;
|
use ws::{Sender,Handshake,Handler,Result,CloseCode,Message};
use socket_server::gamestate;
use std::sync::{Arc,Mutex};
use socket_server::gamestate::GameState;
/// One connected websocket client plus its handle into the shared game state.
pub struct Client{
    // Unique id assigned at connection time; used to address and
    // deregister this client in the game state.
    pub id: u64,
    // Sender half of the websocket, used to push messages to this client.
    pub out: Sender,
    // Shared, mutex-protected game state all clients operate on.
    gamestate: Arc<Mutex<GameState>>,
}
impl Handler for Client {
    /// Greets a newly connected websocket client.
    fn on_open(&mut self, _: Handshake) -> Result<()> {
        println!("New socket: {}", self.id);
        self.out.send("Hello precious websocket friend. <3")
    }

    /// Logs the close reason and removes this client from the game state.
    fn on_close(&mut self, code: CloseCode, reason: &str) {
        match code {
            CloseCode::Normal => println!("Client {}: Closed Gracefully.", self.id),
            CloseCode::Away => println!("Client {}: Left the site.", self.id),
            _ => println!("Unhandled error: {}", reason),
        }
        // Lock directly through the Arc; cloning it first was redundant.
        let mut gs = self.gamestate.lock().unwrap();
        gs.deregister(self.id);
    }

    /// Broadcasts the textual content of `message` to the other clients and
    /// echoes the original message back to the sender.
    fn on_message(&mut self, message: Message) -> Result<()> {
        match message.clone().into_text() {
            Ok(text) => {
                let mut gs = self.gamestate.lock().unwrap();
                gs.sendAll(text, self.id);
            }
            // Previously a panic: a malformed (non-UTF-8) frame must not
            // crash the whole server, so skip the broadcast instead.
            Err(e) => println!("Client {}: ignoring non-text message: {}", self.id, e),
        }
        self.out.send(message)
    }
}
pub fn new(id:u64,out: Sender, gs: Arc<Mutex<GameState>>) -> Client{
Client{id:id,out:out,gamestate:gs}
} |
/// Launches the Minecraft server jar from the `server` directory and
/// streams its output line-by-line to this process's stdout.
pub fn start_server(_name: &String) {
    use duct::cmd;
    use std::io::{prelude::*, stdout, BufReader};

    let reader = cmd!("java", "-jar", "server.jar", "nogui").dir("server").reader().unwrap();
    let buf_reader = BufReader::new(reader);
    let mut stdout = stdout();
    for l in buf_reader.lines() {
        // Re-append the newline stripped by `lines()` without allocating a
        // second String per line.
        let mut line = l.unwrap();
        line.push('\n');
        // `write_all` guarantees the whole line is emitted; bare `write`
        // may drop bytes on a partial write.
        stdout.write_all(line.as_bytes()).unwrap();
        // TODO: print each stdin character to stdout to give user feedback when typing
    }
}
/// Stops the named server. Currently a no-op: there is no handle-tracking
/// for running servers yet.
pub fn stop_server(_name: &String) {
    // TODO: keep track of spawned server process handles so they can be
    // stopped here. (The previous unused `std::io` import was removed.)
}
/* pub fn start_server(name: &String) {
use std::process::{Command, Stdio};
use std::io::{Write, Read};
let child = Command::new("java")
.arg("-jar")
.arg("server.jar")
.arg("nogui")
.stdout(Stdio::piped())
.current_dir(std::path::PathBuf::from("/home/dacaseo/Documents/mcwr/server/"))
.spawn()
.expect("Failed to execute command");
let mut stdout_string = String::new();
child.stdout.expect("failed to open stdout of child").read_to_string(&mut stdout_string).unwrap();
let mut stdout = std::io::stdout();
stdout.write(&stdout_string.as_bytes()).unwrap();
} */ |
use jsonrpc_tcp_server::jsonrpc_core::*;
use mcsql_sys;
/// Registers every `mcsql_sys` database query as a JSON-RPC method on `io`.
///
/// Each handler follows the same pattern: optionally deserialize a small
/// ad-hoc parameter struct from the request `Params`, call the matching
/// `mcsql_sys` function, and wrap its result in a JSON value.
///
/// NOTE(review): on a parameter-parse failure the handlers return
/// `Ok(Value::String("fail to query"))` rather than a JSON-RPC error, so
/// clients must inspect the payload string instead of the RPC error field.
pub fn register_mcsql_funcs(io: &mut IoHandler) {
    io.add_method("arc_get_all", |_params: Params| {
        let res = mcsql_sys::arc_get_all();
        Ok(Value::String(res))
    });
    io.add_method("arc_get_params", |params: Params| {
        // Ad-hoc request shape; deserialized from the JSON-RPC params.
        #[derive(Deserialize)]
        struct ArcParams {
            file_no: i32,
            group: String,
        }
        let value: ArcParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::arc_get_params(value.file_no, value.group);
        Ok(Value::String(res))
    });
    io.add_method("bookprogram_get_all", |_params: Params| {
        let res = mcsql_sys::bookprogram_get_all();
        Ok(Value::String(res))
    });
    io.add_method("dynamics_get_all", |_params: Params| {
        let res = mcsql_sys::dynamics_get_all();
        Ok(Value::String(res))
    });
    io.add_method("dynamics_get_by_id", |params: Params| {
        #[derive(Deserialize)]
        struct DynamicsParams {
            id: String,
        }
        let value: DynamicsParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::dynamics_get_by_id(value.id);
        Ok(Value::String(res))
    });
    io.add_method("enum_get_all", |_params: Params| {
        let res = mcsql_sys::enum_get_all();
        Ok(Value::String(res))
    });
    io.add_method("extaxis_get_all", |_params: Params| {
        let res = mcsql_sys::extaxis_get_all();
        Ok(Value::String(res))
    });
    io.add_method("interference_get_all", |_params: Params| {
        let res = mcsql_sys::interference_get_all();
        Ok(Value::String(res))
    });
    io.add_method("ios_get_all", |params: Params| {
        #[derive(Deserialize)]
        struct IosParams {
            group: String,
            lang: String,
            auth: i32,
            tech: i32,
        }
        let value: IosParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::ios_get_all(value.group, value.lang, value.auth, value.tech);
        Ok(Value::String(res))
    });
    io.add_method("metadata_get_all", |params: Params| {
        #[derive(Deserialize)]
        struct MetadataParams {
            lang: String,
        }
        let value: MetadataParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::metadata_get_all(value.lang);
        Ok(Value::String(res))
    });
    io.add_method("params_get_params", |_params: Params| {
        let res = mcsql_sys::params_get_params();
        Ok(Value::String(res))
    });
    io.add_method("params_get_valid_param_by_id", |params: Params| {
        #[derive(Deserialize)]
        struct IdParams {
            md_id: String,
        }
        let value: IdParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::params_get_valid_param_by_id(value.md_id);
        Ok(Value::String(res))
    });
    io.add_method("params_get_valid_param_by_group", |params: Params| {
        #[derive(Deserialize)]
        struct GroupParams {
            group: String,
        }
        let value: GroupParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::params_get_valid_param_by_group(value.group);
        Ok(Value::String(res))
    });
    io.add_method("operation_record_get_all", |params: Params| {
        // `start`/`page_size` suggest a paginated query; semantics live in
        // `mcsql_sys` — confirm there.
        #[derive(Deserialize)]
        struct OperationParams {
            created_time: i32,
            start: i32,
            page_size: i32,
        }
        let value: OperationParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::operation_record_get_all(value.created_time, value.start, value.page_size);
        Ok(Value::String(res))
    });
    io.add_method("ref_get_all", |_params: Params| {
        let res = mcsql_sys::ref_get_all();
        Ok(Value::String(res))
    });
    io.add_method("toolframe_get_all", |_params: Params| {
        let res = mcsql_sys::toolframe_get_all();
        Ok(Value::String(res))
    });
    io.add_method("toolframe_get_by_toolno", |params: Params| {
        #[derive(Deserialize)]
        struct ToolNoParams {
            tool_no: i32,
        }
        let value: ToolNoParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::toolframe_get_by_toolno(value.tool_no);
        Ok(Value::String(res))
    });
    io.add_method("userframe_get_all", |_params: Params| {
        let res = mcsql_sys::userframe_get_all();
        Ok(Value::String(res))
    });
    io.add_method("userframe_get_by_userno", |params: Params| {
        #[derive(Deserialize)]
        struct UserNoParams {
            user_no: i32,
        }
        let value: UserNoParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::userframe_get_by_userno(value.user_no);
        Ok(Value::String(res))
    });
    io.add_method("zeropoint_get_all", |_params: Params| {
        let res = mcsql_sys::zeropoint_get_all();
        Ok(Value::String(res))
    });
    // The three manager_* methods return a numeric status code instead of a
    // JSON string payload.
    io.add_method("manager_backup_db", |params: Params| {
        #[derive(Deserialize)]
        struct BackupParams {
            db_dir: String,
        }
        let value: BackupParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::manager_backup_db(value.db_dir);
        Ok(Value::Number(serde_json::Number::from(res)))
    });
    io.add_method("manager_restore_db", |params: Params| {
        #[derive(Deserialize)]
        struct RestoreParams {
            db_dir: String,
            db_bak_name: String,
            force: u8,
        }
        let value: RestoreParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::manager_restore_db(value.db_dir, value.db_bak_name, value.force);
        Ok(Value::Number(serde_json::Number::from(res)))
    });
    io.add_method("manager_upgrade_db", |params: Params| {
        #[derive(Deserialize)]
        struct UpgradeParams {
            db_dir: String,
            upgrade_pkg: String,
        }
        let value: UpgradeParams = match params.parse() {
            Ok(v) => v,
            Err(_) => {
                return Ok(Value::String("fail to query".to_string()));
            },
        };
        let res = mcsql_sys::manager_upgrade_db(value.db_dir, value.upgrade_pkg);
        Ok(Value::Number(serde_json::Number::from(res)))
    });
}
mod api_error;
pub use api_error::*;
|
use crate::miner::MinerClientActor;
use actix::Actor;
use actix_rt::System;
use bus::BusActor;
use config::MinerConfig;
use config::NodeConfig;
use consensus::argon::ArgonConsensus;
use futures_timer::Delay;
use logger::prelude::*;
use sc_stratum::{PushWorkHandler, Stratum};
use starcoin_miner::{
miner::{MineCtx, Miner},
stratum::StratumManager,
};
use std::sync::Arc;
use std::time::Duration;
use types::block::{Block, BlockBody, BlockHeader, BlockTemplate};
use types::U256;
/// Boots a stratum server backed by a test miner, attaches a miner client
/// actor, and repeatedly pushes mint jobs to all connected workers.
#[test]
fn test_stratum_client() {
    ::logger::init_for_test();
    let mut runtime = System::new("test");
    runtime.block_on(async {
        // Low-difficulty Argon strategy keeps the test fast.
        let mut client_config = MinerConfig::default();
        client_config.consensus_strategy = config::ConsensusStrategy::Argon(4);
        let node_config = Arc::new(NodeConfig::random_for_test());
        let mut miner = Miner::<ArgonConsensus>::new(BusActor::launch(), node_config);
        // Start the stratum server with a dispatcher wrapping the miner.
        let dispatcher = Arc::new(StratumManager::new(miner.clone()));
        let stratum = Stratum::start(&client_config.stratum_server, dispatcher, None).unwrap();
        // Give the server a moment to come up before connecting clients.
        Delay::new(Duration::from_millis(3000)).await;
        info!("started stratum server");
        // Build a minimal mint context from a default (empty) block.
        let block = Block::new(BlockHeader::default(), BlockBody::default());
        let template = BlockTemplate::from_block(block);
        let difficulty: U256 = 1.into();
        let mine_ctx = MineCtx::new(template, difficulty);
        let _addr = MinerClientActor::new(client_config).start();
        miner.set_mint_job(mine_ctx);
        // Broadcast the current job to every worker, pausing between pushes.
        for _ in 1..10 {
            stratum.push_work_all(miner.get_mint_job()).unwrap();
            Delay::new(Duration::from_millis(2000)).await;
        }
    });
}
|
use crate::schema::usr_smile;
use chrono::prelude::*;
/// A row in the `usr_smile` table.
///
/// Field names are deliberately camelCase (`joinAt`, `lastEditedAt`,
/// `isAdmin`) — presumably matching the existing database columns and the
/// established JSON keys, so renaming them would change both mappings.
/// `allow(non_snake_case)` silences the style lint without altering either.
#[allow(non_snake_case)]
#[derive(Debug, Queryable, Identifiable, Serialize, Deserialize, PartialEq)]
#[table_name = "usr_smile"]
#[primary_key(user_id)]
pub struct User {
    /// Primary key of the user row.
    pub user_id: String,
    pub username: String,
    pub email: Option<String>,
    pub joinAt: NaiveDateTime,
    pub lastEditedAt: Option<NaiveDateTime>,
    pub fullname: Option<String>,
    /// Never sent to clients (skipped by serde).
    #[serde(skip)]
    pub password: String,
    pub avatar: Option<String>,
    pub isAdmin: Option<bool>,
}
/// Insert/update payload for the `usr_smile` table; also the GraphQL input
/// object for user mutations.
///
/// Field names stay camelCase to match [the existing DB columns and JSON
/// keys]; `allow(non_snake_case)` suppresses the resulting style lint.
#[allow(non_snake_case)]
#[derive(
    Debug,
    AsChangeset,
    juniper::GraphQLInputObject,
    Insertable,
    Serialize,
    Deserialize,
    PartialEq,
    Clone,
)]
#[table_name = "usr_smile"]
pub struct UserInput {
    /// Never taken from client input; assigned server-side.
    #[serde(skip_deserializing)]
    pub user_id: Option<String>,
    pub username: Option<String>,
    pub email: Option<String>,
    pub fullname: Option<String>,
    pub password: Option<String>,
    pub avatar: Option<String>,
    pub isAdmin: Option<bool>,
}
|
//! Various enums that you can match on.
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// The quarantine permissions.
// NOTE(review): field meanings follow the Reddit API's quarantine metadata;
// verify against live payloads when extending.
pub struct QuarantinePermissions {
    /// If styles are allowed.
    pub styles: bool,
    /// If sharing is allowed.
    pub sharing: bool,
    /// If subreddit images are allowed.
    pub sr_images: bool,
    /// If the subscriber count is visible.
    pub subscriber_count: bool,
    /// If media is allowed.
    pub media: bool,
    /// If polls are allowed.
    pub polls: bool,
    /// If videos are allowed.
    pub videos: bool,
    /// If images are allowed.
    pub images: bool,
    /// If crossposts are allowed.
    pub crossposts: bool,
}
/// The type of a subreddit.
// The serde helper attribute must come after the derive that introduces it;
// placing it before triggers the `legacy_derive_helpers` future-compat lint.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SubredditType {
    /// Anyone can post to this subreddit.
    Public,
    /// Only certain users can post to this subreddit.
    Private,
}
/// A way to sort comments.
// (Doc previously said "The type of a submission" — a copy-paste error.)
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CommentSort {
    /// Absolute (total karma) ranking.
    Top,
    /// Relative (percentage-based) ranking.
    Best,
    /// Prioritize controversial comments.
    Controversial,
    /// Newest comments.
    New,
}
/// The allowed types of submissions in a Subreddit.
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SubredditSubmissionType {
    /// All submissions allowed.
    Any,
    /// Only link submissions allowed.
    Link,
    /// Only text posts allowed.
    Text,
}
/// The type of a submission.
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SubmissionType {
    /// URL link.
    Link,
    /// Self-post.
    Text,
}
/// A way to sort links (time window for the listing).
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LinkSort {
    /// Posts made in the past hour.
    Hour,
    /// Posts made in the past day.
    Day,
    /// Posts made in the past week.
    Week,
    /// Posts made in the past month.
    Month,
    /// Posts made in the past year.
    Year,
    /// All posts.
    All,
}
/// The type of an award.
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AwardType {
    /// Can be used anywhere.
    Global,
    /// Belonging to a subreddit.
    Community,
}
/// The subtype of an award.
// serde helper attribute moved after the derive (legacy_derive_helpers lint).
// Note: this enum serializes UPPERCASE, unlike its lowercase siblings.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum AwardSubtype {
    /// Can be used anywhere.
    Global,
    /// Belonging to a subreddit.
    Community,
    /// Premium.
    Premium,
}
/// Parameters for a GET query, a key-value tuple of Strings.
// Stored as an ordered list rather than a map, so duplicate keys and
// insertion order are preserved.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Params(Vec<(String, String)>);
impl Params {
/// Creates a new Params struct.
pub fn new() -> Self {
Self(Vec::new())
}
/// Adds a parameter.
pub fn add(mut self, key: &str, value: &str) -> Self {
self.0.push((key.into(), value.into()));
self
}
}
impl Default for Params {
fn default() -> Self {
Self(Vec::new())
}
}
/// Fullname is the reddit unique ID for a thing, including the type prefix.
// Expected shape: "<prefix>_<id36>", e.g. "t3_abc123" — inferred from the
// '_'-splitting in `prefix`/`name`; confirm against the Reddit API docs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Fullname(String);
impl AsRef<str> for Fullname {
    /// Borrows the underlying ID string.
    fn as_ref(&self) -> &str {
        self.0.as_str()
    }
}
impl Fullname {
    /// Gets the ID36 portion of a Fullname (the segment after the first '_').
    ///
    /// # Panics
    ///
    /// Panics if the fullname contains no '_' separator.
    pub fn name(&self) -> String {
        // `nth(1)` avoids collecting the whole split into a Vec just to
        // index it; the panic condition (no separator) is unchanged.
        self.as_ref()
            .split('_')
            .nth(1)
            .expect("fullname has no '_' separator")
            .to_owned()
    }
    /// Gets the prefix of a Fullname (t1, t2, ...), the part before the '_'.
    pub fn prefix(&self) -> String {
        // `split` always yields at least one segment, so this cannot panic.
        self.as_ref()
            .split('_')
            .next()
            .expect("split always yields at least one segment")
            .to_owned()
    }
}
|
use std::fs::File;
use std::io::{self, BufReader, Cursor, Read, Write};
use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::ptr::null_mut;
use std::sync::Arc;
use std::task::{Context, Poll};
use futures::ready;
use hyper::server::accept::Accept;
use hyper::server::conn::{AddrIncoming, AddrStream};
use rustls::{self, ServerConfig, ServerSession, Session, Stream, TLSError};
use tokio::io::{AsyncRead, AsyncWrite};
use crate::transport::Transport;
/// Represents errors that can occur building the TlsConfig
#[derive(Debug)]
pub(crate) enum TlsConfigError {
    /// An I/O error while reading the key or certificate source.
    Io(io::Error),
    /// An Error parsing the Certificate
    CertParseError,
    /// An Error parsing a Pkcs8 key
    Pkcs8ParseError,
    /// An Error parsing a Rsa key
    RsaParseError,
    /// An error from an empty key
    EmptyKey,
    /// An error from an invalid key
    InvalidKey(TLSError),
}
impl std::fmt::Display for TlsConfigError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TlsConfigError::Io(err) => err.fmt(f),
TlsConfigError::CertParseError => write!(f, "certificate parse error"),
TlsConfigError::Pkcs8ParseError => write!(f, "pkcs8 parse error"),
TlsConfigError::RsaParseError => write!(f, "rsa parse error"),
TlsConfigError::EmptyKey => write!(f, "key contains no private key"),
TlsConfigError::InvalidKey(err) => write!(f, "key contains an invalid key, {}", err),
}
}
}
impl std::error::Error for TlsConfigError {}
/// Builder to set the configuration for the Tls server.
pub(crate) struct TlsConfigBuilder {
    // PEM certificate source: a lazily-opened file, in-memory bytes, or empty.
    cert: Box<dyn Read + Send + Sync>,
    // PEM private-key source, same possibilities as `cert`.
    key: Box<dyn Read + Send + Sync>,
}
impl std::fmt::Debug for TlsConfigBuilder {
    // Hand-written because the boxed readers are not `Debug`; only the
    // struct name is printed.
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        f.debug_struct("TlsConfigBuilder").finish()
    }
}
impl TlsConfigBuilder {
    /// Create a new TlsConfigBuilder with empty key and certificate sources.
    pub(crate) fn new() -> TlsConfigBuilder {
        TlsConfigBuilder {
            key: Box::new(io::empty()),
            cert: Box::new(io::empty()),
        }
    }
    /// Sets the Tls key via file path. The file is opened lazily, so an
    /// unreadable path surfaces as `TlsConfigError::Io` from [`build`], not
    /// here. (Doc previously named a nonexistent `IoError` variant.)
    pub(crate) fn key_path(mut self, path: impl AsRef<Path>) -> Self {
        self.key = Box::new(LazyFile {
            path: path.as_ref().into(),
            file: None,
        });
        self
    }
    /// sets the Tls key via bytes slice (copied into an owned buffer)
    pub(crate) fn key(mut self, key: &[u8]) -> Self {
        self.key = Box::new(Cursor::new(Vec::from(key)));
        self
    }
    /// Specify the file path for the TLS certificate to use (read lazily in
    /// [`build`]).
    pub(crate) fn cert_path(mut self, path: impl AsRef<Path>) -> Self {
        self.cert = Box::new(LazyFile {
            path: path.as_ref().into(),
            file: None,
        });
        self
    }
    /// sets the Tls certificate via bytes slice (copied into an owned buffer)
    pub(crate) fn cert(mut self, cert: &[u8]) -> Self {
        self.cert = Box::new(Cursor::new(Vec::from(cert)));
        self
    }
    /// Parses the certificate chain and private key and builds a rustls
    /// `ServerConfig` advertising h2 and http/1.1 via ALPN.
    ///
    /// The key may be PKCS#8 or RSA; PKCS#8 is tried first, RSA as fallback.
    pub(crate) fn build(mut self) -> Result<ServerConfig, TlsConfigError> {
        let mut cert_rdr = BufReader::new(self.cert);
        let cert = rustls::internal::pemfile::certs(&mut cert_rdr)
            .map_err(|()| TlsConfigError::CertParseError)?;
        let key = {
            // Buffer the key so it can be scanned twice: once for PKCS#8
            // and, if that yields nothing, again for RSA.
            let mut key_vec = Vec::new();
            self.key
                .read_to_end(&mut key_vec)
                .map_err(TlsConfigError::Io)?;
            if key_vec.is_empty() {
                return Err(TlsConfigError::EmptyKey);
            }
            let mut pkcs8 = rustls::internal::pemfile::pkcs8_private_keys(&mut key_vec.as_slice())
                .map_err(|()| TlsConfigError::Pkcs8ParseError)?;
            if !pkcs8.is_empty() {
                pkcs8.remove(0)
            } else {
                let mut rsa = rustls::internal::pemfile::rsa_private_keys(&mut key_vec.as_slice())
                    .map_err(|()| TlsConfigError::RsaParseError)?;
                if !rsa.is_empty() {
                    rsa.remove(0)
                } else {
                    return Err(TlsConfigError::EmptyKey);
                }
            }
        };
        let mut config = ServerConfig::new(rustls::NoClientAuth::new());
        config
            .set_single_cert(cert, key)
            // Eta-reduced: the variant constructor is already the mapping fn.
            .map_err(TlsConfigError::InvalidKey)?;
        config.set_protocols(&["h2".into(), "http/1.1".into()]);
        Ok(config)
    }
}
/// A file that is opened on first read rather than at construction time, so
/// builder methods can accept paths without touching the filesystem.
struct LazyFile {
    path: PathBuf,
    file: Option<File>,
}
impl LazyFile {
    /// Opens the file on first use, then delegates to `File::read`.
    fn lazy_read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.file.is_none() {
            let opened = File::open(&self.path)?;
            self.file = Some(opened);
        }
        // The option was filled just above if it was empty, so unwrap is safe.
        self.file.as_mut().unwrap().read(buf)
    }
}
impl Read for LazyFile {
    /// Reads from the lazily-opened file, decorating any error with the
    /// offending path for easier diagnosis.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.lazy_read(buf).map_err(|err| {
            io::Error::new(
                err.kind(),
                format!("error reading file ({:?}): {}", self.path.display(), err),
            )
        })
    }
}
/// a wrapper around T to allow for rustls Stream read/write translations to async read and write
#[derive(Debug)]
struct AllowStd<T> {
    inner: T,
    // Type-erased pointer to the current task's `Context`; only meaningful
    // while installed by `TlsStream::with_context`, null otherwise.
    context: *mut (),
}
// *mut () context is neither Send nor Sync
// SAFETY: the raw `context` pointer is only dereferenced while a live
// `&mut Context` is installed for the duration of a `with_context` call, so
// thread-safety reduces to that of `T`. NOTE(review): this restates the
// original rationale above — confirm against the `with_context` call sites.
unsafe impl<T: Send> Send for AllowStd<T> {}
unsafe impl<T: Sync> Sync for AllowStd<T> {}
// RAII helper: clears the stashed context pointer when the `with_context`
// scope ends (including on panic), so `AllowStd` never observes a dangling
// `Context`.
struct Guard<'a, T>(&'a mut TlsStream<T>)
where
    AllowStd<T>: Read + Write;
impl<T> Drop for Guard<'_, T>
where
    AllowStd<T>: Read + Write,
{
    fn drop(&mut self) {
        // Null marks "no task context installed".
        (self.0).io.context = null_mut();
    }
}
impl<T> AllowStd<T>
where
    T: Unpin,
{
    /// Runs `f` with the task `Context` previously stashed by
    /// `TlsStream::with_context`, plus a pinned reference to the inner I/O.
    ///
    /// Asserts (and thus panics) if called while no context is installed.
    fn with_context<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut Context<'_>, Pin<&mut T>) -> R,
    {
        unsafe {
            assert!(!self.context.is_null());
            // SAFETY: a non-null pointer means `TlsStream::with_context`
            // installed a live `&mut Context` valid for this call's duration.
            let waker = &mut *(self.context as *mut _);
            f(waker, Pin::new(&mut self.inner))
        }
    }
}
impl<T> Read for AllowStd<T>
where
    T: AsyncRead + Unpin,
{
    /// Bridges the async read to the blocking `Read` API rustls drives:
    /// `Poll::Pending` is surfaced as a `WouldBlock` error.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self.with_context(|ctx, stream| stream.poll_read(ctx, buf)) {
            Poll::Ready(r) => r,
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }
}
impl<T> Write for AllowStd<T>
where
    T: AsyncWrite + Unpin,
{
    /// Bridges the async write to blocking `Write`; `Pending` becomes
    /// `WouldBlock`, mirroring the `Read` impl above.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match self.with_context(|ctx, stream| stream.poll_write(ctx, buf)) {
            Poll::Ready(r) => r,
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }
    fn flush(&mut self) -> io::Result<()> {
        match self.with_context(|ctx, stream| stream.poll_flush(ctx)) {
            Poll::Ready(r) => r,
            Poll::Pending => Err(io::Error::from(io::ErrorKind::WouldBlock)),
        }
    }
}
/// Translates a blocking-style I/O result back into a `Poll`:
/// `WouldBlock` means the async source was `Pending`; anything else is ready.
fn cvt<T>(r: io::Result<T>) -> Poll<io::Result<T>> {
    if let Err(ref e) = r {
        if e.kind() == io::ErrorKind::WouldBlock {
            return Poll::Pending;
        }
    }
    Poll::Ready(r)
}
/// A TlsStream that lazily does the TLS handshake.
#[derive(Debug)]
pub(crate) struct TlsStream<T> {
    // Underlying transport, wrapped so rustls can drive it through
    // blocking-style Read/Write calls.
    io: AllowStd<T>,
    // Whether close_notify has already been sent during shutdown.
    is_shutdown: bool,
    session: ServerSession,
}
impl<T> TlsStream<T> {
    /// Wraps a transport and a rustls server session. No handshake happens
    /// here; it is driven lazily by the first reads/writes.
    pub(crate) fn new(io: T, session: ServerSession) -> Self {
        TlsStream {
            io: AllowStd {
                inner: io,
                // No task context until the first poll installs one.
                context: null_mut(),
            },
            is_shutdown: false,
            session,
        }
    }
    /// Installs `ctx` into the inner `AllowStd` for the duration of `f`, so
    /// the sync Read/Write calls rustls makes can poll the async transport.
    fn with_context<F, R>(&mut self, ctx: &mut Context<'_>, f: F) -> R
    where
        F: FnOnce(&mut AllowStd<T>, &mut ServerSession) -> R,
        AllowStd<T>: Read + Write,
    {
        self.io.context = ctx as *mut _ as *mut ();
        // Guard resets the pointer to null on scope exit (including panics).
        let g = Guard(self);
        f(&mut (g.0).io, &mut (g.0).session)
    }
}
impl<T: AsyncRead + AsyncWrite + Unpin> AsyncRead for TlsStream<T> {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<usize>> {
        // rustls `Stream` decrypts from the session while `AllowStd` pulls
        // ciphertext from the async transport via the installed context.
        self.with_context(cx, |io, session| cvt(Stream::new(session, io).read(buf)))
    }
}
impl<T: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsStream<T> {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        // Encrypts `buf` through the rustls session; transport I/O is driven
        // through the temporarily installed task context.
        self.with_context(cx, |io, session| cvt(Stream::new(session, io).write(buf)))
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.with_context(cx, |io, session| {
            // Flush TLS-buffered data first, then the transport itself.
            if let Err(e) = ready!(cvt(Stream::new(session, io).flush())) {
                return Poll::Ready(Err(e));
            }
            cvt(io.flush())
        })
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let mut pin = self.get_mut();
        // Nothing to tear down if the TLS handshake never completed.
        if pin.session.is_handshaking() {
            return Poll::Ready(Ok(()));
        }
        // Send close_notify at most once.
        if !pin.is_shutdown {
            pin.session.send_close_notify();
            pin.is_shutdown = true;
        }
        // Ensure the close_notify reaches the peer before closing the
        // underlying transport.
        if let Err(e) = ready!(Pin::new(&mut pin).poll_flush(cx)) {
            return Poll::Ready(Err(e));
        }
        Pin::new(&mut pin.io.inner).poll_shutdown(cx)
    }
}
impl<T: Transport + Unpin> Transport for TlsStream<T> {
    // Delegates straight to the wrapped transport; TLS adds no addressing.
    fn remote_addr(&self) -> Option<SocketAddr> {
        self.io.inner.remote_addr()
    }
}
/// Accepts TCP connections from hyper's `AddrIncoming` and wraps each one in
/// a `TlsStream` that shares a single server configuration.
pub(crate) struct TlsAcceptor {
    config: Arc<ServerConfig>,
    incoming: AddrIncoming,
}
impl TlsAcceptor {
    /// Takes ownership of the server config (wrapping it in an `Arc` so each
    /// accepted session can share it) and of the TCP acceptor.
    pub(crate) fn new(config: ServerConfig, incoming: AddrIncoming) -> TlsAcceptor {
        let config = Arc::new(config);
        TlsAcceptor { config, incoming }
    }
}
impl Accept for TlsAcceptor {
    type Conn = TlsStream<AddrStream>;
    type Error = io::Error;
    /// Polls the inner TCP acceptor and wraps each accepted socket in a
    /// `TlsStream` with a fresh rustls server session; TCP-level accept
    /// errors are passed through unchanged.
    fn poll_accept(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
        let pin = self.get_mut();
        match ready!(Pin::new(&mut pin.incoming).poll_accept(cx)) {
            Some(Ok(sock)) => {
                // `ServerSession::new` only borrows the Arc, so the previous
                // `&pin.config.clone()` was a redundant refcount bump.
                let session = ServerSession::new(&pin.config);
                Poll::Ready(Some(Ok(TlsStream::new(sock, session))))
            }
            Some(Err(e)) => Poll::Ready(Some(Err(e))),
            None => Poll::Ready(None),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Both tests rely on the key/cert fixtures shipped in `examples/tls/`;
    // they fail (via unwrap) if those files are missing or unparsable.
    #[test]
    fn file_cert_key() {
        TlsConfigBuilder::new()
            .key_path("examples/tls/key.rsa")
            .cert_path("examples/tls/cert.pem")
            .build()
            .unwrap();
    }
    #[test]
    fn bytes_cert_key() {
        // Same fixtures, but embedded at compile time and passed as bytes.
        let key = include_str!("../examples/tls/key.rsa");
        let cert = include_str!("../examples/tls/cert.pem");
        TlsConfigBuilder::new()
            .key(key.as_bytes())
            .cert(cert.as_bytes())
            .build()
            .unwrap();
    }
}
|
use glace::Vec3;
use winit::event::ElementState;
/// Tracks the input state the camera needs: the primary mouse button and the
/// mouse motion accumulated while that button is held.
pub struct InputMap {
    // Current state of the primary mouse button.
    mouse1: ElementState,
    // Accumulated (dx, dy) motion since the last `reset_delta`.
    mouse_delta: (f32, f32),
}
impl InputMap {
    /// Starts with the button released and no accumulated motion.
    pub fn new() -> Self {
        Self {
            mouse1: ElementState::Released,
            mouse_delta: (0.0, 0.0),
        }
    }
    /// Records the latest state of the primary mouse button.
    pub fn update_mouse1(&mut self, state: ElementState) {
        self.mouse1 = state;
    }
    /// Accumulates raw mouse motion, but only while the button is pressed.
    pub fn update_mouse_motion(&mut self, (dx, dy): (f64, f64)) {
        if self.mouse1 != ElementState::Pressed {
            return;
        }
        self.mouse_delta.0 += dx as f32;
        self.mouse_delta.1 += dy as f32;
    }
    /// The motion accumulated since the last reset.
    pub fn mouse_delta(&self) -> (f32, f32) {
        self.mouse_delta
    }
    /// Clears the accumulated motion.
    pub fn reset_delta(&mut self) {
        self.mouse_delta = (0.0, 0.0);
    }
}
/// A camera described by a world position and two angles.
#[derive(Debug, Clone)]
pub struct Camera {
    // Scales mouse-delta input; combined with a fixed 0.01 factor in `update`.
    rotation_speed: f32,
    position: Vec3<f32>,
    // Horizontal angle. NOTE(review): yaw/pitch naming inferred from the trig
    // in `view_dir` — confirm the intended convention.
    phi: f32,
    // Vertical angle.
    theta: f32,
}
impl Camera {
    /// Builds a camera at `position` with the given initial angles and a
    /// default rotation speed of 1.0.
    pub fn new(position: Vec3<f32>, phi: f32, theta: f32) -> Self {
        Self {
            position,
            rotation_speed: 1.0,
            phi,
            theta,
        }
    }
    /// Applies the input's accumulated mouse motion to the camera angles.
    pub fn update(&mut self, input: &InputMap) {
        let (dx, dy) = input.mouse_delta();
        self.phi -= dx * self.rotation_speed * 0.01;
        self.theta += dy * self.rotation_speed * 0.01;
    }
    /// The camera's world-space position.
    pub fn position(&self) -> Vec3<f32> {
        self.position
    }
    /// View direction derived from the two angles via spherical coordinates.
    pub fn view_dir(&self) -> Vec3<f32> {
        let (sin_phi, cos_phi) = self.phi.sin_cos();
        let (sin_theta, cos_theta) = self.theta.sin_cos();
        Vec3 {
            x: cos_theta * sin_phi,
            y: sin_theta,
            z: cos_theta * cos_phi,
        }
    }
}
|
// Copyright (C) 2015-2021 Swift Navigation Inc.
// Contact: https://support.swiftnav.com
//
// This source is subject to the license found in the file 'LICENSE' which must
// be be distributed together with this source. All other rights reserved.
//
// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
// EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
//****************************************************************************
// Automatically generated from yaml/swiftnav/sbp/system.yaml
// with generate.py. Please do not hand edit!
//****************************************************************************/
//! Standardized system messages from Swift Navigation devices.
#[allow(unused_imports)]
use std::convert::TryFrom;
#[allow(unused_imports)]
use byteorder::{LittleEndian, ReadBytesExt};
#[allow(unused_imports)]
use crate::serialize::SbpSerialize;
#[allow(unused_imports)]
use crate::SbpString;
/// Experimental telemetry message
///
/// The CSAC telemetry message has an implementation defined telemetry string
/// from a device. It is not produced or available on general Swift Products.
/// It is intended to be a low rate message for status purposes.
///
// NOTE(review): this file is generated from system.yaml — comment changes
// should ultimately be made in the generator, not by hand.
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCsacTelemetry {
    // Filled in from the framing layer, not part of the wire payload.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Index representing the type of telemetry in use. It is implementation
    /// defined.
    pub id: u8,
    /// Comma separated list of values as defined by the index
    pub telemetry: SbpString,
}
impl MsgCsacTelemetry {
    /// Decodes the payload in wire order: `id`, then the telemetry string.
    /// `sender_id` stays `None`; it comes from the frame, not the payload.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgCsacTelemetry, crate::Error> {
        Ok( MsgCsacTelemetry{
            sender_id: None,
            id: _buf.read_u8()?,
            telemetry: crate::parser::read_string(_buf)?,
        } )
    }
}
impl super::SBPMessage for MsgCsacTelemetry {
    fn get_message_name(&self) -> &'static str {
        "MSG_CSAC_TELEMETRY"
    }
    fn get_message_type(&self) -> u16 {
        65284
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
impl super::ConcreteMessage for MsgCsacTelemetry {
    const MESSAGE_TYPE: u16 = 65284;
    const MESSAGE_NAME: &'static str = "MSG_CSAC_TELEMETRY";
}
impl TryFrom<super::SBP> for MsgCsacTelemetry {
    type Error = super::TryFromSBPError;
    // Succeeds only when the enum holds this exact message variant.
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgCsacTelemetry(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
impl crate::serialize::SbpSerialize for MsgCsacTelemetry {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        // Serialization mirrors `parse`: id first, then the telemetry string.
        self.id.append_to_sbp_buffer(buf);
        self.telemetry.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.id.sbp_size();
        size += self.telemetry.sbp_size();
        size
    }
}
/// Experimental telemetry message labels
///
/// The CSAC telemetry message provides labels for each member of the string
/// produced by MSG_CSAC_TELEMETRY. It should be provided by a device at a
/// lower rate than the MSG_CSAC_TELEMETRY.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgCsacTelemetryLabels {
    // Filled in from the framing layer, not part of the wire payload.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Index representing the type of telemetry in use. It is implementation
    /// defined.
    pub id: u8,
    /// Comma separated list of telemetry field values
    pub telemetry_labels: SbpString,
}
impl MsgCsacTelemetryLabels {
    /// Decodes the payload in wire order: `id`, then the label string.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgCsacTelemetryLabels, crate::Error> {
        Ok( MsgCsacTelemetryLabels{
            sender_id: None,
            id: _buf.read_u8()?,
            telemetry_labels: crate::parser::read_string(_buf)?,
        } )
    }
}
impl super::SBPMessage for MsgCsacTelemetryLabels {
    fn get_message_name(&self) -> &'static str {
        "MSG_CSAC_TELEMETRY_LABELS"
    }
    fn get_message_type(&self) -> u16 {
        65285
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
impl super::ConcreteMessage for MsgCsacTelemetryLabels {
    const MESSAGE_TYPE: u16 = 65285;
    const MESSAGE_NAME: &'static str = "MSG_CSAC_TELEMETRY_LABELS";
}
impl TryFrom<super::SBP> for MsgCsacTelemetryLabels {
    type Error = super::TryFromSBPError;
    // Succeeds only when the enum holds this exact message variant.
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgCsacTelemetryLabels(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
impl crate::serialize::SbpSerialize for MsgCsacTelemetryLabels {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        // Serialization mirrors `parse`: id first, then the label string.
        self.id.append_to_sbp_buffer(buf);
        self.telemetry_labels.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.id.sbp_size();
        size += self.telemetry_labels.sbp_size();
        size
    }
}
/// Status of received corrections
///
/// This message provides information about the receipt of Differential
/// corrections. It is expected to be sent with each receipt of a complete
/// corrections packet.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgDgnssStatus {
    // Filled in from the framing layer, not part of the wire payload.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Status flags
    pub flags: u8,
    /// Latency of observation receipt
    pub latency: u16,
    /// Number of signals from base station
    pub num_signals: u8,
    /// Corrections source string
    pub source: SbpString,
}
impl MsgDgnssStatus {
    /// Decodes the payload in wire order (flags, latency, num_signals,
    /// source); multi-byte fields are little-endian.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgDgnssStatus, crate::Error> {
        Ok( MsgDgnssStatus{
            sender_id: None,
            flags: _buf.read_u8()?,
            latency: _buf.read_u16::<LittleEndian>()?,
            num_signals: _buf.read_u8()?,
            source: crate::parser::read_string(_buf)?,
        } )
    }
}
impl super::SBPMessage for MsgDgnssStatus {
    fn get_message_name(&self) -> &'static str {
        "MSG_DGNSS_STATUS"
    }
    fn get_message_type(&self) -> u16 {
        65282
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
impl super::ConcreteMessage for MsgDgnssStatus {
    const MESSAGE_TYPE: u16 = 65282;
    const MESSAGE_NAME: &'static str = "MSG_DGNSS_STATUS";
}
impl TryFrom<super::SBP> for MsgDgnssStatus {
    type Error = super::TryFromSBPError;
    // Succeeds only when the enum holds this exact message variant.
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgDgnssStatus(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
impl crate::serialize::SbpSerialize for MsgDgnssStatus {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        // Serialization mirrors the wire order used by `parse`.
        self.flags.append_to_sbp_buffer(buf);
        self.latency.append_to_sbp_buffer(buf);
        self.num_signals.append_to_sbp_buffer(buf);
        self.source.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.flags.sbp_size();
        size += self.latency.sbp_size();
        size += self.num_signals.sbp_size();
        size += self.source.sbp_size();
        size
    }
}
/// Offset of the local time with respect to GNSS time
///
/// The GNSS time offset message contains the information that is needed to
/// translate messages tagged with a local timestamp (e.g. IMU or wheeltick
/// messages) to GNSS time for the sender producing this message.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgGnssTimeOffset {
    // Filled in from the framing layer, not part of the wire payload.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Weeks portion of the time offset
    pub weeks: i16,
    /// Milliseconds portion of the time offset
    pub milliseconds: i32,
    /// Microseconds portion of the time offset
    pub microseconds: i16,
    /// Status flags (reserved)
    pub flags: u8,
}
impl MsgGnssTimeOffset {
    /// Decodes the payload in wire order (weeks, milliseconds, microseconds,
    /// flags); multi-byte fields are little-endian and signed.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgGnssTimeOffset, crate::Error> {
        Ok( MsgGnssTimeOffset{
            sender_id: None,
            weeks: _buf.read_i16::<LittleEndian>()?,
            milliseconds: _buf.read_i32::<LittleEndian>()?,
            microseconds: _buf.read_i16::<LittleEndian>()?,
            flags: _buf.read_u8()?,
        } )
    }
}
impl super::SBPMessage for MsgGnssTimeOffset {
    fn get_message_name(&self) -> &'static str {
        "MSG_GNSS_TIME_OFFSET"
    }
    fn get_message_type(&self) -> u16 {
        65287
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
impl super::ConcreteMessage for MsgGnssTimeOffset {
    const MESSAGE_TYPE: u16 = 65287;
    const MESSAGE_NAME: &'static str = "MSG_GNSS_TIME_OFFSET";
}
impl TryFrom<super::SBP> for MsgGnssTimeOffset {
    type Error = super::TryFromSBPError;
    // Succeeds only when the enum holds this exact message variant.
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgGnssTimeOffset(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
impl crate::serialize::SbpSerialize for MsgGnssTimeOffset {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        // Serialization mirrors the wire order used by `parse`.
        self.weeks.append_to_sbp_buffer(buf);
        self.milliseconds.append_to_sbp_buffer(buf);
        self.microseconds.append_to_sbp_buffer(buf);
        self.flags.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.weeks.sbp_size();
        size += self.milliseconds.sbp_size();
        size += self.microseconds.sbp_size();
        size += self.flags.sbp_size();
        size
    }
}
/// Solution Group Metadata
///
/// This leading message lists the time metadata of the Solution Group. It
/// also lists the atomic contents (i.e. types of messages included) of the
/// Solution Group.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgGroupMeta {
    // Filled in from the framing layer, not part of the wire payload.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Id of the Msgs Group, 0 is Unknown, 1 is Bestpos, 2 is Gnss
    pub group_id: u8,
    /// Status flags (reserved)
    pub flags: u8,
    /// Size of list group_msgs
    pub n_group_msgs: u8,
    /// An in-order list of message types included in the Solution Group,
    /// including GROUP_META itself
    pub group_msgs: Vec<u16>,
}
impl MsgGroupMeta {
    /// Decodes the payload in wire order (group_id, flags, n_group_msgs,
    /// group_msgs).
    // NOTE(review): `n_group_msgs` is not used to bound `read_u16_array` —
    // the array length appears to come from the remaining buffer. Confirm
    // this matches the generator's intent.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgGroupMeta, crate::Error> {
        Ok( MsgGroupMeta{
            sender_id: None,
            group_id: _buf.read_u8()?,
            flags: _buf.read_u8()?,
            n_group_msgs: _buf.read_u8()?,
            group_msgs: crate::parser::read_u16_array(_buf)?,
        } )
    }
}
impl super::SBPMessage for MsgGroupMeta {
    fn get_message_name(&self) -> &'static str {
        "MSG_GROUP_META"
    }
    fn get_message_type(&self) -> u16 {
        65290
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
impl super::ConcreteMessage for MsgGroupMeta {
    const MESSAGE_TYPE: u16 = 65290;
    const MESSAGE_NAME: &'static str = "MSG_GROUP_META";
}
impl TryFrom<super::SBP> for MsgGroupMeta {
    type Error = super::TryFromSBPError;
    // Succeeds only when the enum holds this exact message variant.
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgGroupMeta(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
impl crate::serialize::SbpSerialize for MsgGroupMeta {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        // Serialization mirrors the wire order used by `parse`.
        self.group_id.append_to_sbp_buffer(buf);
        self.flags.append_to_sbp_buffer(buf);
        self.n_group_msgs.append_to_sbp_buffer(buf);
        self.group_msgs.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.group_id.sbp_size();
        size += self.flags.sbp_size();
        size += self.n_group_msgs.sbp_size();
        size += self.group_msgs.sbp_size();
        size
    }
}
/// System heartbeat message
///
/// The heartbeat message is sent periodically to inform the host or other
/// attached devices that the system is running. It is used to monitor system
/// malfunctions. It also contains status flags that indicate to the host the
/// status of the system and whether it is operating correctly. Currently, the
/// expected heartbeat interval is 1 sec.
///
/// The system error flag is used to indicate that an error has occurred in
/// the system. To determine the source of the error, the remaining error
/// flags should be inspected.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgHeartbeat {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Status flags
    pub flags: u32,
}
impl MsgHeartbeat {
    /// Parses the payload: a single little-endian u32 of status flags.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgHeartbeat, crate::Error> {
        Ok( MsgHeartbeat{
            sender_id: None,
            flags: _buf.read_u32::<LittleEndian>()?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgHeartbeat {
    fn get_message_name(&self) -> &'static str {
        "MSG_HEARTBEAT"
    }
    fn get_message_type(&self) -> u16 {
        65535
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgHeartbeat {
    const MESSAGE_TYPE: u16 = 65535;
    const MESSAGE_NAME: &'static str = "MSG_HEARTBEAT";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgHeartbeat {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgHeartbeat(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; must match `parse`.
impl crate::serialize::SbpSerialize for MsgHeartbeat {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.flags.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.flags.sbp_size();
        size
    }
}
/// Inertial Navigation System status message
///
/// The INS status message describes the state of the operation and
/// initialization of the inertial navigation system.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgInsStatus {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Status flags
    pub flags: u32,
}
impl MsgInsStatus {
    /// Parses the payload: a single little-endian u32 of status flags.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgInsStatus, crate::Error> {
        Ok( MsgInsStatus{
            sender_id: None,
            flags: _buf.read_u32::<LittleEndian>()?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgInsStatus {
    fn get_message_name(&self) -> &'static str {
        "MSG_INS_STATUS"
    }
    fn get_message_type(&self) -> u16 {
        65283
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgInsStatus {
    const MESSAGE_TYPE: u16 = 65283;
    const MESSAGE_NAME: &'static str = "MSG_INS_STATUS";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgInsStatus {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgInsStatus(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; must match `parse`.
impl crate::serialize::SbpSerialize for MsgInsStatus {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.flags.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.flags.sbp_size();
        size
    }
}
/// Inertial Navigation System update status message
///
/// The INS update status message contains information about executed and
/// rejected INS updates. This message is expected to be extended in the
/// future as new types of measurements are being added.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgInsUpdates {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// GPS Time of Week
    pub tow: u32,
    /// GNSS position update status flags
    pub gnsspos: u8,
    /// GNSS velocity update status flags
    pub gnssvel: u8,
    /// Wheelticks update status flags
    pub wheelticks: u8,
    /// Speed update status flags
    pub speed: u8,
    /// NHC update status flags
    pub nhc: u8,
    /// Zero velocity update status flags
    pub zerovel: u8,
}
impl MsgInsUpdates {
    /// Parses the payload in wire order: little-endian u32 tow followed by
    /// six single-byte status fields.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgInsUpdates, crate::Error> {
        Ok( MsgInsUpdates{
            sender_id: None,
            tow: _buf.read_u32::<LittleEndian>()?,
            gnsspos: _buf.read_u8()?,
            gnssvel: _buf.read_u8()?,
            wheelticks: _buf.read_u8()?,
            speed: _buf.read_u8()?,
            nhc: _buf.read_u8()?,
            zerovel: _buf.read_u8()?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgInsUpdates {
    fn get_message_name(&self) -> &'static str {
        "MSG_INS_UPDATES"
    }
    fn get_message_type(&self) -> u16 {
        65286
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
    /// Derives a rover message time from `tow` (milliseconds → seconds).
    /// NOTE(review): week number is hard-coded to 0 because this message
    /// carries no week field; only the time-of-week component is kept.
    #[cfg(feature = "swiftnav-rs")]
    fn gps_time(
        &self,
    ) -> Option<std::result::Result<crate::time::MessageTime, crate::time::GpsTimeError>> {
        let tow_s = (self.tow as f64) / 1000.0;
        let gps_time = match crate::time::GpsTime::new(0, tow_s) {
            Ok(gps_time) => gps_time.tow(),
            Err(e) => return Some(Err(e.into())),
        };
        Some(Ok(crate::time::MessageTime::Rover(gps_time.into())))
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgInsUpdates {
    const MESSAGE_TYPE: u16 = 65286;
    const MESSAGE_NAME: &'static str = "MSG_INS_UPDATES";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgInsUpdates {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgInsUpdates(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; field order must match `parse`.
impl crate::serialize::SbpSerialize for MsgInsUpdates {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.tow.append_to_sbp_buffer(buf);
        self.gnsspos.append_to_sbp_buffer(buf);
        self.gnssvel.append_to_sbp_buffer(buf);
        self.wheelticks.append_to_sbp_buffer(buf);
        self.speed.append_to_sbp_buffer(buf);
        self.nhc.append_to_sbp_buffer(buf);
        self.zerovel.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.tow.sbp_size();
        size += self.gnsspos.sbp_size();
        size += self.gnssvel.sbp_size();
        size += self.wheelticks.sbp_size();
        size += self.speed.sbp_size();
        size += self.nhc.sbp_size();
        size += self.zerovel.sbp_size();
        size
    }
}
/// Local time at detection of PPS pulse
///
/// The PPS time message contains the value of the sender's local time in
/// microseconds at the moment a pulse is detected on the PPS input. This is
/// to be used for synchronisation of sensor data sampled with a local
/// timestamp (e.g. IMU or wheeltick messages) where GNSS time is unknown to
/// the sender.
///
/// The local time used to timestamp the PPS pulse must be generated by the
/// same clock which is used to timestamp the IMU/wheel sensor data and should
/// follow the same roll-over rules. A separate MSG_PPS_TIME message should
/// be sent for each source of sensor data which uses PPS-relative
/// timestamping. The sender ID for each of these MSG_PPS_TIME messages
/// should match the sender ID of the respective sensor data.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgPpsTime {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Local time in microseconds
    pub time: u64,
    /// Status flags
    pub flags: u8,
}
impl MsgPpsTime {
    /// Parses the payload: little-endian u64 local time then one flags byte.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgPpsTime, crate::Error> {
        Ok( MsgPpsTime{
            sender_id: None,
            time: _buf.read_u64::<LittleEndian>()?,
            flags: _buf.read_u8()?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgPpsTime {
    fn get_message_name(&self) -> &'static str {
        "MSG_PPS_TIME"
    }
    fn get_message_type(&self) -> u16 {
        65288
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgPpsTime {
    const MESSAGE_TYPE: u16 = 65288;
    const MESSAGE_NAME: &'static str = "MSG_PPS_TIME";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgPpsTime {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgPpsTime(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; must match `parse`.
impl crate::serialize::SbpSerialize for MsgPpsTime {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.time.append_to_sbp_buffer(buf);
        self.flags.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.time.sbp_size();
        size += self.flags.sbp_size();
        size
    }
}
/// System start-up message
///
/// The system start-up message is sent once on system start-up. It notifies
/// the host or other attached devices that the system has started and is now
/// ready to respond to commands or configuration requests.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgStartup {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Cause of startup
    pub cause: u8,
    /// Startup type
    pub startup_type: u8,
    /// Reserved
    pub reserved: u16,
}
impl MsgStartup {
    /// Parses the payload in wire order: two u8 fields then a little-endian
    /// reserved u16.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgStartup, crate::Error> {
        Ok( MsgStartup{
            sender_id: None,
            cause: _buf.read_u8()?,
            startup_type: _buf.read_u8()?,
            reserved: _buf.read_u16::<LittleEndian>()?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgStartup {
    fn get_message_name(&self) -> &'static str {
        "MSG_STARTUP"
    }
    fn get_message_type(&self) -> u16 {
        65280
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgStartup {
    const MESSAGE_TYPE: u16 = 65280;
    const MESSAGE_NAME: &'static str = "MSG_STARTUP";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgStartup {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgStartup(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; field order must match `parse`.
impl crate::serialize::SbpSerialize for MsgStartup {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.cause.append_to_sbp_buffer(buf);
        self.startup_type.append_to_sbp_buffer(buf);
        self.reserved.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.cause.sbp_size();
        size += self.startup_type.sbp_size();
        size += self.reserved.sbp_size();
        size
    }
}
/// Status report message
///
/// The status report is sent periodically to inform the host or other
/// attached devices that the system is running. It is used to monitor system
/// malfunctions. It contains status reports that indicate to the host the
/// status of each sub-system and whether it is operating correctly.
///
/// Interpretation of the subsystem specific status code is product dependent,
/// but if the generic status code is initializing, it should be ignored.
/// Refer to product documentation for details.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct MsgStatusReport {
    // Frame-header field, not payload; skipped by serde and None after parse.
    #[cfg_attr(feature = "sbp_serde", serde(skip_serializing))]
    pub sender_id: Option<u16>,
    /// Identity of reporting system
    pub reporting_system: u16,
    /// SBP protocol version
    pub sbp_version: u16,
    /// Increments on each status report sent
    pub sequence: u32,
    /// Number of seconds since system start-up
    pub uptime: u32,
    /// Reported status of individual subsystems
    pub status: Vec<SubSystemReport>,
}
impl MsgStatusReport {
    /// Parses the payload in wire order (all multi-byte fields little-endian);
    /// the trailing sub-system report list consumes the rest of the buffer.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<MsgStatusReport, crate::Error> {
        Ok( MsgStatusReport{
            sender_id: None,
            reporting_system: _buf.read_u16::<LittleEndian>()?,
            sbp_version: _buf.read_u16::<LittleEndian>()?,
            sequence: _buf.read_u32::<LittleEndian>()?,
            uptime: _buf.read_u32::<LittleEndian>()?,
            status: SubSystemReport::parse_array(_buf)?,
        } )
    }
}
// Runtime message metadata plus framing helpers.
impl super::SBPMessage for MsgStatusReport {
    fn get_message_name(&self) -> &'static str {
        "MSG_STATUS_REPORT"
    }
    fn get_message_type(&self) -> u16 {
        65534
    }
    fn get_sender_id(&self) -> Option<u16> {
        self.sender_id
    }
    fn set_sender_id(&mut self, new_id: u16) {
        self.sender_id = Some(new_id);
    }
    fn to_frame(&self) -> std::result::Result<Vec<u8>, crate::FramerError> {
        let mut frame = Vec::new();
        self.write_frame(&mut frame)?;
        Ok(frame)
    }
    fn write_frame(&self, frame: &mut Vec<u8>) -> std::result::Result<(), crate::FramerError> {
        crate::write_frame(self, frame)
    }
}
// Compile-time identifiers mirroring the SBPMessage getters above.
impl super::ConcreteMessage for MsgStatusReport {
    const MESSAGE_TYPE: u16 = 65534;
    const MESSAGE_NAME: &'static str = "MSG_STATUS_REPORT";
}
// Fallible downcast from the SBP enum wrapper.
impl TryFrom<super::SBP> for MsgStatusReport {
    type Error = super::TryFromSBPError;
    fn try_from(msg: super::SBP) -> Result<Self, Self::Error> {
        match msg {
            super::SBP::MsgStatusReport(m) => Ok(m),
            _ => Err(super::TryFromSBPError),
        }
    }
}
// Payload serialization; field order must match `parse`.
impl crate::serialize::SbpSerialize for MsgStatusReport {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.reporting_system.append_to_sbp_buffer(buf);
        self.sbp_version.append_to_sbp_buffer(buf);
        self.sequence.append_to_sbp_buffer(buf);
        self.uptime.append_to_sbp_buffer(buf);
        self.status.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.reporting_system.sbp_size();
        size += self.sbp_version.sbp_size();
        size += self.sequence.sbp_size();
        size += self.uptime.sbp_size();
        size += self.status.sbp_size();
        size
    }
}
/// Sub-system Status report
///
/// Report the general and specific state of a sub-system. If the generic
/// state is reported as initializing, the specific state should be ignored.
///
#[cfg_attr(feature = "sbp_serde", derive(serde::Serialize))]
#[derive(Debug, Clone)]
#[allow(non_snake_case)]
pub struct SubSystemReport {
    /// Identity of reporting subsystem
    pub component: u16,
    /// Generic form status report
    pub generic: u8,
    /// Subsystem specific status code
    pub specific: u8,
}
impl SubSystemReport {
    /// Parses a single report in wire order: a little-endian u16 component id
    /// followed by the generic and specific status bytes.
    #[rustfmt::skip]
    pub fn parse(_buf: &mut &[u8]) -> Result<SubSystemReport, crate::Error> {
        Ok( SubSystemReport{
            component: _buf.read_u16::<LittleEndian>()?,
            generic: _buf.read_u8()?,
            specific: _buf.read_u8()?,
        } )
    }
    /// Parses reports until the buffer is exhausted.
    pub fn parse_array(buf: &mut &[u8]) -> Result<Vec<SubSystemReport>, crate::Error> {
        let mut v = Vec::new();
        // Idiomatic emptiness check (clippy::len_zero) instead of `len() > 0`.
        while !buf.is_empty() {
            v.push(SubSystemReport::parse(buf)?);
        }
        Ok(v)
    }
    /// Parses exactly `n` reports; errors if the buffer runs short.
    pub fn parse_array_limit(
        buf: &mut &[u8],
        n: usize,
    ) -> Result<Vec<SubSystemReport>, crate::Error> {
        // Element count is known up front — preallocate to avoid regrowth.
        let mut v = Vec::with_capacity(n);
        for _ in 0..n {
            v.push(SubSystemReport::parse(buf)?);
        }
        Ok(v)
    }
}
// Payload serialization; field order must match `parse`.
impl crate::serialize::SbpSerialize for SubSystemReport {
    #[allow(unused_variables)]
    fn append_to_sbp_buffer(&self, buf: &mut Vec<u8>) {
        self.component.append_to_sbp_buffer(buf);
        self.generic.append_to_sbp_buffer(buf);
        self.specific.append_to_sbp_buffer(buf);
    }
    fn sbp_size(&self) -> usize {
        let mut size = 0;
        size += self.component.sbp_size();
        size += self.generic.sbp_size();
        size += self.specific.sbp_size();
        size
    }
}
|
use crate::DeriveInputExt;
use itertools::Itertools;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{DeriveInput, Ident, LitInt, LitStr, Type};
// Generates the lock-acquisition plumbing for a derive-style locks struct:
// `AsyncTryFrom<&Ctx>`, a traced `from_ctx` constructor, a `Tag` impl that
// combines all field tags, and an `AsRef<FieldTy>` impl per field.
//
// Fields are processed in chunks of 5: a full chunk is fetched with a single
// `storm::async_ref_block5(ctx)` await (one tuple of five refs), while a
// trailing partial chunk falls back to one `AsRefAsync::as_ref_async` await
// per field.
#[allow(clippy::expect_used)]
pub(crate) fn locks_await(input: &DeriveInput) -> TokenStream {
    let type_ident = &input.ident;
    // Span name used by the `tracing::instrument` attribute on `from_ctx`.
    let from_ctx = LitStr::new(&format!("{}::from_ctx", &type_ident), type_ident.span());
    let mut init_fields = Vec::new();
    let mut as_refs = Vec::new();
    let mut tags = Vec::new();
    let mut declare = Vec::new();
    // Scratch buffer for the current chunk, reused across iterations.
    let mut part = Vec::new();
    for (idx, chunks) in input
        .fields()
        .expect("fields")
        .iter()
        .chunks(5)
        .into_iter()
        .enumerate()
    {
        part.clear();
        part.extend(chunks);
        if part.len() == 5 {
            // Full chunk: bind the 5-ref tuple to a synthetic `blockN` local.
            let block = Ident::new(&format!("block{idx}"), Span::call_site());
            declare.push(quote! {let #block = storm::tri!(storm::async_ref_block5(ctx).await); });
            for (idx, field) in part.iter().enumerate() {
                let f_ident = &field.ident;
                // Tuple index (0..=4) into the `blockN` binding.
                let idx = LitInt::new(&idx.to_string(), Span::call_site());
                let ty = unref(&field.ty);
                init_fields.push(quote! { #f_ident: #block.#idx, });
                as_refs.push(quote! {
                    impl<'a> AsRef<#ty> for #type_ident<'a> {
                        #[inline]
                        fn as_ref(&self) -> &#ty {
                            &self.#f_ident
                        }
                    }
                });
                tags.push(quote!(storm::Tag::tag(&self.#f_ident)));
            }
        } else {
            // Partial trailing chunk: await each field individually.
            for field in &part {
                let f_ident = &field.ident;
                let ty = unref(&field.ty);
                init_fields.push(
                    quote!(#f_ident: storm::tri!(storm::AsRefAsync::as_ref_async(ctx).await),),
                );
                as_refs.push(quote! {
                    impl<'a> AsRef<#ty> for #type_ident<'a> {
                        #[inline]
                        fn as_ref(&self) -> &#ty {
                            &self.#f_ident
                        }
                    }
                });
                tags.push(quote!(storm::Tag::tag(&self.#f_ident)));
            }
        }
    }
    // Splice the accumulated fragments into the final token stream.
    let declare = quote!(#(#declare)*);
    let as_refs = quote!(#(#as_refs)*);
    let init_fields = quote!(#(#init_fields)*);
    let tags = quote!(storm::version_tag::combine(&[#(#tags,)*]));
    quote! {
        impl<'a> storm::AsyncTryFrom<'a, &'a storm::Ctx> for #type_ident<'a> {
            #[allow(clippy::eval_order_dependence)]
            fn async_try_from(ctx: &'a storm::Ctx) -> storm::BoxFuture<'a, storm::Result<#type_ident<'a>>> {
                Box::pin(async move {
                    #declare
                    Ok(#type_ident {
                        #init_fields
                    })
                })
            }
        }
        impl<'a> #type_ident<'a> {
            #[tracing::instrument(name = #from_ctx, level = "debug", skip(ctx), err)]
            pub fn from_ctx(ctx: &'a storm::Ctx) -> storm::BoxFuture<'a, storm::Result<storm::CtxLocks<'a, #type_ident<'a>>>> {
                Box::pin(async move {
                    Ok(storm::CtxLocks {
                        locks: storm::tri!(storm::AsyncTryFrom::async_try_from(ctx).await),
                        ctx,
                    })
                })
            }
        }
        impl<'a> storm::Tag for #type_ident<'a> {
            fn tag(&self) -> storm::VersionTag {
                #tags
            }
        }
        #as_refs
    }
}
fn unref(t: &Type) -> &Type {
match t {
Type::Reference(r) => unref(&r.elem),
_ => t,
}
}
|
/// Entry point: constructs two empty boolean vectors (repeat count of zero)
/// and discards them immediately; the program has no observable output.
fn main() {
    let _a: Vec<bool> = vec![true; 0];
    let _b: Vec<bool> = vec![false; 0];
}
|
pub use super::value::Value;
/// A single element wrapping a [`Value`].
pub struct Element {
    // Wrapped payload; private, so construction/access presumably happen via
    // impls not visible in this chunk — TODO confirm.
    value: Value
}
extern crate day_08_registers;
extern crate utils;
use day_08_registers::{Processor, ProcessorTrait};
use utils::{file2str, str2linevec};

/// Reads the puzzle input, replays every instruction on a fresh processor,
/// and prints the highest value held in any register.
fn main() {
    // Load the input file and split it into one line per instruction.
    let raw = file2str("puzzle.txt");
    let lines = str2linevec(&raw);

    // Execute the program line by line.
    let mut cpu = Processor::new();
    for instruction in &lines {
        cpu.process_instruction(instruction);
    }

    println!(
        "Highest value in register is: {}",
        cpu.highest_value_in_register()
    );
}
|
use std::fmt::Write;
use crate::{
commands::{osu::ProfileSize, utility::GuildData},
database::{EmbedsSize, GuildConfig, MinimizedPp},
embeds::Author,
};
/// Data backing the "current server configuration" embed shown to users.
pub struct ServerConfigEmbed {
    // Embed author line: guild name plus (optionally) its icon URL.
    author: Author,
    // Pre-rendered code-block body describing the configuration.
    description: String,
    // Static footnote explaining the `*` markers in the description.
    footer: &'static str,
    title: &'static str,
}
impl ServerConfigEmbed {
    /// Builds the embed for `guild`'s current configuration.
    ///
    /// The description is a fixed-width code block; in each option column a
    /// leading `>` marks the selected value and a space marks unselected
    /// ones. Options suffixed with `*` can be overridden per-user (see the
    /// footer text).
    pub fn new(guild: GuildData, config: GuildConfig, authorities: &[String]) -> Self {
        let mut author = Author::new(guild.name);
        // Attach the guild icon if one exists; animated hashes use gif.
        if let Some(ref hash) = guild.icon {
            let url = format!(
                "https://cdn.discordapp.com/icons/{}/{hash}.{}",
                guild.id,
                if hash.is_animated() { "gif" } else { "webp" }
            );
            author = author.icon_url(url);
        }
        let title = "Current server configuration:";
        let mut description = String::with_capacity(256);
        description.push_str("```\nAuthorities: ");
        // Comma-separated @names, or "None" when the slice is empty.
        let mut authorities = authorities.iter();
        if let Some(auth) = authorities.next() {
            let _ = write!(description, "@{auth}");
            for auth in authorities {
                let _ = write!(description, ", @{auth}");
            }
        } else {
            description.push_str("None");
        }
        description.push_str("\nPrefixes: ");
        // Same first/rest pattern; no "None" fallback here — presumably at
        // least one prefix is always configured. TODO confirm.
        let mut prefixes = config.prefixes.iter();
        if let Some(prefix) = prefixes.next() {
            let _ = write!(description, "`{prefix}`");
            for prefix in prefixes {
                let _ = write!(description, ", `{prefix}`");
            }
        }
        // Three-column table: song commands | retries | minimized PP.
        description.push_str("\n\nSong commands: | Retries*: | Minimized PP*:\n");
        let songs = config.with_lyrics();
        if songs {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("enabled | ");
        let retries = config.show_retries();
        if retries {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("show | ");
        let minimized_pp = config.minimized_pp();
        if minimized_pp == MinimizedPp::Max {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("max pp\n");
        // Second row: the complementary choice of each column.
        if songs {
            description.push(' ');
        } else {
            description.push('>');
        }
        description.push_str("disabled | ");
        if retries {
            description.push(' ');
        } else {
            description.push('>');
        }
        description.push_str("hide | ");
        if minimized_pp == MinimizedPp::IfFc {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str(
            "if FC\n-------------------------------------------\n\
            Embeds*: | Profile*:\n",
        );
        // Two-column table: embed size (3 options) | profile size (3 options).
        let embeds = config.embeds_size();
        if embeds == EmbedsSize::AlwaysMinimized {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("always minimized | ");
        let profile = config.profile_size.unwrap_or_default();
        if profile == ProfileSize::Compact {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("compact\n");
        if embeds == EmbedsSize::AlwaysMaximized {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("always maximized | ");
        if profile == ProfileSize::Medium {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("medium\n");
        if embeds == EmbedsSize::InitialMaximized {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("initial maximized | ");
        if profile == ProfileSize::Full {
            description.push('>');
        } else {
            description.push(' ');
        }
        description.push_str("full\n-------------------------------------------\n");
        let track_limit = config.track_limit();
        let _ = writeln!(description, "Default track limit: {track_limit}");
        // Close the code block opened at the top of the description.
        description.push_str("```");
        Self {
            author,
            description,
            footer: "*: Only applies if not set in member's user config",
            title,
        }
    }
}
// Project macro: generates the standard embed-builder conversion for the
// listed fields (author, description, footer, title).
impl_builder!(ServerConfigEmbed {
    author,
    description,
    footer,
    title,
});
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.