text stringlengths 8 4.13M |
|---|
/*!
Contains the `Value` type that represents a conditional compilation expression's value.
*/
use WinVersion;
use features::{Features, Partitions, WinVersions};
/**
Represents a conditional compilation expression's value.
Under normal circumstances, this would be synonymous with "an integer". These are not normal circumstances.
This type also defines operations for combining values. *Many* combinations are undefined. The general attitude is that *because* there are no complete, well-defined semantics for this, operations are implemented as-needed with whatever semantics make sense for that narrow case.
In other words, don't speculatively implement operations, because it may not be clear *what* an operation even *means*.
*/
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum Value {
    /**
    A boolean true/false value. These are *not* used to represent logical values. They exist as a kludge for when an expression needs to be "disableable" in the abstract.
    The motivating example was something akin to:
    ```c
    #if !(defined(_CONTRACT_GEN) && ... && defined(_X86_))
    ...
    ```
    The expression was falsy, but because unknown symbols are simply ignored, it was being evaluated as a feature set that excluded the x86 architecture.
    The solution was to represent the result of `defined(_CONTRACT_GEN)` as `false`, and ensure that taking the intersection of `false` and any feature set resulted in `false`.
    Also note that converting *either* true or false to a feature set yields the default "says nothing" feature set.
    */
    Bool(bool),
    /**
    An integer value.
    We are only interested in these when used as version numbers.
    */
    Int(u32),
    /**
    A feature set.
    */
    Feat(Features),
    /**
    An API partition.
    This is distinct from a full feature set, mostly to ensure `WINAPI_FAMILY_PARTITION(_)` expressions have vaguely reasonable semantics (it ends up acting as a type-check).
    */
    Part(Partitions),
    /**
    A "full" (*i.e.* `NTDDI_VERSION`) version value. Usually generated by `NTDDI_*` identifiers.
    */
    FullVersionValue(u32),
    /**
    A "short" (*i.e.* `_WIN32_WINNT`) version value. Usually generated by `WIN32_WINNT_*` identifiers, but can *also* come from right-shift expressions (*i.e.* `FullVersionValue(_) >> Int(16)`).
    */
    ShortVersionValue(u32),
    /**
    The abstract "full version". Only useful when compared to a version value, or passed through an `OSVER(_)` or `SPVER(_)` expression.
    */
    FullVersion,
    /**
    The abstract "short version". Only useful when compared to a version value.
    */
    ShortVersion,
    /**
    The abstract "full OS version". Differs from `FullVersion` in that it covers all service packs.
    */
    OsVersion,
    /**
    The abstract "full service pack version". Differs from `FullVersion` in that it doesn't cover any specific major OS version.
    */
    SpVersion,
    /**
    An ignorable value.
    */
    Ignore,
}
impl Value {
/**
Tries to convert this value into a feature set.
*/
pub fn to_features(self) -> Result<Features, String> {
use self::Value::*;
match self {
Bool(_) => Ok(Features::default()),
Feat(f) => Ok(f),
Part(p) => Ok(p.into()),
FullVersionValue(v) => {
let wv = WinVersion::from_u32_round_up(v).expect("valid full version");
Ok(WinVersions::from(wv).into())
},
ShortVersionValue(v) => {
let wv = WinVersion::from_u32_round_up(v << 16).expect("valid full version");
Ok(WinVersions::from(wv).into())
},
FullVersion
| ShortVersion
| Ignore => Ok(Features::default()),
n => Err(format!("cannot convert to Features: {:?}", n))
}
}
/**
Computes the complement of this value.
*/
pub fn complement(self) -> Result<Value, String> {
use self::Value::*;
match self {
Ignore => Ok(Ignore),
Bool(b) => Ok(Bool(!b)),
Feat(f) => Ok(Feat(f.complement())),
Part(p) => Ok(Part(!p)),
n => Err(format!("invalid op: ! {:?}", n))
}
}
/**
Computes the intersection of two values.
*/
pub fn and(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Bool(b)) | (Bool(b), Ignore) => Ok(Bool(b)),
(Ignore, Feat(f)) | (Feat(f), Ignore) => Ok(Feat(f)),
(Bool(true), Feat(f)) | (Feat(f), Bool(true)) => Ok(Feat(f)),
(Bool(false), Feat(_)) | (Feat(_), Bool(false)) => Ok(Ignore),
(Feat(l), Feat(r)) => Ok(Feat(l.and(r))),
(Part(l), Part(r)) => Ok(Part(l & r)),
(l, r) => Err(format!("invalid op: {:?} && {:?}", l, r))
}
}
/**
Computes the union of two values.
*/
pub fn or(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Bool(b)) | (Bool(b), Ignore) => Ok(Bool(b)),
(Ignore, Feat(f)) | (Feat(f), Ignore) => Ok(Feat(f)),
(Feat(l), Feat(r)) => Ok(Feat(l.or(r))),
(Part(l), Part(r)) => Ok(Part(l | r)),
(l, r) => Err(format!("invalid op: {:?} || {:?}", l, r))
}
}
/**
Computes the feature set given by an equality comparison of two values.
*/
pub fn eq(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersion, FullVersionValue(i)) => {
let start = WinVersion::from_u32_round_up(i).expect("valid version for fs == fvv");
let end = WinVersion::from_u32_round_up((i & 0xFFFF_0000) + 1);
let wv = match end {
Some(end) => WinVersions::from(start..end),
None => WinVersions::from(start..)
};
Ok(Feat(wv.into()))
},
(OsVersion, FullVersionValue(i)) => {
let start = WinVersion::from_u32_round_up(i).expect("valid version for os == fvv");
let end = WinVersion::from_u32_round_up((i & 0xFFFF_0000) + 0x1_0000);
let wv = match end {
Some(end) => WinVersions::from(start..end),
None => WinVersions::from(start..)
};
Ok(Feat(wv.into()))
},
(ShortVersion, Int(v)) | (Int(v), ShortVersion) => {
let start = WinVersion::from_u32_round_up(v << 16).expect("valid version for sv == int");
let end = WinVersion::from_u32_round_up(((v << 16) + 0x1_0000));
let wv = WinVersions::from(Some(start)..end);
Ok(Feat(wv.into()))
}
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} == {:?}", l, r))
}
}
/**
Computes the feature set given by an inequality comparison of two values.
*/
pub fn ne(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(OsVersion, FullVersionValue(i)) => {
let start = WinVersion::from_u32_round_up(i).expect("valid full os version for !=");
let end = WinVersion::from_u32_round_up((i & 0xFFFF_0000) + 0x1_0000);
let wv = match end {
Some(end) => WinVersions::from((..start, end..)),
None => WinVersions::from(..start)
};
Ok(Feat(wv.into()))
},
(ShortVersion, ShortVersionValue(i)) => {
let start = WinVersion::from_u32_round_up(i << 16).expect("valid short version for !=");
let end = start.next_version();
let wv = match end {
Some(end) => WinVersions::from((..start, end..)),
None => WinVersions::from(..start)
};
Ok(Feat(wv.into()))
},
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} != {:?}", l, r))
}
}
/**
Computes the feature set given by a less-than comparison of two values.
*/
pub fn lt(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersion, FullVersionValue(i)) | (FullVersion, Int(i)) => {
let end = WinVersion::from_u32_round_up(i).expect("valid full version for <");
Ok(Feat(WinVersions::from(..end).into()))
},
(ShortVersion, ShortVersionValue(i)) | (ShortVersion, Int(i)) => {
let end = WinVersion::from_u32_round_up(i << 16).expect("valid short version for <");
Ok(Feat(WinVersions::from(..end).into()))
},
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} < {:?}", l, r))
}
}
/**
Computes the feature set given by a less-than or equal comparison of two values.
*/
pub fn le(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersion, FullVersionValue(i)) | (FullVersion, Int(i)) => {
let end = WinVersion::from_u32_round_up(i + 1).expect("valid full version for <=");
Ok(Feat(WinVersions::from(..end).into()))
},
(ShortVersion, ShortVersionValue(i)) | (ShortVersion, Int(i)) => {
let end = WinVersion::from_u32_round_up((i << 16) + 1).expect("valid short version for <=");
Ok(Feat(WinVersions::from(..end).into()))
},
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} <= {:?}", l, r))
}
}
/**
Computes the feature set given by a greater-than comparison of two values.
*/
pub fn gt(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersion, FullVersionValue(i)) | (FullVersion, Int(i)) => {
let start = WinVersion::from_u32_round_up(i + 1).expect("valid full version for >");
Ok(Feat(WinVersions::from(start..).into()))
},
(ShortVersion, ShortVersionValue(i)) | (ShortVersion, Int(i)) => {
let start = WinVersion::from_u32_round_up((i << 16) + 1).expect("valid full version for >");
Ok(Feat(WinVersions::from(start..).into()))
},
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} > {:?}", l, r))
}
}
/**
Computes the feature set given by a greater-than or equal comparison of two values.
*/
pub fn ge(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersion, FullVersionValue(i)) | (FullVersion, Int(i)) => {
let start = WinVersion::from_u32_round_up(i).expect("valid full version for >=");
Ok(Feat(WinVersions::from(start..).into()))
},
(ShortVersion, ShortVersionValue(i)) | (ShortVersion, Int(i)) => {
let start = WinVersion::from_u32_round_up(i << 16).expect("valid full version for >=");
Ok(Feat(WinVersions::from(start..).into()))
},
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} >= {:?}", l, r))
}
}
/**
Computes the result of a right-shift of a value.
*/
pub fn rs(self, rhs: Value) -> Result<Value, String> {
use self::Value::*;
match (self, rhs) {
(FullVersionValue(v), Int(16)) => Ok(ShortVersionValue(v >> 16)),
(Ignore, Ignore) => Ok(Ignore),
(Ignore, Int(_)) | (Int(_), Ignore) => Ok(Ignore),
(l, r) => Err(format!("invalid op: {:?} >> {:?}", l, r))
}
}
/**
Computes the result of taking the "os version" of this value.
*/
pub fn os_ver(self) -> Result<Value, String> {
use self::Value::*;
match self {
FullVersion => Ok(OsVersion),
FullVersionValue(v) => Ok(FullVersionValue(v & 0xFFFF_0000)),
n => Err(format!("invalid op: OSVER({:?})", n))
}
}
/**
Computes the result of taking the "service pack version" of this value.
*/
pub fn sp_ver(self) -> Result<Value, String> {
use self::Value::*;
match self {
FullVersion => Ok(SpVersion),
n => Err(format!("invalid op: SPVER({:?})", n))
}
}
/**
Attempts to ignore this value.
*/
pub fn ignore(self) -> Result<Value, String> {
use self::Value::*;
match self {
Ignore => Ok(Ignore),
n => Err(format!("cannot ignore {:?}", n))
}
}
}
|
thread_local! {
    // The MIO poll instance backing the event loop for this thread.
    static POLL: ::std::cell::RefCell<::mio::Poll> = ::std::cell::RefCell::new(::mio::Poll::new().unwrap());
    // Event callbacks, keyed by the token returned from insert_listener.
    static EVENT_HANDLERS: ::std::cell::RefCell<::std::collections::BTreeMap<usize, ::std::rc::Rc<dyn Fn(::mio::Event) -> () + 'static>>> = ::std::default::Default::default();
    // Pending timeouts/intervals, keyed by the id returned from set_timeout/set_interval.
    static TIME_CALLBACKS: ::std::cell::RefCell<::std::collections::BTreeMap<usize, TimeCallback>> = ::std::default::Default::default();
    // Monotonic id source for listeners and timers.
    // Starts at 1 because 0 is reserved for run_in_thread
    static TICKER: ::std::cell::RefCell<usize> = ::std::cell::RefCell::new(1);
    // Reusable event buffer shared by run()/run_worker(); Rc so the buffer can be
    // borrowed for the duration of the loop.
    static EVENT_BUFFER: ::std::rc::Rc<::std::cell::RefCell<::mio::Events>> = ::std::rc::Rc::new(::std::cell::RefCell::new(::mio::Events::with_capacity(1024)));
    // Receiving end of the run_in_thread channel; Some(_) once init_loop has run.
    static REMOTE_RECEIVER: ::std::cell::RefCell<Option<::std::sync::mpsc::Receiver<Box<dyn Fn() -> () + Send + 'static>>>> = ::std::default::Default::default();
    // Keeps the token-0 wake-up registration alive for the lifetime of the loop.
    static REMOTE_REGISTRATION: ::std::cell::RefCell<Option<::mio::Registration>> = ::std::default::Default::default();
    // Set by init_loop, cleared by stop(); controls the run()/run_worker() loops.
    static SHOULD_CONTINUE: ::std::cell::RefCell<bool> = ::std::default::Default::default();
    // Set by flush(); tells turn_internal to abandon the current (stale) turn.
    static FLUSH_FLAG: ::std::cell::RefCell<bool> = ::std::cell::RefCell::new(false);
}
lazy_static! {
    // Global registry mapping each thread that ran init_loop to its wake-up
    // handle, so run_in_thread can reach it from any other thread.
    static ref REMOTES: ::std::sync::Mutex<::std::collections::HashMap<::std::thread::ThreadId, Remote>> = ::std::default::Default::default();
}
/// A pending timer registered via set_timeout or set_interval.
struct TimeCallback {
    // Shared so an interval callback can be invoked without holding the map borrow.
    callback: ::std::rc::Rc<::std::cell::RefCell<dyn FnMut() -> () + 'static>>,
    // Next (or only) time the callback is due.
    when: ::std::time::Instant,
    // Some(period) for intervals, None for one-shot timeouts.
    interval: Option<::std::time::Duration>,
}
/// Handle for waking another thread's event loop (see run_in_thread).
struct Remote {
    // Flips the token-0 registration readable, waking the remote poll.
    set_readiness: ::mio::SetReadiness,
    // Queue of callbacks for the remote thread to execute.
    run_sender: ::std::sync::mpsc::Sender<Box<dyn Fn() -> () + Send + 'static>>,
}
/// Allocates the next unique listener/timer identifier for this thread.
fn tick() -> usize {
    TICKER.with(|counter| {
        let current: usize = *counter.borrow();
        let next = current.checked_add(1).expect("Ran out of callback IDs");
        *counter.borrow_mut() = next;
        current
    })
}
/// Drop the existing Poll for this thread and replace it with a freshly generated poll. Also drops all registered event_handler callbacks,
/// set_timeout callbacks, and set_interval callbacks. This is (exclusively?) useful after a fork() when you want to separate yourself from the old
/// process without calling exec(). Should only be used from the main thread and when no other threads have been created. Should only be used when
/// inside the transportation event loop (i.e. when run() or run_worker() are on the stack).
pub fn flush() {
    // Replace the poll first so nothing below can touch the old one.
    let new_poll = ::mio::Poll::new().unwrap();
    POLL.with(|x| x.replace(new_poll));
    EVENT_HANDLERS.with(|x| x.borrow_mut().clear());
    TIME_CALLBACKS.with(|x| x.borrow_mut().clear());
    TICKER.with(|x| *x.borrow_mut() = 1);
    // Drop the old wake-up channel/registration; init_loop recreates them
    // against the new poll.
    REMOTE_RECEIVER.with(|x| x.borrow_mut().take());
    REMOTE_REGISTRATION.with(|x| x.borrow_mut().take());
    init_loop();
    // Signal turn_internal to abandon the in-flight turn, whose buffered
    // events refer to the poll we just discarded.
    FLUSH_FLAG.with(|x| *x.borrow_mut() = true);
}
/// Insert a listener and return a unique usize value that can be used to register one or more [`Evented`](::mio::Evented)s with the internal poll
/// for this thread. Note: this callback will never be called unless [`borrow_poll`]`(|poll| poll.`[`register`](::mio::Poll::register)`())` is called
/// with the returned token.
pub fn insert_listener(listener: impl Fn(::mio::Event) -> () + 'static) -> usize {
    let token = tick();
    let handler = ::std::rc::Rc::new(listener);
    EVENT_HANDLERS.with(|handlers| handlers.borrow_mut().insert(token, handler));
    token
}
/// Remove a previously registered listener. Returns true if a listener was removed and false otherwise. Note: The Evented item must still be removed
/// from the poll separately.
pub fn remove_listener(idx: usize) -> bool {
    EVENT_HANDLERS.with(|handlers| {
        let removed = handlers.borrow_mut().remove(&idx);
        removed.is_some()
    })
}
/// Call callback once after timeout has passed. Returns an identifier that is unique across insert_listener, set_timeout, and set_interval that can
/// be used with the clear_timeout function.
pub fn set_timeout(callback: impl FnMut() -> () + 'static, timeout: ::std::time::Duration) -> usize {
    let shared = ::std::rc::Rc::new(::std::cell::RefCell::new(callback));
    let deadline = ::std::time::Instant::now() + timeout;
    let id = tick();
    let entry = TimeCallback {
        callback: shared,
        when: deadline,
        // One-shot: no repeat period.
        interval: None,
    };
    TIME_CALLBACKS.with(|timers| timers.borrow_mut().insert(id, entry));
    id
}
/// Call callback after interval time has passed, then again once every interval. Returns an identifier that is unique across insert_listener,
/// set_timeout, and set_interval that can be used with the clear_interval function.
pub fn set_interval(callback: impl FnMut() -> () + 'static, interval: ::std::time::Duration) -> usize {
    let shared = ::std::rc::Rc::new(::std::cell::RefCell::new(callback));
    let first_fire = ::std::time::Instant::now() + interval;
    let id = tick();
    let entry = TimeCallback {
        callback: shared,
        when: first_fire,
        // Repeating: fires every `interval` after the first.
        interval: Some(interval),
    };
    TIME_CALLBACKS.with(|timers| timers.borrow_mut().insert(id, entry));
    id
}
/// Remove an existing timeout before it has occured. Returns true if a timeout was removed or false otherwise.
pub fn clear_timeout(idx: usize) -> bool {
if TIME_CALLBACKS.with(|x| x.borrow().get(&idx).map(|y| y.interval.is_some()).unwrap_or(true)) {
return false;
}
TIME_CALLBACKS.with(|x| x.borrow_mut().remove(&idx).is_some())
}
/// Remove an existing interval. Returns true if an interval was removed or false otherwise.
pub fn clear_interval(idx: usize) -> bool {
if TIME_CALLBACKS.with(|x| x.borrow().get(&idx).map(|y| y.interval.is_none()).unwrap_or(true)) {
return false;
}
TIME_CALLBACKS.with(|x| x.borrow_mut().remove(&idx).is_some())
}
/// Returns the id of the timer with the earliest deadline, and how long until
/// it is due (zero if already overdue). `(None, None)` when no timers exist —
/// i.e. the poll should block indefinitely.
fn get_soonest_timeout() -> (Option<usize>, Option<::std::time::Duration>) {
    // Read-only scan; the original needlessly took a mutable borrow here.
    let soonest = TIME_CALLBACKS.with(|x| {
        x.borrow()
            .iter()
            .min_by_key(|&(_, v)| v.when)
            .map(|(k, v)| (*k, v.when))
    });
    match soonest {
        None => (None, None),
        Some((idx, when)) => {
            let now = ::std::time::Instant::now();
            // Instant subtraction panics if `when < now`, hence the clamp to zero.
            let timeout = if when <= now {
                ::std::time::Duration::from_secs(0)
            } else {
                when.duration_since(now)
            };
            (Some(idx), Some(timeout))
        }
    }
}
fn dispatch_timeout(time_idx: usize) {
let now = ::std::time::Instant::now();
if TIME_CALLBACKS.with(|x| x.borrow().get(&time_idx).unwrap().when <= now) {
if TIME_CALLBACKS.with(|x| x.borrow().get(&time_idx).unwrap().interval.is_none()) {
let callback = TIME_CALLBACKS.with(|x| x.borrow_mut().remove(&time_idx).unwrap());
(&mut *callback.callback.borrow_mut())();
} else {
let callback = TIME_CALLBACKS.with(|x| {
let interval = x.borrow().get(&time_idx).unwrap().interval.unwrap();
x.borrow_mut().get_mut(&time_idx).unwrap().when = now + interval;
x.borrow().get(&time_idx).unwrap().callback.clone()
});
(&mut *callback.borrow_mut())();
}
}
}
/// Run callback with a reference to the internal MIO poll for the current thread. Transportation keeps a separate poll for every thread and it is
/// appropriate to run transportation from multiple threads simultaneously.
pub fn borrow_poll<T, R>(callback: T) -> R
where
    // Relaxed from `Fn` to `FnOnce`: the callback is invoked exactly once, and
    // every `Fn` closure still satisfies this bound, so callers are unaffected.
    T: FnOnce(&::mio::Poll) -> R,
{
    POLL.with(|x| callback(&*x.borrow()))
}
/// Routes one poll event to its registered handler. Token 0 is reserved for
/// cross-thread wake-ups queued via run_in_thread.
fn dispatch_event(event: ::mio::Event) {
    let token: usize = event.token().0;
    if token == 0 {
        // Drain every queued remote callback; edge-triggered, so we must not
        // leave any behind.
        while let Ok(callback) = REMOTE_RECEIVER.with(|x| x.borrow().as_ref().unwrap().try_recv()) {
            (*callback)();
        }
        return;
    }
    // `.cloned()` instead of `.map(|x| x.clone())` (clippy::map_clone); the Rc
    // clone lets us drop the borrow before invoking the handler.
    let callback = EVENT_HANDLERS.with(|handlers| handlers.borrow().get(&token).cloned());
    if let Some(callback) = callback {
        (*callback)(event);
    }
}
/// True when this thread has neither pending timers nor event handlers,
/// i.e. the event loop has nothing left to wait for.
fn empty() -> bool {
    TIME_CALLBACKS.with(|timers| timers.borrow().is_empty())
        && EVENT_HANDLERS.with(|handlers| handlers.borrow().is_empty())
}
/// One turn of the event loop: poll (bounded by the soonest timer deadline),
/// fire that timer if due, then dispatch buffered events.
fn turn_internal(events: &mut ::mio::Events) {
    // `timeout == None` (no timers) blocks the poll indefinitely.
    let (time_idx, timeout) = get_soonest_timeout();
    events.clear();
    POLL.with(|x| x.borrow().poll(events, timeout)).unwrap();
    if let Some(time_idx) = time_idx {
        dispatch_timeout(time_idx);
        // A callback may have called flush(), replacing the poll; the events
        // buffered above belong to the old poll, so abandon this turn.
        if FLUSH_FLAG.with(|x| *x.borrow()) {
            FLUSH_FLAG.with(|x| *x.borrow_mut() = false);
            return;
        }
    }
    for event in events.iter() {
        dispatch_event(event);
        // Same as above: flush() invalidates the remaining buffered events.
        if FLUSH_FLAG.with(|x| *x.borrow()) {
            FLUSH_FLAG.with(|x| *x.borrow_mut() = false);
            return;
        }
    }
}
/// Blocks the thread by polling on the internal MIO poll, dispatching events to event callbacks and calling set_timeout/set_interval callbacks at
/// the appropriate times. Returns when there are no remaining registered time or event callbacks. Panics if called while transportation is already
/// running in this same thread.
pub fn run() {
init_loop();
let events_rc = EVENT_BUFFER.with(|x| x.clone());
let events: &mut ::mio::Events = &mut *events_rc.borrow_mut();
while SHOULD_CONTINUE.with(|x| *x.borrow()) {
if empty() {
return;
}
turn_internal(events);
}
}
/// Exactly the same as [`run`], but does not exit when empty. This is useful for threadpool workers (see [`run_in_thread`])
pub fn run_worker() {
init_loop();
let events_rc = EVENT_BUFFER.with(|x| x.clone());
let events: &mut ::mio::Events = &mut *events_rc.borrow_mut();
while SHOULD_CONTINUE.with(|x| *x.borrow()) {
turn_internal(events);
}
}
/// Arms the loop flag and, on first call for this thread, wires up the
/// run_in_thread machinery: an mpsc channel for callbacks plus a token-0
/// registration whose readiness wakes this thread's poll.
fn init_loop() {
    // (Re-)arm the loop; stop() clears this flag.
    SHOULD_CONTINUE.with(|x| *x.borrow_mut() = true);
    // Already initialized for this thread — nothing more to do.
    if REMOTE_RECEIVER.with(|x| x.borrow().is_some()) {
        return;
    }
    let (tx, rx) = ::std::sync::mpsc::channel();
    let thread_id = ::std::thread::current().id();
    let (registration, set_readiness) = ::mio::Registration::new2();
    REMOTE_RECEIVER.with(|x| *x.borrow_mut() = Some(rx));
    // Fixed mojibake: the source read `®istration` (an HTML-entity corruption
    // of `&registration`), which does not compile.
    borrow_poll(|poll| {
        poll.register(&registration, ::mio::Token(0), ::mio::Ready::readable(), ::mio::PollOpt::edge())
            .unwrap()
    });
    // Keep the registration alive for as long as the loop exists.
    REMOTE_REGISTRATION.with(|x| *x.borrow_mut() = Some(registration));
    let mut remotes_lock = REMOTES.lock().unwrap();
    remotes_lock.insert(
        thread_id,
        Remote {
            set_readiness,
            run_sender: tx,
        },
    );
}
/// Run a callback in a remote thread that is owned by transportation. Spins until the remote thread calls [`run`] or [`run_worker`]. May deadlock if
/// the remote thread doesn't enter the transportation event loop. May return `Err(())` if the remote thread has terminated.
pub fn run_in_thread(thread_id: ::std::thread::ThreadId, callback: impl Fn() -> () + Send + 'static) -> Result<(), ()> {
    let callback = Box::new(callback);
    loop {
        // The REMOTES guard lives only until the end of this iteration, so the
        // target thread's init_loop can insert itself between retries.
        let remotes = REMOTES.lock().unwrap();
        let remote = match remotes.get(&thread_id) {
            Some(remote) => remote,
            None => {
                // Target thread hasn't run init_loop yet — back off and retry.
                ::std::thread::yield_now();
                continue;
            }
        };
        // Queue the callback, then wake the remote poll via its token-0
        // registration. Either send failing means the remote side is gone.
        remote.run_sender.send(callback).map_err(|_| ())?;
        remote.set_readiness.set_readiness(::mio::Ready::readable()).map_err(|_| ())?;
        return Ok(());
    }
}
/// Causes the event loop to exit the next time it would iterate. The event loop may be resumed with [`run()`] or [`run_worker()`]
pub fn stop() {
    SHOULD_CONTINUE.with(|flag| {
        *flag.borrow_mut() = false;
    });
}
|
extern crate alloc;
use alloc::string::String;
use alloc::vec::Vec;
use core::num::NonZeroUsize;
#[cfg(feature = "parallel")]
use rayon::prelude::*;
use subspace_core_primitives::crypto::kzg::{Commitment, Kzg, Polynomial};
use subspace_core_primitives::crypto::{blake2b_256_254_hash_to_scalar, Scalar};
use subspace_core_primitives::{ArchivedHistorySegment, Piece, RawRecord};
use subspace_erasure_coding::ErasureCoding;
/// Reconstructor-related instantiation error.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "thiserror", derive(thiserror::Error))]
pub enum ReconstructorInstantiationError {
    /// Failed to initialize erasure coding.
    ///
    /// Carries the error text reported by the erasure-coding backend.
    #[cfg_attr(
        feature = "thiserror",
        error("Failed to initialize erasure coding: {0}")
    )]
    FailedToInitializeErasureCoding(String),
}
/// Reconstruction error.
///
/// (Doc fixed: this enum previously reused the *instantiation* error's doc
/// comment by copy-paste; it describes runtime reconstruction failures.)
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "thiserror", derive(thiserror::Error))]
pub enum ReconstructorError {
    /// Error during data shards reconstruction.
    ///
    /// (Doc fixed: previously read "Segment size is not bigger than record
    /// size", which did not match this variant.)
    #[cfg_attr(
        feature = "thiserror",
        error("Error during data shards reconstruction: {0}")
    )]
    DataShardsReconstruction(String),
    /// Commitment of input piece is invalid.
    #[cfg_attr(feature = "thiserror", error("Commitment of input piece is invalid."))]
    InvalidInputPieceCommitment,
    /// Incorrect piece position provided.
    #[cfg_attr(feature = "thiserror", error("Incorrect piece position provided."))]
    IncorrectPiecePosition,
}
/// Reconstructor helps to retrieve blocks from archived pieces.
#[derive(Debug, Clone)]
pub struct PiecesReconstructor {
    /// Erasure coding data structure
    erasure_coding: ErasureCoding,
    /// KZG instance
    kzg: Kzg,
}
impl PiecesReconstructor {
    // TODO: Make erasure coding an explicit argument
    /// Creates a reconstructor backed by the given KZG instance.
    ///
    /// Fails if the erasure-coding backend cannot be initialized at scale
    /// `log2(NUM_PIECES)`.
    pub fn new(kzg: Kzg) -> Result<Self, ReconstructorInstantiationError> {
        // TODO: Check if KZG can process number configured number of elements and update proof
        // message in `.expect()`
        let erasure_coding = ErasureCoding::new(
            NonZeroUsize::new(ArchivedHistorySegment::NUM_PIECES.ilog2() as usize)
                .expect("Archived history segment contains at very least one piece; qed"),
        )
        .map_err(ReconstructorInstantiationError::FailedToInitializeErasureCoding)?;
        Ok(Self {
            erasure_coding,
            kzg,
        })
    }
    /// Returns incomplete pieces (witness missing) and polynomial that can be used to generate
    /// necessary witnesses later.
    fn reconstruct_shards(
        &self,
        input_pieces: &[Option<Piece>],
    ) -> Result<(ArchivedHistorySegment, Polynomial), ReconstructorError> {
        let mut reconstructed_pieces = ArchivedHistorySegment::default();
        // First, copy every available source piece into the output; if any is
        // missing, fall through to erasure-coded recovery below.
        // NOTE(review): `step_by(2)` implies source shards sit at even indices,
        // interleaved with parity — confirm against the archiver's layout.
        if !input_pieces
            .iter()
            // Take each source shards here
            .step_by(2)
            .zip(
                reconstructed_pieces
                    .iter_mut()
                    .map(|piece| piece.record_mut().iter_mut()),
            )
            .all(|(maybe_piece, raw_record)| {
                if let Some(piece) = maybe_piece {
                    piece
                        .record()
                        .iter()
                        .zip(raw_record)
                        .for_each(|(source, target)| {
                            *target = *source;
                        });
                    true
                } else {
                    false
                }
            })
        {
            // If not all data pieces are available, need to reconstruct data shards using erasure
            // coding.
            // Scratch buffer to avoid re-allocation
            let mut tmp_shards_scalars =
                Vec::<Option<Scalar>>::with_capacity(ArchivedHistorySegment::NUM_PIECES);
            // Iterate over the chunks of `Scalar::SAFE_BYTES` bytes of all records
            for record_offset in 0..RawRecord::SIZE / Scalar::SAFE_BYTES {
                // Collect chunks of each record at the same offset
                for maybe_piece in input_pieces.iter() {
                    let maybe_scalar = maybe_piece
                        .as_ref()
                        .map(|piece| {
                            piece
                                .record()
                                .iter()
                                .nth(record_offset)
                                .expect("Statically guaranteed to exist in a piece; qed")
                        })
                        .map(Scalar::try_from)
                        .transpose()
                        .map_err(ReconstructorError::DataShardsReconstruction)?;
                    tmp_shards_scalars.push(maybe_scalar);
                }
                // Recover the full column of scalars, then write each recovered
                // scalar back into the corresponding record chunk.
                self.erasure_coding
                    .recover(&tmp_shards_scalars)
                    .map_err(ReconstructorError::DataShardsReconstruction)?
                    .into_iter()
                    .zip(reconstructed_pieces.iter_mut().map(|piece| {
                        piece
                            .record_mut()
                            .iter_mut()
                            .nth(record_offset)
                            .expect("Statically guaranteed to exist in a piece; qed")
                    }))
                    .for_each(|(source_scalar, segment_data)| {
                        segment_data.copy_from_slice(&source_scalar.to_bytes());
                    });
                tmp_shards_scalars.clear();
            }
        }
        // Commit to every source record: reuse the commitment carried by an
        // input piece when present, otherwise recompute it from the record.
        let source_record_commitments = {
            #[cfg(not(feature = "parallel"))]
            let iter = reconstructed_pieces.iter_mut().zip(input_pieces).step_by(2);
            #[cfg(feature = "parallel")]
            let iter = reconstructed_pieces
                .par_iter_mut()
                .zip_eq(input_pieces)
                .step_by(2);
            iter.map(|(piece, maybe_input_piece)| {
                if let Some(input_piece) = maybe_input_piece {
                    Commitment::try_from_bytes(input_piece.commitment())
                        .map_err(|_error| ReconstructorError::InvalidInputPieceCommitment)
                } else {
                    let scalars = {
                        let mut scalars =
                            Vec::with_capacity(piece.record().len().next_power_of_two());
                        for record_chunk in piece.record().iter() {
                            scalars.push(
                                Scalar::try_from(record_chunk)
                                    .map_err(ReconstructorError::DataShardsReconstruction)?,
                            );
                        }
                        // Number of scalars for KZG must be a power of two elements
                        scalars.resize(scalars.capacity(), Scalar::default());
                        scalars
                    };
                    let polynomial = self.kzg.poly(&scalars).expect(
                        "KZG instance must be configured to support this many scalars; qed",
                    );
                    let commitment = self.kzg.commit(&polynomial).expect(
                        "KZG instance must be configured to support this many scalars; qed",
                    );
                    Ok(commitment)
                }
            })
            .collect::<Result<Vec<_>, _>>()?
        };
        // Extend source commitments to cover parity pieces as well.
        let record_commitments = self
            .erasure_coding
            .extend_commitments(&source_record_commitments)
            .expect(
                "Erasure coding instance is deliberately configured to support this input; qed",
            );
        drop(source_record_commitments);
        // Write each commitment into its piece and hash it to a scalar; these
        // hashes form the evaluation points of the segment-level polynomial.
        let record_commitment_hashes = reconstructed_pieces
            .iter_mut()
            .zip(record_commitments)
            .map(|(reconstructed_piece, commitment)| {
                let commitment_bytes = commitment.to_bytes();
                reconstructed_piece
                    .commitment_mut()
                    .copy_from_slice(&commitment_bytes);
                blake2b_256_254_hash_to_scalar(&commitment_bytes)
            })
            .collect::<Vec<_>>();
        let polynomial = self
            .kzg
            .poly(&record_commitment_hashes)
            .expect("Internally produced values must never fail; qed");
        Ok((reconstructed_pieces, polynomial))
    }
    /// Returns all the pieces for a segment using given set of pieces of a segment of the archived
    /// history (any half of all pieces are required to be present, the rest will be recovered
    /// automatically due to use of erasure coding if needed).
    pub fn reconstruct_segment(
        &self,
        segment_pieces: &[Option<Piece>],
    ) -> Result<ArchivedHistorySegment, ReconstructorError> {
        let (mut pieces, polynomial) = self.reconstruct_shards(segment_pieces)?;
        // Fill in the witness for every piece (reconstruct_shards leaves them empty).
        #[cfg(not(feature = "parallel"))]
        let iter = pieces.iter_mut().enumerate();
        #[cfg(feature = "parallel")]
        let iter = pieces.par_iter_mut().enumerate();
        iter.for_each(|(position, piece)| {
            piece.witness_mut().copy_from_slice(
                &self
                    .kzg
                    .create_witness(
                        &polynomial,
                        ArchivedHistorySegment::NUM_PIECES,
                        position as u32,
                    )
                    // TODO: Update this proof here and in other places, we don't use Merkle
                    // trees anymore
                    .expect("Position is statically known to be valid; qed")
                    .to_bytes(),
            );
        });
        Ok(pieces)
    }
    /// Returns the missing piece for a segment using given set of pieces of a segment of the archived
    /// history (any half of all pieces are required to be present).
    pub fn reconstruct_piece(
        &self,
        segment_pieces: &[Option<Piece>],
        piece_position: usize,
    ) -> Result<Piece, ReconstructorError> {
        // NOTE(review): the full segment is reconstructed before the position is
        // validated; validating first would fail faster on bad input.
        let (reconstructed_records, polynomial) = self.reconstruct_shards(segment_pieces)?;
        if piece_position >= ArchivedHistorySegment::NUM_PIECES {
            return Err(ReconstructorError::IncorrectPiecePosition);
        }
        let mut piece = Piece::from(&reconstructed_records[piece_position]);
        piece.witness_mut().copy_from_slice(
            &self
                .kzg
                .create_witness(
                    &polynomial,
                    ArchivedHistorySegment::NUM_PIECES,
                    piece_position as u32,
                )
                .expect("Position is verified to be valid above; qed")
                .to_bytes(),
        );
        Ok(piece)
    }
}
|
mod shared;
use rdisk::{PartitionedDisk, PhysicalDisk};
use shared::*;
#[test]
fn partitions() {
    use serde::Deserialize;
    // Shape of one record in testdata/partitions.json.
    // NOTE(review): field names suggest a PowerShell partition dump — confirm.
    #[derive(Deserialize)]
    #[allow(non_snake_case)]
    struct PartitionInfo {
        DiskNumber: u32,
        PartitionNumber: u32,
        Offset: u64,
        Size: u64,
        AccessPaths: Option<Vec<String>>,
        Guid: Option<String>,
        GptType: Option<String>,
        Type: String,
        MbrType: Option<u32>,
        DriveLetter: Option<char>,
        IsBoot: bool,
        IsSystem: bool,
    }
    // The test is skipped entirely when no testdata directory is configured.
    if let Some(dir) = get_testdata_path() {
        let json_file = dir.join("partitions.json");
        println!("Read from: {}", json_file.display());
        let data = std::fs::read_to_string(json_file).unwrap();
        let v: Vec<PartitionInfo> = serde_json::from_str(&data).unwrap();
        // Group expected partitions by disk number.
        // `or_insert_with(Vec::new)` replaces the redundant turbofish closure
        // (clippy::redundant_closure); `for item in v` replaces `v.into_iter()`.
        let mut disks = std::collections::HashMap::new();
        for item in v {
            disks
                .entry(item.DiskNumber)
                .or_insert_with(Vec::new)
                .push(item);
        }
        // Compare each expected partition with what rdisk enumerates.
        for (number, partitions) in disks {
            println!("Disk # {}", number);
            let disk = PhysicalDisk::open(number).unwrap();
            let disk = PartitionedDisk::new(disk).unwrap();
            for (hw, p) in partitions.iter().zip(disk.partitions()) {
                println!(" @{:13}, len: {:13}, kind: {:?}", hw.Offset, hw.Size, p.kind());
                assert_eq!(hw.Offset, p.offset());
                assert_eq!(hw.Size, p.length());
            }
        }
    }
}
#[test]
fn volumes() {
    use serde::Deserialize;
    // Shape of one record in testdata/volumes.json.
    #[derive(Deserialize)]
    #[allow(non_snake_case)]
    struct VolumeInfo {
        #[serde(rename="Path")]
        VolPath: String,
        Size: u64,
        SizeRemaining: u64,
        FileSystemType: String,
        FileSystemLabel: String,
        DriveLetter: Option<char>,
    }
    // The test is skipped entirely when no testdata directory is configured.
    let dir = match get_testdata_path() {
        Some(dir) => dir,
        None => return,
    };
    let json_file = dir.join("volumes.json");
    println!("Read from: {}", json_file.display());
    let data = std::fs::read_to_string(json_file).unwrap();
    let volumes: Vec<VolumeInfo> = serde_json::from_str(&data).unwrap();
    for volume in volumes {
        println!("Volume: {}, {:?}", volume.VolPath, volume.DriveLetter)
    }
}
// NOTE(review): svd2rust-generated register accessors for the SDMMC `IDMALAR`
// register; prefer regenerating from the SVD over hand-editing.
#[doc = "Register `IDMALAR` reader"]
pub type R = crate::R<IDMALAR_SPEC>;
#[doc = "Register `IDMALAR` writer"]
pub type W = crate::W<IDMALAR_SPEC>;
// 14-bit field at bits [15:2].
#[doc = "Field `IDMALA` reader - Word aligned linked list item address offset Linked list item offset pointer to the base of the next linked list item structure. Linked list item base address is IDMABA + IDMALA. These bits can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type IDMALA_R = crate::FieldReader<u16>;
#[doc = "Field `IDMALA` writer - Word aligned linked list item address offset Linked list item offset pointer to the base of the next linked list item structure. Linked list item base address is IDMABA + IDMALA. These bits can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type IDMALA_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 14, O, u16>;
// Single-bit flags at bits 29 (ABR), 30 (ULS) and 31 (ULA).
#[doc = "Field `ABR` reader - Acknowledge linked list buffer ready This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0). This bit is not taken into account when starting the first linked list buffer from the software programmed register information. ABR is only taken into account on subsequent loaded linked list items."]
pub type ABR_R = crate::BitReader;
#[doc = "Field `ABR` writer - Acknowledge linked list buffer ready This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0). This bit is not taken into account when starting the first linked list buffer from the software programmed register information. ABR is only taken into account on subsequent loaded linked list items."]
pub type ABR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ULS` reader - Update SDMMC_IDMABSIZE from the next linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode and ULA = 1) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type ULS_R = crate::BitReader;
#[doc = "Field `ULS` writer - Update SDMMC_IDMABSIZE from the next linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode and ULA = 1) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type ULS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ULA` reader - Update SDMMC_IDMALAR from linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type ULA_R = crate::BitReader;
#[doc = "Field `ULA` writer - Update SDMMC_IDMALAR from linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
pub type ULA_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field accessors over a raw snapshot of SDMMC_IDMALAR. Single-bit fields
// are tested against their mask; the multi-bit IDMALA field is shifted down
// and masked to its 14-bit width.
impl R {
    #[doc = "Bits 2:15 - Word aligned linked list item address offset Linked list item offset pointer to the base of the next linked list item structure. Linked list item base address is IDMABA + IDMALA. These bits can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    pub fn idmala(&self) -> IDMALA_R {
        let raw = (self.bits >> 2) & 0x3fff;
        IDMALA_R::new(raw as u16)
    }
    #[doc = "Bit 29 - Acknowledge linked list buffer ready This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0). This bit is not taken into account when starting the first linked list buffer from the software programmed register information. ABR is only taken into account on subsequent loaded linked list items."]
    #[inline(always)]
    pub fn abr(&self) -> ABR_R {
        ABR_R::new((self.bits & (1 << 29)) != 0)
    }
    #[doc = "Bit 30 - Update SDMMC_IDMABSIZE from the next linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode and ULA = 1) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    pub fn uls(&self) -> ULS_R {
        ULS_R::new((self.bits & (1 << 30)) != 0)
    }
    #[doc = "Bit 31 - Update SDMMC_IDMALAR from linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    pub fn ula(&self) -> ULA_R {
        ULA_R::new((self.bits & (1 << 31)) != 0)
    }
}
impl W {
    // Each method hands back a field-writer proxy parameterized with the
    // field's bit offset; IDMALAR_SPEC ties the proxy to this register.
    #[doc = "Bits 2:15 - Word aligned linked list item address offset Linked list item offset pointer to the base of the next linked list item structure. Linked list item base address is IDMABA + IDMALA. These bits can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    #[must_use]
    pub fn idmala(&mut self) -> IDMALA_W<IDMALAR_SPEC, 2> {
        IDMALA_W::new(self)
    }
    #[doc = "Bit 29 - Acknowledge linked list buffer ready This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0). This bit is not taken into account when starting the first linked list buffer from the software programmed register information. ABR is only taken into account on subsequent loaded linked list items."]
    #[inline(always)]
    #[must_use]
    pub fn abr(&mut self) -> ABR_W<IDMALAR_SPEC, 29> {
        ABR_W::new(self)
    }
    #[doc = "Bit 30 - Update SDMMC_IDMABSIZE from the next linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode and ULA = 1) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    #[must_use]
    pub fn uls(&mut self) -> ULS_W<IDMALAR_SPEC, 30> {
        ULS_W::new(self)
    }
    #[doc = "Bit 31 - Update SDMMC_IDMALAR from linked list when in linked list mode (SDMMC_IDMACTRLR.IDMABMODE select linked list mode) This bit can only be written by firmware when DPSM is inactive (DPSMACT = 0)."]
    #[inline(always)]
    #[must_use]
    pub fn ula(&mut self) -> ULA_W<IDMALAR_SPEC, 31> {
        ULA_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe escape hatch: replaces the whole pending value; the caller is
    // responsible for the raw value being valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "SDMMC_IDMALAR\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`idmalar::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`idmalar::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct IDMALAR_SPEC;
// 32-bit readable/writable register; no write-one/write-zero-to-modify
// fields (both bitmaps are 0) and it resets to 0.
impl crate::RegisterSpec for IDMALAR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`idmalar::R`](R) reader structure"]
impl crate::Readable for IDMALAR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`idmalar::W`](W) writer structure"]
impl crate::Writable for IDMALAR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets IDMALAR to value 0"]
impl crate::Resettable for IDMALAR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#[doc = r" Value read from the register"]
pub struct R {
    // Raw register snapshot taken at read time.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw value that will be committed to the register.
    bits: u32,
}
// Register-level access for CLOCKENSTAT: read, write, read-modify-write,
// and reset. Only change vs. the generated original: field-init shorthand
// (`R { bits }` instead of `R { bits: bits }`, clippy::redundant_field_names).
impl super::CLOCKENSTAT {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read once, expose the snapshot to the closure, write back whatever
        // the closure left in `w`.
        let bits = self.register.get();
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Writes start from the reset value, not the current hardware value.
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
// Read-side decoding of the CLOCKENSTAT field. Each named variant maps to
// exactly one register bit (a single power-of-two value; see
// `CLOCKENSTATR::bits()`), so a raw value with several bits set — or none —
// decodes to `_Reserved` rather than a named variant.
#[doc = "Possible values of the field `CLOCKENSTAT`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CLOCKENSTATR {
    #[doc = "Clock enable for the ADC. value."]
    ADC_CLKEN,
    #[doc = "Clock enable for the APBDMA ACTIVITY value."]
    APBDMA_ACTIVITY_CLKEN,
    #[doc = "Clock enable for the APBDMA AOH DOMAIN value."]
    APBDMA_AOH_CLKEN,
    #[doc = "Clock enable for the APBDMA AOL DOMAIN value."]
    APBDMA_AOL_CLKEN,
    #[doc = "Clock enable for the APBDMA_APB value."]
    APBDMA_APB_CLKEN,
    #[doc = "Clock enable for the APBDMA_BLEL value."]
    APBDMA_BLEL_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPA value."]
    APBDMA_HCPA_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPB value."]
    APBDMA_HCPB_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPC value."]
    APBDMA_HCPC_CLKEN,
    #[doc = "Clock enable for the APBDMA_MSPI value."]
    APBDMA_MSPI_CLKEN,
    #[doc = "Clock enable for the APBDMA_PDM value."]
    APBDMA_PDM_CLKEN,
    #[doc = "Clock enable for the BLEIF value."]
    BLEIF_CLK_CLKEN,
    #[doc = "Clock enable for the BLEIF 32khZ CLOCK value."]
    BLEIF_CLK32K_CLKEN,
    #[doc = "Clock enable for the CTIMER BLOCK value."]
    CTIMER_CLKEN,
    #[doc = "Clock enable for the CTIMER0A value."]
    CTIMER0A_CLKEN,
    #[doc = "Clock enable for the CTIMER0B value."]
    CTIMER0B_CLKEN,
    #[doc = "Clock enable for the CTIMER1A value."]
    CTIMER1A_CLKEN,
    #[doc = "Clock enable for the CTIMER1B value."]
    CTIMER1B_CLKEN,
    #[doc = "Clock enable for the CTIMER2A value."]
    CTIMER2A_CLKEN,
    #[doc = "Clock enable for the CTIMER2B value."]
    CTIMER2B_CLKEN,
    #[doc = "Clock enable for the CTIMER3A value."]
    CTIMER3A_CLKEN,
    #[doc = "Clock enable for the CTIMER3B value."]
    CTIMER3B_CLKEN,
    #[doc = "Clock enable for the CTIMER4A value."]
    CTIMER4A_CLKEN,
    #[doc = "Clock enable for the CTIMER4B value."]
    CTIMER4B_CLKEN,
    #[doc = "Clock enable for the CTIMER5A value."]
    CTIMER5A_CLKEN,
    #[doc = "Clock enable for the CTIMER5B value."]
    CTIMER5B_CLKEN,
    #[doc = "Clock enable for the CTIMER6A value."]
    CTIMER6A_CLKEN,
    #[doc = "Clock enable for the CTIMER6B value."]
    CTIMER6B_CLKEN,
    #[doc = "Clock enable for the CTIMER7A value."]
    CTIMER7A_CLKEN,
    #[doc = "Clock enable for the CTIMER7B value."]
    CTIMER7B_CLKEN,
    #[doc = "Clock enable for the DAP value."]
    DAP_CLKEN,
    #[doc = "Clock enable for the IOMSTRIFC0 value."]
    IOMSTRIFC0_CLKEN,
    #[doc = r" Reserved"]
    _Reserved(u32),
}
// One-hot encoding: in declaration order, variant k maps to the value
// `1 << k` (bits 0..=31). `bits()`/`_from()` are exact inverses of each
// other for the named variants; any other value round-trips via `_Reserved`.
impl CLOCKENSTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        match *self {
            CLOCKENSTATR::ADC_CLKEN => 1,
            CLOCKENSTATR::APBDMA_ACTIVITY_CLKEN => 2,
            CLOCKENSTATR::APBDMA_AOH_CLKEN => 4,
            CLOCKENSTATR::APBDMA_AOL_CLKEN => 8,
            CLOCKENSTATR::APBDMA_APB_CLKEN => 16,
            CLOCKENSTATR::APBDMA_BLEL_CLKEN => 32,
            CLOCKENSTATR::APBDMA_HCPA_CLKEN => 64,
            CLOCKENSTATR::APBDMA_HCPB_CLKEN => 128,
            CLOCKENSTATR::APBDMA_HCPC_CLKEN => 256,
            CLOCKENSTATR::APBDMA_MSPI_CLKEN => 512,
            CLOCKENSTATR::APBDMA_PDM_CLKEN => 1024,
            CLOCKENSTATR::BLEIF_CLK_CLKEN => 2048,
            CLOCKENSTATR::BLEIF_CLK32K_CLKEN => 4096,
            CLOCKENSTATR::CTIMER_CLKEN => 8192,
            CLOCKENSTATR::CTIMER0A_CLKEN => 16384,
            CLOCKENSTATR::CTIMER0B_CLKEN => 32768,
            CLOCKENSTATR::CTIMER1A_CLKEN => 65536,
            CLOCKENSTATR::CTIMER1B_CLKEN => 131072,
            CLOCKENSTATR::CTIMER2A_CLKEN => 262144,
            CLOCKENSTATR::CTIMER2B_CLKEN => 524288,
            CLOCKENSTATR::CTIMER3A_CLKEN => 1048576,
            CLOCKENSTATR::CTIMER3B_CLKEN => 2097152,
            CLOCKENSTATR::CTIMER4A_CLKEN => 4194304,
            CLOCKENSTATR::CTIMER4B_CLKEN => 8388608,
            CLOCKENSTATR::CTIMER5A_CLKEN => 16777216,
            CLOCKENSTATR::CTIMER5B_CLKEN => 33554432,
            CLOCKENSTATR::CTIMER6A_CLKEN => 67108864,
            CLOCKENSTATR::CTIMER6B_CLKEN => 134217728,
            CLOCKENSTATR::CTIMER7A_CLKEN => 268435456,
            CLOCKENSTATR::CTIMER7B_CLKEN => 536870912,
            CLOCKENSTATR::DAP_CLKEN => 1073741824,
            CLOCKENSTATR::IOMSTRIFC0_CLKEN => 2147483648,
            CLOCKENSTATR::_Reserved(bits) => bits,
        }
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    // Inverse of `bits()`: only exact single-bit values decode to a named
    // variant; everything else (including combined bits) becomes `_Reserved`.
    pub fn _from(value: u32) -> CLOCKENSTATR {
        match value {
            1 => CLOCKENSTATR::ADC_CLKEN,
            2 => CLOCKENSTATR::APBDMA_ACTIVITY_CLKEN,
            4 => CLOCKENSTATR::APBDMA_AOH_CLKEN,
            8 => CLOCKENSTATR::APBDMA_AOL_CLKEN,
            16 => CLOCKENSTATR::APBDMA_APB_CLKEN,
            32 => CLOCKENSTATR::APBDMA_BLEL_CLKEN,
            64 => CLOCKENSTATR::APBDMA_HCPA_CLKEN,
            128 => CLOCKENSTATR::APBDMA_HCPB_CLKEN,
            256 => CLOCKENSTATR::APBDMA_HCPC_CLKEN,
            512 => CLOCKENSTATR::APBDMA_MSPI_CLKEN,
            1024 => CLOCKENSTATR::APBDMA_PDM_CLKEN,
            2048 => CLOCKENSTATR::BLEIF_CLK_CLKEN,
            4096 => CLOCKENSTATR::BLEIF_CLK32K_CLKEN,
            8192 => CLOCKENSTATR::CTIMER_CLKEN,
            16384 => CLOCKENSTATR::CTIMER0A_CLKEN,
            32768 => CLOCKENSTATR::CTIMER0B_CLKEN,
            65536 => CLOCKENSTATR::CTIMER1A_CLKEN,
            131072 => CLOCKENSTATR::CTIMER1B_CLKEN,
            262144 => CLOCKENSTATR::CTIMER2A_CLKEN,
            524288 => CLOCKENSTATR::CTIMER2B_CLKEN,
            1048576 => CLOCKENSTATR::CTIMER3A_CLKEN,
            2097152 => CLOCKENSTATR::CTIMER3B_CLKEN,
            4194304 => CLOCKENSTATR::CTIMER4A_CLKEN,
            8388608 => CLOCKENSTATR::CTIMER4B_CLKEN,
            16777216 => CLOCKENSTATR::CTIMER5A_CLKEN,
            33554432 => CLOCKENSTATR::CTIMER5B_CLKEN,
            67108864 => CLOCKENSTATR::CTIMER6A_CLKEN,
            134217728 => CLOCKENSTATR::CTIMER6B_CLKEN,
            268435456 => CLOCKENSTATR::CTIMER7A_CLKEN,
            536870912 => CLOCKENSTATR::CTIMER7B_CLKEN,
            1073741824 => CLOCKENSTATR::DAP_CLKEN,
            2147483648 => CLOCKENSTATR::IOMSTRIFC0_CLKEN,
            i => CLOCKENSTATR::_Reserved(i),
        }
    }
    // Predicate helpers, one per named variant (exact equality, so a
    // `_Reserved` value never satisfies any of them).
    #[doc = "Checks if the value of the field is `ADC_CLKEN`"]
    #[inline]
    pub fn is_adc_clken(&self) -> bool {
        *self == CLOCKENSTATR::ADC_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_ACTIVITY_CLKEN`"]
    #[inline]
    pub fn is_apbdma_activity_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_ACTIVITY_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_AOH_CLKEN`"]
    #[inline]
    pub fn is_apbdma_aoh_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_AOH_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_AOL_CLKEN`"]
    #[inline]
    pub fn is_apbdma_aol_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_AOL_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_APB_CLKEN`"]
    #[inline]
    pub fn is_apbdma_apb_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_APB_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_BLEL_CLKEN`"]
    #[inline]
    pub fn is_apbdma_blel_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_BLEL_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_HCPA_CLKEN`"]
    #[inline]
    pub fn is_apbdma_hcpa_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_HCPA_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_HCPB_CLKEN`"]
    #[inline]
    pub fn is_apbdma_hcpb_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_HCPB_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_HCPC_CLKEN`"]
    #[inline]
    pub fn is_apbdma_hcpc_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_HCPC_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_MSPI_CLKEN`"]
    #[inline]
    pub fn is_apbdma_mspi_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_MSPI_CLKEN
    }
    #[doc = "Checks if the value of the field is `APBDMA_PDM_CLKEN`"]
    #[inline]
    pub fn is_apbdma_pdm_clken(&self) -> bool {
        *self == CLOCKENSTATR::APBDMA_PDM_CLKEN
    }
    #[doc = "Checks if the value of the field is `BLEIF_CLK_CLKEN`"]
    #[inline]
    pub fn is_bleif_clk_clken(&self) -> bool {
        *self == CLOCKENSTATR::BLEIF_CLK_CLKEN
    }
    #[doc = "Checks if the value of the field is `BLEIF_CLK32K_CLKEN`"]
    #[inline]
    pub fn is_bleif_clk32k_clken(&self) -> bool {
        *self == CLOCKENSTATR::BLEIF_CLK32K_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER_CLKEN`"]
    #[inline]
    pub fn is_ctimer_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER0A_CLKEN`"]
    #[inline]
    pub fn is_ctimer0a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER0A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER0B_CLKEN`"]
    #[inline]
    pub fn is_ctimer0b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER0B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER1A_CLKEN`"]
    #[inline]
    pub fn is_ctimer1a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER1A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER1B_CLKEN`"]
    #[inline]
    pub fn is_ctimer1b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER1B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER2A_CLKEN`"]
    #[inline]
    pub fn is_ctimer2a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER2A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER2B_CLKEN`"]
    #[inline]
    pub fn is_ctimer2b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER2B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER3A_CLKEN`"]
    #[inline]
    pub fn is_ctimer3a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER3A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER3B_CLKEN`"]
    #[inline]
    pub fn is_ctimer3b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER3B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER4A_CLKEN`"]
    #[inline]
    pub fn is_ctimer4a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER4A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER4B_CLKEN`"]
    #[inline]
    pub fn is_ctimer4b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER4B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER5A_CLKEN`"]
    #[inline]
    pub fn is_ctimer5a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER5A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER5B_CLKEN`"]
    #[inline]
    pub fn is_ctimer5b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER5B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER6A_CLKEN`"]
    #[inline]
    pub fn is_ctimer6a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER6A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER6B_CLKEN`"]
    #[inline]
    pub fn is_ctimer6b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER6B_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER7A_CLKEN`"]
    #[inline]
    pub fn is_ctimer7a_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER7A_CLKEN
    }
    #[doc = "Checks if the value of the field is `CTIMER7B_CLKEN`"]
    #[inline]
    pub fn is_ctimer7b_clken(&self) -> bool {
        *self == CLOCKENSTATR::CTIMER7B_CLKEN
    }
    #[doc = "Checks if the value of the field is `DAP_CLKEN`"]
    #[inline]
    pub fn is_dap_clken(&self) -> bool {
        *self == CLOCKENSTATR::DAP_CLKEN
    }
    #[doc = "Checks if the value of the field is `IOMSTRIFC0_CLKEN`"]
    #[inline]
    pub fn is_iomstrifc0_clken(&self) -> bool {
        *self == CLOCKENSTATR::IOMSTRIFC0_CLKEN
    }
}
// Write-side counterpart of CLOCKENSTATR: same one-hot variants, but no
// `_Reserved` — only known single-bit values can be written via `variant()`.
#[doc = "Values that can be written to the field `CLOCKENSTAT`"]
pub enum CLOCKENSTATW {
    #[doc = "Clock enable for the ADC. value."]
    ADC_CLKEN,
    #[doc = "Clock enable for the APBDMA ACTIVITY value."]
    APBDMA_ACTIVITY_CLKEN,
    #[doc = "Clock enable for the APBDMA AOH DOMAIN value."]
    APBDMA_AOH_CLKEN,
    #[doc = "Clock enable for the APBDMA AOL DOMAIN value."]
    APBDMA_AOL_CLKEN,
    #[doc = "Clock enable for the APBDMA_APB value."]
    APBDMA_APB_CLKEN,
    #[doc = "Clock enable for the APBDMA_BLEL value."]
    APBDMA_BLEL_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPA value."]
    APBDMA_HCPA_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPB value."]
    APBDMA_HCPB_CLKEN,
    #[doc = "Clock enable for the APBDMA_HCPC value."]
    APBDMA_HCPC_CLKEN,
    #[doc = "Clock enable for the APBDMA_MSPI value."]
    APBDMA_MSPI_CLKEN,
    #[doc = "Clock enable for the APBDMA_PDM value."]
    APBDMA_PDM_CLKEN,
    #[doc = "Clock enable for the BLEIF value."]
    BLEIF_CLK_CLKEN,
    #[doc = "Clock enable for the BLEIF 32khZ CLOCK value."]
    BLEIF_CLK32K_CLKEN,
    #[doc = "Clock enable for the CTIMER BLOCK value."]
    CTIMER_CLKEN,
    #[doc = "Clock enable for the CTIMER0A value."]
    CTIMER0A_CLKEN,
    #[doc = "Clock enable for the CTIMER0B value."]
    CTIMER0B_CLKEN,
    #[doc = "Clock enable for the CTIMER1A value."]
    CTIMER1A_CLKEN,
    #[doc = "Clock enable for the CTIMER1B value."]
    CTIMER1B_CLKEN,
    #[doc = "Clock enable for the CTIMER2A value."]
    CTIMER2A_CLKEN,
    #[doc = "Clock enable for the CTIMER2B value."]
    CTIMER2B_CLKEN,
    #[doc = "Clock enable for the CTIMER3A value."]
    CTIMER3A_CLKEN,
    #[doc = "Clock enable for the CTIMER3B value."]
    CTIMER3B_CLKEN,
    #[doc = "Clock enable for the CTIMER4A value."]
    CTIMER4A_CLKEN,
    #[doc = "Clock enable for the CTIMER4B value."]
    CTIMER4B_CLKEN,
    #[doc = "Clock enable for the CTIMER5A value."]
    CTIMER5A_CLKEN,
    #[doc = "Clock enable for the CTIMER5B value."]
    CTIMER5B_CLKEN,
    #[doc = "Clock enable for the CTIMER6A value."]
    CTIMER6A_CLKEN,
    #[doc = "Clock enable for the CTIMER6B value."]
    CTIMER6B_CLKEN,
    #[doc = "Clock enable for the CTIMER7A value."]
    CTIMER7A_CLKEN,
    #[doc = "Clock enable for the CTIMER7B value."]
    CTIMER7B_CLKEN,
    #[doc = "Clock enable for the DAP value."]
    DAP_CLKEN,
    #[doc = "Clock enable for the IOMSTRIFC0 value."]
    IOMSTRIFC0_CLKEN,
}
impl CLOCKENSTATW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    // Same one-hot mapping as `CLOCKENSTATR::bits()`: variant k in
    // declaration order yields `1 << k`.
    pub fn _bits(&self) -> u32 {
        match *self {
            CLOCKENSTATW::ADC_CLKEN => 1,
            CLOCKENSTATW::APBDMA_ACTIVITY_CLKEN => 2,
            CLOCKENSTATW::APBDMA_AOH_CLKEN => 4,
            CLOCKENSTATW::APBDMA_AOL_CLKEN => 8,
            CLOCKENSTATW::APBDMA_APB_CLKEN => 16,
            CLOCKENSTATW::APBDMA_BLEL_CLKEN => 32,
            CLOCKENSTATW::APBDMA_HCPA_CLKEN => 64,
            CLOCKENSTATW::APBDMA_HCPB_CLKEN => 128,
            CLOCKENSTATW::APBDMA_HCPC_CLKEN => 256,
            CLOCKENSTATW::APBDMA_MSPI_CLKEN => 512,
            CLOCKENSTATW::APBDMA_PDM_CLKEN => 1024,
            CLOCKENSTATW::BLEIF_CLK_CLKEN => 2048,
            CLOCKENSTATW::BLEIF_CLK32K_CLKEN => 4096,
            CLOCKENSTATW::CTIMER_CLKEN => 8192,
            CLOCKENSTATW::CTIMER0A_CLKEN => 16384,
            CLOCKENSTATW::CTIMER0B_CLKEN => 32768,
            CLOCKENSTATW::CTIMER1A_CLKEN => 65536,
            CLOCKENSTATW::CTIMER1B_CLKEN => 131072,
            CLOCKENSTATW::CTIMER2A_CLKEN => 262144,
            CLOCKENSTATW::CTIMER2B_CLKEN => 524288,
            CLOCKENSTATW::CTIMER3A_CLKEN => 1048576,
            CLOCKENSTATW::CTIMER3B_CLKEN => 2097152,
            CLOCKENSTATW::CTIMER4A_CLKEN => 4194304,
            CLOCKENSTATW::CTIMER4B_CLKEN => 8388608,
            CLOCKENSTATW::CTIMER5A_CLKEN => 16777216,
            CLOCKENSTATW::CTIMER5B_CLKEN => 33554432,
            CLOCKENSTATW::CTIMER6A_CLKEN => 67108864,
            CLOCKENSTATW::CTIMER6B_CLKEN => 134217728,
            CLOCKENSTATW::CTIMER7A_CLKEN => 268435456,
            CLOCKENSTATW::CTIMER7B_CLKEN => 536870912,
            CLOCKENSTATW::DAP_CLKEN => 1073741824,
            CLOCKENSTATW::IOMSTRIFC0_CLKEN => 2147483648,
        }
    }
}
#[doc = r" Proxy"]
// Borrows the in-flight write value `W`; its methods set the field and
// return the `&mut W` for further chaining.
pub struct _CLOCKENSTATW<'a> {
    w: &'a mut W,
}
impl<'a> _CLOCKENSTATW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: CLOCKENSTATW) -> &'a mut W {
        // Safe because `variant._bits()` only produces values from the
        // known one-hot table.
        unsafe { self.bits(variant._bits()) }
    }
    // One convenience setter per variant; each simply forwards to `variant`.
    #[doc = "Clock enable for the ADC. value."]
    #[inline]
    pub fn adc_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::ADC_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA ACTIVITY value."]
    #[inline]
    pub fn apbdma_activity_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_ACTIVITY_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA AOH DOMAIN value."]
    #[inline]
    pub fn apbdma_aoh_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_AOH_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA AOL DOMAIN value."]
    #[inline]
    pub fn apbdma_aol_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_AOL_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_APB value."]
    #[inline]
    pub fn apbdma_apb_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_APB_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_BLEL value."]
    #[inline]
    pub fn apbdma_blel_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_BLEL_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_HCPA value."]
    #[inline]
    pub fn apbdma_hcpa_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_HCPA_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_HCPB value."]
    #[inline]
    pub fn apbdma_hcpb_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_HCPB_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_HCPC value."]
    #[inline]
    pub fn apbdma_hcpc_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_HCPC_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_MSPI value."]
    #[inline]
    pub fn apbdma_mspi_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_MSPI_CLKEN)
    }
    #[doc = "Clock enable for the APBDMA_PDM value."]
    #[inline]
    pub fn apbdma_pdm_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::APBDMA_PDM_CLKEN)
    }
    #[doc = "Clock enable for the BLEIF value."]
    #[inline]
    pub fn bleif_clk_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::BLEIF_CLK_CLKEN)
    }
    #[doc = "Clock enable for the BLEIF 32khZ CLOCK value."]
    #[inline]
    pub fn bleif_clk32k_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::BLEIF_CLK32K_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER BLOCK value."]
    #[inline]
    pub fn ctimer_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER0A value."]
    #[inline]
    pub fn ctimer0a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER0A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER0B value."]
    #[inline]
    pub fn ctimer0b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER0B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER1A value."]
    #[inline]
    pub fn ctimer1a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER1A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER1B value."]
    #[inline]
    pub fn ctimer1b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER1B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER2A value."]
    #[inline]
    pub fn ctimer2a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER2A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER2B value."]
    #[inline]
    pub fn ctimer2b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER2B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER3A value."]
    #[inline]
    pub fn ctimer3a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER3A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER3B value."]
    #[inline]
    pub fn ctimer3b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER3B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER4A value."]
    #[inline]
    pub fn ctimer4a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER4A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER4B value."]
    #[inline]
    pub fn ctimer4b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER4B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER5A value."]
    #[inline]
    pub fn ctimer5a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER5A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER5B value."]
    #[inline]
    pub fn ctimer5b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER5B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER6A value."]
    #[inline]
    pub fn ctimer6a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER6A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER6B value."]
    #[inline]
    pub fn ctimer6b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER6B_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER7A value."]
    #[inline]
    pub fn ctimer7a_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER7A_CLKEN)
    }
    #[doc = "Clock enable for the CTIMER7B value."]
    #[inline]
    pub fn ctimer7b_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::CTIMER7B_CLKEN)
    }
    #[doc = "Clock enable for the DAP value."]
    #[inline]
    pub fn dap_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::DAP_CLKEN)
    }
    #[doc = "Clock enable for the IOMSTRIFC0 value."]
    #[inline]
    pub fn iomstrifc0_clken(self) -> &'a mut W {
        self.variant(CLOCKENSTATW::IOMSTRIFC0_CLKEN)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    // The field spans the whole register (MASK = 0xffff_ffff, OFFSET = 0),
    // so this effectively replaces the entire pending register value.
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        const MASK: u32 = 4294967295;
        const OFFSET: u8 = 0;
        self.w.bits &= !((MASK as u32) << OFFSET);
        self.w.bits |= ((value & MASK) as u32) << OFFSET;
        self.w
    }
}
// Read-side accessors for CLOCKENSTAT.
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:31 - Clock enable status"]
    #[inline]
    pub fn clockenstat(&self) -> CLOCKENSTATR {
        // The field occupies the whole register (offset 0, full-width mask),
        // so the raw value is decoded unchanged.
        CLOCKENSTATR::_from(self.bits)
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    // Unsafe escape hatch: the caller must guarantee the raw value is valid
    // for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:31 - Clock enable status"]
    #[inline]
    pub fn clockenstat(&mut self) -> _CLOCKENSTATW {
        // Returns a proxy borrowing `self`; the actual write happens when a
        // proxy method is invoked.
        _CLOCKENSTATW { w: self }
    }
}
|
use pest::Parser;
use pest_derive::*;
// Parser generated by pest's derive macro from the grammar file
// `assembler.pest`; the `Rule` enum it produces is referenced below.
#[derive(Parser)]
#[grammar = "assembler.pest"]
struct AssemblerParser;
use crate::instruction::Instruction;
use pest::iterators::Pairs;
// Parse errors produced by the generated `assembler` grammar.
pub type Error = pest::error::Error<Rule>;
/// Parses assembler source `text` into a list of [`Instruction`]s.
///
/// # Errors
///
/// Returns a pest parse [`Error`] when `text` does not match the
/// `assembler` grammar rule.
pub fn create_program(text: &str) -> Result<Vec<Instruction>, Error> {
    let mut assembler: Pairs<Rule> = AssemblerParser::parse(Rule::assembler, text)?;
    let instructions = assembler
        .next()
        .unwrap()
        .into_inner()
        // Comments and the implicit end-of-input token carry no instructions.
        .filter(|pair| {
            let r = pair.as_rule();
            r != Rule::comment && r != Rule::EOI
        })
        .map(|pair| {
            let mut pairs = pair.into_inner();
            let instr = pairs.next().unwrap();
            let rule = instr.as_rule();
            // Operand-carrying rules wrap their numeric argument as the first
            // inner pair. The closure consumes `instr`, so each arm calls it
            // at most once; the grammar guarantees the operand parses.
            let get_index = || instr.into_inner().next().unwrap().as_str().parse().unwrap();
            match rule {
                Rule::get => Instruction::Get,
                Rule::put => Instruction::Put,
                Rule::load => Instruction::Load(get_index()),
                Rule::loadi => Instruction::Loadi(get_index()),
                Rule::store => Instruction::Store(get_index()),
                Rule::storei => Instruction::Storei(get_index()),
                Rule::add => Instruction::Add(get_index()),
                Rule::sub => Instruction::Sub(get_index()),
                Rule::shift => Instruction::Shift(get_index()),
                Rule::inc => Instruction::Inc,
                Rule::dec => Instruction::Dec,
                Rule::jump => Instruction::Jump(get_index()),
                Rule::jpos => Instruction::Jpos(get_index()),
                Rule::jzero => Instruction::Jzero(get_index()),
                Rule::jneg => Instruction::Jneg(get_index()),
                Rule::halt => Instruction::Halt,
                _ => unreachable!(),
            }
        })
        .collect();
    Ok(instructions)
}
|
extern crate time;
use std::fs::File;
use std::io::prelude::*;
use std::io::{BufRead, BufReader};
use std::io::ErrorKind;
use std::os::unix::net::UnixStream;
use std::path::PathBuf;
use std::thread;
use std::time::Duration;
use config::Config;
use itertools::Itertools;
use relm_core::Sender;
use ::monitor::*;
// Battery state monitor backed by the Linux sysfs power-supply interface.
pub struct Battery {
    // Base sysfs directory, e.g. /sys/class/power_supply/BAT0.
    syspath: PathBuf,
    // Last capacity reading, in percent (0-100; see the range match in
    // `send_state`).
    capacity: u8,
    // True when the sysfs status file reads "Charging" or "Full".
    charging: bool,
}
impl Default for Battery {
    /// Builds a monitor for BAT0 and primes it with an initial sysfs reading.
    fn default() -> Self {
        let mut battery = Self {
            syspath: PathBuf::from("/sys/class/power_supply/BAT0"),
            capacity: 0,
            charging: false,
        };
        battery.read_capacity();
        battery.read_charging();
        battery
    }
}
impl Battery {
fn read_capacity(&mut self) {
let mut f = File::open(self.syspath.join("capacity")).expect("failed to open BAT/capacity");
let mut s = String::with_capacity(5);
f.read_to_string(&mut s).expect("failed to read BAT/capacity");;
self.capacity = u8::from_str_radix(s.trim(), 10).expect("invalid number from BAT/capacity");
}
fn read_charging(&mut self) {
let mut f = File::open(self.syspath.join("status")).expect("failed to open BAT/status");
let mut s = String::with_capacity(16);
f.read_to_string(&mut s).expect("failed to read BAT/status");;
self.charging = match s.trim() {
"Charging" => true,
"Full" => true,
_ => false
};
}
fn send_state(&self, config: &'static Config, channel: &Sender<MonitorMsg>) {
let color = if self.charging {
config.get_color("blue")
} else {
match self.capacity {
0..= 15 => config.get_color("red"),
16..= 40 => config.get_color("yellow"),
41..=100 => config.get_color("green"),
_ => panic!("battery capacity outside range 0..100")
}
};
let relevance = match self.capacity < 40 {
true => Relevance::Urgent,
false => Relevance::Background,
};
let text = format!("{} {}", self.charging, self.capacity);
channel.send(MonitorMsg::SetText(text));
channel.send(MonitorMsg::SetColor(color));
channel.send(MonitorMsg::SetRelevance(Relevance::Urgent));
}
}
impl Monitor for Battery {
    /// Spawns a background thread that listens on the acpid socket and
    /// re-publishes battery state after each batch of events or on timeout.
    fn start(mut self, config: &'static Config, channel: Sender<MonitorMsg>) {
        // Publish the initial reading before entering the event loop.
        self.send_state(config, &channel);
        let sock_timeout = 10;
        thread::spawn(move || {
            let acpi = UnixStream::connect("/var/run/acpid.socket").expect("couldn't open acpid socket");
            acpi.set_read_timeout(Some(Duration::from_secs(sock_timeout))).expect("failed to set timeout on acpid socket");
            let mut acpi = BufReader::new(acpi);
            let mut s = String::new();
            loop {
                // Inner loop: drain acpid lines until a read times out.
                loop {
                    match acpi.read_line(&mut s) {
                        // BUG FIX: Ok(0) means EOF; the old code did
                        // `s.truncate(s.len() - 1)` which underflows on an
                        // empty buffer and then parsed garbage. Fail loudly.
                        Ok(0) => panic!("acpid socket closed"),
                        Ok(_) => {
                            // Strip the trailing newline without assuming one
                            // exists (a final line may lack it).
                            let line = s.trim_end_matches('\n');
                            let (event, _, _, value) = line.split(' ').collect_tuple().expect("unexpected output from acpid");
                            if event == "ac_adapter" {
                                let value = usize::from_str_radix(value, 10).expect("unexpected output from acpid");
                                self.charging = value != 0;
                            }
                            s.clear();
                            // After an event, shorten the timeout —
                            // presumably to coalesce event bursts before
                            // refreshing state (TODO confirm intent).
                            acpi.get_mut().set_read_timeout(Some(Duration::from_millis(200)))
                                .expect("failed to set timeout on acpid socket");
                        },
                        Err(ref e) if e.kind() == ErrorKind::WouldBlock => {
                            // Timed out: restore the long idle timeout and
                            // fall through to refresh + publish.
                            acpi.get_mut().set_read_timeout(Some(Duration::from_secs(sock_timeout)))
                                .expect("failed to set timeout on acpid socket");
                            break;
                        },
                        Err(e) => panic!("couldn't read from acpid socket: {}", e),
                    }
                }
                self.read_capacity();
                self.send_state(config, &channel);
            }
        });
    }
}
|
use {
std::net::{
SocketAddr,
TcpListener,
TcpStream,
},
std::rc::Rc,
std::cell::RefCell,
std::prelude::v1::*,
};
// Work-in-progress: wraps shared stream data behind an `Rc`.
struct Stream {
    data_stream : Rc<Data>,
}
// Backing storage referenced by `Stream`.
struct Data {
    data : Vec<usize>,
}
// Event type; `Poll` carries the polled payload. Further variants are
// planned per the placeholder comment.
enum Event<T> {
    Poll(T),
    //...
}
trait StreamT {
    // NOTE(review): `ep` is a bare `fn()`; the trailing comment suggests an
    // Arc<Mutex<..>> was intended here instead — confirm the design.
    fn poll_event_stream(&self, rcs : Rc<Stream>, ep : fn());//amx := Arc<Mutex>
}
// NOTE(review): this inherent impl does not compile as written: `self` is
// `&dyn StreamT` but is matched against an `Event` pattern, the match arm
// has no `=>` body, and the match is non-exhaustive. It also shadows the
// trait method of the same name while dropping the `ep` parameter. Left
// byte-identical pending a decision on the intended design.
impl<'a> dyn StreamT {
    fn poll_event_stream(&self, rcs : Rc<Stream>) {
        //TODO(Stream):= ...stream data by using the std::thread library.
        //\n Rc<>
        match self {
            Event::Poll(rcs)
        };
    }
}
/*
Event takes input from Data which is derived from Collection
*/ |
use autorel_chlg::{BreakingInfo, Change, ChangeLog, ChangeType};
#[test]
fn markdown_example() {
    // Builds a changelog covering each interesting combination: unscoped vs
    // scoped, breaking vs non-breaking, and a breaking change carrying its
    // own extra description.
    let mut changelog = ChangeLog::default();
    changelog += Change {
        type_: ChangeType::Feature,
        scope: None,
        description: "Feature without scope",
        breaking: BreakingInfo::NotBreaking,
        body: None,
    };
    changelog += Change {
        type_: ChangeType::Feature,
        scope: Some("test-scope"),
        description: "Feature with scope",
        breaking: BreakingInfo::NotBreaking,
        body: None,
    };
    changelog += Change {
        type_: ChangeType::Fix,
        scope: Some("test-scope"),
        description: "Breaking fix",
        breaking: BreakingInfo::Breaking,
        body: None,
    };
    changelog += Change {
        type_: ChangeType::Feature,
        scope: None,
        description: "Breaking feature with more info",
        breaking: BreakingInfo::BreakingWithDescriptions(vec!["because!"]),
        body: None,
    };
    // Per the expected output below, a breaking fix is listed both under
    // "Breaking changes" (scoped) and under its regular "Bug fixes" section.
    let formated = format!("{}", changelog.markdown());
    assert_eq!(
        formated,
        r"### Breaking changes
* because!
#### test-scope
* Breaking fix
### Features
* Feature without scope
* Breaking feature with more info
#### test-scope
* Feature with scope
### Bug fixes
#### test-scope
* Breaking fix
"
    )
}
|
use crate::block::{Block, Cid};
use crate::bitswap::Priority;
use crate::repo::{Repo, RepoTypes};
use libp2p::PeerId;
use std::sync::mpsc::{channel, Sender, Receiver};
/// A bitswap block-exchange policy over a repo of type `TRepoTypes`.
pub trait Strategy<TRepoTypes: RepoTypes>: Send + Unpin {
    /// Creates the strategy around `repo`.
    fn new(repo: Repo<TRepoTypes>) -> Self;
    /// Handles a want request for `cid` from `source` with the given `priority`.
    fn process_want(&self, source: PeerId, cid: Cid, priority: Priority);
    /// Handles a `block` received from `source`.
    fn process_block(&self, source: PeerId, block: Block);
    /// Retrieves the next pending outgoing event, if any.
    fn poll(&self) -> Option<StrategyEvent>;
}
/// Outgoing actions produced by a strategy for the caller to execute.
pub enum StrategyEvent {
    /// Send `block` to `peer_id`.
    Send {
        peer_id: PeerId,
        block: Block,
    }
}
/// Strategy that serves every wanted block it can find in its repo.
pub struct AltruisticStrategy<TRepoTypes: RepoTypes> {
    repo: Repo<TRepoTypes>,
    // Sender/receiver pair queueing events until `poll` drains them one at
    // a time.
    events: (Sender<StrategyEvent>, Receiver<StrategyEvent>),
}
impl<TRepoTypes: RepoTypes> Strategy<TRepoTypes> for AltruisticStrategy<TRepoTypes> {
    /// Creates a strategy serving blocks from `repo`, with an internal
    /// channel used to queue outgoing events for `poll`.
    fn new(repo: Repo<TRepoTypes>) -> Self {
        AltruisticStrategy {
            repo,
            events: channel::<StrategyEvent>(),
        }
    }
    /// Looks the wanted block up in the repo on a spawned task and, on
    /// success, queues a `Send` event back to `source`.
    fn process_want(&self, source: PeerId, cid: Cid, priority: Priority) {
        use futures::FutureExt;
        use futures::TryFutureExt;
        info!("Peer {} wants block {} with priority {}", source.to_base58(), cid.to_string(), priority);
        let events = self.events.0.clone();
        let mut repo = self.repo.clone();
        tokio::spawn(async move {
            // `match` binds the success value directly, replacing the old
            // `if let Err` check followed by `res.unwrap()`.
            let block = match repo.get_block(&cid).await {
                Ok(block) => block,
                Err(e) => {
                    warn!("Peer {} wanted block {} but we failed: {}", source.to_base58(), cid, e);
                    return;
                }
            };
            let req = StrategyEvent::Send {
                peer_id: source.clone(),
                block,
            };
            if let Err(e) = events.send(req) {
                warn!("Peer {} wanted block {} we failed start sending it: {}", source.to_base58(), cid, e);
            }
        }.unit_error().boxed().compat());
    }
    /// Stores a received block in the repo on a spawned task; failures are
    /// only logged.
    fn process_block(&self, source: PeerId, block: Block) {
        use futures::FutureExt;
        use futures::TryFutureExt;
        let cid = block.cid().to_string();
        info!("Received block {} from peer {}", cid, source.to_base58());
        let mut repo = self.repo.clone();
        tokio::spawn(async move {
            // Await directly; the intermediate `.boxed()` added nothing.
            if let Err(e) = repo.put_block(block).await {
                debug!("Got block {} from peer {} but failed to store it: {}", cid, source.to_base58(), e);
            }
        }.unit_error().boxed().compat());
    }
    /// Drains at most one queued event; `None` when the queue is empty.
    fn poll(&self) -> Option<StrategyEvent> {
        self.events.1.try_recv().ok()
    }
}
#[cfg(test)]
mod tests {
    // NOTE(review): this test was written against an older API — it uses a
    // `Ledger` type and a `process_want(&mut ledger, &peer_id, ...)`
    // signature that no longer match the `Strategy` trait above — and was
    // left commented out. Port it to the current trait before re-enabling.
    /*
    use super::*;
    use crate::block::Block;
    #[test]
    fn test_altruistic_strategy() {
    let block_1 = Block::from("1");
    let block_2 = Block::from("2");
    let repo = Repo::new();
    repo.put(block_1.clone());
    let mut strategy = AltruisticStrategy::new(repo);
    let mut ledger = Ledger::new();
    let peer_id = PeerId::random();
    ledger.peer_connected(peer_id.clone());
    strategy.process_want(&mut ledger, &peer_id, block_1.cid(), 1);
    strategy.process_want(&mut ledger, &peer_id, block_2.cid(), 1);
    ledger.send_messages();
    let peer_ledger = ledger.peer_ledger(&peer_id);
    assert_eq!(peer_ledger.sent_blocks(), 1);
    }*/
}
|
// Auto-generated (svd2rust-style) reader types for the PROC1_INTS3 register:
// `R` wraps a captured 32-bit register value, and each `*_R` alias below is a
// single-bit field reader for one GPIO interrupt-status flag.
#[doc = "Reader of register PROC1_INTS3"]
pub type R = crate::R<u32, super::PROC1_INTS3>;
#[doc = "Reader of field `GPIO29_EDGE_HIGH`"]
pub type GPIO29_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO29_EDGE_LOW`"]
pub type GPIO29_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO29_LEVEL_HIGH`"]
pub type GPIO29_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO29_LEVEL_LOW`"]
pub type GPIO29_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO28_EDGE_HIGH`"]
pub type GPIO28_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO28_EDGE_LOW`"]
pub type GPIO28_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO28_LEVEL_HIGH`"]
pub type GPIO28_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO28_LEVEL_LOW`"]
pub type GPIO28_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO27_EDGE_HIGH`"]
pub type GPIO27_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO27_EDGE_LOW`"]
pub type GPIO27_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO27_LEVEL_HIGH`"]
pub type GPIO27_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO27_LEVEL_LOW`"]
pub type GPIO27_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO26_EDGE_HIGH`"]
pub type GPIO26_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO26_EDGE_LOW`"]
pub type GPIO26_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO26_LEVEL_HIGH`"]
pub type GPIO26_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO26_LEVEL_LOW`"]
pub type GPIO26_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO25_EDGE_HIGH`"]
pub type GPIO25_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO25_EDGE_LOW`"]
pub type GPIO25_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO25_LEVEL_HIGH`"]
pub type GPIO25_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO25_LEVEL_LOW`"]
pub type GPIO25_LEVEL_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO24_EDGE_HIGH`"]
pub type GPIO24_EDGE_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO24_EDGE_LOW`"]
pub type GPIO24_EDGE_LOW_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO24_LEVEL_HIGH`"]
pub type GPIO24_LEVEL_HIGH_R = crate::R<bool, bool>;
#[doc = "Reader of field `GPIO24_LEVEL_LOW`"]
pub type GPIO24_LEVEL_LOW_R = crate::R<bool, bool>;
impl R {
    /// True if bit `n` of the captured register value is set.
    ///
    /// Private helper shared by all field accessors below; factors out the
    /// bit-extraction expression that was previously duplicated 24 times.
    #[inline(always)]
    fn bit_is_set(&self, n: u32) -> bool {
        ((self.bits >> n) & 0x01) != 0
    }
    #[doc = "Bit 23"]
    #[inline(always)]
    pub fn gpio29_edge_high(&self) -> GPIO29_EDGE_HIGH_R {
        GPIO29_EDGE_HIGH_R::new(self.bit_is_set(23))
    }
    #[doc = "Bit 22"]
    #[inline(always)]
    pub fn gpio29_edge_low(&self) -> GPIO29_EDGE_LOW_R {
        GPIO29_EDGE_LOW_R::new(self.bit_is_set(22))
    }
    #[doc = "Bit 21"]
    #[inline(always)]
    pub fn gpio29_level_high(&self) -> GPIO29_LEVEL_HIGH_R {
        GPIO29_LEVEL_HIGH_R::new(self.bit_is_set(21))
    }
    #[doc = "Bit 20"]
    #[inline(always)]
    pub fn gpio29_level_low(&self) -> GPIO29_LEVEL_LOW_R {
        GPIO29_LEVEL_LOW_R::new(self.bit_is_set(20))
    }
    #[doc = "Bit 19"]
    #[inline(always)]
    pub fn gpio28_edge_high(&self) -> GPIO28_EDGE_HIGH_R {
        GPIO28_EDGE_HIGH_R::new(self.bit_is_set(19))
    }
    #[doc = "Bit 18"]
    #[inline(always)]
    pub fn gpio28_edge_low(&self) -> GPIO28_EDGE_LOW_R {
        GPIO28_EDGE_LOW_R::new(self.bit_is_set(18))
    }
    #[doc = "Bit 17"]
    #[inline(always)]
    pub fn gpio28_level_high(&self) -> GPIO28_LEVEL_HIGH_R {
        GPIO28_LEVEL_HIGH_R::new(self.bit_is_set(17))
    }
    #[doc = "Bit 16"]
    #[inline(always)]
    pub fn gpio28_level_low(&self) -> GPIO28_LEVEL_LOW_R {
        GPIO28_LEVEL_LOW_R::new(self.bit_is_set(16))
    }
    #[doc = "Bit 15"]
    #[inline(always)]
    pub fn gpio27_edge_high(&self) -> GPIO27_EDGE_HIGH_R {
        GPIO27_EDGE_HIGH_R::new(self.bit_is_set(15))
    }
    #[doc = "Bit 14"]
    #[inline(always)]
    pub fn gpio27_edge_low(&self) -> GPIO27_EDGE_LOW_R {
        GPIO27_EDGE_LOW_R::new(self.bit_is_set(14))
    }
    #[doc = "Bit 13"]
    #[inline(always)]
    pub fn gpio27_level_high(&self) -> GPIO27_LEVEL_HIGH_R {
        GPIO27_LEVEL_HIGH_R::new(self.bit_is_set(13))
    }
    #[doc = "Bit 12"]
    #[inline(always)]
    pub fn gpio27_level_low(&self) -> GPIO27_LEVEL_LOW_R {
        GPIO27_LEVEL_LOW_R::new(self.bit_is_set(12))
    }
    #[doc = "Bit 11"]
    #[inline(always)]
    pub fn gpio26_edge_high(&self) -> GPIO26_EDGE_HIGH_R {
        GPIO26_EDGE_HIGH_R::new(self.bit_is_set(11))
    }
    #[doc = "Bit 10"]
    #[inline(always)]
    pub fn gpio26_edge_low(&self) -> GPIO26_EDGE_LOW_R {
        GPIO26_EDGE_LOW_R::new(self.bit_is_set(10))
    }
    #[doc = "Bit 9"]
    #[inline(always)]
    pub fn gpio26_level_high(&self) -> GPIO26_LEVEL_HIGH_R {
        GPIO26_LEVEL_HIGH_R::new(self.bit_is_set(9))
    }
    #[doc = "Bit 8"]
    #[inline(always)]
    pub fn gpio26_level_low(&self) -> GPIO26_LEVEL_LOW_R {
        GPIO26_LEVEL_LOW_R::new(self.bit_is_set(8))
    }
    #[doc = "Bit 7"]
    #[inline(always)]
    pub fn gpio25_edge_high(&self) -> GPIO25_EDGE_HIGH_R {
        GPIO25_EDGE_HIGH_R::new(self.bit_is_set(7))
    }
    #[doc = "Bit 6"]
    #[inline(always)]
    pub fn gpio25_edge_low(&self) -> GPIO25_EDGE_LOW_R {
        GPIO25_EDGE_LOW_R::new(self.bit_is_set(6))
    }
    #[doc = "Bit 5"]
    #[inline(always)]
    pub fn gpio25_level_high(&self) -> GPIO25_LEVEL_HIGH_R {
        GPIO25_LEVEL_HIGH_R::new(self.bit_is_set(5))
    }
    #[doc = "Bit 4"]
    #[inline(always)]
    pub fn gpio25_level_low(&self) -> GPIO25_LEVEL_LOW_R {
        GPIO25_LEVEL_LOW_R::new(self.bit_is_set(4))
    }
    #[doc = "Bit 3"]
    #[inline(always)]
    pub fn gpio24_edge_high(&self) -> GPIO24_EDGE_HIGH_R {
        GPIO24_EDGE_HIGH_R::new(self.bit_is_set(3))
    }
    #[doc = "Bit 2"]
    #[inline(always)]
    pub fn gpio24_edge_low(&self) -> GPIO24_EDGE_LOW_R {
        GPIO24_EDGE_LOW_R::new(self.bit_is_set(2))
    }
    #[doc = "Bit 1"]
    #[inline(always)]
    pub fn gpio24_level_high(&self) -> GPIO24_LEVEL_HIGH_R {
        GPIO24_LEVEL_HIGH_R::new(self.bit_is_set(1))
    }
    #[doc = "Bit 0"]
    #[inline(always)]
    pub fn gpio24_level_low(&self) -> GPIO24_LEVEL_LOW_R {
        GPIO24_LEVEL_LOW_R::new(self.bit_is_set(0))
    }
}
|
use crate::geometry::Axial;
use crate::terrain::TileTerrainType;
use serde::{Deserialize, Serialize};
/// Represents a connection of a room to another.
/// Length of the Bridge is defined by `radius - offset_end - offset_start`.
/// Connections are represented this way because it makes them much easier to
/// invert.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
pub struct RoomConnection {
    /// Axial direction of the neighbouring room this connection points to.
    pub direction: Axial,
    /// Where the bridge points start on the edge
    pub offset_start: u32,
    /// Where the bridge points end on the edge
    pub offset_end: u32,
}
/// Represents connections a room has to their neighbours. At most 6
/// (one per hexagon edge); `None` marks a missing connection.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct RoomConnections(pub [Option<RoomConnection>; 6]);
/// Newtype component wrapping the terrain type of a single tile.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Default, Copy)]
#[serde(rename_all = "camelCase")]
pub struct TerrainComponent(pub TileTerrainType);
/// Shared geometric properties of a room.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct RoomProperties {
    /// Radius of the room hexagon.
    pub radius: u32,
    /// Center of the room in room-local coordinates.
    pub center: Axial,
}
/// Per-room component holding the room's placement and RNG seed.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Default)]
#[serde(rename_all = "camelCase")]
pub struct RoomComponent {
    /// Offset coordinates in world space
    pub offset: Axial,
    /// Seed used when generating this room's content.
    pub seed: u64,
}
|
use std::path::Path;
use self::error::Error;
use crate::Config;
pub mod error;
#[cfg(feature = "json")]
pub mod json;
#[cfg(feature = "toml")]
pub mod toml;
#[cfg(feature = "yaml")]
pub mod yaml;
/// Loads a `Config` from `path`, dispatching on the file extension.
///
/// Supported extensions depend on the enabled cargo features: `json`,
/// `toml`, and `yaml`/`yml`. Any other extension — or a missing /
/// non-UTF-8 one — yields `Error::invalid_file_type`.
pub fn load<P>(path: P) -> Result<Config, Error>
where
    P: AsRef<Path>,
{
    // Flatten `Option<&OsStr>` -> `Option<&str>` so a single match handles
    // "no extension" and "non-UTF-8 extension" uniformly as `None`,
    // removing the duplicated error arms of the previous nested match.
    match path.as_ref().extension().and_then(|ext| ext.to_str()) {
        #[cfg(feature = "json")]
        Some("json") => self::json::load(path),
        #[cfg(feature = "toml")]
        Some("toml") => self::toml::load(path),
        // Both conventional YAML extensions share one arm.
        #[cfg(feature = "yaml")]
        Some("yaml") | Some("yml") => self::yaml::load(path),
        Some(ext) => Err(Error::invalid_file_type(
            Some(ext.to_string()),
            path.as_ref(),
        )),
        None => Err(Error::invalid_file_type(None, path.as_ref())),
    }
}
/// Saves `config` to `path`, dispatching on the file extension.
///
/// Supported extensions depend on the enabled cargo features: `json`,
/// `toml`, and `yaml`/`yml`. Any other extension — or a missing /
/// non-UTF-8 one — yields `Error::invalid_file_type`.
pub fn save<P>(path: P, config: &Config) -> Result<(), Error>
where
    P: AsRef<Path>,
{
    // Same flattening as `load`: `None` covers both a missing extension and
    // a non-UTF-8 one, removing the duplicated error arms.
    match path.as_ref().extension().and_then(|ext| ext.to_str()) {
        #[cfg(feature = "json")]
        Some("json") => self::json::save(path, config),
        #[cfg(feature = "toml")]
        Some("toml") => self::toml::save(path, config),
        // Both conventional YAML extensions share one arm.
        #[cfg(feature = "yaml")]
        Some("yaml") | Some("yml") => self::yaml::save(path, config),
        Some(ext) => Err(Error::invalid_file_type(
            Some(ext.to_string()),
            path.as_ref(),
        )),
        None => Err(Error::invalid_file_type(None, path.as_ref())),
    }
}
|
use crate::Error;
use derivative::Derivative;
use std::collections::{BTreeMap, BTreeSet};
use typed_index_collection::{CollectionWithId, Id, Idx};
/// The corresponding result type used by the crate.
type Result<T, E = Error> = std::result::Result<T, E>;
/// A set of `Idx<T>`, ordered so iteration is deterministic.
pub type IdxSet<T> = BTreeSet<Idx<T>>;
/// An object linking 2 types together.
pub trait Relation {
    /// The type of the source object
    type From;
    /// The type of the target object
    type To;
    /// Returns the complete set of the source objects.
    fn get_from(&self) -> IdxSet<Self::From>;
    /// Returns the complete set of the target objects.
    fn get_to(&self) -> IdxSet<Self::To>;
    /// For a given set of the source objects, returns the
    /// corresponding target objects.
    fn get_corresponding_forward(&self, from: &IdxSet<Self::From>) -> IdxSet<Self::To>;
    /// For a given set of the target objects, returns the
    /// corresponding source objects.
    fn get_corresponding_backward(&self, from: &IdxSet<Self::To>) -> IdxSet<Self::From>;
}
/// A one to many relation, i.e. to one `T` corresponds many `U`,
/// and a `U` has one corresponding `T`.
#[derive(Derivative, Debug)]
#[derivative(Default(bound = ""))]
pub struct OneToMany<T, U> {
    // Forward mapping: each `T` to the set of `U`s attached to it.
    one_to_many: BTreeMap<Idx<T>, IdxSet<U>>,
    // Backward mapping: each `U` to its unique owning `T`.
    many_to_one: BTreeMap<Idx<U>, Idx<T>>,
}
impl<T, U> OneToMany<T, U>
where
T: Id<T>,
U: Id<U> + Id<T>,
{
/// Construct the relation automatically from the 2 given
/// `CollectionWithId`s.
pub fn new(
one: &CollectionWithId<T>,
many: &CollectionWithId<U>,
rel_name: &str,
) -> Result<Self> {
let mut one_to_many = BTreeMap::default();
let mut many_to_one = BTreeMap::default();
for (many_idx, obj) in many {
let one_id = <U as Id<T>>::id(obj);
let one_idx = one
.get_idx(one_id)
.ok_or_else(|| Error::IdentifierNotFound(one_id.to_owned(), rel_name.to_owned()))?;
many_to_one.insert(many_idx, one_idx);
one_to_many
.entry(one_idx)
.or_insert_with(IdxSet::default)
.insert(many_idx);
}
Ok(OneToMany {
one_to_many,
many_to_one,
})
}
}
impl<T, U> Relation for OneToMany<T, U> {
type From = T;
type To = U;
fn get_from(&self) -> IdxSet<T> {
self.one_to_many.keys().cloned().collect()
}
fn get_to(&self) -> IdxSet<U> {
self.many_to_one.keys().cloned().collect()
}
fn get_corresponding_forward(&self, from: &IdxSet<T>) -> IdxSet<U> {
get_corresponding(&self.one_to_many, from)
}
fn get_corresponding_backward(&self, from: &IdxSet<U>) -> IdxSet<T> {
from.iter()
.filter_map(|from_idx| self.many_to_one.get(from_idx))
.cloned()
.collect()
}
}
/// A many to many relation, i.e. a `T` can have multiple `U`, and
/// vice versa.
#[derive(Default, Debug)]
pub struct ManyToMany<T, U> {
    // Forward mapping: each `T` to its set of `U`s.
    forward: BTreeMap<Idx<T>, IdxSet<U>>,
    // Backward mapping: each `U` to its set of `T`s (inverse of `forward`).
    backward: BTreeMap<Idx<U>, IdxSet<T>>,
}
impl<T, U> ManyToMany<T, U> {
    /// Constructor from the forward relation; the backward relation is
    /// derived by inverting every `(from, to)` pair.
    pub fn from_forward(forward: BTreeMap<Idx<T>, IdxSet<U>>) -> Self {
        let mut backward: BTreeMap<Idx<U>, IdxSet<T>> = BTreeMap::default();
        for (&from_idx, to_indices) in &forward {
            for &to_idx in to_indices {
                backward
                    .entry(to_idx)
                    .or_insert_with(IdxSet::default)
                    .insert(from_idx);
            }
        }
        ManyToMany { forward, backward }
    }
    /// Constructor from 2 chained relations, i.e. from the relations
    /// `A->B` and `B->C`, constructs the relation `A->C`.
    pub fn from_relations_chain<R1, R2>(r1: &R1, r2: &R2) -> Self
    where
        R1: Relation<From = T>,
        R2: Relation<From = R1::To, To = U>,
    {
        let mut forward = BTreeMap::new();
        for idx in r1.get_from() {
            let singleton: IdxSet<T> = Some(idx).into_iter().collect();
            let intermediate = r1.get_corresponding_forward(&singleton);
            forward.insert(idx, r2.get_corresponding_forward(&intermediate));
        }
        Self::from_forward(forward)
    }
    /// Constructor from 2 relations with a common sink, i.e. from the
    /// relations `A->B` and `C->B`, constructs the relation `A->C`.
    pub fn from_relations_sink<R1, R2>(r1: &R1, r2: &R2) -> Self
    where
        R1: Relation<From = T>,
        R2: Relation<From = U, To = R1::To>,
    {
        let mut forward = BTreeMap::new();
        for idx in r1.get_from() {
            let singleton: IdxSet<T> = Some(idx).into_iter().collect();
            let common = r1.get_corresponding_forward(&singleton);
            forward.insert(idx, r2.get_corresponding_backward(&common));
        }
        Self::from_forward(forward)
    }
    /// Constructor from 2 relations with a common source, i.e. from
    /// the relations `B->A` and `B->C`, constructs the relation
    /// `A->C`.
    pub fn from_relations_source<R1, R2>(r1: &R1, r2: &R2) -> Self
    where
        R1: Relation<To = T>,
        R2: Relation<From = R1::From, To = U>,
    {
        let mut forward = BTreeMap::new();
        for idx in r1.get_to() {
            let singleton: IdxSet<T> = Some(idx).into_iter().collect();
            let common = r1.get_corresponding_backward(&singleton);
            forward.insert(idx, r2.get_corresponding_forward(&common));
        }
        Self::from_forward(forward)
    }
}
impl<T, U> Relation for ManyToMany<T, U> {
type From = T;
type To = U;
fn get_from(&self) -> IdxSet<T> {
self.forward.keys().cloned().collect()
}
fn get_to(&self) -> IdxSet<U> {
self.backward.keys().cloned().collect()
}
fn get_corresponding_forward(&self, from: &IdxSet<T>) -> IdxSet<U> {
get_corresponding(&self.forward, from)
}
fn get_corresponding_backward(&self, from: &IdxSet<U>) -> IdxSet<T> {
get_corresponding(&self.backward, from)
}
}
fn get_corresponding<T, U>(map: &BTreeMap<Idx<T>, IdxSet<U>>, from: &IdxSet<T>) -> IdxSet<U> {
from.iter()
.filter_map(|from_idx| map.get(from_idx))
.flat_map(|indices| indices.iter().cloned())
.collect()
}
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
use core::{mem};
use crate::{
kty::{c_int, itimerspec, TFD_TIMER_ABSTIME},
syscall::{close, timerfd_settime, timerfd_gettime, read},
fd::{FdContainer},
util::retry::{retry},
time::{Time, time_to_timespec, time_from_timespec},
result::{Result},
lmem,
};
/// A timer.
/// A timer.
pub struct Timer {
    // The underlying timerfd file descriptor.
    fd: c_int,
    // Whether this handle owns `fd` (and must close it on drop).
    owned: bool,
}
impl Timer {
    /// Disables the timer.
    ///
    /// Arms the timer with an all-zero `itimerspec`; per timerfd_settime(2)
    /// a zero `it_value` disarms the timer.
    pub fn disable(&self) -> Result {
        let arg = lmem::zeroed();
        rv!(timerfd_settime(self.fd, 0, &arg, None))
    }
    /// Sets the timer to expire every `iv` time units.
    ///
    /// The first expiration is also `iv` units from now (relative arming).
    pub fn interval(&self, iv: Time) -> Result {
        let arg = itimerspec {
            it_interval: time_to_timespec(iv),
            it_value: time_to_timespec(iv),
        };
        rv!(timerfd_settime(self.fd, 0, &arg, None))
    }
    /// Sets the timer to expire every `iv` time units, starting at the absolute `start`.
    pub fn interval_from(&self, iv: Time, start: Time) -> Result {
        let arg = itimerspec {
            it_interval: time_to_timespec(iv),
            it_value: time_to_timespec(start),
        };
        // TFD_TIMER_ABSTIME: `it_value` is an absolute point in time.
        rv!(timerfd_settime(self.fd, TFD_TIMER_ABSTIME, &arg, None))
    }
    /// Sets the timer to expire every `iv` time units, starting in `when` units.
    pub fn interval_in(&self, iv: Time, when: Time) -> Result {
        let arg = itimerspec {
            it_interval: time_to_timespec(iv),
            it_value: time_to_timespec(when),
        };
        rv!(timerfd_settime(self.fd, 0, &arg, None))
    }
    /// Sets the timer to expire once at the absolute `when`.
    pub fn once_at(&self, when: Time) -> Result {
        let arg = itimerspec {
            // Zero interval: the timer fires once and does not repeat.
            it_interval: lmem::zeroed(),
            it_value: time_to_timespec(when),
        };
        rv!(timerfd_settime(self.fd, TFD_TIMER_ABSTIME, &arg, None))
    }
    /// Sets the timer to expire in `when` time units.
    pub fn once_in(&self, when: Time) -> Result {
        let arg = itimerspec {
            it_interval: lmem::zeroed(),
            it_value: time_to_timespec(when),
        };
        rv!(timerfd_settime(self.fd, 0, &arg, None))
    }
    /// Returns the status of the timer.
    ///
    /// TODO: Document this.
    ///
    /// NOTE(review): returns `(interval, remaining-until-next-expiration)`
    /// as read back from `timerfd_gettime` — confirm ordering with callers.
    pub fn status(&self) -> Result<(Time, Time)> {
        let mut arg = lmem::zeroed();
        rv!(timerfd_gettime(self.fd, &mut arg))?;
        Ok((time_from_timespec(arg.it_interval), time_from_timespec(arg.it_value)))
    }
    /// Returns the number of times the timer expired since this function was last called.
    ///
    /// Reads the 8-byte expiration counter from the timerfd; `retry` repeats
    /// the read when the syscall is interrupted.
    pub fn ticks(&self) -> Result<u64> {
        let mut buf = 0;
        retry(|| read(self.fd, lmem::as_mut_data(&mut buf)))?;
        Ok(buf)
    }
}
impl Drop for Timer {
    fn drop(&mut self) {
        // Only close descriptors this handle owns; the close result is
        // deliberately ignored — there is no way to report it from drop.
        if self.owned {
            close(self.fd);
        }
    }
}
impl From<Timer> for c_int {
    /// Releases ownership of the file descriptor to the caller.
    fn from(timer: Timer) -> Self {
        let fd = timer.fd;
        // Skip `Drop` so the descriptor is not closed while the caller
        // still holds it.
        mem::forget(timer);
        fd
    }
}
impl FdContainer for Timer {
    /// Whether dropping this value will close the descriptor.
    fn is_owned(&self) -> bool {
        self.owned
    }
    /// Returns the raw descriptor without transferring ownership.
    fn borrow(&self) -> c_int {
        self.fd
    }
    /// Wraps `fd`, taking ownership (closed on drop).
    fn from_owned(fd: c_int) -> Timer {
        Timer { fd, owned: true }
    }
    /// Wraps `fd` without taking ownership (not closed on drop).
    fn from_borrowed(fd: c_int) -> Timer {
        Timer { fd, owned: false }
    }
}
|
/*
*
* swarm_service.rs
*
*/
use crate::iot_manager;
use crate::message_store;
use crate::message_buffer;
use crate::avlo::swarm_server::Swarm;
use crate::avlo::{IoTProcess, IoTDevice, DeviceGroup, IoTDeviceStatus, SwarmMessage};
use tokio::sync::mpsc;
use tonic::{Request, Response, Status};
/// gRPC service implementing the `Swarm` API for IoT devices.
#[derive(Debug)]
pub struct SwarmService {
    // Messages replayed to clients that open a communication stream.
    all_messages: Vec<SwarmMessage>,
}
impl SwarmService {
pub fn new() -> Self {
Self {
all_messages: Vec::new()
}
}
}
#[tonic::async_trait]
impl Swarm for SwarmService {
    // Server-streaming response type for `start_communication`.
    type StartCommunicationStream = mpsc::Receiver<Result<SwarmMessage, Status>>;
    /// Registers the calling process as a new IoT device and returns its
    /// freshly assigned identity (`iot-<n>`, where `<n>` is the manager's
    /// current device count).
    async fn join_swarm(&self, request: Request<IoTProcess>) -> Result<Response<IoTDevice>, Status> {
        let iot_device_ident = format!("iot-{}", iot_manager::IoTManager::singleton().get_count());
        let cur_proc = request.into_inner();
        let new_device = IoTDevice {
            device_id: iot_device_ident,
            owner: Some(cur_proc),
            neighbour: Vec::new()
        };
        // Registered in the global manager; the clone is returned to the caller.
        // NOTE(review): the manager's Result is ignored here — confirm that a
        // registration failure is acceptable to silently drop.
        let new_device_cpy = new_device.clone();
        let _res_manager = iot_manager::IoTManager::singleton_mut().add_new_device(new_device_cpy);
        Ok(Response::new(new_device))
    }
    /// Adds the given devices to the global suspected-device monitoring list.
    /// Returns status value 1 on success.
    async fn suspect_device(&self, request: Request<DeviceGroup>) -> Result<Response<IoTDeviceStatus>, Status> {
        let suspected_devices = request.into_inner().device_id;
        match iot_manager::IoTManager::singleton_mut().add_suspected_device(suspected_devices) {
            Err(_suspect_err) => {
                Err(Status::unknown("there was an error adding suspected devices to the monitoring list"))
            },
            Ok(_suspect_success) => {
                let device_status = IoTDeviceStatus {
                    value: 1
                };
                Ok(Response::new(device_status))
            }
        }
    }
    /// Clears the suspicion state of the given devices. Returns status
    /// value 0 on success.
    async fn resurrect_device(&self, request: Request<DeviceGroup>) -> Result<Response<IoTDeviceStatus>, Status> {
        let to_be_resurrected = request.into_inner().device_id;
        match iot_manager::IoTManager::singleton_mut().resurrect_device(to_be_resurrected) {
            Err(failed_resurrection_err) => {
                Err(Status::unknown(failed_resurrection_err))
            },
            Ok(_resurrection_success) => {
                let device_status = IoTDeviceStatus {
                    value: 0
                };
                Ok(Response::new(device_status))
            }
        }
    }
    /// Streams a snapshot of all messages recorded so far to the caller,
    /// copying each one into the global message buffer as it is sent.
    async fn start_communication(&self, _request: Request<()>) -> Result<Response<Self::StartCommunicationStream>, Status> {
        // Bounded channel (capacity 5): the producer task applies
        // backpressure if the client consumes slowly.
        let (mut tx, rx) = mpsc::channel(5);
        let msgs = self.all_messages.clone();
        tokio::spawn(async move {
            for single_msg in &msgs[..] {
                let _res = message_buffer::MessageBuffer::singleton_mut().add_to_buffer(single_msg.clone());
                tx.send(Ok(single_msg.clone())).await.unwrap();
            }
        });
        Ok(Response::new(rx))
    }
    /// Stores an incoming message in the global message store under its
    /// topic.
    async fn deliver_message(&self, request: Request<SwarmMessage>) -> Result<Response<()>, Status> {
        let for_delivery = request.into_inner();
        let the_topic = for_delivery.topic.clone();
        // should mark the time the message was delivered
        match message_store::MessageStore::singleton_mut().add_message(the_topic, for_delivery) {
            Err(failed_delivery) => {Err(Status::unknown(failed_delivery))}
            Ok(_delivery_success) => {Ok(Response::new(()))}
        }
    }
}
extern crate clap;
extern crate pac;
use clap::{Arg, App};
use pac::Pac;
use pac::direct::DPac;
use std::io::BufReader;
use std::fs::File;
fn main() {
let matches = App::new("PAC File Lister")
.version("0.1")
.author("Marime Gui")
.about("Lists all files inside of a .pac file")
.arg(
Arg::with_name("INPUT")
.help("File to list")
.required(true)
.index(1),
)
.get_matches();
let reader = &mut BufReader::new(File::open(matches.value_of("INPUT").unwrap()).unwrap());
let direct_pac = DPac::import(reader).unwrap();
let pac = Pac::from_direct(direct_pac).unwrap();
for file in pac.files {
println!("{}", file.path);
}
} |
// ===============================================================================
// Authors: AFRL/RQQA
// Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
//
// Copyright (c) 2017 Government of the United State of America, as represented by
// the Secretary of the Air Force. No copyright is claimed in the United States under
// Title 17, U.S. Code. All Other Rights Reserved.
// ===============================================================================
// This file was auto-created by LmcpGen. Modifications will be overwritten.
use avtas::lmcp::{Error, ErrorType, Lmcp, LmcpSubscription, SrcLoc, Struct, StructInfo};
use std::fmt::Debug;
// NOTE(review): auto-created by LmcpGen (see file header) — modifications
// will be overwritten on regeneration; comments only.
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct RadioConfiguration {
    // Fields inherited from PayloadConfiguration:
    pub payload_id: i64,
    pub payload_kind: Vec<u8>,
    pub parameters: Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>>,
    // RadioConfiguration's own fields:
    pub range: f32,
    pub rally_point: Option<Box<::afrl::cmasi::location3d::Location3DT>>,
    pub timeout: i64,
}
impl PartialEq for RadioConfiguration {
    // NOTE(review): generated comparison covers only `range`, `rally_point`
    // and `timeout`; `payload_id`, `payload_kind` and `parameters` are NOT
    // compared. Confirm against LmcpGen before relying on equality for the
    // inherited payload fields.
    fn eq(&self, _other: &RadioConfiguration) -> bool {
        true
        && &self.range == &_other.range
        && &self.rally_point == &_other.rally_point
        && &self.timeout == &_other.timeout
    }
}
impl LmcpSubscription for RadioConfiguration {
    // Fully qualified LMCP topic name for this message type.
    fn subscription() -> &'static str { "afrl.impact.RadioConfiguration" }
}
impl Struct for RadioConfiguration {
    // Wire-format identity (series/version/type ids) used to tag serialized
    // instances; generated by LmcpGen.
    fn struct_info() -> StructInfo {
        StructInfo {
            exist: 1,
            series: 5281966179208134656u64,
            version: 14,
            struct_ty: 2,
        }
    }
}
impl Lmcp for RadioConfiguration {
    // NOTE(review): auto-generated wire-format code; field order in
    // ser/deser defines the on-the-wire layout and must not be changed.
    /// Serializes the struct header followed by each field in declaration
    /// order; returns the number of bytes written.
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        let mut pos = 0;
        {
            let x = Self::struct_info().ser(buf)?;
            pos += x;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.payload_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.payload_kind.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.parameters.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.range.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.rally_point.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.timeout.ser(r)?;
            pos += writeb;
        }
        Ok(pos)
    }
    /// Deserializes an instance from `buf`, verifying the struct header
    /// first; returns the value and the number of bytes consumed.
    fn deser(buf: &[u8]) -> Result<(RadioConfiguration, usize), Error> {
        let mut pos = 0;
        let (si, u) = StructInfo::deser(buf)?;
        pos += u;
        if si == RadioConfiguration::struct_info() {
            let mut out: RadioConfiguration = Default::default();
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.payload_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<u8>, usize) = Lmcp::deser(r)?;
                out.payload_kind = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>>, usize) = Lmcp::deser(r)?;
                out.parameters = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.range = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Option<Box<::afrl::cmasi::location3d::Location3DT>>, usize) = Lmcp::deser(r)?;
                out.rally_point = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.timeout = x;
                pos += readb;
            }
            Ok((out, pos))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    /// Serialized size in bytes: a fixed 15-byte header plus each field's
    /// own size.
    fn size(&self) -> usize {
        let mut size = 15;
        size += self.payload_id.size();
        size += self.payload_kind.size();
        size += self.parameters.size();
        size += self.range.size();
        size += self.rally_point.size();
        size += self.timeout.size();
        size
    }
}
/// Object-safe accessor trait for `RadioConfiguration`, extending the
/// generated `PayloadConfigurationT` hierarchy.
pub trait RadioConfigurationT: Debug + Send + ::afrl::cmasi::payload_configuration::PayloadConfigurationT {
    // Downcast hooks; the concrete type overrides these to return `Some`.
    fn as_afrl_impact_radio_configuration(&self) -> Option<&RadioConfiguration> { None }
    fn as_mut_afrl_impact_radio_configuration(&mut self) -> Option<&mut RadioConfiguration> { None }
    fn range(&self) -> f32;
    fn range_mut(&mut self) -> &mut f32;
    fn rally_point(&self) -> &Option<Box<::afrl::cmasi::location3d::Location3DT>>;
    fn rally_point_mut(&mut self) -> &mut Option<Box<::afrl::cmasi::location3d::Location3DT>>;
    fn timeout(&self) -> i64;
    fn timeout_mut(&mut self) -> &mut i64;
}
impl Clone for Box<RadioConfigurationT> {
    // Clones through the trait object by downcasting to the one concrete
    // type; `unreachable!` because every implementor returns `Some` here.
    fn clone(&self) -> Box<RadioConfigurationT> {
        if let Some(x) = RadioConfigurationT::as_afrl_impact_radio_configuration(self.as_ref()) {
            Box::new(x.clone())
        } else {
            unreachable!()
        }
    }
}
impl Default for Box<RadioConfigurationT> {
    // Defaults to a boxed default concrete instance.
    fn default() -> Box<RadioConfigurationT> { Box::new(RadioConfiguration::default()) }
}
impl PartialEq for Box<RadioConfigurationT> {
    // Equal only when both sides downcast to the concrete type and those
    // concrete values compare equal.
    fn eq(&self, other: &Box<RadioConfigurationT>) -> bool {
        if let (Some(x), Some(y)) =
            (RadioConfigurationT::as_afrl_impact_radio_configuration(self.as_ref()),
             RadioConfigurationT::as_afrl_impact_radio_configuration(other.as_ref())) {
            x == y
        } else {
            false
        }
    }
}
impl Lmcp for Box<RadioConfigurationT> {
    // Delegates serialization to the concrete type behind the trait object.
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        if let Some(x) = RadioConfigurationT::as_afrl_impact_radio_configuration(self.as_ref()) {
            x.ser(buf)
        } else {
            unreachable!()
        }
    }
    // Peeks at the struct header to confirm the type before deserializing.
    fn deser(buf: &[u8]) -> Result<(Box<RadioConfigurationT>, usize), Error> {
        let (si, _) = StructInfo::deser(buf)?;
        if si == RadioConfiguration::struct_info() {
            let (x, readb) = RadioConfiguration::deser(buf)?;
            Ok((Box::new(x), readb))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    fn size(&self) -> usize {
        if let Some(x) = RadioConfigurationT::as_afrl_impact_radio_configuration(self.as_ref()) {
            x.size()
        } else {
            unreachable!()
        }
    }
}
// Accessors for the inherited PayloadConfiguration fields.
impl ::afrl::cmasi::payload_configuration::PayloadConfigurationT for RadioConfiguration {
    fn as_afrl_impact_radio_configuration(&self) -> Option<&RadioConfiguration> { Some(self) }
    fn as_mut_afrl_impact_radio_configuration(&mut self) -> Option<&mut RadioConfiguration> { Some(self) }
    fn payload_id(&self) -> i64 { self.payload_id }
    fn payload_id_mut(&mut self) -> &mut i64 { &mut self.payload_id }
    fn payload_kind(&self) -> &Vec<u8> { &self.payload_kind }
    fn payload_kind_mut(&mut self) -> &mut Vec<u8> { &mut self.payload_kind }
    fn parameters(&self) -> &Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>> { &self.parameters }
    fn parameters_mut(&mut self) -> &mut Vec<Box<::afrl::cmasi::key_value_pair::KeyValuePairT>> { &mut self.parameters }
}
// Accessors for RadioConfiguration's own fields; the downcast hooks return
// `Some(self)` since this is the concrete type.
impl RadioConfigurationT for RadioConfiguration {
    fn as_afrl_impact_radio_configuration(&self) -> Option<&RadioConfiguration> { Some(self) }
    fn as_mut_afrl_impact_radio_configuration(&mut self) -> Option<&mut RadioConfiguration> { Some(self) }
    fn range(&self) -> f32 { self.range }
    fn range_mut(&mut self) -> &mut f32 { &mut self.range }
    fn rally_point(&self) -> &Option<Box<::afrl::cmasi::location3d::Location3DT>> { &self.rally_point }
    fn rally_point_mut(&mut self) -> &mut Option<Box<::afrl::cmasi::location3d::Location3DT>> { &mut self.rally_point }
    fn timeout(&self) -> i64 { self.timeout }
    fn timeout_mut(&mut self) -> &mut i64 { &mut self.timeout }
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use quickcheck::*;
    // Generates random instances for the property tests below.
    impl Arbitrary for RadioConfiguration {
        fn arbitrary<G: Gen>(_g: &mut G) -> RadioConfiguration {
            RadioConfiguration {
                payload_id: Arbitrary::arbitrary(_g),
                payload_kind: Arbitrary::arbitrary(_g),
                parameters: Vec::<::afrl::cmasi::key_value_pair::KeyValuePair>::arbitrary(_g).into_iter().map(|x| Box::new(x) as Box<::afrl::cmasi::key_value_pair::KeyValuePairT>).collect(),
                range: Arbitrary::arbitrary(_g),
                rally_point: {
                    // Optional field: present with probability 1/2.
                    if _g.gen() {
                        Some(Box::new(::afrl::cmasi::location3d::Location3D::arbitrary(_g)))
                    } else {
                        None
                    }
                },
                timeout: Arbitrary::arbitrary(_g),
            }
        }
    }
    quickcheck! {
        // Property: `ser` writes exactly `size()` bytes.
        fn serializes(x: RadioConfiguration) -> Result<TestResult, Error> {
            use std::u16;
            // Vectors longer than u16::MAX are not representable on the wire.
            if x.parameters.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            Ok(TestResult::from_bool(sx == x.size()))
        }
        // Property: deser(ser(x)) == x, and both consume the same length.
        fn roundtrips(x: RadioConfiguration) -> Result<TestResult, Error> {
            use std::u16;
            if x.parameters.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            let (y, sy) = RadioConfiguration::deser(&buf)?;
            Ok(TestResult::from_bool(sx == sy && x == y))
        }
    }
}
|
/*!
```rudra-poc
[target]
crate = "parc"
version = "1.0.1"
[report]
issue_url = "https://github.com/hyyking/rustracts/pull/6"
issue_date = 2020-11-14
rustsec_url = "https://github.com/RustSec/advisory-db/pull/650"
rustsec_id = "RUSTSEC-2020-0134"
[[bugs]]
analyzer = "SendSyncVariance"
bug_class = "SendSyncVariance"
rudra_report_locations = ["src/lib.rs:383:1: 383:39"]
```
!*/
#![forbid(unsafe_code)]
use parc::ParentArc;
use std::rc::Rc;
// NOTE(review): this is a deliberate proof-of-concept for the unsoundness
// described in the header (RUSTSEC-2020-0134): `parc`'s Send/Sync bounds let
// a non-`Send` `Rc` cross threads. The data race below is intentional — do
// not "fix" it.
fn main() {
    // `Rc` neither implements `Send` nor `Sync`.
    let parent = ParentArc::new(Rc::new(0));
    let mut children = vec![];
    for _ in 0..5 {
        let weak = ParentArc::downgrade(&parent);
        let child_thr = std::thread::spawn(move || {
            loop {
                // `weak` is moved into child thread.
                let child = weak.upgrade();
                match child {
                    Some(rc) => {
                        for _ in 0..2000 {
                            // `strong_count` of `rc`
                            // is updated by multiple threads without synchronization.
                            let _ = Rc::clone(rc.as_ref());
                        }
                        break;
                    }
                    None => continue,
                }
            }
        });
        children.push(child_thr);
    }
    for child_thr in children {
        child_thr.join().expect("Failed to join with child thread");
    }
    let rc = parent.block_into_inner();
    // if (`strong_count` > 1): indicates a memory leak
    assert_eq!(1, Rc::strong_count(&rc));
}
|
mod utils;
use wasm_bindgen::prelude::*;
use tiny_keccak::{Keccak, Hasher};
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator (a small allocator that reduces wasm binary size).
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// Functions imported from the wasm host environment.
// NOTE(review): semantics inferred from usage in `test` below — confirm
// against the host runtime's documentation.
#[wasm_bindgen]
extern "C" {
    // Byte-wise buffer access (unused here; `rvec`/`wvec` are used instead).
    fn read_buffer(idx: i32) -> i32;
    // Sets the length of the shared output buffer.
    fn setlen(idx: i32);
    // Returns the length of the shared input buffer.
    fn getlen() -> i32;
    fn write_buffer(idx: i32, c: i32);
    // Charges `gas` units of execution cost.
    fn usegas(gas: i32);
    // Bulk read/write of `len` bytes at offset `idx` into/from `ptr`.
    fn rvec(ptr: *mut u8, idx: i32, len: i32);
    fn wvec(ptr: *mut u8, idx: i32, len: i32);
}
#[wasm_bindgen]
pub fn test() -> u32 {
    // Fetch the call-data length and copy the bytes out of the host buffer.
    let len = getlen();
    let mut message = vec![0u8; len as usize];
    rvec(message.as_mut_ptr(), 0, len);
    // Meter gas proportionally to the input size (at least 1 unit).
    usegas(len / 10 + 1);
    // Keccak-256 over the whole message.
    let mut hasher = Keccak::v256();
    hasher.update(&message);
    let mut digest = vec![0u8; 32];
    hasher.finalize(&mut digest);
    // Hand the 32-byte digest back to the host and report success.
    wvec(digest.as_mut_ptr(), 0, 32);
    setlen(32);
    0
}
/*
#[wasm_bindgen]
pub fn test() -> u32 {
let mut input = vec![];
for i in 0..10000 {
input.push(123)
}
for i in 0..100000 {
let mut hasher = Keccak::v256();
// write input message
hasher.update(&input[..]);
let mut output = vec![0u8; 32];
// read hash digest
hasher.finalize(&mut output);
}
0
}
*/
|
use std::convert::From;
use std::io;
/// Errors that can occur while loading fonts or images.
#[derive(Debug)]
pub enum Error {
    /// Filesystem I/O failure.
    Io(io::Error),
    /// FreeType could not initialize or parse a font.
    Freetype(freetype::Error),
    /// PNG decode failure from lodepng.
    Png(lodepng::ffi::Error),
    /// The input file is not in a supported format.
    UnsupportedFormat,
}
// The `From` impls below let `?` convert library errors into `Error`.
impl From<io::Error> for Error {
    fn from(error: io::Error) -> Self {
        Error::Io(error)
    }
}
impl From<freetype::Error> for Error {
    fn from(error: freetype::Error) -> Self {
        Error::Freetype(error)
    }
}
impl From<lodepng::ffi::Error> for Error {
    fn from(error: lodepng::ffi::Error) -> Self {
        Error::Png(error)
    }
}
/// A font face loaded through FreeType, used to generate Rust source code
/// for embedded bitmap fonts.
pub struct Font {
    face: freetype::Face,
}
impl Font {
/// Loads a font face from `path` (face index 0).
///
/// # Errors
/// Returns `Error::Freetype` if FreeType cannot initialize or the file
/// cannot be parsed as a font.
pub fn load(path: &str) -> Result<Font, Error> {
    use freetype::Library;
    // Propagate initialization failures instead of panicking; the
    // `From<freetype::Error>` impl converts them into `Error::Freetype`.
    let lib = Library::init()?;
    let face = lib.new_face(path, 0)?;
    Ok(Font { face })
}
/// Generates Rust source for a `Font` constant named `name`, rendering every
/// character of `subset` at `size` points (at 72 dpi, one point == one pixel).
///
/// `epd_crate` is the path prefix used to reference the target crate's
/// `gui::font` types in the emitted code.
pub fn generate(&mut self, name: &str, size: isize, subset: &str, epd_crate: &str) -> String {
    let mut subset = subset.chars().collect::<Vec<_>>();
    // Sorted order lets the glyph-index lookup collapse into contiguous ranges.
    subset.sort();
    // Set the resolution to 72dpi so that a point equals a pixel.
    self.face.set_char_size(0, size * 64, 72, 72).unwrap();
    // Generate all glyphs.
    let mut glyphs = Vec::new();
    for c in subset.iter() {
        glyphs.push(self.generate_glyph(*c, epd_crate));
    }
    // Generate the font.
    let size = self.face.size_metrics().unwrap();
    format!(
"pub const {}: {}::gui::font::Font = {}::gui::font::Font {{
ascender: {},
descender: {},
glyphs: &[
{}
],
get_glyph_index: {},
}};
",
        name,
        epd_crate,
        epd_crate,
        // Metrics are 26.6 fixed-point; round up to whole pixels.
        (size.ascender + 63) / 64,
        -(size.descender + 63) / 64,
        glyphs.join(",\n "),
        Self::generate_get_glyph_index(subset),
    )
}
/// Builds the source text of a `get_glyph_index` closure mapping a char to
/// its index in the glyph array.
///
/// Consecutive characters are grouped into runs so the generated closure
/// uses range checks instead of one comparison per character. `chars` must
/// be sorted ascending (the caller sorts the subset first).
fn generate_get_glyph_index(chars: Vec<char>) -> String {
    let mut code = "".to_string();
    // An empty subset yields a closure that always returns `None` instead
    // of panicking on `chars[0]`.
    if !chars.is_empty() {
        let mut run_start = chars[0] as u32;
        let mut run_length = 1;
        for i in 1..chars.len() {
            let c = chars[i] as u32;
            if c == run_start + run_length {
                run_length += 1;
            } else {
                // The finished run covered glyph indices [i - run_length, i).
                code += &Self::generate_get_glyph_index_range(
                    run_start,
                    run_length,
                    i - run_length as usize,
                );
                run_start = c;
                run_length = 1;
            }
        }
        // Emit the final, still-open run.
        code += &Self::generate_get_glyph_index_range(
            run_start,
            run_length,
            chars.len() - run_length as usize,
        );
    }
    format!(
"|c: char| -> Option<usize> {{
let c = c as usize;
{}None
}}",
        code
    )
}
/// Renders one run of consecutive characters as an `if` clause of the
/// generated glyph-lookup closure.
fn generate_get_glyph_index_range(
    run_start: u32,
    run_length: u32,
    start_index: usize,
) -> String {
    match run_length {
        // A single character gets an equality test.
        1 => format!(
            "if c == {} {{
return Some({});
}}
",
            run_start, start_index
        ),
        // Longer runs get a half-open range test with an offset lookup.
        _ => {
            let run_end = run_start + run_length;
            format!(
                "if c >= {} && c < {} {{
return Some({} + c - {});
}}
",
                run_start, run_end, start_index, run_start
            )
        }
    }
}
/// Renders `c` as a monochrome bitmap and emits Rust source for a `Glyph`
/// struct literal (RLE-compressed image plus placement metrics).
fn generate_glyph(&mut self, c: char, epd_crate: &str) -> String {
    self.face
        .load_char(
            c as usize,
            // Render immediately, forcing 1-bpp monochrome output.
            freetype::face::LoadFlag::RENDER | freetype::face::LoadFlag::TARGET_MONO,
        )
        .unwrap();
    let glyph = self.face.glyph();
    let image = Self::generate_rle_image(&glyph.bitmap(), epd_crate);
    //assert!(glyph.bitmap_left() >= 0);
    // NOTE(review): a negative `bitmap_top` is rejected here — presumably the
    // renderer cannot place glyphs whose top edge is below the origin; confirm.
    assert!(glyph.bitmap_top() >= 0);
    format!(
"{}::gui::font::Glyph {{
image: {},
image_left: {},
image_top: {},
advance: {},
}}",
        epd_crate,
        image,
        glyph.bitmap_left(),
        glyph.bitmap_top(),
        // 26.6 fixed-point advance, rounded up to whole pixels.
        (glyph.advance().x + 63) / 64
    )
}
/// Converts a FreeType 1-bpp bitmap into Rust source for an `RLEImage`.
///
/// Layout of `data`: the first `height + 1` entries are offsets into the
/// array itself — `data[y]..data[y + 1]` delimits the runs of row `y` — and
/// the RLE runs produced by `generate_rle` follow.
fn generate_rle_image(bm: &freetype::Bitmap, epd_crate: &str) -> String {
    let buffer = bm.buffer();
    let pitch = bm.pitch() as usize;
    let width = bm.width() as usize;
    let height = bm.rows() as usize;
    // Reserve the offset table; data[0] points just past it.
    let mut data = vec![0u16; height + 1];
    data[0] = data.len() as u16;
    for y in 0..height {
        // `pitch` is the byte stride between rows in the FreeType buffer.
        let row = &buffer[y * pitch..(y + 1) * pitch];
        Self::generate_rle(&mut data, row, width);
        // Record where the next row's runs will begin.
        data[y + 1] = data.len() as u16;
    }
    // Pretty-print the table as a Rust array literal, 16 numbers per line.
    let mut data_text = "[".to_string();
    for i in 0..data.len() {
        if (i & 15) == 0 {
            data_text += "\n ";
        }
        data_text += &format!("{},", data[i]);
        if i & 15 != 15 && i != data.len() - 1 {
            data_text += " ";
        }
    }
    data_text += "\n ]";
    format!(
"{}::gui::image::RLEImage {{
data: &{},
width: {},
height: {},
}}",
        epd_crate, data_text, width, height
    )
}
/// Run-length encodes one 1-bpp bitmap row into `output`.
///
/// Each emitted `u16` stores the pixel color in bit 15 and the run length
/// in the low 15 bits. `row` is MSB-first packed, as produced by FreeType's
/// monochrome renderer.
fn generate_rle(output: &mut Vec<u16>, row: &[u8], width: usize) {
    // An empty row contributes no runs; this also avoids indexing `row[0]`
    // below (which would panic for a zero-width bitmap with an empty row).
    if width == 0 {
        return;
    }
    // Start with the color of the first pixel (top bit of the first byte).
    let mut run_color = (row[0] & 0x80) >> 7;
    let mut run_length: u16 = 0;
    for i in 0..width {
        // MSB-first packing: pixel i lives in bit (7 - i % 8) of byte i / 8.
        let bit = (row[i / 8] >> (7 - i % 8)) & 1;
        if bit == run_color {
            run_length += 1;
        } else {
            // Color changed: flush the finished run and start a new one.
            output.push(((run_color as u16) << 15) | run_length);
            run_length = 1;
            run_color = bit;
        }
    }
    // Flush the final run.
    output.push(((run_color as u16) << 15) | run_length);
}
}
/// A 1-bit-per-pixel monochrome image decoded from a PNG (see `Image::load`).
pub struct Image {
    /// Packed pixels; within each byte, bit `x & 7` holds pixel `x` (LSB-first).
    data: Vec<u8>,
    /// Bytes per row: `(width + 7) / 8`.
    stride: u32,
    width: u32,
    height: u32,
}
impl Image {
/// Loads a PNG from `path` and converts it to a 1-bpp bitmap.
///
/// A bit is set when the pixel's alpha-weighted darkness is below 128;
/// bits are stored LSB-first within each byte (bit `x & 7`).
///
/// # Errors
/// Returns `Error::Png` if the file cannot be read or decoded.
pub fn load(path: &str) -> Result<Image, Error> {
    let image = lodepng::decode32_file(path)?;
    let width = image.width;
    let height = image.height;
    // One bit per pixel, rows padded up to a whole byte.
    let stride = (width + 7) / 8;
    let mut data = vec![0u8; (stride * height) as usize];
    for y in 0..height {
        for x in 0..width {
            let pixel = image.buffer[y * width + x];
            // Widen to u32 before summing: the previous `u8` sum
            // `pixel.r + pixel.g + pixel.b` overflows for bright pixels
            // (panics in debug builds, silently wraps in release).
            let avg_color = (pixel.r as u32 + pixel.g as u32 + pixel.b as u32) / 3;
            let alpha = pixel.a;
            // Darkness scaled by opacity: 0 (transparent/white) .. 255 (opaque black).
            let level = (255 - avg_color) * alpha as u32 / 255;
            // NOTE(review): bits are set for *light* pixels (level < 128) —
            // confirm this matches the display's polarity.
            if level < 128 {
                data[(y * stride + x / 8) as usize] |= 1 << (x & 7);
            }
        }
    }
    Ok(Image {
        data,
        stride: stride as u32,
        width: width as u32,
        height: height as u32,
    })
}
/// Emits Rust source for a `BitmapImage` constant named `name` holding this
/// image's packed 1-bpp pixel data.
pub fn generate_bitmap(&self, name: &str, epd_crate: &str) -> String {
    let mut data_str = "[\n".to_string();
    // One emitted source line per bitmap row.
    for i in 0..self.height {
        data_str += " ";
        for j in 0..self.stride {
            data_str += &format!(" {},", self.data[(i * self.stride + j) as usize]);
        }
        data_str += "\n";
    }
    data_str += " ]";
    format!(
"pub const {}: {}::gui::image::BitmapImage = {}::gui::image::BitmapImage {{
data: &{},
width: {},
height: {},
stride: {},
}};
",
        name, epd_crate, epd_crate, data_str, self.width, self.height, self.stride,
    )
}
}
|
/// Prints a one-line compliment about `name` to stdout.
fn print_admiration(name: &str) {
    let sentence = format!("Wow, {} really makes you think.", name);
    println!("{}", sentence);
}
/// Entry point: admires the empty string.
fn main() {
    let value = String::new();
    print_admiration(&value);
}
|
//! An iterator over incoming signals.
//!
//! This provides a higher abstraction over the signals, providing
//! the [`SignalsInfo`] structure which is able to iterate over the
//! incoming signals. The structure is parametrized by an
//! [`Exfiltrator`][self::exfiltrator::Exfiltrator], which specifies what information is returned
//! for each delivered signal. Note that some exfiltrators are behind a feature flag.
//!
//! The [`Signals`] is a type alias for the common case when it is enough to get the signal number.
//!
//! This module (and everything in it) is turned by the `iterator` feature. It is **on** by
//! default, the possibility to turn off is mostly possible for very special purposes (compiling on
//! `<rustc-1.36`, minimizing the amount of code compiled, …). In a sense, this is the highest
//! level abstraction of the crate and the API expected to be used by most of the people.
//!
//! # Examples
//!
//! ```rust
//! extern crate libc;
//! extern crate signal_hook;
//!
//! use std::io::Error;
//!
//! use signal_hook::consts::signal::*;
//! use signal_hook::iterator::Signals;
//!
//! fn main() -> Result<(), Error> {
//! let mut signals = Signals::new(&[
//! SIGHUP,
//! SIGTERM,
//! SIGINT,
//! SIGQUIT,
//! # SIGUSR1,
//! ])?;
//! # // A trick to terminate the example when run as doc-test. Not part of the real code.
//! # signal_hook::low_level::raise(SIGUSR1).unwrap();
//! 'outer: loop {
//! // Pick up signals that arrived since last time
//! for signal in signals.pending() {
//! match signal as libc::c_int {
//! SIGHUP => {
//! // Reload configuration
//! // Reopen the log file
//! }
//! SIGTERM | SIGINT | SIGQUIT => {
//! break 'outer;
//! },
//! # SIGUSR1 => return Ok(()),
//! _ => unreachable!(),
//! }
//! }
//! // Do some bit of work ‒ something with upper limit on waiting, so we don't block
//! // forever with a SIGTERM already waiting.
//! }
//! println!("Terminating. Bye bye");
//! Ok(())
//! }
//! ```
pub mod backend;
pub mod exfiltrator;
use std::borrow::Borrow;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use std::io::{Error, ErrorKind, Read};
use std::os::unix::net::UnixStream;
use libc::{self, c_int};
pub use self::backend::{Handle, Pending};
use self::backend::{PollResult, RefSignalIterator, SignalDelivery};
use self::exfiltrator::{Exfiltrator, SignalOnly};
/// The main structure of the module, representing interest in some signals.
///
/// Unlike the helpers in other modules, this registers the signals when created and unregisters
/// them on drop. It provides the pending signals during its lifetime, either in batches or as an
/// infinite iterator.
///
/// Most users will want to use it through the [`Signals`] type alias for simplicity.
///
/// # Multiple threads
///
/// Instances of this struct can be [sent][std::marker::Send] to other threads. In a multithreaded
/// application this can be used to dedicate a separate thread for signal handling. In this case
/// you should get a [`Handle`] using the [`handle`][Signals::handle] method before sending the
/// `Signals` instance to a background thread. With the handle you will be able to shut down the
/// background thread later, or to operatively add more signals.
///
/// The controller handle can be shared between as many threads as you like using its
/// [`clone`][Handle::clone] method.
///
/// # Exfiltrators
///
/// The [`SignalOnly`] exfiltrator provides only the signal number. There are further exfiltrators available in
/// the [`exfiltrator`] module. Note that some of them are behind feature flags that need to be
/// enabled.
///
/// # Examples
///
/// ```rust
/// # extern crate signal_hook;
/// #
/// # use std::io::Error;
/// # use std::thread;
/// use signal_hook::consts::signal::*;
/// use signal_hook::iterator::Signals;
///
/// #
/// # fn main() -> Result<(), Error> {
/// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
/// let handle = signals.handle();
/// let thread = thread::spawn(move || {
/// for signal in &mut signals {
/// match signal {
/// SIGUSR1 => {},
/// SIGUSR2 => {},
/// _ => unreachable!(),
/// }
/// }
/// });
///
/// // Some time later...
/// handle.close();
/// thread.join().unwrap();
/// # Ok(())
/// # }
/// ```
pub struct SignalsInfo<E: Exfiltrator = SignalOnly>(SignalDelivery<UnixStream, E>);
impl<E: Exfiltrator> SignalsInfo<E> {
    /// Creates the `Signals` structure.
    ///
    /// This registers all the signals listed. The same restrictions (panics, errors) apply as
    /// for the [`Handle::add_signal`] method.
    pub fn new<I, S>(signals: I) -> Result<Self, Error>
    where
        I: IntoIterator<Item = S>,
        S: Borrow<c_int>,
        E: Default,
    {
        Self::with_exfiltrator(signals, E::default())
    }
    /// An advanced constructor with explicit [`Exfiltrator`].
    pub fn with_exfiltrator<I, S>(signals: I, exfiltrator: E) -> Result<Self, Error>
    where
        I: IntoIterator<Item = S>,
        S: Borrow<c_int>,
    {
        // Self-pipe pair: the signal handler side writes, this side blocks
        // reading in `has_signals` until something arrives.
        let (read, write) = UnixStream::pair()?;
        Ok(SignalsInfo(SignalDelivery::with_pipe(
            read,
            write,
            exfiltrator,
            signals,
        )?))
    }
    /// Registers another signal to the set watched by this [`Signals`] instance.
    ///
    /// The same restrictions (panics, errors) apply as for the [`Handle::add_signal`]
    /// method.
    pub fn add_signal(&self, signal: c_int) -> Result<(), Error> {
        self.handle().add_signal(signal)
    }
    /// Returns an iterator of already received signals.
    ///
    /// This returns an iterator over all the signal numbers of the signals received since last
    /// time they were read (out of the set registered by this `Signals` instance). Note that they
    /// are returned in arbitrary order and a signal instance may returned only once even if it was
    /// received multiple times.
    ///
    /// This method returns immediately (does not block) and may produce an empty iterator if there
    /// are no signals ready.
    pub fn pending(&mut self) -> Pending<E> {
        self.0.pending()
    }
    /// Block until the stream contains some bytes.
    ///
    /// Returns true if it was possible to read a byte and false otherwise.
    fn has_signals(read: &mut UnixStream) -> Result<bool, Error> {
        loop {
            // A zero-byte read means the write end was closed (EOF).
            match read.read(&mut [0u8]) {
                Ok(num_read) => break Ok(num_read > 0),
                // If we get an EINTR error it is fine to retry reading from the stream.
                // Otherwise we should pass on the error to the caller.
                Err(error) => {
                    if error.kind() != ErrorKind::Interrupted {
                        break Err(error);
                    }
                }
            }
        }
    }
    /// Waits for some signals to be available and returns an iterator.
    ///
    /// This is similar to [`pending`][SignalsInfo::pending]. If there are no signals available, it
    /// tries to wait for some to arrive. However, due to implementation details, this still can
    /// produce an empty iterator.
    ///
    /// This can block for arbitrary long time. If the [`Handle::close`] method is used in
    /// another thread this method will return immediately.
    ///
    /// Note that the blocking is done in this method, not in the iterator.
    pub fn wait(&mut self) -> Pending<E> {
        match self.0.poll_pending(&mut Self::has_signals) {
            Ok(Some(pending)) => pending,
            // Because of the blocking has_signals method the poll_pending method
            // only returns None if the instance is closed. But we want to return
            // a possibly empty pending object anyway.
            Ok(None) => self.pending(),
            // Users can't manipulate the internal file descriptors and the way we use them
            // shouldn't produce any errors. So it is OK to panic.
            Err(error) => panic!("Unexpected error: {}", error),
        }
    }
    /// Is it closed?
    ///
    /// See [`close`][Handle::close].
    pub fn is_closed(&self) -> bool {
        self.handle().is_closed()
    }
    /// Get an infinite iterator over arriving signals.
    ///
    /// The iterator's `next()` blocks as necessary to wait for signals to arrive. This is adequate
    /// if you want to designate a thread solely to handling signals. If multiple signals come at
    /// the same time (between two values produced by the iterator), they will be returned in
    /// arbitrary order. Multiple instances of the same signal may be collated.
    ///
    /// This is also the iterator returned by `IntoIterator` implementation on `&mut Signals`.
    ///
    /// This iterator terminates only if explicitly [closed][Handle::close].
    ///
    /// # Examples
    ///
    /// ```rust
    /// # extern crate libc;
    /// # extern crate signal_hook;
    /// #
    /// # use std::io::Error;
    /// # use std::thread;
    /// #
    /// use signal_hook::consts::signal::*;
    /// use signal_hook::iterator::Signals;
    ///
    /// # fn main() -> Result<(), Error> {
    /// let mut signals = Signals::new(&[SIGUSR1, SIGUSR2])?;
    /// let handle = signals.handle();
    /// thread::spawn(move || {
    ///     for signal in signals.forever() {
    ///         match signal {
    ///             SIGUSR1 => {},
    ///             SIGUSR2 => {},
    ///             _ => unreachable!(),
    ///         }
    ///     }
    /// });
    /// handle.close();
    /// # Ok(())
    /// # }
    /// ```
    pub fn forever(&mut self) -> Forever<E> {
        Forever(RefSignalIterator::new(&mut self.0))
    }
    /// Get a shareable handle to a [`Handle`] for this instance.
    ///
    /// This can be used to add further signals or close the [`Signals`] instance.
    pub fn handle(&self) -> Handle {
        self.0.handle()
    }
}
impl<E> Debug for SignalsInfo<E>
where
    E: Debug + Exfiltrator,
    E::Storage: Debug,
{
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        // Keep the historical name "Signals" in the debug representation.
        fmt.debug_tuple("Signals").field(&self.0).finish()
    }
}
/// Iterating `&mut SignalsInfo` yields arriving signals forever; equivalent
/// to calling [`SignalsInfo::forever`].
impl<'a, E: Exfiltrator> IntoIterator for &'a mut SignalsInfo<E> {
    type Item = E::Output;
    type IntoIter = Forever<'a, E>;
    fn into_iter(self) -> Self::IntoIter {
        self.forever()
    }
}
/// An infinite iterator of arriving signals.
pub struct Forever<'a, E: Exfiltrator>(RefSignalIterator<'a, UnixStream, E>);
impl<'a, E: Exfiltrator> Iterator for Forever<'a, E> {
    type Item = E::Output;
    fn next(&mut self) -> Option<E::Output> {
        loop {
            match self.0.poll_signal(&mut SignalsInfo::<E>::has_signals) {
                PollResult::Signal(result) => break Some(result),
                PollResult::Closed => break None,
                // In theory, the poll_signal should not return PollResult::Pending. Nevertheless,
                // there's a race condition - if the other side closes the pipe/socket after
                // checking for it being closed, then the `read` there returns 0 as EOF. That
                // appears as pending here. Next time we should get Closed.
                PollResult::Pending => continue,
                // Users can't manipulate the internal file descriptors and the way we use them
                // shouldn't produce any errors. So it is OK to panic.
                PollResult::Err(error) => panic!("Unexpected error: {}", error),
            }
        }
    }
}
/// A type alias for an iterator returning just the signal numbers.
///
/// This is the simplified version for most of the use cases. For advanced usages, the
/// [`SignalsInfo`] with explicit [`Exfiltrator`] type can be used.
pub type Signals = SignalsInfo<SignalOnly>;
|
//! Defines common interfaces for interacting with statistical distributions
//! and provides concrete implementations for a variety of distributions.
use super::statistics::{Max, Min};
use ::num_traits::{float::Float, Bounded, Num};
pub use self::bernoulli::Bernoulli;
pub use self::beta::Beta;
pub use self::binomial::Binomial;
pub use self::categorical::Categorical;
pub use self::cauchy::Cauchy;
pub use self::chi::Chi;
pub use self::chi_squared::ChiSquared;
pub use self::dirac::Dirac;
pub use self::dirichlet::Dirichlet;
pub use self::discrete_uniform::DiscreteUniform;
pub use self::empirical::Empirical;
pub use self::erlang::Erlang;
pub use self::exponential::Exp;
pub use self::fisher_snedecor::FisherSnedecor;
pub use self::gamma::Gamma;
pub use self::geometric::Geometric;
pub use self::hypergeometric::Hypergeometric;
pub use self::inverse_gamma::InverseGamma;
pub use self::laplace::Laplace;
pub use self::log_normal::LogNormal;
pub use self::multinomial::Multinomial;
pub use self::multivariate_normal::MultivariateNormal;
pub use self::negative_binomial::NegativeBinomial;
pub use self::normal::Normal;
pub use self::pareto::Pareto;
pub use self::poisson::Poisson;
pub use self::students_t::StudentsT;
pub use self::triangular::Triangular;
pub use self::uniform::Uniform;
pub use self::weibull::Weibull;
mod bernoulli;
mod beta;
mod binomial;
mod categorical;
mod cauchy;
mod chi;
mod chi_squared;
mod dirac;
mod dirichlet;
mod discrete_uniform;
mod empirical;
mod erlang;
mod exponential;
mod fisher_snedecor;
mod gamma;
mod geometric;
mod hypergeometric;
mod internal;
mod inverse_gamma;
mod laplace;
mod log_normal;
mod multinomial;
mod multivariate_normal;
mod negative_binomial;
mod normal;
mod pareto;
mod poisson;
mod students_t;
mod triangular;
mod uniform;
mod weibull;
mod ziggurat;
mod ziggurat_tables;
use crate::Result;
/// The `ContinuousCDF` trait is used to specify an interface for univariate
/// distributions for which cdf float arguments are sensible.
pub trait ContinuousCDF<K: Float, T: Float>: Min<K> + Max<K> {
    /// Returns the cumulative distribution function calculated
    /// at `x` for a given distribution. May panic depending
    /// on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{ContinuousCDF, Uniform};
    ///
    /// let n = Uniform::new(0.0, 1.0).unwrap();
    /// assert_eq!(0.5, n.cdf(0.5));
    /// ```
    fn cdf(&self, x: K) -> T;
    /// Due to issues with rounding and floating-point accuracy the default
    /// implementation may be ill-behaved.
    /// Specialized inverse cdfs should be used whenever possible.
    /// Performs a binary search on the domain of `cdf` to obtain an approximation
    /// of `F^-1(p) := inf { x | F(x) >= p }`. Needless to say, performance
    /// may be lacking.
    fn inverse_cdf(&self, p: T) -> K {
        // Exact endpoints for the degenerate probabilities.
        if p == T::zero() {
            return self.min();
        };
        if p == T::one() {
            return self.max();
        };
        let two = K::one() + K::one();
        // Grow the bracket [low, high] geometrically until it contains the quantile.
        let mut high = two;
        let mut low = -high;
        while self.cdf(low) > p {
            low = low + low;
        }
        while self.cdf(high) < p {
            high = high + high;
        }
        // 16 bisection steps shrink the bracket by a factor of 2^16.
        let mut i = 16;
        while i != 0 {
            let mid = (high + low) / two;
            if self.cdf(mid) >= p {
                high = mid;
            } else {
                low = mid;
            }
            i -= 1;
        }
        // Return the bracket midpoint as the approximation.
        (high + low) / two
    }
}
/// The `DiscreteCDF` trait is used to specify an interface for univariate
/// discrete distributions.
pub trait DiscreteCDF<K: Bounded + Clone + Num, T: Float>: Min<K> + Max<K> {
    /// Returns the cumulative distribution function calculated
    /// at `x` for a given distribution. May panic depending
    /// on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{DiscreteCDF, Binomial};
    /// use statrs::prec;
    ///
    /// let n = Binomial::new(0.5, 10).unwrap();
    /// assert!(prec::almost_eq(n.cdf(5), 0.623046875, 1e-9));
    /// ```
    fn cdf(&self, x: K) -> T;
    /// Due to issues with rounding and floating-point accuracy the default implementation may be ill-behaved.
    /// Specialized inverse cdfs should be used whenever possible.
    ///
    /// Binary-searches the integer domain for `inf { x | F(x) >= p }`.
    // TODO: `high` doubling can still overflow for distributions whose
    // quantile is near `K::max_value()`.
    fn inverse_cdf(&self, p: T) -> K {
        if p == T::zero() {
            return self.min();
        };
        if p == T::one() {
            return self.max();
        };
        let two = K::one() + K::one();
        let mut high = two.clone();
        let mut low = K::min_value();
        // Grow `high` exponentially until it is an upper bound for the answer.
        while self.cdf(high.clone()) < p {
            high = high.clone() + high.clone();
        }
        // Invariant once `low` has been assigned from `mid`: cdf(low) < p <= cdf(high).
        while high != low {
            let mid = (high.clone() + low.clone()) / two.clone();
            if self.cdf(mid.clone()) >= p {
                high = mid;
            } else if mid == low {
                // Integer division floors, so when `high == low + 1` we get
                // `mid == low` on every iteration; without this arm the loop
                // would never terminate. `high` is then the smallest value
                // with cdf >= p, which is the answer.
                break;
            } else {
                low = mid;
            }
        }
        high
    }
}
/// The `Continuous` trait provides an interface for interacting with
/// continuous statistical distributions
///
/// `K` is the argument (domain) type and `T` the returned density type.
///
/// # Remarks
///
/// All methods provided by the `Continuous` trait are unchecked, meaning
/// they can panic if in an invalid state or encountering invalid input
/// depending on the implementing distribution.
pub trait Continuous<K, T> {
    /// Returns the probability density function calculated at `x` for a given
    /// distribution.
    /// May panic depending on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{Continuous, Uniform};
    ///
    /// let n = Uniform::new(0.0, 1.0).unwrap();
    /// assert_eq!(1.0, n.pdf(0.5));
    /// ```
    fn pdf(&self, x: K) -> T;
    /// Returns the log of the probability density function calculated at `x`
    /// for a given distribution.
    /// May panic depending on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{Continuous, Uniform};
    ///
    /// let n = Uniform::new(0.0, 1.0).unwrap();
    /// assert_eq!(0.0, n.ln_pdf(0.5));
    /// ```
    fn ln_pdf(&self, x: K) -> T;
}
/// The `Discrete` trait provides an interface for interacting with discrete
/// statistical distributions
///
/// `K` is the argument (domain) type and `T` the returned probability type.
///
/// # Remarks
///
/// All methods provided by the `Discrete` trait are unchecked, meaning
/// they can panic if in an invalid state or encountering invalid input
/// depending on the implementing distribution.
pub trait Discrete<K, T> {
    /// Returns the probability mass function calculated at `x` for a given
    /// distribution.
    /// May panic depending on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{Discrete, Binomial};
    /// use statrs::prec;
    ///
    /// let n = Binomial::new(0.5, 10).unwrap();
    /// assert!(prec::almost_eq(n.pmf(5), 0.24609375, 1e-15));
    /// ```
    fn pmf(&self, x: K) -> T;
    /// Returns the log of the probability mass function calculated at `x` for
    /// a given distribution.
    /// May panic depending on the implementor.
    ///
    /// # Examples
    ///
    /// ```
    /// use statrs::distribution::{Discrete, Binomial};
    /// use statrs::prec;
    ///
    /// let n = Binomial::new(0.5, 10).unwrap();
    /// assert!(prec::almost_eq(n.ln_pmf(5), (0.24609375f64).ln(), 1e-15));
    /// ```
    fn ln_pmf(&self, x: K) -> T;
}
|
use model::person::Person;
/// JSON envelope returning a single `Person` to API clients.
#[derive(Deserialize, Serialize, Debug)]
pub struct PersonView {
    /// Response status code — presumably HTTP-like; confirm against the handlers.
    pub status: i32,
    /// Human-readable status message.
    pub message: String,
    pub person: Person,
}
/// JSON envelope returning a list of `Person` records.
#[derive(Deserialize, Serialize, Debug)]
pub struct PersonListView {
    pub status: i32,
    pub message: String,
    pub person_list: Vec<Person>,
}
|
mod turnip_pattern;
use std::collections::HashMap;
use regex::{Captures, CaptureNames, Regex};
/// Something that can be compiled into a step-matching regex while keeping
/// its original textual form.
pub trait Pattern {
    fn to_regex(&self) -> Regex;
    fn to_string(&self) -> String;
}
/// A value captured from a step description. Currently only strings.
#[derive(PartialEq, Eq, Debug)]
pub enum StepArgument {
    String(String),
}
/// A compiled step pattern: the regex used for matching plus the source
/// text it was built from (used for equality — see the `PartialEq` impl).
#[derive(Clone, Debug)]
pub struct StepPattern {
    source: String,
    regex: Regex,
}
impl StepPattern {
    /// Builds a step pattern by compiling `pattern` to a regex and keeping
    /// its textual source for equality comparisons.
    pub fn new<P: Pattern>(pattern: P) -> Self {
        let regex = pattern.to_regex();
        let source = pattern.to_string();
        StepPattern { source, regex }
    }
    /// Returns true when `description` matches this pattern.
    ///
    /// Takes `&str` instead of `&String` so callers can pass any string
    /// slice; existing `&String` call sites keep working via deref coercion.
    pub fn is_match(&self, description: &str) -> bool {
        self.regex.is_match(description)
    }
    /// Extracts the named capture groups of `description` as step arguments.
    ///
    /// Returns an empty map when the description does not match.
    pub fn captures(&self, description: &str) -> HashMap<String, StepArgument> {
        match self.regex.captures(description) {
            Some(arguments) => extract_step_arguments(self.regex.capture_names(), arguments),
            None => HashMap::new(),
        }
    }
}
/// Collects every named capture group from `arguments` into a map keyed by
/// the group name, wrapping each captured value as `StepArgument::String`.
fn extract_step_arguments(argument_names: CaptureNames, arguments: Captures) -> HashMap<String, StepArgument> {
    let mut step_arguments = HashMap::new();
    // `capture_names` yields `None` for unnamed groups; `flatten` skips those.
    for name in argument_names.flatten() {
        let value = arguments[name].to_string();
        step_arguments.insert(name.to_string(), StepArgument::String(value));
    }
    step_arguments
}
// Two step patterns are equal when they were built from the same source
// text; the compiled regex is derived from that text and is not compared.
impl PartialEq for StepPattern {
    fn eq(&self, other: &StepPattern) -> bool {
        self.source == other.source
    }
}
impl Eq for StepPattern {}
#[cfg(test)]
mod test {
    use super::{Pattern, StepPattern, StepArgument};
    use pattern::turnip_pattern::TurnipPattern;
    use hamcrest::prelude::*;
    use testing::mock::PatternMock;
    // Fixtures: a literal pattern, a pattern with a `:named` placeholder,
    // and descriptions that do or do not match them.
    const A_PATTERN: &str = "I have cukes in my belly";
    const A_PATTERN_WITH_ARGUMENTS: &str = "I eat :cuke_count cukes";
    const A_DESCRIPTION: &str = "It does not match pattern";
    const A_DESCRIPTION_WITH_ARGUMENTS: &str = "I eat 7 cukes";
    const CUKE_COUNT: &str = "7";
    #[test]
    fn it_is_matching_a_description_that_matches_its_pattern() {
        // The mock's own string form trivially matches its compiled regex.
        let pattern = PatternMock {};
        let description = pattern.to_string();
        let step_pattern = StepPattern::new(pattern);
        assert!(step_pattern.is_match(&description));
    }
    #[test]
    fn it_is_not_matching_a_description_that_does_not_match_its_pattern() {
        let pattern = PatternMock {};
        let step_pattern = StepPattern::new(pattern);
        assert!(!step_pattern.is_match(&A_DESCRIPTION.to_string()));
    }
    #[test]
    fn it_captures_step_arguments_from_a_description_given_a_pattern_with_arguments() {
        // ":cuke_count" should be captured as a named string argument.
        let pattern = TurnipPattern::from(A_PATTERN_WITH_ARGUMENTS);
        let description = A_DESCRIPTION_WITH_ARGUMENTS.to_string();
        let step_pattern = StepPattern::new(pattern);
        let step_arguments = step_pattern.captures(&description);
        let ref step_argument = step_arguments["cuke_count"];
        let expected_step_argument = StepArgument::String(CUKE_COUNT.to_string());
        assert_that!(step_argument, is(equal_to(&expected_step_argument)));
    }
    #[test]
    fn it_does_not_capture_step_arguments_from_a_description_given_a_pattern_without_arguments() {
        let pattern = TurnipPattern::from(A_PATTERN);
        let description = A_DESCRIPTION.to_string();
        let step_pattern = StepPattern::new(pattern);
        let step_arguments = step_pattern.captures(&description);
        assert!(step_arguments.is_empty());
    }
}
|
extern crate serde;
use crate::api::components::address::geolocation::Geolocation;
/// A postal address with optional geolocation, exchanged as JSON and
/// loadable from a query row (presumably Diesel's `Queryable` — confirm).
#[derive(Serialize, Deserialize, Default, Queryable)]
pub struct Address {
    street: String,
    city: String,
    state: String,
    // NOTE(review): an i32 zip code drops leading zeros (e.g. "02134") and
    // cannot represent ZIP+4 — consider `String`; confirm against the schema.
    zipcode: i32,
    building_num: String,
    geolocation: Option<Geolocation>
}
use crate::{BotResult, CommandData, Context};
use std::sync::Arc;
/// Command handler: posts the lyrics of "Start Again" line by line via
/// `song_send`, using the delay returned by `_startagain`.
#[command]
#[short_desc("https://youtu.be/g7VNvg_QTMw&t=29")]
#[bucket("songs")]
#[no_typing()]
async fn startagain(ctx: Arc<Context>, data: CommandData) -> BotResult<()> {
    let (lyrics, delay) = _startagain();
    super::song_send(lyrics, delay, ctx, data).await
}
/// Returns the lyric lines of "Start Again" together with the delay (in
/// milliseconds) to wait between posting consecutive lines.
pub fn _startagain() -> (&'static [&'static str], u64) {
    const DELAY_MS: u64 = 5500;
    const LYRICS: &[&str] = &[
        "I'm not always perfect, but I'm always myself.",
        "If you don't think I'm worth it - find someone eeeelse.",
        "I won't say I'm sorry, for being who I aaaaaam.",
        "Is the eeeend a chance to start agaaaaain?",
    ];
    (LYRICS, DELAY_MS)
}
|
use amethyst::ecs::{Component, DenseVecStorage};
/// Hit points of an entity: a current value kept within `[0, max]`.
#[derive(Clone, Debug)]
pub struct Health {
    current: u32,
    max: u32
}
/// Registers `Health` as an ECS component backed by `DenseVecStorage`.
impl Component for Health {
    type Storage = DenseVecStorage<Self>;
}
impl Health {
    /// Creates a health pool filled to `max`.
    pub fn new(max: u32) -> Self {
        Health {
            current: max,
            max
        }
    }
    /// Current hit points.
    pub fn current(&self) -> u32 {
        self.current
    }
    /// Maximum hit points.
    pub fn max(&self) -> u32 {
        self.max
    }
    /// Reduces current health by `damage`, saturating at zero instead of
    /// underflowing (replaces the manual `damage > current` check).
    pub fn damage(&mut self, damage: u32) {
        self.current = self.current.saturating_sub(damage);
    }
    /// Restores `health` points, clamped to `max`; the saturating add
    /// prevents a u32 overflow panic for huge heal amounts in debug builds.
    pub fn heal(&mut self, health: u32) {
        self.current = self.max.min(self.current.saturating_add(health));
    }
}
|
use std::borrow::Cow;
use std::collections::HashSet;
use std::sync::Arc;
use itertools::Itertools;
use command_data_derive::CommandData;
use discorsd::async_trait;
use discorsd::BotState;
use discorsd::commands::*;
use discorsd::errors::BotError;
use crate::{avalon, Bot};
use crate::games::GameType;
/// The `/start` slash command: tracks which games can currently be started
/// in a guild and whether the command is enabled by default.
#[derive(Clone, Debug)]
pub struct StartCommand {
    games: HashSet<GameType>,
    default_permissions: bool,
}
/// Initially only Hangman is startable and the command is enabled.
impl Default for StartCommand {
    fn default() -> Self {
        Self { games: set!(GameType::Hangman), default_permissions: true }
    }
}
impl StartCommand {
    /// Registers `game` as startable and re-enables the command.
    /// Returns the previously stored equal value, if any.
    pub fn insert(&mut self, game: GameType) -> Option<GameType> {
        self.default_permissions = true;
        self.games.replace(game)
    }
    /// Unregisters `game`, disabling the command when no games remain.
    /// Returns the removed game, or `None` when it was not registered.
    pub fn remove(&mut self, game: GameType) -> Option<GameType> {
        let was_present = self.games.remove(&game);
        if self.games.is_empty() {
            self.default_permissions = false;
        }
        if was_present { Some(game) } else { None }
    }
}
/// Options of the `/start` command.
#[derive(CommandData)]
#[command(command = "StartCommand")]
pub struct StartData {
    /// Which game to start; required only when several games are registered
    /// (`req`) and limited to the currently-registered games (`retain`).
    #[command(desc = "Choose the game to start", required = "req", retain = "retain")]
    game: Option<GameType>,
}
/// The `game` option is mandatory only when more than one game is available.
fn req(command: &StartCommand) -> bool {
    command.games.len() >= 2
}
/// Offer a game as a choice only while it is registered on the command.
fn retain(command: &StartCommand, choice: GameType) -> bool {
    command.games.iter().any(|game| *game == choice)
}
#[async_trait]
impl SlashCommand for StartCommand {
    type Bot = Bot;
    type Data = StartData;
    // The interaction is deferred first so starting the game can take time.
    type Use = Deferred;
    const NAME: &'static str = "start";
    /// A specific description when exactly one game is registered, otherwise
    /// a generic one.
    fn description(&self) -> Cow<'static, str> {
        match self.games.iter().exactly_one() {
            Ok(game) => format!("Starts {game} in this channel").into(),
            Err(_) => "Choose a game to start in this channel".into()
        }
    }
    fn default_permissions(&self) -> bool {
        self.default_permissions
    }
    /// Defers the interaction, resolves which game to start (the explicit
    /// option, or the single registered game), then dispatches its starter.
    async fn run(&self,
                 state: Arc<BotState<Bot>>,
                 interaction: InteractionUse<AppCommandData, Unused>,
                 data: StartData,
    ) -> Result<InteractionUse<AppCommandData, Self::Use>, BotError> {
        let deferred = interaction.defer(&state).await?;
        // NOTE(review): unwrap assumes the command is guild-only — confirm
        // that registration enforces this.
        let guild = deferred.guild().unwrap();
        // `game` is required whenever more than one game is registered (see
        // `req`), so `exactly_one` only runs — and succeeds — otherwise.
        let game = data.game.unwrap_or_else(|| *self.games.iter().exactly_one().unwrap());
        {
            let commands = state.slash_commands.read().await;
            let mut commands = commands.get(&guild).unwrap()
                .write().await;
            // let (stop_id, stop_cmd) = state.get_command_mut::<StopCommand>(guild, &mut commands).await;
            // stop_cmd.insert(game);
            // stop_cmd.edit_command(&state, guild, stop_id).await?;
        }
        match game {
            GameType::Avalon => avalon::start::start(&state, &deferred).await?,
            GameType::Hangman => todo!(),
            GameType::Coup => todo!(),
            GameType::Kittens => todo!(),
        }
        Ok(deferred)
    }
}
use crate::error::RubyError;
use crate::vm::*;
use fancy_regex::{Captures, Error, Match, Regex};
//#[macro_use]
use crate::*;
/// Payload of a Ruby `Regexp` object: wraps the compiled regex.
#[derive(Debug)]
pub struct RegexpInfo {
    pub regexp: Regexp,
}
impl RegexpInfo {
    /// Wraps an already-compiled `fancy_regex::Regex`.
    pub fn new(regexp: Regex) -> Self {
        RegexpInfo {
            regexp: Regexp(regexp),
        }
    }
}
/// Reference handle to a `RegexpInfo`.
pub type RegexpRef = Ref<RegexpInfo>;
impl RegexpRef {
    /// Creates a regexp reference from a compiled regex.
    pub fn from(reg: Regex) -> Self {
        RegexpRef::new(RegexpInfo::new(reg))
    }
    /// Compiles `reg_str` and wraps the result; returns the regex engine's
    /// error when the pattern is invalid.
    pub fn from_string(reg_str: &str) -> Result<Self, Error> {
        let regex = Regex::new(reg_str)?;
        Ok(RegexpRef::new(RegexpInfo::new(regex)))
    }
}
/// Newtype over `fancy_regex::Regex`; derefs to it so all regex methods
/// are available directly on `Regexp`.
#[derive(Debug)]
pub struct Regexp(Regex);
impl std::ops::Deref for Regexp {
    type Target = Regex;
    fn deref(&self) -> &Regex {
        &self.0
    }
}
impl Regexp {
    /// Wraps a compiled regex.
    pub fn new(re: Regex) -> Self {
        Regexp(re)
    }
}
/// Creates the Ruby `Regexp` class object and registers its builtin class
/// methods.
pub fn init_regexp(globals: &mut Globals) -> Value {
    let id = globals.get_ident_id("Regexp");
    let classref = ClassRef::from(id, globals.builtins.object);
    let regexp = Value::class(globals, classref);
    globals.add_builtin_class_method(regexp, "new", regexp_new);
    // `compile` is an alias of `new`; `quote` an alias of `escape`.
    globals.add_builtin_class_method(regexp, "compile", regexp_new);
    globals.add_builtin_class_method(regexp, "escape", regexp_escape);
    globals.add_builtin_class_method(regexp, "quote", regexp_escape);
    regexp
}
// Class methods
/// `Regexp.new(string)` / `Regexp.compile(string)`: compiles the single
/// String argument into a Regexp value.
fn regexp_new(vm: &mut VM, _: Value, args: &Args) -> VMResult {
    vm.check_args_num(args.len(), 1)?;
    expect_string!(string, vm, args[0]);
    let val = vm.create_regexp_from_string(string)?;
    Ok(val)
}
/// `Regexp.escape(string)` / `Regexp.quote(string)`: returns the argument
/// with regex metacharacters escaped (via `regex::escape`).
fn regexp_escape(vm: &mut VM, _: Value, args: &Args) -> VMResult {
    vm.check_args_num(args.len(), 1)?;
    expect_string!(string, vm, args[0]);
    let res = regex::escape(string);
    let regexp = Value::string(&vm.globals, res);
    Ok(regexp)
}
// Instance methods
// Utility methods
impl Regexp {
    /// Stores the result of a match into Ruby's special global variables:
    /// `$&` (whole match), `$'` (post-match substring), and `$1`, `$2`, ...
    /// (capture groups). When there is no match, `$&` and `$'` become nil.
    fn get_captures(vm: &mut VM, captures: &Captures, given: &str) {
        let id1 = vm.globals.get_ident_id("$&");
        let id2 = vm.globals.get_ident_id("$'");
        match captures.get(0) {
            Some(m) => {
                let val = Value::string(&vm.globals, given[m.start()..m.end()].to_string());
                vm.set_global_var(id1, val);
                let val = Value::string(&vm.globals, given[m.end()..].to_string());
                vm.set_global_var(id2, val);
            }
            None => {
                vm.set_global_var(id1, Value::nil());
                vm.set_global_var(id2, Value::nil());
            }
        };
        for i in 1..captures.len() {
            match captures.get(i) {
                Some(m) => Regexp::set_special_global(vm, i, given, m.start(), m.end()),
                None => Regexp::set_special_global_nil(vm, i),
            };
        }
    }
    /// Sets `$i` to the matched slice `given[start..end]`.
    fn set_special_global(vm: &mut VM, i: usize, given: &str, start: usize, end: usize) {
        let id = vm.globals.get_ident_id(format!("${}", i));
        let val = Value::string(&vm.globals, given[start..end].to_string());
        vm.set_global_var(id, val);
    }
    /// Sets `$i` to nil (group `i` did not participate in the match).
    fn set_special_global_nil(vm: &mut VM, i: usize) {
        let id = vm.globals.get_ident_id(format!("${}", i));
        vm.set_global_var(id, Value::nil());
    }
    /// Replaces the leftmost-first match with `replace`.
    ///
    /// `re_val` may be a Regexp, or a String that is compiled on the fly;
    /// anything else is an argument error. In `replace`, `\N` (N = 0..9)
    /// inserts capture group N (empty if absent); `\x` for any other `x`
    /// inserts `x` literally.
    pub fn replace_one(
        vm: &mut VM,
        re_val: Value,
        given: &str,
        replace: &str,
    ) -> Result<String, RubyError> {
        fn replace_(
            vm: &mut VM,
            re: &Regexp,
            given: &str,
            replace: &str,
        ) -> Result<String, RubyError> {
            match re.captures(given) {
                Ok(None) => Ok(given.to_string()),
                Ok(Some(captures)) => {
                    let mut res = given.to_string();
                    let m = captures.get(0).unwrap();
                    Regexp::get_captures(vm, &captures, given);
                    // Expand backslash escapes in the replacement string.
                    let mut rep = "".to_string();
                    let mut escape = false;
                    for ch in replace.chars() {
                        if escape {
                            match ch {
                                '0'..='9' => {
                                    let i = ch as usize - '0' as usize;
                                    match captures.get(i) {
                                        Some(m) => rep += m.as_str(),
                                        None => {}
                                    };
                                }
                                _ => rep.push(ch),
                            };
                            escape = false;
                        } else {
                            if ch != '\\' {
                                rep.push(ch);
                            } else {
                                escape = true;
                            };
                        }
                    }
                    res.replace_range(m.start()..m.end(), &rep);
                    Ok(res)
                }
                Err(err) => return Err(vm.error_internal(format!("Capture failed. {:?}", err))),
            }
        }
        if let Some(s) = re_val.as_string() {
            let re = vm.regexp_from_string(&s)?;
            return replace_(vm, &re, given, replace);
        } else if let Some(re) = re_val.as_regexp() {
            return replace_(vm, &re.regexp, given, replace);
        } else {
            return Err(vm.error_argument("1st arg must be RegExp or String."));
        };
    }
    /// Replaces the leftmost-first match with the result of calling `block`
    /// with the matched string. Returns the new string and whether a match
    /// was found.
    pub fn replace_one_block(
        vm: &mut VM,
        re_val: Value,
        given: &str,
        block: MethodRef,
    ) -> Result<(String, bool), RubyError> {
        fn replace_(
            vm: &mut VM,
            re: &Regexp,
            given: &str,
            block: MethodRef,
        ) -> Result<(String, bool), RubyError> {
            let (start, end, matched_str) = match re.captures_from_pos(given, 0) {
                Ok(None) => return Ok((given.to_string(), false)),
                Ok(Some(captures)) => {
                    let m = captures.get(0).unwrap();
                    Regexp::get_captures(vm, &captures, given);
                    (m.start(), m.end(), m.as_str())
                }
                Err(err) => return Err(vm.error_internal(format!("Capture failed. {:?}", err))),
            };
            let mut res = given.to_string();
            let matched = Value::string(&vm.globals, matched_str.to_string());
            let result = vm.eval_block(block, &Args::new1(matched))?;
            let s = vm.val_to_s(result);
            res.replace_range(start..end, &s);
            Ok((res, true))
        }
        if let Some(s) = re_val.as_string() {
            let re = vm.regexp_from_string(&s)?;
            return replace_(vm, &re, given, block);
        } else if let Some(re) = re_val.as_regexp() {
            return replace_(vm, &re.regexp, given, block);
        } else {
            return Err(vm.error_argument("1st arg must be RegExp or String."));
        };
    }
    /// Replaces all non-overlapping matches in `given` string with `replace`.
    /// Returns the new string and whether at least one match was found.
    pub fn replace_all(
        vm: &mut VM,
        re_val: Value,
        given: &str,
        replace: &str,
    ) -> Result<(String, bool), RubyError> {
        fn replace_(
            vm: &mut VM,
            re: &Regexp,
            given: &str,
            replace: &str,
        ) -> Result<(String, bool), RubyError> {
            let mut range = vec![];
            let mut i = 0;
            loop {
                if i >= given.len() {
                    break;
                }
                match re.captures_from_pos(given, i) {
                    Ok(None) => break,
                    Ok(Some(captures)) => {
                        let m = captures.get(0).unwrap();
                        // The length of the matched string can be 0.
                        // This guard is necessary to avoid an infinite loop.
                        i = if m.end() == m.start() {
                            m.end() + 1
                        } else {
                            m.end()
                        };
                        range.push((m.start(), m.end()));
                        Regexp::get_captures(vm, &captures, given);
                    }
                    Err(err) => return Err(vm.error_internal(format!("Capture failed. {:?}", err))),
                };
            }
            // Replace from the back so earlier indices stay valid.
            let mut res = given.to_string();
            for (start, end) in range.iter().rev() {
                res.replace_range(start..end, replace);
            }
            Ok((res, !range.is_empty()))
        }
        if let Some(s) = re_val.as_string() {
            let re = vm.regexp_from_string(&s)?;
            return replace_(vm, &re, given, replace);
        } else if let Some(re) = re_val.as_regexp() {
            return replace_(vm, &re.regexp, given, replace);
        } else {
            return Err(vm.error_argument("1st arg must be RegExp or String."));
        };
    }
    /// Replaces all non-overlapping matches in `given` string with the result
    /// of calling `block` on each matched string.
    pub fn replace_all_block(
        vm: &mut VM,
        re_val: Value,
        given: &str,
        block: MethodRef,
    ) -> Result<(String, bool), RubyError> {
        fn replace_(
            vm: &mut VM,
            re: &Regexp,
            given: &str,
            block: MethodRef,
        ) -> Result<(String, bool), RubyError> {
            let mut range = vec![];
            let mut i = 0;
            loop {
                // `i` may step one past the end after a zero-length match.
                if i > given.len() {
                    break;
                }
                let (start, end, matched_str) = match re.captures_from_pos(given, i) {
                    Ok(None) => break,
                    Ok(Some(captures)) => {
                        let m = captures.get(0).unwrap();
                        // Advance past zero-length matches to avoid an
                        // infinite loop (consistent with `replace_all`).
                        i = if m.end() == m.start() {
                            m.end() + 1
                        } else {
                            m.end()
                        };
                        Regexp::get_captures(vm, &captures, given);
                        (m.start(), m.end(), m.as_str())
                    }
                    Err(err) => return Err(vm.error_internal(format!("Capture failed. {:?}", err))),
                };
                let matched = Value::string(&vm.globals, matched_str.to_string());
                let result = vm.eval_block(block, &Args::new1(matched))?;
                let replace = vm.val_to_s(result);
                range.push((start, end, replace));
            }
            // Replace from the back so earlier indices stay valid.
            let mut res = given.to_string();
            for (start, end, replace) in range.iter().rev() {
                res.replace_range(start..end, replace);
            }
            Ok((res, !range.is_empty()))
        }
        if let Some(s) = re_val.as_string() {
            let re = vm.regexp_from_string(&s)?;
            return replace_(vm, &re, given, block);
        } else if let Some(re) = re_val.as_regexp() {
            return replace_(vm, &re.regexp, given, block);
        } else {
            return Err(vm.error_argument("1st arg must be RegExp or String."));
        };
    }
    /// Returns the leftmost-first match (setting the special globals), or
    /// `None` when the pattern does not match.
    pub fn find_one<'a>(
        vm: &mut VM,
        re: &Regexp,
        given: &'a str,
    ) -> Result<Option<Match<'a>>, RubyError> {
        match re.captures(given) {
            Ok(None) => Ok(None),
            Ok(Some(captures)) => {
                Regexp::get_captures(vm, &captures, given);
                Ok(captures.get(0))
            }
            Err(err) => Err(vm.error_internal(format!("Capture failed. {:?}", err))),
        }
    }
    /// Returns all non-overlapping matches: each match is a String when the
    /// pattern has no groups, otherwise an Array of the group values. The
    /// special globals are set from the last match.
    pub fn find_all(vm: &mut VM, re: &Regexp, given: &str) -> Result<Vec<Value>, RubyError> {
        let mut ary = vec![];
        let mut idx = 0;
        let mut last_captures = None;
        loop {
            // `idx` may step one past the end after a zero-length match.
            if idx > given.len() {
                break;
            }
            match re.captures_from_pos(given, idx) {
                Ok(None) => break,
                Ok(Some(captures)) => {
                    let m = captures.get(0).unwrap();
                    // Advance past zero-length matches to avoid an infinite
                    // loop (consistent with `replace_all`).
                    idx = if m.end() == m.start() {
                        m.end() + 1
                    } else {
                        m.end()
                    };
                    match captures.len() {
                        1 => {
                            let val =
                                Value::string(&vm.globals, given[m.start()..m.end()].to_string());
                            ary.push(val);
                        }
                        len => {
                            let mut vec = vec![];
                            for i in 1..len {
                                match captures.get(i) {
                                    Some(m) => {
                                        let s = given[m.start()..m.end()].to_string();
                                        vec.push(Value::string(&vm.globals, s));
                                    }
                                    None => vec.push(Value::nil()),
                                }
                            }
                            let val = Value::array_from(&vm.globals, vec);
                            ary.push(val);
                        }
                    }
                    last_captures = Some(captures);
                }
                Err(err) => return Err(vm.error_internal(format!("Capture failed. {:?}", err))),
            };
        }
        match last_captures {
            Some(c) => Regexp::get_captures(vm, &c, given),
            None => {}
        }
        Ok(ary)
    }
}
#[cfg(test)]
mod test {
    use crate::test::*;
    // End-to-end check through the interpreter: gsub replacement, `===`
    // matching, and the special globals $&, $1, $2 set by a match.
    #[test]
    fn regexp1() {
        let program = r#"
        assert "abc!!g", "abcdefg".gsub(/def/, "!!")
        assert "2.5".gsub(".", ","), "2,5"
        assert true, /(aa).*(bb)/ === "andaadefbbje"
        assert "aadefbb", $&
        assert "aa", $1
        assert "bb", $2
        "#;
        assert_script(program);
    }
}
|
// Copyright 2015-2016 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/// RSA PKCS#1 1.5 signatures.
use {der, digest, error};
use untrusted;
#[cfg(feature = "rsa_signing")]
use c;
#[cfg(feature = "rsa_signing")]
use core;
/// An RSA PKCS#1 1.5 padding scheme: a digest algorithm paired with the
/// DER-encoded `DigestInfo` prefix that precedes that digest.
pub struct RSAPadding {
    digest_alg: &'static digest::Algorithm,
    digestinfo_prefix: &'static [u8],
}
// Declares a public `RSAPadding` static for one digest algorithm, with the
// given rustdoc string attached.
macro_rules! rsa_pkcs1_padding {
    ( $PADDING_ALGORITHM:ident, $digest_alg:expr, $digestinfo_prefix:expr,
      $doc_str:expr ) => {
        #[doc=$doc_str]
        /// Feature: `rsa_signing`.
        pub static $PADDING_ALGORITHM: RSAPadding = RSAPadding {
            digest_alg: $digest_alg,
            digestinfo_prefix: $digestinfo_prefix,
        };
    }
}
// One padding static per supported digest. The doc string for SHA-384
// previously read "SHA3846" (typo).
rsa_pkcs1_padding!(RSA_PKCS1_SHA1, &digest::SHA1,
                   &SHA1_PKCS1_DIGESTINFO_PREFIX,
                   "Signing using RSA with PKCS#1 1.5 padding and SHA-1.");
rsa_pkcs1_padding!(RSA_PKCS1_SHA256, &digest::SHA256,
                   &SHA256_PKCS1_DIGESTINFO_PREFIX,
                   "Signing using RSA with PKCS#1 1.5 padding and SHA-256.");
rsa_pkcs1_padding!(RSA_PKCS1_SHA384, &digest::SHA384,
                   &SHA384_PKCS1_DIGESTINFO_PREFIX,
                   "Signing using RSA with PKCS#1 1.5 padding and SHA-384.");
rsa_pkcs1_padding!(RSA_PKCS1_SHA512, &digest::SHA512,
                   &SHA512_PKCS1_DIGESTINFO_PREFIX,
                   "Signing using RSA with PKCS#1 1.5 padding and SHA-512.");
// Generates the DER `DigestInfo` header (SEQUENCE / AlgorithmIdentifier /
// OID / NULL / OCTET STRING) that is prepended to a digest in PKCS#1 v1.5
// signature encoding.
//
// NOTE: this macro was previously defined twice, byte-for-byte identical
// (a copy-paste duplicate); the redundant second definition was removed.
macro_rules! pkcs1_digestinfo_prefix {
    ( $name:ident, $digest_len:expr, $digest_oid_len:expr,
      [ $( $digest_oid:expr ),* ] ) => {
        static $name: [u8; 2 + 8 + $digest_oid_len] = [
            der::Tag::Sequence as u8, 8 + $digest_oid_len + $digest_len,
            der::Tag::Sequence as u8, 2 + $digest_oid_len + 2,
            der::Tag::OID as u8, $digest_oid_len, $( $digest_oid ),*,
            der::Tag::Null as u8, 0,
            der::Tag::OctetString as u8, $digest_len,
        ];
    }
}
// DigestInfo prefixes: (digest length, OID length, OID bytes) per hash.
pkcs1_digestinfo_prefix!(
    SHA1_PKCS1_DIGESTINFO_PREFIX, 20, 5, [ 0x2b, 0x0e, 0x03, 0x02, 0x1a ]);
pkcs1_digestinfo_prefix!(
    SHA256_PKCS1_DIGESTINFO_PREFIX, 32, 9,
    [ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01 ]);
pkcs1_digestinfo_prefix!(
    SHA384_PKCS1_DIGESTINFO_PREFIX, 48, 9,
    [ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02 ]);
pkcs1_digestinfo_prefix!(
    SHA512_PKCS1_DIGESTINFO_PREFIX, 64, 9,
    [ 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03 ]);
/// Parameters for RSA signing and verification.
pub struct RSAParameters {
    // Padding scheme (digest + DigestInfo prefix) to use.
    padding_alg: &'static RSAPadding,
    // Minimum acceptable public modulus size, in bits.
    min_bits: usize,
}
// Parses a DER RSAPublicKey: SEQUENCE { n INTEGER, e INTEGER }, returning
// the raw big-endian bytes of the (presumed) modulus and exponent.
fn parse_public_key(input: untrusted::Input)
                    -> Result<(&[u8], &[u8]), error::Unspecified> {
    input.read_all(error::Unspecified, |input| {
        der::nested(input, der::Tag::Sequence, error::Unspecified, |input| {
            let n = try!(der::positive_integer(input));
            let e = try!(der::positive_integer(input));
            Ok((n.as_slice_less_safe(), e.as_slice_less_safe()))
        })
    })
}
#[cfg(feature = "rsa_signing")]
struct PositiveInteger {
    // `Some` while this wrapper owns the BIGNUM; `None` after `into_raw`
    // transfers ownership to the caller (see `Drop` below).
    value: Option<*mut BIGNUM>,
}
#[allow(unsafe_code)]
#[cfg(feature = "rsa_signing")]
impl PositiveInteger {
    // Parses a single ASN.1 DER-encoded `Integer`, which must be positive.
    fn from_der(input: &mut untrusted::Reader)
                -> Result<PositiveInteger, error::Unspecified> {
        let bytes = try!(der::positive_integer(input)).as_slice_less_safe();
        // SAFETY: `bytes` is a valid slice; a null `ret` asks BN_bin2bn to
        // allocate a fresh BIGNUM, whose ownership we take on success.
        let res = unsafe {
            GFp_BN_bin2bn(bytes.as_ptr(), bytes.len(), core::ptr::null_mut())
        };
        if res.is_null() {
            return Err(error::Unspecified);
        }
        Ok(PositiveInteger { value: Some(res) })
    }
    // Caller must ensure the BIGNUM has not been released via `into_raw`.
    unsafe fn as_ref<'a>(&'a self) -> &'a BIGNUM { &*self.value.unwrap() }
    // Transfers ownership of the BIGNUM to the caller; `Drop` becomes a no-op.
    fn into_raw(&mut self) -> *mut BIGNUM {
        let res = self.value.unwrap();
        self.value = None;
        res
    }
}
#[allow(unsafe_code)]
#[cfg(feature = "rsa_signing")]
impl Drop for PositiveInteger {
    /// Frees the owned BIGNUM, unless ownership was already transferred via
    /// `into_raw` (in which case `value` is `None`).
    fn drop(&mut self) {
        // `if let` replaces the former `match` with an empty `None` arm
        // (clippy: single_match).
        if let Some(val) = self.value {
            // SAFETY: `val` came from GFp_BN_bin2bn and has not been freed,
            // since `into_raw` clears `value` when it hands the pointer out.
            unsafe { GFp_BN_free(val) };
        }
    }
}
#[cfg(feature = "rsa_signing")]
#[allow(non_camel_case_types)]
// Opaque C type; values only exist behind pointers crossing the FFI below.
enum BN_MONT_CTX {}
pub mod verification;
#[cfg(feature = "rsa_signing")]
// Opaque C type for a big number; manipulated only via the `GFp_BN_*` FFI.
enum BIGNUM {}
#[cfg(feature = "rsa_signing")]
pub mod signing;
#[cfg(feature = "rsa_signing")]
extern {
    fn GFp_BN_bin2bn(in_: *const u8, len: c::size_t, ret: *mut BIGNUM)
        -> *mut BIGNUM;
    fn GFp_BN_free(bn: *mut BIGNUM);
    fn GFp_BN_MONT_CTX_free(mont: *mut BN_MONT_CTX);
}
|
use exonum::{
api::{self, ServiceApiBuilder, ServiceApiState},
blockchain::{self, BlockProof, TransactionMessage},
crypto::{Hash, PublicKey},
explorer::BlockchainExplorer,
helpers::Height,
};
use exonum_merkledb::{ListProof, MapProof};
use super::{schema::Schema, SERVICE_ID};
use crate::queue::Queue;
use crate::profile::Profile;
use crate::profile_attribute_value::ProfileAttributeValue;
use crate::queue_attributes::AttributesInQueue;
/// Query parameters for the `get_queue` endpoint.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct GetFirstQuery {
    /// Public key identifying the queue to fetch.
    pub pub_key: PublicKey,
}
/// Empty query for endpoints that take no parameters
/// (`get_queue_properties`, `get_all_queues`, ...).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct GetQueuesQuery {
}
/// Query parameters for profile lookups.
/// NOTE(review): no visible endpoint uses this type — confirm intended use.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub struct GetProfilesQuery {
    /// Public key of the profile owner.
    pub pub_key: PublicKey,
}
/// Public service API description.
#[derive(Debug, Clone, Copy)]
pub struct PublicApi;
impl PublicApi {
    /// Returns the name of the queue stored under `query.pub_key`.
    ///
    /// NOTE(review): panics via `unwrap` when no queue exists for the key;
    /// consider mapping that case to an API error — confirm with callers.
    fn get_queue(state: &ServiceApiState, query: GetFirstQuery) -> api::Result<String> {
        let snapshot = state.snapshot();
        let schema = Schema::new(&snapshot);
        let first = schema.queue(&query.pub_key).unwrap();
        Ok(first.name)
    }
    /// Returns every queue-attribute record in the schema.
    fn get_queue_properties(
        state: &ServiceApiState,
        _: GetQueuesQuery,
    ) -> api::Result<Vec<AttributesInQueue>> {
        let snapshot = state.snapshot();
        let schema = Schema::new(&snapshot);
        // Collect directly instead of a manual push loop.
        Ok(schema.attributes_in_queues().values().collect())
    }
    /// Returns all queues.
    fn get_all_queues(state: &ServiceApiState, _: GetQueuesQuery) -> api::Result<Vec<Queue>> {
        let snapshot = state.snapshot();
        let schema = Schema::new(&snapshot);
        // `allQueues` renamed and the manual loop replaced by `collect`.
        Ok(schema.queues().values().collect())
    }
    /// Returns all profiles, sorted by rating in descending order.
    fn get_profiles(state: &ServiceApiState, _: GetQueuesQuery) -> api::Result<Vec<Profile>> {
        let snapshot = state.snapshot();
        let schema = Schema::new(&snapshot);
        let mut profiles: Vec<Profile> = schema.profiles().values().collect();
        profiles.sort_by(|a, b| b.rating.cmp(&a.rating));
        Ok(profiles)
    }
    /// Returns every profile-attribute value record.
    fn get_profile_attributes(
        state: &ServiceApiState,
        _: GetQueuesQuery,
    ) -> api::Result<Vec<ProfileAttributeValue>> {
        let snapshot = state.snapshot();
        let schema = Schema::new(&snapshot);
        Ok(schema.profiles_attributes().values().collect())
    }
    /// Wires the above endpoints to the public scope of the given
    /// `ServiceApiBuilder`.
    pub fn wire(builder: &mut ServiceApiBuilder) {
        builder
            .public_scope()
            .endpoint("v1/queue_constructor/get_queue", Self::get_queue)
            .endpoint("v1/queue_constructor/get_queue_properties", Self::get_queue_properties)
            .endpoint("v1/queue_constructor/get_all_queues", Self::get_all_queues)
            .endpoint("v1/queue_constructor/get_profiles", Self::get_profiles)
            .endpoint("v1/queue_constructor/get_profile_attributes", Self::get_profile_attributes);
    }
}
|
#[doc = "Reader of register LL_CLK_EN"]
pub type R = crate::R<u32, super::LL_CLK_EN>;
#[doc = "Writer for register LL_CLK_EN"]
pub type W = crate::W<u32, super::LL_CLK_EN>;
#[doc = "Register LL_CLK_EN `reset()`'s with value 0x26"]
impl crate::ResetValue for super::LL_CLK_EN {
    type Type = u32;
    // 0x26 = 0b10_0110: CY_CORREL_EN (bit 1), MXD_IF_OPTION (bit 2) and
    // DPSLP_HWRCB_EN (bit 5) are set out of reset; all other bits clear.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x26
    }
}
// Generated (svd2rust-style) reader aliases and single-bit write proxies for
// the LL_CLK_EN fields. Each proxy masks and shifts its bit into `W.bits`.
#[doc = "Reader of field `CLK_EN`"]
pub type CLK_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CLK_EN`"]
pub struct CLK_EN_W<'a> {
    w: &'a mut W,
}
impl<'a> CLK_EN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 0.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `CY_CORREL_EN`"]
pub type CY_CORREL_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CY_CORREL_EN`"]
pub struct CY_CORREL_EN_W<'a> {
    w: &'a mut W,
}
impl<'a> CY_CORREL_EN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 1.
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `MXD_IF_OPTION`"]
pub type MXD_IF_OPTION_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `MXD_IF_OPTION`"]
pub struct MXD_IF_OPTION_W<'a> {
    w: &'a mut W,
}
impl<'a> MXD_IF_OPTION_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 2.
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `SEL_RCB_CLK`"]
pub type SEL_RCB_CLK_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SEL_RCB_CLK`"]
pub struct SEL_RCB_CLK_W<'a> {
    w: &'a mut W,
}
impl<'a> SEL_RCB_CLK_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 3.
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
#[doc = "Reader of field `BLESS_RESET`"]
pub type BLESS_RESET_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `BLESS_RESET`"]
pub struct BLESS_RESET_W<'a> {
    w: &'a mut W,
}
impl<'a> BLESS_RESET_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 4.
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
#[doc = "Reader of field `DPSLP_HWRCB_EN`"]
pub type DPSLP_HWRCB_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DPSLP_HWRCB_EN`"]
pub struct DPSLP_HWRCB_EN_W<'a> {
    w: &'a mut W,
}
impl<'a> DPSLP_HWRCB_EN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Field occupies bit 5.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
// Read accessors: each extracts one bit of the register value into a
// typed reader (bit positions match the write proxies above).
impl R {
    #[doc = "Bit 0 - Set this bit 1 to enable the clock to Link Layer."]
    #[inline(always)]
    pub fn clk_en(&self) -> CLK_EN_R {
        CLK_EN_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - If MXD_IF option is 1, this bit needs to be set to enable configuring the correlator through BLELL.DPLL_CONFIG register"]
    #[inline(always)]
    pub fn cy_correl_en(&self) -> CY_CORREL_EN_R {
        CY_CORREL_EN_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - 1: MXD IF option 0: CYBLERD55 correlates Access Code 0: MXD IF option 1: LL correlates Access Code"]
    #[inline(always)]
    pub fn mxd_if_option(&self) -> MXD_IF_OPTION_R {
        MXD_IF_OPTION_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - 0: AHB clock (clk_sys) is used as the clock for RCB access 1: LL clock (clk_eco) is used as the clock for RCB access"]
    #[inline(always)]
    pub fn sel_rcb_clk(&self) -> SEL_RCB_CLK_R {
        SEL_RCB_CLK_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - 0: No Soft Reset 1: Initiate Soft Reset Setting this bit will reset entire BLESS_VER3"]
    #[inline(always)]
    pub fn bless_reset(&self) -> BLESS_RESET_R {
        BLESS_RESET_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - Controls the DPSLP entry and exit writes to RD and controls the active domain reset and clock. 1 - LL HW controls the RD active domain reset and clock. 0 - The RD active domain reset and clock. Must be controlled by the FW"]
    #[inline(always)]
    pub fn dpslp_hwrcb_en(&self) -> DPSLP_HWRCB_EN_R {
        DPSLP_HWRCB_EN_R::new(((self.bits >> 5) & 0x01) != 0)
    }
}
// Write accessors: each returns a proxy targeting one bit of LL_CLK_EN.
impl W {
    #[doc = "Bit 0 - Set this bit 1 to enable the clock to Link Layer."]
    #[inline(always)]
    pub fn clk_en(&mut self) -> CLK_EN_W {
        CLK_EN_W { w: self }
    }
    #[doc = "Bit 1 - If MXD_IF option is 1, this bit needs to be set to enable configuring the correlator through BLELL.DPLL_CONFIG register"]
    #[inline(always)]
    pub fn cy_correl_en(&mut self) -> CY_CORREL_EN_W {
        CY_CORREL_EN_W { w: self }
    }
    #[doc = "Bit 2 - 1: MXD IF option 0: CYBLERD55 correlates Access Code 0: MXD IF option 1: LL correlates Access Code"]
    #[inline(always)]
    pub fn mxd_if_option(&mut self) -> MXD_IF_OPTION_W {
        MXD_IF_OPTION_W { w: self }
    }
    #[doc = "Bit 3 - 0: AHB clock (clk_sys) is used as the clock for RCB access 1: LL clock (clk_eco) is used as the clock for RCB access"]
    #[inline(always)]
    pub fn sel_rcb_clk(&mut self) -> SEL_RCB_CLK_W {
        SEL_RCB_CLK_W { w: self }
    }
    #[doc = "Bit 4 - 0: No Soft Reset 1: Initiate Soft Reset Setting this bit will reset entire BLESS_VER3"]
    #[inline(always)]
    pub fn bless_reset(&mut self) -> BLESS_RESET_W {
        BLESS_RESET_W { w: self }
    }
    #[doc = "Bit 5 - Controls the DPSLP entry and exit writes to RD and controls the active domain reset and clock. 1 - LL HW controls the RD active domain reset and clock. 0 - The RD active domain reset and clock. Must be controlled by the FW"]
    #[inline(always)]
    pub fn dpslp_hwrcb_en(&mut self) -> DPSLP_HWRCB_EN_W {
        DPSLP_HWRCB_EN_W { w: self }
    }
}
|
/*=======================================
* @FileName: [1].两数之和.rs
* @Description:
* @Author: TonyLaw
* @Date: 2021-08-26 03:02:33 Thursday
* @Copyright: © 2021 TonyLaw. All Rights reserved.
=========================================*/
/*=======================================
(题目难度:简单)
给定一个整数数组 nums 和一个整数目标值 target,请你在该数组中找出 和为目标值 target 的那 两个 整数,并返回它们的数组下标。
你可以假设每种输入只会对应一个答案。但是,数组中同一个元素在答案里不能重复出现。
你可以按任意顺序返回答案。
示例 1:
输入:nums = [2,7,11,15], target = 9
输出:[0,1]
解释:因为 nums[0] + nums[1] == 9 ,返回 [0, 1] 。
示例 2:
输入:nums = [3,2,4], target = 6
输出:[1,2]
示例 3:
输入:nums = [3,3], target = 6
输出:[0,1]
提示:
2 <= nums.length <= 104
-109 <= nums[i] <= 109
-109 <= target <= 109
只会存在一个有效答案
=========================================*/
use std::collections::HashMap;
struct Solution;
impl Solution {
    /// Returns the indices of the two distinct elements of `nums` that sum
    /// to `target`, as `[earlier, later]`, using one pass with a
    /// value -> index map (O(n) time, O(n) space).
    ///
    /// Panics if no such pair exists (the problem guarantees one answer).
    pub fn two_sum(nums: Vec<i32>, target: i32) -> Vec<i32> {
        let mut map = HashMap::with_capacity(nums.len());
        for (i, &num) in nums.iter().enumerate() {
            // Complements are inserted AFTER the lookup, so a hit is always
            // an earlier index and can never equal `i` — the former
            // `*k != i` guard was dead code and has been removed.
            if let Some(&k) = map.get(&(target - num)) {
                return vec![k as i32, i as i32];
            }
            map.insert(num, i);
        }
        panic!("not found")
    }
}
fn main() {
let l1 = vec![2, 3, 4];
let i1 = 6;
let result = Solution::two_sum(l1, i1);
println!("{:?}", result);
} |
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(clippy::useless_transmute)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::missing_safety_doc)]
// Pulls in code generated at build time into OUT_DIR (presumably bindgen
// output from build.rs — confirm). The allows above silence lints that
// generated code routinely trips.
include!(concat!(env!("OUT_DIR"), "/lib.rs"));
|
use std::io::*;
use std::fs::{File, OpenOptions};
use std::path::Path;
mod config;
mod game;
use game::*;
use config::*;
#[allow(dead_code)]
/// Bot entry point: reads the game config from stdin, opens the debug log,
/// sends the `go` handshake and enters the command loop.
fn main() {
    let stdin = stdin();
    let stdout = stdout();
    let config = read_config(&mut BufReader::new(stdin.lock()));
    let mut board = Board::new();
    let debug_log_path = Path::new("E:\\rustbot.log");
    let mut options = OpenOptions::new();
    // `truncate(true)` discards any previous run's log; without it, stale
    // bytes beyond the new end of the log would survive (write+create alone
    // overwrites in place without shrinking the file).
    options
        .write(true)
        .create(true)
        .truncate(true);
    let debug_log_file = options.open(&debug_log_path).expect("failed to open debug log");
    let mut log_writer = BufWriter::new(debug_log_file);
    // Logging is best-effort: ignore write errors rather than crash the bot.
    let _ = log_writer.write_all(format!("Start game: Config: {:?}\n", config).as_bytes());
    println!("go");
    run_game(&mut log_writer, &mut BufReader::new(stdin.lock()), &mut BufWriter::new(stdout.lock()), config, &mut board);
}
/// Command loop: reads newline-terminated commands from `in_stream`, echoes
/// the `go` handshake on `out_stream`, and logs anything it does not
/// recognize. Returns when the input stream reaches EOF.
///
/// NOTE(review): `config` and `board` are currently unused here — presumably
/// reserved for move generation; confirm before removing.
fn run_game<L: Write, R: Read, W: Write>(log_writer: &mut BufWriter<L>, in_stream: &mut BufReader<R>, out_stream: &mut BufWriter<W>, config: GameConfig, board: &mut Board) {
    loop {
        // Flush buffered output/log before blocking on the next command;
        // both are best-effort, so errors are deliberately ignored.
        let _ = out_stream.flush();
        let _ = log_writer.flush();
        let mut line = String::new();
        // `read_line` returns Ok(0) at EOF. The previous code treated EOF
        // as an empty line and busy-spun forever; return instead.
        if in_stream.read_line(&mut line).unwrap() == 0 {
            return;
        }
        line = line.trim().to_string();
        if line.is_empty() {
            continue;
        } else if line.eq("go") {
            let _ = out_stream.write_all(b"go\n");
        } else {
            let _ = log_writer.write_all(format!("RECEIVED: {}\n", line).as_bytes());
            continue;
        }
    }
}
|
// Generated (svd2rust-style) reader type for register ENABLED1 and one
// boolean reader alias per clock-enable flag; bit positions are documented
// on the accessor methods in `impl R` below.
#[doc = "Reader of register ENABLED1"]
pub type R = crate::R<u32, super::ENABLED1>;
#[doc = "Reader of field `clk_sys_xosc`"]
pub type CLK_SYS_XOSC_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_xip`"]
pub type CLK_SYS_XIP_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_watchdog`"]
pub type CLK_SYS_WATCHDOG_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_usb_usbctrl`"]
pub type CLK_USB_USBCTRL_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_usbctrl`"]
pub type CLK_SYS_USBCTRL_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_uart1`"]
pub type CLK_SYS_UART1_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_peri_uart1`"]
pub type CLK_PERI_UART1_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_uart0`"]
pub type CLK_SYS_UART0_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_peri_uart0`"]
pub type CLK_PERI_UART0_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_timer`"]
pub type CLK_SYS_TIMER_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_tbman`"]
pub type CLK_SYS_TBMAN_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_sysinfo`"]
pub type CLK_SYS_SYSINFO_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_syscfg`"]
pub type CLK_SYS_SYSCFG_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_sram5`"]
pub type CLK_SYS_SRAM5_R = crate::R<bool, bool>;
#[doc = "Reader of field `clk_sys_sram4`"]
pub type CLK_SYS_SRAM4_R = crate::R<bool, bool>;
// Read accessors: one per clock-enable flag, bits 14 down to 0.
impl R {
    #[doc = "Bit 14"]
    #[inline(always)]
    pub fn clk_sys_xosc(&self) -> CLK_SYS_XOSC_R {
        CLK_SYS_XOSC_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 13"]
    #[inline(always)]
    pub fn clk_sys_xip(&self) -> CLK_SYS_XIP_R {
        CLK_SYS_XIP_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 12"]
    #[inline(always)]
    pub fn clk_sys_watchdog(&self) -> CLK_SYS_WATCHDOG_R {
        CLK_SYS_WATCHDOG_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 11"]
    #[inline(always)]
    pub fn clk_usb_usbctrl(&self) -> CLK_USB_USBCTRL_R {
        CLK_USB_USBCTRL_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 10"]
    #[inline(always)]
    pub fn clk_sys_usbctrl(&self) -> CLK_SYS_USBCTRL_R {
        CLK_SYS_USBCTRL_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 9"]
    #[inline(always)]
    pub fn clk_sys_uart1(&self) -> CLK_SYS_UART1_R {
        CLK_SYS_UART1_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 8"]
    #[inline(always)]
    pub fn clk_peri_uart1(&self) -> CLK_PERI_UART1_R {
        CLK_PERI_UART1_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 7"]
    #[inline(always)]
    pub fn clk_sys_uart0(&self) -> CLK_SYS_UART0_R {
        CLK_SYS_UART0_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 6"]
    #[inline(always)]
    pub fn clk_peri_uart0(&self) -> CLK_PERI_UART0_R {
        CLK_PERI_UART0_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 5"]
    #[inline(always)]
    pub fn clk_sys_timer(&self) -> CLK_SYS_TIMER_R {
        CLK_SYS_TIMER_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 4"]
    #[inline(always)]
    pub fn clk_sys_tbman(&self) -> CLK_SYS_TBMAN_R {
        CLK_SYS_TBMAN_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 3"]
    #[inline(always)]
    pub fn clk_sys_sysinfo(&self) -> CLK_SYS_SYSINFO_R {
        CLK_SYS_SYSINFO_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 2"]
    #[inline(always)]
    pub fn clk_sys_syscfg(&self) -> CLK_SYS_SYSCFG_R {
        CLK_SYS_SYSCFG_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 1"]
    #[inline(always)]
    pub fn clk_sys_sram5(&self) -> CLK_SYS_SRAM5_R {
        CLK_SYS_SRAM5_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 0"]
    #[inline(always)]
    pub fn clk_sys_sram4(&self) -> CLK_SYS_SRAM4_R {
        CLK_SYS_SRAM4_R::new((self.bits & 0x01) != 0)
    }
}
|
use std::fs;
mod day03 {
    use std::collections::{HashMap, HashSet};
    pub type ClaimId = i32;
    /// A single square-inch position on the fabric grid.
    #[derive(Debug, PartialEq, Eq, Hash)]
    pub struct Pos {
        pub x: i32,
        pub y: i32,
    }
    /// Maps each claimed position to the set of claim ids covering it.
    type ClaimsReport = HashMap<Pos, HashSet<ClaimId>>;
    /// Part A: number of positions claimed by two or more claims.
    pub fn solve_a(input: &str) -> usize {
        let claims_report = create_claims_report(input);
        count_overlapping_positions(&claims_report)
    }
    /// Part B: id of the single claim that overlaps no other claim.
    pub fn solve_b(input: &str) -> i32 {
        let claims_report = create_claims_report(input);
        find_not_overlapping_claim(&claims_report)
    }
    /// Parses one claim line, e.g. `#1 @ 265,241: 16x26`, into every
    /// `(position, id)` pair the claim covers.
    pub fn parse_claim(line: &str) -> Vec<(Pos, ClaimId)> {
        let chunks = line.split(' ').collect::<Vec<&str>>();
        let id = chunks[0][1..].parse::<i32>().unwrap();
        let pos = chunks[2].split(':').collect::<Vec<&str>>()[0]
            .split(',')
            .collect::<Vec<&str>>();
        let x = pos[0].parse::<i32>().unwrap();
        let y = pos[1].parse::<i32>().unwrap();
        let dim = chunks[3].split('x').collect::<Vec<&str>>();
        let w = dim[0].parse::<i32>().unwrap();
        let h = dim[1].parse::<i32>().unwrap();
        let mut claims = vec![];
        for y in y..y + h {
            for x in x..x + w {
                // Field-init shorthand replaces `Pos { x: x, y: y }`.
                claims.push((Pos { x, y }, id));
            }
        }
        claims
    }
    /// Parses all non-empty lines of the input into claimed positions.
    pub fn parse_claims(input: &str) -> Vec<(Pos, ClaimId)> {
        input
            .split('\n')
            .filter(|l| !l.is_empty())
            .flat_map(parse_claim)
            .collect::<Vec<(Pos, ClaimId)>>()
    }
    /// Groups claim ids by position. The entry API performs a single map
    /// lookup per claim (the previous get_mut/insert did two).
    pub fn analyze_claims(claims: Vec<(Pos, ClaimId)>) -> ClaimsReport {
        let mut dict: ClaimsReport = HashMap::new();
        for (pos, id) in claims {
            dict.entry(pos).or_insert_with(HashSet::new).insert(id);
        }
        dict
    }
    pub fn create_claims_report(input: &str) -> ClaimsReport {
        analyze_claims(parse_claims(input))
    }
    /// Counts positions covered by more than one claim.
    pub fn count_overlapping_positions(report: &ClaimsReport) -> usize {
        report.values().filter(|set| set.len() > 1).count()
    }
    /// Finds the unique claim id that never shares a position with another
    /// claim. Panics (assert) if the count of such claims is not exactly 1.
    pub fn find_not_overlapping_claim(report: &ClaimsReport) -> ClaimId {
        let mut overlapping: HashSet<ClaimId> = HashSet::new();
        let mut isolated: HashSet<ClaimId> = HashSet::new();
        for ids in report.values() {
            if ids.len() > 1 {
                // Every id here overlaps; also retract any earlier
                // tentative "isolated" classification.
                for id in ids.iter() {
                    overlapping.insert(*id);
                    isolated.remove(id);
                }
            } else {
                for id in ids.iter() {
                    if !overlapping.contains(id) {
                        isolated.insert(*id);
                    }
                }
            }
        }
        assert_eq!(
            isolated.len(),
            1,
            "There should be only one isolated claim!"
        );
        *isolated.iter().next().unwrap()
    }
}
#[cfg(test)]
mod test_day03 {
    use super::day03::*;
    // Verifies the expansion of a single claim into its covered positions.
    #[test]
    pub fn test_parse_claim() {
        let input = "#123 @ 3,2: 5x4";
        let result = parse_claim(input);
        let expected = "3 2 123;4 2 123;5 2 123;6 2 123;7 2 123;3 3 123;4 3 123;5 3 123;6 3 123;7 3 123;3 4 123;4 4 123;5 4 123;6 4 123;7 4 123;3 5 123;4 5 123;5 5 123;6 5 123;7 5 123";
        let result_str = result
            .iter()
            .map(|(pos, id)| format!("{} {} {}", pos.x, pos.y, id))
            .collect::<Vec<String>>()
            .join(";");
        assert_eq!(result.len(), 20);
        assert_eq!(result_str, expected);
    }
    // Verifies multi-line parsing preserves per-claim order.
    #[test]
    pub fn test_parse_claims() {
        let input = "#1 @ 1,3: 4x4\n#2 @ 3,1: 4x4\n#3 @ 5,5: 2x2";
        let result = parse_claims(input);
        let expected = "1 3 1;2 3 1;3 3 1;4 3 1;1 4 1;2 4 1;3 4 1;4 4 1;1 5 1;2 5 1;3 5 1;4 5 1;1 6 1;2 6 1;3 6 1;4 6 1;3 1 2;4 1 2;5 1 2;6 1 2;3 2 2;4 2 2;5 2 2;6 2 2;3 3 2;4 3 2;5 3 2;6 3 2;3 4 2;4 4 2;5 4 2;6 4 2;5 5 3;6 5 3;5 6 3;6 6 3";
        let result_str = result
            .iter()
            .map(|(pos, id)| format!("{} {} {}", pos.x, pos.y, id))
            .collect::<Vec<String>>()
            .join(";");
        assert_eq!(result.len(), 36);
        assert_eq!(result_str, expected);
    }
    // Verifies grouping of claim ids per position.
    #[test]
    pub fn test_analyze_claims() {
        let input = vec![
            (Pos { x: 3, y: 2 }, 1),
            (Pos { x: 3, y: 3 }, 1),
            (Pos { x: 3, y: 3 }, 2),
            (Pos { x: 2, y: 3 }, 2),
        ];
        let result = analyze_claims(input);
        assert_eq!(result.get(&Pos { x: 3, y: 2 }).unwrap().len(), 1);
        assert_eq!(result.get(&Pos { x: 3, y: 3 }).unwrap().len(), 2);
        assert_eq!(result.get(&Pos { x: 2, y: 3 }).unwrap().len(), 1);
    }
}
/// Reads the puzzle input and prints the answers for both parts of day 3.
fn main() {
    let input = fs::read_to_string("input.txt").unwrap();
    // Expected answer: 101781
    println!("Solving Day03A...");
    println!("{}", crate::day03::solve_a(&input));
    // Expected answer: 909
    println!("Solving Day03B...");
    println!("{}", crate::day03::solve_b(&input));
}
|
use super::definitions::Filter;
use super::definitions::Ordering;
use super::DataSource;
use serde::{Deserialize, Serialize};
/// A "scan" query that streams raw rows from a data source.
/// NOTE(review): field names (`queryType`, `intervals`, `resultFormat`)
/// match the Apache Druid native scan query — confirm target system.
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "queryType", rename = "scan")]
#[serde(rename_all = "camelCase")]
pub struct Scan {
// Source table/datasource to read rows from.
pub data_source: DataSource,
// Time intervals to scan, serialized as strings.
pub intervals: Vec<String>,
// Shape of the returned rows (see `ResultFormat`).
pub result_format: ResultFormat,
// Optional row filter; omitted entirely when `None`.
pub filter: Option<Filter>,
// Columns to return; presumably empty means "all" — TODO confirm.
pub columns: Vec<String>,
// Maximum rows returned per batch.
pub batch_size: usize,
// Optional cap on total rows returned.
pub limit: Option<usize>,
pub ordering: Option<Ordering>,
// Free-form query context key/value pairs.
pub context: std::collections::HashMap<String, String>,
}
/// Wire format of scan-query result rows.
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub enum ResultFormat {
// Each row as a map of column name to value.
List,
// Each row as a positional array of values.
CompactedList,
// Columnar value vectors.
ValueVector,
}
|
use std::fs;
use std::path::PathBuf;
// Panic message used when persisting an artifact fails.
static ARTIFACT_WRITE_ERROR: &str = "Could not write artifact to file";
/// A file-backed artifact identified by its on-disk location.
pub struct Artifact {
// Path the artifact's contents are written to.
pub location: PathBuf,
}
impl Artifact {
    /// Writes `content` to `self.location`, replacing any existing file.
    ///
    /// # Panics
    /// Panics with `ARTIFACT_WRITE_ERROR` if the filesystem write fails.
    pub fn write<S>(&self, content: S)
    where
        S: AsRef<str>,
    {
        // `&str` already implements `AsRef<[u8]>`, so the explicit
        // `as_bytes()` conversion is unnecessary.
        fs::write(&self.location, content.as_ref()).expect(ARTIFACT_WRITE_ERROR);
    }
}
|
use crate::models::{ComicId, ComicIdInvalidity, Token};
use crate::util::{ensure_is_authorized, ensure_is_valid};
use actix_web::{error, web, HttpResponse, Result};
use actix_web_grants::permissions::AuthDetails;
use database::models::{Comic as DatabaseComic, LogEntry};
use database::DbPool;
use parse_display::Display;
use semval::{context::Context as ValidationContext, Validate};
use serde::Deserialize;
use shared::token_permissions;
/// Handler: marks a comic as a guest comic (`true`) or a Jeph comic (`false`).
pub(crate) async fn set_guest(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::IsGuestComic).await?;
Ok(HttpResponse::Ok().body("Guest comic set or updated for comic"))
}
/// Handler: marks a comic as non-canon (`true`) or canon (`false`).
pub(crate) async fn set_non_canon(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::IsNonCanon).await?;
Ok(HttpResponse::Ok().body("Non-canon set or updated for comic"))
}
/// Handler: sets or clears the "has no cast" flag on a comic.
pub(crate) async fn set_no_cast(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::HasNoCast).await?;
Ok(HttpResponse::Ok().body("No cast set or updated for comic"))
}
/// Handler: sets or clears the "has no location" flag on a comic.
pub(crate) async fn set_no_location(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::HasNoLocation).await?;
Ok(HttpResponse::Ok().body("No location set or updated for comic"))
}
/// Handler: sets or clears the "has no storyline" flag on a comic.
pub(crate) async fn set_no_storyline(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::HasNoStoryline).await?;
Ok(HttpResponse::Ok().body("No storyline set or updated for comic"))
}
/// Handler: sets or clears the "has no title" flag on a comic.
pub(crate) async fn set_no_title(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::HasNoTitle).await?;
Ok(HttpResponse::Ok().body("No title set or updated for comic"))
}
/// Handler: sets or clears the "has no tagline" flag on a comic.
pub(crate) async fn set_no_tagline(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
) -> Result<HttpResponse> {
set_flag(pool, request, auth, FlagType::HasNoTagline).await?;
Ok(HttpResponse::Ok().body("No tagline set or updated for comic"))
}
/// Shared implementation for all comic-flag endpoints.
///
/// Authorizes the caller, validates the body, updates the requested flag
/// column inside a single transaction, writes an audit log entry, and
/// commits. Any failure maps to the appropriate actix error response and
/// aborts the transaction (dropped without commit).
///
/// NOTE(review): `request.comic_id.into_inner()` is called several times,
/// which assumes `ComicId` is `Copy` — confirm against its definition.
#[allow(clippy::too_many_lines)]
async fn set_flag(
pool: web::Data<DbPool>,
request: web::Json<SetFlagBody>,
auth: AuthDetails,
flag: FlagType,
) -> Result<()> {
// Authorization first: caller must hold the comic-data permission.
ensure_is_authorized(&auth, token_permissions::CAN_CHANGE_COMIC_DATA)
.map_err(error::ErrorForbidden)?;
// Semantic validation of the request body (comic id validity).
ensure_is_valid(&*request).map_err(error::ErrorBadRequest)?;
let mut transaction = pool
.begin()
.await
.map_err(error::ErrorInternalServerError)?;
// Creates the comic row if missing so the UPDATE below has a target.
DatabaseComic::ensure_exists_by_id(&mut *transaction, request.comic_id.into_inner())
.await
.map_err(error::ErrorInternalServerError)?;
// Each flag maps to its own UPDATE statement plus the pair of
// human-readable log strings used for the audit entry below.
let (true_value_log_text, false_value_log_text, sql_result) = match flag {
FlagType::IsGuestComic => (
"to be a guest comic",
"to be a Jeph comic",
DatabaseComic::update_is_guest_comic_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::IsNonCanon => (
"to be non-canon",
"to be canon",
DatabaseComic::update_is_non_canon_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::HasNoCast => (
"to have no cast",
"to have cast",
DatabaseComic::update_has_no_cast_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::HasNoLocation => (
"to have no locations",
"to have locations",
DatabaseComic::update_has_no_location_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::HasNoStoryline => (
"to have no storylines",
"to have storylines",
DatabaseComic::update_has_no_storyline_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::HasNoTitle => (
"to have no title",
"to have a title",
DatabaseComic::update_has_no_title_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
FlagType::HasNoTagline => (
"to have no tagline",
"to have a tagline",
DatabaseComic::update_has_no_tagline_by_id(
&mut *transaction,
request.comic_id.into_inner(),
request.flag_value,
)
.await,
),
};
// The SQL error is surfaced only after the match so every arm shares
// the same error-mapping code.
sql_result.map_err(error::ErrorInternalServerError)?;
// Audit the change with the flag-appropriate wording.
LogEntry::log_action(
&mut *transaction,
request.token.to_string(),
format!(
"Set comic #{} {}",
request.comic_id,
if request.flag_value {
true_value_log_text
} else {
false_value_log_text
}
),
)
.await
.map_err(error::ErrorInternalServerError)?;
transaction
.commit()
.await
.map_err(error::ErrorInternalServerError)?;
Ok(())
}
/// The boolean comic attributes that `set_flag` can update.
pub enum FlagType {
IsGuestComic,
IsNonCanon,
HasNoCast,
HasNoLocation,
HasNoStoryline,
HasNoTitle,
HasNoTagline,
}
/// JSON request body shared by all flag-setting endpoints.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct SetFlagBody {
// Caller's auth token, recorded in the audit log.
token: Token,
// Comic whose flag is being changed.
comic_id: ComicId,
// New value for the selected flag.
flag_value: bool,
}
impl Validate for SetFlagBody {
type Invalidity = SetFlagBodyInvalidity;
/// Validates only the comic id; token and flag value need no
/// semantic checks here.
fn validate(&self) -> semval::ValidationResult<Self::Invalidity> {
ValidationContext::new()
.validate_with(&self.comic_id, SetFlagBodyInvalidity::ComicId)
.into()
}
}
/// Validation failures for `SetFlagBody`; displays as the inner invalidity.
#[derive(Copy, Clone, Debug, Display, Eq, PartialEq)]
pub(crate) enum SetFlagBodyInvalidity {
#[display("{0}")]
ComicId(ComicIdInvalidity),
}
|
#[macro_use]
extern crate criterion;
use criterion::{BatchSize, Criterion};
use hacspec_dev::prelude::*;
use hacspec_lib::prelude::*;
/// Criterion benchmarks comparing copying vs. non-copying `Seq` slicing
/// (at several chunk sizes) and concatenation, over 2048-byte random inputs.
fn bench(c: &mut Criterion) {
    for chunk_size in (10..2000).step_by(500) {
        c.bench_function(&format!("Seq slice no-copy {}", chunk_size), |b| {
            b.iter_batched(
                || ByteSeq::from_public_slice(&random_byte_vec(2048)),
                |seq| {
                    let _new_seq_slice = seq.into_slice_range(40..40 + chunk_size);
                },
                BatchSize::SmallInput,
            );
        });
        c.bench_function(&format!("Seq slice copy {}", chunk_size), |b| {
            b.iter_batched(
                || ByteSeq::from_public_slice(&random_byte_vec(2048)),
                |seq| {
                    let _new_seq_slice = seq.slice_range(40..40 + chunk_size);
                },
                BatchSize::SmallInput,
            );
        });
    }
    // Plain string literals: `format!` with no interpolation was redundant
    // (clippy::useless_format).
    c.bench_function("Seq concat no-copy", |b| {
        b.iter_batched(
            || {
                let seq1 = ByteSeq::from_public_slice(&random_byte_vec(2048));
                let seq2 = ByteSeq::from_public_slice(&random_byte_vec(2048));
                (seq1, seq2)
            },
            |(seq1, seq2)| {
                let _new_seq = seq1.concat_owned(seq2);
            },
            BatchSize::SmallInput,
        );
    });
    c.bench_function("Seq concat copy", |b| {
        b.iter_batched(
            || {
                let seq1 = ByteSeq::from_public_slice(&random_byte_vec(2048));
                let seq2 = ByteSeq::from_public_slice(&random_byte_vec(2048));
                (seq1, seq2)
            },
            |(seq1, seq2)| {
                let _new_seq = seq1.concat(&seq2);
            },
            BatchSize::SmallInput,
        );
    });
}
// Register the benchmark group and generate criterion's `main` entry point.
criterion_group!(benches, bench);
criterion_main!(benches);
|
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
/// Returns the floor reached after one instruction: '(' goes up one,
/// ')' goes down one, and any other character leaves the floor unchanged.
fn next_floor(floor: i32, inst: char) -> i32 {
    if inst == '(' {
        floor + 1
    } else if inst == ')' {
        floor - 1
    } else {
        floor
    }
}
fn main() {
let file = File::open("input.txt").expect("file not found");
let mut reader = BufReader::new(file);
let mut contents = String::new();
reader.read_to_string(&mut contents).expect("could not read input file");
// Fold the input characters, accumulating a 3-element tuple of
// (floor, position, position-of-first-basement).
let result = contents.chars().fold((0, 0, 0), |acc, ch| (
next_floor(acc.0, ch),
acc.1 + 1,
if acc.2 == 0 && next_floor(acc.0, ch) == -1 { acc.1 + 1 } else { acc.2 }
));
println!("A: {}", result.0);
println!("B: {}", result.2);
}
|
use serde_scan::scan;
/// Mutable state of the marble game.
#[derive(Debug)]
struct State {
// Value of the marble currently considered "current".
current_marble: Marble,
// Index of the current marble within `circle`.
current_marble_idx: usize,
// Value of the next marble to be placed.
next_marble: Marble,
num_players: usize,
// Marbles in play, stored in circle order.
circle: Vec<Marble>,
// Accumulated score per player.
points: Vec<usize>,
}
// A marble is identified by its numeric value.
type Marble = usize;
// Crate-local alias for boxed-dyn-error results.
type Result<T> = std::result::Result<T, std::boxed::Box<dyn std::error::Error>>;
/// Reads the player count and last-marble value from `input`, plays the
/// game, and prints the winning score.
fn main() -> Result<()> {
let input = std::fs::read_to_string("input")?;
let input_str = input.as_str();
// serde_scan parses the single-line puzzle header.
let (num_players, last_marble): (usize, usize) =
scan!("{} players; last marble is worth {} points" <- input_str)?;
let state = State::initial_state(num_players);
println!("{}", part1(state, last_marble)?);
Ok(())
}
/// Plays marbles 1..=`last_marble` and returns the highest player score.
///
/// The loop is bounded by the value of the *next* marble to place rather
/// than the current one: when `last_marble` is a multiple of 23 it is
/// scored and removed instead of ever becoming current, so the original
/// `current_marble != last_marble` condition could never be satisfied and
/// the loop would spin forever.
fn part1(mut state: State, last_marble: Marble) -> Result<usize> {
    while state.next_marble <= last_marble {
        state.place_marble();
    }
    state
        .points
        .iter()
        .copied() // `copied` (not `cloned`) — `usize` is `Copy`
        .max()
        .ok_or_else(|| std::boxed::Box::from("error"))
}
impl State {
/// Fresh game: marble 0 alone in the circle, marble 1 up next, all
/// player scores zero.
fn initial_state(num_players: usize) -> Self {
State {
current_marble: 0,
current_marble_idx: 0,
next_marble: 1,
num_players,
circle: vec![0],
points: vec![0; num_players],
}
}
/// Places `next_marble`. Multiples of 23 are kept by the current
/// player, who also scores the marble 7 positions counter-clockwise
/// (which is removed); every other marble is inserted two positions
/// clockwise of the current marble.
fn place_marble(&mut self) {
if self.next_marble % 23 == 0 {
// Marble n is placed by player (n - 1) mod num_players.
let current_player = (self.next_marble - 1) % self.num_players;
// rem_euclid keeps the index non-negative when stepping back 7.
let remove_marble_idx =
(self.current_marble_idx as i32 - 7).rem_euclid(self.circle.len() as i32) as usize;
let removed_marble_value = self.circle.remove(remove_marble_idx);
self.points[current_player] += self.next_marble + removed_marble_value;
// `% len` (against the now-shorter circle) handles the case where
// the removed index equals the new length. NOTE(review): assumes
// the circle never empties here — plausible since removals only
// happen every 23rd marble, but confirm.
self.current_marble_idx = remove_marble_idx % self.circle.len();
self.current_marble = self.circle[remove_marble_idx % self.circle.len()];
} else {
// Normal placement: two positions clockwise, wrapping around.
let next_marble_idx = (self.current_marble_idx + 2) % self.circle.len();
self.circle.insert(next_marble_idx, self.next_marble);
self.current_marble = self.next_marble;
self.current_marble_idx = next_marble_idx;
}
self.next_marble += 1;
}
}
/// Compact single-line debug rendering of the game state.
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"current marble: {}, current_idx: {}, circle: {:?}",
self.current_marble, self.current_marble_idx, self.circle
)
}
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Build script. Just copies default.toml from the src to the target dir.
use std::env;
use std::path::{Path, PathBuf};
/// Build script: copies `src/default.toml` into the crate's manifest
/// directory as `default.toml`.
fn main() {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    let src = Path::new("src/default.toml");
    // `join` on a fresh Path is equivalent to the push/push sequence.
    let dest = Path::new(&manifest_dir).join("default.toml");
    std::fs::copy(src, dest).unwrap();
}
|
#![allow(unused_variables)]
#![allow(dead_code)]
#![feature(fs_walk)]
#![feature(split_off)]
#![feature(path_relative_from)]
#![feature(convert)]
#![feature(path_ext)]
// common
extern crate rustc_serialize;
#[macro_use]extern crate clap;
#[macro_use]extern crate log;
extern crate fern;
extern crate time;
// server
extern crate iron;
extern crate router;
extern crate logger;
extern crate persistent;
extern crate urlencoded;
extern crate url;
extern crate crypto;
// client
extern crate copperline;
extern crate tabwriter;
extern crate terminal_size;
mod server;
mod client;
mod network_file;
mod progress;
mod http;
mod error;
mod torrent;
mod download_handle;
use clap::{App,Arg,SubCommand,AppSettings};
/// Entry point: configures logging, parses CLI arguments, and dispatches
/// to the server (`serve`), the .part-file inspector (`show`), or the
/// interactive client (default).
fn main() {
// Timestamped log lines to stdout at Trace level.
let logger_config = fern::DispatchConfig {
format: Box::new(|msg: &str, level: &log::LogLevel, _location: &log::LogLocation| {
format!("[{}][{}] {}", time::now().strftime("%Y-%m-%d][%H:%M:%S").unwrap(), level, msg)
}),
// output: vec![fern::OutputConfig::stdout(), fern::OutputConfig::file("output.log")],
output: vec![fern::OutputConfig::stdout()],
level: log::LogLevelFilter::Trace,
};
fern::init_global_logger(logger_config, log::LogLevelFilter::Trace)
.unwrap();
let matches = App::new("fspl")
.author("Ellis Adigvom<ellisadigvom@gmail.com>")
.version(&crate_version!())
.about("file server for getting past UMaT's proxy restrictions")
.setting(AppSettings::UnifiedHelpMessage)
.setting(AppSettings::SubcommandsNegateReqs)
.versionless_subcommands(true)
.subcommand(SubCommand::with_name("serve")
.about("runs the file server")
.arg(Arg::from_usage("-a --addr=[addr] 'the address to listen on'"))
.arg(Arg::from_usage("-p --port=[port] 'the port to listen on'"))
.arg(Arg::from_usage("[directory] 'the directory to serve files from'")))
.subcommand(SubCommand::with_name("show")
.about("prints info about the given .part file")
.arg(Arg::from_usage("<file>... 'the file to print info about'")))
.arg(Arg::from_usage("[remote] 'the remote server to connect to'"))
.get_matches();
match matches.subcommand() {
("serve", Some(matches)) => {
info!("running server");
// --addr wins outright; otherwise bind all interfaces on --port
// (default 8080). Note an explicit --addr silently ignores --port.
let addr = matches.value_of("addr")
.map(|s| s.to_owned())
.unwrap_or(
format!("0.0.0.0:{}",
matches.value_of("port")
.unwrap_or("8080")));
let dir = matches.value_of("directory").unwrap_or(".").to_owned();
server::run(addr, dir);
}
("show", Some(matches)) => {
info!("running show");
client::show(matches.values_of("file").unwrap());
}
// No subcommand: run the interactive client against [remote].
_ => {
info!("running client");
let addr = matches.value_of("remote").unwrap_or("localhost:8080");
let mut client = client::Client::new(&addr);
client.run();
}
}
}
|
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, Example, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
/// The `rename` filter command: produces a table with renamed columns.
#[derive(Clone)]
pub struct Rename;
impl Command for Rename {
fn name(&self) -> &str {
"rename"
}
/// `rename` takes either positional new names for the leading columns,
/// or `-c [old new]` to rename one specific column.
fn signature(&self) -> Signature {
Signature::build("rename")
.named(
"column",
SyntaxShape::List(Box::new(SyntaxShape::String)),
"column name to be changed",
Some('c'),
)
.rest("rest", SyntaxShape::String, "the new names for the columns")
.category(Category::Filters)
}
fn usage(&self) -> &str {
"Creates a new table with columns renamed."
}
// Thin shim delegating to the free `rename` function below.
fn run(
&self,
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
rename(engine_state, stack, call, input)
}
// These examples are executed by the test harness (see the test module),
// so the expected values must match the command's real output.
fn examples(&self) -> Vec<Example> {
vec![
Example {
description: "Rename a column",
example: "[[a, b]; [1, 2]] | rename my_column",
result: Some(Value::List {
vals: vec![Value::Record {
cols: vec!["my_column".to_string(), "b".to_string()],
vals: vec![Value::test_int(1), Value::test_int(2)],
span: Span::test_data(),
}],
span: Span::test_data(),
}),
},
Example {
description: "Rename many columns",
example: "[[a, b, c]; [1, 2, 3]] | rename eggs ham bacon",
result: Some(Value::List {
vals: vec![Value::Record {
cols: vec!["eggs".to_string(), "ham".to_string(), "bacon".to_string()],
vals: vec![Value::test_int(1), Value::test_int(2), Value::test_int(3)],
span: Span::test_data(),
}],
span: Span::test_data(),
}),
},
Example {
description: "Rename a specific column",
example: "[[a, b, c]; [1, 2, 3]] | rename -c [a ham]",
result: Some(Value::List {
vals: vec![Value::Record {
cols: vec!["ham".to_string(), "b".to_string(), "c".to_string()],
vals: vec![Value::test_int(1), Value::test_int(2), Value::test_int(3)],
span: Span::test_data(),
}],
span: Span::test_data(),
}),
},
]
}
}
fn rename(
engine_state: &EngineState,
stack: &mut Stack,
call: &Call,
input: PipelineData,
) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
let specified_column: Option<Vec<String>> = call.get_flag(engine_state, stack, "column")?;
// get the span for the column's name to be changed and for the given list
let (specified_col_span, list_span) = if let Some(Value::List {
vals: columns,
span: column_span,
}) = call.get_flag(engine_state, stack, "column")?
{
(Some(columns[0].span()?), column_span)
} else {
(None, call.head)
};
if let Some(ref cols) = specified_column {
if cols.len() != 2 {
return Err(ShellError::UnsupportedInput(
"The list must contain only two values: the column's name and its replacement value"
.to_string(),
list_span,
));
}
}
let columns: Vec<String> = call.rest(engine_state, stack, 0)?;
let metadata = input.metadata();
let head_span = call.head;
input
.map(
move |item| match item {
Value::Record {
mut cols,
vals,
span,
} => {
match &specified_column {
Some(c) => {
// check if the specified column to be renamed exists
if !cols.contains(&c[0]) {
return Value::Error {
error: ShellError::UnsupportedInput(
"The specified column does not exist".to_string(),
specified_col_span.unwrap_or(span),
),
};
}
for (idx, val) in cols.iter_mut().enumerate() {
if *val == c[0] {
cols[idx] = c[1].to_string();
break;
}
}
}
None => {
for (idx, val) in columns.iter().enumerate() {
if idx >= cols.len() {
// skip extra new columns names if we already reached the final column
break;
}
cols[idx] = val.clone();
}
}
}
Value::Record { cols, vals, span }
}
x => Value::Error {
error: ShellError::UnsupportedInput(
"can't rename: input is not table, so no column names available for rename"
.to_string(),
x.span().unwrap_or(head_span),
),
},
},
engine_state.ctrlc.clone(),
)
.map(|x| x.set_metadata(metadata))
}
#[cfg(test)]
mod test {
use super::*;
// Runs every example declared in `Rename::examples` and checks its
// declared `result` against actual evaluation.
#[test]
fn test_examples() {
use crate::test_examples;
test_examples(Rename {})
}
}
|
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unstable_features,
unused_import_braces,
unused_qualifications
)]
//! po2 is a command line application based on Pullover
use pullover::{Attachment, Monospace, Notification, Priority, Sound, HTML};
use std::path::PathBuf;
use std::str::FromStr;
use structopt::StructOpt;
// NOTE: the `///` doc comments on the fields below double as the CLI help
// text rendered by structopt, so their wording is user-facing — use plain
// `//` comments for anything not meant for `--help` output.
#[derive(StructOpt)]
#[structopt(about, author)]
struct Opts {
/// your application's API token <https://pushover.net/api#identifiers>
#[structopt(short, long, env = "PUSHOVER_TOKEN")]
token: String,
/// the user / group key (not e-mail address) of your user (or you) <https://pushover.net/api#identifiers>
#[structopt(short, long, env = "PUSHOVER_USER")]
user: String,
/// your message <https://pushover.net/api#messages>
#[structopt(short, long)]
message: String,
/// verbose
#[structopt(short, long)]
verbose: bool,
/// To enable HTML formatting <https://pushover.net/api#html>
#[structopt(long)]
html: bool,
/// To enable monospace messages <https://pushover.net/api#html>
#[structopt(long)]
monospace: bool,
/// your user's device name to send the message directly to that device, rather than all of the user's devices <https://pushover.net/api#identifiers>
#[structopt(long)]
device: Option<String>,
/// your message's title, otherwise your app's name is used <https://pushover.net/api#messages>
#[structopt(long)]
title: Option<String>,
/// a Unix timestamp of your message's date and time to display to the user, rather than the time your message is received by our API <https://pushover.net/api#timestamp>
#[structopt(long)]
timestamp: Option<u64>,
/// attach file as notification attachment
#[structopt(short, long)]
file: Option<PathBuf>,
/// Messages may be sent with a different priority that affects how the message is presented to the user e.g. -2, -1, 0, 1, 2 <https://pushover.net/api#priority>
#[structopt(long)]
priority: Option<String>,
/// Users can choose from a number of different default sounds to play when receiving notifications <https://pushover.net/api#sounds>
#[structopt(long)]
sound: Option<String>,
/// a supplementary URL to show with your message <https://pushover.net/api#urls>
#[structopt(long)]
url: Option<String>,
/// a title for your supplementary URL, otherwise just the URL is shown <https://pushover.net/api#urls>
#[structopt(long)]
url_title: Option<String>,
}
/// Builds a Pushover notification from the CLI options and sends it,
/// printing the raw response when `--verbose` is set.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let opts: Opts = Opts::from_args();
let mut notification = Notification::new(&opts.token, &opts.user, &opts.message);
// set extra options
if let Some(ref d) = opts.device {
notification.request.device = Some(d.into());
}
if let Some(ref t) = opts.title {
notification.request.title = Some(t.into());
}
if let Some(ref t) = opts.timestamp {
notification.request.timestamp = Some(*t);
}
// Priority and sound are parsed from their string forms; invalid values
// abort with the FromStr error via `?`.
if let Some(ref p) = opts.priority {
notification.request.priority = Some(Priority::from_str(p)?);
}
if let Some(ref s) = opts.sound {
notification.request.sound = Some(Sound::from_str(s)?);
}
// url_title is only honored when a url is also supplied.
if let Some(ref u) = opts.url {
notification.request.url = Some(u.into());
if let Some(ref t) = opts.url_title {
notification.request.url_title = Some(t.into());
}
}
// NOTE(review): --monospace is only applied when --html is also set;
// the Pushover API treats html and monospace as mutually exclusive, so
// confirm this nesting is intentional.
if opts.html {
notification.request.html = Some(HTML::Enabled);
if opts.monospace {
notification.request.monospace = Some(Monospace::Enabled);
}
}
// send request with file as attachment
// `attachment` is declared outside the `if` so it outlives the borrow
// taken by `notification.attach`.
let attachment;
if let Some(p) = &opts.file {
attachment = Attachment::from_path(p).await?;
notification.attach(&attachment);
}
// send request
let res = notification.send().await?;
if opts.verbose {
println!("{:?}", res);
}
Ok(())
}
|
use proc_macro::{self, TokenStream};
use quote::{quote, ToTokens};
use syn::{
parse_macro_input,
DataEnum,
Data,
DeriveInput,
Fields
};
/// Derives `From<FieldType>` impls for enum variants tagged `#[quick_from]`.
///
/// Each tagged variant must have exactly one unnamed field; the macro
/// generates `impl From<FieldType> for Enum` wrapping the value in that
/// variant. Untagged variants are skipped; non-enum inputs produce a
/// compile error.
#[proc_macro_derive(QuickFrom, attributes(quick_from))]
pub fn quick_from(input: TokenStream) -> TokenStream {
    let DeriveInput { ident, data, generics, .. } = parse_macro_input!(input);
    let generics = generics.to_token_stream();
    let variants = if let Data::Enum(DataEnum { variants, .. }) = data {
        variants
    } else {
        return quote!{ compile_error!("QuickFrom only accepts enums") }.into()
    };
    let mut out = TokenStream::new();
    for variant in variants {
        // `any` replaces the clippy-flagged `find(..).is_some()` pattern.
        let has_attr = variant.attrs.iter().any(|attr| {
            attr.path.get_ident().map_or(false, |id| id == "quick_from")
        });
        if !has_attr {
            continue
        }
        let enum_type = &ident;
        let var_name = variant.ident;
        let var_type = if let Fields::Unnamed(fields) = variant.fields {
            if fields.unnamed.len() != 1 {
                return quote!{
                    compile_error!("QuickFrom #[quick_from] variant must have \
                        exactly one unnamed field")
                }.into()
            }
            fields.unnamed.first().unwrap().ty.clone()
        } else {
            return quote!{
                compile_error!("QuickFrom #[quick_from] variant must have \
                    exactly one unnamed field")
            }.into()
        };
        // Emit the From impl for this variant and accumulate it.
        let x: TokenStream = quote!{
            impl#generics From<#var_type> for #enum_type#generics {
                fn from(x : #var_type) -> Self {
                    Self::#var_name(x)
                }
            }
        }.into();
        out.extend(x);
    }
    out.into()
}
|
//! Advertises this device to Spotify clients in the local network.
//!
//! This device will show up in the list of "available devices".
//! Once it is selected from the list, [`Credentials`] are received.
//! Those can be used to establish a new Session with [`librespot_core`].
//!
//! This library uses mDNS and DNS-SD so that other devices can find it,
//! and spawns an http server to answer requests of Spotify clients.
mod server;
use std::{
borrow::Cow,
io,
pin::Pin,
task::{Context, Poll},
};
use futures_core::Stream;
use thiserror::Error;
use self::server::DiscoveryServer;
pub use crate::core::Error;
use librespot_core as core;
/// Credentials to be used in [`librespot`](`librespot_core`).
pub use crate::core::authentication::Credentials;
/// Determining the icon in the list of available devices.
pub use crate::core::config::DeviceType;
/// Makes this device visible to Spotify clients in the local network.
///
/// `Discovery` implements the [`Stream`] trait. Every time this device
/// is selected in the list of available devices, it yields [`Credentials`].
pub struct Discovery {
// HTTP server answering Spotify client requests; also the credential source.
server: DiscoveryServer,
// mDNS registration handle; kept alive so the record stays advertised.
#[cfg(not(feature = "with-dns-sd"))]
_svc: libmdns::Service,
// dns-sd registration handle (platform DNS-SD backend).
#[cfg(feature = "with-dns-sd")]
_svc: dns_sd::DNSService,
}
/// A builder for [`Discovery`].
pub struct Builder {
server_config: server::Config,
// Listening port; 0 means "any free port".
port: u16,
// IPs to bind/advertise on; empty means all interfaces.
zeroconf_ip: Vec<std::net::IpAddr>,
}
/// Errors that can occur while setting up a [`Discovery`] instance.
#[derive(Debug, Error)]
pub enum DiscoveryError {
#[error("Creating SHA1 block cipher failed")]
AesError(#[from] aes::cipher::InvalidLength),
#[error("Setting up dns-sd failed: {0}")]
DnsSdError(#[from] io::Error),
#[error("Creating SHA1 HMAC failed for base key {0:?}")]
HmacError(Vec<u8>),
#[error("Setting up the HTTP server failed: {0}")]
HttpServerError(#[from] hyper::Error),
#[error("Missing params for key {0}")]
ParamsError(&'static str),
}
impl From<DiscoveryError> for Error {
fn from(err: DiscoveryError) -> Self {
match err {
DiscoveryError::AesError(_) => Error::unavailable(err),
DiscoveryError::DnsSdError(_) => Error::unavailable(err),
DiscoveryError::HmacError(_) => Error::invalid_argument(err),
DiscoveryError::HttpServerError(_) => Error::unavailable(err),
DiscoveryError::ParamsError(_) => Error::invalid_argument(err),
}
}
}
impl Builder {
/// Starts a new builder using the provided device and client IDs.
pub fn new<T: Into<String>>(device_id: T, client_id: T) -> Self {
Self {
server_config: server::Config {
name: "Librespot".into(),
device_type: DeviceType::default(),
device_id: device_id.into(),
client_id: client_id.into(),
},
port: 0,
zeroconf_ip: vec![],
}
}
/// Sets the name to be displayed. Default is `"Librespot"`.
pub fn name(mut self, name: impl Into<Cow<'static, str>>) -> Self {
self.server_config.name = name.into();
self
}
/// Sets the device type which is visible as icon in other Spotify clients. Default is `Speaker`.
pub fn device_type(mut self, device_type: DeviceType) -> Self {
self.server_config.device_type = device_type;
self
}
/// Set the ip addresses on which it should listen to incoming connections. The default is all interfaces.
pub fn zeroconf_ip(mut self, zeroconf_ip: Vec<std::net::IpAddr>) -> Self {
self.zeroconf_ip = zeroconf_ip;
self
}
/// Sets the port on which it should listen to incoming connections.
/// The default value `0` means any port.
pub fn port(mut self, port: u16) -> Self {
self.port = port;
self
}
/// Sets up the [`Discovery`] instance.
///
/// # Errors
/// If setting up the mdns service or creating the server fails, this function returns an error.
pub fn launch(self) -> Result<Discovery, Error> {
// `port` is passed by &mut so the server can report the actual
// port it bound when 0 ("any") was requested.
let mut port = self.port;
let name = self.server_config.name.clone().into_owned();
// NOTE(review): the double `?` unwraps a nested Result from
// DiscoveryServer::new — confirm the inner error type converts as intended.
let server = DiscoveryServer::new(self.server_config, &mut port)??;
let _zeroconf_ip = self.zeroconf_ip;
let svc;
// Platform DNS-SD backend: register the service record directly.
#[cfg(feature = "with-dns-sd")]
{
svc = dns_sd::DNSService::register(
Some(name.as_ref()),
"_spotify-connect._tcp",
None,
None,
port,
&["VERSION=1.0", "CPath=/"],
)?;
}
// Pure-Rust mDNS backend: spawn a responder (optionally bound to
// specific IPs) and register the service on it.
#[cfg(not(feature = "with-dns-sd"))]
{
let _svc = if !_zeroconf_ip.is_empty() {
libmdns::Responder::spawn_with_ip_list(
&tokio::runtime::Handle::current(),
_zeroconf_ip,
)?
} else {
libmdns::Responder::spawn(&tokio::runtime::Handle::current())?
};
svc = _svc.register(
"_spotify-connect._tcp".to_owned(),
name,
port,
&["VERSION=1.0", "CPath=/"],
);
}
Ok(Discovery { server, _svc: svc })
}
}
impl Discovery {
/// Starts a [`Builder`] with the provided device id.
pub fn builder<T: Into<String>>(device_id: T, client_id: T) -> Builder {
Builder::new(device_id, client_id)
}
/// Create a new instance with the specified device id and default parameters.
pub fn new<T: Into<String>>(device_id: T, client_id: T) -> Result<Self, Error> {
Self::builder(device_id, client_id).launch()
}
}
/// Delegates polling to the inner HTTP server, which yields a
/// [`Credentials`] item each time a client selects this device.
impl Stream for Discovery {
type Item = Credentials;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.server).poll_next(cx)
}
}
|
use tui::widgets::{Block, Borders, List, Text, Paragraph, ListState};
use tui::widgets::canvas::{Canvas, Points};
use tui::layout::{Layout, Constraint, Alignment, Direction};
use tui::style::{Style, Color};
use tui::{Frame, backend};
use crossterm::event::KeyCode;
use crate::computer::{Computer, SCR_ADDRESS, KBD_ADDRESS};
use crate::assembler::to_asm;
use crate::utils::get_bit;
/// What keyboard input currently controls.
#[derive(Eq, PartialEq)]
enum InputMode {
// Single-key UI commands (step, scroll, quit, ...).
Normal,
// Typing a number to store into RAM at the cursor.
Editing,
// Keys are forwarded to the emulated keyboard register.
Keyboard
}
/// Top-level TUI application state for the emulator.
pub struct App {
// Name of the loaded program file, shown in the UI.
filename: String,
// The emulated machine (ROM, RAM, registers, PC).
computer: Computer,
// Scroll/selection state of the ROM listing.
rom_cursor: ListState,
// Scroll/selection state of the RAM listing.
ram_cursor: ListState,
// Digits typed so far while in Editing mode.
input: String,
input_mode: InputMode,
// Terminal cursor position to restore, if any.
pub cursor_pos: Option<(u16, u16)>,
is_full_screen: bool
}
impl App {
/// Creates an `App` with `program` loaded into ROM and both memory
/// cursors positioned at address 0.
pub fn new(filename: String, program: Vec<i16>) -> App {
    let mut rom_cursor = ListState::default();
    rom_cursor.select(Some(0));
    let mut ram_cursor = ListState::default();
    ram_cursor.select(Some(0));
    let mut computer = Computer::new();
    // Load each instruction into consecutive ROM cells.
    program
        .iter()
        .enumerate()
        .for_each(|(i, instr)| computer.rom[i] = Some(*instr));
    App {
        filename,
        computer,
        rom_cursor,
        ram_cursor,
        input: String::new(),
        input_mode: InputMode::Normal,
        cursor_pos: None,
        is_full_screen: false,
    }
}
/// Clears the pending byte in the emulated keyboard register when in
/// keyboard-input mode; otherwise does nothing.
pub fn clear_input_event(&mut self) {
    if self.input_mode == InputMode::Keyboard {
        self.computer.memory[KBD_ADDRESS] = 0;
    }
}
/// Handles one key event according to the current input mode.
/// Returns `true` when the application should quit.
pub fn handle_input_event(&mut self, event: KeyCode) -> bool {
    match self.input_mode {
        // Editing: build up a (possibly negative) number to store in RAM.
        InputMode::Editing => match event {
            KeyCode::Char(c @ '0'..='9') | KeyCode::Char(c @ '-') => {
                self.input.push(c);
            }
            KeyCode::Backspace => {
                self.input.pop();
            }
            KeyCode::Enter => {
                let input: String = self.input.drain(..).collect();
                let cursor = self.ram_cursor.selected().unwrap_or(0);
                // Ignore values that do not parse (e.g. "", "-", or
                // out-of-range numbers) instead of panicking the whole
                // UI, which the previous `unwrap()` did.
                if let Ok(value) = input.parse() {
                    self.computer.memory[cursor] = value;
                }
                self.input_mode = InputMode::Normal;
            }
            KeyCode::Esc => {
                self.input.drain(..);
                self.input_mode = InputMode::Normal
            }
            _ => {}
        },
        // Normal: single-key UI commands.
        InputMode::Normal => match event {
            KeyCode::Char('n') => {
                // Execute one instruction and track the PC in the ROM list.
                self.computer.step();
                self.rom_cursor.select(Some(self.computer.pc as usize));
            }
            KeyCode::Char('f') => {
                self.is_full_screen = !self.is_full_screen;
            }
            KeyCode::Char('j') => {
                if let Some(i) = self.ram_cursor.selected() {
                    self.ram_cursor.select(Some(i + 1));
                }
            }
            KeyCode::Char('k') => {
                if let Some(i) = self.ram_cursor.selected() {
                    if i != 0 {
                        self.ram_cursor.select(Some(i - 1));
                    }
                }
            }
            KeyCode::Char('r') => {
                self.input_mode = InputMode::Editing;
            }
            KeyCode::Char('b') => {
                self.input_mode = InputMode::Keyboard;
            }
            KeyCode::Char('q') => {
                return true;
            }
            _ => {}
        },
        // Keyboard: forward raw characters to the keyboard register.
        InputMode::Keyboard => match event {
            KeyCode::Esc => {
                self.input_mode = InputMode::Normal;
            }
            KeyCode::Char(c) => {
                self.computer.memory[KBD_ADDRESS] = c as i16;
            }
            _ => {}
        },
    }
    false
}
/// Draws one frame of the UI: either the full-screen emulated display, or the
/// normal dashboard (ROM, RAM, registers, screen) plus the bottom status bar.
pub fn draw<B: backend::Backend>(&mut self, f: &mut Frame<B>) {
    // Vertical split: the main dashboard on top, one status/input line below.
    let rows = Layout::default()
        .direction(Direction::Vertical)
        .constraints(vec![Constraint::Min(4), Constraint::Length(1)])
        .split(f.size());
    // Three columns: ROM (20%), RAM + registers (20%), screen (60%).
    let columns = Layout::default()
        .direction(Direction::Horizontal)
        .constraints(vec![Constraint::Percentage(20), Constraint::Percentage(20), Constraint::Percentage(60)])
        .split(rows[0]);
    // Column 1: ROM listing with the PC box underneath.
    let column1 = Layout::default()
        .direction(Direction::Vertical)
        .constraints(vec![Constraint::Min(4), Constraint::Length(3)])
        .split(columns[0]);
    // Column 2: RAM listing with the D and A register boxes underneath.
    let column2 = Layout::default()
        .direction(Direction::Vertical)
        .constraints(vec![Constraint::Min(4), Constraint::Length(3), Constraint::Length(3)])
        .split(columns[1]);
    // Column 3: emulated screen on top; the bottom 40% is left empty.
    let column3 = Layout::default()
        .direction(Direction::Vertical)
        .constraints(vec![Constraint::Percentage(60), Constraint::Percentage(40)])
        .split(columns[2]);
    // ROM view: one line per slot, disassembled when an instruction is loaded.
    let text = self.computer.rom.iter().enumerate()
        .map(|(i, v)| {
            let asm = match v {
                Some(v) => to_asm(*v),
                None => "".to_owned()
            };
            Text::raw(format!("{:5}| {}", i, asm))
        });
    let rom_block = List::new(text)
        .block(Block::default().title("[ROM]").borders(Borders::ALL))
        .highlight_symbol(">")
        .highlight_style(Style::default().fg(Color::Yellow));
    // RAM view: "address | value" for every memory cell.
    let text = self.computer.memory.iter().enumerate()
        .map(|(i, v)| Text::raw(format!("{:5}| {}", i, v)));
    let ram_block = List::new(text)
        .block(Block::default().title("[RAM]").borders(Borders::ALL))
        .highlight_style(Style::default().fg(Color::Yellow));
    // Single-value boxes for D, A and the program counter.
    let text = [Text::raw(self.computer.d_register.to_string())];
    let d_register_block = Paragraph::new(text.iter())
        .block(Block::default().title("[D Register]").borders(Borders::ALL))
        .alignment(Alignment::Center);
    let text = [Text::raw(self.computer.a_register.to_string())];
    let a_register_block = Paragraph::new(text.iter())
        .block(Block::default().title("[A Register]").borders(Borders::ALL))
        .alignment(Alignment::Center);
    let text = [Text::raw(self.computer.pc.to_string())];
    let pc_block = Paragraph::new(text.iter())
        .block(Block::default().title("[PC]").borders(Borders::ALL))
        .alignment(Alignment::Center);
    // Screen canvas: plot every lit pixel from the memory-mapped screen.
    let dots = Points {coords: &self.get_screen_dots(), color: Color::White};
    let screen_block = Canvas::default()
        .block(Block::default().borders(Borders::ALL).title("[Screen]"))
        .paint(|ctx| {
            ctx.draw(&dots);
        })
        .x_bounds([0.0, 512.0])
        .y_bounds([0.0, 256.0]);
    // Bottom bar content depends on the input mode; editing mode also
    // positions a visible text cursor at the end of the prompt.
    let (text, style, cursor_pos) = match self.input_mode {
        InputMode::Editing => {
            let prompt = format!(
                " Enter the new value at memory address ({}): {}",
                self.ram_cursor.selected().unwrap_or(0),
                self.input
            );
            let cursor_pos = Some((prompt.len() as u16, rows[1].y));
            let text = [Text::raw(prompt)];
            let style = Style::default().bg(Color::Yellow).fg(Color::Black);
            (text, style, cursor_pos)
        }
        InputMode::Normal => {
            // Normal mode shows the loaded program's filename, no cursor.
            let text = [Text::raw(format!(" {}", self.filename))];
            let style = Style::default().bg(Color::White).fg(Color::Black);
            let cursor_pos = None;
            (text, style, cursor_pos)
        }
        InputMode::Keyboard => {
            let prompt = format!(" [Keyboard mode] {}", self.computer.memory[KBD_ADDRESS]);
            let text = [Text::raw(prompt)];
            let style = Style::default().bg(Color::Yellow).fg(Color::Black);
            let cursor_pos = None;
            (text, style, cursor_pos)
        }
    };
    let command_input = Paragraph::new(text.iter()).style(style);
    // Stash the cursor position so the event loop can place the terminal cursor.
    self.cursor_pos = cursor_pos;
    if self.is_full_screen {
        // Full-screen mode: only the emulated display, over the whole area.
        f.render_widget(screen_block, rows[0]);
    } else {
        f.render_stateful_widget(rom_block, column1[0], &mut self.rom_cursor);
        f.render_widget(pc_block, column1[1]);
        f.render_stateful_widget(ram_block, column2[0], &mut self.ram_cursor);
        f.render_widget(d_register_block, column2[1]);
        f.render_widget(a_register_block, column2[2]);
        f.render_widget(screen_block, column3[0]);
        f.render_widget(command_input, rows[1]);
    }
}
/// Collects canvas coordinates for every lit pixel of the memory-mapped
/// screen. Each 16-bit word in [SCR_ADDRESS, KBD_ADDRESS) encodes 16
/// horizontally adjacent pixels on a 512-wide display; y is flipped so that
/// memory row 0 is drawn at the top of the 256-row canvas.
fn get_screen_dots(&self) -> Vec<(f64, f64)> {
    let mut dots = Vec::new();
    for (idx, &word) in self.computer.memory[SCR_ADDRESS..KBD_ADDRESS]
        .iter()
        .enumerate()
    {
        // An all-zero word contributes no pixels; skip it early.
        if word == 0 {
            continue;
        }
        let base = idx * 16;
        let x0 = (base % 512) as f64;
        let y = (256 - 1 - (base / 512)) as f64;
        for bit in 0..16 {
            if get_bit(word, bit) {
                dots.push((x0 + (bit as f64), y));
            }
        }
    }
    dots
}
}
|
use std::time;
#[macro_use]
mod util;
mod client;
mod core;
mod endpoints;
mod http;
mod verify;
pub use crate::client::Client;
pub use crate::core::{Config, Error, Info, Random, Result};
/// Hex-encoded hash identifying the drand mainnet chain.
/// (`'static` is implied on references in `const` items, so the explicit
/// lifetime was redundant.)
const MAINNET_CHAIN_HASH: &str =
    "8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce";
// Trait for DrandClient, must eventually move to Client type.
trait DrandClient {
    /// Returns parameters of the chain this client is connected to:
    /// the public key, when it started, and how frequently it updates.
    fn to_info(&self) -> Result<Info>;
    /// Returns the most recent round of randomness that will be available
    /// at time `t` for the current client.
    fn round_at(&self, t: time::SystemTime) -> u128;
    /// Returns the randomness at `round` or an error.
    /// Requesting round = 0 will return randomness for the most
    /// recent known round.
    fn get(&self, round: u128) -> Result<Random>;
    /// Returns new randomness as it becomes available.
    fn watch(&self) -> Result<Box<dyn Iterator<Item = Result<Random>>>>;
}
|
// ---------------------------------------------------------------------------
// svd2rust-generated reader/writer type aliases for the GINTMSK register.
// Each field gets a `*_R` single-bit reader alias and, when the field is
// writable, a `*_W` single-bit writer alias parameterised by the owning
// register spec `REG` and the bit offset `O`.
// ---------------------------------------------------------------------------
#[doc = "Register `GINTMSK` reader"]
pub type R = crate::R<GINTMSK_SPEC>;
#[doc = "Register `GINTMSK` writer"]
pub type W = crate::W<GINTMSK_SPEC>;
#[doc = "Field `MMISM` reader - Mode mismatch interrupt mask"]
pub type MMISM_R = crate::BitReader;
#[doc = "Field `MMISM` writer - Mode mismatch interrupt mask"]
pub type MMISM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OTGINT` reader - OTG interrupt mask"]
pub type OTGINT_R = crate::BitReader;
#[doc = "Field `OTGINT` writer - OTG interrupt mask"]
pub type OTGINT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SOFM` reader - Start of frame mask"]
pub type SOFM_R = crate::BitReader;
#[doc = "Field `SOFM` writer - Start of frame mask"]
pub type SOFM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RXFLVLM` reader - Receive FIFO non-empty mask"]
pub type RXFLVLM_R = crate::BitReader;
#[doc = "Field `RXFLVLM` writer - Receive FIFO non-empty mask"]
pub type RXFLVLM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `NPTXFEM` reader - Non-periodic TxFIFO empty mask"]
pub type NPTXFEM_R = crate::BitReader;
#[doc = "Field `NPTXFEM` writer - Non-periodic TxFIFO empty mask"]
pub type NPTXFEM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GINAKEFFM` reader - Global non-periodic IN NAK effective mask"]
pub type GINAKEFFM_R = crate::BitReader;
#[doc = "Field `GINAKEFFM` writer - Global non-periodic IN NAK effective mask"]
pub type GINAKEFFM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GONAKEFFM` reader - Global OUT NAK effective mask"]
pub type GONAKEFFM_R = crate::BitReader;
#[doc = "Field `GONAKEFFM` writer - Global OUT NAK effective mask"]
pub type GONAKEFFM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ESUSPM` reader - Early suspend mask"]
pub type ESUSPM_R = crate::BitReader;
#[doc = "Field `ESUSPM` writer - Early suspend mask"]
pub type ESUSPM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USBSUSPM` reader - USB suspend mask"]
pub type USBSUSPM_R = crate::BitReader;
#[doc = "Field `USBSUSPM` writer - USB suspend mask"]
pub type USBSUSPM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `USBRST` reader - USB reset mask"]
pub type USBRST_R = crate::BitReader;
#[doc = "Field `USBRST` writer - USB reset mask"]
pub type USBRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ENUMDNEM` reader - Enumeration done mask"]
pub type ENUMDNEM_R = crate::BitReader;
#[doc = "Field `ENUMDNEM` writer - Enumeration done mask"]
pub type ENUMDNEM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ISOODRPM` reader - Isochronous OUT packet dropped interrupt mask"]
pub type ISOODRPM_R = crate::BitReader;
#[doc = "Field `ISOODRPM` writer - Isochronous OUT packet dropped interrupt mask"]
pub type ISOODRPM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOPFM` reader - End of periodic frame interrupt mask"]
pub type EOPFM_R = crate::BitReader;
#[doc = "Field `EOPFM` writer - End of periodic frame interrupt mask"]
pub type EOPFM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EPMISM` reader - Endpoint mismatch interrupt mask"]
pub type EPMISM_R = crate::BitReader;
#[doc = "Field `EPMISM` writer - Endpoint mismatch interrupt mask"]
pub type EPMISM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IEPINT` reader - IN endpoints interrupt mask"]
pub type IEPINT_R = crate::BitReader;
#[doc = "Field `IEPINT` writer - IN endpoints interrupt mask"]
pub type IEPINT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OEPINT` reader - OUT endpoints interrupt mask"]
pub type OEPINT_R = crate::BitReader;
#[doc = "Field `OEPINT` writer - OUT endpoints interrupt mask"]
pub type OEPINT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IISOIXFRM` reader - Incomplete isochronous IN transfer mask"]
pub type IISOIXFRM_R = crate::BitReader;
#[doc = "Field `IISOIXFRM` writer - Incomplete isochronous IN transfer mask"]
pub type IISOIXFRM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `IPXFRM_IISOOXFRM` reader - Incomplete periodic transfer mask(Host mode)/Incomplete isochronous OUT transfer mask(Device mode)"]
pub type IPXFRM_IISOOXFRM_R = crate::BitReader;
#[doc = "Field `IPXFRM_IISOOXFRM` writer - Incomplete periodic transfer mask(Host mode)/Incomplete isochronous OUT transfer mask(Device mode)"]
pub type IPXFRM_IISOOXFRM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RSTDETM` reader - Reset detected interrupt mask"]
pub type RSTDETM_R = crate::BitReader;
#[doc = "Field `RSTDETM` writer - Reset detected interrupt mask"]
pub type RSTDETM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// NOTE(review): PRTIM has a reader alias but no writer alias — presumably
// marked read-only in the SVD; confirm against the device description.
#[doc = "Field `PRTIM` reader - Host port interrupt mask"]
pub type PRTIM_R = crate::BitReader;
#[doc = "Field `HCIM` reader - Host channels interrupt mask"]
pub type HCIM_R = crate::BitReader;
#[doc = "Field `HCIM` writer - Host channels interrupt mask"]
pub type HCIM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PTXFEM` reader - Periodic TxFIFO empty mask"]
pub type PTXFEM_R = crate::BitReader;
#[doc = "Field `PTXFEM` writer - Periodic TxFIFO empty mask"]
pub type PTXFEM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPMINTM` reader - LPM interrupt mask"]
pub type LPMINTM_R = crate::BitReader;
#[doc = "Field `LPMINTM` writer - LPM interrupt mask"]
pub type LPMINTM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CIDSCHGM` reader - Connector ID status change mask"]
pub type CIDSCHGM_R = crate::BitReader;
#[doc = "Field `CIDSCHGM` writer - Connector ID status change mask"]
pub type CIDSCHGM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DISCINT` reader - Disconnect detected interrupt mask"]
pub type DISCINT_R = crate::BitReader;
#[doc = "Field `DISCINT` writer - Disconnect detected interrupt mask"]
pub type DISCINT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SRQIM` reader - Session request/new session detected interrupt mask"]
pub type SRQIM_R = crate::BitReader;
#[doc = "Field `SRQIM` writer - Session request/new session detected interrupt mask"]
pub type SRQIM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUIM` reader - Resume/remote wakeup detected interrupt mask"]
pub type WUIM_R = crate::BitReader;
#[doc = "Field `WUIM` writer - Resume/remote wakeup detected interrupt mask"]
pub type WUIM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Generated read accessors: each method extracts its field's bit from the
// cached register value (`self.bits`). Bit positions mirror the `#[doc]`
// annotations; note bits 0, 8-9, 16 and 22 have no field here.
impl R {
    #[doc = "Bit 1 - Mode mismatch interrupt mask"]
    #[inline(always)]
    pub fn mmism(&self) -> MMISM_R {
        MMISM_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - OTG interrupt mask"]
    #[inline(always)]
    pub fn otgint(&self) -> OTGINT_R {
        OTGINT_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - Start of frame mask"]
    #[inline(always)]
    pub fn sofm(&self) -> SOFM_R {
        SOFM_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Receive FIFO non-empty mask"]
    #[inline(always)]
    pub fn rxflvlm(&self) -> RXFLVLM_R {
        RXFLVLM_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Non-periodic TxFIFO empty mask"]
    #[inline(always)]
    pub fn nptxfem(&self) -> NPTXFEM_R {
        NPTXFEM_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Global non-periodic IN NAK effective mask"]
    #[inline(always)]
    pub fn ginakeffm(&self) -> GINAKEFFM_R {
        GINAKEFFM_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - Global OUT NAK effective mask"]
    #[inline(always)]
    pub fn gonakeffm(&self) -> GONAKEFFM_R {
        GONAKEFFM_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 10 - Early suspend mask"]
    #[inline(always)]
    pub fn esuspm(&self) -> ESUSPM_R {
        ESUSPM_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - USB suspend mask"]
    #[inline(always)]
    pub fn usbsuspm(&self) -> USBSUSPM_R {
        USBSUSPM_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - USB reset mask"]
    #[inline(always)]
    pub fn usbrst(&self) -> USBRST_R {
        USBRST_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - Enumeration done mask"]
    #[inline(always)]
    pub fn enumdnem(&self) -> ENUMDNEM_R {
        ENUMDNEM_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - Isochronous OUT packet dropped interrupt mask"]
    #[inline(always)]
    pub fn isoodrpm(&self) -> ISOODRPM_R {
        ISOODRPM_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - End of periodic frame interrupt mask"]
    #[inline(always)]
    pub fn eopfm(&self) -> EOPFM_R {
        EOPFM_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 17 - Endpoint mismatch interrupt mask"]
    #[inline(always)]
    pub fn epmism(&self) -> EPMISM_R {
        EPMISM_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - IN endpoints interrupt mask"]
    #[inline(always)]
    pub fn iepint(&self) -> IEPINT_R {
        IEPINT_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - OUT endpoints interrupt mask"]
    #[inline(always)]
    pub fn oepint(&self) -> OEPINT_R {
        OEPINT_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - Incomplete isochronous IN transfer mask"]
    #[inline(always)]
    pub fn iisoixfrm(&self) -> IISOIXFRM_R {
        IISOIXFRM_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - Incomplete periodic transfer mask(Host mode)/Incomplete isochronous OUT transfer mask(Device mode)"]
    #[inline(always)]
    pub fn ipxfrm_iisooxfrm(&self) -> IPXFRM_IISOOXFRM_R {
        IPXFRM_IISOOXFRM_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 23 - Reset detected interrupt mask"]
    #[inline(always)]
    pub fn rstdetm(&self) -> RSTDETM_R {
        RSTDETM_R::new(((self.bits >> 23) & 1) != 0)
    }
    #[doc = "Bit 24 - Host port interrupt mask"]
    #[inline(always)]
    pub fn prtim(&self) -> PRTIM_R {
        PRTIM_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 25 - Host channels interrupt mask"]
    #[inline(always)]
    pub fn hcim(&self) -> HCIM_R {
        HCIM_R::new(((self.bits >> 25) & 1) != 0)
    }
    #[doc = "Bit 26 - Periodic TxFIFO empty mask"]
    #[inline(always)]
    pub fn ptxfem(&self) -> PTXFEM_R {
        PTXFEM_R::new(((self.bits >> 26) & 1) != 0)
    }
    #[doc = "Bit 27 - LPM interrupt mask"]
    #[inline(always)]
    pub fn lpmintm(&self) -> LPMINTM_R {
        LPMINTM_R::new(((self.bits >> 27) & 1) != 0)
    }
    #[doc = "Bit 28 - Connector ID status change mask"]
    #[inline(always)]
    pub fn cidschgm(&self) -> CIDSCHGM_R {
        CIDSCHGM_R::new(((self.bits >> 28) & 1) != 0)
    }
    #[doc = "Bit 29 - Disconnect detected interrupt mask"]
    #[inline(always)]
    pub fn discint(&self) -> DISCINT_R {
        DISCINT_R::new(((self.bits >> 29) & 1) != 0)
    }
    #[doc = "Bit 30 - Session request/new session detected interrupt mask"]
    #[inline(always)]
    pub fn srqim(&self) -> SRQIM_R {
        SRQIM_R::new(((self.bits >> 30) & 1) != 0)
    }
    #[doc = "Bit 31 - Resume/remote wakeup detected interrupt mask"]
    #[inline(always)]
    pub fn wuim(&self) -> WUIM_R {
        WUIM_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// Generated write proxies: each method returns a single-bit writer pinned to
// its field's offset via the const generic. The raw `bits` escape hatch is
// `unsafe` because it bypasses the per-field API entirely.
// NOTE(review): there is no `prtim` writer (bit 24), matching the
// reader-only PRTIM alias above — presumably read-only in the SVD.
impl W {
    #[doc = "Bit 1 - Mode mismatch interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn mmism(&mut self) -> MMISM_W<GINTMSK_SPEC, 1> {
        MMISM_W::new(self)
    }
    #[doc = "Bit 2 - OTG interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn otgint(&mut self) -> OTGINT_W<GINTMSK_SPEC, 2> {
        OTGINT_W::new(self)
    }
    #[doc = "Bit 3 - Start of frame mask"]
    #[inline(always)]
    #[must_use]
    pub fn sofm(&mut self) -> SOFM_W<GINTMSK_SPEC, 3> {
        SOFM_W::new(self)
    }
    #[doc = "Bit 4 - Receive FIFO non-empty mask"]
    #[inline(always)]
    #[must_use]
    pub fn rxflvlm(&mut self) -> RXFLVLM_W<GINTMSK_SPEC, 4> {
        RXFLVLM_W::new(self)
    }
    #[doc = "Bit 5 - Non-periodic TxFIFO empty mask"]
    #[inline(always)]
    #[must_use]
    pub fn nptxfem(&mut self) -> NPTXFEM_W<GINTMSK_SPEC, 5> {
        NPTXFEM_W::new(self)
    }
    #[doc = "Bit 6 - Global non-periodic IN NAK effective mask"]
    #[inline(always)]
    #[must_use]
    pub fn ginakeffm(&mut self) -> GINAKEFFM_W<GINTMSK_SPEC, 6> {
        GINAKEFFM_W::new(self)
    }
    #[doc = "Bit 7 - Global OUT NAK effective mask"]
    #[inline(always)]
    #[must_use]
    pub fn gonakeffm(&mut self) -> GONAKEFFM_W<GINTMSK_SPEC, 7> {
        GONAKEFFM_W::new(self)
    }
    #[doc = "Bit 10 - Early suspend mask"]
    #[inline(always)]
    #[must_use]
    pub fn esuspm(&mut self) -> ESUSPM_W<GINTMSK_SPEC, 10> {
        ESUSPM_W::new(self)
    }
    #[doc = "Bit 11 - USB suspend mask"]
    #[inline(always)]
    #[must_use]
    pub fn usbsuspm(&mut self) -> USBSUSPM_W<GINTMSK_SPEC, 11> {
        USBSUSPM_W::new(self)
    }
    #[doc = "Bit 12 - USB reset mask"]
    #[inline(always)]
    #[must_use]
    pub fn usbrst(&mut self) -> USBRST_W<GINTMSK_SPEC, 12> {
        USBRST_W::new(self)
    }
    #[doc = "Bit 13 - Enumeration done mask"]
    #[inline(always)]
    #[must_use]
    pub fn enumdnem(&mut self) -> ENUMDNEM_W<GINTMSK_SPEC, 13> {
        ENUMDNEM_W::new(self)
    }
    #[doc = "Bit 14 - Isochronous OUT packet dropped interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn isoodrpm(&mut self) -> ISOODRPM_W<GINTMSK_SPEC, 14> {
        ISOODRPM_W::new(self)
    }
    #[doc = "Bit 15 - End of periodic frame interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn eopfm(&mut self) -> EOPFM_W<GINTMSK_SPEC, 15> {
        EOPFM_W::new(self)
    }
    #[doc = "Bit 17 - Endpoint mismatch interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn epmism(&mut self) -> EPMISM_W<GINTMSK_SPEC, 17> {
        EPMISM_W::new(self)
    }
    #[doc = "Bit 18 - IN endpoints interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn iepint(&mut self) -> IEPINT_W<GINTMSK_SPEC, 18> {
        IEPINT_W::new(self)
    }
    #[doc = "Bit 19 - OUT endpoints interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn oepint(&mut self) -> OEPINT_W<GINTMSK_SPEC, 19> {
        OEPINT_W::new(self)
    }
    #[doc = "Bit 20 - Incomplete isochronous IN transfer mask"]
    #[inline(always)]
    #[must_use]
    pub fn iisoixfrm(&mut self) -> IISOIXFRM_W<GINTMSK_SPEC, 20> {
        IISOIXFRM_W::new(self)
    }
    #[doc = "Bit 21 - Incomplete periodic transfer mask(Host mode)/Incomplete isochronous OUT transfer mask(Device mode)"]
    #[inline(always)]
    #[must_use]
    pub fn ipxfrm_iisooxfrm(&mut self) -> IPXFRM_IISOOXFRM_W<GINTMSK_SPEC, 21> {
        IPXFRM_IISOOXFRM_W::new(self)
    }
    #[doc = "Bit 23 - Reset detected interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn rstdetm(&mut self) -> RSTDETM_W<GINTMSK_SPEC, 23> {
        RSTDETM_W::new(self)
    }
    #[doc = "Bit 25 - Host channels interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn hcim(&mut self) -> HCIM_W<GINTMSK_SPEC, 25> {
        HCIM_W::new(self)
    }
    #[doc = "Bit 26 - Periodic TxFIFO empty mask"]
    #[inline(always)]
    #[must_use]
    pub fn ptxfem(&mut self) -> PTXFEM_W<GINTMSK_SPEC, 26> {
        PTXFEM_W::new(self)
    }
    #[doc = "Bit 27 - LPM interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn lpmintm(&mut self) -> LPMINTM_W<GINTMSK_SPEC, 27> {
        LPMINTM_W::new(self)
    }
    #[doc = "Bit 28 - Connector ID status change mask"]
    #[inline(always)]
    #[must_use]
    pub fn cidschgm(&mut self) -> CIDSCHGM_W<GINTMSK_SPEC, 28> {
        CIDSCHGM_W::new(self)
    }
    #[doc = "Bit 29 - Disconnect detected interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn discint(&mut self) -> DISCINT_W<GINTMSK_SPEC, 29> {
        DISCINT_W::new(self)
    }
    #[doc = "Bit 30 - Session request/new session detected interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn srqim(&mut self) -> SRQIM_W<GINTMSK_SPEC, 30> {
        SRQIM_W::new(self)
    }
    #[doc = "Bit 31 - Resume/remote wakeup detected interrupt mask"]
    #[inline(always)]
    #[must_use]
    pub fn wuim(&mut self) -> WUIM_W<GINTMSK_SPEC, 31> {
        WUIM_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "OTG_FS interrupt mask register (OTG_FS_GINTMSK)\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gintmsk::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gintmsk::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct GINTMSK_SPEC;
// The register is 32 bits wide.
impl crate::RegisterSpec for GINTMSK_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`gintmsk::R`](R) reader structure"]
impl crate::Readable for GINTMSK_SPEC {}
#[doc = "`write(|w| ..)` method takes [`gintmsk::W`](W) writer structure"]
// No write-one/write-zero-to-modify quirks: plain read-modify-write register.
impl crate::Writable for GINTMSK_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets GINTMSK to value 0"]
impl crate::Resettable for GINTMSK_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#![crate_name = "uu_wc"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Boden Garman <bpgarman@gmail.com>
* (c) Geordon Worley <vadixidav@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use itertools::Itertools;
use std::fs::File;
use std::io::{stdin, BufRead, BufReader};
use std::path::Path;
use std::result::Result as StdResult;
use std::str::from_utf8;
use structopt::*;
use uucore::{executable, show_error, show_info, show_warning};
// Command-line flags for wc, derived by structopt from the struct shape.
// Deliberately documented with plain `//` comments: structopt turns `///`
// doc comments into --help text, which would change the CLI output.
#[derive(StructOpt)]
struct Settings {
    // -c, --bytes: print the byte count
    #[structopt(short = "c", long)]
    bytes: bool,
    // -m, --chars: print the character count
    #[structopt(short = "m", long)]
    chars: bool,
    // -l, --lines: print the line count
    #[structopt(short = "l", long)]
    lines: bool,
    // -w, --words: print the word count
    #[structopt(short = "w", long)]
    words: bool,
    // -L: print the length of the longest line
    #[structopt(short = "L")]
    max_line_length: bool,
    // positional arguments: input files; "-" (or no args) means stdin
    files: Vec<String>,
}
/// Counting results for a single input (or the aggregated "total" row).
struct Outcome<'a> {
    // Display name: the file path, "-" for stdin, or "total".
    title: &'a str,
    bytes: usize,
    chars: usize,
    lines: usize,
    words: usize,
    // Character length of the longest line, excluding its line terminator.
    max_line_length: usize,
}
/// Entry point: parses the CLI, applies GNU-style defaults, runs the count,
/// and returns the process exit code.
pub fn uumain(args: Vec<String>) -> i32 {
    let mut settings = Settings::from_iter(args.into_iter());
    // With no counting flags given, default to the GNU trio: lines, words, bytes.
    let any_flag = settings.bytes
        || settings.chars
        || settings.lines
        || settings.words
        || settings.max_line_length;
    if !any_flag {
        settings.bytes = true;
        settings.lines = true;
        settings.words = true;
    }
    // No file arguments means "read stdin", spelled "-".
    if settings.files.is_empty() {
        settings.files.push("-".to_owned());
    }
    match wc(settings) {
        Ok(()) => 0,
        Err(code) => code,
    }
}
// Byte constants used by the non-UTF-8 fallback word splitter. Byte literals
// (b'\r') replace the redundant `'\r' as u8` / `0x16 as u8` casts.
const CR: u8 = b'\r';
const LF: u8 = b'\n';
const SPACE: u8 = b' ';
const TAB: u8 = b'\t';
// SYN (synchronous idle) and FF (form feed) round out the separator set.
const SYN: u8 = 0x16;
const FF: u8 = 0x0C;
/// Returns true for bytes treated as word separators when a line is not
/// valid UTF-8. LF is handled separately as the line terminator.
#[inline(always)]
fn is_word_seperator(byte: u8) -> bool {
    match byte {
        SPACE | TAB | CR | SYN | FF => true,
        _ => false,
    }
}
/// Counts lines, words, bytes, chars and the longest line length of one
/// input stream. `path` is only used as the report title and in warnings.
fn wc_reader<'a>(path: &'a str, mut reader: impl BufRead) -> Outcome<'a> {
    let mut line_count: usize = 0;
    let mut word_count: usize = 0;
    let mut byte_count: usize = 0;
    let mut char_count: usize = 0;
    let mut longest_line_length: usize = 0;
    let mut raw_line = Vec::new();
    // reading from a TTY seems to raise a condition on, rather than return Some(0) like a file.
    // hence the option wrapped in a result here
    while match reader.read_until(LF, &mut raw_line) {
        Ok(n) if n > 0 => true,
        Err(ref e) if !raw_line.is_empty() => {
            show_warning!("Error while reading {}: {}", path, e);
            !raw_line.is_empty()
        }
        _ => false,
    } {
        // The loop guard guarantees raw_line is non-empty here.
        let ends_with_lf = *raw_line.last().unwrap() == LF;
        // GNU 'wc' only counts lines that end in LF as lines.
        if ends_with_lf {
            line_count += 1;
        }
        byte_count += raw_line.len();
        // try and convert the bytes to UTF-8 first
        let current_char_count = match from_utf8(&raw_line[..]) {
            Ok(line) => {
                word_count += line.split_whitespace().count();
                line.chars().count()
            }
            Err(..) => {
                // Byte-level fallback. Skip empty segments so that runs of
                // consecutive separators are not counted as extra words
                // (split_whitespace does the same implicitly above).
                word_count += raw_line
                    .split(|&x| is_word_seperator(x))
                    .filter(|w| !w.is_empty())
                    .count();
                raw_line.iter().filter(|c| c.is_ascii()).count()
            }
        };
        char_count += current_char_count;
        // Exclude the trailing LF (when present) from the line length; the
        // previous unconditional `- 1` under-counted a final line with no LF.
        let line_length = if ends_with_lf {
            current_char_count - 1
        } else {
            current_char_count
        };
        if line_length > longest_line_length {
            longest_line_length = line_length;
        }
        raw_line.truncate(0);
    }
    Outcome {
        title: path,
        bytes: byte_count,
        chars: char_count,
        lines: line_count,
        words: word_count,
        max_line_length: longest_line_length,
    }
}
/// Runs the count over every requested file (or stdin for "-"), printing
/// per-file statistics and a trailing total when there are multiple inputs.
/// Returns the exit code as `Err` on the first unreadable input.
fn wc(settings: Settings) -> StdResult<(), i32> {
    // This is an iterator over the outcomes of running wc on each file.
    let mut outcome_iter = settings.files.iter().map(|path| {
        if path == "-" {
            Ok(wc_reader(path, stdin().lock()))
        } else {
            open(&path).map(|reader| wc_reader(path, reader))
        }
    });
    // Fold the outcomes to get the total and collect them to a vector.
    // We also use fold_results to exit on an error condition.
    // NOTE(review): `fold_results` is itertools' short-circuiting fold over
    // an iterator of Results (renamed `fold_ok` in later itertools releases).
    let (outcomes, total) = outcome_iter.fold_results(
        (
            vec![],
            Outcome {
                title: "total",
                bytes: 0,
                chars: 0,
                lines: 0,
                words: 0,
                max_line_length: 0,
            },
        ),
        |(mut outcomes, mut total), item| {
            // Totals are sums, except max_line_length which is a maximum.
            total.bytes += item.bytes;
            total.chars += item.chars;
            total.lines += item.lines;
            total.words += item.words;
            total.max_line_length = std::cmp::max(total.max_line_length, item.max_line_length);
            outcomes.push(item);
            (outcomes, total)
        },
    )?;
    // The max width of the output table's number section.
    let max_width = total.bytes.to_string().len() + 1;
    // Print the outcomes for each file.
    for outcome in &outcomes {
        print_stats(&settings, &outcome, max_width);
    }
    // Print the total outcome if we had multiple inputs.
    if settings.files.len() > 1 {
        print_stats(&settings, &total, max_width);
    }
    Ok(())
}
/// Prints one result row: each enabled counter right-aligned in a
/// `max_width`-wide column, followed by the title (omitted for stdin).
fn print_stats(settings: &Settings, outcome: &Outcome, max_width: usize) {
    // (enabled, value) pairs in GNU wc's fixed output order.
    let columns: [(bool, usize); 5] = [
        (settings.lines, outcome.lines),
        (settings.words, outcome.words),
        (settings.bytes, outcome.bytes),
        (settings.chars, outcome.chars),
        (settings.max_line_length, outcome.max_line_length),
    ];
    for &(enabled, value) in columns.iter() {
        if enabled {
            print!("{:1$}", value, max_width);
        }
    }
    if outcome.title == "-" {
        println!();
    } else {
        println!(" {}", outcome.title);
    }
}
/// Opens `path` for buffered reading. Directories only get an informational
/// message here; an open failure is reported and mapped to exit code 1.
fn open(path: &str) -> StdResult<BufReader<File>, i32> {
    let fpath = Path::new(path);
    if fpath.is_dir() {
        show_info!("{}: is a directory", path);
    }
    match File::open(&fpath) {
        Ok(file) => Ok(BufReader::new(file)),
        Err(e) => {
            show_error!("wc: {}: {}", path, e);
            Err(1)
        }
    }
}
|
//! Slydot: The Sunrise Event
//! =========================
//!
//! An original game written in Rust.
//!
//! Any similarity between this game and *Spybot: The Nightfall Incident* is purely coincidental.
extern crate piston;
extern crate piston_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate vec_map;
use piston::input::*;
use piston::window::WindowSettings;
use opengl_graphics::*;
use piston_window::PistonWindow;
pub mod game;
pub use game::Game;
pub mod unit;
pub use unit::Unit;
pub mod grid;
pub use grid::Grid;
pub mod controller;
pub use controller::Controller;
// Board geometry, in pixels: cell edge length, gap between neighbouring
// cells, and the top-left origin of the grid on screen.
pub const CELL_SIZE: f64 = 28.0;
pub const CELL_PADDING: f64 = 4.0;
pub const CELL_OFFSET_X: f64 = 50.0;
pub const CELL_OFFSET_Y: f64 = 50.0;
/// Converts a grid coordinate into its pixel position on screen.
pub fn cell_pos(a: i16) -> f64 {
    let stride = CELL_SIZE + CELL_PADDING;
    CELL_OFFSET_X + f64::from(a) * stride
}
/// Game entry point: opens the window, selects the first player unit, and
/// runs the classic piston event loop until the window closes.
fn main() {
    // Request an OpenGL 3.2 context for the window and renderer.
    let opengl = OpenGL::V3_2;
    let window: PistonWindow =
        WindowSettings::new("sunrise", [640, 480])
            .opengl(opengl)
            .build()
            .unwrap();
    let ref mut gl = GlGraphics::new(opengl);
    // Start from the built-in sample board and pre-select the first unit on
    // team 0 so the player has an active unit immediately.
    let mut game = Game::sample();
    let idx = game.units.iter().find(|&(_, ref x)| x.team == 0).unwrap().0;
    game.select(idx);
    // Old-style piston loop: the window itself is an event iterator.
    for e in window {
        if let Some(args) = e.render_args() {
            gl.draw(args.viewport(), |c, gl| {
                game.draw(&c, gl);
            });
            // Advance per-frame game state right after rendering.
            game.handle_frame();
        }
        if let Some(a) = e.mouse_cursor_args() {
            game.handle_mouse(a[0], a[1]);
        }
        if let Some(b) = e.press_args() {
            game.handle_press(b);
        }
    }
}
|
use elements::HmfGen;
use bignum::{BigNumber, RealQuadElement};
use flint::fmpq::Fmpq;
use flint::fmpz::Fmpz;
use std::ops::{AddAssign, MulAssign};
/// Differentiation-style operator used by `rankin_cohen`: copies `f` into
/// `res`, then multiplies each Fourier coefficient indexed by (v, u) by
/// `From::from((u, v))^expt.0 * From::from((-u, v))^expt.1`.
/// NOTE(review): the algebraic meaning of `From::from((u, v as u64))`
/// depends on the `T: From<(i64, u64)>` impl — confirm against `bignum`.
fn diff_mut<T>(res: &mut HmfGen<T>, expt: (usize, usize), f: &HmfGen<T>)
where
    T: BigNumber + From<(i64, u64)> + Clone,
{
    // Start from a copy of f and scale coefficients in place.
    res.set(f);
    let mut tmp = T::R::default();
    let mut tmp_t = T::new_g();
    let mut tmp_t1 = T::new_g();
    // Visit every stored (v, u) coefficient index of the form.
    v_u_bd_iter!(
        (f.m, f.u_bds, v, u, bd)
        {
            // Accumulate this coefficient's multiplier in tmp_t1 (starts at 1).
            tmp_t1.set_ui_g(1);
            if expt.0 > 0 {
                let a: T = From::from((u, v as u64));
                tmp_t.pow_mut(&a, expt.0);
                tmp_t1.mul_assign_g(&tmp_t, &mut tmp);
            }
            if expt.1 > 0 {
                // Second factor: same construction with u negated.
                let a: T = From::from((-u, v as u64));
                tmp_t.pow_mut(&a, expt.1);
                tmp_t1.mul_assign_g(&tmp_t, &mut tmp);
            }
            res.fcvec.fc_ref_mut(v, u, bd).mul_assign_g(&tmp_t1, &mut tmp);
        }
    );
}
/// Writes into `res` the product of the two differentiated forms:
/// `res = D^expt0(f) * D^expt1(g)`, where `D` is the `diff_mut` operator.
fn diff_mul<T>(
    res: &mut HmfGen<T>,
    expt0: (usize, usize),
    expt1: (usize, usize),
    f: &HmfGen<T>,
    g: &HmfGen<T>,
) where
    T: BigNumber + From<(i64, u64)> + Clone,
    for<'a> T: AddAssign<&'a T>,
{
    // Differentiate g into scratch storage first, then f directly into res,
    // and finish with an in-place multiplication.
    let mut g_diff = HmfGen::<T>::new(f.m, f.prec);
    diff_mut(&mut g_diff, expt1, g);
    diff_mut(res, expt0, f);
    *res *= &g_diff;
}
/// Error returned by `rankin_cohen` when either input form has no weight
/// set (presumably "not a holomorphic Hilbert modular form" — the name is
/// not expanded anywhere in this file).
#[derive(Debug)]
pub struct NotHhmError {}
/// Computes the `n`-th Rankin-Cohen-style bracket of the two forms `f` and
/// `g` (which must share the same precision), returning a form of weight
/// `(k1 + l1, k2 + l2 + 2n)`.
///
/// # Errors
/// Returns `NotHhmError` when either input has no weight set.
///
/// # Panics
/// Panics if `f.prec != g.prec`.
pub fn rankin_cohen<T>(n: usize, f: &HmfGen<T>, g: &HmfGen<T>) -> Result<HmfGen<T>, NotHhmError>
where
    T: BigNumber + From<(i64, u64)> + Clone + RealQuadElement<Fmpq>,
    for<'a> T: MulAssign<&'a Fmpq>,
    for<'a> T: AddAssign<&'a T>,
{
    assert_eq!(f.prec, g.prec);
    // Guard clause instead of the old `!is_none() && !is_none()` nesting.
    if f.weight.is_none() || g.weight.is_none() {
        return Err(NotHhmError {});
    }
    let mut res = HmfGen::<T>::new(f.m, f.prec);
    let mut tmp = HmfGen::<T>::new(f.m, f.prec);
    let mut tmp_z = Fmpz::new();
    let mut tmp_z1 = Fmpz::new();
    // Alternating sign: starts at (-1)^n and flips every iteration.
    let mut sgn: i64 = if is_even!(n) { 1 } else { -1 };
    let (k1, k2) = f.weight.unwrap();
    let (l1, l2) = g.weight.unwrap();
    for i in 0..(n + 1) {
        // Binomial coefficients C(n + k2 - 1, n - i) and C(n + l2 - 1, i).
        tmp_z.bi_uiui_mut((n + k2 - 1) as u64, (n - i) as u64);
        tmp_z1.bi_uiui_mut((n + l2 - 1) as u64, i as u64);
        tmp_z *= &tmp_z1;
        tmp_z *= sgn;
        // tmp = D^(0,i) f * D^(0,n-i) g (see diff_mul/diff_mut).
        diff_mul(&mut tmp, (0, i), (0, n - i), &f, &g);
        tmp *= &Into::<Fmpq>::into(&tmp_z);
        res += &tmp;
        sgn *= -1;
    }
    res.weight = Some((k1 + l1, k2 + l2 + 2 * n));
    Ok(res)
}
|
use log::{info, LevelFilter};
use loggest::init;
use std::thread;
/// Logs one message from the main thread and one from a spawned thread,
/// waiting for the spawned thread to finish before exiting.
fn main() {
    // Keep the guard alive for the whole program so output is flushed on exit.
    let _flush = init(LevelFilter::Info, "example").unwrap();
    info!("Main thread");
    let handle = thread::spawn(move || {
        info!("A thread");
    });
    handle.join().unwrap();
}
|
use super::dto;
use actix_web::web;
use failure::Error;
use futures::Future;
use r2d2;
use r2d2_sqlite;
use rusqlite::params;
/// Database module
///
/// As a takeaway of the talk "Immutable Relational Data" by Richard Feldman
/// I decided to include no id values in any of the structs describing data.
/// Instead, the ids need to be managed separately.
///
/// https://www.youtube.com/watch?v=28OdemxhfbU
pub type Pool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type Connection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
/// Verifies `password` for `username` on the actix blocking-thread pool,
/// resolving to `true` when the credentials match.
pub fn check_password(
    username: String,
    password: String,
    pool: &Pool,
) -> impl Future<Item = bool, Error = actix_web::Error> {
    // The closure needs an owned pool handle to check out a connection.
    let pool = pool.clone();
    web::block(move || {
        let conn = pool.get()?;
        check_password_(&username, &password, conn)
    })
    .from_err()
}
/// Checks `password` against the stored PBKDF2 hash for `username`.
/// Returns `Ok(false)` both for a wrong password and for an unknown user;
/// the old `.nth(0).unwrap()` panicked when the username did not exist.
fn check_password_(username: &str, password: &str, conn: Connection) -> Result<bool, Error> {
    use pbkdf2::pbkdf2_check;
    // TODO: Use a prepared statement
    let stmt = "SELECT password FROM user WHERE username = :username";
    let mut prep_stmt = conn.prepare(&stmt)?;
    let mut rows = prep_stmt.query_map_named(&[(":username", &username)], |row| row.get(0))?;
    // An unknown username is an authentication failure, not a server panic.
    let password_hash: String = match rows.next() {
        Some(hash) => hash?,
        None => return Ok(false),
    };
    Ok(pbkdf2_check(password, &password_hash).is_ok())
}
/// Creates a new game owned by `username` on the blocking-thread pool and
/// resolves to the freshly created game's header.
pub fn create_game(
    username: String,
    game: dto::GameCreate,
    pool: &Pool,
) -> impl Future<Item = dto::GameHeader, Error = actix_web::Error> {
    // Clone the pool handle so the blocking closure can own it.
    let pool = pool.clone();
    web::block(move || {
        let conn = pool.get()?;
        create_game_(username, game, &conn)
    })
    .from_err()
}
/// Inserts a new game row plus a membership row for the creating user, who
/// automatically accepts their own invite (role 1, accepted = 1), and
/// returns the resulting game header with its member list.
/// NOTE(review): the two INSERTs are not wrapped in a transaction, so a
/// failure of the second leaves an orphaned game row — consider
/// `Connection::transaction` (requires `&mut Connection`).
fn create_game_(
    username: String,
    game: dto::GameCreate,
    conn: &Connection,
) -> Result<dto::GameHeader, Error> {
    conn.execute(
        "INSERT INTO game (description) VALUES (?1)",
        params![game.description],
    )?;
    // rowid of the game just inserted on this connection.
    let game_id = conn.last_insert_rowid();
    let default_role = 1;
    // The user automatically accepts their own game invite.
    conn.execute(
        "INSERT INTO game_member (user, game, role, accepted) VALUES \
         ((select id from user where username = ?1), ?2, ?3, 1)",
        params![username, game_id, default_role],
    )?;
    Ok(dto::GameHeader {
        id: game_id,
        description: game.description,
        members: members_by_game_(game_id, conn)?,
    })
}
/// Lists every game `username` is a member of, running the synchronous
/// SQLite queries on actix's blocking executor.
pub fn games_by_user(
    username: String,
    pool: &Pool,
) -> impl Future<Item = Vec<dto::GameHeader>, Error = actix_web::Error> {
    let pool = pool.clone();
    web::block(move || {
        let conn = pool.get()?;
        games_by_user_(&username, &conn)
    })
    .from_err()
}
/// This function takes a username and returns all games that the user is a member of.
///
/// NOTE(review): `members_by_game_` runs a second query on the same
/// connection while this statement's rows are still being iterated; this
/// works with SQLite but is worth keeping in mind when refactoring.
fn games_by_user_(username: &str, conn: &Connection) -> Result<Vec<dto::GameHeader>, Error> {
    let mut stmt = conn.prepare(
        "select game.id, game.description from game \
         inner join game_member on game_member.game = game.id \
         inner join user on user.id = game_member.user \
         where user.username = ?1",
    )?;
    let game_iter = stmt.query_map(params![username], |row| {
        let id = row.get(0)?;
        Ok(dto::GameHeader {
            id,
            description: row.get(1)?,
            // Eagerly load the member list for each game header.
            members: members_by_game_(id, conn)?,
        })
    })?;
    let mut result = Vec::new();
    for game in game_iter {
        result.push(game?);
    }
    Ok(result)
}
/// Loads every member of the game identified by `game` as DTOs.
fn members_by_game_(game: i64, conn: &Connection) -> Result<Vec<dto::Member>, rusqlite::Error> {
    let mut stmt = conn.prepare(
        "select user.id, user.username, game_member.role, game_member.accepted from game_member \
         inner join user on user.id = game_member.user \
         where game_member.game = ?1",
    )?;
    let rows = stmt.query_map(params![game], |row| {
        Ok(dto::Member {
            id: row.get(0)?,
            username: row.get(1)?,
            role: row.get(2)?,
            accepted: row.get(3)?,
        })
    })?;
    // Collect the row iterator, short-circuiting on the first SQL error.
    rows.collect()
}
/// Fetches the membership record for `(game_id, user_id)`, or `None`
/// when the user is not a member of that game.
fn member_info_(
    game_id: i64,
    user_id: i64,
    conn: &Connection,
) -> Result<Option<dto::Member>, rusqlite::Error> {
    let mut stmt = conn.prepare(
        "select user.id, user.username, game_member.role, game_member.accepted from game_member \
         inner join user on user.id = game_member.user \
         where game_member.game = ?1
         and game_member.user = ?2",
    )?;
    let mut rows = stmt.query_map(params![game_id, user_id], |row| {
        Ok(dto::Member {
            id: row.get(0)?,
            username: row.get(1)?,
            role: row.get(2)?,
            accepted: row.get(3)?,
        })
    })?;
    // (game, user) is a unique key, so at most one row matches; turn
    // Option<Result<_>> into Result<Option<_>>.
    rows.next().transpose()
}
/// This function updates an existing member_info object. This matches via
/// the unique key (user_id, game_id) and does not change these values.
///
/// NOTE(review): `member_info.id` is bound to the `user` column, i.e. a
/// member's id is assumed to equal the user's id — confirm against
/// `dto::Member`.
fn update_member_info_(
    game_id: i64,
    member_info: dto::Member,
    conn: &Connection,
) -> Result<(), rusqlite::Error> {
    conn.execute(
        "update game_member
         set role = ?1,
         accepted = ?2
         where user = ?3 and game = ?4",
        params![
            member_info.role,
            member_info.accepted,
            member_info.id,
            game_id
        ],
    )?;
    Ok(())
}
/// This function inserts a new member_info object. This requires that
/// the unique key (user_id, game_id) is not used yet.
///
/// `member_info.id` is stored in the `user` column (member id is assumed
/// to be the user id, mirroring `update_member_info_`).
fn insert_member_info_(
    game_id: i64,
    member_info: dto::Member,
    conn: &Connection,
) -> Result<(), rusqlite::Error> {
    conn.execute(
        "insert into game_member (user, game, role, accepted) values (?1, ?2, ?3, ?4)",
        params![
            member_info.id,
            game_id,
            member_info.role,
            member_info.accepted,
        ],
    )?;
    Ok(())
}
/// Asynchronously loads a single game header by id, if it exists.
pub fn game(
    game_id: i64,
    pool: &Pool,
) -> impl Future<Item = Option<dto::GameHeader>, Error = actix_web::Error> {
    let pool = pool.clone();
    web::block(move || game_(game_id, &pool.get()?)).from_err()
}
/// Loads the header for a single game, or `None` when no such game exists.
fn game_(game_id: i64, conn: &Connection) -> Result<Option<dto::GameHeader>, Error> {
    let mut stmt = conn.prepare(
        "select game.description from game \
         where game.id = ?1",
    )?;
    let mut rows = stmt.query_map(params![game_id], |row| {
        Ok(dto::GameHeader {
            id: game_id,
            description: row.get(0)?,
            // The member list is loaded eagerly alongside the header.
            members: members_by_game_(game_id, conn)?,
        })
    })?;
    // `game.id` is unique, so there is at most one row to transpose.
    Ok(rows.next().transpose()?)
}
/// Asynchronously lists all registered users.
pub fn all_users(pool: &Pool) -> impl Future<Item = Vec<dto::UserInfo>, Error = actix_web::Error> {
    let pool = pool.clone();
    web::block(move || all_users_(&pool.get()?)).from_err()
}
fn all_users_(conn: &Connection) -> Result<Vec<dto::UserInfo>, Error> {
let mut stmt = conn.prepare("select id, username from user")?;
let user_iter = stmt.query_map(params![], |row| {
Ok(dto::UserInfo {
id: row.get(0)?,
username: row.get(1)?,
})
})?;
let mut users = Vec::new();
for user in user_iter {
users.push(user?);
}
Ok(users)
}
/// Asynchronously updates a game's description on behalf of `username`.
pub fn update_description(
    username: String,
    game_id: i64,
    new_description: String,
    pool: &Pool,
) -> impl Future<Item = (), Error = actix_web::Error> {
    let pool = pool.clone();
    web::block(move || update_description_(username, game_id, new_description, &pool.get()?))
        .from_err()
}
/// Blocking update of a game's description.
///
/// Only members of the game may update it; a request from a non-member
/// currently succeeds silently as a no-op (see the TODO below).
fn update_description_(
    username: String,
    game_id: i64,
    new_description: String,
    conn: &Connection,
) -> Result<(), Error> {
    // TODO: The database module will contain business logic, until actix
    // updates to async await. Then we can move it outside.
    // Check if the user is a member of the game
    let user_id = get_user_id_(username, conn)?;
    let members = members_by_game_(game_id, conn)?;
    if members.iter().any(|member| Some(member.id) == user_id) {
        conn.execute(
            "update game set description = ?1 where id = ?2",
            params![new_description, game_id],
        )?;
        Ok(())
    } else {
        // TODO: Fail with error
        Ok(())
    }
}
/// Resolves a username to its user id, or `None` when no such user exists.
fn get_user_id_(username: String, conn: &Connection) -> Result<Option<i64>, Error> {
    let mut stmt = conn.prepare("select id from user where username = ?1")?;
    let mut rows = stmt.query_map(params![username], |row| row.get(0))?;
    // Usernames are unique: zero or one row, transposed into Result<Option<_>>.
    Ok(rows.next().transpose()?)
}
/// Asynchronously applies a membership change to `game_id` on behalf of `username`.
pub fn update_member(
    username: String,
    game_id: i64,
    new_member: dto::Member,
    pool: &Pool,
) -> impl Future<Item = (), Error = actix_web::Error> {
    let pool = pool.clone();
    web::block(move || update_member_(username, game_id, new_member, &pool.get()?)).from_err()
}
/// Applies a member change to `game_id` on behalf of `username`.
///
/// Rules enforced here (business logic lives in the db layer until the
/// async/await migration, per the TODO below):
/// - the acting user must themselves be a member of the game;
/// - an existing member only has their role updated (never `accepted`);
/// - a new member is inserted with `accepted` forced to `false` so a
///   client cannot accept an invite on another user's behalf.
fn update_member_(
    username: String,
    game_id: i64,
    mut new_member: dto::Member,
    conn: &Connection,
) -> Result<(), Error> {
    // TODO: The database module will contain business logic, until actix
    // updates to async await. Then we can move it outside.
    if let Some(user_id) = get_user_id_(username, conn)? {
        if member_info_(game_id, user_id, conn)?.is_none() {
            // The user giving the command is not part of the game.
            return Ok(());
        } else if let Some(mut member_info) = member_info_(game_id, new_member.id, conn)? {
            // Existing member: only the role may change.
            member_info.role = new_member.role;
            update_member_info_(game_id, member_info, conn)?;
        } else {
            // We make sure that the client can't decide to accept the request
            // for another user.
            new_member.accepted = false;
            insert_member_info_(game_id, new_member, conn)?;
        }
        Ok(())
    } else {
        // TODO: Fail with error
        Ok(())
    }
}
|
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use crate::routing::MetaBundle;
use tokio::sync::mpsc::Sender;
pub use self::cla_handle::HandleId as HandleId;
pub mod cla_handle;
pub mod cla_manager;
pub mod stcp_server;
pub mod stcp;
pub mod loopback;
/// Messages sent *to* a convergence-layer adapter.
#[derive(Clone, Debug, PartialEq)]
pub enum ClaMessage {
    // Ask the adapter to transmit this bundle to its peer.
    TransmitBundle(MetaBundle),
}
/// Status events reported *by* an adapter about a bundle.
#[derive(Clone, Debug, PartialEq)]
pub enum ClaBundleStatus {
    // Transmission failed; the bundle is handed back.
    Failure(MetaBundle),
    // Transmission succeeded.
    Success(MetaBundle),
    // A bundle arrived from the network.
    // NOTE(review): meaning of the usize is not shown here — presumably a
    // size or handle id; confirm at the construction sites.
    New(bp7::Bundle, usize),
}
/// Common interface every convergence-layer adapter implements.
pub trait ClaTrait: Send + Sync {
    /// Queue `bundle` for transmission to the adapter's peer.
    fn send(&mut self, bundle: MetaBundle);
    /// Start the adapter; status events are reported through `tx`.
    fn start(&mut self, tx: Sender<ClaBundleStatus>);
    // async fn accept() -> Arc<MetaBundle>;
    /// Shut the adapter down.
    fn stop(&mut self);
}
// pub trait ClaHandleTrait: Send + Sync {
// /// The Handle queues the bundle and promises to return quickly
// fn process_bundle(&self, bundle: bp7::Bundle);
// }
/// Read/write capability of an adapter: receive-only, bidirectional, or send-only.
#[derive(Clone, Debug, PartialEq)]
pub enum ClaRW {
    R,
    RW,
    W,
}
/// Concrete adapter kind plus its connection parameters.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum ClaType {
    StcpListener( String, u16), // local address, port
    Stcp(String, u16), // remote address, port
    StcpIp(String, u16, String), // remote address, port, dns domain to search (. for ip.earth)
    LoopBack, // ...
}
impl Default for ClaType {
fn default() -> Self { ClaType::LoopBack }
}
/// Top-level CLA configuration: adapter configurations keyed by name.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)]
pub struct ClaConfiguration {
    pub adapters: HashMap<String, AdapterConfiguration>,
}
/// Configuration for a single adapter instance.
///
/// Equality and hashing are by `name` only (see the impls below).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdapterConfiguration {
    // Unique adapter name; the identity key for Eq/Hash.
    pub(crate) name: String,
    // Peer node identifier (inferred from the name — confirm at call sites).
    pub(crate) peernode: String,
    // When true the adapter should be (or stay) shut down.
    pub(crate) shutdown: bool,
    pub(crate) cla_type: ClaType,
}
impl PartialEq for AdapterConfiguration {
    /// Adapters are compared by name alone; all other fields are ignored.
    fn eq(&self, other: &Self) -> bool {
        self.name == other.name
    }
}
// Name-based equality is reflexive, so Eq is sound.
impl Eq for AdapterConfiguration {}
impl Hash for AdapterConfiguration {
    /// Hash by `name` only, consistent with the PartialEq impl above.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.name.hash(state);
    }
}
impl Default for AdapterConfiguration {
    /// An inert placeholder: unnamed, no peer, shut down, loopback type.
    fn default() -> AdapterConfiguration {
        AdapterConfiguration {
            name: String::new(),
            peernode: String::new(),
            shutdown: true,
            cla_type: ClaType::default(),
        }
    }
}
|
use regex::{RegexSet, SetMatches, SetMatchesIntoIter};
/// Iterator over the *pattern strings* of a `RegexSet` match result,
/// rather than the numeric pattern indices that `SetMatches` yields.
pub struct RegexSetMatches<'a> {
    // Indices of the patterns that matched.
    iter: SetMatchesIntoIter,
    // Pattern strings of the set, indexed by those indices.
    patterns: &'a[String],
}
impl<'a> RegexSetMatches<'a> {
    /// Couples `matches` with the pattern strings of `set`; the matches
    /// must have been produced by that same set for the indices to line up.
    pub fn new(set: &'a RegexSet, matches: SetMatches) -> Self {
        Self {
            iter: matches.into_iter(),
            patterns: set.patterns(),
        }
    }
}
impl<'a> Iterator for RegexSetMatches<'a> {
    type Item = &'a str;

    /// Maps each matched pattern index to its pattern string.
    fn next(&mut self) -> Option<Self::Item> {
        self.iter
            .next()
            .map(|index| self.patterns[index].as_str())
    }
}
|
use std::{ fs };
/// AoC 2020 day 4 (part 1): counts valid and invalid passports in the
/// puzzle input file.
///
/// Returns `Some(true)` on completion; errors reading the input panic
/// instead of being reported through the return value.
pub fn main() -> Option<bool> {
    let file_contents = match fs::read_to_string(
        "./inputs/2020-12-04-aoc-01-input.txt"
    ) {
        Ok(c) => c,
        // Abort loudly if the puzzle input is missing or unreadable.
        Err(e) => panic!("{:?}", e)
    };
    // Passports are separated by blank lines; partition them by validity.
    let (
        valid_passports, invalid_passports
    ): (Vec<Passport>, Vec<Passport>) = file_contents
        .split("\n\n")
        .map(|block| Passport::new(block))
        .partition(|passport| {
            passport.is_valid()
        });
    println!(
        "There are {} valid passports and {} invalid passports",
        valid_passports.len(), invalid_passports.len()
    );
    Some(true)
}
/// A passport record parsed from a whitespace-separated `key:value` block.
///
/// Numeric fields default to 0 and string fields to "" when absent, so
/// `is_valid` detects missing fields by checking for those defaults.
#[derive(Debug)]
struct Passport {
    byr: u16,
    iyr: u16,
    eyr: u16,
    hgt: String,
    hcl: String,
    ecl: String,
    pid: String,
    cid: u16,
}

impl Passport {
    /// Parses a raw passport block such as `"byr:1990 hgt:180cm"`.
    ///
    /// Unknown keys are ignored. Fields without a `:` separator and
    /// non-numeric values for numeric fields are skipped rather than
    /// panicking (previously `collection[1]` / `parse().unwrap()`
    /// aborted on such input).
    pub fn new(raw: &str) -> Passport {
        let mut passport = Passport {
            byr: 0,
            iyr: 0,
            eyr: 0,
            hgt: String::new(),
            hcl: String::new(),
            ecl: String::new(),
            pid: String::new(),
            cid: 0,
        };
        for field in raw.split_whitespace() {
            // Split once on ':'; fields with no value part are skipped.
            let mut parts = field.splitn(2, ':');
            let key = parts.next().unwrap_or("");
            let value = match parts.next() {
                Some(v) => v,
                None => continue,
            };
            match key {
                "byr" => passport.byr = value.parse().unwrap_or(0),
                "iyr" => passport.iyr = value.parse().unwrap_or(0),
                "eyr" => passport.eyr = value.parse().unwrap_or(0),
                "hgt" => passport.hgt = value.to_string(),
                "hcl" => passport.hcl = value.to_string(),
                "ecl" => passport.ecl = value.to_string(),
                "pid" => passport.pid = value.to_string(),
                "cid" => passport.cid = value.parse().unwrap_or(0),
                _ => {}
            }
        }
        passport
    }

    /// A passport is valid when every required field (all but `cid`) is set.
    pub fn is_valid(&self) -> bool {
        self.byr != 0
            && self.iyr != 0
            && self.eyr != 0
            && !self.hgt.is_empty()
            && !self.hcl.is_empty()
            && !self.ecl.is_empty()
            && !self.pid.is_empty()
    }
}
|
use crate::html::Attribute;
/// Generates `pub fn <name><Msg>(value: &str) -> Attribute<Msg>` helpers
/// producing text attributes. The two-argument arm allows an explicit tag
/// string for names that are Rust keywords (e.g. `type_` -> "type").
macro_rules! declare_text_attributes {
    ($($x:ident, $tag:expr)*) => ($(
        pub fn $x<Msg>(value: &str) -> Attribute<Msg> {
            Attribute::Text($tag.to_owned(), value.to_owned())
        }
    )*);
    // Single-identifier arm: the attribute tag is the function name itself.
    ($($x:ident)*) => ($(
        declare_text_attributes!($x, stringify!($x));
    )*);
}
/// Same scheme as `declare_text_attributes`, but for value-less boolean
/// attributes such as `checked`.
macro_rules! declare_bool_attributes {
    ($($x:ident, $tag:expr)*) => ($(
        pub fn $x<Msg>() -> Attribute<Msg> {
            Attribute::Bool($tag.to_owned())
        }
    )*);
    // Single-identifier arm: the attribute tag is the function name itself.
    ($($x:ident)*) => ($(
        declare_bool_attributes!($x, stringify!($x));
    )*);
}
/// Builds a `className` attribute from `(name, active)` pairs, keeping
/// only the names whose flag is `true`, joined by single spaces.
pub fn class_list<Msg>(classes: &[(&str, bool)]) -> Attribute<Msg> {
    let active: Vec<&str> = classes
        .iter()
        .filter_map(|&(name, enabled)| if enabled { Some(name) } else { None })
        .collect();
    // TODO: Change `class` to use Into<Cow> and use it here
    Attribute::Text("className".to_owned(), active.join(" "))
}
/// Wraps a key used to track element identity across renders.
pub fn key<Msg>(key: String) -> Attribute<Msg> {
    Attribute::Key(key)
}
// Text attributes whose tag equals the function name.
declare_text_attributes! {
    placeholder
    name
    value
    id
    href
    class
    src
}
// Keyword-clashing names get an explicit tag string.
declare_text_attributes! {
    type_, "type"
    for_, "for"
}
// Value-less boolean attributes.
declare_bool_attributes! {
    autofocus
    checked
    hidden
}
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under both the MIT license found in the
* LICENSE-MIT file in the root directory of this source tree and the Apache
* License, Version 2.0 found in the LICENSE-APACHE file in the root directory
* of this source tree.
*/
//! A trait to represent zero-cost conversions.
use crate::cast::{self, transmute_unchecked};
pub use gazebo_derive::Coerce;
use std::{
alloc::Layout,
collections::{HashMap, HashSet},
};
/// A marker trait such that the existence of `From: Coerce<To>` implies
/// that `From` can be treated as `To` without any data manipulation.
/// Particularly useful for containers, e.g. `Vec<From>` can be treated as
/// `Vec<To>` in _O(1)_. If such an instance is available,
/// you can use [`coerce`] and [`coerce_ref`] to perform the conversion.
///
/// Importantly, you must make sure Rust does not change the type representation
/// between the different types (typically using a `repr` directive),
/// and it must be safe for the `From` to be treated as `To`, namely same (or less restrictive) alignment,
/// no additional invariants, value can be dropped as `To`.
///
/// One use of `Coerce` is around newtype wrappers:
///
/// ```
/// use gazebo::coerce::{Coerce, coerce, coerce_ref};
/// #[repr(transparent)]
/// #[derive(Debug, Coerce)]
/// struct Wrapper(String);
///
/// let value = vec![Wrapper("hello".to_owned()), Wrapper("world".to_owned())];
/// assert_eq!(
///     coerce_ref::<_, Vec<String>>(&value).join(" "),
///     "hello world"
/// );
/// let mut value = coerce::<_, Vec<String>>(value);
/// assert_eq!(value.pop(), Some("world".to_owned()));
/// ```
///
/// Another involves containers:
///
/// ```
/// use gazebo::coerce::{Coerce, coerce_ref};
/// # #[derive(Coerce)]
/// # #[repr(transparent)]
/// # struct Wrapper(String);
/// #[derive(Coerce)]
/// #[repr(C)]
/// struct Container<T>(i32, T);
///
/// let value = Container(20, Wrapper("twenty".to_owned()));
/// assert_eq!(
///     coerce_ref::<_, Container<String>>(&value).1,
///     "twenty"
/// );
/// ```
///
/// If you only need [`coerce_ref`] on newtypes, then the [`ref-cast` crate](https://crates.io/crates/ref-cast)
/// provides that, along with automatic derivations (no `unsafe` required).
pub unsafe trait Coerce<To: ?Sized> {}
/// A marker trait such that the existence of `From: CoerceKey<To>` implies
/// that `From` can be treated as `To` without any data manipulation.
/// Furthermore, above and beyond [`Coerce`], any provided [`Hash`](std::hash::Hash),
/// [`Eq`], [`PartialEq`], [`Ord`] and [`PartialOrd`] traits must give identical results
/// on the `From` and `To` values.
///
/// This trait is mostly expected to be a requirement for the keys of associative-map
/// containers, hence the `Key` in the name.
pub unsafe trait CoerceKey<To: ?Sized>: Coerce<To> {}
// Coercion through shared references.
unsafe impl<'a, From: ?Sized, To: ?Sized> Coerce<&'a To> for &'a From where From: Coerce<To> {}
unsafe impl<'a, From: ?Sized, To: ?Sized> CoerceKey<&'a To> for &'a From where From: CoerceKey<To> {}
// Slices, Vec and Box: element-wise coercion preserves layout.
unsafe impl<From, To> Coerce<[To]> for [From] where From: Coerce<To> {}
unsafe impl<From, To> CoerceKey<[To]> for [From] where From: CoerceKey<To> {}
unsafe impl<From, To> Coerce<Vec<To>> for Vec<From> where From: Coerce<To> {}
unsafe impl<From, To> CoerceKey<Vec<To>> for Vec<From> where From: CoerceKey<To> {}
unsafe impl<From: ?Sized, To: ?Sized> CoerceKey<Box<To>> for Box<From> where From: CoerceKey<To> {}
unsafe impl<From: ?Sized, To: ?Sized> Coerce<Box<To>> for Box<From> where From: Coerce<To> {}
// Hash containers require CoerceKey on keys so hashing/equality agree
// across the coercion.
// NOTE(review): no CoerceKey instances exist for HashSet/HashMap themselves.
unsafe impl<From, To> Coerce<HashSet<To>> for HashSet<From> where From: CoerceKey<To> {}
unsafe impl<FromK, FromV, ToK, ToV> Coerce<HashMap<ToK, ToV>> for HashMap<FromK, FromV>
where
    FromK: CoerceKey<ToK>,
    FromV: Coerce<ToV>,
{
}
// 1- and 2-tuples, element-wise.
unsafe impl<From1: Coerce<To1>, To1> Coerce<(To1,)> for (From1,) {}
unsafe impl<From1: CoerceKey<To1>, To1> CoerceKey<(To1,)> for (From1,) {}
unsafe impl<From1: Coerce<To1>, From2: Coerce<To2>, To1, To2> Coerce<(To1, To2)>
    for (From1, From2)
{
}
unsafe impl<From1: CoerceKey<To1>, From2: CoerceKey<To2>, To1, To2> CoerceKey<(To1, To2)>
    for (From1, From2)
{
}
// We can't define a blanket `Coerce<T> for T` because that conflicts with the specific traits above.
// Therefore, we define instances where we think they might be useful, rather than trying to do every concrete type.
unsafe impl Coerce<String> for String {}
unsafe impl CoerceKey<String> for String {}
unsafe impl Coerce<str> for str {}
unsafe impl CoerceKey<str> for str {}
unsafe impl Coerce<()> for () {}
unsafe impl CoerceKey<()> for () {}
/// Safely convert between types which have a `Coerce` relationship.
/// Often the second type argument will need to be given explicitly,
/// e.g. `coerce::<_, ToType>(x)`.
///
/// # Panics
/// Panics if the two types do not have an identical memory `Layout`
/// (size and alignment), which would make the transmute unsound.
pub fn coerce<From, To>(x: From) -> To
where
    From: Coerce<To>,
{
    // Defence-in-depth: the Coerce instance already promises matching
    // layouts; verify at runtime before transmuting.
    assert_eq!(Layout::new::<From>(), Layout::new::<To>());
    unsafe { transmute_unchecked(x) }
}
/// Safely convert between types which have a `Coerce` relationship.
/// Often the second type argument will need to be given explicitly,
/// e.g. `coerce_ref::<_, ToType>(x)`.
///
/// # Panics
/// Panics if the two types do not have an identical memory `Layout`.
pub fn coerce_ref<From, To>(x: &From) -> &To
where
    From: Coerce<To>,
{
    assert_eq!(Layout::new::<From>(), Layout::new::<To>());
    unsafe { cast::ptr(x) }
}
#[cfg(test)]
mod test {
    use super::*;
    // The derive macro refers to the crate as `gazebo`; alias it for tests.
    use crate as gazebo;
    #[test]
    fn test_ptr_coerce() {
        // Coercion may shorten a reference lifetime inside a tuple.
        fn f<'v>(x: (&'static str,)) -> (&'v str,) {
            coerce(x)
        }
        let x = "test".to_owned();
        assert_eq!(f(("test",)), (x.as_str(),))
    }
    #[test]
    fn test_coerce_lifetime() {
        // Derived Coerce works on a #[repr(transparent)] newtype over a slice.
        #[derive(Coerce)]
        #[repr(transparent)]
        struct NewtypeWithLifetime<'v>(&'v [usize]);
        let newtype = NewtypeWithLifetime(&[1, 2]);
        assert_eq!(&[1, 2], coerce(newtype))
    }
}
|
use cookie_factory::GenError;
use bytes::BytesMut;
use futures::{Future, Stream};
use nom::{IResult, Offset};
use std::io;
use std::iter::repeat;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use tokio;
use tokio::net::{TcpListener, TcpStream};
use tokio_io::IoFuture;
use tokio_io::codec::{Decoder, Encoder, Framed};
use tokio_timer::Deadline;
use crypto::{Aes256, SigningPrivateKey};
use data::{I2PString, RouterIdentity, RouterInfo};
use i2np::Message;
mod frame;
mod handshake;
lazy_static! {
    // Transport style identifier used to look up NTCP addresses in a RouterInfo.
    pub static ref NTCP_STYLE: I2PString = I2PString::new("NTCP");
}
// Max NTCP message size is 16kB
const NTCP_MTU: usize = 16384;
//
// Message transport
//
/// A decoded NTCP frame.
pub enum Frame {
    // A regular I2NP message.
    Standard(Message),
    // Clock-synchronization frame carrying a timestamp.
    TimeSync(u32),
}
use std::fmt;
impl fmt::Debug for Frame {
    /// Human-readable frame tag; message payloads are intentionally omitted.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        let text = match self {
            Frame::Standard(_) => "Standard message".to_string(),
            Frame::TimeSync(ts) => format!("Timesync ({})", ts),
        };
        text.fmt(formatter)
    }
}
/// Framed codec for an established NTCP session.
pub struct Codec {
    // Session AES-256 state; frames are encrypted/decrypted in place.
    aes: Aes256,
    // Number of bytes at the front of the read buffer already decrypted.
    decrypted: usize,
}
impl Decoder for Codec {
    type Item = Frame;
    type Error = io::Error;
    /// Decrypts newly received bytes in place, then tries to parse one
    /// frame from the decrypted prefix of `buf`.
    fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<Frame>> {
        // Decrypt message in-place; `self.decrypted` tracks how many bytes
        // at the front of `buf` are already plaintext.
        match self.aes.decrypt_blocks(&mut buf[self.decrypted..]) {
            Some(end) => self.decrypted += end,
            // Less than one cipher block available — wait for more data.
            None => return Ok(None),
        };
        // Parse a frame
        let (consumed, f) = match frame::frame(&buf[0..self.decrypted]) {
            // Frame incomplete: keep the bytes and retry on the next read.
            IResult::Incomplete(_) => return Ok(None),
            IResult::Error(e) => {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("parse error: {:?}", e),
                ))
            }
            // `offset` converts the remaining-input slice into a byte count.
            IResult::Done(i, frame) => (buf.offset(i), frame),
        };
        // Drop the consumed bytes and keep the plaintext counter in sync.
        buf.split_to(consumed);
        self.decrypted -= consumed;
        Ok(Some(f))
    }
}
impl Encoder for Codec {
    type Item = Frame;
    type Error = io::Error;
    /// Serializes `frame` at the end of `buf` and encrypts it in place.
    fn encode(&mut self, frame: Frame, buf: &mut BytesMut) -> io::Result<()> {
        let start = buf.len();
        // Reserve a full MTU of zeroed scratch space for serialization.
        buf.extend(repeat(0).take(NTCP_MTU));
        match frame::gen_frame((buf, start), &frame).map(|tup| tup.1) {
            Ok(sz) => {
                // Drop the unused tail of the scratch space.
                buf.truncate(sz);
                // Encrypt message in-place
                match self.aes.encrypt_blocks(&mut buf[start..]) {
                    // The serialized frame must cover whole cipher blocks.
                    Some(end) if start + end == sz => Ok(()),
                    _ => Err(io::Error::new(
                        io::ErrorKind::InvalidData,
                        "invalid serialization",
                    )),
                }
            }
            Err(e) => match e {
                GenError::BufferTooSmall(sz) => Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    format!("message ({}) larger than MTU ({})", sz - start, NTCP_MTU),
                )),
                GenError::InvalidOffset
                | GenError::CustomError(_)
                | GenError::NotYetImplemented => Err(io::Error::new(
                    io::ErrorKind::InvalidData,
                    "could not generate",
                )),
            },
        }
    }
}
//
// Connection management engine
//
/// NTCP connection-management engine: owns listening and dialing.
pub struct Engine;
impl Engine {
    pub fn new() -> Self {
        Engine
    }
    /// Accepts inbound NTCP connections on `addr`, performing the inbound
    /// handshake and then logging every received frame.
    ///
    /// NOTE(review): `TcpListener::bind(addr).unwrap()` panics instead of
    /// propagating the error through the returned IoFuture.
    pub fn listen(
        &self,
        own_ri: RouterIdentity,
        own_key: SigningPrivateKey,
        addr: &SocketAddr,
    ) -> IoFuture<()> {
        // Bind to the address
        let listener = TcpListener::bind(addr).unwrap();
        // For each incoming connection:
        Box::new(listener.incoming().for_each(move |conn| {
            info!("Incoming connection!");
            // Execute the handshake
            let conn = handshake::HandshakeTransport::<
                TcpStream,
                handshake::InboundHandshakeCodec,
                handshake::IBHandshakeState,
            >::listen(conn, own_ri.clone(), own_key.clone());
            // Once connected:
            let process_conn = conn.and_then(|conn| {
                info!("Connection established!");
                // For every message received:
                conn.for_each(|frame| {
                    debug!("Received frame: {:?}", frame);
                    // TODO: Do something
                    Ok(())
                })
            });
            // Detach the per-connection task; its errors are discarded.
            tokio::spawn(process_conn.map_err(|_| ()));
            Ok(())
        }))
    }
    /// Dials `peer_ri`'s NTCP address, runs the outbound handshake, and
    /// yields a framed transport; the whole attempt times out after 10s.
    pub fn connect(
        &self,
        own_ri: RouterIdentity,
        own_key: SigningPrivateKey,
        peer_ri: RouterInfo,
    ) -> IoFuture<Framed<TcpStream, Codec>> {
        // TODO return error if there are no valid NTCP addresses (for some reason)
        let addr = peer_ri.address(&NTCP_STYLE).unwrap().addr().unwrap();
        // Connect to the peer
        // Return a transport ready for sending and receiving Frames
        // The layer above will convert I2NP packets to Frames
        // (or should the Engine handle timesync packets itself?)
        let transport = Box::new(TcpStream::connect(&addr).and_then(|socket| {
            handshake::HandshakeTransport::<
                TcpStream,
                handshake::OutboundHandshakeCodec,
                handshake::OBHandshakeState,
            >::connect(socket, own_ri, own_key, peer_ri.router_id)
        }));
        // Add a timeout
        Box::new(
            Deadline::new(transport, Instant::now() + Duration::new(10, 0))
                .map_err(|e| io::Error::new(io::ErrorKind::Other, e)),
        )
    }
}
|
pub mod runner;
//use puck_core::{HashMap, Tick};
use puck_core::app::{App};
use puck_core::event::*;
use input::Input;
use audio::SoundRender;
//use std::hash::Hash;
//use std::fmt::Debug;
use std::collections::BTreeMap as Map;
use {RenderTick, Dimensions};
use render::gfx::{OpenGLRenderer};
// - abstract trait of EventSink?
// - how do we manage identifiers? ... across kinds?
// - how do we determine viewability of render events?
// - initial state?
// - notions of client identity?
// - move entity state to tree map ...
// how does a server request/force a change to something it doesn't own? ... how does this affect ordering?
// how does client and server negotiate over player location? In PUBG, how do we put the player in the plane (with location being client side)
// "route", who's the owner of this entity
// "visibility" server notion foreach client
/// Window/renderer settings supplied at startup.
#[derive(Clone)]
pub struct RenderSettings {
    pub dimensions: (u32, u32),
    pub vsync: bool,
    pub title: String,
}
/// An `App` that also renders: adds input handling, render-event handling
/// and a per-frame render callback on top of the core app behaviour.
pub trait RenderedApp : App {
    /// Client-side rendering state, separate from the shared entity state.
    type RenderState;
    /// Translate raw input into app events pushed onto `sink`.
    fn handle_input(input:&Input, dimensions: &Dimensions, entities: &Map<Self::Id, Self::Entity>, sink: &mut Sink<Event<Self::Id, Self::Entity, Self::EntityEvent, Self::RenderEvent>>);
    /// Apply a render-only event to the render state.
    fn handle_render_event(event: &Self::RenderEvent, render_state: &mut Self::RenderState);
    /// Draw one frame and return the sounds to play for it.
    fn render(time: RenderTick, dimensions: &Dimensions, entities:&Map<Self::Id, Self::Entity>, render_state: &mut Self::RenderState, renderer: &mut OpenGLRenderer) -> SoundRender;
}
|
use std::sync::Arc;
use num;
use crate::prelude::*;
use super::Material;
use crate::interaction::SurfaceInteraction;
use crate::bxdf::{ Bsdf, LambertianReflection, TransportMode };
use crate::texture::Texture;
/// Matte (diffuse) material driven by textures: `kd` is the diffuse
/// reflectivity, `sigma` a roughness parameter (clamped to [0, 90] at
/// evaluation time), and `bump` an optional bump map.
#[derive(Clone, Debug)]
pub struct MatteMaterial {
    kd: Arc<dyn Texture<Spectrum> + Send + Sync>,
    sigma: Arc<dyn Texture<Float> + Send + Sync>,
    bump: Option<Arc<dyn Texture<Float> + Send + Sync>>,
}
impl MatteMaterial {
    /// Bundles the three driving textures; no validation happens here.
    pub fn new(kd: Arc<dyn Texture<Spectrum> + Send + Sync>, sigma: Arc<dyn Texture<Float> + Send + Sync>, bump: Option<Arc<dyn Texture<Float> + Send + Sync>>) -> Self {
        Self { kd, sigma, bump }
    }
}
impl Material for MatteMaterial {
    /// Builds the BSDF for an intersection and stores it on the returned
    /// interaction.
    ///
    /// NOTE(review): the `'a` lifetime is not declared on this method or
    /// impl — presumably it comes from the `Material` trait definition;
    /// confirm this compiles as written.
    fn compute_scattering_functions(&self, isect: SurfaceInteraction<'a>, _arena: &(), _mode: TransportMode, _allow_multiple_lobes: bool) -> SurfaceInteraction<'a> {
        // Apply bump mapping first, if a bump texture is present.
        let isect = match &self.bump {
            Some(bump) => super::bump(&isect, bump),
            None => isect,
        };
        let mut isect = isect.clone();
        let mut bsdf = Bsdf::new(&isect, None);
        // Diffuse reflectivity, clamped to its valid range.
        let r = self.kd.evaluate(&isect).clamp(None, None);
        let sig = num::clamp(self.sigma.evaluate(&isect), float(0.0), float(90.0));
        if !r.is_black() {
            // Exact float compare: sigma == 0 selects the pure Lambertian lobe.
            if sig == 0.0 {
                bsdf.add(Arc::new(LambertianReflection::new(r)));
            } else {
                // Rough-matte lobe for sigma > 0 is not implemented yet.
                unimplemented!()
            }
        }
        isect.bsdf = Some(bsdf);
        isect
    }
}
|
//! Process Stack Pointer
#[cfg(cortex_m)]
use core::arch::asm;
/// Reads the CPU register
///
/// Returns the current value of the Process Stack Pointer (PSP).
#[cfg(cortex_m)]
#[inline]
pub fn read() -> u32 {
    let r;
    // SAFETY: MRS from PSP only reads a core register; the nomem/nostack/
    // preserves_flags options match that behaviour.
    unsafe { asm!("mrs {}, PSP", out(reg) r, options(nomem, nostack, preserves_flags)) };
    r
}
/// Writes `bits` to the CPU register
///
/// # Safety
/// Changing PSP moves the process stack; the caller must ensure `bits`
/// points at valid, suitably aligned stack memory.
#[cfg(cortex_m)]
#[inline]
pub unsafe fn write(bits: u32) {
    // See comment on msp_w. Unlike MSP, there are legitimate use-cases for modifying PSP
    // if MSP is currently being used as the stack pointer.
    asm!("msr PSP, {}", in(reg) bits, options(nomem, nostack, preserves_flags));
}
|
use anyhow::{anyhow, Context};
use flate2::bufread::GzEncoder;
use flate2::Compression;
use log::{debug, info, warn};
use notify::{watcher, DebouncedEvent, RecursiveMode, Watcher};
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::Path;
use std::sync::mpsc::channel;
use std::time::Duration;
/// Credentials and endpoint for uploading save files to the API.
pub struct Client {
    pub username: String,
    pub api_key: String,
    pub api_url: String,
}
impl Client {
    /// Uploads an already-zipped save file as-is with
    /// `Content-Type: application/zip`, streaming it from disk.
    fn upload_zip(&self, path: &Path) -> anyhow::Result<()> {
        let file = File::open(path).context("unable to open")?;
        let size = file.metadata().map(|m| m.len()).unwrap_or(0);
        let reader = BufReader::new(file);
        let resp = ureq::post(&self.api_url)
            .auth(&self.username, &self.api_key)
            .set("Content-Length", &size.to_string())
            .set("Content-Type", "application/zip")
            .send(reader);
        Self::check_response(resp)
    }

    /// Gzip-compresses a plaintext save in memory and uploads it with
    /// `Content-Encoding: gzip`.
    fn upload_txt(&self, path: &Path) -> anyhow::Result<()> {
        let file = File::open(path).context("unable to open")?;
        let reader = BufReader::new(file);
        let mut buffer = Vec::new();
        let mut gz = GzEncoder::new(reader, Compression::new(4));
        gz.read_to_end(&mut buffer).context("unable to compress")?;
        let resp = ureq::post(&self.api_url)
            .auth(&self.username, &self.api_key)
            .set("Content-Encoding", "gzip")
            .send_bytes(&buffer);
        Self::check_response(resp)
    }

    /// Shared response handling for both upload paths: Ok on success,
    /// otherwise surface the server's error body. (Also fixes the "eror"
    /// typo the two copies of this logic previously carried.)
    fn check_response(resp: ureq::Response) -> anyhow::Result<()> {
        if resp.ok() {
            Ok(())
        } else {
            let err = resp
                .into_string()
                .context("unable to interpret error server response")?;
            Err(anyhow!("server responded with an error: {}", err))
        }
    }
}
/// Watches `watch_dir` recursively for save-file writes and uploads each one.
///
/// Runs forever; it only returns early (with an error) if the watcher
/// cannot be created or attached. Individual upload failures are logged
/// and the loop continues.
pub fn core_loop(watch_dir: &Path, client: &Client) -> anyhow::Result<()> {
    let (tx, rx) = channel();
    // Debounce rapid successive writes by five seconds.
    let mut watcher = watcher(tx, Duration::from_secs(5))
        .with_context(|| "unable to create file watcher".to_string())?;
    watcher
        .watch(watch_dir, RecursiveMode::Recursive)
        .with_context(|| format!("unable to watch: {}", watch_dir.display()))?;
    info!("watching directory for save files: {}", watch_dir.display());
    log::logger().flush();
    loop {
        match rx.recv() {
            Ok(DebouncedEvent::Error(e, path)) => {
                if let Some(path) = path {
                    warn!("watch error on {}: {:?}", path.as_path().display(), e);
                } else {
                    warn!("watch error: {:?}", e);
                }
            }
            Ok(DebouncedEvent::Write(path)) | Ok(DebouncedEvent::Create(path)) => {
                // Only `.eu4` save files are of interest.
                if !path.as_path().extension().map_or(false, |x| x == "eu4") {
                    continue;
                }
                let path_display = path.as_path().display();
                info!("detected write: {}", path_display);
                match process_file(client, &path) {
                    Ok(_) => info!("successfully uploaded {}", path_display),
                    // Upload failures are logged, not fatal.
                    Err(e) => warn!("{:?}", e),
                }
            }
            Ok(event) => {
                debug!("{:?}", event);
                continue;
            }
            Err(e) => warn!("watch error: {:?}", e),
        }
        // Flush after each event so logs are visible promptly.
        log::logger().flush();
    }
}
/// Dispatches a save file to the right upload path based on its first
/// four bytes: the zip local-file-header signature selects the zip path,
/// the `EU4t` header the plaintext path.
fn process_file(client: &Client, path: &Path) -> anyhow::Result<()> {
    let path_display = path.display();
    let magic = {
        let mut buffer = [0; 4];
        let mut file =
            File::open(path).with_context(|| format!("unable to open: {}", path_display))?;
        file.read_exact(&mut buffer)
            .with_context(|| format!("unable to read: {}", path_display))?;
        buffer
    };
    match magic {
        // "PK\x03\x04": zip archive signature.
        [0x50, 0x4b, 0x03, 0x04] => client
            .upload_zip(&path)
            .with_context(|| format!("unable to upload zip: {}", path_display)),
        // "EU4t": plaintext save header.
        [b'E', b'U', b'4', b't'] => client
            .upload_txt(&path)
            .with_context(|| format!("unable to upload txt: {}", path_display)),
        x => Err(anyhow!(
            "unexpected file signature: {:?} - {}",
            x,
            path_display
        )),
    }
}
|
extern crate memmap;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::fs::File;
use memmap::MmapMut;
mod sbt_link;
//mod sbt_methods;
pub use sbt_link::Link;
/// Memory-mapped links store; `links` points directly into the mapping.
struct LinksDB<T> {
    // Raw pointer to the first byte of `mmap`, viewed as an array of links.
    links: *mut Link<T>,
    allocated_links: T,
    reserved_links: T,
    free_links: T,
    first_free_link: T,
    last_free_link: T,
    block_size: T,
    db_file: File,
    mmap: MmapMut
}
impl<T> LinksDB<T> {
    /// Opens (creating if needed) the database file at `path` and maps it
    /// into memory, caching a pointer to the start of the mapping.
    ///
    /// NOTE(review): several hazards here —
    /// - `self.mmap.first().unwrap()` panics if the file is empty;
    /// - reassigning `self.mmap` unmaps any previous mapping, leaving an
    ///   earlier `self.links` pointer dangling;
    /// - `transmute::<&u8, *mut Link<T>>` launders a shared borrow into a
    ///   mutable pointer; `mmap.as_mut_ptr()` would express this intent
    ///   without the transmute.
    fn open(&mut self, path: PathBuf) {
        self.db_file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(&path)
            .ok()
            .unwrap();
        // SAFETY-relevant: mapping a file mutably; the pointer below is
        // only valid while `self.mmap` stays alive and unmoved.
        unsafe {
            self.mmap = MmapMut::map_mut(&self.db_file)
                .ok()
                .unwrap();
            let mmap_ptr = self.mmap
                .first()
                .unwrap();
            self.links = std::mem::transmute::<&u8, *mut Link<T>>(mmap_ptr);
        };
    }
}
|
//https://www.codewars.com/kata/566fc12495810954b1000030
fn nb_dig(n: i32, d: i32) -> i32 {
let digit = d.to_string().chars().next().unwrap();
(1..=n).fold(0,|count,n| count + (n * n).to_string().chars().filter(|i| *i == digit).collect::<Vec<char>>().len() as i32)
}
fn main(){
    // Expected outputs from the kata's examples.
    println!("{}",nb_dig(10,1)); // 4
    println!("{}",nb_dig(25,1)); // 11
}
/// One step of a 32-bit xorshift PRNG using the shift triple (13, 17, 15).
///
/// NOTE(review): Marsaglia's canonical xorshift32 uses (13, 17, 5) as its
/// final shift triple — confirm the trailing shift of 15 is intentional
/// before relying on full-period behaviour.
pub fn xorshift32(seed: u32) -> u32 {
    let mut state = seed;
    state ^= state << 13;
    state ^= state >> 17;
    state ^ (state << 15)
}
|
use std::fmt;
use self::Msg::*;
use super::token::Position;
/// Every diagnostic the compiler front end can report: lexer, parser and
/// semantic-analysis errors alike. `message()` (in the impl below) renders
/// each variant as user-facing text.
///
/// NOTE(review): `Superfluousimport` breaks the UpperCamelCase convention
/// (`SuperfluousImport`); renaming would touch every user, so it is only
/// flagged here.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Msg {
    Unimplemented,
    // Name-resolution errors.
    UnknownClass(String),
    UnknownType(String),
    UnknownIdentifier(String),
    UnknownStruct(String),
    UnknownFunction(String),
    UnknownField(String, String),
    UnknownMethod(String, String, Vec<String>),
    UnknownStaticMethod(String, String, Vec<String>),
    UnknownCtor(String, Vec<String>),
    MethodExists(String, String, Position),
    IncompatibleWithNil(String),
    // Redefinition/shadowing errors.
    IdentifierExists(String),
    ShadowFunction(String),
    ShadowParam(String),
    ShadowClass(String),
    ShadowStruct(String),
    ShadowTrait(String),
    ShadowField(String),
    ShadowGlobal(String),
    ShadowConst(String),
    // Type-check errors.
    VarNeedsTypeInfo(String),
    ParamTypesIncompatible(String, Vec<String>, Vec<String>),
    WhileCondType(String),
    IfCondType(String),
    ReturnType(String, String),
    LvalueExpected,
    AssignType(String, String, String),
    AssignField(String, String, String, String),
    UnOpType(String, String),
    BinOpType(String, String, String),
    ConstValueExpected,
    OutsideLoop,
    NoReturnValue,
    MainNotFound,
    WrongMainDefinition,
    ThisUnavailable,
    SelfTypeUnavailable,
    SuperUnavailable,
    SuperNeedsMethodCall,
    ReferenceTypeExpected(String),
    ThrowNil,
    CatchOrFinallyExpected,
    LetMissingInitialization,
    LetReassigned,
    UnderivableType(String),
    CycleInHierarchy,
    SuperfluousOverride(String),
    Superfluousimport(String),
    MissingOverride(String),
    ThrowsDifference(String),
    MethodNotOverridable(String),
    TypesIncompatible(String, String),
    ReturnTypeMismatch(String, String),
    UnresolvedInternal,
    // Lexer errors.
    UnclosedComment,
    UnknownChar(char),
    UnclosedChar,
    UnclosedString,
    NumberOverflow(String),
    // Parser errors.
    ExpectedClass(String),
    ExpectedFactor(String),
    ExpectedToken(String, String),
    ExpectedTopLevelElement(String),
    ExpectedTrait(String),
    ExpectedType(String),
    ExpectedIdentifier(String),
    MisplacedElse,
    IoError,
    ExpectedClassElement(String),
    RedundantModifier(String),
    MisplacedModifier(String),
    InvalidEscapeSequence(char),
    MissingFctBody,
    FctCallExpected,
    ThisOrSuperExpected(String),
    NoSuperDelegationWithPrimaryCtor(String),
    NoSuperClass(String),
    RecursiveStructure,
    // Trait/generics errors.
    TraitMethodWithBody,
    TryNeedsCall,
    TryCallNonThrowing,
    ThrowingCallWithoutTry,
    TypeParamsExpected,
    TypeParamNameNotUnique(String),
    StaticMethodNotInTrait(String, String, Vec<String>),
    MethodNotInTrait(String, String, Vec<String>),
    StaticMethodMissingFromTrait(String, String, Vec<String>),
    MethodMissingFromTrait(String, String, Vec<String>),
    WrongNumberTypeParams(usize, usize),
    ClassExpected(String),
    ClassExpectedAsTypeParam,
    AssignmentToConst,
    BoundExpected,
    NoTypeParamsExpected,
    MultipleClassBounds,
    DuplicateTraitBound,
    ClassBoundNotSatisfied(String, String),
    TraitBoundNotSatisfied(String, String),
    // Abstract-class errors.
    AbstractMethodNotInAbstractClass,
    AbstractMethodWithImplementation,
    NewAbstractClass,
    MissingAbstractOverride(String, String),
    ModifierNotAllowedForStaticMethod(String),
    GlobalInitializerNotSupported,
    MakeIteratorReturnType(String),
    UnknownStructField(String, String),
    StructFieldNotInitialized(String, String),
    Custom(String),
}
impl Msg {
    /// Renders this diagnostic as a human-readable English message.
    ///
    /// Variants carrying data interpolate it into the text; constant
    /// variants return a fixed string.
    pub fn message(&self) -> String {
        match *self {
            // A `String` already is the message; no need to round-trip
            // through `format!`.
            Custom(ref msg) => msg.clone(),
            Unimplemented => "feature not implemented yet.".into(),
            UnknownClass(ref name) => format!("class `{}` does not exist.", name),
            UnknownType(ref name) => format!("type `{}` does not exist.", name),
            UnknownIdentifier(ref name) => format!("unknown identifier `{}`.", name),
            UnknownStruct(ref name) => format!("unknown struct `{}`.", name),
            UnknownFunction(ref name) => format!("unknown function `{}`", name),
            UnknownMethod(ref cls, ref name, ref args) => {
                let args = args.join(", ");
                format!(
                    "no method with definition `{}({})` in class `{}`.",
                    name, args, cls
                )
            }
            UnknownStaticMethod(ref cls, ref name, ref args) => {
                let args = args.join(", ");
                format!("no static method `{}::{}({})`.", cls, name, args)
            }
            UnknownCtor(ref name, ref args) => {
                let args = args.join(", ");
                format!("no ctor with definition `{}({})`.", name, args)
            }
            MethodExists(ref cls, ref name, pos) => format!(
                "method with name `{}` already exists in class `{}` at line {}.",
                name, cls, pos
            ),
            IncompatibleWithNil(ref ty) => format!("cannot assign `null` to type `{}`.", ty),
            UnknownField(ref field, ref ty) => {
                format!("unknown field `{}` for type `{}`", field, ty)
            }
            IdentifierExists(ref name) => format!("can not redefine identifier `{}`.", name),
            ShadowFunction(ref name) => format!("can not shadow function `{}`.", name),
            ShadowParam(ref name) => format!("can not shadow param `{}`.", name),
            ShadowClass(ref name) => format!("can not shadow class `{}`.", name),
            ShadowStruct(ref name) => format!("can not shadow struct `{}`.", name),
            ShadowTrait(ref name) => format!("can not shadow trait `{}`.", name),
            ShadowField(ref name) => format!("field with name `{}` already exists.", name),
            ShadowGlobal(ref name) => format!("can not shadow global variable `{}`.", name),
            ShadowConst(ref name) => format!("can not shadow const `{}`", name),
            VarNeedsTypeInfo(ref name) => format!(
                "variable `{}` needs either type declaration or expression.",
                name
            ),
            ParamTypesIncompatible(ref name, ref def, ref expr) => {
                let def = def.join(", ");
                let expr = expr.join(", ");
                format!(
                    "function `{}({})` cannot be called as `{}({})`",
                    name, def, name, expr
                )
            }
            WhileCondType(ref ty) => {
                format!("`while` expects condition of type `bool` but got `{}`.", ty)
            }
            IfCondType(ref ty) => {
                format!("`if` expects condition of type `bool` but got `{}`.", ty)
            }
            ReturnType(ref def, ref expr) => format!(
                "`return` expects value of type `{}` but got `{}`.",
                def, expr
            ),
            LvalueExpected => "lvalue expected for assignment".into(),
            AssignType(ref name, ref def, ref expr) => format!(
                "cannot assign `{}` to variable `{}` of type `{}`.",
                expr, name, def
            ),
            AssignField(ref name, ref cls, ref def, ref expr) => format!(
                "cannot assign `{}` to field `{}`.`{}` of type `{}`.",
                expr, cls, name, def
            ),
            UnOpType(ref op, ref expr) => format!(
                "unary operator `{}` can not handle value of type `{} {}`.",
                op, op, expr
            ),
            BinOpType(ref op, ref lhs, ref rhs) => format!(
                "binary operator `{}` can not handle expression of type `{} {} {}`",
                op, lhs, op, rhs
            ),
            ConstValueExpected => "constant value expected".into(),
            OutsideLoop => "statement only allowed inside loops".into(),
            NoReturnValue => "function does not return a value in all code paths".into(),
            MainNotFound => "no `main` function found in the program".into(),
            WrongMainDefinition => "`main` function has wrong definition".into(),
            ThisUnavailable => "`self` can only be used in methods not functions".into(),
            SelfTypeUnavailable => "`Self` can only be used in traits.".into(),
            SuperUnavailable => {
                "`super` only available in methods of classes with parent class".into()
            }
            SuperNeedsMethodCall => "`super` only allowed in method calls".into(),
            ReferenceTypeExpected(ref name) => format!("`{}` is not a reference type.", name),
            ThrowNil => "throwing `null` is not allowed.".into(),
            CatchOrFinallyExpected => "`try` without `catch` or `finally`.".into(),
            LetMissingInitialization => "`let` binding is missing initialization.".into(),
            LetReassigned => "`let` binding cannot be reassigned.".into(),
            UnderivableType(ref name) => format!("type `{}` cannot be used as super class.", name),
            CycleInHierarchy => "cycle in type hierarchy detected.".into(),
            // The next four arms previously discarded the method name and
            // returned the raw string with a literal, un-interpolated `{}`
            // in it; they now format the carried name into the message.
            SuperfluousOverride(ref name) => format!(
                "method `{}` uses modifier `override` without overriding a function.",
                name
            ),
            MissingOverride(ref name) => {
                format!("method `{}` is missing modifier `override`.", name)
            }
            Superfluousimport(ref name) => format!(
                "method `{}` uses modifier `import` but class allows no subclasses.",
                name
            ),
            ThrowsDifference(ref name) => format!(
                "use of `throws` in method `{}` needs to match super class",
                name
            ),
            MethodNotOverridable(ref name) => {
                format!("method `{}` in super class not overridable.", name)
            }
            TypesIncompatible(ref na, ref nb) => {
                format!("types `{}` and `{}` incompatible.", na, nb)
            }
            ReturnTypeMismatch(ref fct, ref sup) => {
                format!("return types `{}` and `{}` do not match.", fct, sup)
            }
            UnresolvedInternal => "unresolved internal.".into(),
            MisplacedElse => "misplaced else.".into(),
            ExpectedToken(ref exp, ref got) => format!("expected {} but got {}.", exp, got),
            NumberOverflow(ref ty) => format!("number does not fit into type {}.", ty),
            ExpectedClass(ref cls) => format!("expected class name but got {}.", cls),
            ExpectedFactor(ref got) => format!("factor expected but got {}.", got),
            ExpectedTrait(ref trt) => format!("expected trait name but got {}.", trt),
            ExpectedType(ref got) => format!("type expected but got {}.", got),
            ExpectedIdentifier(ref tok) => format!("identifier expected but got {}.", tok),
            MisplacedModifier(ref modifier) => format!("misplaced modifier `{}`.", modifier),
            ExpectedTopLevelElement(ref token) => {
                format!("expected function or class but got {}.", token)
            }
            ExpectedClassElement(ref token) => {
                format!("field or method expected but got {}.", token)
            }
            RedundantModifier(ref token) => format!("redundant modifier {}.", token),
            UnknownChar(ch) => format!("unknown character {} (codepoint {}).", ch, ch as usize),
            UnclosedComment => "unclosed comment.".into(),
            InvalidEscapeSequence(ch) => format!("unknown escape sequence `\\{}`.", ch),
            UnclosedString => "unclosed string.".into(),
            UnclosedChar => "unclosed char.".into(),
            IoError => "error reading from file.".into(),
            MissingFctBody => "missing function body.".into(),
            FctCallExpected => "function call expected".into(),
            ThisOrSuperExpected(ref val) => format!("`self` or `super` expected but got {}.", val),
            NoSuperDelegationWithPrimaryCtor(ref name) => format!(
                "no `super` delegation allowed for ctor in class {}, because class has \
                 primary ctor.",
                name
            ),
            NoSuperClass(ref name) => format!("class `{}` does not have super class.", name),
            RecursiveStructure => "recursive structure is not allowed.".into(),
            TraitMethodWithBody => "trait method is not allowed to have definition".into(),
            TryNeedsCall => "`try` expects function or method call.".into(),
            TryCallNonThrowing => "given function or method call for `try` does not throw.".into(),
            ThrowingCallWithoutTry => {
                "function or method call that is able to throw, needs `try`.".into()
            }
            TypeParamsExpected => "type params expected.".into(),
            TypeParamNameNotUnique(ref name) => format!("type param `{}` name already used.", name),
            StaticMethodNotInTrait(ref trait_name, ref mtd_name, ref args) => {
                let args = args.join(", ");
                format!(
                    "trait `{}` does not define static method `{}({})`.",
                    trait_name, mtd_name, args
                )
            }
            MethodNotInTrait(ref trait_name, ref mtd_name, ref args) => {
                let args = args.join(", ");
                format!(
                    "trait `{}` does not define method `{}({})`.",
                    trait_name, mtd_name, args
                )
            }
            StaticMethodMissingFromTrait(ref trait_name, ref mtd_name, ref args) => {
                let args = args.join(", ");
                format!(
                    "trait `{}` defines static method `{}({})` but is missing in `impl`.",
                    trait_name, mtd_name, args
                )
            }
            MethodMissingFromTrait(ref trait_name, ref mtd_name, ref args) => {
                let args = args.join(", ");
                format!(
                    "trait `{}` defines method `{}({})` but is missing in `impl`.",
                    trait_name, mtd_name, args
                )
            }
            WrongNumberTypeParams(exp, actual) => {
                format!("expected {} type parameters but got {}.", exp, actual)
            }
            ClassExpected(ref name) => format!("`{}` is not a class.", name),
            ClassExpectedAsTypeParam => "class as type parameter expected.".into(),
            AssignmentToConst => "cannot assign to const variable.".into(),
            BoundExpected => "class or trait bound expected".into(),
            NoTypeParamsExpected => "no type params allowed".into(),
            MultipleClassBounds => "multiple class bounds not allowed".into(),
            DuplicateTraitBound => "duplicate trait bound".into(),
            ClassBoundNotSatisfied(ref name, ref xclass) => {
                format!("type `{}` not a subclass of `{}`.", name, xclass)
            }
            TraitBoundNotSatisfied(ref name, ref xtrait) => {
                format!("type `{}` does not implement trait `{}`.", name, xtrait)
            }
            AbstractMethodWithImplementation => "abstract methods cannot be implemented.".into(),
            AbstractMethodNotInAbstractClass => {
                "abstract methods only allowed in abstract classes.".into()
            }
            NewAbstractClass => "cannot create object of abstract class.".into(),
            // The fields are (class, method) but were interpolated into the
            // (method, class) slots of the message; order corrected.
            MissingAbstractOverride(ref cls, ref name) => format!(
                "missing override of abstract method `{}` in class `{}`.",
                name, cls
            ),
            ModifierNotAllowedForStaticMethod(ref modifier) => {
                format!("modifier `{}` not allowed for static method.", modifier)
            }
            GlobalInitializerNotSupported => {
                "global variables do not support initial assignment for now.".into()
            }
            MakeIteratorReturnType(ref ty) => format!(
                "makeIterator() returns `{}` which does not implement Iterator.",
                ty
            ),
            UnknownStructField(ref struc, ref field) => {
                format!("struct `{}` does not have field named `{}`.", struc, field)
            }
            StructFieldNotInitialized(ref struc, ref field) => {
                format!("field `{}` in struct `{}` not initialized.", field, struc)
            }
        }
    }
}
#[derive(Clone, Debug)]
/// A diagnostic (`Msg`) paired with the source position it was reported at.
pub struct MsgWithPos {
    /// The underlying diagnostic.
    pub msg: Msg,
    /// Source position the diagnostic refers to.
    pub pos: Position,
}
impl MsgWithPos {
    /// Creates a positioned diagnostic.
    pub fn new(pos: Position, msg: Msg) -> MsgWithPos {
        // Field-init shorthand replaces the redundant `pos: pos, msg: msg`.
        MsgWithPos { pos, msg }
    }
    /// Formats the diagnostic as `error at <pos>: <text>`.
    pub fn message(&self) -> String {
        format!("error at {}: {}", self.pos, self.msg.message())
    }
}
impl fmt::Display for MsgWithPos {
    /// Writes the same `error at <pos>: <text>` rendering produced by
    /// [`MsgWithPos::message`], delegating instead of duplicating the format
    /// string.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.write_str(&self.message())
    }
}
|
use backend::{Backend, BackendImpl};
use widget::Widget;
/// Thin newtype over the active backend's window type.
pub struct Window(<BackendImpl as Backend>::Window);
impl Widget for Window {
    type Builder = WindowBuilder;
    /// Builds the backend-specific window from `builder` and wraps it in the
    /// backend-agnostic `Window` newtype.
    fn build(builder: WindowBuilder) -> Self {
        Window(<BackendImpl as Backend>::Window::build(builder))
    }
}
#[derive(Default)]
/// Accumulates window configuration before the window is built.
pub struct WindowBuilder {
    // Window title; empty string by default (via `Default`).
    pub title_: String,
}
impl WindowBuilder {
    /// Sets the window title, consuming and returning the builder so calls
    /// can be chained.
    pub fn title<S: Into<String>>(self, title: S) -> Self {
        let mut builder = self;
        builder.title_ = title.into();
        builder
    }
}
|
use crate::core::*;
use crate::processing::*;
use core::ops::Neg;
use ndarray::prelude::*;
use num_traits::{cast::FromPrimitive, real::Real, Num, NumAssignOps};
use std::marker::Sized;
/// Runs the sobel operator on an image
pub trait SobelExt
where
    Self: Sized,
{
    /// Type of the outputs produced by `full_sobel`.
    type Output;
    /// Returns the magnitude output of the sobel - an image of only lines
    fn apply_sobel(&self) -> Result<Self, Error>;
    /// Returns the magnitude and rotation outputs for use in other algorithms
    /// like the Canny edge detector. Rotation is in radians.
    // NOTE(review): `apply_sobel` returns `Self` while `full_sobel` returns
    // `Self::Output` — confirm whether this asymmetry is intentional.
    fn full_sobel(&self) -> Result<(Self::Output, Self::Output), Error>;
}
/// Convolves `mat` with the horizontal and vertical sobel kernels and returns
/// the pair `(horizontal_derivative, vertical_derivative)`.
fn get_edge_images<T>(mat: &Array3<T>) -> Result<(Array3<T>, Array3<T>), Error>
where
    T: Copy + Clone + Num + NumAssignOps + Neg<Output = T> + FromPrimitive + Real,
{
    // Build the two 2-d sobel kernels once, then replicate each across every
    // channel of the input so the convolution covers all channels.
    let vertical: Array3<T> = SobelFilter::build_with_params(Orientation::Vertical).unwrap();
    let horizontal: Array3<T> = SobelFilter::build_with_params(Orientation::Horizontal).unwrap();
    let kernel_shape = (vertical.shape()[0], vertical.shape()[1], mat.shape()[2]);
    let h_kernel = Array3::<T>::from_shape_fn(kernel_shape, |(r, c, _)| horizontal[[r, c, 0]]);
    let v_kernel = Array3::<T>::from_shape_fn(kernel_shape, |(r, c, _)| vertical[[r, c, 0]]);
    let h_deriv = mat.conv2d(h_kernel.view())?;
    let v_deriv = mat.conv2d(v_kernel.view())?;
    Ok((h_deriv, v_deriv))
}
impl<T> SobelExt for Array3<T>
where
    T: Copy + Clone + Num + NumAssignOps + Neg<Output = T> + FromPrimitive + Real,
{
    type Output = Self;
    /// Gradient magnitude per element: sqrt(h^2 + v^2), clamped to <= 1.0.
    fn apply_sobel(&self) -> Result<Self, Error> {
        let (h_deriv, v_deriv) = get_edge_images(self)?;
        let h_deriv = h_deriv.mapv(|x| x.powi(2));
        let v_deriv = v_deriv.mapv(|x| x.powi(2));
        let mut result = h_deriv + v_deriv;
        result.mapv_inplace(|x| x.sqrt());
        // squash values above 1.0
        result.mapv_inplace(|x| if x > T::one() { T::one() } else { x });
        Ok(result)
    }
    /// Magnitude (clamped to <= 1.0) plus per-element rotation = atan(v/h).
    fn full_sobel(&self) -> Result<(Self::Output, Self::Output), Error> {
        let (h_deriv, v_deriv) = get_edge_images(self)?;
        let mut magnitude = h_deriv.mapv(|x| x.powi(2)) + v_deriv.mapv(|x| x.powi(2));
        magnitude.mapv_inplace(|x| x.sqrt());
        magnitude.mapv_inplace(|x| if x > T::one() { T::one() } else { x });
        // NOTE(review): element-wise atan(v/h) discards quadrant information
        // and divides by zero where h == 0; atan2-style handling may be
        // intended — confirm against the Canny consumer before changing.
        let mut rotation = v_deriv / h_deriv;
        rotation.mapv_inplace(|x| x.atan());
        Ok((magnitude, rotation))
    }
}
impl<T, C> SobelExt for Image<T, C>
where
    T: Copy + Clone + Num + NumAssignOps + Neg<Output = T> + FromPrimitive + Real,
    C: ColourModel,
{
    type Output = Array3<T>;
    /// Delegates to the `Array3` implementation on the underlying pixel data
    /// and re-wraps the result as an `Image`.
    fn apply_sobel(&self) -> Result<Self, Error> {
        let data = self.data.apply_sobel()?;
        Ok(Image::from_data(data))
    }
    /// Delegates to the `Array3` implementation; returns the raw arrays
    /// rather than wrapping them back into `Image`s.
    fn full_sobel(&self) -> Result<(Self::Output, Self::Output), Error> {
        self.data.full_sobel()
    }
}
|
// This file was generated by gir (https://github.com/gtk-rs/gir @ fbb95f4)
// from gir-files (https://github.com/gtk-rs/gir-files @ 77d1f70)
// DO NOT EDIT
use Error;
use SocketConnectable;
use TlsCertificateFlags;
use ffi;
use glib;
use glib::StaticType;
use glib::Value;
use glib::object::Downcast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std;
use std::boxed::Box as Box_;
use std::mem;
use std::mem::transmute;
use std::ptr;
// Generated glib wrapper: declares the `TlsCertificate` newtype over the
// GObject class pair and wires up its GType lookup function.
glib_wrapper! {
    pub struct TlsCertificate(Object<ffi::GTlsCertificate, ffi::GTlsCertificateClass>);
    match fn {
        get_type => || ffi::g_tls_certificate_get_type(),
    }
}
impl TlsCertificate {
    /// Creates a certificate from `file` via `g_tls_certificate_new_from_file`,
    /// translating the returned `GError` into `Error` on failure.
    pub fn new_from_file<P: AsRef<std::path::Path>>(file: P) -> Result<TlsCertificate, Error> {
        unsafe {
            // GLib out-parameter convention: `error` is set iff the call fails.
            let mut error = ptr::null_mut();
            let ret = ffi::g_tls_certificate_new_from_file(file.as_ref().to_glib_none().0, &mut error);
            if error.is_null() { Ok(from_glib_full(ret)) } else { Err(from_glib_full(error)) }
        }
    }
    /// Creates a certificate from separate certificate and key files via
    /// `g_tls_certificate_new_from_files`.
    pub fn new_from_files<P: AsRef<std::path::Path>, Q: AsRef<std::path::Path>>(cert_file: P, key_file: Q) -> Result<TlsCertificate, Error> {
        unsafe {
            let mut error = ptr::null_mut();
            let ret = ffi::g_tls_certificate_new_from_files(cert_file.as_ref().to_glib_none().0, key_file.as_ref().to_glib_none().0, &mut error);
            if error.is_null() { Ok(from_glib_full(ret)) } else { Err(from_glib_full(error)) }
        }
    }
    /// Creates a certificate from in-memory PEM `data`; the byte length is
    /// passed explicitly to the C function.
    pub fn new_from_pem(data: &str) -> Result<TlsCertificate, Error> {
        let length = data.len() as isize;
        unsafe {
            let mut error = ptr::null_mut();
            let ret = ffi::g_tls_certificate_new_from_pem(data.to_glib_none().0, length, &mut error);
            if error.is_null() { Ok(from_glib_full(ret)) } else { Err(from_glib_full(error)) }
        }
    }
    /// Creates a list of certificates from `file` via
    /// `g_tls_certificate_list_new_from_file`.
    pub fn list_new_from_file<P: AsRef<std::path::Path>>(file: P) -> Result<Vec<TlsCertificate>, Error> {
        unsafe {
            let mut error = ptr::null_mut();
            let ret = ffi::g_tls_certificate_list_new_from_file(file.as_ref().to_glib_none().0, &mut error);
            if error.is_null() { Ok(FromGlibPtrContainer::from_glib_full(ret)) } else { Err(from_glib_full(error)) }
        }
    }
}
/// Generated extension trait exposing `GTlsCertificate` methods, properties
/// and property-change signals on any `IsA<TlsCertificate>` type.
pub trait TlsCertificateExt {
    /// Returns the issuer certificate, if one is set.
    fn get_issuer(&self) -> Option<TlsCertificate>;
    #[cfg(any(feature = "v2_34", feature = "dox"))]
    /// Compares this certificate with `cert_two` (available since GLib 2.34).
    fn is_same(&self, cert_two: &TlsCertificate) -> bool;
    /// Verifies the certificate against an optional peer `identity` and an
    /// optional trusted CA, returning the set of validation-failure flags.
    fn verify<'a, 'b, P: IsA<SocketConnectable> + 'a, Q: Into<Option<&'a P>>, R: Into<Option<&'b TlsCertificate>>>(&self, identity: Q, trusted_ca: R) -> TlsCertificateFlags;
    //fn get_property_certificate(&self) -> /*Ignored*/Option<glib::ByteArray>;
    /// Reads the `certificate-pem` GObject property.
    fn get_property_certificate_pem(&self) -> Option<String>;
    // The `connect_property_*_notify` methods register a callback for the
    // corresponding `notify::<property>` signal.
    fn connect_property_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_property_certificate_pem_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_property_issuer_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_property_private_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
    fn connect_property_private_key_pem_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
// Generated blanket implementation: forwards every trait method to the
// corresponding C function, converting arguments/results with glib's
// translate traits. Signal connections box the Rust closure twice and hand
// the raw pointer to `connect` together with the matching trampoline.
impl<O: IsA<TlsCertificate> + IsA<glib::object::Object>> TlsCertificateExt for O {
    fn get_issuer(&self) -> Option<TlsCertificate> {
        unsafe {
            // `from_glib_none`: the C function returns a borrowed reference.
            from_glib_none(ffi::g_tls_certificate_get_issuer(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_34", feature = "dox"))]
    fn is_same(&self, cert_two: &TlsCertificate) -> bool {
        unsafe {
            from_glib(ffi::g_tls_certificate_is_same(self.to_glib_none().0, cert_two.to_glib_none().0))
        }
    }
    fn verify<'a, 'b, P: IsA<SocketConnectable> + 'a, Q: Into<Option<&'a P>>, R: Into<Option<&'b TlsCertificate>>>(&self, identity: Q, trusted_ca: R) -> TlsCertificateFlags {
        // Both optional arguments become nullable C pointers.
        let identity = identity.into();
        let identity = identity.to_glib_none();
        let trusted_ca = trusted_ca.into();
        let trusted_ca = trusted_ca.to_glib_none();
        unsafe {
            from_glib(ffi::g_tls_certificate_verify(self.to_glib_none().0, identity.0, trusted_ca.0))
        }
    }
    //fn get_property_certificate(&self) -> /*Ignored*/Option<glib::ByteArray> {
    //    unsafe {
    //        let mut value = Value::from_type(</*Unknown type*/ as StaticType>::static_type());
    //        gobject_ffi::g_object_get_property(self.to_glib_none().0, "certificate".to_glib_none().0, value.to_glib_none_mut().0);
    //        value.get()
    //    }
    //}
    fn get_property_certificate_pem(&self) -> Option<String> {
        unsafe {
            // Generic GObject property read through an intermediate GValue.
            let mut value = Value::from_type(<String as StaticType>::static_type());
            gobject_ffi::g_object_get_property(self.to_glib_none().0, "certificate-pem".to_glib_none().0, value.to_glib_none_mut().0);
            value.get()
        }
    }
    fn connect_property_certificate_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe {
            // Double boxing gives a thin pointer the C side can store; the
            // trampoline below unboxes and calls it.
            let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
            connect(self.to_glib_none().0, "notify::certificate",
                transmute(notify_certificate_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
        }
    }
    fn connect_property_certificate_pem_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe {
            let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
            connect(self.to_glib_none().0, "notify::certificate-pem",
                transmute(notify_certificate_pem_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
        }
    }
    fn connect_property_issuer_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe {
            let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
            connect(self.to_glib_none().0, "notify::issuer",
                transmute(notify_issuer_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
        }
    }
    fn connect_property_private_key_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe {
            let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
            connect(self.to_glib_none().0, "notify::private-key",
                transmute(notify_private_key_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
        }
    }
    fn connect_property_private_key_pem_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        unsafe {
            let f: Box_<Box_<Fn(&Self) + 'static>> = Box_::new(Box_::new(f));
            connect(self.to_glib_none().0, "notify::private-key-pem",
                transmute(notify_private_key_pem_trampoline::<Self> as usize), Box_::into_raw(f) as *mut _)
        }
    }
}
// C callbacks for the `notify::*` signal connections above. Each recovers the
// boxed Rust closure from the user-data pointer and invokes it with the
// borrowed, downcast certificate instance.
// SAFETY (review note): each transmute assumes `f` is the `Box<Box<Fn(&P)>>`
// stored by the matching `connect_property_*_notify` method — keep the two
// sides in sync.
unsafe extern "C" fn notify_certificate_trampoline<P>(this: *mut ffi::GTlsCertificate, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<TlsCertificate> {
    callback_guard!();
    let f: &&(Fn(&P) + 'static) = transmute(f);
    f(&TlsCertificate::from_glib_borrow(this).downcast_unchecked())
}
unsafe extern "C" fn notify_certificate_pem_trampoline<P>(this: *mut ffi::GTlsCertificate, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<TlsCertificate> {
    callback_guard!();
    let f: &&(Fn(&P) + 'static) = transmute(f);
    f(&TlsCertificate::from_glib_borrow(this).downcast_unchecked())
}
unsafe extern "C" fn notify_issuer_trampoline<P>(this: *mut ffi::GTlsCertificate, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<TlsCertificate> {
    callback_guard!();
    let f: &&(Fn(&P) + 'static) = transmute(f);
    f(&TlsCertificate::from_glib_borrow(this).downcast_unchecked())
}
unsafe extern "C" fn notify_private_key_trampoline<P>(this: *mut ffi::GTlsCertificate, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<TlsCertificate> {
    callback_guard!();
    let f: &&(Fn(&P) + 'static) = transmute(f);
    f(&TlsCertificate::from_glib_borrow(this).downcast_unchecked())
}
unsafe extern "C" fn notify_private_key_pem_trampoline<P>(this: *mut ffi::GTlsCertificate, _param_spec: glib_ffi::gpointer, f: glib_ffi::gpointer)
where P: IsA<TlsCertificate> {
    callback_guard!();
    let f: &&(Fn(&P) + 'static) = transmute(f);
    f(&TlsCertificate::from_glib_borrow(this).downcast_unchecked())
}
|
// Rhodium allows to create hyper servers as a stack of Handlers. Each Handler has its own handle_request
// and handle_response methods
// Handlers are executed by order while handling a request, and by the reverse order while handling the response.
// The order in which handle_request and handle_response functions are executed is summarized in the next flow diagram:
//
// ----------- ----------- ----------- -----------
// --- req -> | | --- req -> | | --- req -> ... --- req -> | | --- req -> | |
// | Handler | | Handler | | Handler | | Service |
// | 1 | | 2 | | n | | |
// <-- res -- |_________| <-- res -- |_________| <-- res -- ... <-- res -- |_________| <-- res -- |_________|
//
// Every Handler is a struct implementing the RhodHandler trait, while the Service is a struct implementing the RhodService trait.
// RhodHandlers + RhodService conforms a RhodStack
// To use Rhodium, you just have to create a RhodStack, set the socket address where the hyper server will listen,
// and the protocol to be used (HTTP/HTTPS).
//
//
// If the Handler i returns an error while handling a request:
// catch_request functions are called for the next handlers (Handler i+1, i+2, ..., n), and then the flow is ended.
// If the Service returns an error:
// the flow is ended
// If the Handler i returns an error while handling a response:
// catch_response functions are called for the next handlers (Handler i-1, i-2, ..., 1), and then the flow is ended.
#[macro_use]
extern crate log;
use hyper;
use hyper::server::conn::AddrIncoming;
use hyper::server::conn::AddrStream;
use hyper::Server as HyperServer;
use std::clone::Clone;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::server::TlsStream;
pub mod errors;
mod hyper_config;
pub mod protocols;
pub mod request;
pub mod response;
pub mod stack;
use self::errors::RhodHyperError; //Server errors (Hyper errors, bad certificates, etc)
use self::hyper_config::*;
use self::protocols::*;
use self::request::*;
use self::stack::*;
// =====================================================================
// || Structs to share information between handlers ||
// =====================================================================
#[derive(Clone)]
/// Per-connection information shared with handlers: the client's peer address
/// and the protocol (HTTP/HTTPS) the connection arrived on.
pub struct RhodConnInfo {
    /// Remote (peer) socket address of the client.
    pub addr: SocketAddr,
    /// Protocol used by this connection.
    pub proto: HttpProtocol,
}
impl RhodConnInfo {
pub fn new(addr: SocketAddr, proto: HttpProtocol) -> RhodConnInfo {
RhodConnInfo { addr, proto }
}
}
/// Channel type used for communication between handlers and the service.
///
/// Users of the library define their own `CommunicationChannel` type, which
/// must implement this trait; the `Send + Sync + 'static` bounds let
/// instances cross thread and task boundaries inside the server.
pub trait CommunicationChannel: Send + Sync + 'static {
    /// Constructs a new channel instance.
    fn new() -> Self;
}
// ==============================
// || Rhodium ||
// ==============================
// Rhodium: has all information needed to run a server
/// Owns everything needed to run a server: the handler/service stack, the
/// listen address and the protocol configuration.
pub struct Rhodium<C: CommunicationChannel> {
    stack: Arc<RhodStack<C>>, // stack of handlers and the service to execute
    addr: SocketAddr,         // address to listen on
    protocol: HttpProtocolConf, // use http or https
}
impl<C: CommunicationChannel> Rhodium<C> {
    /// Bundles a handler stack, listen address and protocol configuration
    /// into a runnable server description.
    pub fn new(
        stack: Arc<RhodStack<C>>,
        addr: SocketAddr,
        protocol: HttpProtocolConf,
    ) -> Rhodium<C> {
        Rhodium {
            stack,
            addr,
            protocol,
        }
    }
    /// Creates and runs a hyper server that executes the rhodium stack.
    ///
    /// Consumes `self`; resolves only when the server stops. Bind/TLS setup
    /// failures are reported as `RhodHyperError::ConfigError`.
    pub async fn run(self) -> Result<(), RhodHyperError> {
        println!("Listening on {}://{}", self.protocol.to_string(), self.addr);
        info!("Listening on {}://{}", self.protocol.to_string(), self.addr);
        match &self.protocol {
            HttpProtocolConf::HTTP => {
                match AddrIncoming::bind(&self.addr) {
                    Ok(addr_incoming) => {
                        let builder = HyperServer::builder(addr_incoming);
                        // creating a service factory.
                        // for each request, it will return a RhodHyperService with the rhodium stack, and the connection info (source addr + protocol used)
                        let mk_service = hyper::service::make_service_fn(|socket: &AddrStream| {
                            let stack = Arc::clone(&self.stack);
                            let addr = socket.remote_addr();
                            async move {
                                Ok::<_, RhodHyperError>(RhodHyperService::new(
                                    stack,
                                    RhodConnInfo::new(addr, HttpProtocol::HTTP),
                                ))
                            }
                        });
                        // starts a server with the created service factory
                        // wraps the Hyper result in a Rhod Hyper result
                        RhodHyperError::from_hyper_error_result(builder.serve(mk_service).await)
                    }
                    Err(e) => Err(RhodHyperError::ConfigError(format!(
                        "Error when binding (HTTP). {}",
                        e
                    ))),
                }
            }
            HttpProtocolConf::HTTPS {
                cert_file,
                key_file,
            } => {
                // Create a TCP listener via tokio.
                match TcpListener::bind(&self.addr).await {
                    Ok(tcp) => match HyperTlsAcceptor::new(tcp, &cert_file, &key_file) {
                        Ok(tls_acceptor) => {
                            let builder = HyperServer::builder(tls_acceptor);
                            // creating a service factory.
                            // for each request, it will return a RhodHyperService with the rhodium stack, and the connection info (source addr + protocol used)
                            let mk_service =
                                hyper::service::make_service_fn(|stream: &TlsStream<TcpStream>| {
                                    let stack = Arc::clone(&self.stack);
                                    // Peer address comes from the TCP stream
                                    // underneath the TLS wrapper.
                                    let addr = stream.get_ref().0.peer_addr();
                                    async move {
                                        match addr {
                                            Ok(peer_addr) => {
                                                Ok::<_, RhodHyperError>(RhodHyperService::new(
                                                    stack,
                                                    RhodConnInfo::new(
                                                        peer_addr,
                                                        HttpProtocol::HTTPS,
                                                    ),
                                                ))
                                            }
                                            Err(e) => Err::<RhodHyperService<C>, RhodHyperError>(
                                                RhodHyperError::ConfigError(format!(
                                                    "Couldnt parse client IP. {}",
                                                    e
                                                )),
                                            ),
                                        }
                                    }
                                });
                            // starts a server with the created service factory
                            // wraps the Hyper result in a Rhod Hyper result
                            RhodHyperError::from_hyper_error_result(builder.serve(mk_service).await)
                        }
                        Err(e) => Err(RhodHyperError::ConfigError(format!(
                            "Error when creating TLS Acceptor. {}",
                            e
                        ))),
                    },
                    Err(e) => Err(RhodHyperError::ConfigError(format!(
                        "Error when binding (HTTPS). {}",
                        e
                    ))),
                }
            }
        }
    }
}
|
#[doc = "Register `APB1LPENR` reader"]
pub type R = crate::R<APB1LPENR_SPEC>;
#[doc = "Register `APB1LPENR` writer"]
pub type W = crate::W<APB1LPENR_SPEC>;
// svd2rust-style generated field types: `TIM5LPEN_A` enumerates the two legal
// values of each one-bit low-power clock-enable field in this register.
#[doc = "Field `TIM5LPEN` reader - TIM5 clock enable during Sleep mode"]
pub type TIM5LPEN_R = crate::BitReader<TIM5LPEN_A>;
#[doc = "TIM5 clock enable during Sleep mode\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TIM5LPEN_A {
    #[doc = "0: Selected module is disabled during Sleep mode"]
    DisabledInSleep = 0,
    #[doc = "1: Selected module is enabled during Sleep mode"]
    EnabledInSleep = 1,
}
impl From<TIM5LPEN_A> for bool {
#[inline(always)]
fn from(variant: TIM5LPEN_A) -> Self {
variant as u8 != 0
}
}
impl TIM5LPEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TIM5LPEN_A {
        // `self.bits` is the single register bit, already read as a bool.
        match self.bits {
            false => TIM5LPEN_A::DisabledInSleep,
            true => TIM5LPEN_A::EnabledInSleep,
        }
    }
    #[doc = "Selected module is disabled during Sleep mode"]
    #[inline(always)]
    pub fn is_disabled_in_sleep(&self) -> bool {
        *self == TIM5LPEN_A::DisabledInSleep
    }
    #[doc = "Selected module is enabled during Sleep mode"]
    #[inline(always)]
    pub fn is_enabled_in_sleep(&self) -> bool {
        *self == TIM5LPEN_A::EnabledInSleep
    }
}
#[doc = "Field `TIM5LPEN` writer - TIM5 clock enable during Sleep mode"]
pub type TIM5LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, TIM5LPEN_A>;
// Writer proxy: the helpers below set the bit through its enumerated
// variants instead of a raw bool.
impl<'a, REG, const O: u8> TIM5LPEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Selected module is disabled during Sleep mode"]
    #[inline(always)]
    pub fn disabled_in_sleep(self) -> &'a mut crate::W<REG> {
        self.variant(TIM5LPEN_A::DisabledInSleep)
    }
    #[doc = "Selected module is enabled during Sleep mode"]
    #[inline(always)]
    pub fn enabled_in_sleep(self) -> &'a mut crate::W<REG> {
        self.variant(TIM5LPEN_A::EnabledInSleep)
    }
}
#[doc = "Field `TIM6LPEN` reader - TIM6 clock enable during Sleep mode"]
pub use TIM5LPEN_R as TIM6LPEN_R;
#[doc = "Field `LPTIM1LPEN` reader - LPTIM1 clock enable during sleep mode"]
pub use TIM5LPEN_R as LPTIM1LPEN_R;
#[doc = "Field `RTCAPBLPEN` reader - RTC APB clock enable during sleep mode"]
pub use TIM5LPEN_R as RTCAPBLPEN_R;
#[doc = "Field `WWDGLPEN` reader - Window watchdog clock enable during Sleep mode"]
pub use TIM5LPEN_R as WWDGLPEN_R;
#[doc = "Field `SPI2LPEN` reader - SPI2 clock enable during Sleep mode"]
pub use TIM5LPEN_R as SPI2LPEN_R;
#[doc = "Field `USART2LPEN` reader - USART2 clock enable during Sleep mode"]
pub use TIM5LPEN_R as USART2LPEN_R;
#[doc = "Field `I2C1LPEN` reader - I2C1 clock enable during Sleep mode"]
pub use TIM5LPEN_R as I2C1LPEN_R;
#[doc = "Field `I2C2LPEN` reader - I2C2 clock enable during Sleep mode"]
pub use TIM5LPEN_R as I2C2LPEN_R;
#[doc = "Field `FMPI2C1LPEN` reader - FMPI2C1 clock enable during Sleep"]
pub use TIM5LPEN_R as FMPI2C1LPEN_R;
#[doc = "Field `PWRLPEN` reader - Power interface clock enable during Sleep mode"]
pub use TIM5LPEN_R as PWRLPEN_R;
#[doc = "Field `DACLPEN` reader - DAC interface clock enable during sleep mode"]
pub use TIM5LPEN_R as DACLPEN_R;
#[doc = "Field `TIM6LPEN` writer - TIM6 clock enable during Sleep mode"]
pub use TIM5LPEN_W as TIM6LPEN_W;
#[doc = "Field `LPTIM1LPEN` writer - LPTIM1 clock enable during sleep mode"]
pub use TIM5LPEN_W as LPTIM1LPEN_W;
#[doc = "Field `RTCAPBLPEN` writer - RTC APB clock enable during sleep mode"]
pub use TIM5LPEN_W as RTCAPBLPEN_W;
#[doc = "Field `WWDGLPEN` writer - Window watchdog clock enable during Sleep mode"]
pub use TIM5LPEN_W as WWDGLPEN_W;
#[doc = "Field `SPI2LPEN` writer - SPI2 clock enable during Sleep mode"]
pub use TIM5LPEN_W as SPI2LPEN_W;
#[doc = "Field `USART2LPEN` writer - USART2 clock enable during Sleep mode"]
pub use TIM5LPEN_W as USART2LPEN_W;
#[doc = "Field `I2C1LPEN` writer - I2C1 clock enable during Sleep mode"]
pub use TIM5LPEN_W as I2C1LPEN_W;
#[doc = "Field `I2C2LPEN` writer - I2C2 clock enable during Sleep mode"]
pub use TIM5LPEN_W as I2C2LPEN_W;
#[doc = "Field `FMPI2C1LPEN` writer - FMPI2C1 clock enable during Sleep"]
pub use TIM5LPEN_W as FMPI2C1LPEN_W;
#[doc = "Field `PWRLPEN` writer - Power interface clock enable during Sleep mode"]
pub use TIM5LPEN_W as PWRLPEN_W;
#[doc = "Field `DACLPEN` writer - DAC interface clock enable during sleep mode"]
pub use TIM5LPEN_W as DACLPEN_W;
// Reader accessors: each extracts one enable bit from the 32-bit register
// value at the bit position stated in its doc attribute.
impl R {
    #[doc = "Bit 3 - TIM5 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn tim5lpen(&self) -> TIM5LPEN_R {
        TIM5LPEN_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - TIM6 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn tim6lpen(&self) -> TIM6LPEN_R {
        TIM6LPEN_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 9 - LPTIM1 clock enable during sleep mode"]
    #[inline(always)]
    pub fn lptim1lpen(&self) -> LPTIM1LPEN_R {
        LPTIM1LPEN_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - RTC APB clock enable during sleep mode"]
    #[inline(always)]
    pub fn rtcapblpen(&self) -> RTCAPBLPEN_R {
        RTCAPBLPEN_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - Window watchdog clock enable during Sleep mode"]
    #[inline(always)]
    pub fn wwdglpen(&self) -> WWDGLPEN_R {
        WWDGLPEN_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 14 - SPI2 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn spi2lpen(&self) -> SPI2LPEN_R {
        SPI2LPEN_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 17 - USART2 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn usart2lpen(&self) -> USART2LPEN_R {
        USART2LPEN_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 21 - I2C1 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn i2c1lpen(&self) -> I2C1LPEN_R {
        I2C1LPEN_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - I2C2 clock enable during Sleep mode"]
    #[inline(always)]
    pub fn i2c2lpen(&self) -> I2C2LPEN_R {
        I2C2LPEN_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 24 - FMPI2C1 clock enable during Sleep"]
    #[inline(always)]
    pub fn fmpi2c1lpen(&self) -> FMPI2C1LPEN_R {
        FMPI2C1LPEN_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bit 28 - Power interface clock enable during Sleep mode"]
    #[inline(always)]
    pub fn pwrlpen(&self) -> PWRLPEN_R {
        PWRLPEN_R::new(((self.bits >> 28) & 1) != 0)
    }
    #[doc = "Bit 29 - DAC interface clock enable during sleep mode"]
    #[inline(always)]
    pub fn daclpen(&self) -> DACLPEN_R {
        DACLPEN_R::new(((self.bits >> 29) & 1) != 0)
    }
}
// Write proxies for the APB1LPENR register. Each method returns a bit-writer
// parameterised on the register spec and the bit offset (the const generic),
// which matches the bit position documented in its `#[doc]` attribute.
impl W {
    #[doc = "Bit 3 - TIM5 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn tim5lpen(&mut self) -> TIM5LPEN_W<APB1LPENR_SPEC, 3> {
        TIM5LPEN_W::new(self)
    }
    #[doc = "Bit 4 - TIM6 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn tim6lpen(&mut self) -> TIM6LPEN_W<APB1LPENR_SPEC, 4> {
        TIM6LPEN_W::new(self)
    }
    #[doc = "Bit 9 - LPTIM1 clock enable during sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn lptim1lpen(&mut self) -> LPTIM1LPEN_W<APB1LPENR_SPEC, 9> {
        LPTIM1LPEN_W::new(self)
    }
    #[doc = "Bit 10 - RTC APB clock enable during sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn rtcapblpen(&mut self) -> RTCAPBLPEN_W<APB1LPENR_SPEC, 10> {
        RTCAPBLPEN_W::new(self)
    }
    #[doc = "Bit 11 - Window watchdog clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn wwdglpen(&mut self) -> WWDGLPEN_W<APB1LPENR_SPEC, 11> {
        WWDGLPEN_W::new(self)
    }
    #[doc = "Bit 14 - SPI2 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn spi2lpen(&mut self) -> SPI2LPEN_W<APB1LPENR_SPEC, 14> {
        SPI2LPEN_W::new(self)
    }
    #[doc = "Bit 17 - USART2 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn usart2lpen(&mut self) -> USART2LPEN_W<APB1LPENR_SPEC, 17> {
        USART2LPEN_W::new(self)
    }
    #[doc = "Bit 21 - I2C1 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn i2c1lpen(&mut self) -> I2C1LPEN_W<APB1LPENR_SPEC, 21> {
        I2C1LPEN_W::new(self)
    }
    #[doc = "Bit 22 - I2C2 clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn i2c2lpen(&mut self) -> I2C2LPEN_W<APB1LPENR_SPEC, 22> {
        I2C2LPEN_W::new(self)
    }
    #[doc = "Bit 24 - FMPI2C1 clock enable during Sleep"]
    #[inline(always)]
    #[must_use]
    pub fn fmpi2c1lpen(&mut self) -> FMPI2C1LPEN_W<APB1LPENR_SPEC, 24> {
        FMPI2C1LPEN_W::new(self)
    }
    #[doc = "Bit 28 - Power interface clock enable during Sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn pwrlpen(&mut self) -> PWRLPEN_W<APB1LPENR_SPEC, 28> {
        PWRLPEN_W::new(self)
    }
    #[doc = "Bit 29 - DAC interface clock enable during sleep mode"]
    #[inline(always)]
    #[must_use]
    pub fn daclpen(&mut self) -> DACLPEN_W<APB1LPENR_SPEC, 29> {
        DACLPEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe` because arbitrary raw bits may bypass the field-level
    // invariants enforced by the typed writers above.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "APB1 peripheral clock enable in low power mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb1lpenr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb1lpenr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct APB1LPENR_SPEC;
// 32-bit register.
impl crate::RegisterSpec for APB1LPENR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`apb1lpenr::R`](R) reader structure"]
impl crate::Readable for APB1LPENR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb1lpenr::W`](W) writer structure"]
impl crate::Writable for APB1LPENR_SPEC {
    // No write-one-to-clear / write-zero-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB1LPENR to value 0x36fe_c9ff"]
impl crate::Resettable for APB1LPENR_SPEC {
    const RESET_VALUE: Self::Ux = 0x36fe_c9ff;
}
|
#![allow(non_camel_case_types)]
#![allow(proc_macro_derive_resolution_fallback)]
#![allow(non_snake_case)]
#![allow(dead_code)]
#![allow(unused_imports)]
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_derive_enum;
mod common;
mod pg_array;
mod nullable;
mod rename;
mod simple;
|
use shorthand::ShortHand;
// Fixture exercising the `shorthand` derive's `verify` attribute, which
// points generated accessors at a verification method on `Self`.
// NOTE(review): this looks like a compile-test (trybuild-style) fixture —
// confirm whether `Self::verify_field` is intentionally absent.
#[derive(ShortHand)]
#[shorthand(verify(ffn = "Self::verify_field"))]
struct Example {
    field: usize,
}
fn main() {}
|
use super::*;
use std::cmp::Ord;
use std::cmp::Ordering;
use std::cmp::PartialOrd;
impl Ord for EditorInstallation {
    /// Installations compare by their `version` field alone; all other
    /// fields are ignored for ordering purposes.
    fn cmp(&self, other: &EditorInstallation) -> Ordering {
        Ord::cmp(&self.version, &other.version)
    }
}
impl PartialOrd for EditorInstallation {
    /// Delegates to the total order defined by `Ord`, keeping the two
    /// implementations consistent (the canonical pattern when `Ord` exists).
    fn partial_cmp(&self, other: &EditorInstallation) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
|
#[doc = "Register `OR1` reader"]
pub type R = crate::R<OR1_SPEC>;
#[doc = "Register `OR1` writer"]
pub type W = crate::W<OR1_SPEC>;
#[doc = "Field `ETR_RMP` reader - External trigger remap"]
pub type ETR_RMP_R = crate::BitReader<ETR_RMP_A>;
#[doc = "External trigger remap\n\nValue on reset: 0"]
// Enumerated values for the single-bit ETR_RMP field (TIM2 OR1 register).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ETR_RMP_A {
    #[doc = "0: TIM2 ETR is connected to GPIO: Refer to Alternate Function mapping"]
    Gpio = 0,
    #[doc = "1: LSE internal clock is connected to TIM2_ETR input"]
    Tim2Etr = 1,
}
// Single-bit field: the enum maps losslessly onto a bool (Gpio = false,
// Tim2Etr = true).
impl From<ETR_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ETR_RMP_A) -> Self {
        variant as u8 != 0
    }
}
// Reader helpers for the ETR_RMP bit: convert the raw bit into the
// enumerated value and provide per-variant predicates.
impl ETR_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ETR_RMP_A {
        match self.bits {
            false => ETR_RMP_A::Gpio,
            true => ETR_RMP_A::Tim2Etr,
        }
    }
    #[doc = "TIM2 ETR is connected to GPIO: Refer to Alternate Function mapping"]
    #[inline(always)]
    pub fn is_gpio(&self) -> bool {
        *self == ETR_RMP_A::Gpio
    }
    #[doc = "LSE internal clock is connected to TIM2_ETR input"]
    #[inline(always)]
    pub fn is_tim2_etr(&self) -> bool {
        *self == ETR_RMP_A::Tim2Etr
    }
}
#[doc = "Field `ETR_RMP` writer - External trigger remap"]
pub type ETR_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ETR_RMP_A>;
// Writer helpers for the ETR_RMP bit: named setters for each enumerated
// value, delegating to the generic `variant` writer.
impl<'a, REG, const O: u8> ETR_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "TIM2 ETR is connected to GPIO: Refer to Alternate Function mapping"]
    #[inline(always)]
    pub fn gpio(self) -> &'a mut crate::W<REG> {
        self.variant(ETR_RMP_A::Gpio)
    }
    #[doc = "LSE internal clock is connected to TIM2_ETR input"]
    #[inline(always)]
    pub fn tim2_etr(self) -> &'a mut crate::W<REG> {
        self.variant(ETR_RMP_A::Tim2Etr)
    }
}
#[doc = "Field `TI4_RMP` reader - Input capture 4 remap"]
pub type TI4_RMP_R = crate::FieldReader<TI4_RMP_A>;
#[doc = "Input capture 4 remap\n\nValue on reset: 0"]
// Enumerated values for the two-bit TI4_RMP field; `repr(u8)` pins the
// discriminants to the raw field encoding 0..=3.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum TI4_RMP_A {
    #[doc = "0: TIM2 TI4 is connected to GPIO: Refer to Alternate Function mapping"]
    Gpio = 0,
    #[doc = "1: TIM2 TI4 is connected to COMP1_OUT"]
    Comp1 = 1,
    #[doc = "2: TIM2 TI4 is connected to COMP2_OUT"]
    Comp2 = 2,
    #[doc = "3: TIM2 TI4 is connected to a logical OR between COMP1_OUT and COMP2_OUT"]
    Comp12 = 3,
}
// Lossless conversion back to the raw 2-bit field encoding.
impl From<TI4_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: TI4_RMP_A) -> Self {
        variant as _
    }
}
// The field's raw storage type is u8 (2 bits used).
impl crate::FieldSpec for TI4_RMP_A {
    type Ux = u8;
}
// Reader helpers for the TI4_RMP field: decode the raw 2-bit value into the
// enum and provide per-variant predicates.
impl TI4_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TI4_RMP_A {
        match self.bits {
            0 => TI4_RMP_A::Gpio,
            1 => TI4_RMP_A::Comp1,
            2 => TI4_RMP_A::Comp2,
            3 => TI4_RMP_A::Comp12,
            // The reader masks the field to 2 bits, so values > 3 cannot occur.
            _ => unreachable!(),
        }
    }
    #[doc = "TIM2 TI4 is connected to GPIO: Refer to Alternate Function mapping"]
    #[inline(always)]
    pub fn is_gpio(&self) -> bool {
        *self == TI4_RMP_A::Gpio
    }
    #[doc = "TIM2 TI4 is connected to COMP1_OUT"]
    #[inline(always)]
    pub fn is_comp_1(&self) -> bool {
        *self == TI4_RMP_A::Comp1
    }
    #[doc = "TIM2 TI4 is connected to COMP2_OUT"]
    #[inline(always)]
    pub fn is_comp_2(&self) -> bool {
        *self == TI4_RMP_A::Comp2
    }
    #[doc = "TIM2 TI4 is connected to a logical OR between COMP1_OUT and COMP2_OUT"]
    #[inline(always)]
    pub fn is_comp_12(&self) -> bool {
        *self == TI4_RMP_A::Comp12
    }
}
#[doc = "Field `TI4_RMP` writer - Input capture 4 remap"]
pub type TI4_RMP_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, TI4_RMP_A>;
// Writer helpers for the TI4_RMP field: named setters for each enumerated
// value. `FieldWriterSafe` is used because all four 2-bit encodings are valid.
impl<'a, REG, const O: u8> TI4_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "TIM2 TI4 is connected to GPIO: Refer to Alternate Function mapping"]
    #[inline(always)]
    pub fn gpio(self) -> &'a mut crate::W<REG> {
        self.variant(TI4_RMP_A::Gpio)
    }
    #[doc = "TIM2 TI4 is connected to COMP1_OUT"]
    #[inline(always)]
    pub fn comp_1(self) -> &'a mut crate::W<REG> {
        self.variant(TI4_RMP_A::Comp1)
    }
    #[doc = "TIM2 TI4 is connected to COMP2_OUT"]
    #[inline(always)]
    pub fn comp_2(self) -> &'a mut crate::W<REG> {
        self.variant(TI4_RMP_A::Comp2)
    }
    #[doc = "TIM2 TI4 is connected to a logical OR between COMP1_OUT and COMP2_OUT"]
    #[inline(always)]
    pub fn comp_12(self) -> &'a mut crate::W<REG> {
        self.variant(TI4_RMP_A::Comp12)
    }
}
// Read accessors for the OR1 register: ETR_RMP is bit 1, TI4_RMP occupies
// bits 2..=3 (hence the shift by 2 and the 0b11 mask).
impl R {
    #[doc = "Bit 1 - External trigger remap"]
    #[inline(always)]
    pub fn etr_rmp(&self) -> ETR_RMP_R {
        ETR_RMP_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bits 2:3 - Input capture 4 remap"]
    #[inline(always)]
    pub fn ti4_rmp(&self) -> TI4_RMP_R {
        TI4_RMP_R::new(((self.bits >> 2) & 3) as u8)
    }
}
// Write proxies for the OR1 register fields, plus a raw-bits escape hatch.
impl W {
    #[doc = "Bit 1 - External trigger remap"]
    #[inline(always)]
    #[must_use]
    pub fn etr_rmp(&mut self) -> ETR_RMP_W<OR1_SPEC, 1> {
        ETR_RMP_W::new(self)
    }
    #[doc = "Bits 2:3 - Input capture 4 remap"]
    #[inline(always)]
    #[must_use]
    pub fn ti4_rmp(&mut self) -> TI4_RMP_W<OR1_SPEC, 2> {
        TI4_RMP_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe` because raw writes bypass the typed field writers' invariants.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "TIM2 option register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`or1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`or1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct OR1_SPEC;
// 32-bit register.
impl crate::RegisterSpec for OR1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`or1::R`](R) reader structure"]
impl crate::Readable for OR1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`or1::W`](W) writer structure"]
impl crate::Writable for OR1_SPEC {
    // No write-one-to-clear / write-zero-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets OR1 to value 0"]
impl crate::Resettable for OR1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::slice;
use std::ffi::{CString, CStr};
use traits::FromRaw;
use ::ffi;
use ::error::*;
/// Safe wrapper around a borrowed Assimp texture (`ffi::AiTexture`).
/// The lifetime ties the wrapper to the scene data that owns the raw struct.
pub struct Texture<'a> {
    raw: &'a ffi::AiTexture,
}
impl<'a> FromRaw<'a, Texture<'a>> for Texture<'a> {
    type Raw = *const ffi::AiTexture;
    /// Wraps a raw texture pointer, panicking if Assimp handed us NULL.
    /// The `unsafe` block is sound only if the pointer, when non-null,
    /// points to a valid `AiTexture` that outlives `'a` — a contract the
    /// caller (the Assimp scene loader) must uphold.
    #[inline(always)]
    fn from_raw(raw: &'a Self::Raw) -> Texture<'a> {
        Texture { raw: unsafe { raw.as_ref().expect("Texture pointer provided by Assimp was NULL") } }
    }
}
impl<'a> Texture<'a> {
    /// Texture width as reported by Assimp.
    #[inline]
    pub fn width(&self) -> u32 {
        self.raw.width as u32
    }
    /// Texture height as reported by Assimp.
    #[inline]
    pub fn height(&self) -> u32 {
        self.raw.height as u32
    }
    /// Compares `format` against the texture's format hint string.
    ///
    /// Returns `Ok(false)` immediately if `format` is longer than the hint
    /// buffer; otherwise compares as C strings. Errors if `format` contains
    /// an interior NUL byte.
    // NOTE(review): if ARCH_FORMAT_HINT_LENGTH counts the trailing NUL, a
    // string of exactly that length could never match — confirm the ffi
    // constant's meaning.
    pub fn check_format(&self, format: &str) -> AiResult<bool> {
        if format.len() > ffi::ARCH_FORMAT_HINT_LENGTH {
            Ok(false)
        } else {
            let c_str = try_throw!(CString::new(format));
            Ok(unsafe {
                c_str.as_ref() == CStr::from_ptr(self.raw.arch_format_hint.as_ptr())
            })
        }
    }
    /// Raw texel data, sized as `height * width` texels.
    // NOTE(review): Assimp conventionally uses height == 0 to flag a
    // compressed texture whose byte size is `width`; with this formula such
    // textures would yield an empty slice — verify against the ffi docs.
    #[inline]
    pub fn data_raw(&self) -> &'a [ffi::AiTexel] {
        unsafe { slice::from_raw_parts(self.raw.data, (self.height() * self.width()) as usize) }
    }
}
pub mod running_page;
pub mod settings_page;
|
use gamesession::channel::*;
use player::*;
use InternalState;
use phase::*;
use slog::*;
/// Top-level state for one game of Mafia: the current phase, the shared
/// internal state (players, night results), and a contextual logger.
pub struct GameSession {
    // Which phase of the day/night cycle the game is currently in.
    phase: EGamePhase,
    // Shared mutable game state handed to the per-phase state machines.
    state: InternalState,
    // Logger scoped with a "GameSession" context key.
    log: Logger,
}
impl GameSession {
    /// Creates a session in the `Selection` phase with an empty player list
    /// and a discard logger (replace it via `use_logger`).
    pub fn new() -> GameSession {
        let root = Logger::root(Discard, o!(
            "murder.version" => env!("CARGO_PKG_VERSION"),
        ));
        let log = root.new(o!("context"=> "GameSession"));
        GameSession {
            phase: EGamePhase::Selection,
            state: InternalState {
                players: Vec::new(),
                // -1 is the sentinel for "no mafia kill chosen yet".
                mafia_kill: -1,
                first_night: true,
                logger: root,
            },
            log: log
        }
    }
    /// Replaces the session's logger; the session-scoped child logger is
    /// re-derived so both stay consistent.
    pub fn use_logger(&mut self, logger: Logger) {
        self.state.logger = logger;
        self.log = self.state.logger.new(o!("context"=> "GameSession"));
    }
    /// Returns the player at index `i`.
    ///
    /// # Panics
    /// Panics if `i` is out of bounds.
    pub fn get_player(&self, i: usize) -> &Player {
        self.state.players.get(i).expect("Couldn't find player.")
    }
    /// All players in the session.
    pub fn get_players(&self) -> &Vec<Player> {
        &self.state.players
    }
    /// Materializes the state machine for the current phase, borrowing the
    /// internal state mutably for its duration.
    pub fn current_phase(&mut self) -> GamePhase {
        match self.phase {
            EGamePhase::Selection => GamePhase::Selection(SelectionState::new(&mut self.state)),
            EGamePhase::Morning => GamePhase::Morning(MorningState::new(&mut self.state)),
            EGamePhase::Special => GamePhase::Special(SpecialState::new(&mut self.state)),
            EGamePhase::Mafia => GamePhase::Mafia(MafiaState::new(&mut self.state)),
        }
    }
    /// Advances to the next phase. After the one-time `Selection` phase the
    /// game cycles Morning -> Special -> Mafia -> Morning.
    pub fn advance_phase(&mut self) {
        // Clone so we can still log the previous phase after reassignment.
        let old_phase = self.phase.clone();
        self.phase = match self.phase {
            EGamePhase::Selection => EGamePhase::Morning,
            EGamePhase::Morning => EGamePhase::Special,
            EGamePhase::Special => EGamePhase::Mafia,
            EGamePhase::Mafia => EGamePhase::Morning,
        };
        info!(self.log, "Moved from {:?} to {:?}", old_phase, self.phase);
    }
    /// Chat channels visible to player `i` in the current phase.
    ///
    /// Ghosts only ever see the ghost channel; living players see the
    /// phase-appropriate channel for their role, or `None` if the phase
    /// grants them nothing.
    pub fn player_channels(&self, i: usize) -> Vec<EChannel> {
        let player = self.get_player(i);
        let mut channels: Vec<EChannel> = Vec::new();
        if player.is_ghost() {
            channels.push(EChannel::Ghost);
            return channels;
        }
        match self.phase {
            EGamePhase::Morning | EGamePhase::Selection => {
                channels.push(EChannel::Global);
            }
            EGamePhase::Special => {
                if player.role() == &PlayerRole::Detective {
                    channels.push(EChannel::Detective);
                }
            }
            EGamePhase::Mafia => {
                if player.role() == &PlayerRole::Mafia {
                    channels.push(EChannel::Mafia);
                }
            }
        }
        if channels.is_empty() {
            channels.push(EChannel::None);
        }
        channels
    }
}
|
use std::sync::MutexGuard;
use nia_interpreter_core::{EventLoopHandle, NiaStopListeningCommand};
use nia_protocol_rust::Response;
use crate::error::NiaServerError;
use crate::error::NiaServerResult;
use crate::protocol::NiaGetDefinedActionsResponse;
use crate::protocol::NiaGetDefinedMappingsRequest;
use crate::protocol::NiaGetDefinedMappingsResponse;
use crate::protocol::NiaGetDefinedModifiersResponse;
use crate::protocol::NiaGetDevicesResponse;
use crate::protocol::NiaHandshakeResponse;
use crate::protocol::NiaRemoveActionResponse;
use crate::protocol::NiaRemoveDeviceByNameResponse;
use crate::protocol::NiaRemoveDeviceByPathResponse;
use crate::protocol::NiaRemoveMappingResponse;
use crate::protocol::NiaRemoveModifierResponse;
use crate::protocol::NiaRequest;
use crate::protocol::Serializable;
use crate::protocol::{NiaChangeMappingResponse, NiaDefineActionResponse};
use crate::protocol::{NiaDefineDeviceResponse, NiaStartListeningResponse};
use crate::protocol::{NiaDefineMappingResponse, NiaStopListeningResponse};
use crate::protocol::{NiaDefineModifierResponse, NiaIsListeningResponse};
use crate::protocol::{NiaExecuteCodeResponse, NiaRemoveDeviceByIdResponse};
use crate::server::Server;
/// One variant per request type in the Nia protocol; each wraps the typed
/// response produced for the corresponding `NiaRequest` variant.
// NOTE(review): several variants say "Keyboard" while the wrapped types say
// "Device" — presumably a historical rename; confirm before unifying.
#[derive(Debug, Clone)]
pub enum NiaResponse {
    Handshake(NiaHandshakeResponse),
    GetDevices(NiaGetDevicesResponse),
    ExecuteCode(NiaExecuteCodeResponse),
    DefineKeyboard(NiaDefineDeviceResponse),
    RemoveKeyboardByPath(NiaRemoveDeviceByPathResponse),
    RemoveKeyboardByName(NiaRemoveDeviceByNameResponse),
    RemoveKeyboardById(NiaRemoveDeviceByIdResponse),
    GetDefinedModifiers(NiaGetDefinedModifiersResponse),
    DefineModifier(NiaDefineModifierResponse),
    RemoveModifier(NiaRemoveModifierResponse),
    GetDefinedActions(NiaGetDefinedActionsResponse),
    DefineAction(NiaDefineActionResponse),
    RemoveAction(NiaRemoveActionResponse),
    GetDefinedMappings(NiaGetDefinedMappingsResponse),
    DefineMapping(NiaDefineMappingResponse),
    ChangeMapping(NiaChangeMappingResponse),
    RemoveMapping(NiaRemoveMappingResponse),
    IsListening(NiaIsListeningResponse),
    StartListening(NiaStartListeningResponse),
    StopListening(NiaStopListeningResponse),
}
impl NiaResponse {
    /// Dispatches an incoming `NiaRequest` to the matching response
    /// constructor and wraps the result in the corresponding variant.
    ///
    /// `server` is only consulted by device-related requests; most response
    /// constructors forward the request to the interpreter through
    /// `event_loop_handle`.
    pub fn from(
        server: &mut MutexGuard<Server>,
        nia_request: NiaRequest,
        event_loop_handle: MutexGuard<EventLoopHandle>,
    ) -> NiaResponse {
        // The match is the function's tail expression; the previous
        // `let nia_response = ...; nia_response` binding was redundant
        // (clippy::let_and_return).
        match nia_request {
            NiaRequest::Handshake(nia_handshake_request) => {
                let nia_handshake_response =
                    NiaHandshakeResponse::from(nia_handshake_request);
                NiaResponse::Handshake(nia_handshake_response)
            }
            NiaRequest::GetDevices(nia_get_devices_request) => {
                let nia_get_devices_response = NiaGetDevicesResponse::from(
                    server,
                    nia_get_devices_request,
                );
                NiaResponse::GetDevices(nia_get_devices_response)
            }
            NiaRequest::ExecuteCode(nia_execute_code_request) => {
                let nia_execute_code_response = NiaExecuteCodeResponse::from(
                    nia_execute_code_request,
                    event_loop_handle,
                );
                NiaResponse::ExecuteCode(nia_execute_code_response)
            }
            NiaRequest::DefineDevice(nia_define_keyboard_request) => {
                let nia_define_keyboard_response =
                    NiaDefineDeviceResponse::from(
                        server,
                        nia_define_keyboard_request,
                        event_loop_handle,
                    );
                NiaResponse::DefineKeyboard(nia_define_keyboard_response)
            }
            NiaRequest::RemoveDeviceByPath(
                nia_remove_keyboard_by_path_request,
            ) => {
                let nia_remove_keyboard_by_path_response =
                    NiaRemoveDeviceByPathResponse::from(
                        server,
                        nia_remove_keyboard_by_path_request,
                        event_loop_handle,
                    );
                NiaResponse::RemoveKeyboardByPath(
                    nia_remove_keyboard_by_path_response,
                )
            }
            NiaRequest::RemoveDeviceByName(
                nia_remove_keyboard_by_name_request,
            ) => {
                let nia_remove_keyboard_by_name_response =
                    NiaRemoveDeviceByNameResponse::from(
                        server,
                        nia_remove_keyboard_by_name_request,
                        event_loop_handle,
                    );
                NiaResponse::RemoveKeyboardByName(
                    nia_remove_keyboard_by_name_response,
                )
            }
            NiaRequest::RemoveDeviceById(nia_remove_keyboard_by_id_request) => {
                let nia_remove_keyboard_by_id_response =
                    NiaRemoveDeviceByIdResponse::from(
                        server,
                        nia_remove_keyboard_by_id_request,
                        event_loop_handle,
                    );
                NiaResponse::RemoveKeyboardById(
                    nia_remove_keyboard_by_id_response,
                )
            }
            NiaRequest::GetDefinedModifiers(
                nia_get_defined_modifiers_request,
            ) => {
                let nia_get_defined_modifiers_response =
                    NiaGetDefinedModifiersResponse::from(
                        nia_get_defined_modifiers_request,
                        event_loop_handle,
                    );
                NiaResponse::GetDefinedModifiers(
                    nia_get_defined_modifiers_response,
                )
            }
            NiaRequest::DefineModifier(nia_define_modifier_request) => {
                let nia_define_modifier_response =
                    NiaDefineModifierResponse::from(
                        nia_define_modifier_request,
                        event_loop_handle,
                    );
                NiaResponse::DefineModifier(nia_define_modifier_response)
            }
            NiaRequest::RemoveModifier(nia_remove_keyboard_request) => {
                let nia_remove_keyboard_response =
                    NiaRemoveModifierResponse::from(
                        nia_remove_keyboard_request,
                        event_loop_handle,
                    );
                NiaResponse::RemoveModifier(nia_remove_keyboard_response)
            }
            NiaRequest::GetDefinedActions(nia_get_defined_actions_request) => {
                let nia_get_defined_actions_response =
                    NiaGetDefinedActionsResponse::from(
                        nia_get_defined_actions_request,
                        event_loop_handle,
                    );
                NiaResponse::GetDefinedActions(nia_get_defined_actions_response)
            }
            NiaRequest::DefineAction(nia_define_action_request) => {
                let nia_define_action_response = NiaDefineActionResponse::from(
                    nia_define_action_request,
                    event_loop_handle,
                );
                NiaResponse::DefineAction(nia_define_action_response)
            }
            NiaRequest::RemoveAction(nia_remove_action_request) => {
                let nia_remove_action_response = NiaRemoveActionResponse::from(
                    nia_remove_action_request,
                    event_loop_handle,
                );
                NiaResponse::RemoveAction(nia_remove_action_response)
            }
            NiaRequest::GetDefinedMappings(
                nia_get_defined_mappings_request,
            ) => {
                let nia_get_defined_mappings_response =
                    NiaGetDefinedMappingsResponse::from(
                        nia_get_defined_mappings_request,
                        event_loop_handle,
                    );
                NiaResponse::GetDefinedMappings(
                    nia_get_defined_mappings_response,
                )
            }
            NiaRequest::DefineMapping(nia_define_mapping_request) => {
                let nia_define_mapping_response =
                    NiaDefineMappingResponse::from(
                        nia_define_mapping_request,
                        event_loop_handle,
                    );
                NiaResponse::DefineMapping(nia_define_mapping_response)
            }
            NiaRequest::ChangeMapping(nia_change_mapping_request) => {
                let nia_change_mapping_response =
                    NiaChangeMappingResponse::from(
                        nia_change_mapping_request,
                        event_loop_handle,
                    );
                NiaResponse::ChangeMapping(nia_change_mapping_response)
            }
            NiaRequest::RemoveMapping(nia_remove_mapping_request) => {
                let nia_remove_mapping_response =
                    NiaRemoveMappingResponse::from(
                        nia_remove_mapping_request,
                        event_loop_handle,
                    );
                NiaResponse::RemoveMapping(nia_remove_mapping_response)
            }
            NiaRequest::IsListening(nia_is_listening_request) => {
                let nia_is_listening_response = NiaIsListeningResponse::from(
                    nia_is_listening_request,
                    event_loop_handle,
                );
                NiaResponse::IsListening(nia_is_listening_response)
            }
            NiaRequest::StartListening(nia_start_listening_request) => {
                let nia_start_listening_response =
                    NiaStartListeningResponse::from(
                        nia_start_listening_request,
                        event_loop_handle,
                    );
                NiaResponse::StartListening(nia_start_listening_response)
            }
            NiaRequest::StopListening(nia_stop_listening_request) => {
                let nia_stop_listening_response =
                    NiaStopListeningResponse::from(
                        nia_stop_listening_request,
                        event_loop_handle,
                    );
                NiaResponse::StopListening(nia_stop_listening_response)
            }
        }
    }
}
impl Serializable<NiaResponse, nia_protocol_rust::Response> for NiaResponse {
    /// Converts this response into its protobuf representation by setting
    /// the oneof field that matches the variant.
    fn to_pb(&self) -> Response {
        let mut response = nia_protocol_rust::Response::new();
        match &self {
            NiaResponse::Handshake(nia_handshake_response) => {
                let handshake_response = nia_handshake_response.to_pb();
                response.set_handshake_response(handshake_response);
            }
            NiaResponse::GetDevices(nia_get_devices_response) => {
                // A leftover `println!("{:?}", response)` debug statement was
                // removed here; it dumped every GetDevices response to stdout.
                let get_devices_response = nia_get_devices_response.to_pb();
                response.set_get_devices_response(get_devices_response);
            }
            NiaResponse::ExecuteCode(nia_execute_code_response) => {
                let execute_code_response = nia_execute_code_response.to_pb();
                response.set_execute_code_response(execute_code_response);
            }
            NiaResponse::DefineKeyboard(nia_define_keyboard_response) => {
                let define_keyboard_response =
                    nia_define_keyboard_response.to_pb();
                response.set_define_device_response(define_keyboard_response);
            }
            NiaResponse::RemoveKeyboardByPath(
                nia_remove_keyboard_by_path_response,
            ) => {
                let remove_keyboard_by_path =
                    nia_remove_keyboard_by_path_response.to_pb();
                response.set_remove_device_by_path_response(
                    remove_keyboard_by_path,
                );
            }
            NiaResponse::RemoveKeyboardByName(
                nia_remove_keyboard_by_name_response,
            ) => {
                let remove_keyboard_by_name =
                    nia_remove_keyboard_by_name_response.to_pb();
                response.set_remove_device_by_name_response(
                    remove_keyboard_by_name,
                );
            }
            NiaResponse::RemoveKeyboardById(
                nia_remove_keyboard_by_id_response,
            ) => {
                let remove_keyboard_by_id =
                    nia_remove_keyboard_by_id_response.to_pb();
                response
                    .set_remove_device_by_id_response(remove_keyboard_by_id);
            }
            NiaResponse::GetDefinedModifiers(
                nia_get_defined_modifiers_response,
            ) => {
                let get_defined_modifiers =
                    nia_get_defined_modifiers_response.to_pb();
                response
                    .set_get_defined_modifiers_response(get_defined_modifiers);
            }
            NiaResponse::DefineModifier(nia_define_modifier_response) => {
                let define_modifier = nia_define_modifier_response.to_pb();
                response.set_define_modifier_response(define_modifier);
            }
            NiaResponse::RemoveModifier(nia_remove_modifier_response) => {
                let remove_modifier = nia_remove_modifier_response.to_pb();
                response.set_remove_modifier_response(remove_modifier);
            }
            NiaResponse::GetDefinedActions(get_defined_actions_response) => {
                let get_defined_actions = get_defined_actions_response.to_pb();
                response.set_get_defined_actions_response(get_defined_actions);
            }
            NiaResponse::DefineAction(define_action_response) => {
                let define_action = define_action_response.to_pb();
                response.set_define_action_response(define_action);
            }
            NiaResponse::RemoveAction(remove_action_response) => {
                let remove_action = remove_action_response.to_pb();
                response.set_remove_action_response(remove_action);
            }
            NiaResponse::GetDefinedMappings(get_defined_mappings_response) => {
                let get_defined_mappings =
                    get_defined_mappings_response.to_pb();
                response
                    .set_get_defined_mappings_response(get_defined_mappings);
            }
            NiaResponse::DefineMapping(define_mapping_response) => {
                let define_mapping = define_mapping_response.to_pb();
                response.set_define_mapping_response(define_mapping);
            }
            NiaResponse::ChangeMapping(change_mapping_response) => {
                let change_mapping = change_mapping_response.to_pb();
                response.set_change_mapping_response(change_mapping);
            }
            NiaResponse::RemoveMapping(remove_mapping_response) => {
                let remove_mapping = remove_mapping_response.to_pb();
                response.set_remove_mapping_response(remove_mapping);
            }
            NiaResponse::IsListening(is_listening_response) => {
                let is_listening = is_listening_response.to_pb();
                response.set_is_listening_response(is_listening);
            }
            NiaResponse::StartListening(start_listening_response) => {
                let start_listening = start_listening_response.to_pb();
                response.set_start_listening_response(start_listening);
            }
            NiaResponse::StopListening(stop_listening_response) => {
                let stop_listening = stop_listening_response.to_pb();
                response.set_stop_listening_response(stop_listening);
            }
        }
        response
    }
    /// Deserialization from protobuf is not needed on the server side.
    fn from_pb(object_pb: Response) -> NiaServerResult<NiaResponse> {
        unimplemented!()
    }
}
|
mod client;
mod notification;
mod notifications_polling;
mod notifications_response;
pub use self::client::GithubClient;
pub use self::client::new as new_client;
pub use self::notification::PullRequest;
|
pub mod render;
pub mod math;
pub mod render_loop;
use wasm_bindgen::prelude::*;
use wasm_bindgen::{JsCast};
use web_sys::{WebGl2RenderingContext};
use render::builder::{RenderBuilder};
use render::api::{WebRenderAPI, WebRenderBuffer};
use render_loop::{RenderLoop};
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
// Swapped in only when the `wee_alloc` cargo feature is enabled, to shrink
// the wasm binary at some allocation-speed cost.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
// Binding to the browser's `console.log`, used by the `console_log!` macro.
#[wasm_bindgen]
extern "C" {
    #[wasm_bindgen(js_namespace = console)]
    fn log(s: &str);
}
// `println!`-style logging to the browser console via the `log` binding.
macro_rules! console_log {
    ($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
// The render loop specialized for the WebGL backend.
type WebRenderLoop = RenderLoop<WebRenderAPI, WebRenderBuffer>;
/// Viewport size in pixels.
#[derive(Debug, Copy, Clone)]
struct Dimensions {
    width: i32,
    height: i32,
}
/// Fully-built rendering runtime exposed to JavaScript: owns the render
/// loop and remembers the current viewport dimensions.
#[wasm_bindgen]
#[derive(Debug)]
pub struct Runtime {
    render_loop: WebRenderLoop,
    dimensions: Dimensions,
}
#[wasm_bindgen]
impl Runtime {
    // Private: instances are produced by RuntimeBuilder::create_runtime.
    fn new(render_loop: WebRenderLoop, dimensions: Dimensions) -> Self {
        Runtime { render_loop, dimensions }
    }
    /// Draws one frame; intended to be called from requestAnimationFrame.
    #[wasm_bindgen]
    pub fn tick(&self) {
        self.render_loop.draw();
    }
    /// Dumps the runtime's debug representation to the browser console.
    #[wasm_bindgen(js_name = "debugState")]
    pub fn debug_state(&self) {
        console_log!("Debug: {:#?}", self);
    }
    /// Resizes the viewport, forwarding the new size to the render loop.
    /// Errors from the render loop are stringified into a `JsValue`.
    #[wasm_bindgen(js_name = "setDimensions")]
    pub fn set_dimensions(&mut self, width: i32, height: i32) -> Result<(), JsValue> {
        self.dimensions = Dimensions { width, height };
        self.render_loop.update_viewport(width, height).map_err(error_to_string)
    }
}
/// Staged configuration for a `Runtime`: the WebGL context and shaders are
/// linked one call at a time from JavaScript before `createRuntime`.
#[wasm_bindgen]
#[derive(Debug)]
pub struct RuntimeBuilder {
    // Must be set (via setDimensions) before create_runtime succeeds.
    dimensions: Option<Dimensions>,
    render_builder: RenderBuilder,
}
fn error_to_string<E>(error: E) -> JsValue where E: ToString {
return JsValue::from_str(error.to_string().as_ref())
}
#[wasm_bindgen]
impl RuntimeBuilder {
    /// Creates an empty builder; dimensions, context, and shaders must be
    /// supplied before `createRuntime`.
    #[wasm_bindgen(constructor)]
    pub fn new() -> Result<RuntimeBuilder, JsValue> {
        let render_builder = RenderBuilder::new();
        let dimensions = None;
        Ok(RuntimeBuilder { render_builder, dimensions })
    }
    /// Accepts a JS value that must be a `WebGl2RenderingContext` and hands
    /// it to the render builder; errors with a descriptive message otherwise.
    #[wasm_bindgen(js_name = "linkWebglContext")]
    pub fn link_webgl_context(&mut self, maybe_context: JsValue) -> Result<(), JsValue> {
        maybe_context.dyn_into::<WebGl2RenderingContext>()
            .map(|context| self.render_builder.set_context(context))
            .map_err(|value| {
                let message = format!("expected web gl context, instead got {:?}", value);
                JsValue::from_str(message.as_ref())
            })
    }
    /// Compiles and stores the fragment shader source.
    #[wasm_bindgen(js_name = "linkFragShader")]
    pub fn link_frag_shader(&mut self, shader_source: &str) -> Result<(), JsValue> {
        self.render_builder.set_frag_shader(shader_source)
            .map_err(|err| JsValue::from_str(err.to_string().as_ref()))
    }
    /// Compiles and stores the vertex shader source.
    #[wasm_bindgen(js_name = "linkVertShader")]
    pub fn link_vert_shader(&mut self, shader_source: &str) -> Result<(), JsValue> {
        self.render_builder.set_vert_shader(shader_source)
            .map_err(|err| JsValue::from_str(err.to_string().as_ref()))
    }
    /// Builds the render API and wraps it into a runnable `Runtime`.
    /// Fails if dimensions were never set or the render pipeline is incomplete.
    #[wasm_bindgen(js_name = "createRuntime")]
    pub fn create_runtime(&mut self) -> Result<Runtime, JsValue> {
        let dimensions = self.dimensions.ok_or("need dimensions before building runtime")?;
        let render_loop = self.render_builder.build_render_api()
            .map_err(error_to_string)
            .and_then(|render_api|
                RenderLoop::create(
                    render_api,
                    dimensions.width,
                    dimensions.height,
                ).map_err(error_to_string))?;
        Ok(Runtime::new(render_loop, dimensions))
    }
    /// Records the viewport size the runtime will be created with.
    #[wasm_bindgen(js_name = "setDimensions")]
    pub fn set_dimensions(&mut self, width: i32, height: i32) {
        self.dimensions = Some(Dimensions { width, height });
    }
    /// Dumps the builder's debug representation to the browser console.
    #[wasm_bindgen(js_name = "debugState")]
    pub fn debug_state(&self) {
        console_log!("Debug: {:#?}", self);
    }
}
/// Installs `console_error_panic_hook` so Rust panics show up as readable
/// messages in the browser console. Safe to call more than once.
#[wasm_bindgen(js_name = "setupPanicHook")]
pub fn setup_panic_hook() {
    console_error_panic_hook::set_once();
}
|
//! Warning: incomplete
//!
//! This is a draft of the constants that you can match against with certain values given inside
//! responses.
/// Known data sources that may appear in API responses.
// NOTE(review): SCREAMING_CASE variants are non-idiomatic Rust
// (UpperCamelCase is the convention), but renaming them would break the
// public API; kept as-is.
pub enum Source {
    RISKIQ,
    PINGLY,
    DNSRES,
    KASPERSKY,
}
impl Source {
    /// Returns the lowercase wire-format name for this source, matching the
    /// strings used in API responses.
    pub fn string(&self) -> String {
        let name = match self {
            Source::RISKIQ => "riskiq",
            Source::PINGLY => "pingly",
            Source::DNSRES => "dnsres",
            Source::KASPERSKY => "kaspersky",
        };
        name.to_string()
    }
}
|
use serde::ser::{Serialize, Serializer};
use super::Axial;
impl Serialize for Axial {
    /// Serializes the coordinate as a fixed-size `[q, r]` pair; serde emits
    /// a 2-element tuple for `[i32; 2]`, matching the existing wire format.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        [self.q, self.r].serialize(serializer)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_test::{assert_tokens, Token};
    // Round-trip through JSON preserves the coordinate.
    #[test]
    fn de_serialize_json() {
        let a = Axial::new(12, 69);
        let pl = serde_json::to_string(&a).unwrap();
        let b = serde_json::from_str(pl.as_str()).unwrap();
        assert_eq!(a, b);
    }
    // Round-trip through YAML preserves the coordinate.
    #[test]
    fn de_serialize_yaml() {
        let a = Axial::new(12, 69);
        let pl = serde_yaml::to_string(&a).unwrap();
        let b = serde_yaml::from_str(pl.as_str()).unwrap();
        assert_eq!(a, b);
    }
    // The wire format is exactly a 2-tuple of i32 (q then r).
    #[test]
    fn serde_tokens() {
        let a = Axial::new(1, 2);
        assert_tokens(
            &a,
            &[
                Token::Tuple { len: 2 },
                Token::I32(1),
                Token::I32(2),
                Token::TupleEnd,
            ],
        );
    }
}
|
use std::error::Error;
use std::fmt::{Display, Formatter};
/// Crate-level error wrapper; the actual cause is carried in `errors`.
#[derive(Debug)]
pub struct TreeerErr {
    errors: Errors
}
// Default error is the Unknown variant.
impl Default for TreeerErr { fn default() -> Self { Self { errors: Errors::Unknown } } }
// Display reuses the Debug representation — adequate for logging, though a
// hand-written user-facing message would be more conventional.
impl Display for TreeerErr { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { write!(f, "{:?}", self) } }
impl Error for TreeerErr {}
/// Concrete error causes wrapped by `TreeerErr`.
#[derive(Debug)]
pub enum Errors {
    // Catch-all used by `Default` and `From<()>`.
    Unknown,
    IoError(std::io::Error),
    FromUtf8Error(std::string::FromUtf8Error),
}
// Allows `?` on `Result<_, ()>`, mapping unit errors to Unknown.
impl From<()> for TreeerErr {
    fn from(_: ()) -> Self {
        Self { errors: Errors::Unknown }
    }
}
// Allows `?` on std I/O results.
impl From<std::io::Error> for TreeerErr {
    fn from(e: std::io::Error) -> Self {
        Self { errors: Errors::IoError(e) }
    }
}
// Allows `?` on `String::from_utf8` results.
impl From<std::string::FromUtf8Error> for TreeerErr {
    fn from(e: std::string::FromUtf8Error) -> Self {
        Self { errors: Errors::FromUtf8Error(e) }
    }
}
|
// Copyright 2016 FullContact, Inc
// Copyright 2017 Jason Lingle
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cell::Cell;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr;
use libc::c_uint;
use ffi;
use ffi2;
use supercow::{Supercow, NonSyncSupercow};
use env::{self, Environment, Stat};
use dbi::{db, Database};
use error::{Error, Result};
use mdb_vals::*;
use traits::*;
use cursor::{self, Cursor, StaleCursor};
/// Flags used when calling the various `put` functions.
pub mod put {
    use ffi;
    use libc;

    // Each constant below maps directly onto the LMDB `MDB_*` flag of the
    // same name.
    bitflags! {
        /// Flags used when calling the various `put` functions.
        ///
        /// Note that `RESERVE` and `MULTIPLE` are not exposed in these flags
        /// because their memory ownership and/or parameter semantics are
        /// different. `CURRENT` is expressed separately on the cursor
        /// functions.
        pub struct Flags : libc::c_uint {
            /// Enter the new key/data pair only if it does not already appear
            /// in the database. This flag may only be specified if the
            /// database was opened with `DUPSORT`. The function will return
            /// `KEYEXIST` if the key/data pair already appears in the
            /// database.
            ///
            /// ## Example
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, Some("reversed"),
            ///     &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
            ///     access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
            ///     // Duplicate, but that's OK by default
            ///     access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
            ///     // `NODUPDATA` blocks adding an identical item
            ///     assert!(access.put(&db, "Fruit", "Apple", lmdb::put::NODUPDATA).is_err());
            ///     // But doesn't affect pairs not already present
            ///     access.put(&db, "Fruit", "Durian", lmdb::put::NODUPDATA).unwrap();
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            ///
            /// When used on a cursor, the cursor is positioned at the
            /// conflicting key/value pair if this results in a `KEYEXIST`
            /// error.
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, Some("reversed"),
            ///     &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
            ///     access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
            ///     access.put(&db, "Fruit", "Durian", lmdb::put::Flags::empty()).unwrap();
            ///
            ///     let mut cursor = txn.cursor(&db).unwrap();
            ///     assert_eq!(Err(lmdb::Error::Code(lmdb::error::KEYEXIST)),
            ///                cursor.put(&mut access, "Fruit", "Durian",
            ///                           lmdb::put::NODUPDATA));
            ///     assert_eq!(("Fruit", "Durian"), cursor.get_current(&access).unwrap());
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            const NODUPDATA = ffi::MDB_NODUPDATA;
            /// Enter the new key/data pair only if the key does not already
            /// appear in the database. The function will return `KEYEXIST` if
            /// the key already appears in the database, even if the database
            /// supports duplicates (`DUPSORT`).
            ///
            /// ## Examples
            ///
            /// ### In a 1:1 database
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, None, &lmdb::DatabaseOptions::defaults())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
            ///     // By default, collisions overwrite the old value
            ///     access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
            ///     assert_eq!("Orange", access.get::<str,str>(&db, "Fruit").unwrap());
            ///     // But `NOOVERWRITE` prevents that
            ///     assert!(access.put(&db, "Fruit", "Durian", lmdb::put::NOOVERWRITE).is_err());
            ///     assert_eq!("Orange", access.get::<str,str>(&db, "Fruit").unwrap());
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            ///
            /// ### In a `DUPSORT` database
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, Some("reversed"),
            ///     &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     // Ordinarily, we can add multiple items per key
            ///     access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
            ///     access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
            ///     let mut cursor = txn.cursor(&db).unwrap();
            ///     cursor.seek_k::<str,str>(&access, "Fruit").unwrap();
            ///     assert_eq!(2, cursor.count().unwrap());
            ///
            ///     // But this can be prevented with `NOOVERWRITE`
            ///     access.put(&db, "Veggie", "Carrot", lmdb::put::NOOVERWRITE).unwrap();
            ///     assert!(access.put(&db, "Veggie", "Squash", lmdb::put::NOOVERWRITE).is_err());
            ///     cursor.seek_k::<str,str>(&access, "Veggie").unwrap();
            ///     assert_eq!(1, cursor.count().unwrap());
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            // TODO: "The data parameter will be set to point to the existing
            // item." We should provide functionality to support that.
            const NOOVERWRITE = ffi::MDB_NOOVERWRITE;
            /// Append the given key/data pair to the end of the database. This
            /// option allows fast bulk loading when keys are already known to
            /// be in the correct order. Loading unsorted keys with this flag
            /// will cause a `KEYEXIST` error.
            ///
            /// ## Example
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, None, &lmdb::DatabaseOptions::defaults())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     // Load values in ascending order
            ///     access.put(&db, "France", "Paris", lmdb::put::APPEND).unwrap();
            ///     access.put(&db, "Germany", "Berlin", lmdb::put::APPEND).unwrap();
            ///     access.put(&db, "Latvia", "Rīga", lmdb::put::APPEND).unwrap();
            ///     // Error if you violate ordering
            ///     assert!(access.put(&db, "Armenia", "Yerevan", lmdb::put::APPEND)
            ///             .is_err());
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            const APPEND = ffi::MDB_APPEND;
            /// As with `APPEND` above, but for sorted dup data.
            const APPENDDUP = ffi::MDB_APPENDDUP;
        }
    }
}
/// Flags used when deleting items.
pub mod del {
    use ffi;
    use libc;

    // The constant below maps directly onto the LMDB `MDB_*` flag of the
    // same name.
    bitflags! {
        /// Flags used when deleting items via cursors.
        pub struct Flags : libc::c_uint {
            /// Delete all of the data items for the current key instead of
            /// just the current item. This flag may only be specified if the
            /// database was opened with `DUPSORT`.
            ///
            /// ## Example
            ///
            /// ```
            /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
            /// # fn main() {
            /// # let env = create_env();
            /// let db = lmdb::Database::open(
            ///     &env, Some("reversed"),
            ///     &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
            ///     .unwrap();
            /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
            /// {
            ///     let mut access = txn.access();
            ///     let f = lmdb::put::Flags::empty();
            ///     access.put(&db, "Fruit", "Apple", f).unwrap();
            ///     access.put(&db, "Fruit", "Orange", f).unwrap();
            ///     access.put(&db, "Fruit", "Durian", f).unwrap();
            ///
            ///     let mut cursor = txn.cursor(&db).unwrap();
            ///     cursor.seek_kv("Fruit", "Durian").unwrap();
            ///     // By default, only the current item is deleted.
            ///     cursor.del(&mut access, lmdb::del::Flags::empty()).unwrap();
            ///     cursor.seek_k::<str,str>(&access, "Fruit").unwrap();
            ///     assert_eq!(2, cursor.count().unwrap());
            ///     // But with `NODUPDATA`, they will all go away
            ///     cursor.del(&mut access, lmdb::del::NODUPDATA).unwrap();
            ///     assert!(cursor.seek_k::<str,str>(&access, "Fruit").is_err());
            /// }
            /// txn.commit().unwrap();
            /// # }
            /// ```
            const NODUPDATA = ffi::MDB_NODUPDATA;
        }
    }
}
// This is internal, but used by other parts of the library
/// Owning wrapper around a raw `MDB_txn` pointer.
///
/// A null pointer marks a handle that has already been consumed (committed
/// or aborted); the `Drop` impl aborts any transaction still live.
#[derive(Debug)]
pub struct TxHandle(pub *mut ffi::MDB_txn);
impl Drop for TxHandle {
fn drop(&mut self) {
if !self.0.is_null() {
unsafe {
ffi::mdb_txn_abort(self.0);
}
self.0 = ptr::null_mut();
}
}
}
impl TxHandle {
    /// Commits the wrapped transaction and nulls the internal pointer.
    ///
    /// ## Safety
    ///
    /// The caller must ensure the wrapped pointer is a live transaction
    /// owned by this handle and not used concurrently.
    pub unsafe fn commit(&mut self) -> Result<()> {
        // Null the pointer *before* committing so `Drop` cannot also abort
        // the (now consumed) transaction.
        let txn_p = mem::replace(&mut self.0, ptr::null_mut());
        lmdb_call!(ffi::mdb_txn_commit(txn_p));
        Ok(())
    }
}
/// Base functionality for an LMDB transaction.
///
/// The type is "const" in a similar usage to the modifier in C: One cannot use
/// it to make any modifications, but also cannot rely on it actually being
/// read-only. `ConstTransaction`s are used to write code that can operate in
/// either kind of transaction.
///
/// Unlike most other LMDB wrappers, transactions here are (indirectly) the
/// things in control of accessing data behind cursors. This is in order to
/// correctly express memory semantics: Moving a cursor does not invalidate
/// memory obtained from the cursor; however, any mutation through the same
/// transaction does. We therefore model accesses to data in the environment as
/// borrows of the transaction and the database themselves (possibly mutable on
/// the latter), which allows the borrow checker to ensure that all references
/// are dropped before doing a structural modification.
///
/// Note that due to limitations in the Rust borrow checker, one actually needs
/// to use the `*Accessor` structs to access data. Any transaction will yield
/// at most one accessor, which is implemented with a runtime check that should
/// in the vast majority of cases get optimised out.
///
/// Mutability of a transaction reference does not indicate mutability of the
/// underlying database, but rather exclusivity for enforcement of child
/// transaction semantics.
///
/// ## Ownership
///
/// Transactions support all three ownership modes (but owned mode is not
/// useful). See `ReadTransaction` and `WriteTransaction` for details.
///
/// ## Lifetime
///
/// A `ConstTransaction` must be strictly outlived by its `Environment`.
///
/// `'env` is covariant: given two lifetimes `'x` and `'y` where `'x: 'y`, a
/// `&ConstTransaction<'x>` will implicitly coerce to `&ConstTransaction<'y>`.
///
/// ```rust,no_run
/// # #![allow(dead_code)]
/// # extern crate lmdb_zero as lmdb;
/// # fn main() { }
/// #
/// fn convariance<'x, 'y>(db: &lmdb::ConstTransaction<'x>)
/// where 'x: 'y {
/// let _db2: &lmdb::ConstTransaction<'y> = db;
/// }
/// ```
///
/// Because of this property, if you need to hold onto an
/// `&lmdb::ConstTransaction` and must explicitly name both lifetimes,
/// it is usually best to use the same lifetime for both the reference and the
/// parameter, eg `&'x lmdb::ConstTransaction<'x>`.
#[derive(Debug)]
pub struct ConstTransaction<'env> {
    // Handle on the environment this transaction runs in; `NonSyncSupercow`
    // permits borrowed, shared, or owned ownership modes.
    env: NonSyncSupercow<'env, Environment>,
    // Owning handle on the raw LMDB transaction (aborts on drop).
    tx: TxHandle,
    // Runtime flag enforcing the at-most-one-live-accessor rule checked in
    // `access()` and cleared when the accessor is dropped.
    has_yielded_accessor: Cell<bool>,
}
/// A read-only LMDB transaction.
///
/// In addition to all operations valid on `ConstTransaction`, a
/// `ReadTransaction` can additionally operate on cursors with a lifetime
/// scoped to the environment instead of the transaction.
///
/// ## Ownership
///
/// `ReadTransaction`s can be created with all three ownership modes (but owned
/// mode is not useful).
///
/// ### Example — Shared mode
///
/// ```
/// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
/// use std::sync::Arc;
///
/// # fn main() {
/// let env = Arc::new(create_env());
/// let db = Arc::new(lmdb::Database::open(
/// env.clone(), None, &lmdb::DatabaseOptions::defaults()).unwrap());
///
/// // Type and lifetime annotated explicitly for clarity
/// let txn: lmdb::ReadTransaction<'static> = lmdb::ReadTransaction::new(
/// env.clone()).unwrap();
///
/// // Do stuff with `txn`...
/// # drop(txn); drop(db);
/// # }
/// ```
///
/// ## Lifetime
///
/// All notes for `ConstTransaction` apply.
#[derive(Debug)]
// This MUST be a newtype struct and MUST NOT `impl Drop`
// (cleanup is handled by the inner `ConstTransaction`'s `TxHandle`).
pub struct ReadTransaction<'env>(ConstTransaction<'env>);
/// A read-write LMDB transaction.
///
/// In addition to all operations valid on `ConstTransaction`, it is also
/// possible to perform writes to the underlying databases.
///
///
/// ## Ownership
///
/// `WriteTransaction`s can be created with all three ownership modes (but
/// owned mode is not useful).
///
/// ### Example — Shared mode
///
/// ```
/// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
/// use std::sync::Arc;
///
/// # fn main() {
/// let env = Arc::new(create_env());
/// let db = Arc::new(lmdb::Database::open(
/// env.clone(), None, &lmdb::DatabaseOptions::defaults()).unwrap());
///
/// // Type and lifetime annotated explicitly for clarity
/// let txn: lmdb::WriteTransaction<'static> = lmdb::WriteTransaction::new(
/// env.clone()).unwrap();
///
/// // Do stuff with `txn`...
///
/// txn.commit().unwrap();
/// # }
/// ```
///
/// ## Lifetime
///
/// All notes for `ConstTransaction` apply.
#[derive(Debug)]
// This MUST be a newtype struct and MUST NOT `impl Drop`
// (cleanup is handled by the inner `ConstTransaction`'s `TxHandle`).
pub struct WriteTransaction<'env>(ConstTransaction<'env>);
/// A read-only LMDB transaction that has been reset.
///
/// It can be renewed by calling `ResetTransaction::renew()`.
///
/// ## Lifetime
///
/// All notes for `ReadTransaction` apply.
#[derive(Debug)]
// Newtype over `ReadTransaction`; `renew()` converts it back into a usable
// read transaction.
pub struct ResetTransaction<'env>(ReadTransaction<'env>);
/// A read-only data accessor obtained from a `ConstTransaction`.
///
/// There is no corresponding `ReadAccessor`, since there are no additional
/// operations one can do with a known-read-only accessor.
///
/// ## Lifetime
///
/// A `ConstAccessor` must be strictly outlived by its parent transaction. The
/// parent transaction cannot be destroyed (committed, etc) until the borrow
/// from the accessor ends. This in many cases requires adding an extra scope
/// (with bare `{ }` braces) in which to obtain the accessor, as can be seen in
/// many of the examples.
///
/// The lifetime of a reference to a `ConstAccessor` dictates the lifetime of
/// the data accessed via the accessor.
///
/// The `'txn` lifetime parameter is covariant. That is, given two lifetimes
/// `'x` and `'y` where `'x: 'y`, a `&ConstAccessor<'x>` can be implicitly
/// coerced into a `&ConstAccessor<'y>`.
///
/// ```rust,no_run
/// # #![allow(dead_code)]
/// # extern crate lmdb_zero as lmdb;
/// # fn main() { }
/// #
/// fn convariance<'x, 'y>(db: &lmdb::ConstAccessor<'x>)
/// where 'x: 'y {
/// let _db2: &lmdb::ConstAccessor<'y> = db;
/// }
/// ```
///
/// Because of this property, if you need to hold onto an
/// `&lmdb::ConstAccessor` and must explicitly name both lifetimes, it
/// is usually best to use the same lifetime for both the reference and the
/// parameter, eg `&'x lmdb::ConstAccessor<'x>`.
#[derive(Debug)]
// Borrows the transaction for `'txn`; references to data obtained through
// the accessor are in turn tied to borrows of the accessor.
pub struct ConstAccessor<'txn>(&'txn ConstTransaction<'txn>);
/// ConstAccessor implements Drop trait so that if it gets
/// dropped, a new accessor can be safely obtained
impl<'txn> Drop for ConstAccessor<'txn> {
    fn drop(&mut self) {
        // Clear the flag checked by `ConstTransaction::access()` so the
        // transaction may hand out a fresh accessor.
        self.0.has_yielded_accessor.set(false)
    }
}
/// A read-write data accessor obtained from a `WriteTransaction`.
///
/// All operations that can be performed on `ConstAccessor` can also be
/// performed on `WriteAccessor`.
///
/// ## Lifetime
///
/// Nominally, `WriteAccessor` would behave the same as `ConstAccessor`.
///
/// However, there is never any useful reason to explicitly reference a
/// `&WriteAccessor` (ie, a shared reference). Instead, one talks about a
/// `&mut WriteAccessor`. The unfortunate consequence here is that the `'txn`
/// lifetime ends up being _invariant_; that is, the following code will not
/// compile:
///
/// ```rust,ignore
/// # #![allow(dead_code)]
/// # extern crate lmdb_zero as lmdb;
/// # fn main() { }
/// #
/// fn convariance<'x, 'y>(db: &mut lmdb::WriteAccessor<'x>)
/// where 'x: 'y {
/// let _db2: &mut lmdb::WriteAccessor<'y> = db; // ERROR!
/// }
/// ```
///
/// The compiler's error messages here tend to be unhelpful. In certain cases,
/// it will suggest changing the function declaration above to something like
/// `&'x mut lmdb::WriteAccessor<'x>`. Applying such a fix when it is suggested
/// _will appear to work_. But what happens is that you end up propagating
/// `&'txn mut lmdb::WriteAccessor<'txn>` the whole way up your call stack.
/// Since `'txn` is invariant, it is inferred to be exactly equal to the
/// lifetime of the transaction, and now you've declared that the borrow from
/// the transaction exists for the entire lifetime of the transaction. This
/// means that you cannot actually commit the transaction.
///
/// Instead, make sure you always have separate type parameters on the `&mut`
/// and the `WriteAccessor` itself. This can usually be accomplished by letting
/// lifetime elision run its course. If you must name both, generally go with
/// `&'access mut WriteAccessor<'txn>`. The `'access` lifetime is the lifetime
/// of any data you obtain via the accessor.
#[derive(Debug)]
// Newtype over `ConstAccessor`; the read-only API is reachable via `Deref`.
pub struct WriteAccessor<'txn>(ConstAccessor<'txn>);
impl<'env> ConstTransaction<'env> {
    /// Begins a new LMDB transaction (`mdb_txn_begin`) in `env`, optionally
    /// nested under `parent`, with the given raw LMDB `flags`.
    fn new<'outer: 'env, E>(env: E,
                            parent: Option<&'env mut ConstTransaction<'outer>>,
                            flags: c_uint) -> Result<Self>
    where E : Into<NonSyncSupercow<'env, Environment>> {
        let env : NonSyncSupercow<'env, Environment> = env.into();

        let mut rawtx: *mut ffi::MDB_txn = ptr::null_mut();
        unsafe {
            lmdb_call!(ffi::mdb_txn_begin(
                env::env_ptr(&env), parent.map_or(ptr::null_mut(), |p| p.tx.0),
                flags, &mut rawtx));
        }

        Ok(ConstTransaction {
            env: env,
            tx: TxHandle(rawtx),
            // No accessor handed out yet; see `access()`.
            has_yielded_accessor: Cell::new(false),
        })
    }

    /// Returns an accessor used to manipulate data in this transaction.
    ///
    /// ## Ownership
    ///
    /// Unlike most other lmdb-zero APIs, accessors do not support shared
    /// ownership modes (e.g., where the accessor would hold on to a
    /// `Rc<ConstTransaction>`). If you need dynamically-managed lifetime,
    /// instead simply drop the accessor and get a new one the next time one is
    /// needed.
    ///
    /// ## Panics
    ///
    /// Panics if this function has already been called on this transaction and
    /// the returned value has not yet been dropped.
    ///
    /// ## Example
    ///
    /// ```rust,should_panic
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # #[allow(unused_variables)]
    /// # fn main() {
    /// # let env = create_env();
    /// let txn = lmdb::ReadTransaction::new(&env).unwrap();
    /// // Get access the first time
    /// let access = txn.access();
    ///
    /// // You can't get the accessor again in the same scope, since this
    /// // would create two references to the same logical memory and allow
    /// // creating aliased mutable references and so forth.
    /// let access2 = txn.access(); // PANIC!
    /// # }
    /// ```
    #[inline]
    pub fn access(&self) -> ConstAccessor {
        // Runtime enforcement of the at-most-one-accessor rule; the flag is
        // cleared again when the returned `ConstAccessor` is dropped.
        assert!(!self.has_yielded_accessor.get(),
                "Transaction accessor already returned");
        self.has_yielded_accessor.set(true);
        ConstAccessor(self)
    }

    /// Creates a new cursor scoped to this transaction, bound to the given
    /// database.
    ///
    /// This method is functionally equivalent to the method on `CreateCursor`
    /// and exists for convenience and backwards-compatibility.
    ///
    /// If you have an, e.g., `Rc<ReadTransaction>` and want to get a
    /// `Cursor<'static,'db>`, make sure you have the `CreateCursor` trait
    /// imported so that the needed alternate implementations of this method
    /// are available.
    #[inline]
    pub fn cursor<'txn, 'db, DB>(&'txn self, db: DB)
                                 -> Result<Cursor<'txn,'db>>
    where DB : Into<Supercow<'db, Database<'db>>> {
        Cursor::construct(Supercow::borrowed(self), db.into())
    }

    /// Returns the internal id of this transaction.
    pub fn id(&self) -> usize {
        unsafe {
            ffi2::mdb_txn_id(self.tx.0)
        }
    }

    /// Retrieves statistics for a database.
    pub fn db_stat(&self, db: &Database) -> Result<Stat> {
        try!(db.assert_same_env(&self.env));

        unsafe {
            let mut raw: ffi::MDB_stat = mem::zeroed();
            lmdb_call!(ffi::mdb_stat(self.tx.0, db.as_raw(), &mut raw));
            Ok(raw.into())
        }
    }

    /// Retrieve the DB flags for a database handle.
    pub fn db_flags(&self, db: &Database) -> Result<db::Flags> {
        try!(db.assert_same_env(&self.env));

        let mut raw: c_uint = 0;
        unsafe {
            lmdb_call!(ffi::mdb_dbi_flags(self.tx.0, db.as_raw(), &mut raw));
        }
        Ok(db::Flags::from_bits_truncate(raw))
    }

    /// Returns `Error::Mismatch` unless `cursor` was created from this exact
    /// transaction (pointer identity, not structural equality).
    #[inline]
    fn assert_sensible_cursor(&self, cursor: &Cursor)
                              -> Result<()> {
        if self as *const ConstTransaction !=
            cursor::txn_ref(cursor) as *const ConstTransaction
        {
            Err(Error::Mismatch)
        } else {
            Ok(())
        }
    }
}
// Internally used by other parts of the crate
/// Checks that `cursor` was created from the transaction backing `access`,
/// returning `Error::Mismatch` otherwise.
#[inline]
pub fn assert_sensible_cursor(access: &ConstAccessor, cursor: &Cursor)
                              -> Result<()> {
    access.0.assert_sensible_cursor(cursor)
}
/// Checks that `db` belongs to the same environment as `txn` (internal
/// helper; delegates to `Database::assert_same_env`).
#[inline]
pub fn assert_same_env(txn: &ConstTransaction, db: &Database)
                       -> Result<()> {
    db.assert_same_env(&txn.env)
}
/// Verifies that `txn` was opened within `env`, returning `Error::Mismatch`
/// otherwise.
#[inline]
pub fn assert_in_env(txn: &ConstTransaction, env: &Environment)
                     -> Result<()> {
    // Identity comparison: it must be the very same `Environment` instance,
    // not merely an equal one.
    if env as *const Environment == &*txn.env as *const Environment {
        Ok(())
    } else {
        Err(Error::Mismatch)
    }
}
/// Exposes the raw `MDB_txn` pointer for FFI calls elsewhere in the crate.
#[inline]
pub fn txptr(txn: &ConstTransaction) -> *mut ffi::MDB_txn {
    txn.tx.0
}
// Exposes the shared `ConstTransaction` API on `ReadTransaction`.
impl<'env> Deref for ReadTransaction<'env> {
    type Target = ConstTransaction<'env>;

    fn deref(&self) -> &ConstTransaction<'env> {
        &self.0
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<'env> DerefMut for ReadTransaction<'env> {
    fn deref_mut(&mut self) -> &mut ConstTransaction<'env> {
        &mut self.0
    }
}
impl<'env> ReadTransaction<'env> {
    /// Opens a new, read-only transaction within the given environment.
    ///
    /// ## Note
    ///
    /// A transaction and its cursors must only be used by a single thread
    /// (enforced by the rust compiler), and a thread may only have a single
    /// transaction at a time. If `NOTLS` is in use, this does not apply to
    /// read-only transactions. Attempting to open a read-only transaction
    /// while the current thread holds a read-write transaction will deadlock.
    pub fn new<E>(env: E) -> Result<Self>
    where E : Into<NonSyncSupercow<'env, Environment>> {
        Ok(ReadTransaction(try!(ConstTransaction::new(
            env, None, ffi::MDB_RDONLY))))
    }

    /// Dissociates the given cursor from this transaction and its database,
    /// returning a `StaleCursor` which can be reused later.
    ///
    /// This only fails if `cursor` does not belong to this transaction.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// # let db = lmdb::Database::open(
    /// #     &env, None, &lmdb::DatabaseOptions::defaults())
    /// #     .unwrap();
    /// let mut saved_cursor;
    /// {
    ///     let txn = lmdb::ReadTransaction::new(&env).unwrap();
    ///     let cursor = txn.cursor(&db).unwrap();
    ///     // Do some stuff with `txn` and `cursor`
    ///
    ///     // We don't want to realloc `cursor` next time, so save it away
    ///     saved_cursor = txn.dissoc_cursor(cursor).unwrap();
    /// } // Read transaction goes away, but our saved cursor remains
    ///
    /// {
    ///     let txn = lmdb::ReadTransaction::new(&env).unwrap();
    ///     // Rebind the old cursor. It continues operating on `db`.
    ///     let cursor = txn.assoc_cursor(saved_cursor).unwrap();
    ///     // Do stuff with txn, cursor
    ///
    ///     // We can save the cursor away again
    ///     saved_cursor = txn.dissoc_cursor(cursor).unwrap();
    /// }
    /// # }
    /// ```
    ///
    /// ## Example — Shared ownership mode
    ///
    /// Cursors can also be dissociated and reassociated with transactions with
    /// shared ownership mode. This can also include changing the ownership
    /// mode. To be able to use shared ownership mode, make sure that the
    /// `AssocCursor` trait is imported or else you will simply borrow the
    /// inner transaction instead of taking a copy of the `Rc`, etc.
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// use std::sync::Arc;
    ///
    /// use lmdb::traits::{AssocCursor, CreateCursor};
    ///
    /// # fn main() {
    /// // N.B. Unnecessary type and lifetime annotations included for clarity
    /// let env: Arc<lmdb::Environment> = Arc::new(create_env());
    /// let db: Arc<lmdb::Database<'static>> = Arc::new(lmdb::Database::open(
    ///     env.clone(), None, &lmdb::DatabaseOptions::defaults()).unwrap());
    ///
    /// let mut saved_cursor: lmdb::StaleCursor<'static>;
    /// {
    ///     // `Arc` is unnecessary in this trivial example, but let's pretend
    ///     // there was good use for this.
    ///     let txn: Arc<lmdb::ReadTransaction> = Arc::new(
    ///         lmdb::ReadTransaction::new(env.clone()).unwrap());
    ///     let cursor: lmdb::Cursor<'static, 'static> =
    ///         txn.cursor(db.clone()).unwrap();
    ///
    ///     // Do some stuff with `txn` and `cursor`
    ///
    ///     // We don't want to realloc `cursor` next time, so save it away
    ///     saved_cursor = txn.dissoc_cursor(cursor).unwrap();
    /// }
    ///
    /// {
    ///     let txn: Arc<lmdb::ReadTransaction<'static>> =
    ///         Arc::new(lmdb::ReadTransaction::new(env.clone()).unwrap());
    ///     // Rebind the old cursor. It continues operating on `db`.
    ///     let cursor: lmdb::Cursor<'static, 'static> =
    ///         txn.assoc_cursor(saved_cursor).unwrap();
    ///     // Do stuff with txn, cursor
    ///
    ///     // We can save the cursor away again
    ///     saved_cursor = txn.dissoc_cursor(cursor).unwrap();
    /// }
    /// # }
    /// ```
    pub fn dissoc_cursor<'txn,'db>(&self, cursor: Cursor<'txn,'db>)
                                   -> Result<StaleCursor<'db>>
    where 'env: 'db {
        try!(self.assert_sensible_cursor(&cursor));
        // The stale cursor outlives this transaction, so it needs a
        // non-owned (borrowed or shared) handle on the environment.
        let env = Supercow::clone_non_owned(&self.env)
            .expect("Cannot use owned `Environment` with `dissoc_cursor`");
        Ok(cursor::to_stale(cursor, env))
    }

    /// Associates a saved read-only cursor with this transaction.
    ///
    /// The cursor will be rebound to this transaction, but will continue using
    /// the same database that it was previously.
    ///
    /// This method is functionally equivalent to the method on `AssocCursor`
    /// and exists for convenience and backwards-compatibility.
    ///
    /// If you have an, e.g., `Rc<ReadTransaction>` and want to get a
    /// `Cursor<'static,'db>`, make sure you have the `AssocCursor` trait
    /// imported so that the needed alternate implementations of this method
    /// are available.
    pub fn assoc_cursor<'txn,'db>(&'txn self, cursor: StaleCursor<'db>)
                                  -> Result<Cursor<'txn,'db>> {
        let self_as_const: &'txn ConstTransaction = &*self;
        Cursor::from_stale(cursor,
                           NonSyncSupercow::borrowed(&*self_as_const))
    }

    /// Resets this transaction, releasing most of its resources but allowing
    /// it to be quickly renewed if desired.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// let mut saved_txn;
    /// {
    ///     let txn = lmdb::ReadTransaction::new(&env).unwrap();
    ///     {
    ///         let access = txn.access();
    ///         // Do stuff with `txn`, `access`
    ///     }
    ///     // Save our transaction so we don't have to reallocate it next time,
    ///     // but we also don't keep locks around and will later move to the
    ///     // latest version of the environment.
    ///     saved_txn = txn.reset();
    /// }
    ///
    /// {
    ///     // Instead of creating a brand new transaction, renew the one we
    ///     // saved.
    ///     let txn = saved_txn.renew().unwrap();
    ///     {
    ///         let access = txn.access();
    ///         // Do stuff with `txn`, `access`
    ///     }
    ///
    ///     // We can save the transaction away again
    ///     saved_txn = txn.reset();
    /// }
    /// # }
    /// ```
    pub fn reset(self) -> ResetTransaction<'env> {
        // The raw `MDB_txn` stays allocated (no abort) so `renew()` can
        // reuse it cheaply.
        unsafe { ffi::mdb_txn_reset(self.0.tx.0); }
        ResetTransaction(self)
    }
}
impl<'env> ResetTransaction<'env> {
    /// Renews this read-only transaction, making it available for more
    /// reading.
    pub fn renew(self) -> Result<ReadTransaction<'env>> {
        // Reactivates the raw transaction kept alive by `reset()`.
        unsafe { lmdb_call!(ffi::mdb_txn_renew((self.0).0.tx.0)); }
        Ok(self.0)
    }
}
// Exposes the shared `ConstTransaction` API on `WriteTransaction`.
impl<'env> Deref for WriteTransaction<'env> {
    type Target = ConstTransaction<'env>;

    fn deref(&self) -> &ConstTransaction<'env> {
        &self.0
    }
}
// Mutable counterpart of the `Deref` impl above.
impl<'env> DerefMut for WriteTransaction<'env> {
    fn deref_mut(&mut self) -> &mut ConstTransaction<'env> {
        &mut self.0
    }
}
impl<'env> WriteTransaction<'env> {
    /// Creates a new, read-write transaction in the given environment.
    ///
    /// ## Note
    ///
    /// A transaction and its cursors must only be used by a single thread
    /// (enforced by the rust compiler), and a thread may only have a single
    /// read-write transaction at a time (even if `NOTLS` is in use --- trying
    /// to start two top-level read-write transactions on the same thread will
    /// deadlock).
    pub fn new<E>(env: E) -> Result<Self>
    where E : Into<NonSyncSupercow<'env, Environment>> {
        Ok(WriteTransaction(try!(ConstTransaction::new(env, None, 0))))
    }

    /// Opens a new, read-write transaction as a child transaction of the given
    /// parent. While the new transaction exists, no operations may be
    /// performed on the parent or any of its cursors. (These bindings are
    /// actually stricter, and do not permit cursors or other references into
    /// the parent to coexist with the child transaction.)
    ///
    /// After this call, whether or not it succeeds, it is possible to call
    /// `access()` on the original transaction again one more time, since the
    /// Rust borrow rules guarantee the old accessor was destroyed by the
    /// caller already.
    ///
    /// ## Note
    ///
    /// A transaction and its cursors must only be used by a single thread
    /// (enforced by the rust compiler).
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// let db = lmdb::Database::open(
    ///     &env, None, &lmdb::DatabaseOptions::defaults()).unwrap();
    /// let mut txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// let f = lmdb::put::Flags::empty();
    /// {
    ///     let mut access = txn.access();
    ///     access.put(&db, "Germany", "Berlin", f).unwrap();
    ///     access.put(&db, "Latvia", "Rīga", f).unwrap();
    ///     access.put(&db, "France", "Paris", f).unwrap();
    /// }
    ///
    /// {
    ///     // Open a child transaction and do some more reading and writing.
    ///     let subtx = txn.child_tx().unwrap();
    ///     let mut access = subtx.access();
    ///     assert_eq!("Berlin", access.get::<str,str>(&db, "Germany").unwrap());
    ///     access.put(&db, "Germany", "Frankfurt", f).unwrap();
    ///     assert_eq!("Frankfurt", access.get::<str,str>(&db, "Germany").unwrap());
    ///     // Don't commit --- let the child transaction abort (roll back)
    /// }
    ///
    /// {
    ///     let mut access = txn.access();
    ///     // Now we can do some more reading and writing on the original
    ///     // transaction.
    ///     // The effect of the aborted child transaction are not visible.
    ///     access.put(&db, "United Kingdom", "London", f).unwrap();
    ///     assert_eq!("Berlin", access.get::<str,str>(&db, "Germany").unwrap());
    /// }
    ///
    /// {
    ///     // Another child.
    ///     let subtx = txn.child_tx().unwrap();
    ///     {
    ///         let mut access = subtx.access();
    ///         access.put(&db, "Spain", "Madrid", f).unwrap();
    ///     }
    ///     // Commit this one this time.
    ///     subtx.commit().unwrap();
    /// }
    ///
    /// {
    ///     // Now the changes from the child are visible to this transaction,
    ///     // but still not outside it.
    ///     let mut access = txn.access();
    ///     assert_eq!("Madrid", access.get::<str,str>(&db, "Spain").unwrap());
    /// }
    ///
    /// txn.commit().unwrap();
    /// # }
    /// ```
    pub fn child_tx<'a>(&'a mut self) -> Result<WriteTransaction<'a>>
    where 'env: 'a {
        // Promote the environment handle to shared ownership so both parent
        // and child transactions can hold it.
        let env = Supercow::share(&mut self.0.env);
        Ok(WriteTransaction(try!(ConstTransaction::new(
            env, Some(&mut*self), 0))))
    }

    /// Commits this write transaction.
    pub fn commit(mut self) -> Result<()> {
        unsafe {
            self.0.tx.commit()
        }
    }

    /// Returns a read/write accessor on this transaction.
    ///
    /// ## Panics
    ///
    /// Panics if an accessor has already been obtained from this transaction
    /// and not yet dropped.
    #[inline]
    pub fn access(&self) -> WriteAccessor {
        WriteAccessor(self.0.access())
    }
}
impl<'txn> ConstAccessor<'txn> {
    /// Get items from a database.
    ///
    /// This function retrieves key/data pairs from the database. A reference
    /// to the data associated with the given key is returned. If the database
    /// supports duplicate keys (`DUPSORT`) then the first data item for the
    /// key will be returned. Retrieval of other items requires the use of
    /// cursoring.
    ///
    /// The returned memory is valid until the next mutation through the
    /// transaction or the end of the transaction (both are enforced through
    /// the borrow checker).
    ///
    /// ## Errors
    ///
    /// This call may return errors for reasons other than the key not being
    /// found. The easiest way to handle "not found" is generally to use the
    /// `to_opt` method on `traits::LmdbResultExt` to promote the value into a
    /// `Result<Option<V>>`. Most important of these other errors is the
    /// possibility of the key being found, but the value not being convertible
    /// to a `&V`.
    #[inline]
    pub fn get<K : AsLmdbBytes + ?Sized, V : FromLmdbBytes + ?Sized>(
        &self, db: &Database, key: &K) -> Result<&V>
    {
        try!(db.assert_same_env(self.env()));

        let mut mv_key = as_val(key);
        // `out_val` is filled in by LMDB; `from_val` then reinterprets the
        // returned bytes as `&V`, borrowing from `self`.
        let mut out_val = EMPTY_VAL;
        unsafe {
            lmdb_call!(ffi::mdb_get(
                self.txptr(), db.as_raw(), &mut mv_key, &mut out_val));
        }

        from_val(self, &out_val)
    }

    /// Raw pointer of the underlying transaction, for FFI calls.
    fn txptr(&self) -> *mut ffi::MDB_txn {
        self.0.tx.0
    }

    /// The environment the underlying transaction belongs to.
    fn env(&self) -> &Environment {
        &*self.0.env
    }
}
// Exposes the read-only `ConstAccessor` API on `WriteAccessor`.
impl<'txn> Deref for WriteAccessor<'txn> {
    type Target = ConstAccessor<'txn>;

    fn deref(&self) -> &ConstAccessor<'txn> {
        &self.0
    }
}
impl<'txn> WriteAccessor<'txn> {
    /// Store items into a database.
    ///
    /// This function stores key/data pairs in the database. The default
    /// behavior is to enter the new key/data pair, replacing any previously
    /// existing key if duplicates are disallowed, or adding a duplicate data
    /// item if duplicates are allowed (`DUPSORT`).
    #[inline]
    pub fn put<K : AsLmdbBytes + ?Sized, V : AsLmdbBytes + ?Sized>(
        &mut self, db: &Database, key: &K, value: &V,
        flags: put::Flags) -> Result<()>
    {
        // Using a database from a different environment with this
        // transaction would be undefined behaviour at the FFI layer.
        try!(db.assert_same_env(self.env()));
        let mut mv_key = as_val(key);
        let mut mv_val = as_val(value);
        unsafe {
            lmdb_call!(ffi::mdb_put(
                self.txptr(), db.as_raw(), &mut mv_key, &mut mv_val,
                flags.bits()));
        }
        Ok(())
    }
    /// Store items into a database.
    ///
    /// This function stores key/data pairs in the database. The default
    /// behavior is to enter the new key/data pair, replacing any previously
    /// existing key if duplicates are disallowed, or adding a duplicate data
    /// item if duplicates are allowed (`DUPSORT`).
    ///
    /// Unlike `put()`, this does not take a value. Instead, it reserves space
    /// for the value (equal to the size of `V`) and then returns a mutable
    /// reference to it. Be aware that the `FromReservedLmdbBytes` conversion
    /// will be invoked on whatever memory happens to be at the destination
    /// location.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// #[repr(C, packed)]
    /// #[derive(Clone,Copy,Debug,PartialEq,Eq)]
    /// struct MyStruct {
    ///   x: i32,
    ///   y: i32,
    /// }
    /// unsafe impl lmdb::traits::LmdbRaw for MyStruct { }
    ///
    /// # fn main() {
    /// # let env = create_env();
    /// # let db = lmdb::Database::open(
    /// #   &env, None, &lmdb::DatabaseOptions::defaults())
    /// #   .unwrap();
    /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// {
    ///   let mut access = txn.access();
    ///   {
    ///     let dst: &mut MyStruct = access.put_reserve(
    ///       &db, "foo", lmdb::put::Flags::empty()).unwrap();
    ///     // Writing to `dst` actually writes directly into the database.
    ///     dst.x = 42;
    ///     dst.y = 56;
    ///     // Drop `dst` so we can use `access` again
    ///   }
    ///   assert_eq!(&MyStruct { x: 42, y: 56 },
    ///              access.get(&db, "foo").unwrap());
    /// }
    /// txn.commit().unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn put_reserve<K : AsLmdbBytes + ?Sized,
                       V : FromReservedLmdbBytes + Sized>(
        &mut self, db: &Database, key: &K, flags: put::Flags) -> Result<&mut V>
    {
        // SAFETY of the size argument: `size_of::<V>()` is by definition a
        // valid size for a `Sized` `V`, satisfying the contract of
        // `put_reserve_unsized`.
        unsafe {
            self.put_reserve_unsized(db, key, mem::size_of::<V>(), flags)
        }
    }
    /// Store items into a database.
    ///
    /// This function stores key/data pairs in the database. The default
    /// behavior is to enter the new key/data pair, replacing any previously
    /// existing key if duplicates are disallowed, or adding a duplicate data
    /// item if duplicates are allowed (`DUPSORT`).
    ///
    /// Unlike `put()`, this does not take a value. Instead, it reserves space
    /// for the value (equal to an array of `count` objects of size `V`) and
    /// then returns a mutable reference to it. Be aware that the content of
    /// the returned slice is simply whatever happens to be in the destination
    /// memory at the time of this call.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// # let db = lmdb::Database::open(
    /// #   &env, None, &lmdb::DatabaseOptions::defaults())
    /// #   .unwrap();
    /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// {
    ///   let mut access = txn.access();
    ///   {
    ///     let bytes: &mut [u8] = access.put_reserve_array(
    ///       &db, "foo", 4, lmdb::put::Flags::empty()).unwrap();
    ///     // More realistically, one could zero-copy data from a file/socket
    ///     // into `bytes`, for example.
    ///     bytes[0] = b'b'; bytes[1] = b'y';
    ///     bytes[2] = b't'; bytes[3] = b'e';
    ///   }
    ///   assert_eq!("byte", access.get::<str,str>(&db, "foo").unwrap());
    /// }
    /// txn.commit().unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn put_reserve_array<K : AsLmdbBytes + ?Sized, V : LmdbRaw>(
        &mut self, db: &Database, key: &K, count: usize, flags: put::Flags)
        -> Result<&mut [V]>
    {
        // SAFETY of the size argument: `count` elements of `V` occupy
        // exactly `size_of::<V>() * count` bytes.
        unsafe {
            self.put_reserve_unsized(
                db, key, mem::size_of::<V>() * count, flags)
        }
    }
    /// Store items into a database.
    ///
    /// This function stores key/data pairs in the database. The default
    /// behavior is to enter the new key/data pair, replacing any previously
    /// existing key if duplicates are disallowed, or adding a duplicate data
    /// item if duplicates are allowed (`DUPSORT`).
    ///
    /// Unlike `put()`, this does not take a value. Instead, it reserves space
    /// equal to `size` bytes for the value and then returns a mutable
    /// reference to it. Be aware that the `FromReservedLmdbBytes` conversion
    /// will be invoked on whatever memory happens to be at the destination
    /// location.
    ///
    /// ## Unsafety
    ///
    /// The caller must ensure that `size` is a valid size for `V`.
    #[inline]
    pub unsafe fn put_reserve_unsized<K : AsLmdbBytes + ?Sized,
                                      V : FromReservedLmdbBytes + ?Sized>(
        &mut self, db: &Database, key: &K, size: usize, flags: put::Flags)
        -> Result<&mut V>
    {
        try!(db.assert_same_env(self.env()));
        let mut mv_key = as_val(key);
        let mut out_val = EMPTY_VAL;
        // With MDB_RESERVE, LMDB ignores the data pointer on input, only
        // honours mv_size, and on return points out_val at the reserved
        // space inside the database for the caller to fill in.
        out_val.mv_size = size;
        lmdb_call!(ffi::mdb_put(
            self.txptr(), db.as_raw(), &mut mv_key, &mut out_val,
            flags.bits() | ffi::MDB_RESERVE));
        Ok(from_reserved(self, &out_val))
    }
    /// Delete items from a database by key.
    ///
    /// This function removes key/data pairs from the database. All values
    /// whose key matches `key` are deleted, including in the case of
    /// `DUPSORT`. This function will return `NOTFOUND` if the specified
    /// key is not in the database.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// let db = lmdb::Database::open(
    ///   &env, Some("example"),
    ///   &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
    ///   .unwrap();
    /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// {
    ///   let mut access = txn.access();
    ///   access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
    ///   access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
    ///   assert_eq!("Apple", access.get::<str,str>(&db, "Fruit").unwrap());
    ///   access.del_key(&db, "Fruit").unwrap();
    ///   assert!(access.get::<str,str>(&db, "Fruit").is_err());
    /// }
    /// txn.commit().unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn del_key<K : AsLmdbBytes + ?Sized>(
        &mut self, db: &Database, key: &K) -> Result<()>
    {
        try!(db.assert_same_env(self.env()));
        let mut mv_key = as_val(key);
        unsafe {
            // A null data pointer tells mdb_del to remove *all* values
            // stored under this key (relevant for DUPSORT databases).
            lmdb_call!(ffi::mdb_del(
                self.txptr(), db.as_raw(), &mut mv_key, ptr::null_mut()));
        }
        Ok(())
    }
    /// Delete items from a database by key and value.
    ///
    /// This function removes key/data pairs from the database. If the database
    /// does not support sorted duplicate data items (`DUPSORT`) the `val`
    /// parameter is ignored and this call behaves like `del_key()`. Otherwise,
    /// only the data item matching both `key` and `val` will be deleted. This
    /// function will return `NOTFOUND` if the specified key/data pair is not
    /// in the database.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// let db = lmdb::Database::open(
    ///   &env, Some("example"),
    ///   &lmdb::DatabaseOptions::create_multimap_unsized::<str,str>())
    ///   .unwrap();
    /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// {
    ///   let mut access = txn.access();
    ///   access.put(&db, "Fruit", "Apple", lmdb::put::Flags::empty()).unwrap();
    ///   access.put(&db, "Fruit", "Orange", lmdb::put::Flags::empty()).unwrap();
    ///   assert_eq!("Apple", access.get::<str,str>(&db, "Fruit").unwrap());
    ///   access.del_item(&db, "Fruit", "Apple").unwrap();
    ///   assert_eq!("Orange", access.get::<str,str>(&db, "Fruit").unwrap());
    /// }
    /// txn.commit().unwrap();
    /// # }
    /// ```
    #[inline]
    pub fn del_item<K : AsLmdbBytes + ?Sized, V : AsLmdbBytes + ?Sized>(
        &mut self, db: &Database, key: &K, val: &V) -> Result<()>
    {
        try!(db.assert_same_env(self.env()));
        let mut mv_key = as_val(key);
        let mut mv_val = as_val(val);
        unsafe {
            lmdb_call!(ffi::mdb_del(
                self.txptr(), db.as_raw(), &mut mv_key, &mut mv_val));
        }
        Ok(())
    }
    /// Completely clears the content of the given database.
    ///
    /// ## Example
    ///
    /// ```
    /// # include!(concat!(env!("CARGO_MANIFEST_DIR"),"/src/example_helpers.rs"));
    /// # fn main() {
    /// # let env = create_env();
    /// # let db = lmdb::Database::open(
    /// #   &env, None, &lmdb::DatabaseOptions::defaults())
    /// #   .unwrap();
    /// let txn = lmdb::WriteTransaction::new(&env).unwrap();
    /// {
    ///   let mut access = txn.access();
    ///   let f = lmdb::put::Flags::empty();
    ///   access.put(&db, "Germany", "Berlin", f).unwrap();
    ///   access.put(&db, "France", "Paris", f).unwrap();
    ///   access.put(&db, "Latvia", "Rīga", f).unwrap();
    ///   assert_eq!(3, txn.db_stat(&db).unwrap().entries);
    ///
    ///   access.clear_db(&db).unwrap();
    ///   assert_eq!(0, txn.db_stat(&db).unwrap().entries);
    /// }
    /// txn.commit().unwrap();
    /// # }
    /// ```
    pub fn clear_db(&mut self, db: &Database) -> Result<()> {
        try!(db.assert_same_env(self.env()));
        unsafe {
            // Per mdb_drop semantics, the final argument `0` means "empty
            // the database" (keeping its handle), as opposed to `1` which
            // would delete and close it.
            lmdb_call!(ffi::mdb_drop(self.txptr(), db.as_raw(), 0));
        }
        Ok(())
    }
}
|
use crate::{BoxFuture, CtxTransaction, Entity, Result};
use std::sync::Arc;
/// Callback invoked when an entity is about to be removed.
pub trait RemovingHandler<E: Entity> {
    /// Called with the `key` of the entity being removed, within the
    /// transaction `trx`. Returning `Err` short-circuits the remaining
    /// handlers (see `OnRemove::__call`).
    fn handle_removing<'a>(
        &'a self,
        trx: &'a mut CtxTransaction<'_>,
        key: &'a E::Key,
        track_ctx: &'a E::TrackCtx,
    ) -> BoxFuture<'a, Result<()>>;
}
/// Blanket implementation: any compatible async closure is usable as a
/// removal handler.
impl<E: Entity, T> RemovingHandler<E> for T
where
    T: for<'a> Fn(
        &'a mut CtxTransaction<'_>,
        &'a E::Key,
        &'a E::TrackCtx,
    ) -> BoxFuture<'a, Result<()>>,
{
    fn handle_removing<'a>(
        &'a self,
        trx: &'a mut CtxTransaction<'_>,
        key: &'a E::Key,
        track_ctx: &'a E::TrackCtx,
    ) -> BoxFuture<'a, Result<()>> {
        // Forward directly to the wrapped closure.
        (self)(trx, key, track_ctx)
    }
}
// Shared, thread-safe handle to a single removal handler.
type ArcRemovingHandler<E> = Arc<dyn RemovingHandler<E> + Send + Sync>;
/// Registry of removal handlers for entity type `E`. The mutex guards an
/// `Arc` snapshot of the handler list that is swapped wholesale on
/// registration, so callers can clone the snapshot and release the lock.
pub struct OnRemove<E>(parking_lot::Mutex<Arc<Box<[ArcRemovingHandler<E>]>>>);
impl<E: Entity> OnRemove<E> {
    /// Invokes every registered handler in registration order, stopping at
    /// the first error.
    #[doc(hidden)]
    pub fn __call<'b>(
        &'b self,
        trx: &'b mut CtxTransaction<'_>,
        key: &'b E::Key,
        track_ctx: &'b E::TrackCtx,
    ) -> BoxFuture<'b, Result<()>> {
        // Clone the Arc snapshot so the mutex is released before the async
        // block runs; the lock is never held across an `.await`. Handlers
        // registered after this point are picked up by later calls only.
        let vec = Arc::clone(&self.0.lock());
        Box::pin(async move {
            for handler in vec.iter() {
                handler.handle_removing(trx, key, track_ctx).await?;
            }
            Ok(())
        })
    }
    /// Registers an additional removal handler (copy-on-write append: a new
    /// slice with the extra handler replaces the shared snapshot).
    pub fn register<EV: RemovingHandler<E> + Send + Sync + 'static>(&self, ev: EV) {
        let mut gate = self.0.lock();
        let vec = gate
            .iter()
            .map(Arc::clone)
            .chain(std::iter::once(Arc::new(ev) as _))
            .collect::<Vec<_>>()
            .into_boxed_slice();
        *gate = Arc::new(vec);
    }
    /// Convenience wrapper for registering a plain closure; the blanket
    /// `RemovingHandler` impl makes any such closure a handler.
    pub fn register_fn<F>(&self, f: F)
    where
        F: for<'a, 'b> Fn(
            &'b mut CtxTransaction<'a>,
            &'b E::Key,
            &'b E::TrackCtx,
        ) -> BoxFuture<'b, Result<()>>
            + Send
            + Sync
            + 'static,
    {
        self.register(f);
    }
}
impl<E> Default for OnRemove<E> {
fn default() -> Self {
Self(Default::default())
}
}
|
extern crate varint;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::io::Cursor;
use std::rc::Rc;
use std::{self, mem, str};
use self::varint::VarintRead;
use crate::error::Error;
use crate::json_value::JsonValue;
use crate::key_builder::KeyBuilder;
use crate::query::{DocResult, QueryScoringInfo};
use crate::snapshot::{AllDocsIterator, DocResultIterator, JsonFetcher, Scorer, Snapshot};
use rocksdb::{self, DBIterator, IteratorMode};
/// Runtime interface shared by all query filters: a cursor-like object that
/// yields matching documents as `DocResult`s in order.
pub trait QueryRuntimeFilter {
    /// Positions the filter at or after `start` and returns the first match.
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult>;
    /// Returns the next match, or `None` when exhausted.
    fn next_result(&mut self) -> Option<DocResult>;
    /// Registers this filter's terms with the query's scoring info.
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo);
    /// returns an error if a double negation is detected
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error>;
    /// returns true if this filter or all subfilters are NotFilters
    fn is_all_not(&self) -> bool;
}
/// Boundary/value specification used by `RangeFilter`.
#[derive(PartialEq)]
pub enum RangeOperator {
    /// Numeric bound that includes the endpoint itself.
    Inclusive(f64),
    /// Numeric bound that excludes the endpoint.
    Exclusive(f64),
    // For booleans and null only exact match makes sense, hence no inclusive/exclusive
    // boundaries are needed
    /// Matches boolean `true` exactly.
    True,
    /// Matches boolean `false` exactly.
    False,
    /// Matches `null` exactly.
    Null,
}
/// Filter that yields every document in the snapshot (match-all).
pub struct AllDocsFilter {
    iter: AllDocsIterator,
}
impl AllDocsFilter {
    /// Creates a filter that matches every document in `snapshot`.
    pub fn new(snapshot: &Snapshot) -> AllDocsFilter {
        AllDocsFilter {
            iter: snapshot.new_all_docs_iterator(),
        }
    }
}
impl QueryRuntimeFilter for AllDocsFilter {
    fn first_result(&mut self, _start: &DocResult) -> Option<DocResult> {
        // The all-docs iterator takes no start hint; just pull the next doc.
        self.next_result()
    }
    fn next_result(&mut self) -> Option<DocResult> {
        // NOTE(review): the score ordinal is hard-coded to 1 here, while the
        // other filters use the ordinal handed out by
        // prepare_relevancy_scoring — confirm this is intentional.
        self.iter.next().map(|mut dr| {
            dr.add_score(1, 1.0);
            dr
        })
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        qsi.num_terms += 1;
        qsi.sum_of_idt_sqs += 1.0;
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// Filter matching documents containing a single stemmed word.
pub struct StemmedWordFilter {
    iter: DocResultIterator,
    scorer: Scorer,
}
impl StemmedWordFilter {
    /// Creates a filter for `stemmed_word` under the key path described by
    /// `kb`; `boost` is forwarded to the scorer.
    pub fn new(
        snapshot: &Snapshot,
        stemmed_word: &str,
        kb: &KeyBuilder,
        boost: f32,
    ) -> StemmedWordFilter {
        StemmedWordFilter {
            iter: snapshot.new_term_doc_result_iterator(stemmed_word, kb),
            scorer: snapshot.new_scorer(stemmed_word, kb, boost),
        }
    }
}
impl QueryRuntimeFilter for StemmedWordFilter {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        // Seek the underlying iterator to `start`, then reuse the common
        // fetch-and-score path.
        self.iter.advance_gte(start);
        self.next_result()
    }
    fn next_result(&mut self) -> Option<DocResult> {
        let (mut dr, pos) = self.iter.next()?;
        if self.scorer.should_score() {
            // Score by the number of occurrences of the word in this doc.
            let occurrences = pos.positions().len() as u32;
            self.scorer.add_match_score(occurrences, &mut dr);
        }
        Some(dr)
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        self.scorer.init(qsi);
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// This is not a QueryRuntimeFilter but it imitates one. Instead of returning just a DocResult
/// it also returns a vector of word positions, each being an instance of the word occurrence.
pub struct StemmedWordPosFilter {
    iter: DocResultIterator,
    scorer: Scorer,
}
impl StemmedWordPosFilter {
    /// Creates a position-aware filter for `stemmed_word` under the key path
    /// described by `kb`; `boost` is forwarded to the scorer.
    pub fn new(
        snapshot: &Snapshot,
        stemmed_word: &str,
        kb: &KeyBuilder,
        boost: f32,
    ) -> StemmedWordPosFilter {
        let iter = snapshot.new_term_doc_result_iterator(stemmed_word, kb);
        let scorer = snapshot.new_scorer(stemmed_word, kb, boost);
        StemmedWordPosFilter { iter, scorer }
    }
    /// Seeks to `start`, then returns the first match with its positions.
    fn first_result(&mut self, start: &DocResult) -> Option<(DocResult, Vec<u32>)> {
        self.iter.advance_gte(start);
        self.next_result()
    }
    /// Returns the next match together with its word positions.
    fn next_result(&mut self) -> Option<(DocResult, Vec<u32>)> {
        let (mut dr, pos) = self.iter.next()?;
        let positions = pos.positions();
        if self.scorer.should_score() {
            self.scorer.add_match_score(positions.len() as u32, &mut dr);
        }
        Some((dr, positions))
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        self.scorer.init(qsi);
    }
}
/// Filter matching a sequence of stemmed words at consecutive word
/// positions (a phrase), one position-aware subfilter per word.
pub struct StemmedPhraseFilter {
    filters: Vec<StemmedWordPosFilter>,
}
impl StemmedPhraseFilter {
    /// Creates a phrase filter from one position-aware filter per word.
    ///
    /// ## Panics
    ///
    /// Panics if `filters` is empty.
    pub fn new(filters: Vec<StemmedWordPosFilter>) -> StemmedPhraseFilter {
        assert!(!filters.is_empty());
        StemmedPhraseFilter { filters }
    }
    // Advances the word subfilters until every word matches at consecutive
    // positions in the same document; `base` is the first word's result.
    fn result(&mut self, base: Option<(DocResult, Vec<u32>)>) -> Option<DocResult> {
        // this is the number of matches left before all terms match and we can return a result
        let mut matches_left = self.filters.len() - 1;
        let (mut base_result, mut base_positions) = base?;
        if matches_left == 0 {
            // Single-word phrase: the base result is already a full match.
            return Some(base_result);
        }
        let mut current_filter = 0;
        loop {
            // Round-robin through the remaining word filters.
            current_filter += 1;
            if current_filter == self.filters.len() {
                current_filter = 0;
            }
            let (next_result, next_positions) =
                self.filters[current_filter].first_result(&base_result)?;
            if base_result == next_result {
                // Same document: keep only positions that immediately follow
                // (pos - 1) a position matched by the previous word.
                let mut new_positions = Vec::new();
                for &pos in next_positions.iter() {
                    if base_positions
                        .binary_search(&(pos.saturating_sub(1)))
                        .is_ok()
                    {
                        new_positions.push(pos);
                    }
                }
                if !new_positions.is_empty() {
                    // we have values that survive! reassign back to base_positions
                    base_positions = new_positions;
                    matches_left -= 1;
                    if matches_left == 0 {
                        return Some(base_result);
                    }
                } else {
                    // we didn't match on phrase, so get next_result from first filter
                    current_filter = 0;
                    let (next_result, next_positions) =
                        self.filters[current_filter].next_result()?;
                    base_result = next_result;
                    base_positions = next_positions;
                    matches_left = self.filters.len() - 1;
                }
            } else {
                // we didn't match on next_result, so get first_result at next_result on
                // 1st filter.
                current_filter = 0;
                let (next_result, next_positions) =
                    self.filters[current_filter].first_result(&next_result)?;
                base_result = next_result;
                base_positions = next_positions;
                matches_left = self.filters.len() - 1;
            }
        }
    }
}
impl QueryRuntimeFilter for StemmedPhraseFilter {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        // Seed the phrase-matching loop with the first word's result.
        let seed = self.filters[0].first_result(start);
        self.result(seed)
    }
    fn next_result(&mut self) -> Option<DocResult> {
        let seed = self.filters[0].next_result();
        self.result(seed)
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        for word_filter in self.filters.iter_mut() {
            word_filter.prepare_relevancy_scoring(qsi);
        }
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// Wraps a `StemmedPhraseFilter` and additionally verifies that the stored
/// original string equals `phrase` exactly (optionally case-insensitively).
pub struct ExactMatchFilter {
    // RocksDB iterator used to fetch the stored original value.
    iter: DBIterator,
    // Supplies candidate documents that stem-match the phrase.
    filter: StemmedPhraseFilter,
    kb: KeyBuilder,
    // Phrase to compare against (lowercased unless case_sensitive).
    phrase: String,
    case_sensitive: bool,
    // Ordinal assigned in prepare_relevancy_scoring; None until then.
    term_ordinal: Option<usize>,
}
impl ExactMatchFilter {
    /// Creates an exact-match filter over `filter`'s candidates. When
    /// `case_sensitive` is false the phrase is lowercased here and the
    /// stored value is lowercased at comparison time.
    pub fn new(
        snapshot: &Snapshot,
        filter: StemmedPhraseFilter,
        kb: KeyBuilder,
        phrase: String,
        case_sensitive: bool,
    ) -> ExactMatchFilter {
        ExactMatchFilter {
            iter: snapshot.new_iterator(),
            filter,
            kb,
            phrase: if case_sensitive {
                phrase
            } else {
                phrase.to_lowercase()
            },
            case_sensitive,
            term_ordinal: None,
        }
    }
    // Verifies candidates against the stored original string, pulling more
    // candidates from the inner filter until one matches exactly.
    //
    // Panics if the stored value is not a string or if no value exists for
    // the candidate's value key — both would indicate index corruption.
    #[allow(clippy::collapsible_else_if)]
    fn check_exact(&mut self, mut dr: DocResult) -> Option<DocResult> {
        loop {
            // Seek to the stored original value for this doc result.
            let value_key = self.kb.kp_value_key_from_doc_result(&dr);
            self.iter.set_mode(IteratorMode::From(
                value_key.as_bytes(),
                rocksdb::Direction::Forward,
            ));
            if let Some((key, value)) = self.iter.next() {
                debug_assert!(key.starts_with(value_key.as_bytes())); // must always be true!
                if let JsonValue::String(string) = JsonFetcher::bytes_to_json_value(&*value) {
                    let matches = if self.case_sensitive {
                        self.phrase == string
                    } else {
                        self.phrase == string.to_lowercase()
                    };
                    if matches {
                        if self.term_ordinal.is_some() {
                            dr.add_score(self.term_ordinal.unwrap(), 1.0);
                        }
                        return Some(dr);
                    } else {
                        // Stem matched but the literal text didn't; try the
                        // next candidate from the phrase filter.
                        if let Some(next) = self.filter.next_result() {
                            dr = next;
                            // continue looping
                        } else {
                            return None;
                        }
                    }
                } else {
                    panic!("Not a string, wtf!");
                }
            } else {
                panic!("Couldn't find value, hulk smash!");
            }
        }
    }
}
impl QueryRuntimeFilter for ExactMatchFilter {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        // Candidates come from the stemmed-phrase filter; each one is then
        // verified against the stored original text.
        let candidate = self.filter.first_result(start)?;
        self.check_exact(candidate)
    }
    fn next_result(&mut self) -> Option<DocResult> {
        let candidate = self.filter.next_result()?;
        self.check_exact(candidate)
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        // Scored as binary: a matching doc contributes 1, otherwise nothing.
        self.term_ordinal = Some(qsi.num_terms);
        qsi.num_terms += 1;
        qsi.sum_of_idt_sqs += 1.0;
    }
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        self.filter.check_double_not(parent_is_neg)
    }
    fn is_all_not(&self) -> bool {
        self.filter.is_all_not()
    }
}
/// Filter matching numeric ranges or exact boolean/null values stored under
/// a key path.
pub struct RangeFilter {
    iter: DBIterator,
    kb: KeyBuilder,
    // At least one of `min`/`max` must be Some (see the unwrap in
    // first_result).
    min: Option<RangeOperator>,
    max: Option<RangeOperator>,
    // Key-path prefix currently being scanned; set by first_result.
    keypath: String,
    term_ordinal: Option<usize>,
}
impl RangeFilter {
    /// Creates a range filter; at least one of `min`/`max` must be given
    /// (`first_result` panics when both are `None`).
    pub fn new(
        snapshot: &Snapshot,
        kb: KeyBuilder,
        min: Option<RangeOperator>,
        max: Option<RangeOperator>,
    ) -> RangeFilter {
        let iter = snapshot.new_iterator();
        RangeFilter {
            iter,
            kb,
            min,
            max,
            // The keypath we use to seek to the correct key within RocksDB;
            // filled in by first_result.
            keypath: String::new(),
            term_ordinal: None,
        }
    }
}
impl QueryRuntimeFilter for RangeFilter {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        let mut value_key = {
            // `min` and `max` have the same type, so picking one is OK.
            // Panics if both are None: the filter requires at least one bound.
            let range_operator = self.min.as_ref().or(self.max.as_ref()).unwrap();
            match range_operator {
                &RangeOperator::Inclusive(_) | &RangeOperator::Exclusive(_) => {
                    self.kb.number_key(start.seq)
                }
                &RangeOperator::True => self.kb.bool_null_key('T', start.seq),
                &RangeOperator::False => self.kb.bool_null_key('F', start.seq),
                &RangeOperator::Null => self.kb.bool_null_key('N', start.seq),
            }
        };
        // NOTE vmx 2017-04-13: Iterating over keys is really similar to the
        // `DocResultIterator` in `snapshot.rs`. It should probably be unified.
        self.iter.set_mode(IteratorMode::From(
            value_key.as_bytes(),
            rocksdb::Direction::Forward,
        ));
        // Remember only the key-path prefix so next_result can detect when
        // the scan leaves the relevant key range.
        KeyBuilder::truncate_to_kp_word(&mut value_key);
        self.keypath = value_key;
        self.next_result()
    }
    fn next_result(&mut self) -> Option<DocResult> {
        for (key, value) in &mut self.iter {
            if !key.starts_with(self.keypath.as_bytes()) {
                // we passed the key path we are interested in. nothing left to do
                return None;
            }
            // Keys are produced by KeyBuilder and assumed to be valid UTF-8.
            let key_str = unsafe { str::from_utf8_unchecked(&key) };
            // The key already matched, hence it's a valid doc result. Return it.
            if self.min == Some(RangeOperator::True)
                || self.min == Some(RangeOperator::False)
                || self.min == Some(RangeOperator::Null)
            {
                let mut dr = KeyBuilder::parse_doc_result_from_kp_word_key(key_str);
                if self.term_ordinal.is_some() {
                    dr.add_score(self.term_ordinal.unwrap(), 1.0);
                }
                return Some(dr);
            }
            // Else it's a range query on numbers.
            // NOTE(review): decodes the first 8 value bytes as a
            // native-endian f64 and assumes the stored value is at least
            // 8 bytes long — confirm against the index writer.
            let number = unsafe {
                let array = *(value[..].as_ptr() as *const [_; 8]);
                mem::transmute::<[u8; 8], f64>(array)
            };
            let min_condition = match self.min {
                Some(RangeOperator::Inclusive(min)) => number >= min,
                Some(RangeOperator::Exclusive(min)) => number > min,
                // No condition was given => it always matches
                None => true,
                _ => panic!("Can't happen, it returns early on the other types"),
            };
            let max_condition = match self.max {
                Some(RangeOperator::Inclusive(max)) => number <= max,
                Some(RangeOperator::Exclusive(max)) => number < max,
                // No condition was given => it always matches
                None => true,
                _ => panic!("Can't happen, it returns early on the other types"),
            };
            if min_condition && max_condition {
                let mut dr = KeyBuilder::parse_doc_result_from_kp_word_key(key_str);
                if self.term_ordinal.is_some() {
                    dr.add_score(self.term_ordinal.unwrap(), 1.0);
                }
                return Some(dr);
            }
            // Else: No match => Keep looping and move on to the next key
        }
        None
    }
    // TODO vmx 2017-04-13: Scoring is not implemented yet
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        // we score these as binary. Either they have a value of 1 or nothing.
        self.term_ordinal = Some(qsi.num_terms);
        qsi.num_terms += 1;
        qsi.sum_of_idt_sqs += 1.0;
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// Filter matching documents whose geometry intersects a bounding box,
/// backed by an R-tree index.
pub struct BboxFilter<'a> {
    snapshot: Rc<Snapshot<'a>>,
    // Created lazily in first_result, since the query key depends on `start`.
    iter: Option<DBIterator>,
    kb: KeyBuilder,
    // Serialized query rectangle: four little-endian f64s.
    bbox: Vec<u8>,
    term_ordinal: Option<usize>,
}
impl<'a> BboxFilter<'a> {
    /// Creates a bounding-box filter for `bbox` under the key path described
    /// by `kb`.
    pub fn new(snapshot: Rc<Snapshot<'a>>, kb: KeyBuilder, bbox: [f64; 4]) -> BboxFilter<'a> {
        // Serialize the rectangle as four little-endian f64s in the order
        // [0], [2], [1], [3] — presumably the per-dimension min/max layout
        // the R-tree query expects; TODO confirm against `rtree_query_key`.
        let mut bbox_vec = Vec::with_capacity(32);
        bbox_vec.extend_from_slice(&bbox[0].to_le_bytes());
        bbox_vec.extend_from_slice(&bbox[2].to_le_bytes());
        bbox_vec.extend_from_slice(&bbox[1].to_le_bytes());
        bbox_vec.extend_from_slice(&bbox[3].to_le_bytes());
        BboxFilter {
            snapshot,
            iter: None,
            kb,
            bbox: bbox_vec,
            term_ordinal: None,
        }
    }
    /// Deserializes a serialized arraypath: every 8 bytes become one
    /// native-endian `u64`; trailing bytes (when `slice.len()` is not a
    /// multiple of 8) are ignored, matching the previous behavior.
    ///
    /// Rewritten without `unsafe`: the old implementation cast the `u8`
    /// pointer to `*const u64` and read through it, which is undefined
    /// behavior whenever the slice is not 8-byte aligned.
    fn from_u8_slice(slice: &[u8]) -> Vec<u64> {
        slice
            .chunks_exact(8)
            .map(|chunk| {
                let mut bytes = [0u8; 8];
                bytes.copy_from_slice(chunk);
                u64::from_ne_bytes(bytes)
            })
            .collect()
    }
}
impl<'a> QueryRuntimeFilter for BboxFilter<'a> {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        // Build an R-tree window query starting at `start.seq` and covering
        // all sequence numbers up to u64::MAX.
        let query = self
            .kb
            .rtree_query_key(start.seq, std::u64::MAX, &self.bbox);
        self.iter = Some(self.snapshot.new_rtree_iterator(&query));
        self.next_result()
    }
    fn next_result(&mut self) -> Option<DocResult> {
        // first_result must run before this, otherwise the iterator is still
        // None and this unwrap panics.
        let iter = self.iter.as_mut().unwrap();
        if let Some((key, value)) = iter.next() {
            let mut vec = Vec::with_capacity(key.len());
            vec.extend_from_slice(&key);
            let mut read = Cursor::new(vec);
            // The key starts with a varint length followed by the keypath;
            // skip past both to reach the internal doc id.
            let key_len = read.read_unsigned_varint_32().unwrap();
            let offset = read.position() as usize;
            let iid = unsafe {
                // NOTE(review): assumes at least 8 bytes follow the keypath;
                // they are read as a native-endian u64.
                let array = *(key[offset + key_len as usize..].as_ptr() as *const [_; 8]);
                mem::transmute::<[u8; 8], u64>(array)
            };
            let mut dr = DocResult::new();
            dr.seq = iid;
            dr.arraypath = BboxFilter::from_u8_slice(&value);
            if self.term_ordinal.is_some() {
                dr.add_score(self.term_ordinal.unwrap(), 1.0);
            }
            Some(dr)
        } else {
            None
        }
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        // We score these as binary. Either they have a value of 1 or nothing.
        self.term_ordinal = Some(qsi.num_terms);
        qsi.num_terms += 1;
        qsi.sum_of_idt_sqs += 1.0;
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// Proximity filter: all words must occur within `distance` words of each
/// other in the same field.
pub struct DistanceFilter {
    filters: Vec<StemmedWordPosFilter>,
    // Index of the subfilter that produced the current base result.
    current_filter: usize,
    distance: u32,
}
impl DistanceFilter {
    /// Creates a proximity filter over `filters` with the given maximum
    /// word `distance`.
    pub fn new(filters: Vec<StemmedWordPosFilter>, distance: u32) -> DistanceFilter {
        DistanceFilter {
            filters,
            current_filter: 0,
            distance,
        }
    }
    // Advances the subfilters round-robin until they all land on the same
    // document with word positions within `distance` of each other.
    fn result(&mut self, base: Option<(DocResult, Vec<u32>)>) -> Option<DocResult> {
        // yes, this code is complex. I tried to break it up, but it wants to be like this.
        // this is the number of matches left before all terms match and we can return a result
        let mut matches_left = self.filters.len() - 1;
        let (mut base_result, positions) = base?;
        // This contains tuples of word positions and the filter they came from,
        // sorted by word position.
        let mut base_positions: Vec<(u32, usize)> = positions
            .iter()
            .map(|pos| (*pos, self.current_filter))
            .collect();
        // distance is number of words between searched words.
        // add one to make calculating difference easier since abs(posa - posb) == distance + 1
        let dis = self.distance + 1;
        loop {
            // Round-robin to the next subfilter.
            self.current_filter += 1;
            if self.current_filter == self.filters.len() {
                self.current_filter = 0;
            }
            let (next_result, next_positions) =
                self.filters[self.current_filter].first_result(&base_result)?;
            if base_result != next_result {
                // not same field, next_result becomes base_result.
                base_result = next_result;
                base_positions = next_positions
                    .iter()
                    .map(|pos| (*pos, self.current_filter))
                    .collect();
                matches_left = self.filters.len() - 1;
                continue;
            }
            // so we are in the same field. Now to check the proximity of the values from the
            // next result to previous results.
            // new_positions_map will accept positions within range of pos. But only if all
            // positions that can be are within range. We use the sorted map so we can add
            // the same positions multiple times and it's a noop.
            let mut new_positions_map = BTreeMap::new();
            for &pos in next_positions.iter() {
                // could these lines be any longer? No they could not.
                let sub = pos.saturating_sub(dis); // underflows otherwise
                let start = match base_positions.binary_search_by_key(&(sub), |&(pos2, _)| pos2) {
                    Ok(start) => start,
                    Err(start) => start,
                };
                let end = match base_positions.binary_search_by_key(&(pos + dis), |&(pos2, _)| pos2)
                {
                    Ok(end) => end,
                    Err(end) => end,
                };
                // we now collect all the filters within the range
                let mut filters_encountered = HashSet::new();
                for &(_, filter_n) in base_positions[start..end].iter() {
                    filters_encountered.insert(filter_n);
                }
                if filters_encountered.len() == self.filters.len() - matches_left {
                    // we encountered all the filters we can at this stage,
                    // so we should add them all to the new_positions_map
                    for &(prev_pos, filter_n) in base_positions[start..end].iter() {
                        new_positions_map.insert(prev_pos, filter_n);
                    }
                    // and add the current pos
                    new_positions_map.insert(pos, self.current_filter);
                }
            }
            if !new_positions_map.is_empty() {
                // we have values that survive! reassign back to positions
                base_positions = new_positions_map.into_iter().collect();
                matches_left -= 1;
                if matches_left == 0 {
                    return Some(base_result);
                } else {
                    continue;
                }
            }
            // we didn't match on next_result, so get next_result on current filter
            let (next_result, next_positions) = self.filters[self.current_filter].next_result()?;
            base_result = next_result;
            base_positions = next_positions
                .iter()
                .map(|pos| (*pos, self.current_filter))
                .collect();
            matches_left = self.filters.len() - 1;
        }
    }
}
impl QueryRuntimeFilter for DistanceFilter {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        // Seed the proximity loop with the current subfilter's result.
        let seed = self.filters[self.current_filter].first_result(start);
        self.result(seed)
    }
    fn next_result(&mut self) -> Option<DocResult> {
        let seed = self.filters[self.current_filter].next_result();
        self.result(seed)
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        for word_filter in self.filters.iter_mut() {
            word_filter.prepare_relevancy_scoring(qsi);
        }
    }
    fn check_double_not(&self, _parent_is_neg: bool) -> Result<(), Error> {
        Ok(())
    }
    fn is_all_not(&self) -> bool {
        false
    }
}
/// Logical AND: a document matches when every subfilter matches it (with
/// arraypaths truncated to `array_depth` before comparison).
pub struct AndFilter<'a> {
    filters: Vec<Box<dyn QueryRuntimeFilter + 'a>>,
    // Index of the subfilter that produced the current candidate.
    current_filter: usize,
    array_depth: usize,
}
impl<'a> AndFilter<'a> {
    /// Creates an AND over `filters`; arraypaths are normalized to
    /// `array_depth` elements before results are compared.
    pub fn new(
        filters: Vec<Box<dyn QueryRuntimeFilter + 'a>>,
        array_depth: usize,
    ) -> AndFilter<'a> {
        AndFilter {
            filters,
            current_filter: 0,
            array_depth,
        }
    }
    // Advances the subfilters round-robin until all of them agree on the
    // same (normalized) DocResult, which is then returned.
    fn result(&mut self, base: Option<DocResult>) -> Option<DocResult> {
        // Number of additional subfilters that still need to match the
        // current candidate.
        let mut matches_count = self.filters.len() - 1;
        let mut base_result = base?;
        base_result.arraypath.resize(self.array_depth, 0);
        loop {
            self.current_filter += 1;
            if self.current_filter == self.filters.len() {
                self.current_filter = 0;
            }
            let mut next_result = self.filters[self.current_filter].first_result(&base_result)?;
            next_result.arraypath.resize(self.array_depth, 0);
            if base_result == next_result {
                matches_count -= 1;
                // Fold the agreeing subfilter's result into the candidate
                // (see `DocResult::combine`).
                base_result.combine(&mut next_result);
                if matches_count == 0 {
                    return Some(base_result);
                }
            } else {
                // Disagreement: the later result becomes the new candidate
                // and the count restarts.
                base_result = next_result;
                matches_count = self.filters.len() - 1;
            }
        }
    }
}
impl<'a> QueryRuntimeFilter for AndFilter<'a> {
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        let seed = self.filters[self.current_filter].first_result(start);
        self.result(seed)
    }
    fn next_result(&mut self) -> Option<DocResult> {
        let seed = self.filters[self.current_filter].next_result();
        self.result(seed)
    }
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        for filter in self.filters.iter_mut() {
            filter.prepare_relevancy_scoring(qsi);
        }
    }
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        // Propagate the first error from any subfilter.
        self.filters
            .iter()
            .try_for_each(|f| f.check_double_not(parent_is_neg))
    }
    fn is_all_not(&self) -> bool {
        self.filters.iter().all(|f| f.is_all_not())
    }
}
/// Used by OrFilter to maintain an already fetched result so we don't refetch when one side isn't
/// returned to caller. Because we won't know which side gets returned until both sides are
/// fetched.
pub struct FilterWithResult<'a> {
    filter: Box<dyn QueryRuntimeFilter + 'a>,
    // Buffered result not yet handed to the caller.
    result: Option<DocResult>,
    // Set once the underlying filter is exhausted.
    is_done: bool,
    array_depth: usize,
}
impl<'a> FilterWithResult<'a> {
fn prime_first_result(&mut self, start: &DocResult) {
if self.is_done {
return;
}
if self.result.is_none() || self.result.as_ref().unwrap().less(start, self.array_depth) {
self.result = self.filter.first_result(start);
}
if self.result.is_none() {
self.is_done = true;
} else {
self.result
.as_mut()
.unwrap()
.arraypath
.resize(self.array_depth, 0);
}
}
fn prime_next_result(&mut self) {
if self.is_done {
return;
}
if self.result.is_none() {
self.result = self.filter.next_result();
}
if self.result.is_none() {
self.is_done = true;
} else {
self.result
.as_mut()
.unwrap()
.arraypath
.resize(self.array_depth, 0);
}
}
}
/// Logical OR over two subfilters; results referring to the same document
/// are merged.
pub struct OrFilter<'a> {
    left: FilterWithResult<'a>,
    right: FilterWithResult<'a>,
}
impl<'a> OrFilter<'a> {
    /// Builds an OR over two subfilters; arraypaths are normalized to
    /// `array_depth` elements before results are compared.
    pub fn new(
        left: Box<dyn QueryRuntimeFilter + 'a>,
        right: Box<dyn QueryRuntimeFilter + 'a>,
        array_depth: usize,
    ) -> OrFilter<'a> {
        let left = FilterWithResult {
            filter: left,
            result: None,
            array_depth,
            is_done: false,
        };
        let right = FilterWithResult {
            filter: right,
            result: None,
            array_depth,
            is_done: false,
        };
        OrFilter { left, right }
    }
    /// Takes the smaller of the two buffered results, re-buffering the
    /// larger one for a later call; equal results are combined.
    fn take_smallest(&mut self) -> Option<DocResult> {
        match (self.left.result.take(), self.right.result.take()) {
            (Some(mut l), Some(mut r)) => match l.cmp(&r) {
                Ordering::Less => {
                    // Left comes first: emit it, keep right buffered.
                    self.right.result = Some(r);
                    Some(l)
                }
                Ordering::Greater => {
                    // Right comes first: emit it, keep left buffered.
                    self.left.result = Some(l);
                    Some(r)
                }
                Ordering::Equal => {
                    // Same document: fold right into left (see
                    // `DocResult::combine`), emit left, re-buffer right.
                    l.combine(&mut r);
                    self.right.result = Some(r);
                    Some(l)
                }
            },
            // Only one side has anything left: emit it.
            (Some(l), None) => Some(l),
            (None, Some(r)) => Some(r),
            // Both sides exhausted.
            (None, None) => None,
        }
    }
}
impl<'a> QueryRuntimeFilter for OrFilter<'a> {
    /// Primes both sides from `start`, then yields whichever result is smaller.
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        self.left.prime_first_result(start);
        self.right.prime_first_result(start);
        self.take_smallest()
    }

    /// Primes both sides with their next result, then yields the smaller one.
    fn next_result(&mut self) -> Option<DocResult> {
        self.left.prime_next_result();
        self.right.prime_next_result();
        self.take_smallest()
    }

    /// Both branches contribute to relevancy scoring.
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        self.left.filter.prepare_relevancy_scoring(qsi);
        self.right.filter.prepare_relevancy_scoring(qsi);
    }

    /// A double negation anywhere under either branch is an error.
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        self.left
            .filter
            .check_double_not(parent_is_neg)
            .and_then(|()| self.right.filter.check_double_not(parent_is_neg))
    }

    /// An `or` is purely negative only when both branches are.
    fn is_all_not(&self) -> bool {
        self.left.filter.is_all_not() && self.right.filter.is_all_not()
    }
}
/// Logical `not`: yields positions for which the wrapped filter produced no
/// match.
pub struct NotFilter<'a> {
    // Key-value store iterator, used to probe whether keys/documents exist.
    iter: DBIterator,
    // The filter being negated.
    filter: Box<dyn QueryRuntimeFilter + 'a>,
    // Last result handed to the caller; `None` once iteration is exhausted.
    last_doc_returned: Option<DocResult>,
    // Builds the value-key prefixes probed in `is_a_not_match`.
    kb: KeyBuilder,
}
impl<'a> NotFilter<'a> {
    /// Creates a `NotFilter` over `filter`. `last_doc_returned` starts as the
    /// default `DocResult` so the first `next_result` scans from the start.
    pub fn new(
        snapshot: &Snapshot,
        filter: Box<dyn QueryRuntimeFilter + 'a>,
        kb: KeyBuilder,
    ) -> NotFilter<'a> {
        NotFilter {
            iter: snapshot.new_iterator(),
            filter,
            last_doc_returned: Some(DocResult::new()),
            kb,
        }
    }

    /// Decides whether `dr` — a position the wrapped filter did NOT match —
    /// should actually be reported as a `not` match. Probes the store to rule
    /// out positions past the end of an array and non-existent documents.
    fn is_a_not_match(&mut self, dr: &DocResult) -> bool {
        let ret = match dr.last_segment_array_index() {
            Some(&0) => {
                // if we got a (not) match on the first array element, it's always a match
                // but only if the document actually exists.
                true
            }
            Some(_) => {
                // if we got a (not) match on any other element, check to make sure the key exists.
                // if not, it means other elements did a regular match and skipped them, then we
                // ran off the end of the array.
                let value_key = self.kb.kp_value_key_from_doc_result(dr);
                self.iter.set_mode(IteratorMode::From(
                    value_key.as_bytes(),
                    rocksdb::Direction::Forward,
                ));
                if let Some((key, _value)) = self.iter.next() {
                    // NOTE(review): assumes stored keys are valid UTF-8 —
                    // presumably guaranteed by the writer; confirm.
                    let key_str = unsafe { str::from_utf8_unchecked(&key) };
                    KeyBuilder::is_kp_value_key_prefix(&value_key, key_str)
                } else {
                    false
                }
            }
            None => {
                //not an array. always a (not) match.
                true
            }
        };
        if ret {
            // make sure we actually have a document. It's possible we matched a non-existent seq.
            // Probe for the document's "_id" value key: it must exist exactly.
            let mut kb = KeyBuilder::new();
            kb.push_object_key("_id");
            let value_key = kb.kp_value_key_from_doc_result(dr);
            self.iter.set_mode(IteratorMode::From(
                value_key.as_bytes(),
                rocksdb::Direction::Forward,
            ));
            if let Some((key, _value)) = self.iter.next() {
                // NOTE(review): same UTF-8 assumption as above.
                let key_str = unsafe { str::from_utf8_unchecked(&key) };
                value_key == key_str
            } else {
                false
            }
        } else {
            false
        }
    }
}
impl<'a> QueryRuntimeFilter for NotFilter<'a> {
    /// Scans forward from `start` for the first position the wrapped filter
    /// does NOT match (and that passes `is_a_not_match`'s existence probes).
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        let mut start = start.clone_only_seq_and_arraypath();
        start.arraypath.resize(self.kb.arraypath_len(), 0);
        while let Some(dr) = self.filter.first_result(&start) {
            if start.less(&dr, self.kb.arraypath_len()) {
                // The filter's first match at-or-after `start` is strictly
                // after it, so `start` itself is unmatched — a candidate.
                if self.is_a_not_match(&start) {
                    self.last_doc_returned = Some(start.clone_only_seq_and_arraypath());
                    return Some(start.clone_only_seq_and_arraypath());
                } else {
                    start.increment_first(self.kb.arraypath_len());
                }
            } else {
                // `start` is matched by the filter; advance past it.
                start.increment_last(self.kb.arraypath_len());
            }
        }
        // Wrapped filter exhausted: `start` may still be one final match.
        self.last_doc_returned = None;
        if self.is_a_not_match(&start) {
            Some(start)
        } else {
            None
        }
    }

    /// Resumes scanning just past the last returned position; `None` once
    /// `first_result` has signalled exhaustion.
    fn next_result(&mut self) -> Option<DocResult> {
        if let Some(mut next) = self.last_doc_returned.take() {
            next.increment_last(self.kb.arraypath_len());
            self.first_result(&next)
        } else {
            None
        }
    }

    /// Intentionally a no-op: this filter contributes no scoring terms.
    fn prepare_relevancy_scoring(&mut self, _qsi: &mut QueryScoringInfo) {
        // no op
    }

    /// Rejects `!(!x)`-style nesting, then checks children with the negation
    /// flag set.
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        if parent_is_neg {
            return Err(Error::Parse(
                "Logical not (\"!\") is nested inside of another logical not. \
                 This is not allowed."
                    .to_string(),
            ));
        }
        self.filter.check_double_not(true)?;
        Ok(())
    }

    /// A `not` filter is, by definition, entirely negative.
    fn is_all_not(&self) -> bool {
        true
    }
}
/// Binds a variable name to the value keys the wrapped filter matches,
/// grouping all matches for a single document into one result.
pub struct BindFilter<'a> {
    bind_var_name: String,
    filter: Box<dyn QueryRuntimeFilter + 'a>,
    // Cached from `kb.arraypath_len()` at construction time.
    array_depth: usize,
    kb: KeyBuilder,
    // Result for the *next* document, fetched while grouping the current one.
    option_next: Option<DocResult>,
}
impl<'a> BindFilter<'a> {
    /// Creates a `BindFilter` that records, under `bind_var_name`, the value
    /// key of every result the wrapped filter produces for a document.
    ///
    /// The return type now names the lifetime explicitly (`BindFilter<'a>`)
    /// instead of relying on deprecated elided-lifetimes-in-paths, matching
    /// the other `new` constructors in this module.
    pub fn new(
        bind_var_name: String,
        filter: Box<dyn QueryRuntimeFilter + 'a>,
        kb: KeyBuilder,
    ) -> BindFilter<'a> {
        BindFilter {
            bind_var_name,
            filter,
            array_depth: kb.arraypath_len(),
            kb,
            option_next: None,
        }
    }

    /// Accumulates onto `first` a binding for every subsequent result in the
    /// same document (same `seq`). The first result belonging to a different
    /// document is stashed in `option_next` so it is not lost.
    fn collect_results(&mut self, mut first: DocResult) -> Option<DocResult> {
        let value_key = self.kb.kp_value_key_from_doc_result(&first);
        first.add_bind_name_result(&self.bind_var_name, value_key);
        while let Some(next) = self.filter.next_result() {
            if next.seq == first.seq {
                // Same document: add this match's value key as another binding.
                let value_key = self.kb.kp_value_key_from_doc_result(&next);
                first.add_bind_name_result(&self.bind_var_name, value_key);
            } else {
                // New document: save it for the next call and return.
                self.option_next = Some(next);
                return Some(first);
            }
        }
        Some(first)
    }
}
impl<'a> QueryRuntimeFilter for BindFilter<'a> {
    /// Starts iteration at `start`, reusing the stashed next-document result
    /// when it is still at or past `start`; bindings for the whole document
    /// are collected before returning.
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        let first = match self.option_next.take() {
            Some(next) if start.less(&next, self.array_depth) => Some(next),
            _ => self.filter.first_result(start),
        };
        first.and_then(|dr| self.collect_results(dr))
    }

    /// Returns the stashed next-document result if any, otherwise advances
    /// the wrapped filter; bindings for the whole document are collected.
    fn next_result(&mut self) -> Option<DocResult> {
        let first = self
            .option_next
            .take()
            .or_else(|| self.filter.next_result());
        first.and_then(|dr| self.collect_results(dr))
    }

    /// Scoring is delegated unchanged to the wrapped filter.
    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        self.filter.prepare_relevancy_scoring(qsi);
    }

    /// Negation checking is delegated unchanged to the wrapped filter.
    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        self.filter.check_double_not(parent_is_neg)
    }

    fn is_all_not(&self) -> bool {
        self.filter.is_all_not()
    }
}
/// Scales the scores of every result of the wrapped filter by `boost`.
pub struct BoostFilter<'a> {
    filter: Box<dyn QueryRuntimeFilter + 'a>,
    boost: f32,
}
impl<'a> BoostFilter<'a> {
    /// Wraps `filter` so that every returned result has its scores scaled by
    /// `boost`. The return type names the lifetime explicitly instead of
    /// relying on deprecated elided-lifetimes-in-paths.
    pub fn new(filter: Box<dyn QueryRuntimeFilter + 'a>, boost: f32) -> BoostFilter<'a> {
        BoostFilter { filter, boost }
    }
}
impl<'a> QueryRuntimeFilter for BoostFilter<'a> {
    /// Delegates to the wrapped filter and boosts the scores of any result.
    fn first_result(&mut self, start: &DocResult) -> Option<DocResult> {
        self.filter.first_result(start).map(|mut dr| {
            dr.boost_scores(self.boost);
            dr
        })
    }

    /// Delegates to the wrapped filter and boosts the scores of any result.
    fn next_result(&mut self) -> Option<DocResult> {
        self.filter.next_result().map(|mut dr| {
            dr.boost_scores(self.boost);
            dr
        })
    }

    fn prepare_relevancy_scoring(&mut self, qsi: &mut QueryScoringInfo) {
        self.filter.prepare_relevancy_scoring(qsi);
    }

    fn check_double_not(&self, parent_is_neg: bool) -> Result<(), Error> {
        self.filter.check_double_not(parent_is_neg)
    }

    fn is_all_not(&self) -> bool {
        self.filter.is_all_not()
    }
}
|
#[cfg(feature = "profiling")]
extern crate cpuprofiler;
extern crate env_logger;
extern crate ggez;
#[macro_use]
extern crate log;
extern crate rand;
extern crate rodio;
mod gamestates;
mod ai;
//mod custom_audio;
mod cs;
mod game;
mod resources;
mod rules;
mod types;
mod utils;
use std::env;
use ggez::{conf, ContextBuilder};
use gamestates::GameWrapper;
const SHENZHEN_PATH: &str = ".local/share/Steam/steamapps/common/SHENZHEN IO/Content/";
/// Game entry point: builds the ggez context, locates resources (the crate's
/// `resources/` dir plus the SHENZHEN I/O content dir under the user's home),
/// and runs game states until one transitions to `Quit`.
fn main() {
    env_logger::init();

    let c = conf::Conf {
        window_mode: conf::WindowMode::default().dimensions(1280.0, 806.0),
        window_setup: conf::WindowSetup::default().title("Solitaire Clone"),
        backend: conf::Backend::OpenGL { major: 3, minor: 2 },
        ..conf::Conf::default()
    };

    #[cfg(feature = "profiling")]
    use cpuprofiler::PROFILER;
    #[cfg(feature = "profiling")]
    PROFILER.lock().unwrap().start("solitaire.profile").unwrap();

    // Prefer the crate-local `resources` dir when running under cargo.
    let resource_dir = if let Ok(manifest_dir) = env::var("CARGO_MANIFEST_DIR") {
        let mut path = std::path::PathBuf::from(manifest_dir);
        path.push("resources");
        path
    } else {
        std::path::PathBuf::from("./resources")
    };

    // `std::env::home_dir()` is deprecated (its Windows behavior is
    // surprising); read the home directory from the environment instead.
    let home_dir = env::var_os("HOME")
        .or_else(|| env::var_os("USERPROFILE"))
        .map(std::path::PathBuf::from)
        .expect("could not determine home directory (HOME/USERPROFILE unset)");

    let (mut ctx, mut event_loop) = ContextBuilder::new("solitaire_clone", "Swampsoft Games")
        .conf(c)
        .add_resource_path(resource_dir)
        .add_resource_path(home_dir.join(SHENZHEN_PATH))
        .build()
        .unwrap();

    let mut state = GameWrapper::new(&mut ctx).unwrap();

    // Run state transitions until a state resolves to `Quit`.
    loop {
        if let GameWrapper::Quit = state {
            break;
        }
        state = state.run(&mut ctx, &mut event_loop).unwrap();
    }

    #[cfg(feature = "profiling")]
    PROFILER.lock().unwrap().stop().unwrap();
}
|
use event;
use theme;
// Module is not named `ncurses` to avoir naming conflict
mod curses;
pub use self::curses::NcursesBackend;
/// Abstraction over a terminal rendering backend (currently ncurses).
///
/// All methods are associated functions (no `self`); the backend is assumed
/// to manage its own global terminal state. Parameters are now named — the
/// original anonymous trait parameters (`fn print_at((usize, usize), &str)`)
/// are deprecated in edition 2015 and a hard error from edition 2018 on.
pub trait Backend {
    /// Initializes the backend (terminal modes, color setup, …).
    fn init();
    /// Restores the terminal to its original state.
    fn finish();
    /// Clears the screen.
    fn clear();
    /// Flushes pending drawing to the terminal.
    fn refresh();
    /// Whether the terminal supports colors.
    fn has_colors() -> bool;
    /// Registers the foreground/background pair backing a color style.
    fn init_color_style(style: theme::ColorStyle, foreground: &theme::Color,
                        background: &theme::Color);
    /// Prints `text` starting at the given `pos` position.
    fn print_at(pos: (usize, usize), text: &str);
    /// Blocks until an input event is available and returns it.
    fn poll_event() -> event::Event;
    /// Sets how often the screen is refreshed while idle.
    fn set_refresh_rate(fps: u32);
    /// Returns the current terminal size.
    fn screen_size() -> (usize, usize);
    /// Runs `f` with the given color style applied.
    fn with_color<F: FnOnce()>(color: theme::ColorStyle, f: F);
    /// Runs `f` with the given effect applied.
    fn with_effect<F: FnOnce()>(effect: theme::Effect, f: F);
}
|
#[macro_use] extern crate bbs;
use bbs::prelude::*;
use std::collections::BTreeMap;
/// End-to-end BBS+ example: blind-issue a credential carrying a hidden link
/// secret, complete the signature, then build and verify a selective-
/// disclosure proof bound to an external claim.
fn main() {
    // Issuer setup: short keys, then a public key sized for 5 messages.
    let (dpk, sk) = Issuer::new_short_keys(None);
    let pk = dpk.to_public_key(5).unwrap();
    let signing_nonce = Issuer::generate_signing_nonce();
    // Send `signing_nonce` to holder

    // Recipient wants to hide a message in each signature to be able to link
    // them together
    let link_secret = Prover::new_link_secret();
    let mut messages = BTreeMap::new();
    messages.insert(0, link_secret.clone());
    let (ctx, signature_blinding) =
        Prover::new_blind_signature_context(&pk, &messages, &signing_nonce).unwrap();

    // Send `ctx` to signer. These are the issuer-known messages (indices 1-4;
    // index 0 is the holder's hidden link secret).
    let messages = sm_map![
        1 => b"Will",
        2 => b"Some Skill",
        3 => b"message_3",
        4 => b"message_4"
    ];

    // Will fail if `ctx` is invalid
    let blind_signature = Issuer::blind_sign(&ctx, &messages, &sk, &pk, &signing_nonce).unwrap();

    // Send `blind_signature` to recipient
    // Recipient knows all `messages` that are signed
    let mut msgs = messages
        .iter()
        .map(|(_, m)| m.clone())
        .collect::<Vec<SignatureMessage>>();
    msgs.insert(0, link_secret.clone());

    // Unblind to obtain the final signature over all 5 messages.
    let res =
        Prover::complete_signature(&pk, msgs.as_slice(), &blind_signature, &signature_blinding);
    assert!(res.is_ok());
    println!("RES {:?}", res);
    let signature = res.unwrap();

    // Proving stage.
    let nonce = Verifier::generate_proof_nonce();
    // Reveal messages at indices 1 and 2; everything else stays hidden.
    let proof_request = Verifier::new_proof_request(&[1, 2], &pk).unwrap();

    let data_to_sign: &[u8] = b"I am signing this statment";

    // Keep the link secret hidden behind an external blinding factor so
    // multiple proofs can be linked without ever revealing it.
    let link_hidden = ProofMessage::Hidden(HiddenMessage::ExternalBlinding(
        link_secret.clone(),
        nonce.clone(),
    ));

    // Sends `proof_request` and `nonce` to the prover
    let proof_messages = vec![
        link_hidden,
        pm_revealed!(b"Will"),
        pm_revealed!(b"Some Skill"),
        pm_hidden!(b"message_3"),
        pm_hidden!(b"message_4"),
    ];

    let pok = Prover::commit_signature_pok(&proof_request, proof_messages.as_slice(), &signature)
        .unwrap();

    // complete other zkps as desired and compute `challenge_hash`
    // add bytes from other proofs
    let claims: &[&[u8]] = &[data_to_sign];
    let challenge = Prover::create_challenge_hash(&[pok.clone()], Some(claims), &nonce).unwrap();
    let proof = Prover::generate_signature_pok(pok, &challenge).unwrap();
    println!("Proof {:?}", proof);

    // Send `proof` and `challenge` to Verifier
    match verify_signature_claim_pok(&proof_request, &proof, data_to_sign, &nonce) {
        Ok(sig_messages) => {
            // check revealed messages
            println!("Signature {:?}", sig_messages);
            println!("Signature reveal {:?}", proof.revealed_messages)
        }
        // Surface *why* the proof failed instead of a bare assert!(false).
        Err(e) => panic!("proof verification failed: {:?}", e),
    };
}
/// Re-derives the proof challenge from the signature proof, the verifier's
/// `nonce` and the external `claim` bytes, then verifies the proof.
///
/// Returns the revealed messages on success; any non-success verification
/// status is converted into a `BBSError::InvalidProof`.
pub fn verify_signature_claim_pok(
    proof_request: &ProofRequest,
    signature_proof: &SignatureProof,
    claim: &[u8],
    nonce: &ProofNonce,
) -> Result<Vec<SignatureMessage>, BBSError> {
    // Challenge input order: proof bytes for the revealed set, then the
    // nonce, then the claim — this must mirror the prover's hashing order.
    let mut challenge_bytes = signature_proof.proof.get_bytes_for_challenge(
        proof_request.revealed_messages.clone(),
        &proof_request.verification_key,
    );
    challenge_bytes.extend_from_slice(&nonce.to_bytes_uncompressed_form()[..]);
    challenge_bytes.extend_from_slice(claim);
    let challenge_verifier = ProofChallenge::hash(&challenge_bytes);
    match signature_proof.proof.verify(
        &proof_request.verification_key,
        &signature_proof.revealed_messages,
        &challenge_verifier,
    )? {
        PoKOfSignatureProofStatus::Success => Ok(signature_proof
            .revealed_messages
            .iter()
            .map(|(_, m)| *m)
            .collect::<Vec<SignatureMessage>>()),
        // Any other status means the proof did not check out.
        e => Err(BBSErrorKind::InvalidProof { status: e }.into()),
    }
}
|
mod book;
mod config;
mod links;
mod show;
use structopt::StructOpt;
#[macro_use]
extern crate prettytable;
// Top-level CLI subcommands, parsed by structopt. (Plain comment on purpose:
// a `///` doc comment here would change the generated --help about-text.)
#[derive(Debug, StructOpt)]
enum Command {
    /// Manage established links
    Links(links::LinkCommand),
    /// Book hours under aliased service
    Book(book::BookCommand),
    /// Create simpl config
    Config(config::ConfigCommand),
    /// Display worked hours
    Show(show::ShowCommand),
}
impl Command {
    /// Dispatches to the selected subcommand's `execute` implementation.
    /// Arms are listed in the enum's declaration order.
    fn execute(&self) {
        match self {
            Command::Links(cmd) => cmd.execute(),
            Command::Book(cmd) => cmd.execute(),
            Command::Config(cmd) => cmd.execute(),
            Command::Show(cmd) => cmd.execute(),
        }
    }
}
/// Parses the command line and runs the chosen subcommand.
fn main() {
    Command::from_args().execute();
}
|
//pub fn power(canvas_id: &str, power: i32) -> Result<Chart, JsValue> {
// let map_coord = func_plot::draw(canvas_id, power).map_err(|err| err.to_string())?;
// Ok(Chart {
// convert: Box::new(move |coord| map_coord(coord).map(|(x, y)| (x.into(), y.into()))),
// })
//}
//
use core::f64::consts::PI;
use crate::chart_and_js_exports::DrawResult;
use plotters::prelude::*;
use crate::map_bounds::MapBounds;
use crate::coord_plane::LatLonPoint;
use crate::lat_lon_lines::sphere_coords;
use crate::projections::projection_by_name;
use crate::projections::projection_types::ProjectionParams;
use crate::projections::projection_types::Projection;
use crate::chart_and_js_exports::JSProjectionParams;
/// Renders the named map projection onto the canvas identified by
/// `canvas_id`: projects a lat/lon graticule of `num_lat_lon` lines through
/// the selected projection and plots the result, returning the backend's
/// pixel→data coordinate translator.
///
/// `projection_params` carries 0–2 projection-specific parameters (standard
/// meridian / standard parallel(s)). NOTE(review): it is indexed without a
/// length check, so a too-short vector panics — confirm callers always pass
/// enough values. `tissot` is currently unused in this function.
pub fn draw(canvas_id: &str, map_projection_name: String, num_lat_lon: usize,
            tissot: bool, bounds: MapBounds, projection_params: Vec<f64>)
            -> DrawResult<impl Fn((i32, i32)) -> Option<(f64, f64)>>
{
    let backend = CanvasBackend::new(canvas_id).expect("cannot find canvas");
    let root = backend.into_drawing_area();
    let font: FontDesc = ("sans-serif", 20.0).into();
    root.fill(&WHITE)?;

    let upper_x = bounds.upper_x;
    let lower_x = bounds.lower_x;
    let upper_y = bounds.upper_y;
    let lower_y = bounds.lower_y;

    let mut map_ctx = ChartBuilder::on(&root)
        // `.clone()` instead of `format!("{}", ..)` — same String, no
        // formatting machinery (clippy::useless_format).
        .caption(map_projection_name.clone(), font)
        .x_label_area_size(30)
        .y_label_area_size(30)
        .build_ranged(lower_x..upper_x, lower_y..upper_y)?;
    map_ctx
        .configure_mesh()
        .disable_x_mesh()
        .disable_y_mesh()
        .draw()?;

    let points: Vec<LatLonPoint> = sphere_coords(num_lat_lon, 1000);

    // Last use of the name: move it in instead of cloning again.
    let projection: Projection = projection_by_name::use_projection(map_projection_name)
        .expect("Projection not found");

    // Dispatch on the parameter shape the selected projection expects.
    let mapped_points = match projection.params {
        JSProjectionParams::JSPointsOnly => (projection.projection_function)
            (ProjectionParams::PointsOnly(points)),
        JSProjectionParams::JSPointsStandardMerid => (projection.projection_function)
            (ProjectionParams::PointsStandardMerid(points, projection_params[0])),
        JSProjectionParams::JSPointsStandardPar => (projection.projection_function)
            (ProjectionParams::PointsStandardPar(points, projection_params[0])),
        JSProjectionParams::JSPointsTwoStandardPar => (projection.projection_function)
            (ProjectionParams::PointsTwoStandardPar(points, projection_params[0],
                                                    projection_params[1])),
    };

    map_ctx.draw_series(
        mapped_points
            .iter()
            .map(|point| Circle::new(point.to_tuple(), 2, BLACK.filled())),
    )?;
    root.present()?;

    Ok(map_ctx.into_coord_trans())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.