text stringlengths 8 4.13M |
|---|
use crate::adapter::repository::user_dao::UserRepositoryBySQLite;
use crate::usecase::user_creation::UserOperation;
use crate::entity::user::UserID;
use actix_web::{get, web, Responder};
#[get("/users/{id}")]
/// Show the user with the given path id, or a farewell message when no such
/// user exists.
pub async fn show(info: web::Path<i64>) -> impl Responder {
    // Wire the use case to its SQLite-backed repository.
    let user_operation = UserOperation { ur: UserRepositoryBySQLite {} };
    match user_operation.find(UserID { id: info.into_inner() }) {
        Some(user) => format!("Hello {}", user.name),
        // `format!("bye")` allocated through the formatting machinery for a
        // constant string (clippy::useless_format); `to_string` says what it means.
        None => "bye".to_string(),
    }
}
|
use std::{
borrow::{Borrow, Cow},
collections::HashMap,
hash::Hash,
};
/// A `HashMap`-like structure for accessing query parameters.
///
/// Both keys and values are stored as `Cow<str>`, so the map can hold either
/// borrowed slices of the original query string or owned replacements.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Query<'a>(HashMap<Cow<'a, str>, Cow<'a, str>>);
impl<'a> Query<'a> {
    /// Create a query from an iterator of key-value pairs.
    pub fn new<Q>(query: Q) -> Query<'a>
    where
        Q: IntoIterator<Item = (Cow<'a, str>, Cow<'a, str>)>,
    {
        Query(query.into_iter().collect())
    }

    /// Remove all parameters.
    pub fn clear(&mut self) {
        self.0.clear()
    }

    /// Get an iterator to the keys.
    pub fn keys(&self) -> impl Iterator<Item = &str> {
        self.0.keys().map(AsRef::as_ref)
    }

    /// Get an iterator to the values.
    pub fn values(&self) -> impl Iterator<Item = &str> {
        self.0.values().map(AsRef::as_ref)
    }

    /// Get an iterator to the key-value pairs.
    pub fn iter(&self) -> impl Iterator<Item = (&str, &str)> {
        self.0.iter().map(|(k, v)| (k.as_ref(), v.as_ref()))
    }

    /// How many parameters there are.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Is there any parameter?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Check if the given parameter is present.
    pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
    where
        Cow<'a, str>: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.0.contains_key(k)
    }

    /// Get the value for a given parameter.
    pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&str>
    where
        Cow<'a, str>: Borrow<Q>,
        Q: Hash + Eq,
    {
        self.0.get(k).map(AsRef::as_ref)
    }

    /// Get a mutable reference to the owned value for a given parameter.
    ///
    /// A borrowed value is promoted to its owned form first (`Cow::to_mut`),
    /// which may clone.
    pub fn get_mut<K: ?Sized>(&mut self, k: &K) -> Option<&mut String>
    where
        Cow<'a, str>: Borrow<K>,
        K: Hash + Eq,
    {
        self.0.get_mut(k).map(Cow::to_mut)
    }

    /// Insert a parameter, returning the previous value if one was present.
    ///
    /// FIX: the return type previously elided its lifetime, which bound the
    /// returned `Cow` to the `&mut self` borrow; the stored values live for
    /// `'a`, so return `Cow<'a, str>` (strictly more general for callers).
    pub fn insert<K, V>(&mut self, k: K, v: V) -> Option<Cow<'a, str>>
    where
        K: Into<Cow<'a, str>>,
        V: Into<Cow<'a, str>>,
    {
        self.0.insert(k.into(), v.into())
    }

    /// Remove a parameter, returning its value if it was present.
    ///
    /// BUG FIX: the old signature required `K: Into<Cow<'a, str>>` and passed
    /// `k.into()` to `HashMap::remove`. Since `Into` requires `Sized`, the
    /// method could never be called with plain `&str` keys (`str` is unsized),
    /// only with `&Cow` — for which the `.into()` was a no-op. Dropping the
    /// bound and borrowing the key directly matches `contains_key`/`get` and
    /// keeps every previously valid call compiling.
    pub fn remove<K: ?Sized>(&mut self, k: &K) -> Option<Cow<'a, str>>
    where
        Cow<'a, str>: Borrow<K>,
        K: Hash + Eq,
    {
        self.0.remove(k)
    }
}
impl<'a, K, V> Extend<(K, V)> for Query<'a>
where
    K: Into<Cow<'a, str>>,
    V: Into<Cow<'a, str>>,
{
    /// Insert every pair from `items`, converting keys and values into
    /// `Cow` strings. Later pairs overwrite earlier ones with the same key,
    /// exactly as `HashMap` insertion does.
    fn extend<T>(&mut self, items: T)
    where
        T: IntoIterator<Item = (K, V)>,
    {
        for (key, value) in items {
            self.0.insert(key.into(), value.into());
        }
    }
}
|
/// Build script: compile the C++ support shim and emit cargo metadata.
fn main() {
    // Compile the C++ support file with exception support enabled.
    let mut build = cc::Build::new();
    build.file("the_worst_support.cpp");
    build.flag("-fexceptions");
    build.cpp(true);
    build.compile("the_worst_support");
    // Rebuild when the compiler or sources change.
    println!("cargo:rerun-if-env-changed=CC");
    println!("cargo:rerun-if-changed=build.rs");
    // BUG FIX: these previously said ".cp", which matches no file on disk
    // (the compiled source above is ".cpp"), so edits to the C++ sources
    // never triggered a rebuild.
    println!("cargo:rerun-if-changed=the_worst.cpp");
    println!("cargo:rerun-if-changed=the_worst_support.cpp");
    println!("cargo:root={}", std::env::var("OUT_DIR").unwrap());
}
|
//! Memory bookkeeping.
use prelude::*;
use core::ops::Range;
use core::{ptr, mem, ops};
use shim::config;
/// Elements required _more_ than the length as capacity.
///
/// This represents how many elements are needed to conduct a `reserve` without the
/// stack overflowing, plus one (representing the new element):
///
/// 1. Aligner.
/// 2. Excessive space.
/// 3. The old buffer.
/// 4. The pushed or inserted block.
///
/// See assumption 4 on `Bookkeeper::pool`.
pub const EXTRA_ELEMENTS: usize = 4;
#[cfg(feature = "alloc_id")]
use core::sync::atomic::{self, AtomicUsize};
/// The bookkeeper ID count.
///
/// This is atomically incremented whenever a new `Bookkeeper` is created, so
/// each bookkeeper gets a distinct `id` (used to tell allocators apart).
#[cfg(feature = "alloc_id")]
static BOOKKEEPER_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// The memory bookkeeper.
///
/// This stores data about the state of the allocator, and in particular, the free memory.
///
/// The actual functionality is provided by [`Allocator`](./trait.Allocator.html).
pub struct Bookkeeper {
    /// The internal block pool.
    ///
    /// Entries in the block pool can be "empty", meaning that you can overwrite the entry without
    /// breaking consistency.
    ///
    /// # Assumptions
    ///
    /// Certain assumptions are made:
    ///
    /// 1. The list is always sorted with respect to the block's pointers.
    /// 2. No two consecutive or empty block delimited blocks are adjacent, except if the right
    ///    block is empty.
    /// 3. There are no trailing empty blocks.
    /// 4. The capacity is always `EXTRA_ELEMENTS` blocks more than the length (this is due to
    ///    reallocation pushing at maximum two elements, so we reserve two or more extra to allow
    ///    pushing one additional element without unbounded recursion).
    ///
    /// These are **not** invariants: If these assumptions are not held, it will simply act strange
    /// (e.g. logic bugs), but not memory unsafety.
    ///
    // NOTE(review): `Vec` here appears to be the crate's own vector type (via `prelude`),
    // not `std::vec::Vec` — elsewhere in this file `push` returns a `Result` and
    // `pop_iter`/`refill` exist. Confirm against the crate's `vec` module.
    pool: Vec<Block>,
    /// The total number of bytes in the pool.
    total_bytes: usize,
    /// Is this bookkeeper currently reserving?
    ///
    /// This is used to avoid unbounded metacircular reallocation (reservation).
    ///
    // TODO: Find a replacement for this "hack".
    reserving: bool,
    /// The allocator ID.
    ///
    /// This is simply to be able to distinguish allocators in the locks.
    #[cfg(feature = "alloc_id")]
    id: usize,
}
#[allow(len_without_is_empty)]
impl Bookkeeper {
    /// Create a new bookkeeper with some initial vector.
    ///
    /// The vector must be empty and have capacity for at least `EXTRA_ELEMENTS`
    /// blocks (assumption 4); both are checked in debug builds.
    pub fn new(vec: Vec<Block>) -> Bookkeeper {
        // Make sure the assumptions are satisfied.
        debug_assert!(vec.capacity() >= EXTRA_ELEMENTS, "Not enough initial capacity of the vector.");
        debug_assert!(vec.is_empty(), "Initial vector isn't empty.");
        // TODO: When added use expr field attributes.
        #[cfg(feature = "alloc_id")]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
            // Increment the ID counter to get a brand new ID.
            id: BOOKKEEPER_ID_COUNTER.fetch_add(1, atomic::Ordering::SeqCst),
        };
        #[cfg(not(feature = "alloc_id"))]
        let res = Bookkeeper {
            pool: vec,
            total_bytes: 0,
            reserving: false,
        };
        bk_log!(res, "Bookkeeper created.");
        res.check();
        res
    }

    /// Perform a binary search to find the appropriate place where the block can be inserted or is
    /// located.
    ///
    /// It is guaranteed that no block left of the returned value satisfies the above condition.
    #[inline]
    fn find(&mut self, block: &Block) -> usize {
        // Logging.
        bk_log!(self, "Searching (exact) for {:?}.", block);
        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();
        // Move left past the run of empty blocks ending at `ind`, so the
        // returned index is the leftmost equivalent position.
        ind - self.pool.iter_mut()
            .rev()
            .skip(len - ind)
            .take_while(|x| x.is_empty())
            .count()
    }

    /// Perform a binary search to find the appropriate bound where the block can be inserted or is
    /// located.
    ///
    /// It is guaranteed that no block left of the returned range satisfies the above condition.
    #[inline]
    fn find_bound(&mut self, block: &Block) -> Range<usize> {
        // Logging.
        bk_log!(self, "Searching (bounds) for {:?}.", block);
        let mut left_ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();
        // Move left past empty blocks, as in `find`.
        left_ind -= self.pool.iter_mut()
            .rev()
            .skip(len - left_ind)
            .take_while(|x| x.is_empty())
            .count();
        let mut right_ind = match self.pool.binary_search(&block.empty_right()) {
            Ok(x) | Err(x) => x,
        };
        // Move right past empty blocks, giving the exclusive upper bound.
        right_ind += self.pool.iter()
            .skip(right_ind)
            .take_while(|x| x.is_empty())
            .count();
        left_ind..right_ind
    }

    /// Go over every block in the allocator and call some function.
    ///
    /// Technically, this could be done through an iterator, but this, more unidiomatic, way is
    /// slightly faster in some cases.
    ///
    /// Consumes the bookkeeper; the block holding the pool itself is passed to
    /// `f` last.
    pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
        // Logging.
        bk_log!(self, "Iterating over the blocks of the bookkeeper...");
        // Run over all the blocks in the pool.
        for i in self.pool.pop_iter() {
            f(i);
        }
        // Take the block holding the pool.
        f(Block::from(self.pool));
    }

    /// Pop the top block from the pool.
    pub fn pop(&mut self) -> Option<Block> {
        self.pool.pop().map(|res| {
            // Update the byte count.
            self.total_bytes -= res.size();
            // Check stuff, just in case.
            self.check();
            res
        })
    }

    /// Get the length of the pool.
    pub fn len(&self) -> usize {
        self.pool.len()
    }

    /// Get the total bytes of memory in the pool.
    pub fn total_bytes(&self) -> usize {
        self.total_bytes
    }

    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No blocks are adjacent.
    ///
    /// This is NOOP in release mode.
    fn check(&self) {
        if cfg!(debug_assertions) {
            // Logging.
            bk_log!(self, "Checking...");
            // The total number of bytes.
            let mut total_bytes = 0;
            // Reverse iterator over the blocks.
            let mut it = self.pool.iter().enumerate().rev();
            // Check that the capacity is large enough.
            assert!(self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
                    "The capacity should be at least {} more than the length of the pool.",
                    EXTRA_ELEMENTS);
            // NOTE(review): `it` is reversed, so this first element is the *last*
            // block of the pool; the message text looks misleading — confirm intent.
            if let Some((_, x)) = it.next() {
                // Make sure there are no leading empty blocks.
                assert!(!x.is_empty(), "The leading block is empty.");
                total_bytes += x.size();
                let mut next = x;
                for (n, i) in it {
                    total_bytes += i.size();
                    // Check if sorted.
                    assert!(next >= i, "The block pool is not sorted at index, {} ({:?} < {:?}).",
                            n, next, i);
                    // Make sure no blocks are adjacent.
                    assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index, {} ({:?} and \
                            {:?})", n, i, next);
                    // Make sure an empty block has the same address as its right neighbor.
                    assert!(!i.is_empty() || i == next, "Empty block not adjacent to right neighbor \
                            at index {} ({:?} and {:?})", n, i, next);
                    // Set the variable tracking the previous block.
                    next = i;
                }
                // Check for trailing empty blocks.
                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
            }
            // Make sure the sum is maintained properly.
            assert!(total_bytes == self.total_bytes, "The sum is not equal to the 'total_bytes' \
                    field: {} ≠ {}.", total_bytes, self.total_bytes);
        }
    }
}
/// An allocator.
///
/// This provides the functionality of the memory bookkeeper, requiring only provision of two
/// methods, defining the "breaker" (fresh allocator). The core functionality is provided by
/// default methods, which aren't generally made to be overwritten.
///
/// The reason why these methods aren't implemented directly on the bookkeeper is the distinction
/// between different forms of allocators (global, local, and so on). Any newtype of
/// [`Bookkeeper`](./struct.Bookkeeper.html).
///
/// # Guarantees vs. assumptions
///
/// Please note that whenever a guarantee is mentioned, it relies on all the overwritten
/// methods upholding the guarantees specified in the documentation.
pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through some breaker (be it SBRK or the global
    /// allocator).
    ///
    /// The returned pointer is assumed to be aligned to `align`. If this is not held, all future
    /// guarantees are invalid.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to call of this function, it should be too after it.
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block;

    /// Called right before new memory is added to the pool.
    fn on_new_memory(&mut self) {}

    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to which
    /// a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    /// Address space
    /// I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that
    /// the pointer is aligned properly.
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B ^ I--------------------------I
    /// l al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B I--------I
    /// l \_________________/
    /// k our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
    fn alloc(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(self, "Allocating {} bytes with alignment {}.", size, align);
        // Scan the pool for the first block that is large enough and can be
        // split at the aligner while still fitting `size`.
        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
            if i.size() >= size {
                // Try to split at the aligner.
                i.align(align).and_then(|(mut a, mut b)| {
                    if b.size() >= size {
                        // Override the old block.
                        *i = a;
                        Some((n, b))
                    } else {
                        // Put the split block back together and place it back in its spot.
                        a.merge_right(&mut b).expect("Unable to merge block right.");
                        *i = a;
                        None
                    }
                })
            } else {
                None
            }
        }).next() {
            // Update the pool byte count.
            self.total_bytes -= b.size();
            if self.pool[n].is_empty() {
                // For empty alignment invariant.
                let _ = self.remove_at(n);
            }
            // Split and mark the block uninitialized to the debugger.
            let (res, excessive) = b.mark_uninitialized().split(size);
            // There are many corner cases that make knowing where to insert it difficult
            // so we search instead.
            self.free(excessive);
            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(res.size() == size, "Requested space does not match with the returned \
                block.");
            res
        } else {
            // No fitting block found. Allocate a new block.
            self.alloc_external(size, align)
        }
    }

    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B I--------I
    /// l \_________________/
    /// k the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B I-----------------I
    /// l I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    /// Address space
    /// I------------------------I
    /// B I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
    /// See [`insert`](#method.insert) for details.
    #[inline]
    fn free(&mut self, block: Block) {
        // Just logging for the unlucky people debugging this shit. No problem.
        bk_log!(self, "Freeing {:?}...", block);
        // Binary search for the block.
        let bound = self.find_bound(&block);
        // Free the given block.
        self.free_bound(bound, block);
    }

    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible) it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// # Example
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B \~~~~~~~~~~~~~~~~~~~~~/
    /// l needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new list, and then
    /// deallocate the old one, after which we use memmove to copy the data over to the newly
    /// allocated list.
    fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index bound.
        let ind = self.find_bound(&block);
        // Logging.
        bk_log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);
        // Try to do an inplace reallocation.
        match self.realloc_inplace_bound(ind, block, new_size) {
            Ok(block) => block,
            Err(block) => {
                // Reallocation cannot be done inplace.
                // Allocate a new block with the same size.
                let mut res = self.alloc(new_size, align);
                // Copy the old data to the new location.
                block.copy_to(&mut res);
                // Free the old block.
                // Allocation may have moved insertion so we search again.
                self.free(block);
                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                    returned block.");
                res
            },
        }
    }

    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the old
    /// one. If not, truncate the block and place it back to the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index of insertion is known, since this performs a binary
    /// search to find the block's index. When you know the index use
    /// [`realloc_inplace_bound`](#method.realloc_inplace_bound.html).
    #[inline]
    fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        bk_log!(self, "Reallocating {:?} inplace to {}...", block, new_size);
        // Find the bounds of given block.
        let bound = self.find_bound(&block);
        // Go for it!
        let res = self.realloc_inplace_bound(bound, block, new_size);
        // Check consistency.
        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
            does not match with the returned block.");
        res
    }

    /// Reallocate a block on a known index bound inplace.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace.html) for more information.
    fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        bk_log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);
        // Assertions... (this was an inner `///` doc comment on a statement,
        // which is not a valid position for a doc comment — now a plain comment.)
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
            index.");
        if new_size <= block.size() {
            // Shrink the block.
            bk_log!(self;ind, "Shrinking {:?}.", block);
            // Split the block in two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_bound(ind, excessive);
            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrinked properly.");
            // Run a consistency check.
            self.check();
            return Ok(block);
            // We check if `ind` is the end of the array.
        } else {
            let mut mergable = false;
            if let Some(entry) = self.pool.get_mut(ind.end) {
                mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
            }
            // Note that we are sure that no segments in the array are adjacent (unless they have size
            // 0). This way we know that we will, at maximum, need one and only one block for extending
            // the current block.
            if mergable {
                // Logging...
                bk_log!(self;ind, "Merging {:?} to the right.", block);
                // We'll merge it with the block at the end of the range.
                block.merge_right(&mut self.remove_at(ind.end))
                    .expect("Unable to merge block right, to the end of the range.");
                // Merge succeeded.
                // Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                // Remove_at may have shortened the vector.
                if ind.start == self.pool.len() {
                    self.push(excessive);
                } else if !excessive.is_empty() {
                    self.pool[ind.start] = excessive;
                }
                // Block will still not be adjacent, due to `excessive` being guaranteed to not be
                // adjacent to the next block.
                // Run a consistency check.
                self.check();
                return Ok(res);
            }
        }
        Err(block)
    }

    /// Free a block placed in some index bound.
    ///
    /// This will at maximum insert one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
        // Logging.
        bk_log!(self;ind, "Freeing {:?}.", block);
        // Short circuit in case of empty block.
        if block.is_empty() { return; }
        // When compiled with `security`, we zero this block.
        block.sec_zero();
        if ind.start == self.pool.len() {
            self.push(block);
            return;
        }
        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
            index.");
        // Try to merge it with the block to the right.
        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
            // Merge the block with the rightmost block in the range.
            block.merge_right(&mut self.remove_at(ind.end))
                .expect("Unable to merge block right to the block at the end of the range");
            // The merging succeeded. We proceed to try to close in the possible gap.
            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                // Check consistency.
                self.check();
                return;
            }
            // Dammit, let's try to merge left.
        } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
            // Check consistency.
            self.check();
            return;
        }
        // Well, it failed, so we insert it the old-fashioned way.
        self.insert(ind.start, block);
        // Check consistency.
        self.check();
    }

    /// Allocate external ("fresh") space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_external(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        bk_log!(self, "Fresh allocation of size {} with alignment {}.", size, align);
        // Break it to me!
        let res = self.alloc_fresh(size, align);
        // Check consistency.
        self.check();
        res
    }

    /// Push an element without reserving.
    // TODO: Make `push` and `free` one.
    fn push(&mut self, block: Block) {
        // Logging.
        bk_log!(self;self.pool.len(), "Pushing {:?}.", block);
        // Mark the block free.
        let mut block = block.mark_free();
        // Short-circuit in case of an empty block.
        if !block.is_empty() {
            // Trigger the new memory event handler.
            self.on_new_memory();
            // Update the pool byte count.
            self.total_bytes += block.size();
            // Some assertions...
            debug_assert!(self.pool.is_empty() || &block > self.pool.last().unwrap(), "Pushing will \
                make the list unsorted.");
            // We will try to simply merge it with the last block.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }
            // Reserve space and free the old buffer.
            if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
                // Note that we do not set the count down because this isn't setting back our
                // pushed block.
                self.free(x);
            }
            // Try again to merge with last block on the off chance reserve pushed something we can
            // merge with. This has actually happened in testing.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }
            // Merging failed. Note that trailing empty blocks are not allowed, hence the last block is
            // the only non-empty candidate which may be adjacent to `block`.
            // Check again that pushing is correct.
            if self.pool.is_empty() || &block > self.pool.last().unwrap() {
                // We push.
                let res = self.pool.push(block);
                // Make some assertions.
                debug_assert!(res.is_ok(), "Push failed (buffer full).");
            } else {
                // `free` handles the count, so we set it back.
                // TODO: Find a better way to do so.
                self.total_bytes -= block.size();
                // Can't push because reserve changed the end of the pool.
                self.free(block);
            }
        }
        // Check consistency.
        self.check();
    }

    /// Reserve some number of elements, and return the old buffer's block.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to call of this function, it should be too after it.
    fn reserve(&mut self, min_cap: usize) -> Option<Block> {
        // Logging.
        bk_log!(self;min_cap, "Reserving {}.", min_cap);
        // Only reserve when not already reserving (avoids metacircular
        // recursion) and the capacity actually falls short.
        if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) {
            // Reserve a little extra for performance reasons.
            // TODO: This should be moved to some new method.
            let new_cap = min_cap + EXTRA_ELEMENTS + config::extra_fresh(min_cap);
            // Catch 'em all.
            debug_assert!(new_cap > self.pool.capacity(), "Reserve shrinks?!");
            // Make sure no unbounded reallocation happens.
            self.reserving = true;
            // Break it to me!
            let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());
            // Go back to the original state.
            self.reserving = false;
            // Check consistency.
            self.check();
            Some(self.pool.refill(new_buf))
        } else {
            None
        }
    }

    /// Insert a block entry at some index.
    ///
    /// If the space is non-empty, the elements will be pushed filling out the empty gaps to the
    /// right.
    ///
    /// # Warning
    ///
    /// This might in fact break the order.
    ///
    /// # Panics
    ///
    /// Panics on when `ind` is greater than the block pool's length.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search to
    /// find where insertion is appropriate.
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B < here I--------I
    /// l I------------I
    /// k
    /// s I---I
    /// I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right to the next entry until an empty entry is reached:
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B < here I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k I------------I <~ thus we have moved this one down.
    /// s I---I
    /// I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B < here
    /// l I--------I <~ this one cannot move down, due to being blocked.
    /// k I------------I <~ thus we have moved this one down.
    /// s I---I
    /// I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    /// Address space
    /// I------I
    /// B I----------I
    /// l I--------I
    /// k I------------I
    /// s I---I
    /// ```
    ///
    /// The insertion is now completed.
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Logging.
        bk_log!(self;ind, "Inserting block {:?}...", block);
        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");
        // Some assertions...
        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will make \
            the list unsorted.", ind);
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
        debug_assert!(!block.is_empty(), "Inserting an empty block.");
        // Trigger the new memory event handler.
        self.on_new_memory();
        // Find the next gap, where a used block was.
        let gap = self.pool
            .iter()
            .enumerate()
            // We only check _after_ the index.
            .skip(ind)
            // Until the block is empty.
            .filter(|&(_, x)| x.is_empty())
            .next()
            .map(|(n, _)| n);
        // Log the operation.
        bk_log!(self;ind, "Moving all blocks right to {} blocks to the right.",
            gap.unwrap_or_else(|| self.pool.len()));
        // The old vector's buffer.
        let mut old_buf = None;
        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).
            // Memmove the elements to make a gap to the new block.
            ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                self.pool.get_unchecked_mut(ind + 1) as *mut Block,
                // The gap defaults to the end of the pool.
                gap.unwrap_or_else(|| {
                    // We will only extend the length if we were unable to fit it into the current length.
                    // Loooooooging...
                    bk_log!(self;ind, "Block pool not long enough for shift. Extending.");
                    // Reserve space. This does not break order, due to the assumption that
                    // `reserve` never breaks order.
                    old_buf = unborrow!(self.reserve(self.pool.len() + 1));
                    // We will move a block into reserved memory but outside of the vec's bounds. For
                    // that reason, we push an uninitialized element to extend the length, which will
                    // be assigned in the memcpy.
                    // NOTE(review): `mem::uninitialized` is deprecated and UB for most types
                    // on modern toolchains; `MaybeUninit` is the replacement — confirm the
                    // crate's supported compiler version.
                    let res = self.pool.push(mem::uninitialized());
                    // Just some assertions...
                    debug_assert!(res.is_ok(), "Push failed (buffer full).");
                    self.pool.len() - 1
                }) - ind);
            // Update the pool byte count.
            self.total_bytes += block.size();
            // Mark it free and set the element.
            ptr::write(self.pool.get_unchecked_mut(ind), block.mark_free());
        }
        // Free the old buffer, if it exists.
        if let Some(block) = old_buf {
            self.free(block);
        }
        // Check consistency.
        self.check();
    }

    /// Remove a block.
    fn remove_at(&mut self, ind: usize) -> Block {
        // Logging.
        bk_log!(self;ind, "Removing block at {}.", ind);
        let res = if ind + 1 == self.pool.len() {
            let block = self.pool[ind].pop();
            // Make sure there are no trailing empty blocks.
            let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();
            // Truncate the vector.
            self.pool.truncate(new_len);
            block
        } else {
            // Calculate the upper and lower bound
            let empty = self.pool[ind + 1].empty_left();
            let empty2 = empty.empty_left();
            // Replace the block at `ind` with the left empty block from `ind + 1`.
            let block = mem::replace(&mut self.pool[ind], empty);
            // Iterate over the pool from `ind` and down and set it to the empty of our block.
            let skip = self.pool.len() - ind;
            for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
                // Empty the blocks.
                *place = empty2.empty_left();
            }
            block
        };
        // Update the pool byte count.
        self.total_bytes -= res.size();
        // Check consistency.
        self.check();
        // Mark the block uninitialized to the debugger.
        res.mark_uninitialized()
    }
}
|
use anyhow::{format_err, Error};
use async_google_apis_common as common;
use common::{
yup_oauth2::{self, InstalledFlowAuthenticator},
DownloadResult, TlsClient,
};
use crossbeam::atomic::AtomicCell;
use futures::future::try_join_all;
use itertools::Itertools;
use lazy_static::lazy_static;
use log::debug;
use maplit::{hashmap, hashset};
use mime::Mime;
use percent_encoding::percent_decode;
use stack_string::{format_sstr, StackString};
use std::{
collections::{HashMap, HashSet},
ffi::OsStr,
fmt::{self, Debug, Formatter},
future::Future,
path::{Path, PathBuf},
string::ToString,
sync::Arc,
};
use stdout_channel::rate_limiter::RateLimiter;
use tokio::{
fs::{self, create_dir_all},
io::AsyncReadExt,
};
use url::Url;
use crate::{
directory_info::DirectoryInfo,
drive_v3_types::{
Change, ChangesGetStartPageTokenParams, ChangesListParams, ChangesService, DriveParams,
DriveParamsAlt, DriveScopes, File, FileList, FilesCreateParams, FilesDeleteParams,
FilesExportParams, FilesGetParams, FilesListParams, FilesService, FilesUpdateParams,
},
exponential_retry,
};
/// Build a hyper client restricted to HTTPS, using the platform's native
/// root certificates and HTTP/1 only.
fn https_client() -> TlsClient {
    // Configure the TLS connector first, then hand it to the client builder.
    let connector = hyper_rustls::HttpsConnectorBuilder::new()
        .with_native_roots()
        .https_only()
        .enable_http1()
        .build();
    hyper::Client::builder().build(connector)
}
lazy_static! {
    // Map from Google-Apps-native MIME types to a concrete MIME type —
    // presumably the format used when exporting such files (see the
    // `FilesExportParams` import); confirm against the export call sites.
    // NOTE(review): `std::sync::LazyLock` could replace `lazy_static!` — confirm MSRV.
    static ref MIME_TYPES: HashMap<&'static str, &'static str> = hashmap! {
        "application/vnd.google-apps.document" => "application/vnd.oasis.opendocument.text",
        "application/vnd.google-apps.presentation" => "application/pdf",
        "application/vnd.google-apps.spreadsheet" => "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "application/vnd.google-apps.drawing" => "image/png",
        "application/vnd.google-apps.site" => "text/plain",
    };
}
lazy_static! {
    // Google-Apps MIME types that have no export mapping (no entry in
    // `MIME_TYPES`), e.g. forms, maps and folders.
    static ref UNEXPORTABLE_MIME_TYPES: HashSet<&'static str> = hashset! {
        "application/vnd.google-apps.form",
        "application/vnd.google-apps.map",
        "application/vnd.google-apps.folder",
    };
}
lazy_static! {
    // Map from MIME type to the file extension used for local copies.
    static ref EXTENSIONS: HashMap<&'static str, &'static str> = hashmap! {
        "application/vnd.oasis.opendocument.text" => "odt",
        "image/png" => "png",
        "application/pdf" => "pdf",
        "image/jpeg" => "jpg",
        "text/x-csrc" => "C",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" => "xlsx",
    };
}
/// A handle to the Google Drive API for one authenticated session.
#[derive(Clone)]
pub struct GDriveInstance {
    // Drive Files API service (shared, scoped in `new`).
    files: Arc<FilesService>,
    // Drive Changes API service (shared, scoped in `new`).
    changes: Arc<ChangesService>,
    // Entries requested per list call (400 by default — see `new`).
    page_size: i32,
    // Optional cap on the number of keys fetched (`with_max_keys`).
    max_keys: Option<usize>,
    // Session name; used to derive the token and start-page-token file names.
    session_name: StackString,
    pub start_page_token_filename: PathBuf,
    pub start_page_token: Arc<AtomicCell<Option<usize>>>,
    // Client-side rate limiter; constructed as `RateLimiter::new(1000, 60000)`
    // in `new` — presumably 1000 calls per 60_000 ms; confirm against the
    // `stdout_channel` crate.
    rate_limit: RateLimiter,
}
impl Debug for GDriveInstance {
    /// Opaque debug representation — the inner service handles carry no
    /// useful printable state.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "GDriveInstance")
    }
}
impl GDriveInstance {
    /// Build a `GDriveInstance` for `session_name`, performing the OAuth
    /// installed flow and loading any persisted start-page-token.
    ///
    /// # Errors
    /// Return error if initialization fails
    pub async fn new(
        gdrive_token_path: &Path,
        gdrive_secret_file: &Path,
        session_name: &str,
    ) -> Result<Self, Error> {
        // Per-session file that records the Drive changes start-page-token.
        let fname = gdrive_token_path.join(format_sstr!("{session_name}_start_page_token"));
        debug!("{:?}", gdrive_secret_file);
        let https = https_client();
        // Read the OAuth application secret from disk.
        let sec = yup_oauth2::read_application_secret(gdrive_secret_file).await?;
        let token_file = gdrive_token_path.join(format_sstr!("{session_name}.json"));
        let parent = gdrive_token_path;
        // Make sure the token directory exists before the authenticator
        // tries to persist into it.
        if !parent.exists() {
            create_dir_all(parent).await?;
        }
        debug!("{:?}", token_file);
        // Installed-flow authenticator; tokens are persisted to `token_file`.
        let auth = InstalledFlowAuthenticator::builder(
            sec,
            common::yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
        )
        .persist_tokens_to_disk(token_file)
        .hyper_client(https.clone())
        .build()
        .await?;
        let auth = Arc::new(auth);
        let scopes = vec![DriveScopes::Drive];
        // Both services share the client and authenticator, with full Drive scope.
        let mut files = FilesService::new(https.clone(), auth.clone());
        files.set_scopes(scopes.clone());
        let mut changes = ChangesService::new(https, auth);
        changes.set_scopes(scopes);
        // Load a previously persisted start-page-token, if any.
        let start_page_token = Self::read_start_page_token(&fname).await?;
        Ok(Self {
            files: Arc::new(files),
            changes: Arc::new(changes),
            page_size: 400,
            max_keys: None,
            session_name: session_name.into(),
            start_page_token: Arc::new(AtomicCell::new(start_page_token)),
            start_page_token_filename: fname,
            // NOTE(review): presumably 1000 calls per 60_000 ms — confirm
            // `RateLimiter::new` argument semantics.
            rate_limit: RateLimiter::new(1000, 60000),
        })
    }
#[must_use]
pub fn with_max_keys(mut self, max_keys: usize) -> Self {
self.max_keys = Some(max_keys);
self
}
#[must_use]
pub fn with_page_size(mut self, page_size: i32) -> Self {
self.page_size = page_size;
self
}
/// # Errors
/// Return error if intialization fails
pub async fn read_start_page_token_from_file(&self) -> Result<(), Error> {
self.start_page_token
.store(Self::read_start_page_token(&self.start_page_token_filename).await?);
Ok(())
}
async fn get_filelist(
&self,
page_token: &Option<StackString>,
get_folders: bool,
parents: &Option<Vec<StackString>>,
) -> Result<FileList, Error> {
let fields = vec![
"name",
"id",
"size",
"mimeType",
"owners",
"parents",
"trashed",
"modifiedTime",
"createdTime",
"viewedByMeTime",
"md5Checksum",
"fileExtension",
"webContentLink",
];
let fields = format!("nextPageToken,files({})", fields.join(","));
let p = DriveParams {
fields: Some(fields),
..DriveParams::default()
};
debug!("page_size {}", self.page_size);
let mut params = FilesListParams {
drive_params: Some(p),
corpora: Some("user".into()),
spaces: Some("drive".into()),
page_size: Some(self.page_size),
page_token: page_token.clone().map(Into::into),
..FilesListParams::default()
};
let mut query_chain: Vec<StackString> = Vec::new();
if get_folders {
query_chain.push(r#"mimeType = 'application/vnd.google-apps.folder'"#.into());
} else {
query_chain.push(r#"mimeType != 'application/vnd.google-apps.folder'"#.into());
}
if let Some(ref p) = parents {
let q = p
.iter()
.map(|id| format_sstr!("'{id}' in parents"))
.join(" or ");
query_chain.push(format_sstr!("({q})"));
}
query_chain.push("trashed = false".into());
let query = query_chain.join(" and ");
debug!("query {}", query);
params.q = Some(query);
exponential_retry(|| async {
self.rate_limit.acquire().await;
self.files.list(¶ms).await
})
.await
}
/// # Errors
/// Return error if `get_filelist` fails
pub async fn get_all_files(&self, get_folders: bool) -> Result<Vec<File>, Error> {
let mut all_files = Vec::new();
let mut page_token: Option<StackString> = None;
loop {
let filelist = self.get_filelist(&page_token, get_folders, &None).await?;
if let Some(files) = filelist.files {
debug!("got files {}", files.len());
all_files.extend(files);
}
page_token = filelist.next_page_token.map(Into::into);
debug!("page_token {} {:?}", get_folders, page_token);
if page_token.is_none() {
break;
}
if let Some(max_keys) = self.max_keys {
if all_files.len() > max_keys {
all_files.resize_with(max_keys, Default::default);
break;
}
}
}
Ok(all_files)
}
/// # Errors
/// Return error if `get_all_files` fails
pub async fn get_all_file_info(
&self,
get_folders: bool,
directory_map: &HashMap<StackString, DirectoryInfo>,
) -> Result<Vec<GDriveInfo>, Error> {
let files = self.get_all_files(get_folders).await?;
self.convert_file_list_to_gdrive_info(&files, directory_map)
.await
}
/// # Errors
/// Return error if `from_object` fails
#[allow(clippy::manual_filter_map)]
pub async fn convert_file_list_to_gdrive_info(
&self,
flist: &[File],
directory_map: &HashMap<StackString, DirectoryInfo>,
) -> Result<Vec<GDriveInfo>, Error> {
let futures = flist
.iter()
.filter(|f| {
if let Some(owners) = f.owners.as_ref() {
if owners.is_empty() {
return false;
}
if owners[0].me != Some(true) {
return false;
}
} else {
return false;
}
if Self::is_unexportable(&f.mime_type) {
return false;
}
true
})
.map(|f| GDriveInfo::from_object(f, self, directory_map));
try_join_all(futures).await
}
/// # Errors
/// Return error if `get_filelist` fails
pub async fn process_list_of_keys<T, U>(
&self,
parents: &Option<Vec<StackString>>,
callback: T,
) -> Result<(), Error>
where
T: Fn(File) -> U,
U: Future<Output = Result<(), Error>>,
{
let mut n_processed = 0;
let mut page_token: Option<StackString> = None;
loop {
let mut filelist = self.get_filelist(&page_token, false, parents).await?;
if let Some(files) = filelist.files.take() {
for f in files {
callback(f).await?;
n_processed += 1;
}
}
page_token = filelist.next_page_token.map(Into::into);
if page_token.is_none() {
break;
}
if let Some(max_keys) = self.max_keys {
if n_processed > max_keys {
break;
}
}
}
Ok(())
}
/// # Errors
/// Return error if api call fails
pub async fn get_file_metadata(&self, id: &str) -> Result<File, Error> {
let p = DriveParams {
alt: Some(DriveParamsAlt::Json),
fields: Some("id,name,parents,mimeType,webContentLink".into()),
..DriveParams::default()
};
let params = FilesGetParams {
drive_params: Some(p),
file_id: id.into(),
..FilesGetParams::default()
};
exponential_retry(|| async {
self.rate_limit.acquire().await;
if let DownloadResult::Response(f) = self.files.get(¶ms).await?.do_it(None).await? {
Ok(f)
} else {
Err(format_err!("Failed to get metadata"))
}
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn create_directory(&self, directory: &Url, parentid: &str) -> Result<File, Error> {
let directory_path = directory
.to_file_path()
.map_err(|e| format_err!("No file path {e:?}"))?;
let directory_name = directory_path
.file_name()
.map(OsStr::to_string_lossy)
.ok_or_else(|| format_err!("Failed to convert string"))?;
let new_file = File {
name: Some(directory_name.into_owned()),
mime_type: Some("application/vnd.google-apps.folder".to_string()),
parents: Some(vec![parentid.to_string()]),
..File::default()
};
let params = FilesCreateParams::default();
exponential_retry(|| async {
self.rate_limit.acquire().await;
self.files.create(¶ms, &new_file).await
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn upload(&self, local: &Url, parentid: &str) -> Result<File, Error> {
let file_path = local
.to_file_path()
.map_err(|e| format_err!("No file path {e:?}"))?;
let file_obj = fs::File::open(&file_path).await?;
let mime: Mime = "application/octet-stream"
.parse()
.map_err(|e| format_err!("bad mimetype {e:?}"))?;
let new_file = File {
name: file_path
.as_path()
.file_name()
.and_then(OsStr::to_str)
.map(ToString::to_string),
parents: Some(vec![parentid.to_string()]),
mime_type: Some(mime.to_string()),
..File::default()
};
let params = FilesCreateParams {
..FilesCreateParams::default()
};
self.rate_limit.acquire().await;
let upload = self
.files
.create_resumable_upload(¶ms, &new_file)
.await?;
let resp = upload.upload_file(file_obj).await?;
Ok(resp)
}
pub fn is_unexportable<T: AsRef<str>>(mime_type: &Option<T>) -> bool {
mime_type.as_ref().map_or(false, |mime| {
UNEXPORTABLE_MIME_TYPES.contains::<str>(mime.as_ref())
})
}
/// # Errors
/// Return error if api call fails
pub async fn export(&self, gdriveid: &str, local: &Path, mime_type: &str) -> Result<(), Error> {
let params = FilesExportParams {
file_id: gdriveid.into(),
mime_type: mime_type.into(),
..FilesExportParams::default()
};
let mut outfile = fs::File::create(local).await?;
self.rate_limit.acquire().await;
self.files
.export(¶ms)
.await?
.do_it(Some(&mut outfile))
.await?;
Ok(())
}
/// # Errors
/// Return error if api call fails
pub async fn download<T>(
&self,
gdriveid: &str,
local: &Path,
mime_type: &Option<T>,
) -> Result<(), Error>
where
T: AsRef<str> + Debug,
{
if let Some(mime) = mime_type {
if UNEXPORTABLE_MIME_TYPES.contains::<str>(mime.as_ref()) {
return Err(format_err!(
"UNEXPORTABLE_FILE: The MIME type of this file is {mime:?}, which can not be \
exported from Drive. Web content link provided by Drive: {m:?}\n",
m = self
.get_file_metadata(gdriveid)
.await
.ok()
.map(|metadata| metadata.web_view_link)
.unwrap_or_default()
));
}
}
let export_type: Option<&'static str> = mime_type
.as_ref()
.and_then(|t| MIME_TYPES.get::<str>(t.as_ref()))
.copied();
if let Some(t) = export_type {
self.export(gdriveid, local, t).await
} else {
let p = DriveParams {
alt: Some(DriveParamsAlt::Media),
..DriveParams::default()
};
let params = FilesGetParams {
drive_params: Some(p),
file_id: gdriveid.into(),
supports_all_drives: Some(false),
..FilesGetParams::default()
};
let mut outfile = fs::File::create(&local).await?;
self.rate_limit.acquire().await;
if let DownloadResult::Downloaded = self
.files
.get(¶ms)
.await?
.do_it(Some(&mut outfile))
.await?
{
Ok(())
} else {
Err(format_err!("Failed to download"))
}
}
}
/// # Errors
/// Return error if api call fails
pub async fn move_to_trash(&self, id: &str) -> Result<(), Error> {
let f = File {
trashed: Some(true),
..File::default()
};
let params = FilesUpdateParams {
file_id: id.into(),
supports_all_drives: Some(false),
..FilesUpdateParams::default()
};
exponential_retry(|| async {
self.rate_limit.acquire().await;
self.files.update(¶ms, &f).await?;
Ok(())
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn delete_permanently(&self, id: &str) -> Result<(), Error> {
let params = FilesDeleteParams {
file_id: id.into(),
supports_all_drives: Some(false),
..FilesDeleteParams::default()
};
exponential_retry(|| async {
self.rate_limit.acquire().await;
self.files.delete(¶ms).await
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn move_to(&self, id: &str, parent: &str, new_name: &str) -> Result<(), Error> {
let current_parents = self
.get_file_metadata(id)
.await?
.parents
.unwrap_or_else(|| vec![String::from("root")])
.join(",");
let file = File {
name: Some(new_name.to_string()),
..File::default()
};
let params = FilesUpdateParams {
file_id: id.into(),
supports_all_drives: Some(false),
remove_parents: Some(current_parents),
add_parents: Some(parent.into()),
..FilesUpdateParams::default()
};
exponential_retry(|| async {
self.rate_limit.acquire().await;
self.files.update(¶ms, &file).await?;
Ok(())
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn get_directory_map(
&self,
) -> Result<(HashMap<StackString, DirectoryInfo>, Option<StackString>), Error> {
let mut root_id: Option<StackString> = None;
let mut dmap: HashMap<StackString, _> = self
.get_all_files(true)
.await?
.into_iter()
.filter_map(|d| {
if let Some(owners) = d.owners.as_ref() {
if owners.is_empty() {
return None;
}
if owners[0].me != Some(true) {
return None;
}
} else {
return None;
}
if let Some(gdriveid) = d.id.as_ref() {
if let Some(name) = d.name.as_ref() {
if let Some(parents) = d.parents.as_ref() {
if !parents.is_empty() {
return Some((
gdriveid.into(),
DirectoryInfo {
directory_id: gdriveid.into(),
directory_name: name.into(),
parentid: Some(parents[0].clone().into()),
},
));
}
} else {
if root_id.is_none()
&& d.name != Some("Chrome Syncable FileSystem".to_string())
{
root_id = Some(gdriveid.into());
}
return Some((
gdriveid.into(),
DirectoryInfo {
directory_id: gdriveid.into(),
directory_name: name.into(),
parentid: None,
},
));
}
}
}
None
})
.collect();
let unmatched_parents: HashSet<_> = dmap
.values()
.filter_map(|v| {
v.parentid.as_ref().and_then(|p| match dmap.get(p) {
Some(_) => None,
None => Some(p.to_string()),
})
})
.collect();
for parent in unmatched_parents {
let d = self.get_file_metadata(&parent).await?;
if let Some(gdriveid) = d.id.as_ref() {
if let Some(name) = d.name.as_ref() {
let parents = d
.parents
.as_ref()
.and_then(|p| p.get(0).map(ToString::to_string));
if parents.is_none()
&& root_id.is_none()
&& d.name != Some("Chrome Syncable FileSystem".to_string())
{
root_id = Some(gdriveid.into());
}
let val = DirectoryInfo {
directory_id: gdriveid.into(),
directory_name: name.into(),
parentid: parents.map(Into::into),
};
dmap.entry(gdriveid.into()).or_insert(val);
}
}
}
Ok((dmap, root_id))
}
#[must_use]
pub fn get_directory_name_map(
directory_map: &HashMap<StackString, DirectoryInfo>,
) -> HashMap<StackString, Vec<DirectoryInfo>> {
directory_map.values().fold(HashMap::new(), |mut h, m| {
let key = m.directory_name.clone();
let val = m.clone();
h.entry(key).or_insert_with(Vec::new).push(val);
h
})
}
/// # Errors
/// Return error if api call fails
pub async fn get_export_path(
&self,
finfo: &File,
dirmap: &HashMap<StackString, DirectoryInfo>,
) -> Result<Vec<StackString>, Error> {
let mut fullpath = Vec::new();
if let Some(name) = finfo.name.as_ref() {
fullpath.push(name.clone().into());
}
let mut pid: Option<StackString> = finfo
.parents
.as_ref()
.and_then(|parents| parents.get(0).map(|p| p.to_string().into()));
loop {
pid = if let Some(pid_) = pid.as_ref() {
if let Some(dinfo) = dirmap.get(pid_) {
fullpath.push(format_sstr!("{}/", dinfo.directory_name));
dinfo.parentid.clone()
} else {
self.get_file_metadata(pid_)
.await
.ok()
.as_ref()
.and_then(|f| f.parents.as_ref())
.and_then(|v| {
if v.is_empty() {
None
} else {
Some(v[0].to_string().into())
}
})
}
} else {
None
};
if pid.is_none() {
break;
}
}
Ok(fullpath.into_iter().rev().collect())
}
/// # Errors
/// Return error if api call fails
pub fn get_parent_id(
url: &Url,
dir_name_map: &HashMap<StackString, Vec<DirectoryInfo>>,
) -> Result<Option<StackString>, Error> {
let mut previous_parent_id: Option<StackString> = None;
if let Some(segments) = url.path_segments() {
for seg in segments {
let name = percent_decode(seg.as_bytes())
.decode_utf8_lossy()
.into_owned();
let mut matching_directory: Option<StackString> = None;
if let Some(parents) = dir_name_map.get(name.as_str()) {
for parent in parents {
if previous_parent_id.is_none() {
previous_parent_id = Some(parent.directory_id.clone());
matching_directory = Some(parent.directory_id.clone());
break;
}
if parent.parentid.is_some() && parent.parentid == previous_parent_id {
matching_directory = Some(parent.directory_id.clone());
}
}
}
if matching_directory.is_some() {
previous_parent_id = matching_directory.clone();
} else {
return Ok(previous_parent_id);
}
}
}
Ok(None)
}
/// # Errors
/// Return error if api call fails
pub async fn get_start_page_token(&self) -> Result<usize, Error> {
let params = ChangesGetStartPageTokenParams {
..ChangesGetStartPageTokenParams::default()
};
exponential_retry(|| async {
self.rate_limit.acquire().await;
if let Some(start_page_token) = self
.changes
.get_start_page_token(¶ms)
.await?
.start_page_token
{
Ok(start_page_token.parse()?)
} else {
Err(format_err!(
"Received OK response from drive but there is no startPageToken included."
))
}
})
.await
}
/// # Errors
/// Return error if api call fails
pub async fn store_start_page_token(&self, path: &Path) -> Result<(), Error> {
if let Some(start_page_token) = self.start_page_token.load().as_ref() {
let buf = StackString::from_display(start_page_token);
fs::write(path, buf).await?;
}
Ok(())
}
/// # Errors
/// Return error if api call fails
pub async fn read_start_page_token(path: &Path) -> Result<Option<usize>, Error> {
if !path.exists() {
return Ok(None);
}
let mut f = fs::File::open(path).await?;
let mut buf = String::new();
f.read_to_string(&mut buf).await?;
let start_page_token = buf.parse()?;
Ok(Some(start_page_token))
}
/// # Errors
/// Return error if api call fails
pub async fn get_all_changes(&self) -> Result<Vec<Change>, Error> {
if let Some(start_page_token) = self.start_page_token.load() {
let mut start_page_token = start_page_token.to_string();
let mut all_changes = Vec::new();
let changes_fields = ["kind", "type", "time", "removed", "fileId"].join(",");
let file_fields = [
"name",
"id",
"size",
"mimeType",
"owners",
"parents",
"trashed",
"modifiedTime",
"createdTime",
"viewedByMeTime",
"md5Checksum",
"fileExtension",
"webContentLink",
]
.join(",");
let fields = format!(
"kind,nextPageToken,newStartPageToken,changes({changes_fields},\
file({file_fields}))"
);
loop {
let p = DriveParams {
fields: Some(fields.clone()),
..DriveParams::default()
};
let params = ChangesListParams {
drive_params: Some(p),
page_token: start_page_token,
spaces: Some("drive".into()),
restrict_to_my_drive: Some(true),
include_removed: Some(true),
supports_all_drives: Some(false),
page_size: Some(self.page_size),
..ChangesListParams::default()
};
self.rate_limit.acquire().await;
let changelist = self.changes.list(¶ms).await?;
if let Some(changes) = changelist.changes {
all_changes.extend(changes);
} else {
debug!("Changelist does not contain any changes!");
break;
}
if changelist.new_start_page_token.is_some() {
break;
}
match changelist.next_page_token {
Some(token) => start_page_token = token,
None => break,
};
}
Ok(all_changes)
} else {
Ok(Vec::new())
}
}
}
/// Normalized description of one Drive file, ready for the sync layer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct GDriveInfo {
    pub filename: StackString,        // file name (last path component)
    pub filepath: PathBuf,            // local export path built from the directory map
    pub urlname: Url,                 // gdrive://<session>/<path> URL
    pub md5sum: Option<StackString>,  // Drive-reported MD5, when present
    pub sha1sum: Option<StackString>, // never set by from_object; reserved
    pub filestat: (u32, u32),         // (modified unix time, size in bytes)
    pub serviceid: StackString,       // Drive file id
    pub servicesession: StackString,  // session name of the owning GDriveInstance
}
impl GDriveInfo {
    /// Build a `GDriveInfo` from a raw Drive `File`, resolving its export path
    /// through `directory_map` (falling back to metadata lookups via `gdrive`).
    ///
    /// # Errors
    /// Return error if api call fails
    pub async fn from_object(
        item: &File,
        gdrive: &GDriveInstance,
        directory_map: &HashMap<StackString, DirectoryInfo>,
    ) -> Result<Self, Error> {
        let filename = item
            .name
            .as_ref()
            .ok_or_else(|| format_err!("No filename"))?;
        let md5sum = item.md5_checksum.as_ref().and_then(|x| x.parse().ok());
        let st_mtime = item
            .modified_time
            .as_ref()
            .ok_or_else(|| format_err!("No last modified"))?
            .unix_timestamp();
        let size: u32 = item.size.as_ref().and_then(|x| x.parse().ok()).unwrap_or(0);
        let serviceid = item.id.as_ref().ok_or_else(|| format_err!("No ID"))?.into();
        let servicesession = gdrive.session_name.parse()?;
        let export_path = gdrive.get_export_path(item, directory_map).await?;
        let filepath = export_path.iter().fold(PathBuf::new(), |mut p, e| {
            p.push(e.as_str());
            p
        });
        let urlname = format_sstr!("gdrive://{}/", gdrive.session_name);
        let urlname = Url::parse(&urlname)?;
        let urlname = export_path.iter().try_fold(urlname, |u, e| {
            if e.contains('#') {
                // '#' would start a URL fragment; percent-encode it.
                // BUGFIX: the escape for '#' is hex %23, not %35 ("%35" decodes
                // to the digit '5'; 35 is the *decimal* ASCII code of '#').
                u.join(&e.replace('#', "%23"))
            } else {
                u.join(e)
            }
        })?;
        let finfo = Self {
            filename: filename.into(),
            filepath,
            urlname,
            md5sum,
            sha1sum: None,
            // filestat is (u32, u32); timestamps past 2106 would truncate here.
            filestat: (st_mtime as u32, size),
            serviceid,
            servicesession,
        };
        if item.id == Some("1t4plcsKgXK_NB025K01yFLKwljaTeM3i".to_string()) {
            debug!("{:?}, {:?}", item, finfo);
        }
        Ok(finfo)
    }
    /// Build a `GDriveInfo` from a changes-feed entry; errors when the change
    /// carries no file payload.
    ///
    /// # Errors
    /// Return error if api call fails
    pub async fn from_changes_object(
        item: Change,
        gdrive: &GDriveInstance,
        directory_map: &HashMap<StackString, DirectoryInfo>,
    ) -> Result<Self, Error> {
        let file = item.file.ok_or_else(|| format_err!("No file"))?;
        Self::from_object(&file, gdrive, directory_map).await
    }
}
|
use crate::primitive::{Quad, Sphere};
use glam::Vec3;
use serde::Deserialize;
use std::io::Read;
/// A point light source with Phong-style diffuse and specular color terms.
#[derive(Debug, Deserialize)]
pub struct Light {
    pub position: Vec3, // world-space position
    pub diffuse: Vec3,  // diffuse RGB intensity
    pub specular: Vec3, // specular RGB intensity
}
/// A complete renderable scene as deserialized from a RON file.
#[derive(Debug, Deserialize)]
pub struct Scene {
    pub shininess: f32,          // global Phong shininess exponent
    pub antialias: u32,          // samples per pixel axis (1 = off)
    pub background: Vec3,        // RGB color for rays that miss everything
    pub max_depth: u8,           // recursion limit for secondary rays
    pub resolution: (u32, u32),  // output image (width, height)
    pub lights: Vec<Light>,
    pub spheres: Vec<Sphere>,
    pub quads: Vec<Quad>,
    pub camera: Camera,
}
/// The viewpoint from which the scene is rendered.
#[derive(Debug, Deserialize)]
pub struct Camera {
    pub position: Vec3,  // eye position in world space
    pub direction: Vec3, // viewing direction
    pub fov: f32,        // field of view (units per the renderer's convention)
}
impl Scene {
    /// Deserialize a `Scene` from a RON-encoded reader.
    ///
    /// # Errors
    /// Returns a `ron` error if reading or deserialization fails.
    pub fn parse<T: Read>(input: T) -> ron::Result<Self> {
        // Tail expression instead of explicit `return` (idiomatic Rust).
        ron::de::from_reader(input)
    }
}
|
use nom::branch::alt;
use nom::bytes::complete::{tag, take_until};
use nom::character::complete::{digit0, space0};
use nom::character::is_alphabetic;
use nom::sequence::tuple;
use nom::{dbg_dmp, named, tag, take_while, IResult};
use std::collections::BTreeMap;
use std::convert::TryInto;
/// One parsed element of assembly source.
#[derive(Debug, PartialEq)]
pub enum AST<'a> {
    /// A `.pos` directive setting the assembly position counter.
    Pos(u16),
    /// A label definition (the bytes before the ':').
    Symbol(&'a [u8]),
    /// A parsed instruction.
    Order(IComplie<'a>),
}
/// Instruction opcodes; the discriminant is the encoded opcode byte.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum IOrder {
    HALT = 0x0,
    NOP = 0x10,
    IRMOVQ = 0x28,  // immediate -> register
    RRMOVQ = 0x38,  // register -> register
    MRMOVQ = 0x48,  // memory -> register
    RMMOVQ = 0x58,  // register -> memory
    OUT = 0x80,
    // ALU operations
    ADDQ = 0x61,
    SUBQ = 0x62,
    MULQ = 0x63,
    DIVQ = 0x64,
    ANDQ = 0x65,
    ORQ = 0x66,
    XORQ = 0x67,
    // Unconditional and conditional jumps
    JMP = 0x70,
    JE = 0x71,
    JNE = 0x72,
    JS = 0x73,
    JNS = 0x74,
    JG = 0x75,
    JGE = 0x76,
    JL = 0x77,
    JLE = 0x78,
    JA = 0x79,
    JAE = 0x7A,
    JB = 0x7B,
    JBE = 0x7C,
    CALL = 0xA0,
    RET = 0xB0,
    IRET = 0xE0,
    /// Pseudo-opcode for `.quad` constant data, not a real instruction.
    CONST = 0xFF,
}
impl Default for IOrder {
fn default() -> Self {
IOrder::NOP
}
}
/// One compiled instruction: opcode plus decoded operands.
// NOTE(review): the name is presumably a typo for `ICompile`; renaming would
// break external callers, so it is kept as-is.
#[derive(Default, Debug, PartialEq, Clone)]
pub struct IComplie<'a> {
    pub iorder: IOrder,            // opcode
    pub r_a: u8,                   // first register operand (index into REGS)
    pub r_b: u8,                   // second register operand (index into REGS)
    pub val_c: u16,                // immediate / displacement value
    pub symbol: Option<&'a [u8]>,  // unresolved label reference, if any
    pub len: u8,                   // encoded length in bytes
}
/// A fully parsed source file: instructions keyed by address and the symbol table.
#[derive(Default, Debug)]
pub struct IFile<'a> {
    pub complies: BTreeMap<u16, IComplie<'a>>, // address -> instruction
    pub symbols: BTreeMap<&'a [u8], u16>,      // label -> address
}
// Legacy nom macro parsers: `method` consumes a run of ASCII letters (a
// mnemonic or label); the rest each match one literal register name.
// NOTE(review): these use the deprecated nom macro API — migrating to the
// function combinators (`tag`, `take_while`) would modernize them; confirm
// the nom version in use before doing so.
named!(method, take_while!(is_alphabetic));
named!(rsp, tag!("rsp"));
named!(r1, tag!("r1"));
named!(r2, tag!("r2"));
named!(r3, tag!("r3"));
named!(r4, tag!("r4"));
named!(r5, tag!("r5"));
named!(r6, tag!("r6"));
named!(r7, tag!("r7"));
named!(r8, tag!("r8"));
named!(r9, tag!("r9"));
named!(r10, tag!("r10"));
named!(r11, tag!("r11"));
named!(r12, tag!("r12"));
named!(r13, tag!("r13"));
named!(r14, tag!("r14"));
// Register parsers in encoding order: the position in this array is the
// register number produced by `parse_reg` (r1 => 0, ..., r14 => 13, rsp => 14).
const REGS: [for<'r> fn(
    &'r [u8],
) -> std::result::Result<
    (&'r [u8], &'r [u8]),
    nom::Err<(&'r [u8], nom::error::ErrorKind)>,
>; 15] = [
    r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, rsp,
];
//fn parse(input: &[u8]) -> IResult<&[u8], IComplie> {
//let (input, symbol): (&[u8], Option<&[u8]>) = match symbol(input) {
//Ok((input, symbol)) => (input, Some(symbol)),
//Err(nom::Err::Error((input, _))) => (input, None),
//};
//if symbol.is_none() {
//let (input, method) = method(input)?;
//let (input, _) = space(input)?;
//}
//unimplemented!();
//}
fn parse_reg(input: &[u8]) -> IResult<&[u8], u8> {
let (input, _) = tag("%")(input)?;
for (i, reg) in REGS.iter().enumerate() {
match reg(input) {
Ok((input, _)) => return Ok((input, i.try_into().unwrap())),
_ => (),
};
}
return Err(nom::Err::Failure((input, nom::error::ErrorKind::NoneOf)));
}
fn parse_val(input: &[u8]) -> IResult<&[u8], u16> {
let (input, (_, value)) = tuple((tag("$"), digit0))(input)?;
let value = std::str::from_utf8(value)
.unwrap()
.parse::<u16>()
.ok()
.unwrap();
return Ok((input, value));
}
fn parse_val_and_reg(input: &[u8]) -> IResult<&[u8], (u16, u8)> {
let (input, (value, _, reg, _)) = tuple((digit0, tag("("), parse_reg, tag(")")))(input)?;
let value = std::str::from_utf8(value)
.unwrap()
.parse::<u16>()
.ok()
.unwrap();
return Ok((input, (value, reg)));
}
/// Parse a jump or call instruction: `<mnemonic> <label>`.
fn parse_jxx_call(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (mnemonic, _, target)) = tuple((method, space0, method))(input)?;
    let iorder = match mnemonic {
        b"JMP" | b"jmp" => IOrder::JMP,
        b"JE" | b"je" => IOrder::JE,
        b"JNE" | b"jne" => IOrder::JNE,
        b"JS" | b"js" => IOrder::JS,
        b"JNS" | b"jns" => IOrder::JNS,
        b"JG" | b"jg" => IOrder::JG,
        b"JGE" | b"jge" => IOrder::JGE,
        b"JL" | b"jl" => IOrder::JL,
        b"JLE" | b"jle" => IOrder::JLE,
        b"JA" | b"ja" => IOrder::JA,
        b"JAE" | b"jae" => IOrder::JAE,
        b"JB" | b"jb" => IOrder::JB,
        b"JBE" | b"jbe" => IOrder::JBE,
        b"CALL" | b"call" => IOrder::CALL,
        _ => return Err(nom::Err::Error((input, nom::error::ErrorKind::Tag))),
    };
    let order = IComplie {
        iorder,
        symbol: Some(target),
        len: 3,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
fn parse_out(input: &[u8]) -> IResult<&[u8], AST> {
let (input, (method, _, valA)) = tuple((method, space0, parse_reg))(input)?;
let iorder = match method {
b"OUT" | b"out" => IOrder::OUT,
_ => return Err(nom::Err::Error((input, nom::error::ErrorKind::Tag))),
};
return Ok((
input,
AST::Order(IComplie {
iorder: iorder,
len: 2,
r_a: valA,
..IComplie::default()
}),
));
}
/// Parse a zero-operand instruction: halt / nop / ret / iret.
fn parse_ret_nop_halt(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, mnemonic) = method(input)?;
    let iorder = match mnemonic {
        b"HALT" | b"halt" => IOrder::HALT,
        b"NOP" | b"nop" => IOrder::NOP,
        b"RET" | b"ret" => IOrder::RET,
        b"IRET" | b"iret" => IOrder::IRET,
        _ => return Err(nom::Err::Error((input, nom::error::ErrorKind::Tag))),
    };
    let order = IComplie {
        iorder,
        len: 1,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Parse a two-register instruction: ALU ops and rrmovq (`op %rA,%rB`).
fn parse_opq_rrmovq(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (mnemonic, _, r_a, _, r_b)) =
        tuple((method, space0, parse_reg, tag(","), parse_reg))(input)?;
    let iorder = match mnemonic {
        b"ADDQ" | b"addq" => IOrder::ADDQ,
        b"SUBQ" | b"subq" => IOrder::SUBQ,
        b"MULQ" | b"mulq" => IOrder::MULQ,
        b"DIVQ" | b"divq" => IOrder::DIVQ,
        b"ANDQ" | b"andq" => IOrder::ANDQ,
        b"ORQ" | b"orq" => IOrder::ORQ,
        b"XORQ" | b"xorq" => IOrder::XORQ,
        b"RRMOVQ" | b"rrmovq" => IOrder::RRMOVQ,
        _ => return Err(nom::Err::Error((input, nom::error::ErrorKind::Tag))),
    };
    let order = IComplie {
        iorder,
        r_a,
        r_b,
        len: 2,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Parse `irmovq $imm,%reg` (immediate form; the mnemonic itself is not checked).
fn parse_irmovq(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (_, _, val_c, _, r_b)) =
        tuple((method, space0, parse_val, tag(","), parse_reg))(input)?;
    let order = IComplie {
        iorder: IOrder::IRMOVQ,
        val_c,
        r_b,
        len: 4,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Parse `irmovq <label>,%reg` (symbolic immediate, resolved later).
fn parse_irmovq_symbol(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (_, _, label, _, r_b)) =
        tuple((tag("irmovq"), space0, method, tag(","), parse_reg))(input)?;
    let order = IComplie {
        iorder: IOrder::IRMOVQ,
        symbol: Some(label),
        r_b,
        len: 4,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Parse `mrmovq D(%rB),%rA` (memory -> register).
fn parse_mrmovq(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (_, _, (val_c, r_b), _, r_a)) =
        tuple((method, space0, parse_val_and_reg, tag(","), parse_reg))(input)?;
    let order = IComplie {
        iorder: IOrder::MRMOVQ,
        val_c,
        r_b,
        r_a,
        len: 4,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Parse `rmmovq %rA,D(%rB)` (register -> memory).
fn parse_rmmovq(input: &[u8]) -> IResult<&[u8], AST> {
    let (input, (_, _, r_a, _, (val_c, r_b))) =
        tuple((method, space0, parse_reg, tag(","), parse_val_and_reg))(input)?;
    let order = IComplie {
        iorder: IOrder::RMMOVQ,
        val_c,
        r_b,
        r_a,
        len: 4,
        ..IComplie::default()
    };
    Ok((input, AST::Order(order)))
}
/// Try each instruction parser in turn; the first to match wins.
///
/// CLEANUP: removed the leftover `dbg_dmp` debug wrappers, which hex-dumped
/// the input to stderr on every parse failure of the irmovq alternatives.
fn parse_order(input: &[u8]) -> IResult<&[u8], AST> {
    alt((
        parse_irmovq,
        parse_irmovq_symbol,
        parse_opq_rrmovq,
        parse_rmmovq,
        parse_mrmovq,
        parse_jxx_call,
        parse_ret_nop_halt,
        parse_out,
    ))(input)
}
fn parse_point(input: &[u8]) -> IResult<&[u8], AST> {
let (input, (_, _, val)) = tuple((tag(".pos"), space0, digit0))(input)?;
let val = std::str::from_utf8(val)
.unwrap()
.parse::<u16>()
.ok()
.unwrap();
return Ok((input, AST::Pos(val)));
}
fn parse_quad(input: &[u8]) -> IResult<&[u8], AST> {
let (input, (_, _, val_c)) = tuple((tag(".quad"), space0, digit0))(input)?;
let val_c = std::str::from_utf8(val_c)
.unwrap()
.parse::<u16>()
.ok()
.unwrap();
return Ok((
input,
AST::Order(IComplie {
iorder: IOrder::CONST,
val_c,
len: 2,
..IComplie::default()
}),
));
}
/// Parse a label definition: everything up to (but not consuming) the ':'.
fn parse_symbol(input: &[u8]) -> IResult<&[u8], AST> {
    let (rest, label) = take_until(":")(input)?;
    Ok((rest, AST::Symbol(label)))
}
/// Parse one source element: optional leading whitespace, then a directive,
/// label, or instruction (tried in that order).
pub fn parse(input: &[u8]) -> IResult<&[u8], AST> {
    let alternatives = alt((parse_point, parse_quad, parse_symbol, parse_order));
    let (rest, (_, ast)) = tuple((space0, alternatives))(input)?;
    Ok((rest, ast))
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn parse_halt_test() {
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::HALT,
                len: 1,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b" halt\n"), answer)
    }
    #[test]
    fn parse_nop_test() {
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::NOP,
                len: 1,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b" nop\n"), answer)
    }
    #[test]
    fn parse_irmovq_test() {
        // NOTE(review): this expectation looks out of sync with the parser —
        // `$123456` overflows u16 in parse_val, and parse_irmovq sets len 4,
        // not 10. Confirm intended encoding widths before trusting this test.
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::IRMOVQ,
                val_c: 3456,
                r_b: 0,
                len: 10,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b" irmovq $123456,%r1\n"), answer)
    }
    #[test]
    fn parse_mrmovq_test() {
        // NOTE(review): expects len 10 but parse_mrmovq sets len 4 — confirm.
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::MRMOVQ,
                val_c: 9,
                r_a: 0,
                r_b: 1,
                len: 10,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b"mrmovq 9(%r2),%r1\n"), answer)
    }
    #[test]
    fn parse_rmmovq_test() {
        // NOTE(review): expects len 10 but parse_rmmovq sets len 4 — confirm.
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::RMMOVQ,
                val_c: 9,
                r_a: 0,
                r_b: 1,
                len: 10,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b"rmmovq %r1,9(%r2)\n"), answer)
    }
    #[test]
    fn parse_rrmovq_test() {
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::RRMOVQ,
                r_a: 0,
                r_b: 1,
                len: 2,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b"rrmovq %r1,%r2\n"), answer)
    }
    #[test]
    fn parse_ret_test() {
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::RET,
                len: 1,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b" ret\n"), answer)
    }
    #[test]
    fn parse_pos_test() {
        let answer: IResult<&[u8], AST> = Ok((b"\n", AST::Pos(87)));
        assert_eq!(parse(b" .pos 87\n"), answer)
    }
    #[test]
    fn parse_symbol_test() {
        // take_until(":") does not consume the colon, so ":\n" remains.
        let answer: IResult<&[u8], AST> = Ok((b":\n", AST::Symbol(b"Stack")));
        assert_eq!(parse(b"Stack:\n"), answer)
    }
    #[test]
    fn parse_call_test() {
        // NOTE(review): expects len 9 but parse_jxx_call sets len 3 — confirm.
        let answer: IResult<&[u8], AST> = Ok((
            b"\n",
            AST::Order(IComplie {
                iorder: IOrder::CALL,
                symbol: Some(b"main"),
                len: 9,
                ..IComplie::default()
            }),
        ));
        assert_eq!(parse(b" call main\n"), answer)
    }
}
|
#[macro_use]
extern crate validator_derive;
extern crate dotenv;
extern crate validator;
use actix_web::{middleware, web, App, HttpServer};
use dotenv::dotenv;
use postgres::NoTls;
use r2d2_postgres::PostgresConnectionManager;
use std::env;
mod accounts;
mod handler;
mod auth;
mod jwt;
mod model;
/// Entry point: configure logging, load `.env`, build the r2d2 Postgres pool,
/// and serve the auth/account routes on 127.0.0.1:8088.
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    std::env::set_var("RUST_LOG", "actix_web=debug");
    env_logger::init();
    dotenv().ok();
    // r2d2 pool, shared across worker threads via `move` + per-worker `clone`.
    // BUGFIX: corrected the typo in the panic message ("Faild" -> "Failed").
    let pool = r2d2::Pool::new(get_postgre_manager()).expect("Failed to build postgres connection.");
    HttpServer::new(move || {
        App::new()
            .data(pool.clone())
            .wrap(middleware::Logger::default())
            .wrap(middleware::DefaultHeaders::new().header("Access-Control-Allow-Origin", "*"))
            .route("/", web::get().to(handler::index))
            .route("/signup", web::post().to(handler::signup))
            .route("/login", web::post().to(handler::login))
            .route("/verify", web::post().to(handler::verify))
    })
    .bind("127.0.0.1:8088")?
    .run()
    .await
}
/// Build a Postgres connection manager from `POSTGRES_HOST`, `POSTGRES_USER`,
/// and `POSTGRES_PASSWORD` environment variables.
///
/// # Panics
/// Panics with a message naming the missing variable when any of the three env
/// vars is unset, or if the assembled config string fails to parse.
fn get_postgre_manager() -> PostgresConnectionManager<NoTls> {
    // ROBUSTNESS: bare `unwrap()` gave no hint which variable was missing.
    let host = env::var("POSTGRES_HOST").expect("POSTGRES_HOST must be set");
    let user = env::var("POSTGRES_USER").expect("POSTGRES_USER must be set");
    let password = env::var("POSTGRES_PASSWORD").expect("POSTGRES_PASSWORD must be set");
    let config = format!("host={} user={} password={}", host, user, password);
    PostgresConnectionManager::new(
        config.parse().expect("invalid postgres connection config"),
        NoTls,
    )
}
|
// Machine-generated (svd2rust-style) reader/writer type aliases for the SR
// status register — presumably a USART status register given the PE/FE/ORE/
// RXNE/TXE/LBD flags; confirm against the device's reference manual.
#[doc = "Register `SR` reader"]
pub type R = crate::R<SR_SPEC>;
#[doc = "Register `SR` writer"]
pub type W = crate::W<SR_SPEC>;
#[doc = "Field `PE` reader - Parity error"]
pub type PE_R = crate::BitReader;
#[doc = "Field `FE` reader - Framing error"]
pub type FE_R = crate::BitReader;
#[doc = "Field `NE` reader - Noise error flag"]
pub type NE_R = crate::BitReader;
#[doc = "Field `ORE` reader - Overrun error"]
pub type ORE_R = crate::BitReader;
#[doc = "Field `IDLE` reader - IDLE line detected"]
pub type IDLE_R = crate::BitReader;
#[doc = "Field `RXNE` reader - Read data register not empty"]
pub type RXNE_R = crate::BitReader;
#[doc = "Field `RXNE` writer - Read data register not empty"]
pub type RXNE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TC` reader - Transmission complete"]
pub type TC_R = crate::BitReader;
#[doc = "Field `TC` writer - Transmission complete"]
pub type TC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `TXE` reader - Transmit data register empty"]
pub type TXE_R = crate::BitReader;
#[doc = "Field `LBD` reader - LIN break detection flag"]
pub type LBD_R = crate::BitReader;
#[doc = "Field `LBD` writer - LIN break detection flag"]
pub type LBD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Generated read accessors: each method extracts a single bit of the
// captured `SR` value (bit position is encoded in the shift amount).
impl R {
    #[doc = "Bit 0 - Parity error"]
    #[inline(always)]
    pub fn pe(&self) -> PE_R {
        PE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Framing error"]
    #[inline(always)]
    pub fn fe(&self) -> FE_R {
        FE_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Noise error flag"]
    #[inline(always)]
    pub fn ne(&self) -> NE_R {
        NE_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - Overrun error"]
    #[inline(always)]
    pub fn ore(&self) -> ORE_R {
        ORE_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - IDLE line detected"]
    #[inline(always)]
    pub fn idle(&self) -> IDLE_R {
        IDLE_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Read data register not empty"]
    #[inline(always)]
    pub fn rxne(&self) -> RXNE_R {
        RXNE_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Transmission complete"]
    #[inline(always)]
    pub fn tc(&self) -> TC_R {
        TC_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - Transmit data register empty"]
    #[inline(always)]
    pub fn txe(&self) -> TXE_R {
        TXE_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - LIN break detection flag"]
    #[inline(always)]
    pub fn lbd(&self) -> LBD_R {
        LBD_R::new(((self.bits >> 8) & 1) != 0)
    }
}
// Generated write accessors: only the writable fields (RXNE, TC, LBD)
// get proxies; the const generic parameter is the field's bit offset.
impl W {
    #[doc = "Bit 5 - Read data register not empty"]
    #[inline(always)]
    #[must_use]
    pub fn rxne(&mut self) -> RXNE_W<SR_SPEC, 5> {
        RXNE_W::new(self)
    }
    #[doc = "Bit 6 - Transmission complete"]
    #[inline(always)]
    #[must_use]
    pub fn tc(&mut self) -> TC_W<SR_SPEC, 6> {
        TC_W::new(self)
    }
    #[doc = "Bit 8 - LIN break detection flag"]
    #[inline(always)]
    #[must_use]
    pub fn lbd(&mut self) -> LBD_W<SR_SPEC, 8> {
        LBD_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Marked `unsafe` by the generator: callers must supply a bit pattern
    // that is valid for this hardware register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`sr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SR_SPEC;
// The register is accessed as a 32-bit word.
impl crate::RegisterSpec for SR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`sr::R`](R) reader structure"]
impl crate::Readable for SR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`sr::W`](W) writer structure"]
impl crate::Writable for SR_SPEC {
    // No write-0-to-clear / write-1-to-clear fields are declared here.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SR to value 0"]
impl crate::Resettable for SR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::options::{self, BuildMode, BuildOptions, Sanitizer};
use crate::utils::default_target;
use anyhow::{anyhow, bail, Context, Result};
use std::collections::HashSet;
use std::io::Read;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::{
env, ffi, fs,
process::{Command, Stdio},
time,
};
/// Name of the default fuzz directory, relative to the project root.
const DEFAULT_FUZZ_DIR: &str = "fuzz";
/// A `cargo fuzz` project on disk.
pub struct FuzzProject {
    /// The project with fuzz targets
    fuzz_dir: PathBuf,
    /// The project being fuzzed
    project_dir: PathBuf,
    /// Names of the fuzz-target binaries collected from the fuzz manifest.
    targets: Vec<String>,
}
impl FuzzProject {
/// Creates a new instance.
///
/// Find an existing `cargo fuzz` project by starting at the current
/// directory and walking up the filesystem.
///
/// If `fuzz_dir_opt` is `None`, returns a new instance with the default fuzz project
/// path.
pub fn new(fuzz_dir_opt: Option<PathBuf>) -> Result<Self> {
    let mut project = Self::manage_initial_instance(fuzz_dir_opt)?;
    let manifest = project.manifest()?;
    // Refuse manifests that lack `package.metadata.cargo-fuzz = true`, so
    // an ordinary crate is not mistaken for a fuzz project.
    if !is_fuzz_manifest(&manifest) {
        bail!(
            "manifest `{}` does not look like a cargo-fuzz manifest. \
             Add following lines to override:\n\
             [package.metadata]\n\
             cargo-fuzz = true",
            project.manifest_path().display()
        );
    }
    project.targets = collect_targets(&manifest);
    Ok(project)
}
/// Creates the fuzz project structure and returns a new instance.
///
/// This will not clone libfuzzer-sys.
/// Similar to `FuzzProject::new`, the fuzz directory will depend on `fuzz_dir_opt`.
pub fn init(init: &options::Init, fuzz_dir_opt: Option<PathBuf>) -> Result<Self> {
    let project = Self::manage_initial_instance(fuzz_dir_opt)?;
    let fuzz_project = project.fuzz_dir();
    let root_project_manifest_path = project.project_dir.join("Cargo.toml");
    let manifest = Manifest::parse(&root_project_manifest_path)?;
    // TODO: check if the project is already initialized
    fs::create_dir(fuzz_project)
        .with_context(|| format!("failed to create directory {}", fuzz_project.display()))?;
    let fuzz_targets_dir = fuzz_project.join(crate::FUZZ_TARGETS_DIR);
    fs::create_dir(&fuzz_targets_dir).with_context(|| {
        format!("failed to create directory {}", fuzz_targets_dir.display())
    })?;
    // Write the fuzz crate's manifest, derived from the root crate's
    // name and edition via the template macro.
    let cargo_toml = fuzz_project.join("Cargo.toml");
    let mut cargo = fs::File::create(&cargo_toml)
        .with_context(|| format!("failed to create {}", cargo_toml.display()))?;
    cargo
        .write_fmt(toml_template!(manifest.crate_name, manifest.edition))
        .with_context(|| format!("failed to write to {}", cargo_toml.display()))?;
    // Write a `.gitignore` for the new fuzz crate.
    let gitignore = fuzz_project.join(".gitignore");
    let mut ignore = fs::File::create(&gitignore)
        .with_context(|| format!("failed to create {}", gitignore.display()))?;
    ignore
        .write_fmt(gitignore_template!())
        .with_context(|| format!("failed to write to {}", gitignore.display()))?;
    project
        .create_target_template(&init.target, &manifest)
        .with_context(|| {
            format!(
                "could not create template file for target {:?}",
                init.target
            )
        })?;
    Ok(project)
}
/// Print the name of every known fuzz target, one per line, to stdout.
pub fn list_targets(&self) -> Result<()> {
    self.targets.iter().for_each(|bin| println!("{}", bin));
    Ok(())
}
/// Create a new fuzz target.
///
/// Sets up the target's corpus and artifact directories and writes a
/// template script plus a `[[bin]]` manifest entry for it.
pub fn add_target(&self, add: &options::Add, manifest: &Manifest) -> Result<()> {
    // Create corpus and artifact directories for the newly added target
    self.corpus_for(&add.target)?;
    self.artifacts_for(&add.target)?;
    self.create_target_template(&add.target, manifest)
        .with_context(|| format!("could not add target {:?}", add.target))
}
/// Add a new fuzz target script with a given name
///
/// Writes `<fuzz_targets>/<target>.rs` from the target template and appends
/// a `[[bin]]` section for it to the fuzz crate's `Cargo.toml`.
fn create_target_template(&self, target: &str, manifest: &Manifest) -> Result<()> {
    let target_path = self.target_path(target);
    // If the user manually created a fuzz project, but hasn't created any
    // targets yet, the `fuzz_targets` directory might not exist yet,
    // despite a `fuzz/Cargo.toml` manifest with the `metadata.cargo-fuzz`
    // key present. Make sure it does exist.
    fs::create_dir_all(self.fuzz_targets_dir())
        .context("ensuring that `fuzz_targets` directory exists failed")?;
    // `create_new(true)` means an existing target script is never
    // silently overwritten.
    let mut script = fs::OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(&target_path)
        .with_context(|| format!("could not create target script file at {:?}", target_path))?;
    script.write_fmt(target_template!(manifest.edition))?;
    let mut cargo = fs::OpenOptions::new()
        .append(true)
        .open(self.manifest_path())?;
    Ok(cargo.write_fmt(toml_bin_template!(target))?)
}
/// Build a `cargo <subcommand>` command configured for fuzzing.
///
/// Sets the target triple and profile, forwards feature/unstable flags,
/// assembles sanitizer- and coverage-related `RUSTFLAGS`, and seeds
/// default ASAN/TSAN runtime options. The exact statement order matters:
/// `RUSTFLAGS` is accumulated as a single space-separated string.
fn cargo(&self, subcommand: &str, build: &BuildOptions) -> Result<Command> {
    let mut cmd = Command::new("cargo");
    cmd.arg(subcommand)
        .arg("--manifest-path")
        .arg(self.manifest_path())
        // --target=<TARGET> won't pass rustflags to build scripts
        .arg("--target")
        .arg(&build.triple);
    // we default to release mode unless debug mode is explicitly requested
    if !build.dev {
        cmd.arg("--release");
    }
    if build.verbose {
        cmd.arg("--verbose");
    }
    if build.no_default_features {
        cmd.arg("--no-default-features");
    }
    if build.all_features {
        cmd.arg("--all-features");
    }
    if let Some(ref features) = build.features {
        cmd.arg("--features").arg(features);
    }
    for flag in &build.unstable_flags {
        cmd.arg("-Z").arg(flag);
    }
    if (matches!(build.sanitizer, Sanitizer::Memory) || build.build_std || build.careful_mode)
        && !build.coverage
    {
        cmd.arg("-Z").arg("build-std");
    }
    // Base flags: enable LLVM's SanitizerCoverage instrumentation.
    let mut rustflags: String = "-Cpasses=sancov-module \
         -Cllvm-args=-sanitizer-coverage-level=4 \
         -Cllvm-args=-sanitizer-coverage-inline-8bit-counters \
         -Cllvm-args=-sanitizer-coverage-pc-table"
        .to_owned();
    if !build.no_trace_compares {
        rustflags.push_str(" -Cllvm-args=-sanitizer-coverage-trace-compares");
    }
    if !build.no_cfg_fuzzing {
        rustflags.push_str(" --cfg fuzzing");
    }
    if build.cfg_fuzzing_repro {
        rustflags.push_str(" --cfg fuzzing_repro");
    }
    if !build.strip_dead_code {
        rustflags.push_str(" -Clink-dead-code");
    }
    if build.coverage {
        rustflags.push_str(" -Cinstrument-coverage");
    }
    match build.sanitizer {
        Sanitizer::None => {}
        Sanitizer::Memory => {
            // Memory sanitizer requires more flags to function than others:
            // https://doc.rust-lang.org/unstable-book/compiler-flags/sanitizer.html#memorysanitizer
            rustflags.push_str(" -Zsanitizer=memory -Zsanitizer-memory-track-origins")
        }
        _ => rustflags.push_str(&format!(
            " -Zsanitizer={sanitizer}",
            sanitizer = build.sanitizer
        )),
    }
    if build.careful_mode {
        rustflags.push_str(" -Zextra-const-ub-checks -Zstrict-init-checks --cfg careful");
    }
    if build.triple.contains("-linux-") {
        rustflags.push_str(" -Cllvm-args=-sanitizer-coverage-stack-depth");
    }
    if !build.release || build.debug_assertions || build.careful_mode {
        rustflags.push_str(" -Cdebug-assertions");
    }
    if build.triple.contains("-msvc") {
        // The entrypoint is in the bundled libfuzzer rlib, this gets the linker to find it.
        rustflags.push_str(" -Clink-arg=/include:main");
    }
    // If release mode is enabled then we force 1 CGU to be used in rustc.
    // This will result in slower compilations but it looks like the sancov
    // passes otherwise add `notEligibleToImport` annotations to functions
    // in LLVM IR, meaning that *nothing* can get imported with ThinLTO.
    // This means that in release mode, where ThinLTO is critical for
    // performance, we're taking a huge hit relative to actual release mode.
    // Local tests have once showed this to be a ~3x faster runtime where
    // otherwise functions like `Vec::as_ptr` aren't inlined.
    if !build.dev {
        rustflags.push_str(" -C codegen-units=1");
    }
    // User-supplied RUSTFLAGS come last so they can override our defaults.
    if let Ok(other_flags) = env::var("RUSTFLAGS") {
        rustflags.push(' ');
        rustflags.push_str(&other_flags);
    }
    cmd.env("RUSTFLAGS", rustflags);
    // For asan and tsan we have default options. Merge them to the given
    // options, so users can still provide their own options to e.g. disable
    // the leak sanitizer. Options are colon-separated.
    match build.sanitizer {
        Sanitizer::Address => {
            let mut asan_opts = env::var("ASAN_OPTIONS").unwrap_or_default();
            if !asan_opts.is_empty() {
                asan_opts.push(':');
            }
            asan_opts.push_str("detect_odr_violation=0");
            cmd.env("ASAN_OPTIONS", asan_opts);
        }
        Sanitizer::Thread => {
            let mut tsan_opts = env::var("TSAN_OPTIONS").unwrap_or_default();
            if !tsan_opts.is_empty() {
                tsan_opts.push(':');
            }
            tsan_opts.push_str("report_signal_unsafe=0");
            cmd.env("TSAN_OPTIONS", tsan_opts);
        }
        _ => {}
    }
    Ok(cmd)
}
/// Build a `cargo run` command for `fuzz_target`, pointing libFuzzer at
/// the per-target artifact directory via `-artifact_prefix=`.
fn cargo_run(&self, build: &options::BuildOptions, fuzz_target: &str) -> Result<Command> {
    let mut cmd = self.cargo("run", build)?;
    cmd.arg("--bin").arg(fuzz_target);
    if let Some(target_dir) = &build.target_dir {
        cmd.arg("--target-dir").arg(target_dir);
    }
    // Use OsString because the artifact path may not be valid UTF-8.
    let mut artifact_arg = ffi::OsString::from("-artifact_prefix=");
    artifact_arg.push(self.artifacts_for(fuzz_target)?);
    cmd.arg("--").arg(artifact_arg);
    Ok(cmd)
}
// note: never returns Ok(None) if build.coverage is true
/// Resolve the cargo target directory to use for this build, if any.
fn target_dir(&self, build: &options::BuildOptions) -> Result<Option<PathBuf>> {
    // Use the user-provided target directory, if provided. Otherwise if building for coverage,
    // use the coverage directory
    if let Some(target_dir) = build.target_dir.as_ref() {
        return Ok(Some(PathBuf::from(target_dir)));
    } else if build.coverage {
        // To ensure that fuzzing and coverage-output generation can run in parallel, we
        // produce a separate binary for the coverage command.
        let current_dir = env::current_dir()?;
        Ok(Some(
            current_dir
                .join("target")
                .join(default_target())
                .join("coverage"),
        ))
    } else {
        // No override and not a coverage build: let cargo pick.
        Ok(None)
    }
}
/// Run `cargo build`/`cargo check` for one fuzz target, or for all of
/// them when `fuzz_target` is `None`.
///
/// # Errors
///
/// Fails when the cargo command cannot be spawned or exits unsuccessfully.
pub fn exec_build(
    &self,
    mode: options::BuildMode,
    build: &options::BuildOptions,
    fuzz_target: Option<&str>,
) -> Result<()> {
    let cargo_subcommand = match mode {
        options::BuildMode::Build => "build",
        options::BuildMode::Check => "check",
    };
    let mut cmd = self.cargo(cargo_subcommand, build)?;
    if let Some(fuzz_target) = fuzz_target {
        cmd.arg("--bin").arg(fuzz_target);
    } else {
        // No explicit target: build every fuzz-target binary.
        cmd.arg("--bins");
    }
    // `build` is already a `&BuildOptions`; the original took a needless
    // second borrow (`&build`).
    if let Some(target_dir) = self.target_dir(build)? {
        cmd.arg("--target-dir").arg(target_dir);
    }
    let status = cmd
        .status()
        .with_context(|| format!("failed to execute: {:?}", cmd))?;
    if !status.success() {
        bail!("failed to build fuzz script: {:?}", cmd);
    }
    Ok(())
}
/// Collect the paths of artifact files for `target` that were modified
/// strictly after `since`.
fn get_artifacts_since(
    &self,
    target: &str,
    since: &time::SystemTime,
) -> Result<HashSet<PathBuf>> {
    let mut artifacts = HashSet::new();
    let artifacts_dir = self.artifacts_for(target)?;
    for entry in fs::read_dir(&artifacts_dir).with_context(|| {
        format!(
            "failed to read directory entries of {}",
            artifacts_dir.display()
        )
    })? {
        let entry = entry.with_context(|| {
            format!(
                "failed to read directory entry inside {}",
                artifacts_dir.display()
            )
        })?;
        let metadata = entry
            .metadata()
            .context("failed to read artifact metadata")?;
        let modified = metadata
            .modified()
            .context("failed to get artifact modification time")?;
        // Skip directories and anything not newer than the cutoff.
        if !metadata.is_file() || modified <= *since {
            continue;
        }
        artifacts.insert(entry.path());
    }
    Ok(artifacts)
}
/// Re-run the fuzz target on a single `artifact` with
/// `RUST_LIBFUZZER_DEBUG_PATH` set, and return the `Debug` formatting the
/// target wrote to that temp file.
fn run_fuzz_target_debug_formatter(
    &self,
    build: &BuildOptions,
    target: &str,
    artifact: &Path,
) -> Result<String> {
    let debug_output = tempfile::NamedTempFile::new().context("failed to create temp file")?;
    let mut cmd = self.cargo_run(build, target)?;
    // No stdin: the input comes from the artifact file argument.
    cmd.stdin(Stdio::null());
    cmd.env("RUST_LIBFUZZER_DEBUG_PATH", debug_output.path());
    cmd.arg(artifact);
    let output = cmd
        .output()
        .with_context(|| format!("failed to run command: {:?}", cmd))?;
    if !output.status.success() {
        bail!(
            "Fuzz target '{target}' exited with failure when attempting to \
             debug formatting an interesting input that we discovered!\n\n\
             Artifact: {artifact}\n\n\
             Command: {cmd:?}\n\n\
             Status: {status}\n\n\
             === stdout ===\n\
             {stdout}\n\n\
             === stderr ===\n\
             {stderr}",
            target = target,
            status = output.status,
            cmd = cmd,
            artifact = artifact.display(),
            stdout = String::from_utf8_lossy(&output.stdout),
            stderr = String::from_utf8_lossy(&output.stderr),
        );
    }
    let debug = fs::read_to_string(&debug_output).context("failed to read temp file")?;
    Ok(debug)
}
/// Prints the debug output of an input test case
///
/// Runs the fuzz target's debug formatter on `debugfmt.input` and echoes
/// each line of the `Debug` representation to stderr.
pub fn debug_fmt_input(&self, debugfmt: &options::Fmt) -> Result<()> {
    if !debugfmt.input.exists() {
        bail!(
            "Input test case does not exist: {}",
            debugfmt.input.display()
        );
    }
    let debug = self
        .run_fuzz_target_debug_formatter(&debugfmt.build, &debugfmt.target, &debugfmt.input)
        .with_context(|| {
            format!(
                "failed to run `cargo fuzz fmt` on input: {}",
                debugfmt.input.display()
            )
        })?;
    eprintln!("\nOutput of `std::fmt::Debug`:\n");
    for l in debug.lines() {
        eprintln!("{}", l);
    }
    Ok(())
}
/// Fuzz a given fuzz target
///
/// Builds the target, runs it under libFuzzer against the configured
/// corpus, and — when the run fails — prints each newly produced
/// artifact's `Debug` output plus ready-to-paste repro/minimize commands.
pub fn exec_fuzz(&self, run: &options::Run) -> Result<()> {
    self.exec_build(BuildMode::Build, &run.build, Some(&run.target))?;
    let mut cmd = self.cargo_run(&run.build, &run.target)?;
    for arg in &run.args {
        cmd.arg(arg);
    }
    // Explicit corpus directories take precedence; otherwise use the
    // target's default corpus.
    if !run.corpus.is_empty() {
        for corpus in &run.corpus {
            cmd.arg(corpus);
        }
    } else {
        cmd.arg(self.corpus_for(&run.target)?);
    }
    if run.jobs != 1 {
        cmd.arg(format!("-fork={}", run.jobs));
    }
    // When libfuzzer finds failing inputs, those inputs will end up in the
    // artifacts directory. To easily filter old artifacts from new ones,
    // get the current time, and then later we only consider files modified
    // after now.
    let before_fuzzing = time::SystemTime::now();
    let mut child = cmd
        .spawn()
        .with_context(|| format!("failed to spawn command: {:?}", cmd))?;
    let status = child
        .wait()
        .with_context(|| format!("failed to wait on child process for command: {:?}", cmd))?;
    if status.success() {
        return Ok(());
    }
    // Get and print the `Debug` formatting of any new artifacts, along with
    // tips about how to reproduce failures and/or minimize test cases.
    let new_artifacts = self.get_artifacts_since(&run.target, &before_fuzzing)?;
    for artifact in new_artifacts {
        // To make the artifact a little easier to read, strip the current
        // directory prefix when possible.
        let artifact = strip_current_dir_prefix(&artifact);
        eprintln!("\n{:─<80}", "");
        eprintln!("\nFailing input:\n\n\t{}\n", artifact.display());
        // Note: ignore errors when running the debug formatter. This most
        // likely just means that we're dealing with a fuzz target that uses
        // an older version of the libfuzzer crate, and doesn't support
        // `RUST_LIBFUZZER_DEBUG_PATH`.
        if let Ok(debug) =
            self.run_fuzz_target_debug_formatter(&run.build, &run.target, artifact)
        {
            eprintln!("Output of `std::fmt::Debug`:\n");
            for l in debug.lines() {
                eprintln!("\t{}", l);
            }
            eprintln!();
        }
        // Only mention `--fuzz-dir` in the suggested commands when a
        // non-default directory is in use.
        let fuzz_dir = if self.fuzz_dir_is_default_path() {
            String::new()
        } else {
            format!(" --fuzz-dir {}", self.fuzz_dir().display())
        };
        eprintln!(
            "Reproduce with:\n\n\tcargo fuzz run{fuzz_dir}{options} {target} {artifact}\n",
            fuzz_dir = &fuzz_dir,
            options = &run.build,
            target = &run.target,
            artifact = artifact.display()
        );
        eprintln!(
            "Minimize test case with:\n\n\tcargo fuzz tmin{fuzz_dir}{options} {target} {artifact}\n",
            fuzz_dir = &fuzz_dir,
            options = &run.build,
            target = &run.target,
            artifact = artifact.display()
        );
    }
    eprintln!("{:─<80}\n", "");
    bail!("Fuzz target exited with {}", status)
}
/// Minimize a crashing test case with libFuzzer's `-minimize_crash=1`,
/// then locate and pretty-print the minimized artifact.
pub fn exec_tmin(&self, tmin: &options::Tmin) -> Result<()> {
    self.exec_build(BuildMode::Build, &tmin.build, Some(&tmin.target))?;
    let mut cmd = self.cargo_run(&tmin.build, &tmin.target)?;
    cmd.arg("-minimize_crash=1")
        .arg(format!("-runs={}", tmin.runs))
        .arg(&tmin.test_case);
    for arg in &tmin.args {
        cmd.arg(arg);
    }
    // Timestamp used later to pick out only artifacts this run produced.
    let before_tmin = time::SystemTime::now();
    let mut child = cmd
        .spawn()
        .with_context(|| format!("failed to spawn command: {:?}", cmd))?;
    let status = child
        .wait()
        .with_context(|| format!("failed to wait on child process for command: {:?}", cmd))?;
    if !status.success() {
        eprintln!("\n{:─<80}\n", "");
        return Err(anyhow!("Command `{:?}` exited with {}", cmd, status)).with_context(|| {
            "Test case minimization failed.\n\
             \n\
             Usually this isn't a hard error, and just means that libfuzzer\n\
             doesn't know how to minimize the test case any further while\n\
             still reproducing the original crash.\n\
             \n\
             See the logs above for details."
        });
    }
    // Find and display the most recently modified artifact, which is
    // presumably the result of minification. Yeah, this is a little hacky,
    // but it seems to work. I don't want to parse libfuzzer's stderr output
    // and hope it never changes.
    let minimized_artifact = self
        .get_artifacts_since(&tmin.target, &before_tmin)?
        .into_iter()
        .max_by_key(|a| {
            a.metadata()
                .and_then(|m| m.modified())
                .unwrap_or(time::SystemTime::UNIX_EPOCH)
        });
    if let Some(artifact) = minimized_artifact {
        let artifact = strip_current_dir_prefix(&artifact);
        eprintln!("\n{:─<80}\n", "");
        eprintln!("Minimized artifact:\n\n\t{}\n", artifact.display());
        // Note: ignore errors when running the debug formatter. This most
        // likely just means that we're dealing with a fuzz target that uses
        // an older version of the libfuzzer crate, and doesn't support
        // `RUST_LIBFUZZER_DEBUG_PATH`.
        if let Ok(debug) =
            self.run_fuzz_target_debug_formatter(&tmin.build, &tmin.target, artifact)
        {
            eprintln!("Output of `std::fmt::Debug`:\n");
            for l in debug.lines() {
                eprintln!("\t{}", l);
            }
            eprintln!();
        }
        // Only mention `--fuzz-dir` when a non-default directory is in use.
        let fuzz_dir = if self.fuzz_dir_is_default_path() {
            String::new()
        } else {
            format!(" --fuzz-dir {}", self.fuzz_dir().display())
        };
        eprintln!(
            "Reproduce with:\n\n\tcargo fuzz run{fuzz_dir}{options} {target} {artifact}\n",
            fuzz_dir = &fuzz_dir,
            options = &tmin.build,
            target = &tmin.target,
            artifact = artifact.display()
        );
    }
    Ok(())
}
/// Minimize the corpus for a target with libFuzzer's `-merge=1`, merging
/// into a temp directory and swapping it in only on success.
pub fn exec_cmin(&self, cmin: &options::Cmin) -> Result<()> {
    self.exec_build(BuildMode::Build, &cmin.build, Some(&cmin.target))?;
    let mut cmd = self.cargo_run(&cmin.build, &cmin.target)?;
    for arg in &cmin.args {
        cmd.arg(arg);
    }
    let corpus = if let Some(corpus) = cmin.corpus.clone() {
        corpus
    } else {
        self.corpus_for(&cmin.target)?
    };
    let corpus = corpus
        .to_str()
        .ok_or_else(|| anyhow!("corpus must be valid unicode"))?
        .to_owned();
    // Temp dir lives inside the fuzz dir so the final rename stays on the
    // same filesystem; it is cleaned up automatically on drop.
    let tmp = tempfile::TempDir::new_in(self.fuzz_dir())?;
    let tmp_corpus = tmp.path().join("corpus");
    fs::create_dir(&tmp_corpus)?;
    cmd.arg("-merge=1").arg(&tmp_corpus).arg(&corpus);
    // Spawn cmd in child process instead of exec-ing it
    let status = cmd
        .status()
        .with_context(|| format!("could not execute command: {:?}", cmd))?;
    if status.success() {
        // move corpus directory into tmp to auto delete it
        fs::rename(&corpus, tmp.path().join("old"))?;
        fs::rename(tmp.path().join("corpus"), corpus)?;
    } else {
        println!("Failed to minimize corpus: {}", status);
    }
    Ok(())
}
/// Produce coverage information for a given corpus
///
/// Builds the target with `-Cinstrument-coverage`, replays every corpus
/// input through it to emit raw profiles, then merges them into a single
/// `.profdata` file.
///
/// # Errors
///
/// Fails when the corpus has no input files, when the instrumented run
/// exits unsuccessfully, or when merging the raw profiles fails.
pub fn exec_coverage(self, coverage: &options::Coverage) -> Result<()> {
    // Build project with source-based coverage generation enabled.
    self.exec_build(BuildMode::Build, &coverage.build, Some(&coverage.target))?;
    // Retrieve corpus directories.
    let corpora = if coverage.corpus.is_empty() {
        vec![self.corpus_for(&coverage.target)?]
    } else {
        coverage
            .corpus
            .iter()
            .map(|name| Path::new(name).to_path_buf())
            .collect()
    };
    // Collect the (non-directory) readable input files from the corpora.
    let files_and_dirs = corpora.iter().flat_map(fs::read_dir).flatten().flatten();
    let mut readable_input_files = files_and_dirs
        .filter(|file| match file.file_type() {
            Ok(ft) => ft.is_file(),
            _ => false,
        })
        .peekable();
    if readable_input_files.peek().is_none() {
        bail!(
            "The corpus does not contain program-input files. \
             Coverage information requires existing input files. \
             Try running the fuzzer first (`cargo fuzz run ...`) to generate a corpus, \
             or provide a nonempty corpus directory."
        )
    }
    let (coverage_out_raw_dir, coverage_out_file) = self.coverage_for(&coverage.target)?;
    for corpus in corpora.iter() {
        // _tmp_dir is deleted when it goes out of scope.
        // `corpus.as_path()` is already a `&Path`; the original wrapped it
        // in a needless extra borrow.
        let (mut cmd, _tmp_dir) =
            self.create_coverage_cmd(coverage, &coverage_out_raw_dir, corpus.as_path())?;
        eprintln!("Generating coverage data for corpus {:?}", corpus);
        let status = cmd
            .status()
            .with_context(|| format!("Failed to run command: {:?}", cmd))?;
        if !status.success() {
            Err(anyhow!(
                "Command exited with failure status {}: {:?}",
                status,
                cmd
            ))
            // Typo fix: was "generage".
            .context("Failed to generate coverage data")?;
        }
    }
    self.merge_coverage(&coverage_out_raw_dir, &coverage_out_file)?;
    Ok(())
}
/// Build the command that replays `corpus_dir` through the instrumented
/// fuzz binary, writing raw profiles into `coverage_dir`.
///
/// Returns the command together with a temp dir that must stay alive for
/// the command's lifetime (it is passed as libFuzzer's merge destination).
fn create_coverage_cmd(
    &self,
    coverage: &options::Coverage,
    coverage_dir: &Path,
    corpus_dir: &Path,
) -> Result<(Command, tempfile::TempDir)> {
    let bin_path = {
        let profile_subdir = if coverage.build.dev {
            "debug"
        } else {
            "release"
        };
        let target_dir = self
            .target_dir(&coverage.build)?
            .expect("target dir for coverage command should never be None");
        target_dir
            .join(&coverage.build.triple)
            .join(profile_subdir)
            .join(&coverage.target)
    };
    let mut cmd = Command::new(bin_path);
    // Raw coverage data will be saved in `coverage/<target>` directory.
    let corpus_dir_name = corpus_dir
        .file_name()
        .and_then(|x| x.to_str())
        .with_context(|| format!("Invalid corpus directory: {:?}", corpus_dir))?;
    cmd.env(
        "LLVM_PROFILE_FILE",
        coverage_dir.join(format!("default-{}.profraw", corpus_dir_name)),
    );
    // `-merge=1` into a throwaway dir makes libFuzzer execute every input
    // once without mutating the real corpus.
    cmd.arg("-merge=1");
    let dummy_corpus = tempfile::tempdir()?;
    cmd.arg(dummy_corpus.path());
    cmd.arg(corpus_dir);
    for arg in &coverage.args {
        cmd.arg(arg);
    }
    Ok((cmd, dummy_corpus))
}
/// Merge raw `.profraw` files into a single `.profdata` file using the
/// toolchain's bundled `llvm-profdata`.
fn merge_coverage(&self, profdata_raw_path: &Path, profdata_out_path: &Path) -> Result<()> {
    let mut profdata_path = rustlib()?;
    profdata_path.push(format!("llvm-profdata{}", env::consts::EXE_SUFFIX));
    let mut merge_cmd = Command::new(profdata_path);
    merge_cmd.arg("merge").arg("-sparse");
    merge_cmd.arg(profdata_raw_path);
    merge_cmd.arg("-o").arg(profdata_out_path);
    eprintln!("Merging raw coverage data...");
    let status = merge_cmd
        .status()
        .with_context(|| format!("Failed to run command: {:?}", merge_cmd))
        .with_context(|| "Merging raw coverage files failed.\n\
                          \n\
                          Do you have LLVM coverage tools installed?\n\
                          https://doc.rust-lang.org/rustc/instrument-coverage.html#installing-llvm-coverage-tools")?;
    if !status.success() {
        Err(anyhow!(
            "Command exited with failure status {}: {:?}",
            status,
            merge_cmd
        ))
        .context("Merging raw coverage files failed")?;
    }
    // `llvm-profdata` can exit 0 without producing output; double-check.
    if profdata_out_path.exists() {
        eprintln!("Coverage data merged and saved in {:?}.", profdata_out_path);
        Ok(())
    } else {
        bail!("Coverage data could not be merged.")
    }
}
/// Path of the fuzz project directory.
pub(crate) fn fuzz_dir(&self) -> &Path {
    self.fuzz_dir.as_path()
}
/// Path of the fuzz crate's `Cargo.toml` manifest.
fn manifest_path(&self) -> PathBuf {
    let mut manifest = self.fuzz_dir().to_owned();
    manifest.push("Cargo.toml");
    manifest
}
/// Returns paths to the `coverage/<target>/raw` directory and `coverage/<target>/coverage.profdata` file.
fn coverage_for(&self, target: &str) -> Result<(PathBuf, PathBuf)> {
    let mut coverage_data = self.fuzz_dir().to_owned();
    coverage_data.push("coverage");
    coverage_data.push(target);
    // Branch the two result paths off the shared `coverage/<target>` base.
    let mut coverage_raw = coverage_data.clone();
    coverage_data.push("coverage.profdata");
    coverage_raw.push("raw");
    fs::create_dir_all(&coverage_raw).with_context(|| {
        format!("could not make a coverage directory at {:?}", coverage_raw)
    })?;
    Ok((coverage_raw, coverage_data))
}
/// Path of the corpus directory for `target`
/// (`<fuzz_dir>/corpus/<target>`), created if it does not exist.
fn corpus_for(&self, target: &str) -> Result<PathBuf> {
    let mut p = self.fuzz_dir().to_owned();
    p.push("corpus");
    p.push(target);
    fs::create_dir_all(&p)
        .with_context(|| format!("could not make a corpus directory at {:?}", p))?;
    Ok(p)
}
fn artifacts_for(&self, target: &str) -> Result<PathBuf> {
let mut p = self.fuzz_dir().to_owned();
p.push("artifacts");
p.push(target);
// This adds a trailing slash, which is necessary for libFuzzer, because
// it does simple string concatenation when joining paths.
p.push("");
fs::create_dir_all(&p)
.with_context(|| format!("could not make a artifact directory at {:?}", p))?;
Ok(p)
}
/// Path of the directory holding the fuzz target scripts.
///
/// Falls back to the legacy `fuzz/fuzzers/` layout (with a deprecation
/// warning) when it still exists.
fn fuzz_targets_dir(&self) -> PathBuf {
    let mut root = self.fuzz_dir().to_owned();
    if root.join(crate::FUZZ_TARGETS_DIR_OLD).exists() {
        // Grammar fix in the warning: was "has renamed to".
        println!(
            "warning: The `fuzz/fuzzers/` directory has been renamed to `fuzz/fuzz_targets/`. \
             Please rename the directory as such. This will become a hard error in the \
             future."
        );
        root.push(crate::FUZZ_TARGETS_DIR_OLD);
    } else {
        root.push(crate::FUZZ_TARGETS_DIR);
    }
    root
}
/// Path of the source file for `target` (`<fuzz_targets>/<target>.rs`).
fn target_path(&self, target: &str) -> PathBuf {
    self.fuzz_targets_dir().join(target).with_extension("rs")
}
/// Read and parse the fuzz crate's `Cargo.toml` into a `toml::Value`.
fn manifest(&self) -> Result<toml::Value> {
    let filename = self.manifest_path();
    let mut file = fs::File::open(&filename)
        .with_context(|| format!("could not read the manifest file: {}", filename.display()))?;
    let mut data = Vec::new();
    file.read_to_end(&mut data)?;
    toml::from_slice(&data).with_context(|| {
        format!(
            "could not decode the manifest file at {}",
            filename.display()
        )
    })
}
// If `fuzz_dir_opt` is `None`, returns a new instance with the default fuzz project
// path. Otherwise, returns a new instance with the inner content of `fuzz_dir_opt`.
fn manage_initial_instance(fuzz_dir_opt: Option<PathBuf>) -> Result<Self> {
    let project_dir = find_package()?;
    let fuzz_dir = fuzz_dir_opt.unwrap_or_else(|| project_dir.join(DEFAULT_FUZZ_DIR));
    Ok(FuzzProject {
        fuzz_dir,
        project_dir,
        targets: Vec::new(),
    })
}
/// Whether the fuzz directory sits at the default `fuzz/` location.
fn fuzz_dir_is_default_path(&self) -> bool {
    self.fuzz_dir.as_path().ends_with(DEFAULT_FUZZ_DIR)
}
}
/// Ask `rustc` (or the binary named by `$RUSTC`) for its sysroot path.
fn sysroot() -> Result<String> {
    let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
    let output = Command::new(rustc).arg("--print").arg("sysroot").output()?;
    let raw = String::from_utf8(output.stdout)?;
    // Note: We must trim() to remove the `\n` from the end of stdout
    Ok(raw.trim().to_owned())
}
/// Path to the host toolchain's bundled LLVM tools:
/// `<sysroot>/lib/rustlib/<host-triple>/bin`.
fn rustlib() -> Result<PathBuf> {
    let sysroot = sysroot()?;
    let mut pathbuf = PathBuf::from(sysroot);
    pathbuf.push("lib");
    pathbuf.push("rustlib");
    pathbuf.push(rustc_version::version_meta()?.host);
    pathbuf.push("bin");
    Ok(pathbuf)
}
/// Collect the `[[bin]]` target names declared in a fuzz manifest.
///
/// Missing or malformed `bin` tables yield an empty list; the result is
/// sorted so that output is deterministic.
fn collect_targets(value: &toml::Value) -> Vec<String> {
    let mut names: Vec<String> = value
        .as_table()
        .and_then(|v| v.get("bin"))
        .and_then(toml::Value::as_array)
        .into_iter()
        .flatten()
        .filter_map(|bin| {
            bin.as_table()
                .and_then(|v| v.get("name"))
                .and_then(toml::Value::as_str)
                .map(String::from)
        })
        .collect();
    // Always sort them, so that we have deterministic output.
    names.sort();
    names
}
/// Minimal view of a crate's `Cargo.toml`: just the fields the fuzz-crate
/// templates need.
pub struct Manifest {
    // `package.name` of the crate being fuzzed.
    crate_name: String,
    // `package.edition`, if declared.
    edition: Option<String>,
}
impl Manifest {
    /// Parse `package.name` and `package.edition` out of the manifest at `path`.
    ///
    /// # Errors
    ///
    /// Fails when the file cannot be read, is not valid TOML, or when
    /// `package.name`/`package.edition` are missing or not strings.
    pub fn parse(path: &Path) -> Result<Self> {
        let contents = fs::read(path)?;
        let value: toml::Value = toml::from_slice(&contents)?;
        let package = value
            .as_table()
            .and_then(|v| v.get("package"))
            .and_then(toml::Value::as_table);
        let crate_name = package
            .and_then(|v| v.get("name"))
            .and_then(toml::Value::as_str)
            .with_context(|| anyhow!("{} (package.name) is malformed", path.display()))?
            .to_owned();
        // `package` must be `Some` here: a `None` would already have made
        // the `crate_name` lookup above return an error.
        let edition = package
            .expect("can't be None at this point")
            .get("edition")
            .map(|v| match v.as_str() {
                Some(s) => Ok(s.to_owned()),
                None => bail!("{} (package.edition) is malformed", path.display()),
            })
            .transpose()?;
        Ok(Manifest {
            crate_name,
            edition,
        })
    }
}
/// Whether the given manifest declares `package.metadata.cargo-fuzz = true`.
fn is_fuzz_manifest(value: &toml::Value) -> bool {
    value
        .as_table()
        .and_then(|v| v.get("package"))
        .and_then(toml::Value::as_table)
        .and_then(|v| v.get("metadata"))
        .and_then(toml::Value::as_table)
        .and_then(|v| v.get("cargo-fuzz"))
        .and_then(toml::Value::as_bool)
        .unwrap_or(false)
}
/// Returns the path for the first found non-fuzz Cargo package
///
/// Walks up from the current directory; the first directory whose
/// `Cargo.toml` is NOT a cargo-fuzz manifest is taken to be the project
/// being fuzzed.
fn find_package() -> Result<PathBuf> {
    let mut dir = env::current_dir()?;
    // Read buffer reused across directory levels to avoid reallocating.
    let mut data = Vec::new();
    loop {
        let manifest_path = dir.join("Cargo.toml");
        match fs::File::open(&manifest_path) {
            // No manifest at this level; keep walking up.
            Err(_) => {}
            Ok(mut f) => {
                data.clear();
                f.read_to_end(&mut data)
                    .with_context(|| format!("failed to read {}", manifest_path.display()))?;
                let value: toml::Value = toml::from_slice(&data).with_context(|| {
                    format!(
                        "could not decode the manifest file at {}",
                        manifest_path.display()
                    )
                })?;
                if !is_fuzz_manifest(&value) {
                    // Not a cargo-fuzz project => must be a proper cargo project :)
                    return Ok(dir);
                }
            }
        }
        if !dir.pop() {
            break;
        }
    }
    bail!("could not find a cargo project")
}
/// Strip the current working directory prefix from `path` when possible.
///
/// Returns `path` unchanged when the current directory cannot be
/// determined or is not a prefix of `path`.
fn strip_current_dir_prefix(path: &Path) -> &Path {
    match env::current_dir() {
        Ok(curdir) => path.strip_prefix(curdir).unwrap_or(path),
        Err(_) => path,
    }
}
|
use apllodb_storage_engine_interface::StorageEngine;
use super::query::query_plan::query_plan_tree::query_plan_node::node_repo::QueryPlanNodeRepository;
/// Context object each Processor/Executor has.
/// A context object must be moved out after an SQL process.
#[derive(Debug)]
pub struct SqlProcessorContext<Engine: StorageEngine> {
    // Storage engine this context owns and executes against.
    pub(crate) engine: Engine,
    // Repository of query-plan nodes (presumably populated while building
    // query plans — confirm against the planner code).
    pub(crate) node_repo: QueryPlanNodeRepository,
}
impl<Engine: StorageEngine> SqlProcessorContext<Engine> {
    /// Constructor
    ///
    /// Takes ownership of `engine` and starts with an empty (default)
    /// query-plan node repository.
    pub fn new(engine: Engine) -> Self {
        let node_repo = QueryPlanNodeRepository::default();
        Self { engine, node_repo }
    }
}
|
// Machine-generated (older svd2rust API style) reader/writer types and
// reset value for the ANA_CTL0 register. Prefer regenerating over hand edits.
#[doc = "Reader of register ANA_CTL0"]
pub type R = crate::R<u32, super::ANA_CTL0>;
#[doc = "Writer for register ANA_CTL0"]
pub type W = crate::W<u32, super::ANA_CTL0>;
#[doc = "Register ANA_CTL0 `reset()`'s with value 0x0400"]
impl crate::ResetValue for super::ANA_CTL0 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x0400
    }
}
#[doc = "Reader of field `CSLDAC`"]
pub type CSLDAC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `CSLDAC`"]
pub struct CSLDAC_W<'a> {
    w: &'a mut W,
}
impl<'a> CSLDAC_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Masks `value` to 3 bits and writes it at bit offset 8. `unsafe` per
    // the generator's contract: the caller must supply a valid field value.
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 8)) | (((value as u32) & 0x07) << 8);
        self.w
    }
}
#[doc = "Reader of field `VCC_SEL`"]
pub type VCC_SEL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `VCC_SEL`"]
pub struct VCC_SEL_W<'a> {
    w: &'a mut W,
}
// Single-bit field at offset 24; set/clear helpers delegate to `bit`.
impl<'a> VCC_SEL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
        self.w
    }
}
#[doc = "Reader of field `FLIP_AMUXBUS_AB`"]
pub type FLIP_AMUXBUS_AB_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FLIP_AMUXBUS_AB`"]
pub struct FLIP_AMUXBUS_AB_W<'a> {
    w: &'a mut W,
}
// Single-bit field at offset 27; set/clear helpers delegate to `bit`.
impl<'a> FLIP_AMUXBUS_AB_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
        self.w
    }
}
impl R {
    // Field readers: each one shifts the captured register value right to the
    // field's bit position and masks off the field width.
    #[doc = "Bits 8:10 - Trimming of common source line DAC."]
    #[inline(always)]
    pub fn csldac(&self) -> CSLDAC_R {
        CSLDAC_R::new(((self.bits >> 8) & 0x07) as u8)
    }
    #[doc = "Bit 24 - Vcc select: '0': 1.2 V : LP reset value '1': 0.95 V: ULP reset value Note: the flash macro compiler has a configuration option that specifies the default/reset value of this field."]
    #[inline(always)]
    pub fn vcc_sel(&self) -> VCC_SEL_R {
        VCC_SEL_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 27 - Flips amuxbusa and amuxbusb '0': amuxbusa, amuxbusb '1': amuxbusb, amuxbusb"]
    #[inline(always)]
    pub fn flip_amuxbus_ab(&self) -> FLIP_AMUXBUS_AB_R {
        FLIP_AMUXBUS_AB_R::new(((self.bits >> 27) & 0x01) != 0)
    }
}
impl W {
    // Field writers: each method hands back a write proxy borrowing this
    // writer, so calls can be chained before the register store.
    #[doc = "Bits 8:10 - Trimming of common source line DAC."]
    #[inline(always)]
    pub fn csldac(&mut self) -> CSLDAC_W {
        CSLDAC_W { w: self }
    }
    #[doc = "Bit 24 - Vcc select: '0': 1.2 V : LP reset value '1': 0.95 V: ULP reset value Note: the flash macro compiler has a configuration option that specifies the default/reset value of this field."]
    #[inline(always)]
    pub fn vcc_sel(&mut self) -> VCC_SEL_W {
        VCC_SEL_W { w: self }
    }
    #[doc = "Bit 27 - Flips amuxbusa and amuxbusb '0': amuxbusa, amuxbusb '1': amuxbusb, amuxbusb"]
    #[inline(always)]
    pub fn flip_amuxbus_ab(&mut self) -> FLIP_AMUXBUS_AB_W {
        FLIP_AMUXBUS_AB_W { w: self }
    }
}
|
use itertools::iproduct;
use std::cmp::max;
/// Grid serial number for this puzzle input.
const SERIAL_NUMBER: i32 = 3613;

/// Power level of the fuel cell at `(x, y)`: with `rack_id = x + 10`,
/// take the hundreds digit of `(rack_id * y + SERIAL_NUMBER) * rack_id`
/// and subtract 5, giving a value in `-5..=4`.
fn get_power((x, y): (i32, i32)) -> i32 {
    let rack_id = x + 10;
    let total = (rack_id * y + SERIAL_NUMBER) * rack_id;
    let hundreds_digit = total / 100 % 10;
    hundreds_digit - 5
}
fn part1() {
let (x, y) = iproduct!(1..299, 1..299)
.max_by_key(|&(x, y)| iproduct!(x..x + 3, y..y + 3).map(get_power).sum::<i32>())
.unwrap();
println!("{},{}", x, y);
}
// Part 2: best square of ANY size. For each top-left corner (x, y) the
// square grows incrementally: `scan` keeps the running total, and each step
// adds only the L-shaped ring that extends the square — the new bottom row
// (`size` cells) plus the new right column excluding its bottom-right corner
// (`size - 1` cells, already counted in the row).
fn part2() {
    let (x, y, size, _) = iproduct!(1..301, 1..301)
        .flat_map(|(x, y)| {
            // `302 - max(x, y)` caps `size` so the square stays inside the
            // 300x300 grid.
            (1..(302 - max(x, y))).scan(0, move |power, size| {
                *power += (x..x + size)
                    .map(|x| get_power((x, y + size - 1)))
                    .sum::<i32>();
                *power += (y..y + size - 1)
                    .map(|y| get_power((x + size - 1, y)))
                    .sum::<i32>();
                Some((x, y, size, *power))
            })
        })
        .max_by_key(|&(_, _, _, power)| power)
        .unwrap();
    println!("{},{},{}", x, y, size);
}
fn main() {
    // Solve both parts of the fuel-cell power-grid puzzle in order.
    part1();
    part2();
}
|
use crate::{
error::*,
ffi::*,
};
// Raw FFI mirror of the ESP-IDF GPIO C types. Each C enum is represented as
// a plain integer type plus one `const` per variant (bindgen-style naming:
// `<enum>_<VARIANT>`).
pub type gpio_num_t = u32;
// Interrupt trigger configuration.
pub type gpio_int_type_t = u32;
pub const gpio_int_type_t_GPIO_INTR_DISABLE: gpio_int_type_t = 0;
pub const gpio_int_type_t_GPIO_INTR_POSEDGE: gpio_int_type_t = 1;
pub const gpio_int_type_t_GPIO_INTR_NEGEDGE: gpio_int_type_t = 2;
pub const gpio_int_type_t_GPIO_INTR_ANYEDGE: gpio_int_type_t = 3;
pub const gpio_int_type_t_GPIO_INTR_LOW_LEVEL: gpio_int_type_t = 4;
pub const gpio_int_type_t_GPIO_INTR_HIGH_LEVEL: gpio_int_type_t = 5;
// Pin direction / drive mode. Note the gap in values (3..=5 unused here);
// presumably other modes exist in the C header — confirm against ESP-IDF.
pub type gpio_mode_t = u32;
pub const gpio_mode_t_GPIO_MODE_DISABLE: gpio_mode_t = 0;
pub const gpio_mode_t_GPIO_MODE_INPUT: gpio_mode_t = 1;
pub const gpio_mode_t_GPIO_MODE_OUTPUT: gpio_mode_t = 2;
pub const gpio_mode_t_GPIO_MODE_OUTPUT_OD: gpio_mode_t = 6;
// Pull resistor configuration (combined, and the split up/down variants).
pub type gpio_pull_mode_t = u32;
pub const gpio_pull_mode_t_GPIO_PULLUP_ONLY: gpio_pull_mode_t = 0;
pub const gpio_pull_mode_t_GPIO_PULLDOWN_ONLY: gpio_pull_mode_t = 1;
pub const gpio_pull_mode_t_GPIO_FLOATING: gpio_pull_mode_t = 2;
pub type gpio_pullup_t = u32;
pub const gpio_pullup_t_GPIO_PULLUP_DISABLE: gpio_pullup_t = 0;
pub const gpio_pullup_t_GPIO_PULLUP_ENABLE: gpio_pullup_t = 1;
pub type gpio_pulldown_t = u32;
pub const gpio_pulldown_t_GPIO_PULLDOWN_DISABLE: gpio_pulldown_t = 0;
pub const gpio_pulldown_t_GPIO_PULLDOWN_ENABLE: gpio_pulldown_t = 1;
// ISR callback: a C function pointer taking one opaque user argument.
pub type gpio_isr_t =
    ::core::option::Option<unsafe extern "C" fn(arg1: *mut xtensa_void)>;
pub type gpio_isr_handle_t = *mut xtensa_void;
// C-layout configuration struct passed to `gpio_config`. Field meanings
// mirror the ESP-IDF struct of the same name.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct gpio_config_t {
    // Bit mask of the GPIO numbers this configuration applies to.
    pub pin_bit_mask: u32,
    pub mode: gpio_mode_t,
    pub pull_up_en: gpio_pullup_t,
    pub pull_down_en: gpio_pulldown_t,
    pub intr_type: gpio_int_type_t,
}
// Raw bindings into the ESP-IDF GPIO driver. All contracts (valid pin
// ranges, required prior configuration, error codes) live in the C headers;
// callers are responsible for upholding them.
extern "C" {
    pub fn gpio_config(gpio_cfg: *const gpio_config_t) -> esp_err_t;
    pub fn gpio_set_intr_type(gpio_num: gpio_num_t, intr_type: gpio_int_type_t) -> esp_err_t;
    pub fn gpio_set_level(gpio_num: gpio_num_t, level: u32) -> esp_err_t;
    pub fn gpio_get_level(gpio_num: gpio_num_t) -> xtensa_int;
    pub fn gpio_set_direction(gpio_num: gpio_num_t, mode: gpio_mode_t) -> esp_err_t;
    pub fn gpio_set_pull_mode(gpio_num: gpio_num_t, pull: gpio_pull_mode_t) -> esp_err_t;
    pub fn gpio_wakeup_enable(gpio_num: gpio_num_t, intr_type: gpio_int_type_t) -> esp_err_t;
    pub fn gpio_wakeup_disable(gpio_num: gpio_num_t) -> esp_err_t;
    // Registers one global ISR; the `no_use` parameters are kept only for
    // C ABI compatibility (per their names).
    pub fn gpio_isr_register(
        fn_: ::core::option::Option<unsafe extern "C" fn(arg1: *mut xtensa_void)>,
        arg: *mut xtensa_void,
        no_use: xtensa_int,
        handle_no_use: *mut gpio_isr_handle_t,
    ) -> esp_err_t;
    pub fn gpio_pullup_en(gpio_num: gpio_num_t) -> esp_err_t;
    pub fn gpio_pullup_dis(gpio_num: gpio_num_t) -> esp_err_t;
    pub fn gpio_pulldown_en(gpio_num: gpio_num_t) -> esp_err_t;
    pub fn gpio_pulldown_dis(gpio_num: gpio_num_t) -> esp_err_t;
    pub fn gpio_install_isr_service(no_use: xtensa_int) -> esp_err_t;
    pub fn gpio_uninstall_isr_service();
    // Per-pin ISR dispatch (requires the ISR service to be installed first —
    // NOTE(review): inferred from ESP-IDF convention, confirm in the header).
    pub fn gpio_isr_handler_add(
        gpio_num: gpio_num_t,
        isr_handler: gpio_isr_t,
        args: *mut xtensa_void,
    ) -> esp_err_t;
    pub fn gpio_isr_handler_remove(gpio_num: gpio_num_t) -> esp_err_t;
}
|
use std::cell::RefCell;
use std::rc::Rc;
use crate::pager_manager::Pager_manager;
use crate::{BTree, Attribute, DATATYPE, toDATATYPE, Page_info};
use crate::data_item::{Data_item, Data_item_info};
use anyhow::{Result, anyhow};
use std::borrow::{BorrowMut, Borrow};
use std::collections::HashMap;
use std::ops::Deref;
use crate::page::{Page, PageType, PAGE_SIZE, TABLE_NAME_SIZE, DB_META_ITEM_START, DB_META_ITEM_SIZE, MAX_META_DB_ITEM};
use crate::db_str::Db_str;
use std::io::{SeekFrom, Seek, Read};
// Top-level database handle: owns the page cache and the in-memory
// catalogue of tables loaded from the DB_META page chain.
pub struct wwc_db{
    // Shared pager (page cache / file access layer).
    pager_manager: Rc<RefCell<Pager_manager>>,
    // One B-tree per table, keyed by table name.
    pub table_btrees: HashMap<String,Rc<RefCell<BTree>>>
}
impl wwc_db {
    /// Open (or create) the database file at `path` and load the table
    /// catalogue from the DB_META page chain starting at page 0.
    pub fn open(path: &str) -> wwc_db {
        let pager_manager = Rc::new(RefCell::new(Pager_manager::new(path)));
        let table_btrees = wwc_db::get_db_tables(0, pager_manager.clone());
        wwc_db {
            pager_manager,
            table_btrees,
        }
    }

    /// Read every `(table name, btree root page)` entry stored in the DB_META
    /// page at `index`, then follow the `next_page` chain recursively.
    ///
    /// The pager `RefMut` is confined to an inner scope: the original held it
    /// across the recursive call, which re-borrows the same `RefCell` and
    /// panicked with `BorrowMutError` for any multi-page catalogue.
    pub fn get_db_tables(
        index: u32,
        pager_manager: Rc<RefCell<Pager_manager>>,
    ) -> HashMap<String, Rc<RefCell<BTree>>> {
        let mut res = HashMap::new();
        let next_index;
        {
            let mut pager_manager_ref = pager_manager.deref().borrow_mut();
            let temp = pager_manager_ref.get_page(index).unwrap();
            let db_meta_page = temp.deref().borrow();
            assert_eq!(db_meta_page.page_type, PageType::DB_META);
            // An empty catalogue page ends the scan (matches the original's
            // early return, which also did not follow the chain further).
            if db_meta_page.item_count() <= 0 {
                return res;
            }
            let mut i = 0;
            while i < db_meta_page.item_count() {
                // Each catalogue item is [name: TABLE_NAME_SIZE][root: u32].
                let mut name = DATATYPE::STR(Db_str::new_container(TABLE_NAME_SIZE));
                toDATATYPE(
                    name.decode(&db_meta_page.buf[DB_META_ITEM_START + DB_META_ITEM_SIZE * i..]),
                    &mut name,
                );
                let name_value = match name {
                    DATATYPE::STR(value) => value,
                    _ => panic!("can't happen"),
                };
                let mut ptr = DATATYPE::U32(0);
                toDATATYPE(
                    ptr.decode(
                        &db_meta_page.buf
                            [DB_META_ITEM_START + DB_META_ITEM_SIZE * i + TABLE_NAME_SIZE..],
                    ),
                    &mut ptr,
                );
                let ptr_value = match ptr {
                    DATATYPE::U32(value) => value,
                    _ => panic!("can't happen"),
                };
                let btree_meta_page = pager_manager_ref.get_page(ptr_value).unwrap();
                let btree = BTree::load_btree(
                    btree_meta_page,
                    pager_manager.clone(),
                    &mut pager_manager_ref,
                );
                res.insert(name_value.str, Rc::new(RefCell::new(btree)));
                i += 1;
            }
            next_index = db_meta_page.next_page();
        }
        // All borrows released above; recursing is now safe.
        if next_index != 0 {
            res.extend(wwc_db::get_db_tables(next_index, pager_manager.clone()));
        }
        res
    }

    /// Append a `(name, btree_root_page)` catalogue entry, allocating and
    /// linking a fresh DB_META page when the current tail page is full.
    pub fn add_db_table(&mut self, table: (&String, u32)) {
        // Read the head page's next pointer in its own statement so the pager
        // borrow is released before the chain walk below (the original held
        // it across `get_new_db_meta_page`, causing a BorrowMutError panic).
        let page_index = self
            .pager_manager
            .deref()
            .borrow_mut()
            .db_meta_page
            .deref()
            .borrow()
            .next_page();
        let name = Db_str::new(table.0, TABLE_NAME_SIZE);
        if page_index == 0 {
            // The head meta page is the tail of the chain.
            let pager_manager = &mut self.pager_manager.deref().borrow_mut();
            let num = pager_manager.db_meta_page.deref().borrow().item_count();
            if num >= MAX_META_DB_ITEM {
                // Head is full: allocate a new DB_META page, link the HEAD to
                // it, and write the entry into the NEW page. The original
                // linked the new page to itself (while also borrowing the same
                // RefCell mutably and immutably in one statement, a guaranteed
                // runtime panic) and never extended the chain.
                let new_page = pager_manager.new_page(PageType::DB_META).unwrap();
                let new_index = new_page.deref().borrow().index;
                pager_manager
                    .db_meta_page
                    .deref()
                    .borrow_mut()
                    .set_next_page(new_index);
                name.encode(&mut new_page.deref().borrow_mut().buf[DB_META_ITEM_START..]);
                table.1.encode(
                    &mut new_page.deref().borrow_mut().buf
                        [DB_META_ITEM_START + TABLE_NAME_SIZE..],
                );
                new_page.deref().borrow_mut().set_item_count(1);
            } else {
                name.encode(
                    &mut pager_manager.db_meta_page.deref().borrow_mut().buf
                        [DB_META_ITEM_START + DB_META_ITEM_SIZE * num..],
                );
                table.1.encode(
                    &mut pager_manager.db_meta_page.deref().borrow_mut().buf
                        [DB_META_ITEM_START + DB_META_ITEM_SIZE * num + TABLE_NAME_SIZE..],
                );
                pager_manager
                    .db_meta_page
                    .deref()
                    .borrow_mut()
                    .set_item_count(num + 1);
            }
        } else {
            // Walk to the tail page of the chain; the helper releases its
            // pager borrow before returning.
            let temp = self.get_new_db_meta_page(page_index);
            let mut page = temp.deref().borrow_mut();
            assert_eq!(page.page_type, PageType::DB_META);
            let num = page.item_count();
            if num >= MAX_META_DB_ITEM {
                // Tail is full: allocate and link a new page, and write the
                // entry THERE. The original wrote into the full page and reset
                // its item count to 1, destroying the existing entries.
                let new_page = self
                    .pager_manager
                    .deref()
                    .borrow_mut()
                    .new_page(PageType::DB_META)
                    .unwrap();
                page.set_next_page(new_page.deref().borrow().index);
                name.encode(&mut new_page.deref().borrow_mut().buf[DB_META_ITEM_START..]);
                table.1.encode(
                    &mut new_page.deref().borrow_mut().buf
                        [DB_META_ITEM_START + TABLE_NAME_SIZE..],
                );
                new_page.deref().borrow_mut().set_item_count(1);
            } else {
                name.encode(&mut page.buf[DB_META_ITEM_START + DB_META_ITEM_SIZE * num..]);
                table.1.encode(
                    &mut page.buf
                        [DB_META_ITEM_START + DB_META_ITEM_SIZE * num + TABLE_NAME_SIZE..],
                );
                page.set_item_count(num + 1);
            }
        }
    }

    /// Follow the DB_META `next_page` chain starting at `page_index` and
    /// return the tail page (the one whose `next_page` is 0).
    fn get_new_db_meta_page(&self, page_index: u32) -> Rc<RefCell<Page>> {
        let mut pager_manager = self.pager_manager.deref().borrow_mut();
        let mut index = page_index;
        loop {
            let page = pager_manager.get_page(index).unwrap();
            let next = page.deref().borrow().next_page();
            if next == 0 {
                return page;
            }
            // Advance along the chain. The original re-fetched the SAME index
            // forever whenever `next_page() != 0` — an infinite loop.
            index = next;
        }
    }

    /// Create a new table backed by a fresh B-tree and register it both on
    /// disk (catalogue entry) and in the in-memory table map.
    pub fn create_table(
        &mut self,
        key: Data_item_info,
        value: Data_item_info,
        name: String,
    ) -> Result<()> {
        let btree = BTree::new(self.pager_manager.clone(), key, value);
        self.add_db_table((&name, btree.meta_page_index));
        self.table_btrees
            .insert(name, Rc::new(RefCell::new(btree)));
        Ok(())
    }

    /// Insert a `(key, value)` record into table `name`.
    /// Returns `Err(())` when the table does not exist.
    pub fn insert_record(&mut self, key: Data_item, value: Data_item, name: &str) -> Result<(), ()> {
        let table = self.table_btrees.get(name).ok_or(())?;
        table.deref().borrow_mut().set(&key, &value);
        Ok(())
    }

    /// Update the record stored under `key` in table `name` (same B-tree
    /// upsert as `insert_record`). Returns `Err(())` for an unknown table.
    pub fn update_record(&mut self, key: Data_item, value: Data_item, name: &str) -> Result<(), ()> {
        let table = self.table_btrees.get(name).ok_or(())?;
        table.deref().borrow_mut().set(&key, &value);
        Ok(())
    }

    /// Look up a record by primary key. Returns `Err(())` when the table or
    /// the key is missing (the original panicked on a missing key).
    pub fn query_by_primary_key(
        &self,
        key: &Data_item,
        name: &str,
    ) -> Result<(Data_item, Data_item), ()> {
        let table = self.table_btrees.get(name).ok_or(())?;
        let value = table.deref().borrow_mut().get(key).ok_or(())?;
        Ok((key.clone(), value))
    }

    /// Delete the record stored under `key`. TODO: not implemented yet.
    pub fn delete_record(&mut self, key: Data_item, value: Data_item, name: &str) {
    }

    /// Full table scan. TODO: not implemented yet; always returns `Err(())`.
    pub fn query_all_record(&self, name: &str) -> Result<Vec<(Data_item, Data_item)>, ()> {
        Err(())
    }

    /// Secondary-index lookup. TODO: not implemented yet; always `Err(())`.
    pub fn query_by_index_key(&self, key: Data_item, name: &str) -> Result<(Data_item, Data_item), ()> {
        Err(())
    }

    /// Expose the key/value schema (`Page_info`) of table `name` so callers
    /// can build `Data_item`s matching its layout.
    pub fn get_data_item_info(&mut self, name: &str) -> Result<Rc<RefCell<Page_info>>, ()> {
        let table = self.table_btrees.get(name).ok_or(())?;
        let info = table.deref().borrow_mut().get_info();
        Ok(info)
    }

    /// Create a secondary index. TODO: not implemented yet; succeeds as a no-op.
    pub fn create_index(&mut self, name: String, key: String) -> Result<()> {
        Ok(())
    }
}
#[cfg(test)]
mod test{
use crate::{wwcdb, DATATYPE};
use crate::data_item::{Data_item_info, Data_item};
use crate::db_str::Db_str;
use std::ops::Deref;
#[test]
fn test_create_table() {
let mut db = wwcdb::wwc_db::open("create_table.db");
let mut key = Data_item_info::new();
let mut value = Data_item_info::new();
key.add((DATATYPE::U32(0),String::from("student_id")));
//主键
let name = Db_str::new_container(12);
value.add((DATATYPE::STR(name),String::from("name")));
value.add((DATATYPE::F32(0.0),String::from("score")));
//纪录体
db.create_table(key,value,String::from("student_table"));
println!("{:?}",db.table_btrees);
}
#[test]
fn open_exist_file() {
let mut db = wwcdb::wwc_db::open("create_table.db");
//刚刚创建的文件
// let mut key = Data_item_info::new();
// let mut value = Data_item_info::new();
// key.add((DATATYPE::U32(0),String::from("student_id")));
// //主键
// let name = Db_str::new_container(12);
// value.add((DATATYPE::STR(name),String::from("name")));
// value.add((DATATYPE::F32(0.0),String::from("score")));
// //纪录体
// db.create_table(key,value,String::from("student_table"));
println!("{:?}",db.table_btrees);
}
#[test]
fn insert_record() {
let mut db = wwcdb::wwc_db::open("create_table.db");
let res = db.get_data_item_info("student_table").unwrap();
assert!(res.deref().borrow_mut()._key.set(DATATYPE::U32(56),"student_id"));
let key = res.deref().borrow_mut()._key.get_data_item();
assert!(res.deref().borrow_mut()._value.set(DATATYPE::STR(Db_str::new("xiaoming",12)),"name"));
println!("44:{:?}",res.deref().borrow_mut()._value);
assert!(res.deref().borrow_mut()._value.set(DATATYPE::F32(78.4),"score"));
let value = res.deref().borrow_mut()._value.get_data_item();
db.insert_record(key,value,"student_table");
}
#[test]
fn query_record_by_promary_key() {
let mut db = wwcdb::wwc_db::open("create_table.db");
let res = db.get_data_item_info("student_table").unwrap();
assert!(res.deref().borrow_mut()._key.set(DATATYPE::U32(56),"student_id"));
let key = res.deref().borrow_mut()._key.get_data_item();
let value = res.deref().borrow_mut()._value.get_data_item();
db.query_by_primary_key(&key,"student_table");
}
#[test]
fn update_record() {
//更新小明的成绩为99.9分
let mut db = wwcdb::wwc_db::open("create_table.db");
let res = db.get_data_item_info("student_table").unwrap();
assert!(res.deref().borrow_mut()._key.set(DATATYPE::U32(56),"student_id"));
let key = res.deref().borrow_mut()._key.get_data_item();
assert!(res.deref().borrow_mut()._value.clear_value().set(DATATYPE::F32(99.9),"score"));
//在不知道小明名字的情况下,根据键值,只改了分数
let update_value = res.deref().borrow_mut()._value.get_data_item();
db.update_record(key,update_value,"student_table");
}
} |
use itertools::Itertools;
static FILE: &str = include_str!("../inputs/5.txt");
lazy_static! {
    // Boarding passes are binary-space-partitioning codes: 'F'/'B' narrow the
    // row range [0, 128) to its lower/upper half, 'L'/'R' do the same for the
    // column range [0, 8). Seat ID = row * 8 + col. The IDs are sorted so
    // part 2 can find the single gap in the sequence.
    static ref SEAT_IDS: Vec<usize> = FILE
        .lines()
        .map(|s| {
            let mut row = 0..128;
            let mut col = 0..8;
            for c in s.chars() {
                match c {
                    // Keep the lower half of the row range.
                    'F' => row = row.start..(row.end - (row.end - row.start) / 2),
                    // Keep the upper half of the row range.
                    'B' => row = (row.start + (row.end - row.start) / 2)..row.end,
                    'L' => col = col.start..(col.end - (col.end - col.start) / 2),
                    'R' => col = (col.start + (col.end - col.start) / 2)..col.end,
                    _ => {}
                }
            }
            // Both ranges should have collapsed to a single value here —
            // assumes well-formed 7+3 character passes (TODO confirm input).
            row.start * 8 + col.start
        })
        .sorted()
        .collect();
}
/// Part 1: print the highest seat ID on any boarding pass.
pub(crate) fn part1() {
    println!("day 5 part 1");
    // SEAT_IDS is sorted ascending, so the maximum is the final element.
    let max_id = SEAT_IDS.last().unwrap();
    println!("Highest seat ID: {:#?}", max_id);
}
/// Part 2: find the one missing ID strictly between the lowest and highest
/// seat IDs — the first place where the sorted list stops matching the
/// expected consecutive sequence.
pub(crate) fn part2() {
    println!("day 5 part 2");
    let lowest = *SEAT_IDS.first().unwrap();
    let highest = *SEAT_IDS.last().unwrap();
    // Pair each expected consecutive ID with the actual sorted entry; the
    // first mismatch is the gap left by our seat.
    let gap = (lowest..highest)
        .zip(SEAT_IDS.iter())
        .find(|&(expected, &actual)| expected != actual);
    if let Some((seat, _)) = gap {
        println!("Your seat ID is {}", seat);
    }
}
|
// A disk will be how the file system interacts with the underlying file. A disk instance is created
// when the file is mounted and will open the file for read/write. The file will stay opened through
// the disk structure as long as the file system is mounted. The fields in the disk structure listed
use serde_json::ser::to_string;
use std::fs::File;
use std::io::Read;
use std::fs;
use std::path::Path;
use std::ffi::OsStr;
use std::io::BufReader;
use std::io::prelude::*;
mod block;
// here can be added to the diagnostics output to show the number of block reads/writes
// (otherwise they aren’t needed).
// In-memory view of the backing "disk" file plus mount state and the
// read/write counters mentioned in the diagnostics note above.
pub struct Disk
{
    // One entry per line of the backing file.
    pub disk_content: Vec<String>,
    // Path of the backing file, set by `open`.
    pub file: String,
    // True once `open` has successfully loaded the file.
    pub mounted: bool,
    // Block-read / block-write counters for diagnostics output.
    pub reads: i128,
    pub writes: i128,
}
impl Disk {
    /// Create an unmounted disk with no backing file and zeroed statistics.
    pub fn new() -> Disk {
        Disk {
            disk_content: Vec::<String>::new(),
            file: "".to_string(),
            mounted: false,
            reads: 0,
            writes: 0,
        }
    }

    // Open the disk parameter. If successful and the superblock is valid, the file
    // system is mounted and the superblock instance is available. The superblock
    // instance stored in memory after reading from block 0 should have a list of all
    // free inodes and blocks from the disk (see design)
    //
    // Returns true on success; false (and stays unmounted) when the file
    // cannot be read.
    pub fn open(&mut self, f: &std::string::String) -> bool {
        self.file = f.clone();
        match std::fs::read_to_string(f) {
            Ok(contents) => {
                // Re-opening must not append to stale content from a previous
                // mount (the original never cleared the vector). Also drops
                // the leftover debug println that dumped the whole disk.
                self.disk_content.clear();
                self.disk_content
                    .extend(contents.lines().map(|line| line.to_string()));
                self.mounted = true;
                true
            }
            Err(_) => false,
        }
    }

    // Close the disk. All data write operations must be completed. If successful, the
    // file system is unmounted.
    //
    // TODO: not implemented yet; always reports failure.
    pub fn close() -> bool {
        false
    }

    // Write the block parameter to the given block id on the given disk. Return true if
    // successful and false if not
    //
    // TODO: not implemented yet; always reports failure.
    pub fn write(blockID: i64, b: block::Block) -> bool {
        false
    }
}
fn main() {
// println!("Hello, world!");
// let mut file = File::open("D:/GIT/projects/CS-309-FinalProject/PseudoFS/src/test.txt").expect("Can't open");
let temp = "D:/GIT/projects/CS-309-FinalProject/PseudoFS/src/test.txt";
let mut disk = Disk::new();
// let mut contents = String::new();
// file.read_to_string(&mut contents).expect("Oops can't read file");
// println!("File Contents:\n\n{} ", contents);
disk.open(&temp.to_string());
} |
#[doc = "Register `APB2RSTR` reader"]
pub type R = crate::R<APB2RSTR_SPEC>;
#[doc = "Register `APB2RSTR` writer"]
pub type W = crate::W<APB2RSTR_SPEC>;
#[doc = "Field `TIM1RST` reader - TIM1 reset"]
pub type TIM1RST_R = crate::BitReader<TIM1RST_A>;
#[doc = "TIM1 reset\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
// Only the "reset the module" action (bit value 1) is enumerated;
// bit value 0 has no named meaning for these fields.
pub enum TIM1RST_A {
    #[doc = "1: Reset the selected module"]
    Reset = 1,
}
impl From<TIM1RST_A> for bool {
    #[inline(always)]
    fn from(variant: TIM1RST_A) -> Self {
        // The single variant `Reset` has discriminant 1, so this is
        // always true in practice.
        variant as u8 != 0
    }
}
impl TIM1RST_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<TIM1RST_A> {
        // `None` when the bit is 0, since only the set state is enumerated.
        match self.bits {
            true => Some(TIM1RST_A::Reset),
            _ => None,
        }
    }
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn is_reset(&self) -> bool {
        *self == TIM1RST_A::Reset
    }
}
#[doc = "Field `TIM1RST` writer - TIM1 reset"]
// Generic over the register spec and the bit offset `O`, so the same proxy
// type serves every reset field in this register (see the re-exports below).
pub type TIM1RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, TIM1RST_A>;
impl<'a, REG, const O: u8> TIM1RST_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Reset the selected module"]
    #[inline(always)]
    pub fn reset(self) -> &'a mut crate::W<REG> {
        self.variant(TIM1RST_A::Reset)
    }
}
// Every APB2RSTR bit field has identical single-variant "Reset" semantics,
// so the TIM1RST reader/writer types are simply re-exported under each
// field's own name.
#[doc = "Field `TIM8RST` reader - TIM8 reset"]
pub use TIM1RST_R as TIM8RST_R;
#[doc = "Field `USART1RST` reader - USART1 reset"]
pub use TIM1RST_R as USART1RST_R;
#[doc = "Field `USART6RST` reader - USART6 reset"]
pub use TIM1RST_R as USART6RST_R;
#[doc = "Field `ADCRST` reader - ADC interface reset (common to all ADCs)"]
pub use TIM1RST_R as ADCRST_R;
#[doc = "Field `SDIORST` reader - SDIO reset"]
pub use TIM1RST_R as SDIORST_R;
#[doc = "Field `SPI1RST` reader - SPI 1 reset"]
pub use TIM1RST_R as SPI1RST_R;
#[doc = "Field `SPI4RST` reader - SPI4 reset"]
pub use TIM1RST_R as SPI4RST_R;
#[doc = "Field `SYSCFGRST` reader - System configuration controller reset"]
pub use TIM1RST_R as SYSCFGRST_R;
#[doc = "Field `TIM9RST` reader - TIM9 reset"]
pub use TIM1RST_R as TIM9RST_R;
#[doc = "Field `TIM10RST` reader - TIM10 reset"]
pub use TIM1RST_R as TIM10RST_R;
#[doc = "Field `TIM11RST` reader - TIM11 reset"]
pub use TIM1RST_R as TIM11RST_R;
#[doc = "Field `SPI5RST` reader - SPI5 reset"]
pub use TIM1RST_R as SPI5RST_R;
#[doc = "Field `SPI6RST` reader - SPI6 reset"]
pub use TIM1RST_R as SPI6RST_R;
#[doc = "Field `SAI1RST` reader - SAI1 reset"]
pub use TIM1RST_R as SAI1RST_R;
#[doc = "Field `LTDCRST` reader - LTDC reset"]
pub use TIM1RST_R as LTDCRST_R;
#[doc = "Field `TIM8RST` writer - TIM8 reset"]
pub use TIM1RST_W as TIM8RST_W;
#[doc = "Field `USART1RST` writer - USART1 reset"]
pub use TIM1RST_W as USART1RST_W;
#[doc = "Field `USART6RST` writer - USART6 reset"]
pub use TIM1RST_W as USART6RST_W;
#[doc = "Field `ADCRST` writer - ADC interface reset (common to all ADCs)"]
pub use TIM1RST_W as ADCRST_W;
#[doc = "Field `SDIORST` writer - SDIO reset"]
pub use TIM1RST_W as SDIORST_W;
#[doc = "Field `SPI1RST` writer - SPI 1 reset"]
pub use TIM1RST_W as SPI1RST_W;
#[doc = "Field `SPI4RST` writer - SPI4 reset"]
pub use TIM1RST_W as SPI4RST_W;
#[doc = "Field `SYSCFGRST` writer - System configuration controller reset"]
pub use TIM1RST_W as SYSCFGRST_W;
#[doc = "Field `TIM9RST` writer - TIM9 reset"]
pub use TIM1RST_W as TIM9RST_W;
#[doc = "Field `TIM10RST` writer - TIM10 reset"]
pub use TIM1RST_W as TIM10RST_W;
#[doc = "Field `TIM11RST` writer - TIM11 reset"]
pub use TIM1RST_W as TIM11RST_W;
#[doc = "Field `SPI5RST` writer - SPI5 reset"]
pub use TIM1RST_W as SPI5RST_W;
#[doc = "Field `SPI6RST` writer - SPI6 reset"]
pub use TIM1RST_W as SPI6RST_W;
#[doc = "Field `SAI1RST` writer - SAI1 reset"]
pub use TIM1RST_W as SAI1RST_W;
#[doc = "Field `LTDCRST` writer - LTDC reset"]
pub use TIM1RST_W as LTDCRST_W;
impl R {
    // Field readers: each extracts one bit of the captured APB2RSTR value at
    // the position stated in its doc attribute.
    #[doc = "Bit 0 - TIM1 reset"]
    #[inline(always)]
    pub fn tim1rst(&self) -> TIM1RST_R {
        TIM1RST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - TIM8 reset"]
    #[inline(always)]
    pub fn tim8rst(&self) -> TIM8RST_R {
        TIM8RST_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 4 - USART1 reset"]
    #[inline(always)]
    pub fn usart1rst(&self) -> USART1RST_R {
        USART1RST_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - USART6 reset"]
    #[inline(always)]
    pub fn usart6rst(&self) -> USART6RST_R {
        USART6RST_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 8 - ADC interface reset (common to all ADCs)"]
    #[inline(always)]
    pub fn adcrst(&self) -> ADCRST_R {
        ADCRST_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 11 - SDIO reset"]
    #[inline(always)]
    pub fn sdiorst(&self) -> SDIORST_R {
        SDIORST_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - SPI 1 reset"]
    #[inline(always)]
    pub fn spi1rst(&self) -> SPI1RST_R {
        SPI1RST_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - SPI4 reset"]
    #[inline(always)]
    pub fn spi4rst(&self) -> SPI4RST_R {
        SPI4RST_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - System configuration controller reset"]
    #[inline(always)]
    pub fn syscfgrst(&self) -> SYSCFGRST_R {
        SYSCFGRST_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 16 - TIM9 reset"]
    #[inline(always)]
    pub fn tim9rst(&self) -> TIM9RST_R {
        TIM9RST_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - TIM10 reset"]
    #[inline(always)]
    pub fn tim10rst(&self) -> TIM10RST_R {
        TIM10RST_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - TIM11 reset"]
    #[inline(always)]
    pub fn tim11rst(&self) -> TIM11RST_R {
        TIM11RST_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 20 - SPI5 reset"]
    #[inline(always)]
    pub fn spi5rst(&self) -> SPI5RST_R {
        SPI5RST_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - SPI6 reset"]
    #[inline(always)]
    pub fn spi6rst(&self) -> SPI6RST_R {
        SPI6RST_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - SAI1 reset"]
    #[inline(always)]
    pub fn sai1rst(&self) -> SAI1RST_R {
        SAI1RST_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 26 - LTDC reset"]
    #[inline(always)]
    pub fn ltdcrst(&self) -> LTDCRST_R {
        LTDCRST_R::new(((self.bits >> 26) & 1) != 0)
    }
}
impl W {
    // Field writers: each returns a bit-writer proxy parameterised with the
    // field's bit offset; chain them and finish with the register store.
    #[doc = "Bit 0 - TIM1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim1rst(&mut self) -> TIM1RST_W<APB2RSTR_SPEC, 0> {
        TIM1RST_W::new(self)
    }
    #[doc = "Bit 1 - TIM8 reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim8rst(&mut self) -> TIM8RST_W<APB2RSTR_SPEC, 1> {
        TIM8RST_W::new(self)
    }
    #[doc = "Bit 4 - USART1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn usart1rst(&mut self) -> USART1RST_W<APB2RSTR_SPEC, 4> {
        USART1RST_W::new(self)
    }
    #[doc = "Bit 5 - USART6 reset"]
    #[inline(always)]
    #[must_use]
    pub fn usart6rst(&mut self) -> USART6RST_W<APB2RSTR_SPEC, 5> {
        USART6RST_W::new(self)
    }
    #[doc = "Bit 8 - ADC interface reset (common to all ADCs)"]
    #[inline(always)]
    #[must_use]
    pub fn adcrst(&mut self) -> ADCRST_W<APB2RSTR_SPEC, 8> {
        ADCRST_W::new(self)
    }
    #[doc = "Bit 11 - SDIO reset"]
    #[inline(always)]
    #[must_use]
    pub fn sdiorst(&mut self) -> SDIORST_W<APB2RSTR_SPEC, 11> {
        SDIORST_W::new(self)
    }
    #[doc = "Bit 12 - SPI 1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn spi1rst(&mut self) -> SPI1RST_W<APB2RSTR_SPEC, 12> {
        SPI1RST_W::new(self)
    }
    #[doc = "Bit 13 - SPI4 reset"]
    #[inline(always)]
    #[must_use]
    pub fn spi4rst(&mut self) -> SPI4RST_W<APB2RSTR_SPEC, 13> {
        SPI4RST_W::new(self)
    }
    #[doc = "Bit 14 - System configuration controller reset"]
    #[inline(always)]
    #[must_use]
    pub fn syscfgrst(&mut self) -> SYSCFGRST_W<APB2RSTR_SPEC, 14> {
        SYSCFGRST_W::new(self)
    }
    #[doc = "Bit 16 - TIM9 reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim9rst(&mut self) -> TIM9RST_W<APB2RSTR_SPEC, 16> {
        TIM9RST_W::new(self)
    }
    #[doc = "Bit 17 - TIM10 reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim10rst(&mut self) -> TIM10RST_W<APB2RSTR_SPEC, 17> {
        TIM10RST_W::new(self)
    }
    #[doc = "Bit 18 - TIM11 reset"]
    #[inline(always)]
    #[must_use]
    pub fn tim11rst(&mut self) -> TIM11RST_W<APB2RSTR_SPEC, 18> {
        TIM11RST_W::new(self)
    }
    #[doc = "Bit 20 - SPI5 reset"]
    #[inline(always)]
    #[must_use]
    pub fn spi5rst(&mut self) -> SPI5RST_W<APB2RSTR_SPEC, 20> {
        SPI5RST_W::new(self)
    }
    #[doc = "Bit 21 - SPI6 reset"]
    #[inline(always)]
    #[must_use]
    pub fn spi6rst(&mut self) -> SPI6RST_W<APB2RSTR_SPEC, 21> {
        SPI6RST_W::new(self)
    }
    #[doc = "Bit 22 - SAI1 reset"]
    #[inline(always)]
    #[must_use]
    pub fn sai1rst(&mut self) -> SAI1RST_W<APB2RSTR_SPEC, 22> {
        SAI1RST_W::new(self)
    }
    #[doc = "Bit 26 - LTDC reset"]
    #[inline(always)]
    #[must_use]
    pub fn ltdcrst(&mut self) -> LTDCRST_W<APB2RSTR_SPEC, 26> {
        LTDCRST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe escape hatch: bypasses the per-field proxies entirely.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "APB2 peripheral reset register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`apb2rstr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`apb2rstr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Zero-sized marker type tying the generic register machinery to APB2RSTR.
pub struct APB2RSTR_SPEC;
impl crate::RegisterSpec for APB2RSTR_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`apb2rstr::R`](R) reader structure"]
impl crate::Readable for APB2RSTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`apb2rstr::W`](W) writer structure"]
impl crate::Writable for APB2RSTR_SPEC {
    // No bits in this register are write-0/write-1-to-modify special cases.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets APB2RSTR to value 0"]
impl crate::Resettable for APB2RSTR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! Type definitions for `<metal_common>`.
use super::*;
// Rust counterparts of the `<metal_common>` shader functions, expressed as a
// blanket trait so they work on scalars and vector types alike.
pub trait Common: Sized {
    // Called clamped instead of clamp because std has claimed
    // the name.
    /// Clamp `self` into `[minval, maxval]` via `max` followed by `min`.
    fn clamped<U, V, R>(self, minval: U, maxval: V) -> R
    where
        Self: MinMax<U, R>,
        R: MinMax<V, R>,
    {
        self.max(minval).min(maxval)
    }
    /// Linear interpolation with `self` as the blend factor:
    /// `x + (y - x) * self`. Note the argument order — the factor is the
    /// receiver, unlike Metal's `mix(x, y, t)`.
    fn mix<Tx, Ty, Tr>(self, x: Tx, y: Ty) -> Tr
    where
        Tx: Copy,
        Ty: Op<Tx, Tr>,
        Tr: Op<Self, Tr>,
        Tx: Op<Tr, Tr>,
    {
        x + (y - x) * self
    }
    /// Clamp to the unit interval `[0.0, 1.0]`.
    fn saturate(self) -> Self
    where
        Self: MinMax<f32, Self>,
    {
        self.clamped(0.0, 1.0)
    }
    /// Component-wise sign via `num::signum`.
    fn sign<V>(self) -> Self
    where
        Self: Map<V>,
        V: num::Signed,
    {
        self.map(num::signum)
    }
    /// Hermite smoothstep: normalise `self` between the two edges, clamp to
    /// [0, 1], then apply `t * t * (3 - 2t)`.
    fn smoothstep<Tx, Ty, Tr>(self, edge0: Tx, edge1: Ty) -> Tr
    where
        Tx: Copy,
        Tr: Copy,
        Ty: Op<Tx, Tr>,
        Self: Op<Tx, Tr>,
        Tr: Op,
        Tr: MinMax,
        Tr: std::convert::From<f32>,
    {
        let t = ((self - edge0) / (edge1 - edge0)).clamped(0.0.into(), 1.0.into());
        t * t * (Tr::from(3.0) - Tr::from(2.0) * t)
    }
    /// Step function built from `sign(self - edge)` clamped from above by 0.
    /// NOTE(review): this yields -1 below the edge and 0 at/above it, not
    /// Metal's 0/1 convention — confirm this asymmetry is intended.
    fn step<Te, Tr, V>(self, edge: Te) -> Tr
    where
        Self: Op<Te, Tr>,
        Tr: Map<V>,
        V: PartialOrd,
        V: num::Signed,
        V: std::convert::From<f32>,
    {
        (self - edge)
            .sign()
            .map(|value| vek::partial_min(value, V::from(0.0)))
    }
}
// Blanket impl: every type gets the methods; the per-method bounds decide
// which calls actually compile.
impl<T> Common for T {}
#[test]
fn test_clamp() {
    // Compile/behaviour smoke test: scalar, vector, and mixed-type clamping.
    1.0.clamped(0.2, 0.3);
    1.0.vec3().clamped(0.2.vec3(), 0.3.vec3());
    1.0.vec3().clamped(0.2, 0.3.vec3());
}
#[test]
fn test_mix() {
    // Scalar and vector interpolation; the scalar-factor/vector-operand mix
    // is intentionally unsupported by the trait bounds.
    1.0.mix(0.2, 0.3);
    1.0.vec3().mix(0.2.vec3(), 0.3.vec3());
    // Not supported: 1.0.vec3().mix(0.2, 0.3.vec3());
}
#[test]
fn test_saturate() {
    // Smoke tests for saturate and sign on scalars and vectors.
    1.0.saturate();
    1.0.vec3().saturate();
    // sign
    1.0.sign();
    1.0.vec3().sign();
}
#[test]
fn test_smoothstep() {
    1.0f32.smoothstep(0.2, 0.3); // Rust defaults to f64 for which this is not implemented
    1.0.vec3().smoothstep(0.2.vec3(), 0.3.vec3());
    1.0.vec3().smoothstep(0.2, 0.3.vec3());
}
#[test]
fn test_step() {
    1.0f32.step(0.3); // Rust defaults to f64 for which this is not implemented
    1.0.vec3().step(0.3.vec3());
    1.0.vec3().step(0.3);
}
use std::ops::*;
// Shorthand bound: "supports +, -, /, * against T, producing R".
// The blanket impl makes it automatic for any type with those operators.
pub trait Op<T = Self, R = T>:
    Sized + Add<T, Output = R> + Sub<T, Output = R> + Div<T, Output = R> + Mul<T, Output = R>
{
}
impl<S, T, R> Op<T, R> for S where
    S: Sized + Add<T, Output = R> + Sub<T, Output = R> + Div<T, Output = R> + Mul<T, Output = R>
{
}
|
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(unknown_lints)]
#![allow(clippy::all)]
pub mod core {
    // Contract stubs mirroring `core::cmp` for a static analyzer; the
    // `result!` macro (supplied by the analyzer — presumably MIRAI) stands in
    // for an unconstrained return value.
    pub mod cmp {
        pub enum Ordering {
            /// An ordering where a compared value is less than another.
            Less = -1,
            /// An ordering where a compared value is equal to another.
            Equal = 0,
            /// An ordering where a compared value is greater than another.
            Greater = 1,
        }
        pub trait PartialOrd<Rhs: ?Sized = Self> {
            // Monomorphised contract for `<` on `&i32` operands: fully
            // modelled, so the analyzer gets the exact comparison result.
            fn lt__ref_i32_ref_i32(x: &i32, y: &i32) -> bool {
                (*x) < (*y)
            }
        }
        pub trait Ord {
            // Unmodelled: any Ordering may be returned.
            fn cmp<T>(_a: &T, _b: &T) -> Ordering {
                result!()
            }
        }
    }
    // Contract stub for `core::default::Default`: the default value is left
    // unconstrained for the analyzer.
    pub mod default {
        pub trait Default {
            fn default__T() {
                result!()
            }
        }
    }
    // Shape-only stand-ins for `core::fmt`: the types carry no data beyond a
    // lifetime (via PhantomData) and the constructors return unconstrained
    // values, so formatting is treated as opaque by the analyzer.
    pub mod fmt {
        use std::marker::PhantomData;
        pub mod rt {
            pub mod v1 {
                pub struct Argument {}
            }
        }
        pub struct ArgumentV1<'a> {
            phantom: PhantomData<&'a str>,
        }
        // Contract for `ArgumentV1::new`.
        pub mod implement_1 {
            use crate::foreign_contracts::core::fmt::ArgumentV1;
            use crate::foreign_contracts::core::fmt::Formatter;
            use crate::foreign_contracts::core::fmt::Result;
            pub fn new<'b, T>(
                _x: &'b T,
                _f: fn(&T, &mut Formatter<'_>) -> Result,
            ) -> ArgumentV1<'b> {
                result!()
            }
        }
        pub struct Arguments<'a> {
            phantom: PhantomData<&'a str>,
        }
        // Contract for `Arguments::new_v1`.
        pub mod implement_2 {
            use crate::foreign_contracts::core::fmt::ArgumentV1;
            use crate::foreign_contracts::core::fmt::Arguments;
            pub fn new_v1<'a>(
                _pieces: &'a [&'a str],
                _args: &'a [ArgumentV1<'a>],
            ) -> Arguments<'a> {
                result!()
            }
        }
        pub struct Formatter<'a> {
            phantom: PhantomData<&'a str>,
        }
        pub struct Result {}
        pub struct Void {}
    }
    // Contract model of `core::option::Option`: the discriminant queries are
    // fully modelled, while `unwrap` only records its precondition and leaves
    // the returned value unconstrained.
    pub mod option {
        pub enum Option<T> {
            None,
            Some(T),
        }
        impl<T> Option<T> {
            pub fn is_none(&self) -> bool {
                match self {
                    Self::None => true,
                    _ => false,
                }
            }
            pub fn is_some(&self) -> bool {
                match self {
                    Self::None => false,
                    _ => true,
                }
            }
            pub fn unwrap(self) -> T {
                // The analyzer checks this precondition at every call site.
                precondition!(self.is_some(), "self may not be None");
                result!()
            }
        }
        pub mod implement_5 {
            use crate::foreign_contracts::core::option::Option;
            // Fully modelled: falls back to the type's default on None.
            pub fn unwrap_or_default<T: Default>(v: Option<T>) -> T {
                match v {
                    Option::None => Default::default(),
                    Option::Some(v) => v,
                }
            }
        }
    }
pub mod ops {
pub mod range {
pub mod implement_12 {
    // Model of `core::ops::Range<usize>`.
    pub struct Range_usize {
        pub start: usize,
        pub end: usize,
    }
    // Model of `core::ops::RangeInclusive<usize>`.
    pub struct RangeInclusive_usize {
        pub start: usize,
        pub end: usize,
        // Tri-state emptiness flag mirroring the std layout:
        //   None        — `next()`/`next_back()` never called yet
        //   Some(false) — known non-empty (`start <= end`, no overflow)
        //   Some(true)  — known empty
        // A plain `bool` would not work: the `..=` constructor accepts
        // non-`PartialOrd` types and must stay `const`.
        pub is_empty: Option<bool>,
    }
    // Model of the `start..=end` constructor.
    pub fn new__usize(start: usize, end: usize) -> RangeInclusive_usize {
        RangeInclusive_usize {
            is_empty: None,
            start,
            end,
        }
    }
    // Resolve the tri-state `is_empty` flag to a concrete value if it is
    // still unknown; a previously computed value is kept as-is.
    pub fn compute_is_empty__usize(range: &mut RangeInclusive_usize) {
        if range.is_empty.is_some() {
            return;
        }
        range.is_empty = Some(range.start > range.end);
    }
}
}
pub mod deref {
    // Contract models of `Vec<T> -> &[T]` deref coercion: the slice content
    // is abstract, but its length is assumed equal to the vector's length.
    pub trait Deref {
        fn deref__alloc_vec_Vec_i32(vec: &Vec<i32>) -> &[i32] {
            let old_len = vec.len();
            let res: &[i32] = result!();
            // Length preservation is the whole contract here.
            assume!(res.len() == old_len);
            res
        }
        fn deref__alloc_vec_Vec_u32(vec: &Vec<u32>) -> &[u32] {
            let old_len = vec.len();
            let res: &[u32] = result!();
            assume!(res.len() == old_len);
            res
        }
    }
}
}
pub mod iter {
pub mod adapters {
    use crate::foreign_contracts::core::ops::range::implement_12::Range_usize;
    use crate::foreign_contracts::core::slice::Iter;
    // Model of `core::iter::Enumerate` specialized to a slice iterator.
    pub struct Enumerator_slice<'a, T: 'a> {
        pub iterator: Iter<'a, T>,
    }
    // Model of `core::iter::Rev` specialized to `Range<usize>`.
    pub struct Rev__Range_usize {
        pub range: Range_usize,
    }
}
pub mod traits {
pub mod collect {
    use crate::foreign_contracts::core::iter::adapters::Enumerator_slice;
    use crate::foreign_contracts::core::ops::range::implement_12::RangeInclusive_usize;
    use crate::foreign_contracts::core::ops::range::implement_12::Range_usize;
    // Contract models of `IntoIterator::into_iter` for the modeled types;
    // each is the identity because the model types are already iterators.
    pub trait IntoIterator {
        fn into_iter__core_iter_adapters_Enumerate_core_slice_Iter_bool(
            slice: Enumerator_slice<bool>,
        ) -> Enumerator_slice<bool> {
            slice
        }
        fn into_iter__core_ops_range_Range_usize(range: Range_usize) -> Range_usize {
            range
        }
        fn into_iter__core_ops_range_RangeInclusive_usize(
            range: RangeInclusive_usize,
        ) -> RangeInclusive_usize {
            range
        }
    }
}
pub mod iterator {
    use crate::foreign_contracts::core::iter::adapters::Enumerator_slice;
    use crate::foreign_contracts::core::iter::adapters::Rev__Range_usize;
    use crate::foreign_contracts::core::ops::range::implement_12::compute_is_empty__usize;
    use crate::foreign_contracts::core::ops::range::implement_12::RangeInclusive_usize;
    use crate::foreign_contracts::core::ops::range::implement_12::Range_usize;
    use crate::foreign_contracts::core::slice::Iter;
    // Contract models of `Iterator` combinators for the modeled types.
    pub trait Iterator {
        // `slice.iter().enumerate()`: wraps the slice iterator.
        fn enumerate__core_slice_Iter_bool(iter: Iter<bool>) -> Enumerator_slice<bool> {
            Enumerator_slice { iterator: iter }
        }
        // `next()` on an enumerated bool-slice iterator: yields
        // `(index, element)` and advances, or `None` past the end.
        fn next__core_iter_adapters_Enumerate_core_slice_Iter_bool(
            mut slice: &mut Enumerator_slice<bool>,
        ) -> Option<(usize, bool)> {
            let i = slice.iterator.index;
            let collection = slice.iterator.collection;
            if i < collection.len() {
                slice.iterator.index += 1;
                Some((i, collection[i]))
            } else {
                None
            }
        }
        // `next()` on `start..end`: yields `start` and increments it.
        fn next__core_ops_range_Range_usize(
            mut range: &mut Range_usize,
        ) -> Option<usize> {
            if range.start < range.end {
                let n = range.start;
                range.start = n + 1;
                Some(n)
            } else {
                None
            }
        }
        // `next()` on `start..=end`: mirrors the std implementation,
        // including the tri-state `is_empty` bookkeeping.
        fn next__core_ops_range_RangeInclusive_usize(
            mut range: &mut RangeInclusive_usize,
        ) -> Option<usize> {
            compute_is_empty__usize(&mut range);
            if range.is_empty.unwrap_or_default() {
                return None;
            }
            let is_iterating = range.start < range.end;
            range.is_empty = Some(!is_iterating);
            Some(if is_iterating {
                let n = range.start;
                range.start = n + 1;
                n
            } else {
                // Last element: `start == end` is yielded once.
                range.start
            })
        }
        // `next_back()` on `start..end`: decrements `end` and yields it.
        fn next_back__core_ops_range_Range_usize(
            range: &mut Range_usize,
        ) -> Option<usize> {
            if range.start < range.end {
                range.end -= 1;
                Some(range.end)
            } else {
                None
            }
        }
        // `next()` on a reversed range delegates to `next_back()`.
        fn next__core_iter_adapters_Rev_core_ops_range_Range_usize(
            rev: &mut Rev__Range_usize,
        ) -> Option<usize> {
            Self::next_back__core_ops_range_Range_usize(&mut rev.range)
        }
        // `range.rev()`: wraps the range.
        fn rev__core_ops_range_Range_usize(range: Range_usize) -> Rev__Range_usize {
            Rev__Range_usize { range }
        }
    }
}
}
}
pub mod slice {
    use crate::foreign_contracts::core::iter::adapters::Enumerator_slice;
    // Model of `core::slice::Iter`: the underlying slice plus a cursor.
    pub struct Iter<'a, T: 'a> {
        pub collection: &'a [T],
        pub index: usize,
    }
    impl<'a, T: 'a> Iter<'a, T> {
        // `iter.enumerate()`: wraps this iterator.
        pub fn enumerate(self) -> Enumerator_slice<'a, T> {
            Enumerator_slice { iterator: self }
        }
    }
    pub mod implement {
        use crate::foreign_contracts::core::slice::Iter;
        // `slice.iter()` for `&[bool]`: cursor starts at 0.
        pub fn iter__slice_bool(collection: &[bool]) -> Iter<bool> {
            Iter {
                collection,
                index: 0,
            }
        }
        // `slice.len()` for `&[bool]`.
        pub fn len__slice_bool(collection: &[bool]) -> usize {
            collection.len()
        }
        // `slice.get(index)` for `&[u32]`: bounds-checked lookup.
        pub fn get__u32_usize(collection: &[u32], index: usize) -> Option<&u32> {
            if index >= collection.len() {
                None
            } else {
                Some(&collection[index])
            }
        }
    }
}
pub mod usize {
    // NOTE(review): this models a 32-bit `usize::MAX` (2^32 - 1); on a
    // 64-bit target the real value is 2^64 - 1 — confirm the intended
    // target width for the analysis.
    pub const MAX: usize = 4294967295;
}
pub mod u64 {
    // u64::MAX = 2^64 - 1.
    pub const MAX: u64 = 18446744073709551615;
}
pub mod u16 {
    // u16::MAX = 2^16 - 1.
    pub const MAX: u16 = 65535;
}
pub mod mem {
    // `size_of::<u32>()` is 4 bytes on every target.
    pub fn size_of__u32() -> usize {
        4
    }
}
}
pub mod std {
    pub mod io {
        pub mod stdio {
            use crate::foreign_contracts::core::fmt;
            // Model of `std::io::_print` (the sink behind `print!`):
            // printing has no effect the analyzer needs to track.
            pub fn _print(_args: fmt::Arguments<'_>) {}
        }
    }
    pub mod result {}
}
pub mod alloc {
    pub mod vec {
        // Length-only model of `alloc::vec::Vec`: the analyzer tracks the
        // element count but never the elements themselves.
        pub struct Vec<T> {
            _phantom: std::marker::PhantomData<T>,
            len: usize,
        }
        impl<T> Vec<T> {
            // An empty vector.
            pub fn new() -> Vec<T> {
                Vec {
                    _phantom: std::marker::PhantomData,
                    len: 0,
                }
            }
            pub fn len(&self) -> usize {
                self.len
            }
            // Push only bumps the tracked length; the value is dropped.
            pub fn push(&mut self, _value: T) {
                precondition!(self.len < std::usize::MAX);
                self.len += 1;
            }
            // Pop yields an abstract value (`result!`) when non-empty.
            pub fn pop(&mut self) -> Option<T> {
                if self.len == 0 {
                    None
                } else {
                    self.len -= 1;
                    result!()
                }
            }
            pub fn is_empty(&self) -> bool {
                self.len() == 0
            }
        }
    }
}
|
// NOTE(review): svd2rust-generated accessors for ETH_MACLMIR, the register
// holding the periodic intervals for automatic PTP packet generation (see
// the SPEC doc below). Generated code — do not hand-edit.
#[doc = "Register `ETH_MACLMIR` reader"]
pub type R = crate::R<ETH_MACLMIR_SPEC>;
#[doc = "Register `ETH_MACLMIR` writer"]
pub type W = crate::W<ETH_MACLMIR_SPEC>;
#[doc = "Field `LSI` reader - LSI"]
pub type LSI_R = crate::FieldReader;
#[doc = "Field `LSI` writer - LSI"]
pub type LSI_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
#[doc = "Field `DRSYNCR` reader - DRSYNCR"]
pub type DRSYNCR_R = crate::FieldReader;
#[doc = "Field `DRSYNCR` writer - DRSYNCR"]
pub type DRSYNCR_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `LMPDRI` reader - LMPDRI"]
pub type LMPDRI_R = crate::FieldReader;
#[doc = "Field `LMPDRI` writer - LMPDRI"]
pub type LMPDRI_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
impl R {
    #[doc = "Bits 0:7 - LSI"]
    #[inline(always)]
    pub fn lsi(&self) -> LSI_R {
        LSI_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 8:10 - DRSYNCR"]
    #[inline(always)]
    pub fn drsyncr(&self) -> DRSYNCR_R {
        DRSYNCR_R::new(((self.bits >> 8) & 7) as u8)
    }
    #[doc = "Bits 24:31 - LMPDRI"]
    #[inline(always)]
    pub fn lmpdri(&self) -> LMPDRI_R {
        LMPDRI_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
impl W {
    #[doc = "Bits 0:7 - LSI"]
    #[inline(always)]
    #[must_use]
    pub fn lsi(&mut self) -> LSI_W<ETH_MACLMIR_SPEC, 0> {
        LSI_W::new(self)
    }
    #[doc = "Bits 8:10 - DRSYNCR"]
    #[inline(always)]
    #[must_use]
    pub fn drsyncr(&mut self) -> DRSYNCR_W<ETH_MACLMIR_SPEC, 8> {
        DRSYNCR_W::new(self)
    }
    #[doc = "Bits 24:31 - LMPDRI"]
    #[inline(always)]
    #[must_use]
    pub fn lmpdri(&mut self) -> LMPDRI_W<ETH_MACLMIR_SPEC, 24> {
        LMPDRI_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "This register contains the periodic intervals for automatic PTP packet generation.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_maclmir::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`eth_maclmir::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MACLMIR_SPEC;
impl crate::RegisterSpec for ETH_MACLMIR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`eth_maclmir::R`](R) reader structure"]
impl crate::Readable for ETH_MACLMIR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`eth_maclmir::W`](W) writer structure"]
impl crate::Writable for ETH_MACLMIR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ETH_MACLMIR to value 0"]
impl crate::Resettable for ETH_MACLMIR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::{backend::SchemaBuilder, prepare::*, types::*, SchemaStatementBuilder};
/// Drop a table
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let table = Table::truncate()
/// .table(Font::Table)
/// .to_owned();
///
/// assert_eq!(
/// table.to_string(MysqlQueryBuilder),
/// r#"TRUNCATE TABLE `font`"#
/// );
/// assert_eq!(
/// table.to_string(PostgresQueryBuilder),
/// r#"TRUNCATE TABLE "font""#
/// );
/// assert_eq!(
/// table.to_string(SqliteQueryBuilder),
/// r#"TRUNCATE TABLE `font`"#
/// );
/// ```
#[derive(Debug, Clone)]
pub struct TableTruncateStatement {
    // Target table; `None` until `table()` is called.
    pub(crate) table: Option<DynIden>,
}
impl Default for TableTruncateStatement {
    /// An empty truncate statement with no table set (same as [`TableTruncateStatement::new`]).
    fn default() -> Self {
        Self { table: None }
    }
}
impl TableTruncateStatement {
/// Construct truncate table statement
pub fn new() -> Self {
Self { table: None }
}
/// Set table name
pub fn table<T: 'static>(mut self, table: T) -> Self
where
T: Iden,
{
self.table = Some(SeaRc::new(table));
self
}
}
impl SchemaStatementBuilder for TableTruncateStatement {
    // Render the statement with a statically-dispatched builder.
    fn build<T: SchemaBuilder>(&self, schema_builder: T) -> String {
        let mut sql = SqlWriter::new();
        schema_builder.prepare_table_truncate_statement(self, &mut sql);
        sql.result()
    }
    // Identical to `build`, but takes the builder as a trait object
    // (dynamic dispatch) — the duplication mirrors the trait's API.
    fn build_any(&self, schema_builder: &dyn SchemaBuilder) -> String {
        let mut sql = SqlWriter::new();
        schema_builder.prepare_table_truncate_statement(self, &mut sql);
        sql.result()
    }
}
|
use std::collections::HashMap;
#[allow(dead_code)]
/// Read one line from stdin and return it without trailing whitespace.
fn read_line() -> String {
    let mut buf = String::new();
    std::io::stdin().read_line(&mut buf).unwrap();
    // Trim in place instead of allocating a second String.
    let kept = buf.trim_end().len();
    buf.truncate(kept);
    buf
}
/// Read three whitespace-separated integers from one stdin line, solve,
/// and print each output line.
fn main() {
    let input = read_line();
    let mut tokens = input.split_whitespace();
    let mut next_num = || tokens.next().unwrap().parse::<i64>().unwrap();
    let (a, b, c) = (next_num(), next_num(), next_num());
    for line in Solver::new(a, b, c).solve() {
        println!("{}", line);
    }
}
/// Initial state for the expected-value recursion: three counters that
/// only grow toward 100 (the exact problem statement is not visible here).
struct Solver {
    a: i64,
    b: i64,
    c: i64,
}
impl Solver {
    fn new(a: i64, b: i64, c: i64) -> Solver {
        // Field-init shorthand instead of the redundant `a: a, b: b, c: c`.
        Solver { a, b, c }
    }
    /// Run the memoized recursion from the initial state and format the
    /// answer with six decimal places (one output line).
    fn solve(&self) -> Vec<String> {
        let mut memo: HashMap<(i64, i64, i64), f64> = HashMap::new();
        let ans = self.solve_by(self.a, self.b, self.c, &mut memo);
        vec![format!("{:.6}", ans)]
    }
    /// Memoized recursion over the state `(x, y, z)`.
    ///
    /// Base cases: any counter at 100 costs 0; when at most one counter is
    /// non-zero the remaining cost is `100 - x - y - z`. Otherwise each
    /// non-zero counter advances with probability proportional to itself.
    fn solve_by(&self, x: i64, y: i64, z: i64, memo: &mut HashMap<(i64, i64, i64), f64>) -> f64 {
        if x == 100 || y == 100 || z == 100 {
            return 0.0;
        }
        // Single hash lookup instead of `contains_key` followed by indexing.
        if let Some(&cached) = memo.get(&(x, y, z)) {
            return cached;
        }
        let xf = x as f64;
        let yf = y as f64;
        let zf = z as f64;
        let ans = if (x == 0 && y == 0) || (x == 0 && z == 0) || (y == 0 && z == 0) {
            (100 - x - y - z) as f64
        } else {
            let ans1 = if x == 0 {
                0.0
            } else {
                xf / (xf + yf + zf) * (self.solve_by(x + 1, y, z, memo) + 1.0)
            };
            let ans2 = if y == 0 {
                0.0
            } else {
                yf / (xf + yf + zf) * (self.solve_by(x, y + 1, z, memo) + 1.0)
            };
            let ans3 = if z == 0 {
                0.0
            } else {
                zf / (xf + yf + zf) * (self.solve_by(x, y, z + 1, memo) + 1.0)
            };
            ans1 + ans2 + ans3
        };
        memo.insert((x, y, z), ans);
        ans
    }
}
|
pub mod bump; // Bump allocator - the most simple. Has a counter that only goes up or down. When it is at 0, there are no allocations
pub mod linked_list; // Linked list allocator, which keeps track of free spaces
pub mod fixed_size_block; // Instead of the dynamic sizing of linked list, you have set sizes (Hence fixed_size_block)
use bump::BumpAllocator; // Fast, simple, but not the best as you can't really reuse allocations.
use linked_list::LinkedListAllocator; // Slower, but better as you can assign free memory regions and are not limited by segmentation
use fixed_size_block::FixedSizeBlockAllocator; // Faster than linked lists, but wastes memory. Better for kernels, as faster performance
use alloc::alloc::{GlobalAlloc, Layout}; // We need these to create our global allocator, as we aren't using std_lib
use core::ptr::null_mut; // Null pointer
use x86_64::{
structures::paging::{
mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
},
VirtAddr,
}; // Used for memory allocation
/// Define the memory location where the heap starts
pub const HEAP_START: usize = 0x_4444_4444_0000;
/// Define the heap size (100 KiB). We can increase this as we'd like
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
/// We define our allocator here, which needs to inherit GlobalAlloc type.
#[global_allocator] // Select an allocator from the list below (See import notes for specific use cases)
//static ALLOCATOR: Locked<BumpAllocator> = Locked::new(BumpAllocator::new());
//static ALLOCATOR: Locked<LinkedListAllocator> = Locked::new(LinkedListAllocator::new());
static ALLOCATOR: Locked<FixedSizeBlockAllocator> = Locked::new(FixedSizeBlockAllocator::new());
/// We create a zero-sized type as we don't need any fields.
// NOTE(review): `Dummy` always fails allocation and is NOT the registered
// global allocator above — presumably kept as a placeholder/example.
pub struct Dummy;
unsafe impl GlobalAlloc for Dummy {
    // Report out-of-memory for every request by returning a null pointer.
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        null_mut()
    }
    // Since `alloc` never succeeds, no pointer can legitimately reach here.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        panic!("dealloc should be never called")
    }
}
/// This function takes a frame allocator and mapper, then maps the heap into pages
pub fn init_heap(
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError<Size4KiB>> {
    // Create a page range, from the heap start memory address
    let page_range = {
        // Create a virtual address from our HEAP_START addr
        let heap_start = VirtAddr::new(HEAP_START as u64);
        // Find the heap end by adding the size of the heap (-1 so we get an inclusive bound)
        let heap_end = heap_start + HEAP_SIZE - 1u64;
        // Find the page on the page table that contains the start address
        let heap_start_page = Page::containing_address(heap_start);
        // Same, but end address
        let heap_end_page = Page::containing_address(heap_end);
        // Create an inclusive range, of every page between the two pages
        Page::range_inclusive(heap_start_page, heap_end_page)
    };
    // iterate through each page
    for page in page_range {
        // Using the frame allocator (Which we define in memory.rs), allocate a new frame
        // (REMINDER: A frame is just a slice of physical memory, that can have any page value)
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::FrameAllocationFailed)?;
        // Define what flags we want our page table to have for our page and frame. In this case,
        // we want PRESENT and WRITABLE. PRESENT means there is a page PRESENT, and it is WRITABLE
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        // We then map the page to the frame, with the frame allocator, according to the flags.
        unsafe {
            mapper.map_to(page, frame, flags, frame_allocator)?.flush() // We flush the results, which updates the map
        };
    }
    // Initialize our allocator with the freshly mapped heap region
    unsafe {
        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
    }
    Ok(())
}
/// Align the given address `addr` upwards to alignment `align`.
///
/// Requires that `align` is a power of two, so `align - 1` is a mask of the
/// low bits; adding the mask then clearing those bits rounds up.
///
/// [See more here](https://os.phil-opp.com/allocator-designs/#introduction)
fn align_up(addr: usize, align: usize) -> usize {
    let mask = align - 1;
    (addr + mask) & !mask
}
/// A wrapper around spin::Mutex to permit trait implementations.
pub struct Locked<A> {
    inner: spin::Mutex<A>,
}
impl<A> Locked<A> {
    // `const` so it can initialize a `static` (see ALLOCATOR above).
    pub const fn new(inner: A) -> Self {
        Locked {
            inner: spin::Mutex::new(inner),
        }
    }
    // Spin until the lock is acquired and return the guard.
    pub fn lock(&self) -> spin::MutexGuard<A> {
        self.inner.lock()
    }
}
use std::io;
use rayon::prelude::*;
use crate::base::Part;
/// Solve part one: length of the fully reacted polymer.
pub fn part1(r: &mut dyn io::Read) -> Result<String, String> {
    solve(r, Part::One)
}
/// Solve part two: shortest polymer after removing one unit type.
pub fn part2(r: &mut dyn io::Read) -> Result<String, String> {
    solve(r, Part::Two)
}
// Read the polymer from `r` and dispatch on the requested part.
fn solve(r: &mut dyn io::Read, part: Part) -> Result<String, String> {
    let mut input = String::new();
    r.read_to_string(&mut input).map_err(|e| e.to_string())?;
    // Strip surrounding whitespace (trailing newline in the input file).
    input = input.trim().to_string();
    match part {
        Part::One => {
            let after_reactions = fully_react(input.chars());
            Ok(after_reactions.len().to_string())
        }
        Part::Two => {
            // Try removing each unit type a..=z (case-insensitively) in
            // parallel and keep the shortest reacted result.
            let chars = (b'a'..=b'z').map(char::from).collect::<Vec<char>>();
            let best = chars
                .par_iter()
                .map(|&c| fully_react_without(&input, c))
                .map(|s| s.len())
                .min()
                .unwrap();
            Ok(best.to_string())
        }
    }
}
/// Fully react a polymer: adjacent units of the same letter but opposite
/// case (e.g. `a`/`A`) annihilate, cascading until stable.
///
/// Rewritten from the multi-pass tombstone scan (`remove_reactions` over a
/// `Vec<Option<char>>`, O(n²) worst case) to the classic single-pass stack
/// algorithm: push each unit, and if it reacts with the stack top, pop the
/// pair. Polymer reduction is confluent, so the result is identical.
fn fully_react(char_iter: impl Iterator<Item = char>) -> String {
    let reacts_pair =
        |a: char, b: char| a != b && a.eq_ignore_ascii_case(&b);
    let mut stack: Vec<char> = Vec::new();
    for unit in char_iter {
        if stack.last().map_or(false, |&top| reacts_pair(top, unit)) {
            stack.pop();
        } else {
            stack.push(unit);
        }
    }
    stack.into_iter().collect()
}
/// React the polymer after deleting every occurrence of `unit`,
/// case-insensitively.
fn fully_react_without(polymer: &str, unit: char) -> String {
    let survivors = polymer
        .chars()
        .filter(|c| !c.eq_ignore_ascii_case(&unit));
    fully_react(survivors)
}
// In-place reaction pass over a tombstone array: reacted units become
// `None`, and the cursor backtracks so newly adjacent survivors can react.
fn remove_reactions(chars: &mut [Option<char>]) {
    let mut c1_index = 0;
    while c1_index < chars.len() {
        // The partner is the next surviving unit to the right.
        let c2_index = match find_next_forward(chars, c1_index + 1) {
            Some(i) => i,
            None => break,
        };
        let c1 = chars[c1_index].unwrap();
        let c2 = chars[c2_index].unwrap();
        if reacts(c1, c2) {
            // Annihilate the pair, then step back to the previous survivor
            // (its new right neighbor may now react with it).
            chars[c1_index] = None;
            chars[c2_index] = None;
            c1_index = match find_next_backward(chars, c1_index) {
                Some(i) => i,
                // No survivor to the left: resume to the right instead.
                None => match find_next_forward(chars, c2_index) {
                    Some(i) => i,
                    None => break,
                },
            };
        } else {
            c1_index = c2_index;
        }
    }
}
/// Index of the first surviving (`Some`) unit at or after `start`, if any.
fn find_next_forward(chars: &[Option<char>], start: usize) -> Option<usize> {
    (start..chars.len()).find(|&i| chars[i].is_some())
}
/// Index of the first surviving (`Some`) unit at or before `start`,
/// scanning toward index 0.
fn find_next_backward(chars: &[Option<char>], start: usize) -> Option<usize> {
    (0..=start).rev().find(|&i| chars[i].is_some())
}
/// Two units react when they are the same letter in opposite case.
fn reacts(c1: char, c2: char) -> bool {
    c1.eq_ignore_ascii_case(&c2) && c1 != c2
}
#[cfg(test)]
mod tests {
    use super::*;
    // `test!` is a crate-local macro that builds a test from
    // (name, input-or-file, expected output, solver fn).
    use crate::test;
    mod part1 {
        use super::*;
        // Examples from the puzzle statement, plus the real input.
        test!(example1, "aA", "0", part1);
        test!(example2, "abBA", "0", part1);
        test!(example3, "abAB", "4", part1);
        test!(example4, "aabAAB", "6", part1);
        test!(example5, "dabAcCaCBAcCcaDA", "10", part1);
        test!(actual, file "../../../inputs/2018/05", "9686", part1);
    }
    mod part2 {
        use super::*;
        test!(example, "dabAcCaCBAcCcaDA", "4", part2);
        test!(actual, file "../../../inputs/2018/05", "5524", part2);
    }
}
|
// xfail-fast
// NOTE(review): pre-1.0 Rust test fixture — `import`, `fail`, bare `spawn`
// of fn values, and `for each` are long-obsolete syntax. Kept verbatim; it
// will not compile on any modern rustc and presumably exists as a
// historical compiler test.
use std;
import std::task;
import std::comm;
import std::uint;
// Immediately fail the task.
fn die() {
    fail;
}
// Spawn an unsupervised task that dies.
fn iloop() {
    task::unsupervise();
    let f = die;
    task::spawn(f);
}
// Spawn 100 such tasks.
fn main() {
    for each i in uint::range(0u, 100u) {
        let f = iloop;
        task::spawn(f);
    }
}
use polars::chunked_array::ChunkedArray;
use sqlx::{postgres::*, query};
use std::{env, i32};
use std::fmt::Error;
use std::fs::File;
use std::future::Future;
use std::io::{self, BufRead};
use std::path::Path;
use polars::prelude::{ChunkApply, CsvReader};
use polars::prelude::SerReader;
use polars::prelude::Series;
use polars::prelude::DataFrame;
use async_std::prelude::*;
#[async_std::main]
/// Connect to the Postgres instance named by `PGHOST` and (optionally, via
/// the commented calls) seed the database tables.
async fn main() -> anyhow::Result<()> {
    // Propagate a missing/invalid PGHOST as an error instead of panicking
    // via the previous `host.unwrap()`.
    let host = env::var("PGHOST")?;
    let url = format!("postgres://postgres:foo@{}/foo", host);
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect(&url).await?;
    // add_received_loot(&pool, 1,1).await?;
    // fill up database with items
    // populate_users(&pool, "users_text.txt").await?;
    // populate_items(&pool, "t4items.txt").await?;
    // populate_loot_lists_csv(&pool, "lootlists.csv").await?;
    // populate_past_raids(&pool, "past_raids.txt").await?;
    Ok(())
}
// Import loot-priority lists from a DataFrame whose columns are character
// names and whose rows pair up into (col_1, col_2) item choices.
async fn populate_loot_lists(pool : &PgPool, df : DataFrame) -> anyhow::Result<()> {
    let users = df.get_column_names();
    for user in users.iter() {
        // Resolve the character name to its user id.
        let uid = sqlx::query!("select id from users where char_name = $1", user)
            .fetch_one(pool)
            .await?
            .id;
        let loot_list = &df[*user].utf8().unwrap();
        // Normalize empty cells to `None` so they are skipped below.
        let iter : Vec<_> = loot_list.into_iter().map(|x| if x == Some("") { None } else { x }).collect();
        // Rows pair up two columns; `i` ranks the pair (higher = earlier).
        for (i,chnk) in iter.chunks(2).enumerate() {
            let (col_1, col_2) = (chnk[0] , chnk[1]);
            if let Some(col_1) = col_1 {
                let col1 = sqlx::query!("select * from items where name = $1", col_1)
                    .fetch_one(pool)
                    .await?;
                // Second-column item is optional.
                let col2 = match col_2 {
                    Some(col_2) => Some(sqlx::query!("select * from items where name = $1", col_2)
                        .fetch_one(pool)
                        .await?
                        .id),
                    None => None
                };
                // Priority counts down from 50 as we move down the list.
                let prio_row = LootListRow{ raid : col1.raid.as_ref().unwrap().to_string() , uid , prio : 50-i as i32, col1_id : col1.id, col2_id : col2 };
                add_lootlist_row(pool, prio_row).await?;
            }
        }
    }
    Ok(())
}
// Load a loot-list CSV (header row = character names) and import it.
async fn populate_loot_lists_csv(pool : &PgPool, llcsv : &str) -> anyhow::Result<()> {
    let df = CsvReader::from_path(llcsv)?;
    let df = df.infer_schema(None).has_header(true).finish()?;
    populate_loot_lists(pool, df).await?;
    Ok(())
}
// Import items from a text file; each line is "<raid> <item name...>".
async fn populate_items(pool : &PgPool, items_txt : &str) -> anyhow::Result<()> {
    let lines = read_lines(items_txt)?;
    for line in lines {
        let l = line?;
        let mut sl = l.split_whitespace();
        // First token is the raid; the rest re-joins into the item name.
        let raid_name = sl.next().ok_or(Error)?;
        let t : Vec<&str> = sl.collect();
        let item_name = t.join(" ");
        sqlx::query!(r#"INSERT INTO items(name,raid) VALUES ($1, $2)"#, item_name, raid_name)
            .execute(pool)
            .await?;
    }
    Ok(())
}
// Import users from a text file; each line is "<name> <class>".
async fn populate_users(pool : &PgPool, users_txt : &str) -> Result<(), sqlx::Error> {
    let lines = read_lines(users_txt)?;
    for line in lines {
        add_user(pool, classname_str_to_user(line?)).await?;
    }
    Ok(())
}
// Import past raids from a text file; each line is "<y-m-d> <raid name...>".
async fn populate_past_raids(pool : &PgPool, dates : &str) -> anyhow::Result<()> {
    let lines = read_lines(dates)?;
    for line in lines {
        let l = line?;
        let mut sl = l.split_whitespace();
        // Parse the leading "y-m-d" token into its three components.
        let mut date = sl.next().ok_or(Error)?.split("-");
        let (y,m,d) : (i32,u8,u8) = (date.next().ok_or(Error)?.parse()?,
                                     date.next().ok_or(Error)?.parse()?,
                                     date.next().ok_or(Error)?.parse()?);
        let date : time::Date = time::Date::try_from_ymd(y,m,d)?;
        // Remaining tokens form the raid name.
        let raid_name = sl.collect::<Vec<_>>().join(" ");
        sqlx::query!("insert into past_raids(date,raid) values ($1,$2)", date, raid_name)
            .execute(pool)
            .await?;
    }
    Ok(())
}
// Record that user `uid` received item `iid` in raid `rid`.
async fn add_received_loot(pool : &PgPool, uid : i32, iid : i32, rid : i32) -> Result<(), sqlx::Error> {
    sqlx::query!(r#"INSERT INTO archived_loot(received_user_id, received_item_id, raid_id) VALUES ($1, $2, $3)"#, uid, iid, rid)
        .execute(pool)
        .await?;
    Ok(())
}
/// Open `filename` and return a buffered iterator over its lines.
fn read_lines<P>(filename: P) -> io::Result<io::Lines<io::BufReader<File>>>
where
    P: AsRef<Path>,
{
    Ok(io::BufReader::new(File::open(filename)?).lines())
}
// One row of a user's loot-priority list (see `add_lootlist_row`).
struct LootListRow {
    raid : String,           // raid the items belong to
    uid : i32,               // owning user id
    prio : i32,              // priority rank (50 counts down per list row)
    col1_id : i32,           // first-column item id
    col2_id : Option<i32>,   // optional second-column item id
}
// Record that `uid` attended raid `raid_id`.
// NOTE(review): "attendence" (sic) matches the table name in the schema.
async fn add_attendence(pool : &PgPool, uid : i32, raid_id : i32) -> Result<(), sqlx::Error> {
    sqlx::query!(r#"INSERT INTO attendence(user_id, raid_id) VALUES ($1, $2)"#, uid, raid_id)
        .execute(pool)
        .await?;
    Ok(())
}
/// A guild member: character name plus class.
struct User {
    name : String,
    class : String
}
/// Parse a "<name> <class>" line into a `User`.
///
/// Panics with a descriptive message when the line has fewer than two
/// whitespace-separated tokens (previously a bare `unwrap`); any tokens
/// after the second are ignored.
fn classname_str_to_user(classname : String) -> User {
    let mut it = classname.split_whitespace();
    let sname = it
        .next()
        .expect("expected '<name> <class>', got an empty line");
    let sclass = it
        .next()
        .expect("expected '<name> <class>', missing the class token");
    println!("Adding.. {},{}", sname,sclass);
    User {
        name : sname.to_string(),
        class : sclass.to_string()
    }
}
// Insert a user row (character name + class).
async fn add_user(pool : &PgPool, user : User) -> Result<(), sqlx::Error> {
    sqlx::query!(r#"INSERT INTO users(char_name, class) VALUES ($1, $2)"#, user.name, user.class)
        .execute(pool)
        .await?;
    Ok(())
}
// Insert one loot-priority row; the 4- vs 5-column insert is chosen by
// whether a second-column item is present.
async fn add_lootlist_row(pool : &PgPool, row : LootListRow) -> Result<(), sqlx::Error> {
    match row.col2_id {
        Some(col2_id) =>
            sqlx::query!(r#"INSERT INTO loot_prios(raid, user_id, priority, col_1_item_id, col_2_item_id) VALUES ($1, $2, $3, $4, $5)"#,
                row.raid,
                row.uid,
                row.prio,
                row.col1_id,
                row.col2_id)
            .execute(pool)
            .await?,
        None =>
            sqlx::query!(r#"INSERT INTO loot_prios(raid, user_id, priority, col_1_item_id) VALUES ($1, $2, $3, $4)"#,
                row.raid,
                row.uid,
                row.prio,
                row.col1_id)
            .execute(pool)
            .await?
    };
    Ok(())
}
|
extern crate futures;
mod runtime;
mod lifecycled;
mod objectkey;
mod provider;
|
use serde_json::json;
/// A search query that can render itself as a JSON request body.
pub trait SearchQuery {
    fn to_json(&self) -> serde_json::Value;
}
/// A query built from a raw query string.
pub struct QueryStringQuery {
    query: String,
}

impl QueryStringQuery {
    /// Wrap the raw query text.
    pub fn new(query: String) -> Self {
        QueryStringQuery { query }
    }
}
impl SearchQuery for QueryStringQuery {
    /// Render as the JSON body `{"query": <query text>}`.
    fn to_json(&self) -> serde_json::Value {
        // Previously `&self.query.clone()` — borrowing a temporary clone;
        // the clone alone is sufficient for the `json!` macro.
        json!({
            "query": self.query.clone(),
        })
    }
}
|
// NOTE(review): svd2rust-generated accessors for MMC_CONTROL, the register
// that configures the MMC operating mode (see the SPEC doc below).
// Generated code — do not hand-edit.
#[doc = "Register `MMC_CONTROL` reader"]
pub type R = crate::R<MMC_CONTROL_SPEC>;
#[doc = "Register `MMC_CONTROL` writer"]
pub type W = crate::W<MMC_CONTROL_SPEC>;
#[doc = "Field `CNTRST` reader - CNTRST"]
pub type CNTRST_R = crate::BitReader;
#[doc = "Field `CNTRST` writer - CNTRST"]
pub type CNTRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CNTSTOPRO` reader - CNTSTOPRO"]
pub type CNTSTOPRO_R = crate::BitReader;
#[doc = "Field `CNTSTOPRO` writer - CNTSTOPRO"]
pub type CNTSTOPRO_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RSTONRD` reader - RSTONRD"]
pub type RSTONRD_R = crate::BitReader;
#[doc = "Field `RSTONRD` writer - RSTONRD"]
pub type RSTONRD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CNTFREEZ` reader - CNTFREEZ"]
pub type CNTFREEZ_R = crate::BitReader;
#[doc = "Field `CNTFREEZ` writer - CNTFREEZ"]
pub type CNTFREEZ_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CNTPRST` reader - CNTPRST"]
pub type CNTPRST_R = crate::BitReader;
#[doc = "Field `CNTPRST` writer - CNTPRST"]
pub type CNTPRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CNTPRSTLVL` reader - CNTPRSTLVL"]
pub type CNTPRSTLVL_R = crate::BitReader;
#[doc = "Field `CNTPRSTLVL` writer - CNTPRSTLVL"]
pub type CNTPRSTLVL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UCDBC` reader - UCDBC"]
pub type UCDBC_R = crate::BitReader;
#[doc = "Field `UCDBC` writer - UCDBC"]
pub type UCDBC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 0 - CNTRST"]
    #[inline(always)]
    pub fn cntrst(&self) -> CNTRST_R {
        CNTRST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - CNTSTOPRO"]
    #[inline(always)]
    pub fn cntstopro(&self) -> CNTSTOPRO_R {
        CNTSTOPRO_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - RSTONRD"]
    #[inline(always)]
    pub fn rstonrd(&self) -> RSTONRD_R {
        RSTONRD_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - CNTFREEZ"]
    #[inline(always)]
    pub fn cntfreez(&self) -> CNTFREEZ_R {
        CNTFREEZ_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - CNTPRST"]
    #[inline(always)]
    pub fn cntprst(&self) -> CNTPRST_R {
        CNTPRST_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - CNTPRSTLVL"]
    #[inline(always)]
    pub fn cntprstlvl(&self) -> CNTPRSTLVL_R {
        CNTPRSTLVL_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 8 - UCDBC"]
    #[inline(always)]
    pub fn ucdbc(&self) -> UCDBC_R {
        UCDBC_R::new(((self.bits >> 8) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - CNTRST"]
    #[inline(always)]
    #[must_use]
    pub fn cntrst(&mut self) -> CNTRST_W<MMC_CONTROL_SPEC, 0> {
        CNTRST_W::new(self)
    }
    #[doc = "Bit 1 - CNTSTOPRO"]
    #[inline(always)]
    #[must_use]
    pub fn cntstopro(&mut self) -> CNTSTOPRO_W<MMC_CONTROL_SPEC, 1> {
        CNTSTOPRO_W::new(self)
    }
    #[doc = "Bit 2 - RSTONRD"]
    #[inline(always)]
    #[must_use]
    pub fn rstonrd(&mut self) -> RSTONRD_W<MMC_CONTROL_SPEC, 2> {
        RSTONRD_W::new(self)
    }
    #[doc = "Bit 3 - CNTFREEZ"]
    #[inline(always)]
    #[must_use]
    pub fn cntfreez(&mut self) -> CNTFREEZ_W<MMC_CONTROL_SPEC, 3> {
        CNTFREEZ_W::new(self)
    }
    #[doc = "Bit 4 - CNTPRST"]
    #[inline(always)]
    #[must_use]
    pub fn cntprst(&mut self) -> CNTPRST_W<MMC_CONTROL_SPEC, 4> {
        CNTPRST_W::new(self)
    }
    #[doc = "Bit 5 - CNTPRSTLVL"]
    #[inline(always)]
    #[must_use]
    pub fn cntprstlvl(&mut self) -> CNTPRSTLVL_W<MMC_CONTROL_SPEC, 5> {
        CNTPRSTLVL_W::new(self)
    }
    #[doc = "Bit 8 - UCDBC"]
    #[inline(always)]
    #[must_use]
    pub fn ucdbc(&mut self) -> UCDBC_W<MMC_CONTROL_SPEC, 8> {
        UCDBC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "This register configures the MMC operating mode.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`mmc_control::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`mmc_control::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct MMC_CONTROL_SPEC;
impl crate::RegisterSpec for MMC_CONTROL_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`mmc_control::R`](R) reader structure"]
impl crate::Readable for MMC_CONTROL_SPEC {}
#[doc = "`write(|w| ..)` method takes [`mmc_control::W`](W) writer structure"]
impl crate::Writable for MMC_CONTROL_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets MMC_CONTROL to value 0"]
impl crate::Resettable for MMC_CONTROL_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
pub mod common;
use std::cmp;
use std::collections::HashMap;
use std::error;
use std::fmt;
/// Coarse classification of an error: infrastructure failure vs domain
/// (application-level) failure.
#[derive(Debug, Clone, PartialEq)]
pub enum ErrorKind {
    Internal,
    Application,
}
/// `Display` replaces the previous direct `impl ToString`: implementing
/// `Display` yields `to_string()` for free via the std blanket impl, and
/// avoids allocating when the kind is written into a formatter.
impl fmt::Display for ErrorKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ErrorKind::Internal => write!(f, "internal"),
            ErrorKind::Application => write!(f, "application"),
        }
    }
}
// A structured, chainable error: kind + location + code, with optional
// status, message, key/value context, and a wrapped cause.
#[derive(Debug, Clone)]
pub struct Error {
    kind: ErrorKind,                    // internal vs application
    path: String,                       // logical origin of the error
    code: String,                       // machine-readable code
    status: Option<u32>,                // e.g. an HTTP status, if set
    message: Option<String>,            // optional human-readable text
    context: HashMap<String, String>,   // arbitrary key/value details
    cause: Option<Box<Error>>,          // wrapped underlying error
}
impl Error {
pub fn new<S: Into<String>>(path: S, code: S) -> Error {
Error {
kind: ErrorKind::Application,
path: path.into(),
code: code.into(),
status: None,
message: None,
context: HashMap::new(),
cause: None,
}
}
pub fn internal<S: Into<String>>(path: S, code: S) -> Error {
Error {
kind: ErrorKind::Internal,
path: path.into(),
code: code.into(),
status: None,
message: None,
context: HashMap::new(),
cause: None,
}
}
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
pub fn code(&self) -> &str {
&self.code
}
pub fn path(&self) -> &str {
&self.path
}
pub fn status(&self) -> Option<u32> {
self.status.clone()
}
pub fn message(&self) -> Option<&String> {
self.message.as_ref()
}
pub fn context(&self) -> &HashMap<String, String> {
&self.context
}
pub fn has_context(&self) -> bool {
!self.context.is_empty()
}
pub fn cause(&self) -> Option<&Error> {
match &self.cause {
Some(boxed_err) => Some(boxed_err.as_ref()),
None => None,
}
}
pub fn set_path<S: Into<String>>(&mut self, path: S) -> &mut Error {
self.path = path.into();
self
}
pub fn set_code<S: Into<String>>(&mut self, code: S) -> &mut Error {
self.code = code.into();
self
}
pub fn set_status(&mut self, status: u32) -> &mut Error {
self.status = Some(status);
self
}
pub fn set_message<S: Into<String>>(&mut self, message: S) -> &mut Error {
self.message = Some(message.into());
self
}
pub fn add_context<S: Into<String>>(&mut self, k: S, v: S) -> &mut Error {
self.context.insert(k.into(), v.into());
self
}
pub fn wrap(&mut self, err: Error) -> &mut Error {
self.cause = Some(Box::new(err));
self
}
pub fn wrap_raw<E: error::Error>(&mut self, err: E) -> &mut Error {
let err = Error {
kind: ErrorKind::Internal,
path: "".to_owned(),
code: "raw".to_owned(),
status: None,
message: Some(err.to_string()),
context: HashMap::new(),
cause: None,
};
self.cause = Some(Box::new(err));
self
}
pub fn merge(&mut self, err: Error) -> &mut Error {
self.add_context(err.path(), err.code());
self.context.extend(err.context);
self
}
pub fn build(&self) -> Error {
self.clone()
}
}
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): Display simply forwards to Debug, so user-facing
        // and diagnostic output are identical — confirm this is intended.
        write!(f, "{:?}", self)
    }
}
impl error::Error for Error {}
impl cmp::PartialEq for Error {
    // Equality compares only code/path/status; kind, message, context and
    // cause are ignored (the `wrap` test below relies on this).
    fn eq(&self, other: &Self) -> bool {
        self.code == other.code && self.path == other.path && self.status == other.status
    }
}
#[cfg(test)]
mod tests {
    use std::error;
    use std::fmt;
    use super::*;
    // Builder chain: setters, context overwrite (k2 set twice), accessors.
    #[test]
    fn basic() {
        let err = Error::new("my.path", "code")
            .set_message("message")
            .set_status(404)
            .add_context("k1", "v1")
            .add_context("k2", "v2")
            .add_context("k2", "v3")
            .build();
        assert_eq!(err.code(), "code");
        assert_eq!(err.message().unwrap(), "message");
        assert_eq!(err.path(), "my.path");
        assert_eq!(err.status().unwrap(), 404);
        assert_eq!(err.context().len(), 2);
        assert_eq!(err.context().get("k1").unwrap(), "v1");
        // Later add_context calls overwrite earlier ones for the same key.
        assert_eq!(err.context().get("k2").unwrap(), "v3");
    }
    // Minimal std::error::Error impl used to exercise wrap_raw.
    #[derive(Debug, Clone)]
    struct StringError {
        error: String,
    }
    impl fmt::Display for StringError {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "{}", self.error)
        }
    }
    impl error::Error for StringError {}
    // Wrapping: raw errors keep their message; causes compare by
    // code/path/status (Error's PartialEq ignores the cause chain).
    #[test]
    fn wrap() {
        let raw_err = StringError {
            error: "raw_err".to_owned(),
        };
        let inner_err = Error::new("inner", "inner")
            .wrap_raw(raw_err.clone())
            .build();
        let outer_err = Error::new("outer", "outer")
            .set_code("outer")
            .wrap(inner_err.build())
            .build();
        assert_eq!(
            inner_err.cause().unwrap().message().unwrap(),
            &raw_err.error
        );
        assert_eq!(outer_err.cause().unwrap(), &inner_err);
    }
    // Merging: contexts accumulate, path→code entries are recorded, and a
    // colliding key ("e1-key1") takes the merged-in error's value.
    #[test]
    fn merge() {
        let mut err1 = Error::internal("err1", "err1");
        err1.add_context("e1-key1", "value1");
        err1.add_context("e1-key2", "value2");
        let mut err2 = Error::internal("err2", "err2");
        err2.add_context("e2-key", "value");
        err2.merge(err1);
        let mut err3 = Error::new("err3", "err3");
        err3.add_context("e1-key1", "value");
        err3.add_context("e3-key", "value");
        err3.merge(err2);
        assert_eq!(err3.context().len(), 6);
        assert_eq!(err3.context().get("err1"), Some(&"err1".to_owned()));
        assert_eq!(err3.context().get("err2"), Some(&"err2".to_owned()));
        assert_eq!(err3.context().get("e1-key1"), Some(&"value1".to_owned()));
        assert_eq!(err3.context().get("e1-key2"), Some(&"value2".to_owned()));
        assert_eq!(err3.context().get("e2-key"), Some(&"value".to_owned()));
        assert_eq!(err3.context().get("e3-key"), Some(&"value".to_owned()));
    }
}
|
use std::env;
use std::fs;
use std::path::Path;
/// Build script: wires up native dependencies per compilation target.
///
/// * `*-windows-*`: no C build; copies prebuilt `libzmq.dll` from
///   `INDY_PREBUILT_DEPS_DIR` next to the build output and emits the
///   matching link-search/include flags.
/// * `*linux-android*`: emits link flags for a static libzmq located via
///   `LIBZMQ_LIB_DIR` or `LIBZMQ_PREFIX` (panics if neither is set).
/// * Everything else: nothing to do.
fn main() {
    let target = env::var("TARGET").expect("TARGET is set by cargo");
    println!("target={}", target);
    if target.contains("-windows-") {
        // do not build c-code on windows, use binaries
        let output_dir = env::var("OUT_DIR").expect("OUT_DIR is set by cargo");
        let prebuilt_dir = env::var("INDY_PREBUILT_DEPS_DIR")
            .expect("INDY_PREBUILT_DEPS_DIR must point at the prebuilt deps directory");
        // OUT_DIR is <profile>/build/<crate>/out; three levels up is the
        // profile directory where the dlls must sit beside the binaries.
        let dst = Path::new(&output_dir[..]).join("..\\..\\..");
        let prebuilt_lib = Path::new(&prebuilt_dir[..]).join("lib");
        println!("cargo:rustc-link-search=native={}", prebuilt_dir);
        println!("cargo:rustc-flags=-L {}\\lib", prebuilt_dir);
        println!("cargo:include={}\\include", prebuilt_dir);
        let files = vec![
            // "libeay32md.dll",
            "libzmq.dll",
            // "ssleay32md.dll",
        ];
        for f in files.iter() {
            // Best-effort copy: a missing or locked file is not fatal here,
            // matching the original behavior of silently skipping failures.
            if fs::copy(&prebuilt_lib.join(f), &dst.join(f)).is_ok() {
                println!(
                    "copy {} -> {}",
                    &prebuilt_lib.join(f).display(),
                    &dst.join(f).display()
                );
            }
        }
    } else if target.contains("linux-android") {
        // Prefer the explicit lib dir; fall back to <prefix>/lib.
        let zmq = match env::var("LIBZMQ_LIB_DIR") {
            Ok(val) => val,
            Err(..) => match env::var("LIBZMQ_PREFIX") {
                Ok(dir) => Path::new(&dir[..])
                    .join("lib")
                    .to_string_lossy()
                    .into_owned(),
                Err(..) => {
                    panic!("Missing required environment variables LIBZMQ_PREFIX or LIBZMQ_LIB_DIR")
                }
            },
        };
        println!("cargo:rustc-link-search=native={}", zmq);
        println!("cargo:rustc-link-lib=static=zmq");
    }
}
|
use rand::Rng;
use stream::{BackendBuilder, BackendStream};
use std::collections::HashMap;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use protocol::{Protocol, Resource};
// NOTE(review): these blanket impls mark Topology<P> as Send/Sync for *every*
// P, with no `P: Send`/`P: Sync` bounds. That is only sound if every parser
// type used as P is itself thread-safe — confirm, or add the bounds.
// SAFETY: relies on the above assumption being upheld by all instantiations.
unsafe impl<P> Send for Topology<P> {}
unsafe impl<P> Sync for Topology<P> {}
/// Routing topology for a cache service: write masters, write-only
/// followers, and layered read pools, plus one `BackendBuilder` per
/// backend address.
#[derive(Default)]
pub struct Topology<P> {
    pub(crate) hash: String,         // hash strategy
    pub(crate) distribution: String, // distribution strategy
    // The last element is the slave, the second-to-last is the master,
    // and the rest are L1 — ordered this way for convenient traversal.
    l1_seq: AtomicUsize,
    // Serve write requests.
    pub(crate) masters: Vec<String>,
    m_streams: HashMap<String, Arc<BackendBuilder>>,
    // Only used to replicate write requests.
    followers: Vec<Vec<String>>,
    f_streams: HashMap<String, Arc<BackendBuilder>>,
    // Serve read requests: one pool is chosen per layer.
    // Multiple layers, each a group of resource pools. For mc there are
    // typically several layers, masterL1--master--slave--slaveL1:
    // [layer[reader[node_dist_pool]]]
    pub(crate) layer_readers: Vec<Vec<Vec<String>>>,
    get_streams: HashMap<String, Arc<BackendBuilder>>,
    gets_streams: HashMap<String, Arc<BackendBuilder>>,
    metas: Vec<String>,
    meta_stream: HashMap<String, Arc<BackendBuilder>>,
    parser: P,
}
// A configuration set used for testing; all IPs are 127.0.0.1
// master port: 11211:11212
// followers: 11213, 11214; 11215, 11216;
// l1: 11213, 11214; 11215, 11216
// no slave
impl<P> Topology<P> {
    /// Builds one stream per meta address (currently the master list —
    /// see `update_from_namespace`).
    pub fn meta(&self) -> Vec<BackendStream> {
        self.metas
            .iter()
            .map(|addr| {
                self.meta_stream
                    .get(addr)
                    .expect("stream must be exists before address")
                    .build()
            })
            .collect()
    }
    /// Builds one stream per master address; masters serve writes.
    pub fn master(&self) -> Vec<BackendStream> {
        self.masters
            .iter()
            .map(|addr| {
                self.m_streams
                    .get(addr)
                    .expect("stream must be exists before address")
                    .build()
            })
            .collect()
    }
    // Followers are write-only; reads ignore them.
    pub fn followers(&self) -> Vec<Vec<BackendStream>> {
        if self.followers.len() == 0 {
            return vec![];
        }
        self.followers
            .iter()
            .map(|servers| {
                servers
                    .iter()
                    .map(|addr| {
                        self.f_streams
                            .get(addr)
                            .expect("stream must be exists before address when call followers")
                            .build()
                    })
                    .collect()
            })
            .collect()
    }
    // Clean up once testing is finished. fishermen 2021.7.2
    // TODO: only a single pool is returned here; will be replaced later. fishermen
    // pub fn next_l1(&self) -> Vec<BackendStream> {
    //     if self.layer_readers.len() == 0 {
    //         return vec![];
    //     }
    //     let idx = self.l1_seq.fetch_add(1, Ordering::AcqRel) % self.readers.len();
    //     unsafe {
    //         self.random_reads()
    //             .get_unchecked(idx)
    //             .iter()
    //             .map(|addr| {
    //                 self.get_streams
    //                     .get(addr)
    //                     .expect("stream must be exists before address")
    //                     .build()
    //             })
    //             .collect()
    //     }
    // }
    // TODO: only a single pool is returned here; will be replaced later. fishermen
    // pub fn next_l1_gets(&self) -> Vec<BackendStream> {
    //     if self.readers.len() == 0 {
    //         return vec![];
    //     }
    //     let idx = self.l1_seq.fetch_add(1, Ordering::AcqRel) % self.readers.len();
    //     unsafe {
    //         self.random_reads()
    //             .get_unchecked(idx)
    //             .iter()
    //             .map(|addr| {
    //                 self.gets_streams
    //                     .get(addr)
    //                     .expect("stream must be exists before address")
    //                     .build()
    //             })
    //             .collect()
    //     }
    // }
    /// Streams for the `get` command: one randomly chosen pool per layer.
    /// (Public name `retrive_get` — typo kept, it is part of the API.)
    pub fn retrive_get(&self) -> Vec<Vec<BackendStream>> {
        self.reader_layers(&self.get_streams)
    }
    /// Streams for the `get[s]` command: one randomly chosen pool per layer.
    pub fn retrive_gets(&self) -> Vec<Vec<BackendStream>> {
        self.reader_layers(&self.gets_streams)
    }
    // Since mc also accesses the master as an L1, deduplication is done
    // uniformly here.
    fn random_reads(&self) -> Vec<Vec<String>> {
        let mut readers = Vec::new();
        for layer in &self.layer_readers {
            if layer.len() == 1 {
                let r = &layer[0];
                if !readers.contains(r) {
                    readers.push(r.clone());
                }
            } else if layer.len() > 1 {
                // Pick one pool at random from the layer.
                let rd = rand::thread_rng().gen_range(0..layer.len());
                let r = &layer[rd];
                if !readers.contains(r) {
                    readers.push(r.clone())
                }
            } else {
                log::warn!("topolody - rand readers should has candidates!");
            }
        }
        log::info!("use random readers: {:?}", readers);
        readers
    }
    // Builds the reader stream list: one pool chosen per layer.
    fn reader_layers(
        &self,
        streams: &HashMap<String, Arc<BackendBuilder>>,
    ) -> Vec<Vec<BackendStream>> {
        // Select one reader pool from every layer.
        let readers = self.random_reads();
        readers
            .iter()
            .map(|pool| {
                pool.iter()
                    .map(|addr| {
                        streams
                            .get(addr)
                            .expect("stream must be exists before adress")
                            .build()
                    })
                    .collect()
            })
            .collect()
    }
    // Drop streams whose address no longer exists.
    fn delete_non_exists(addrs: &[String], streams: &mut HashMap<String, Arc<BackendBuilder>>) {
        streams.retain(|addr, _| addrs.contains(addr));
    }
    // Create builders for newly added addresses; existing ones are kept.
    fn add_new(
        parser: &P,
        addrs: &[String],
        streams: &mut HashMap<String, Arc<BackendBuilder>>,
        parallel: usize,
        namespace: &str,
    ) where
        P: Send + Sync + Protocol + 'static + Clone,
    {
        for addr in addrs {
            if !streams.contains_key(addr) {
                streams.insert(
                    addr.to_string(),
                    Arc::new(BackendBuilder::from(
                        parser.clone(),
                        addr,
                        parallel,
                        Resource::Memcache,
                        namespace,
                    )),
                );
            }
        }
    }
    /// Replaces the routing tables from a parsed namespace config.
    fn update_from_namespace(&mut self, ns: super::Namespace) {
        let (masters, followers, readers, hash, distribution) = ns.into_split();
        self.masters = masters;
        self.followers = followers;
        self.layer_readers = readers;
        self.hash = hash;
        self.distribution = distribution;
        //self.metas = self.readers.clone().into_iter().flatten().collect();
        self.metas = self.masters.clone();
        self.correct_hash_distribution();
    }
    // Adjust hash/distribution to follow the java client's conversion logic.
    fn correct_hash_distribution(&mut self) {
        // java only supports the three combinations below; everything else is
        // unsupported — rust forces other types back to java's default. fishermen
        if (self.hash.eq(hash::HASH_BKDR) && self.distribution.eq(hash::DISTRIBUTION_CONSISTENT))
            || (self.hash.eq(hash::HASH_BKDR) && self.distribution.eq(hash::DISTRIBUTION_MODULA))
            || (self.hash.eq(hash::HASH_CRC32) && self.distribution.eq(hash::DISTRIBUTION_MODULA))
        {
            return;
        }
        // For any other combination, force the default behavior.
        log::warn!(
            "!!! found malformed hash/distribution: {}/{}, will change to crc32/mod",
            self.hash,
            self.distribution
        );
        self.hash = hash::HASH_CRC32.to_string();
        self.distribution = hash::DISTRIBUTION_MODULA.to_string();
    }
    /// Applies a new config: parses the namespace out of `name`
    /// (the part after ':'), rebuilds the routing tables, then reconciles
    /// every stream map (drop removed addresses, add new ones).
    fn update(&mut self, cfg: &str, name: &str)
    where
        P: Send + Sync + Protocol + 'static + Clone,
    {
        let p = self.parser.clone();
        let idx = name.find(':').unwrap_or(name.len());
        if idx == 0 || idx >= name.len() - 1 {
            log::info!("not a valid cache service name:{} no namespace found", name);
            return;
        }
        let namespace = &name[idx + 1..];
        match super::Namespace::parse(cfg, namespace) {
            Ok(ns) => self.update_from_namespace(ns),
            Err(e) => {
                log::info!("parse cacheservice config error: name:{} error:{}", name, e);
                return;
            }
        };
        if self.masters.len() == 0 || self.layer_readers.len() == 0 {
            log::info!("cacheservice empty. {} => {}", name, cfg);
            return;
        }
        // Per-backend parallelism for all builders.
        let c = 256;
        Self::delete_non_exists(&self.masters, &mut self.m_streams);
        Self::add_new(&p, &self.masters, &mut self.m_streams, c, namespace);
        let followers: Vec<String> = self.followers.clone().into_iter().flatten().collect();
        Self::delete_non_exists(&followers, &mut self.f_streams);
        Self::add_new(&p, followers.as_ref(), &mut self.f_streams, c, namespace);
        let readers: Vec<String> = self
            .layer_readers
            .clone()
            .into_iter()
            .flatten()
            .flatten()
            .collect();
        // get command
        Self::delete_non_exists(&readers, &mut self.get_streams);
        Self::add_new(&p, &readers, &mut self.get_streams, c, namespace);
        // get[s] command
        Self::delete_non_exists(&readers, &mut self.gets_streams);
        Self::add_new(&p, &readers, &mut self.gets_streams, c, namespace);
        // meta
        Self::delete_non_exists(&self.metas, &mut self.meta_stream);
        Self::add_new(&p, &self.metas, &mut self.meta_stream, c, namespace);
    }
}
// Manual impl because `AtomicUsize` is not `Clone`: the l1 cursor is
// snapshotted with an Acquire load into a fresh atomic.
impl<P> Clone for Topology<P>
where
    P: Clone,
{
    fn clone(&self) -> Self {
        Self {
            hash: self.hash.clone(),
            distribution: self.distribution.clone(),
            l1_seq: AtomicUsize::new(self.l1_seq.load(Ordering::Acquire)),
            masters: self.masters.clone(),
            m_streams: self.m_streams.clone(),
            followers: self.followers.clone(),
            f_streams: self.f_streams.clone(),
            layer_readers: self.layer_readers.clone(),
            get_streams: self.get_streams.clone(),
            gets_streams: self.gets_streams.clone(),
            metas: self.metas.clone(),
            meta_stream: self.meta_stream.clone(),
            parser: self.parser.clone(),
        }
    }
}
impl<P> discovery::Topology for Topology<P>
where
    P: Send + Sync + Protocol,
{
    /// Service-discovery entry point: logs and delegates to the inherent
    /// `update` to apply the new config snapshot.
    fn update(&mut self, cfg: &str, name: &str) {
        log::info!("cache service topology received:{}", name);
        self.update(cfg, name);
        log::info!("master:{:?}", self.masters);
    }
}
// left-right integration: operations are (cfg, name) pairs applied to the
// write-side copy; the read side catches up by cloning.
impl<P> left_right::Absorb<(String, String)> for Topology<P>
where
    P: Send + Sync + Protocol + 'static + Clone,
{
    /// Applies a (cfg, name) operation to this (write-side) copy.
    fn absorb_first(&mut self, cfg: &mut (String, String), _other: &Self) {
        self.update(&cfg.0, &cfg.1);
    }
    /// Brings a lagging copy up to date by cloning the other side.
    fn sync_with(&mut self, first: &Self) {
        *self = first.clone();
    }
}
/// Builds an empty topology around `parser`; all routing tables start
/// empty and are filled in later by `update`.
impl<P> From<P> for Topology<P> {
    fn from(parser: P) -> Self {
        Self {
            // Field-init shorthand (was the redundant `parser: parser`).
            parser,
            l1_seq: AtomicUsize::new(0),
            hash: Default::default(),
            distribution: Default::default(),
            masters: Default::default(),
            m_streams: Default::default(),
            followers: Default::default(),
            f_streams: Default::default(),
            layer_readers: Default::default(),
            get_streams: Default::default(),
            gets_streams: Default::default(),
            metas: Default::default(),
            meta_stream: Default::default(),
        }
    }
}
|
/*!
When working with data pipes it is often necessary to distinguish between EOF on the reader side caused by writer thread finishing write and writer panicking. This crate provides fused reader type that if writer thread dies while holding armed fuse the reader will get `BrokenPipe` error.
Fuses can also be blown with custom error that is passed to the reader end.
Example usage
=============
Writer panics and reader gets `BrokenPipe` error.
```rust
use pipe::pipe;
use fused_reader::fuse;
use std::io::{Read, Write, ErrorKind};
use std::thread;
let (reader, mut writer) = pipe();
let (mut reader, fuse) = fuse(reader);
thread::spawn(move || {
let _fuse = fuse.arm().unwrap();
writer.write(&[1]).unwrap();
panic!("boom");
});
let mut data = Vec::new();
assert_eq!(reader.read_to_end(&mut data).unwrap_err().kind(), ErrorKind::BrokenPipe);
assert_eq!(&data, &[1]); // data that was written before panic
```
Writer fails with error passed to reader.
```rust
use pipe::pipe;
use fused_reader::fuse;
use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::thread;
let (reader, mut writer) = pipe();
let (mut reader, fuse) = fuse(reader);
thread::spawn(move || {
let fuse = fuse.arm().unwrap();
writer.write(&[1]).unwrap();
fuse.blow(IoError::new(ErrorKind::UnexpectedEof, "uh! oh!"))
});
let mut data = Vec::new();
assert_eq!(reader.read_to_end(&mut data).unwrap_err().kind(), ErrorKind::UnexpectedEof);
assert_eq!(&data, &[1]); // data that was written before error
```
!*/
use std::io::{Read, Error as IoError, ErrorKind};
use std::sync::{Arc, Mutex, MutexGuard, TryLockError};
/// Fuses reader so that if writer thread dies while holding armed fuse the reader will get `BrokenPipe` error.
pub fn fuse<R: Read>(reader: R) -> (FusedReader<R>, Fuse) {
    // One shared slot for the pending error; reader and fuse each hold a handle.
    let shared = Arc::new(Mutex::new(Ok(())));
    let fused = FusedReader {
        reader,
        fuse: Arc::clone(&shared),
    };
    (fused, Fuse(shared))
}
/// Reader that will fail with I/O error if fuse was blown.
#[derive(Debug)]
pub struct FusedReader<R: Read> {
    // Underlying reader; every read is forwarded to it first.
    reader: R,
    // Shared with `Fuse`: holds the pending error. The mutex stays locked
    // for as long as the fuse is armed, so a failed `try_lock` means "armed".
    fuse: Arc<Mutex<Result<(), IoError>>>,
}
/// Status of the fuse.
#[derive(Debug)]
pub enum FuseStatus {
    /// Fuse was not armed or guard got dropped.
    Unarmed,
    /// Fuse armed (the guard is currently held by the writer side).
    Armed,
    /// Fuse blown with custom error (delivered at most once).
    Blown(IoError),
    /// Fuse blown by panic unwind (mutex poisoned).
    Poisoned,
}
impl<R: Read> FusedReader<R> {
    /// Checks status of the fuse.
    ///
    /// Note that the variant `FuseStatus::Blown` is provided only once and following calls will
    /// return `FuseStatus::Unarmed` instead.
    pub fn check_fuse(&mut self) -> FuseStatus {
        match self.fuse.try_lock() {
            // Lock acquired: the fuse is not armed. Take out any stored
            // error, resetting the slot to Ok so it is reported only once.
            Ok(mut guard) => match std::mem::replace(&mut *guard, Ok(())) {
                Err(err) => FuseStatus::Blown(err),
                Ok(()) => FuseStatus::Unarmed,
            },
            // Lock held elsewhere: the writer still holds the armed guard.
            Err(TryLockError::WouldBlock) => FuseStatus::Armed,
            // Writer panicked while holding the guard.
            Err(TryLockError::Poisoned(_)) => FuseStatus::Poisoned,
        }
    }
    /// Returns inner reader.
    pub fn into_inner(self) -> R {
        self.reader
    }
}
impl<R: Read> Read for FusedReader<R> {
    /// Forwards to the inner reader; only on EOF (0 bytes) is the fuse
    /// consulted, so data written before a failure is still delivered.
    fn read(&mut self, buf: &mut [u8]) -> Result<usize, IoError> {
        // let it read to end before checking fuse
        self.reader.read(buf).and_then(|bytes| if bytes == 0 {
            match self.check_fuse() {
                FuseStatus::Blown(err) => Err(err),
                FuseStatus::Poisoned => Err(IoError::new(ErrorKind::BrokenPipe, "writer end dropped due to panic")),
                // Unarmed: plain EOF. Armed: writer still alive; report EOF
                // for now — a later read may surface the error.
                FuseStatus::Unarmed |
                FuseStatus::Armed => Ok(bytes),
            }
        } else {
            Ok(bytes)
        })
    }
}
/// Fuse that can be armed.
#[derive(Debug)]
pub struct Fuse(Arc<Mutex<Result<(), IoError>>>);
impl Fuse {
    /// Arms the fuse.
    ///
    /// Arming locks the shared mutex; the lock is held until the returned
    /// guard is dropped (or blown), which is how the reader detects "armed".
    /// Returns `BrokenPipe` error if reader was dropped due to panic.
    pub fn arm(&self) -> Result<FuseGuard, IoError> {
        self.0.lock().map(FuseGuard).map_err(|_| IoError::new(ErrorKind::BrokenPipe, "reader end dropped due to panic"))
    }
}
/// Armed fuse that if dropped due to panic will signal reader to fail with `BrokenPipe` error.
#[derive(Debug)]
pub struct FuseGuard<'a>(MutexGuard<'a, Result<(), IoError>>);
impl<'a> FuseGuard<'a> {
    /// Blows the fuse with given error.
    ///
    /// Stores the error in the shared slot and releases the guard (self is
    /// consumed). The reader end will fail with this error after reaching EOF.
    pub fn blow(mut self, err: IoError) {
        *self.0 = Err(err);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::io::Write;
    use pipe::pipe;
    // Baseline: without a fuse a writer panic just looks like clean EOF.
    #[test]
    fn test_unfused_panic() {
        let (mut reader, mut writer) = pipe();
        thread::spawn(move || {
            writer.write(&[1]).unwrap();
            panic!("boom");
        });
        let mut data = Vec::new();
        assert!(reader.read_to_end(&mut data).is_ok());
        assert_eq!(&data, &[1]);
    }
    // Armed fuse + clean writer exit: reader sees a plain EOF.
    #[test]
    fn test_fused_nopanic() {
        let (reader, mut writer) = pipe();
        let (mut reader, fuse) = fuse(reader);
        thread::spawn(move || {
            let _fuse = fuse.arm().unwrap();
            writer.write(&[1]).unwrap();
        });
        let mut data = Vec::new();
        assert!(reader.read_to_end(&mut data).is_ok());
        assert_eq!(&data, &[1]);
    }
    // Armed fuse + writer panic: reader gets BrokenPipe after the data.
    #[test]
    fn test_fused_panic() {
        let (reader, mut writer) = pipe();
        let (mut reader, fuse) = fuse(reader);
        thread::spawn(move || {
            let _fuse = fuse.arm().unwrap();
            writer.write(&[1]).unwrap();
            panic!("boom");
        });
        let mut data = Vec::new();
        assert_eq!(reader.read_to_end(&mut data).unwrap_err().kind(), ErrorKind::BrokenPipe);
        assert_eq!(&data, &[1]);
    }
    // Explicitly blown fuse propagates the custom error to the reader.
    #[test]
    fn test_fused_blow() {
        let (reader, mut writer) = pipe();
        let (mut reader, fuse) = fuse(reader);
        thread::spawn(move || {
            let fuse = fuse.arm().unwrap();
            writer.write(&[1]).unwrap();
            fuse.blow(IoError::new(ErrorKind::UnexpectedEof, "uh! oh!"))
        });
        let mut data = Vec::new();
        assert_eq!(reader.read_to_end(&mut data).unwrap_err().kind(), ErrorKind::UnexpectedEof);
        assert_eq!(&data, &[1]);
    }
}
|
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
use std::{
sync::{Mutex, MutexGuard, PoisonError, TryLockError},
thread::yield_now,
};
/// Lock several mutexes at once without deadlocking.
///
/// Uses the strategy from gcc's libstdc++ `std::lock`: block on one mutex
/// and `try_lock` the rest in cyclic order; whenever a `try_lock` would
/// block, release everything and restart, this time blocking first on the
/// contended mutex.
///
/// The returned guards are in the same order as the input mutexes.
pub fn lock_many_vec<'a, T>(
    mutices: &[&'a Mutex<T>],
) -> Result<Vec<MutexGuard<'a, T>>, PoisonError<MutexGuard<'a, T>>> {
    let count = mutices.len();
    let mut guards = Vec::with_capacity(count);
    let mut first = 0;
    loop {
        // Block on the mutex that was contended last round (index 0 initially)…
        guards.push(mutices[first].lock()?);
        // …then opportunistically try the remaining ones in cyclic order.
        let mut blocked = None;
        for offset in 1..count {
            let idx = (first + offset) % count;
            match mutices[idx].try_lock() {
                Ok(guard) => guards.push(guard),
                Err(TryLockError::Poisoned(e)) => return Err(e),
                Err(TryLockError::WouldBlock) => {
                    blocked = Some(idx);
                    break;
                }
            }
        }
        match blocked {
            None => {
                // Guards were collected starting at `first`; rotate back to
                // restore the input order.
                guards.rotate_right(first);
                return Ok(guards);
            }
            Some(idx) => {
                // Release everything we hold, then retry rooted at the
                // contended mutex, giving other threads a chance to run.
                guards.clear();
                first = idx;
                yield_now();
            }
        }
    }
}
// TODO replace both implementations with macro.
/**
 * Lock several locks at once. Algorithms is taken from the gcc's libstdc++:
 * lock the first lock and try_lock the next one; if some one fails,
 * repeat starting with locking the failed one and try_locking others in cyclic manner.
 *
 * However, the order of result items does match the order of the input mutexes.
 *
 * Allocation-free variant of `lock_many_vec`: collects into an
 * `ArrayVec<_, N>` (caller-chosen capacity; panics if `mutices.len() > N`,
 * per `ArrayVec::push` semantics — TODO confirm that is acceptable).
 */
#[cfg(feature = "arrayvec")]
#[cfg_attr(docsrs, doc(cfg(feature = "arrayvec")))]
pub fn lock_many_arrayvec<'a, T, const N: usize>(
    mutices: &[&'a Mutex<T>],
) -> Result<arrayvec::ArrayVec<MutexGuard<'a, T>, N>, PoisonError<MutexGuard<'a, T>>> {
    let len = mutices.len();
    let mut res = arrayvec::ArrayVec::new();
    let mut start = 0;
    'retry: loop {
        // Block on the mutex that failed last round, try the rest cyclically.
        res.push(mutices[start].lock()?);
        for n in 1..len {
            let idx = (start + n) % len;
            match mutices[idx].try_lock() {
                Ok(guard) => res.push(guard),
                Err(TryLockError::Poisoned(e)) => return Err(e),
                Err(TryLockError::WouldBlock) => {
                    // Drop all held guards and restart at the contended lock.
                    res.clear();
                    start = idx;
                    yield_now();
                    continue 'retry;
                }
            }
        }
        // Guards were collected starting at `start`; restore input order.
        res.rotate_right(start);
        return Ok(res);
    }
}
#[cfg(test)]
mod tests {
    use std::{
        ops::Deref,
        sync::{Arc, Mutex},
        thread::{sleep, spawn},
        time::Duration,
    };
    #[cfg(feature = "arrayvec")]
    use crate::lock_many_arrayvec;
    use crate::lock_many_vec;
    // Smoke test: both guards are returned and dereference correctly.
    #[test]
    fn it_works() {
        let mut1 = Mutex::new(42);
        let mut2 = Mutex::new(43);
        assert_eq!(
            42 + 43,
            lock_many_vec(&[&mut1, &mut2])
                .unwrap()
                .into_iter()
                .map(|g| *g)
                .sum::<i32>()
        );
    }
    // A held lock in the middle forces the retry path; the result must
    // still come back in input order.
    #[test]
    fn test_ordering() {
        let mutices = Arc::new((0..4).map(Mutex::new).collect::<Vec<_>>());
        let mutices2 = mutices.clone();
        let holder = spawn(move || {
            let g = mutices2[2].lock().unwrap();
            sleep(Duration::from_millis(300));
            std::mem::drop(g);
        });
        sleep(Duration::from_millis(200));
        let mutices_ref = mutices.iter().collect::<Vec<&Mutex<_>>>();
        let g = lock_many_vec(&mutices_ref).unwrap();
        let g_val = g.iter().map(Deref::deref).collect::<Vec<_>>();
        assert_eq!(&g_val, &[&0, &1, &2, &3]);
        holder.join().unwrap();
    }
    #[cfg(feature = "arrayvec")]
    #[test]
    fn it_works_arrayvec() {
        let mut1 = Mutex::new(42);
        let mut2 = Mutex::new(43);
        assert_eq!(
            42 + 43,
            lock_many_arrayvec::<_, 2>(&[&mut1, &mut2])
                .unwrap()
                .into_iter()
                .map(|g| *g)
                .sum::<i32>()
        );
    }
    // Same ordering check for the ArrayVec variant.
    #[test]
    #[cfg(feature = "arrayvec")]
    fn test_ordering_arrayvec() {
        let mutices = Arc::new((0..4).map(Mutex::new).collect::<Vec<_>>());
        let mutices2 = mutices.clone();
        let holder = spawn(move || {
            let g = mutices2[2].lock().unwrap();
            sleep(Duration::from_millis(200));
            std::mem::drop(g);
        });
        sleep(Duration::from_millis(100));
        let mutices_ref = mutices.iter().collect::<Vec<&Mutex<_>>>();
        let g = lock_many_arrayvec::<_, 4>(&mutices_ref).unwrap();
        let g_val = g.iter().map(Deref::deref).collect::<Vec<_>>();
        assert_eq!(&g_val, &[&0, &1, &2, &3]);
        holder.join().unwrap();
    }
    // Stress test: two writer threads bump per-mutex counters while the
    // main thread repeatedly locks both and drains them; totals must add up.
    #[test]
    fn test_parallel() {
        let mut1 = Arc::new(Mutex::new((0, 0)));
        let mut2 = Arc::new(Mutex::new((0, 0)));
        const REPEAT: i32 = 1000000;
        let complete = Arc::new(Mutex::new(0));
        let mut cnt = 0;
        let mut1a = mut1.clone();
        let completea = complete.clone();
        let tha = std::thread::spawn(move || {
            for _ in 0..REPEAT {
                mut1a.lock().unwrap().0 += 1;
            }
            let mut compl_guard = completea.lock().unwrap();
            *compl_guard += 1;
        });
        let mut2b = mut2.clone();
        let completeb = complete.clone();
        let thb = std::thread::spawn(move || {
            for _ in 0..REPEAT {
                mut2b.lock().unwrap().0 += 1;
            }
            let mut compl_guard = completeb.lock().unwrap();
            *compl_guard += 1;
        });
        loop {
            // Read the completion count *before* draining so the final
            // increments are still collected on the last pass.
            let complete_val = *complete.lock().unwrap();
            cnt += 1;
            let locks = lock_many_vec(&[mut1.deref(), mut2.deref()]).unwrap();
            for mut g in locks.into_iter() {
                g.1 += g.0;
                g.0 = 0;
            }
            if complete_val == 2 {
                break;
            }
        }
        tha.join().unwrap();
        thb.join().unwrap();
        {
            let g1 = mut1.lock().unwrap();
            assert_eq!(g1.0, 0);
            assert_eq!(g1.1, REPEAT);
        }
        let g2 = mut2.lock().unwrap();
        assert_eq!(g2.0, 0);
        assert_eq!(g2.1, REPEAT);
        // Well, may fail on particular scheduling...
        assert!(cnt > 200, "{}", cnt);
    }
}
|
/// Advent of Code 2020 day 18: evaluate arithmetic homework lines with
/// non-standard operator precedence and print the sum of all results.
///
/// Part 1: `+` and `*` share one precedence level (strict left-to-right).
/// Part 2: `+` binds tighter than `*`.
///
/// Refactored: the original duplicated the whole shunting-yard + RPN
/// evaluation twice (once per part) and re-tokenized the input via a cloned
/// iterator; both passes now share `tokenize`/`eval_line`.
fn main() -> std::io::Result<()> {
    let input = std::fs::read_to_string("examples/18/input.txt")?;
    // Tokenize every line once and reuse for both parts.
    let lines: Vec<Vec<Token>> = input.lines().map(tokenize).collect();

    // Part 1: both operators share one precedence level.
    let sum: u64 = lines.iter().map(|line| eval_line(line, false)).sum();
    println!("{}", sum);

    // Part 2: addition binds tighter than multiplication.
    let sum: u64 = lines.iter().map(|line| eval_line(line, true)).sum();
    println!("{}", sum);
    Ok(())
}

/// Lex one expression line. Whitespace is dropped; every character that is
/// not an operator or parenthesis must be a single decimal digit
/// (panics otherwise, matching the original behavior on the puzzle input).
fn tokenize(line: &str) -> Vec<Token> {
    line.chars()
        .filter_map(|ch| match ch {
            ' ' => None,
            '+' => Some(Token::Operator(Operator::Add)),
            '*' => Some(Token::Operator(Operator::Mul)),
            '(' => Some(Token::Operator(Operator::LParen)),
            ')' => Some(Token::Operator(Operator::RParen)),
            _ => Some(Token::Number(ch.to_digit(10).unwrap() as u64)),
        })
        .collect()
}

/// Binding strength of an operator. With `add_binds_tighter` unset, both
/// operators rank equally (part 1); when set, `+` outranks `*` (part 2).
fn precedence(op: Operator, add_binds_tighter: bool) -> u8 {
    match op {
        Operator::Add if add_binds_tighter => 2,
        Operator::Add | Operator::Mul => 1,
        // Parentheses never take part in precedence comparisons.
        Operator::LParen | Operator::RParen => 0,
    }
}

/// Convert an infix token slice to reverse Polish notation with the
/// shunting-yard algorithm, then evaluate the RPN with a value stack.
/// Both `+` and `*` are treated as left-associative.
fn eval_line(tokens: &[Token], add_binds_tighter: bool) -> u64 {
    // --- Shunting-yard: infix -> RPN ---
    let mut ops: Vec<Operator> = Vec::new();
    let mut rpn: Vec<Token> = Vec::new();
    for &token in tokens {
        match token {
            Token::Number(_) => rpn.push(token),
            Token::Operator(Operator::LParen) => ops.push(Operator::LParen),
            Token::Operator(Operator::RParen) => {
                // Flush operators back to (and including) the matching '('.
                while let Some(op) = ops.pop() {
                    if op == Operator::LParen {
                        break;
                    }
                    rpn.push(Token::Operator(op));
                }
            }
            Token::Operator(op) => {
                // Pop operators of greater-or-equal precedence (left assoc).
                while let Some(&top) = ops.last() {
                    if top == Operator::LParen
                        || precedence(top, add_binds_tighter) < precedence(op, add_binds_tighter)
                    {
                        break;
                    }
                    rpn.push(Token::Operator(ops.pop().unwrap()));
                }
                ops.push(op);
            }
        }
    }
    while let Some(op) = ops.pop() {
        rpn.push(Token::Operator(op));
    }
    // --- Evaluate the RPN with a value stack ---
    let mut values: Vec<u64> = Vec::new();
    for token in rpn {
        match token {
            Token::Number(n) => values.push(n),
            Token::Operator(Operator::Add) => {
                let a = values.pop().unwrap();
                let b = values.pop().unwrap();
                values.push(a + b);
            }
            Token::Operator(Operator::Mul) => {
                let a = values.pop().unwrap();
                let b = values.pop().unwrap();
                values.push(a * b);
            }
            // Parentheses were eliminated by the shunting-yard pass.
            Token::Operator(Operator::LParen) | Token::Operator(Operator::RParen) => {
                unreachable!()
            }
        }
    }
    values.pop().unwrap()
}

/// A lexical token: a literal (single-digit) number or an operator.
#[derive(PartialEq, Debug, Copy, Clone)]
enum Token {
    Number(u64),
    Operator(Operator),
}

/// Operators recognized by the expression grammar; parentheses are
/// modeled as operators so they can share the shunting-yard stack.
#[derive(PartialEq, Debug, Copy, Clone)]
enum Operator {
    Add,
    Mul,
    LParen,
    RParen,
}
|
// Copyright © 2019 Bart Massey
// [This program is licensed under the "MIT License"]
// Please see the file LICENSE in the source
// distribution of this software for license terms.
// Workaround for `Vec::retain()` passing `&T` instead of
// `&mut T`. See RFC #2160 and issue #25477 for discussion
// of inclusion of this in `std` (looks like it won't be),
// and issue #43244 tracking `Vec::drain_filter()`, which
// is in nightly as a more general proposed replacement,
// but currently has stabilization issues.
use retain_mut::RetainMut;
use crate::*;
/// A sample "mixer" that adds values from streams
/// of samples and scales appropriately to get output samples.
/// Implemented as an unbounded iterator: will return `Some(0.0)`
/// when no sample streams are available.
pub struct Mixer<'a> {
    /// Active iterators for streams.
    streams: Vec<Samples<'a>>,
    /// Current mixer gain value (recomputed by `agc` as streams come and go).
    gain: f32,
}
/// Max voices before AGC kicks in: up to this many simultaneous
/// streams the mixer keeps a constant gain.
const AGC_VOICES: usize = 8;
/// Mixer gain before AGC kicks in; with `AGC_VOICES` full-scale
/// voices the summed output stays at 8 * 0.1 = 0.8.
const LINEAR_GAIN: f32 = 0.1;
impl<'a> Mixer<'a> {
    /// New mixer with no streams and linear gain.
    pub fn new() -> Self {
        Self { streams: vec![], gain: LINEAR_GAIN }
    }
    /// New mixer with initial streams (gain already adjusted for the count).
    pub fn with_streams(streams: Vec<Samples<'a>>) -> Self {
        let mut mixer = Self::new();
        for st in streams {
            mixer.add(st);
        }
        mixer
    }
    /// Add a stream to the mixer, re-running AGC for the new voice count.
    pub fn add(&mut self, st: Samples<'a>) {
        self.streams.push(st);
        self.agc();
    }
    /// Adjust the gain to avoid clipping while preserving
    /// some linearity. Essentially a compressor: gain stays linear up to
    /// `AGC_VOICES` voices, then scales down proportionally with the count.
    fn agc(&mut self) {
        let nstreams = self.streams.len();
        self.gain = if nstreams <= AGC_VOICES {
            LINEAR_GAIN
        } else {
            LINEAR_GAIN * AGC_VOICES as f32 / nstreams as f32
        };
    }
}
/// `Default` is the empty mixer, identical to `Mixer::new()`
/// (added to satisfy the `new_without_default` convention).
impl<'a> Default for Mixer<'a> {
    fn default() -> Self {
        Self::new()
    }
}
/// Iterator over simultaneous streams of samples that adds
/// them to get a result.
impl<'a> Iterator for Mixer<'a> {
    type Item = f32;
    // Get the next mixed sample. We do not assume that the
    // input streams are infinite, but the output stream is.
    fn next(&mut self) -> Option<f32> {
        let mut result = 0.0;
        let mut agc = false;
        // Sum one sample from each stream, dropping exhausted streams
        // in place; any removal re-triggers AGC below.
        self.streams.retain_mut(|st| {
            let s = st.next();
            match s {
                Some(s) => result += s,
                None => agc = true,
            }
            s.is_some()
        });
        if agc {
            self.agc();
        }
        // Always yields a sample; once all streams end this is 0.0 * gain.
        Some(result * self.gain)
    }
}
|
//! Transform contains a position, a rotation, and a size used by every shape in fumarole
use crate::*;
use math::*;
use serde::{Serialize, Deserialize};
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
pub struct Transform {
    // Translation component.
    pub position: Vec2<f32>,
    // Rotation angle; it is fed to `Mat2::from_radians` elsewhere in this
    // module, so presumably radians — TODO confirm.
    pub rotation: f32,
    // Scale component.
    pub size: Vec2<f32>,
}
impl Transform {
    /// Identity transform: no translation, no rotation, unit scale.
    ///
    /// NOTE(review): `derive(Default)` on `Transform` will default `size`
    /// as well, which likely differs from the (1.0, 1.0) used here —
    /// confirm which is intended and consider a manual `Default`.
    #[inline]
    pub fn new() -> Self {
        Transform {
            position: Vec2::new(0.0, 0.0),
            rotation: 0.0,
            size: Vec2::new(1.0, 1.0),
        }
    }
    /// Transforms self by another transform
    ///
    /// Applies the other's scale, then rotation, then translation to the
    /// position, and accumulates the rotation angles. `size` is left
    /// unchanged — TODO confirm that not composing scale is intended.
    #[inline]
    pub fn transform(mut self, other: Transform) -> Self {
        self.position *= other.size;
        self.position *= Mat2::<f32>::from_radians(other.rotation);
        self.position += other.position;
        self.rotation += other.rotation;
        self
    }
}
|
//! [ECS](https://aws.amazon.com/ecs/) bindings for Rust
//!
//! To get started, see the docs for [ECSClient](struct.ECSClient.html)
#![allow(non_snake_case)]
use credentials::AWSCredentialsProvider;
use regions::Region;
use signature::SignedRequest;
// The client and helper sources are generated into OUT_DIR — presumably by
// this crate's build script — and spliced in at compile time.
include!(concat!(env!("OUT_DIR"), "/ecs.rs"));
include!(concat!(env!("OUT_DIR"), "/ecs_helpers.rs"));
|
//! Clipping Region
//use crate::POLY_SUBPIXEL_SCALE;
use crate::cell::RasterizerCell;
/// Rectangle with sorted bounds (`x1 <= x2`, `y1 <= y2`)
#[derive(Debug, Copy, Clone)]
pub struct Rectangle<T: std::cmp::PartialOrd + Copy> {
    /// Minimum x value
    x1: T,
    /// Minimum y value
    y1: T,
    /// Maximum x value
    x2: T,
    /// Maximum y value
    y2: T,
}
impl<T> Rectangle<T>
where
    T: std::cmp::PartialOrd + Copy,
{
    /// Create a new Rectangle
    ///
    /// Each coordinate pair is sorted before storing, so arguments may be
    /// given in any order.
    pub fn new(x1: T, y1: T, x2: T, y2: T) -> Self {
        let (x1, x2) = if x1 > x2 { (x2, x1) } else { (x1, x2) };
        // Bug fix: the y pair was previously ordered with `y1 > x2`,
        // comparing against the *x* bound, which left y1/y2 unsorted
        // whenever y1 <= x2.
        let (y1, y2) = if y1 > y2 { (y2, y1) } else { (y1, y2) };
        Self { x1, y1, x2, y2 }
    }
    /// Get location of point relative to rectangle
    ///
    /// Returned is a u8 made up of the following bits:
    /// - [INSIDE](constant.INSIDE.html)
    /// - [LEFT](constant.LEFT.html)
    /// - [RIGHT](constant.RIGHT.html)
    /// - [BOTTOM](constant.BOTTOM.html)
    /// - [TOP](constant.TOP.html)
    pub fn clip_flags(&self, x: T, y: T) -> u8 {
        clip_flags(&x, &y, &self.x1, &self.y1, &self.x2, &self.y2)
    }
    /// Expand if the point (x,y) is outside
    pub fn expand(&mut self, x: T, y: T) {
        if x < self.x1 { self.x1 = x; }
        if x > self.x2 { self.x2 = x; }
        if y < self.y1 { self.y1 = y; }
        if y > self.y2 { self.y2 = y; }
    }
    /// Expand if the rectangle is outside
    pub fn expand_rect(&mut self, r: &Rectangle<T>) {
        self.expand(r.x1, r.y1);
        self.expand(r.x2, r.y2);
    }
    /// Minimum x value
    pub fn x1(&self) -> T { self.x1 }
    /// Maximum x value
    pub fn x2(&self) -> T { self.x2 }
    /// Minimum y value
    pub fn y1(&self) -> T { self.y1 }
    /// Maximum y value
    pub fn y2(&self) -> T { self.y2 }
}
/// Inside Region
///
/// See https://en.wikipedia.org/wiki/Liang-Barsky_algorithm
/// See https://en.wikipedia.org/wiki/Cyrus-Beck_algorithm
pub const INSIDE: u8 = 0b0000;
/// Left of Region
///
/// See [Liang Barsky](https://en.wikipedia.org/wiki/Liang-Barsky_algorithm)
///
/// See [Cyrus Beck](https://en.wikipedia.org/wiki/Cyrus-Beck_algorithm)
pub const LEFT: u8 = 0b0000_0001;
/// Right of Region
///
/// See [Liang Barsky](https://en.wikipedia.org/wiki/Liang-Barsky_algorithm)
///
/// See [Cyrus Beck](https://en.wikipedia.org/wiki/Cyrus-Beck_algorithm)
pub const RIGHT: u8 = 0b0000_0010;
/// Below Region
///
/// See [Liang Barsky](https://en.wikipedia.org/wiki/Liang-Barsky_algorithm)
///
/// See [Cyrus Beck](https://en.wikipedia.org/wiki/Cyrus-Beck_algorithm)
pub const BOTTOM: u8 = 0b0000_0100;
/// Above Region
///
/// See [Liang Barsky](https://en.wikipedia.org/wiki/Liang-Barsky_algorithm)
///
/// See [Cyrus Beck](https://en.wikipedia.org/wiki/Cyrus-Beck_algorithm)
pub const TOP: u8 = 0b0000_1000;
/// Determine the location of a point relative to a broken-down rectangle or range
///
/// Returned is a u8 made up of the following bits:
/// - [INSIDE](constant.INSIDE.html)
/// - [LEFT](constant.LEFT.html)
/// - [RIGHT](constant.RIGHT.html)
/// - [BOTTOM](constant.BOTTOM.html)
/// - [TOP](constant.TOP.html)
fn clip_flags<T: std::cmp::PartialOrd>(x: &T, y: &T, x1: &T, y1: &T, x2: &T, y2: &T) -> u8 {
    let mut code = INSIDE;
    if x < x1 { code |= LEFT; }
    if x > x2 { code |= RIGHT; }
    if y < y1 { code |= BOTTOM; }
    if y > y2 { code |= TOP; }
    code
}
/// Clip Region
///
/// Clipping for Rasterizers
#[derive(Debug)]
pub struct Clip {
    /// Current x Point (end of the last clipped segment)
    x1: i64,
    /// Current y Point
    y1: i64,
    /// Rectangle to clip on; `None` disables clipping entirely
    clip_box: Option<Rectangle<i64>>,
    /// Current clip flag for point (x1,y1)
    clip_flag: u8,
}
/// Compute `a * b / c` in f64 and round to the nearest i64
/// (ties round away from zero, per `f64::round`).
fn mul_div(a: i64, b: i64, c: i64) -> i64 {
    ((a as f64) * (b as f64) / (c as f64)).round() as i64
}
impl Clip {
    /// Create new Clipping region: cursor at the origin, no clip box,
    /// and the current flag set to INSIDE
    pub fn new() -> Self {
        Self {
            x1: 0,
            y1: 0,
            clip_box: None,
            clip_flag: INSIDE
        }
    }
/// Clip a line along the top and bottom of the regon
fn line_clip_y(&self, ras: &mut RasterizerCell,
x1: i64, y1: i64,
x2: i64, y2: i64,
f1: u8, f2: u8) {
let b = match self.clip_box {
None => return,
Some(ref b) => b,
};
let f1 = f1 & (TOP|BOTTOM);
let f2 = f2 & (TOP|BOTTOM);
// Fully Visible in y
if f1 == INSIDE && f2 == INSIDE {
ras.line(x1,y1,x2,y2);
} else {
// Both points above or below clip box
if f1 == f2 {
return;
}
let (mut tx1, mut ty1, mut tx2, mut ty2) = (x1,y1,x2,y2);
if f1 == BOTTOM {
tx1 = x1 + mul_div(b.y1-y1, x2-x1, y2-y1);
ty1 = b.y1;
}
if f1 == TOP {
tx1 = x1 + mul_div(b.y2-y1, x2-x1, y2-y1);
ty1 = b.y2;
}
if f2 == BOTTOM {
tx2 = x1 + mul_div(b.y1-y1, x2-x1, y2-y1);
ty2 = b.y1;
}
if f2 == TOP {
tx2 = x1 + mul_div(b.y2-y1, x2-x1, y2-y1);
ty2 = b.y2;
}
ras.line(tx1,tx2,ty1,ty2);
}
}
/// Draw a line from (x1,y1) to (x2,y2) into a RasterizerCell
///
/// Final point (x2,y2) is saved internally as (x1,y1))
pub(crate) fn line_to(&mut self, ras: &mut RasterizerCell, x2: i64, y2: i64) {
if let Some(ref b) = self.clip_box {
let f2 = b.clip_flags(x2,y2);
// Both points above or below clip box
let fy1 = (TOP | BOTTOM) & self.clip_flag;
let fy2 = (TOP | BOTTOM) & f2;
if fy1 != INSIDE && fy1 == fy2 {
self.x1 = x2;
self.y1 = y2;
self.clip_flag = f2;
return;
}
let (x1,y1,f1) = (self.x1, self.y1, self.clip_flag);
match (f1 & (LEFT|RIGHT), f2 & (LEFT|RIGHT)) {
(INSIDE,INSIDE) => self.line_clip_y(ras, x1,y1,x2,y2,f1,f2),
(INSIDE,RIGHT) => {
let y3 = y1 + mul_div(b.x2-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x2, y3);
self.line_clip_y(ras, x1, y1, b.x2, y3, f1, f3);
self.line_clip_y(ras, b.x2, y3, b.x2, y2, f3, f2);
},
(RIGHT,INSIDE) => {
let y3 = y1 + mul_div(b.x2-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x2, y3);
self.line_clip_y(ras, b.x2, y1, b.x2, y3, f1, f3);
self.line_clip_y(ras, b.x2, y3, x2, y2, f3, f2);
},
(INSIDE,LEFT) => {
let y3 = y1 + mul_div(b.x1-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x1, y3);
self.line_clip_y(ras, x1, y1, b.x1, y3, f1, f3);
self.line_clip_y(ras, b.x1, y3, b.x1, y2, f3, f2);
},
(RIGHT,LEFT) => {
let y3 = y1 + mul_div(b.x2-x1, y2-y1, x2-x1);
let y4 = y1 + mul_div(b.x1-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x2, y3);
let f4 = b.clip_flags(b.x1, y4);
self.line_clip_y(ras, b.x2, y1, b.x2, y3, f1, f3);
self.line_clip_y(ras, b.x2, y3, b.x1, y4, f3, f4);
self.line_clip_y(ras, b.x1, y4, b.x1, y2, f4, f2);
},
(LEFT,INSIDE) => {
let y3 = y1 + mul_div(b.x1-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x1, y3);
self.line_clip_y(ras, b.x1, y1, b.x1, y3, f1, f3);
self.line_clip_y(ras, b.x1, y3, x2, y2, f3, f2);
},
(LEFT,RIGHT) => {
let y3 = y1 + mul_div(b.x1-x1, y2-y1, x2-x1);
let y4 = y1 + mul_div(b.x2-x1, y2-y1, x2-x1);
let f3 = b.clip_flags(b.x1, y3);
let f4 = b.clip_flags(b.x2, y4);
self.line_clip_y(ras, b.x1, y1, b.x1, y3, f1, f3);
self.line_clip_y(ras, b.x1, y3, b.x2, y4, f3, f4);
self.line_clip_y(ras, b.x2, y4, b.x2, y2, f4, f2);
},
(LEFT,LEFT) => self.line_clip_y(ras, b.x1,y1,b.x1,y2,f1,f2),
(RIGHT,RIGHT) => self.line_clip_y(ras, b.x2,y1,b.x2,y2,f1,f2),
(_,_) => unreachable!("f1,f2 {:?} {:?}", f1,f2),
}
self.clip_flag = f2;
} else {
ras.line(self.x1, self.y1, x2, y2);
}
self.x1 = x2;
self.y1 = y2;
}
/// Move to point (x2,y2)
///
/// Point is saved internally as (x1,y1)
pub(crate) fn move_to(&mut self, x2: i64, y2: i64) {
self.x1 = x2;
self.y1 = y2;
if let Some(ref b) = self.clip_box {
self.clip_flag = clip_flags(&x2,&y2,
&b.x1,&b.y1,
&b.x2,&b.y2);
}
}
/// Define the clipping region
pub fn clip_box(&mut self, x1: i64, y1: i64, x2: i64, y2: i64) {
self.clip_box = Some( Rectangle::new(x1, y1, x2, y2) );
}
}
|
mod sc2000;
use sc2000::*;
use ini::Ini;
use std::process::abort;
/// Read hashing parameters from `input.ini` and print the 128-bit hash
/// of the configured input file.
fn main() {
    let mut h0 = String::new();
    let mut input_name = String::new();
    // `input.ini` supplies the initial hash value (Cypher/H0), the required
    // hash length (Cypher/HashLength) and the file to hash (Stream/File).
    let file = Ini::load_from_file("input.ini").expect("failed to load input.ini");
    for (sec, prop) in file.iter() {
        for (k, v) in prop.iter() {
            match (sec, k) {
                (Some("Cypher"), "H0") => h0 = String::from(v),
                (Some("Cypher"), "HashLength") => {
                    // Only a 128-bit hash is supported.
                    if v != "128" {
                        println!("Hash length must be 128!");
                        abort();
                    }
                }
                (Some("Stream"), "File") => input_name = String::from(v),
                _ => {}
            };
        }
    }
    // A bare `unwrap()` here used to panic with an opaque message when H0 was
    // missing or malformed; give a diagnosable one instead.
    let h0 = u128::from_str_radix(&h0, 16)
        .expect("Cypher/H0 must be a hexadecimal value fitting in 128 bits");
    println!("Hashing of {} with h0 = {:#034X}", input_name, h0);
    let h = hash(&input_name, h0);
    println!("Hash is {:#034X}", h);
}
|
use crate::material::Material;
use crate::ray::Ray;
use crate::vec::{Point3, Vec3};
use std::ops::Range;
use std::rc::Rc;
/// Data recorded at a ray-object intersection.
pub struct HitRecord {
    /// Point of intersection.
    pub point: Point3,
    /// Surface normal at the hit, oriented to oppose the incoming ray
    /// (see `HitRecord::new`, which flips it when the ray hits from inside).
    pub normal: Vec3,
    /// Ray parameter at which the hit occurred.
    pub t: f64,
    /// True when the ray direction opposes the outward normal
    /// (i.e. the ray arrived from outside the surface).
    pub front_face: bool,
    /// Material of the surface that was hit.
    pub material: Rc<dyn Material>,
}
impl HitRecord {
    /// Build a hit record, flipping `normal` so the stored normal always
    /// opposes the ray direction. `normal` is taken to be the geometric
    /// (outward) normal at `point`.
    pub fn new(point: Point3, normal: Vec3, t: f64, ray: &Ray, material: Rc<dyn Material>) -> Self {
        let front_face = ray.direction.dot(normal) < 0.0;
        let oriented_normal = if front_face { normal } else { -normal };
        Self {
            point,
            normal: oriented_normal,
            t,
            front_face,
            material,
        }
    }
}
/// Anything a ray can intersect.
pub trait Hittable {
    /// Return the hit within `t_range`, or `None` if the ray misses.
    fn hit(&self, ray: &Ray, t_range: Range<f64>) -> Option<HitRecord>;
}
|
mod advanced_traits;
mod advanced_fn_closure;
/// Entry point; intentionally empty — the example code lives in the
/// modules declared above.
fn main() {
}
|
use crate::*;
use thiserror::Error;
/// Errors produced by image loading, saving, and packing.
#[derive(Debug, Error)]
pub enum ImageError {
    /// Failure while decoding an image.
    #[error("Image decoding error: {0}")]
    Load(&'static str),
    /// Failure while encoding an image.
    #[error("Image encoding error: {0}")]
    Save(&'static str),
    /// Failure during image packing (exact semantics defined by callers).
    #[error("Image packing error: {0}")]
    Packing(&'static str),
    /// Wrapper for crate-wide common errors (including I/O, see the
    /// `From<std::io::Error>` impl below).
    #[error(transparent)]
    Common(#[from] CommonError),
}
impl From<ImageError> for CommonError {
    // NOTE(review): BUG — `e.into()` resolves to this very `From` impl (via the
    // blanket `Into`), so this conversion recurses infinitely and overflows the
    // stack at runtime. It should instead match on `e`, unwrapping
    // `ImageError::Common(c) => c` and explicitly mapping the other variants
    // into `CommonError` (whose variants are not visible in this file).
    fn from(e: ImageError) -> Self {
        e.into()
    }
}
impl From<std::io::Error> for ImageError {
fn from(err: std::io::Error) -> Self {
ImageError::Common(CommonError::Io(err))
}
}
|
use clap::{Arg, App};
use rand::Rng;
/// Checksum weights applied to the tracking-number digits,
/// least-significant digit first (matching `get_digits` output order).
const CHECK_CODES: [i32; 8] = [7, 9, 5, 3, 2, 4, 6, 8];
/// Decompose a non-negative `number` into its decimal digits,
/// least-significant digit first (e.g. 123 -> [3, 2, 1]).
fn get_digits(number: i32) -> Vec<i32> {
    let mut digits = Vec::new();
    let mut remaining = number;
    // Peel digits off while more than one digit remains…
    while remaining > 9 {
        digits.push(remaining % 10);
        remaining /= 10;
    }
    // …then the final (most significant) digit.
    digits.push(remaining);
    digits
}
/// Generate `count` random tracking numbers of the form
/// `{PREFIX}{8-digit id}{check digit}{POSTFIX}`.
fn main() {
    let matches = App::new("International post tracking numbers generator")
        .version("1.0")
        .arg(Arg::with_name("count")
            .short("c")
            .long("count")
            .value_name("INTEGER")
            .help("Number of tracking numbers to generate (default: 1)"))
        .arg(Arg::with_name("PREFIX")
            .required(true)
            .index(1))
        .arg(Arg::with_name("POSTFIX")
            .required(true)
            .index(2))
        .get_matches();
    let mut rng = rand::thread_rng();
    let prefix = matches.value_of("PREFIX").unwrap();
    let postfix = matches.value_of("POSTFIX").unwrap();
    // A bare unwrap here used to panic with an opaque message on e.g. `-c foo`.
    let count: i32 = matches
        .value_of("count")
        .unwrap_or("1")
        .parse()
        .expect("count must be an integer");
    for _ in 0..count {
        let id = rng.gen_range(0, 100_000_000);
        // Weighted checksum over the digits, least-significant first.
        // `zip` stops at the shorter side, so (unlike the previous manual
        // `CHECK_CODES[i]` indexing) it cannot panic on overlong ids.
        let sum: i32 = get_digits(id)
            .iter()
            .zip(CHECK_CODES.iter())
            .map(|(d, c)| d * c)
            .sum();
        // Map the modular remainder to the final check digit.
        let check = match 11 - sum % 11 {
            10 => 0,
            11 => 5,
            c => c,
        };
        println!("{}{:08}{}{}", prefix, id, check, postfix);
    }
}
|
/// Convert a single ASCII hex digit to its numeric value (0..=15).
///
/// Generalized to accept uppercase digits as well (backward compatible:
/// `hex_value` lowercases its input before calling this).
/// Non-hex bytes map to 0, preserving the original's lenient behavior.
fn hex_to_byte(byte: u8) -> u8 {
    match byte {
        b'0'..=b'9' => byte - b'0',
        b'a'..=b'f' => byte - b'a' + 10,
        b'A'..=b'F' => byte - b'A' + 10,
        _ => 0,
    }
}
/// Convert a nibble value (0..=15) to its lowercase ASCII hex digit.
/// Out-of-range values map to 0, mirroring `hex_to_byte`'s leniency.
fn byte_to_hex(byte: u8) -> u8 {
    if byte < 10 {
        b'0' + byte
    } else if byte < 16 {
        b'a' + (byte - 10)
    } else {
        0
    }
}
/// Decode an ASCII hex string (case-insensitive) into its byte values.
///
/// Each pair of digits becomes one byte; non-hex characters decode as 0.
pub fn hex_value<T: AsRef<[u8]>>(input: T) -> Vec<u8> {
    input
        .as_ref()
        .to_ascii_lowercase()
        .chunks(2)
        .map(|chunk| match *chunk {
            // Normal case: a high and a low nibble.
            [hi, lo] => 16u8 * hex_to_byte(hi) + hex_to_byte(lo),
            // BUG FIX: odd-length input used to panic on `chunk[1]` (the final
            // chunk of `chunks(2)` may hold a single byte). Treat a lone
            // trailing digit as a low nibble instead.
            [lone] => hex_to_byte(lone),
            _ => unreachable!("chunks(2) yields 1- or 2-element slices"),
        })
        .collect()
}
/// Encode bytes as lowercase ASCII hex, two digits per input byte
/// (high nibble first).
pub fn as_hex<T: AsRef<[u8]>>(input: T) -> Vec<u8> {
    input
        .as_ref()
        .iter()
        // The original built this Vec and then called `.clone().to_vec()` on
        // it — both calls were redundant no-op copies and have been removed.
        .flat_map(|&n| vec![byte_to_hex(n / 16), byte_to_hex(n % 16)])
        .collect()
}
#[cfg(test)]
mod test {
    use crate::set1::hex::{as_hex, hex_value};
    use std::str::from_utf8;
    // Decoding: pairs of hex digits collapse into single byte values.
    #[test]
    fn example_input() {
        assert_eq!(hex_value("090A0B0C"), vec![9, 10, 11, 12]);
    }
    // Encoding: bytes expand into lowercase hex digit pairs.
    #[test]
    fn example_input_2() {
        let output = as_hex(vec![9, 10, 11, 12]);
        assert_eq!(from_utf8(output.as_slice()).unwrap(), "090a0b0c");
    }
    // Round trip: encode then decode must reproduce the original bytes.
    #[test]
    fn reversibility() {
        let input = "Rub a dub dub, 3 men in a tub";
        let output = hex_value(as_hex(input));
        assert_eq!(from_utf8(output.as_slice()).unwrap(), input);
    }
}
|
//! Solution for AOJ problem ITP1_8_D
//! [https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_8_D](https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_8_D)
use std::io::BufRead;
/// Solution entry point for ITP1_8_D
#[allow(dead_code)]
pub fn main() {
    // Keep consuming datasets from stdin until none can be read.
    while let Some(dataset) = read_dataset(std::io::stdin().lock()) {
        let answer = if dataset.exists_pattern() { "Yes" } else { "No" };
        println!("{}", answer);
    }
}
/// One input dataset: a ring-shaped text `s` and a pattern `p` to search for.
#[derive(Debug, Eq, PartialEq)]
struct Dataset {
    // Ring-shaped text.
    s: String,
    // Pattern searched for in the ring.
    p: String,
}
impl Dataset {
    /// Build a dataset from the ring text `s` and the pattern `p`.
    fn new(s: &str, p: &str) -> Self {
        Self { s: s.to_string(), p: p.to_string() }
    }
    /// Whether `p` occurs in the ring-shaped text formed from `s`.
    ///
    /// Doubling `s` covers every rotation of the ring, so a plain substring
    /// search on `s + s` suffices (assumes |p| <= |s|, which the problem
    /// statement guarantees).
    fn exists_pattern(&self) -> bool {
        // Idiom: `contains` replaces the previous
        // `match find { Some(_) => true, _ => false }`.
        self.s.repeat(2).contains(&self.p)
    }
}
/// Read one dataset (a text line followed by a pattern line) from `reader`.
///
/// Returns `None` on read error or when either line is empty after trimming.
fn read_dataset<T: BufRead>(mut reader: T) -> Option<Dataset> {
    let mut s = String::new();
    if reader.read_line(&mut s).is_err() {
        return None;
    }
    let s = s.trim();
    if s.is_empty() {
        return None;
    }
    let mut p = String::new();
    if reader.read_line(&mut p).is_err() {
        return None;
    }
    // BUG FIX: the emptiness check previously ran BEFORE trimming (unlike the
    // handling of `s`), so a pattern line consisting only of a newline slipped
    // through as an empty pattern — and an empty pattern trivially "matches".
    // Trim first, then reject empty patterns.
    let p = p.trim();
    if p.is_empty() {
        return None;
    }
    Some(Dataset::new(s, p))
}
#[cfg(test)]
mod test {
    use std::io::Cursor;
    use super::*;
    //noinspection SpellCheckingInspection
    // Constructor copies both strings into the struct verbatim.
    #[test]
    fn test_dataset_new() {
        assert_eq!(Dataset { s: "vanceknowledgetoad".to_string(), p: "advance".to_string() },
                   Dataset::new("vanceknowledgetoad", "advance"));
    }
    //noinspection SpellCheckingInspection
    // "advance" wraps around the end of the ring text, so it is only found
    // once the text is doubled.
    #[test]
    fn test_exists_pattern() {
        let dataset = Dataset::new("vanceknowledgetoad", "advance");
        assert!(dataset.exists_pattern())
    }
    //noinspection SpellCheckingInspection
    // Two newline-terminated lines parse into one dataset.
    #[test]
    fn test_read_dataset() {
        let mut input = Cursor::new("vanceknowledgetoad\nadvance\n");
        assert_eq!(Some(Dataset { s: "vanceknowledgetoad".to_string(), p: "advance".to_string() }),
                   read_dataset(&mut input));
    }
}
#[doc = "Register `PUCR` reader"]
pub type R = crate::R<PUCR_SPEC>;
#[doc = "Register `PUCR` writer"]
pub type W = crate::W<PUCR_SPEC>;
#[doc = "Field `URCL` reader - URCL"]
pub type URCL_R = crate::BitReader;
#[doc = "Field `URCL` writer - URCL"]
pub type URCL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UECL` reader - UECL"]
pub type UECL_R = crate::BitReader;
#[doc = "Field `UECL` writer - UECL"]
pub type UECL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `URDL` reader - URDL"]
pub type URDL_R = crate::BitReader;
#[doc = "Field `URDL` writer - URDL"]
pub type URDL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `UEDL` reader - UEDL"]
pub type UEDL_R = crate::BitReader;
#[doc = "Field `UEDL` writer - UEDL"]
pub type UEDL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each extracts one bit of the register value (generated code).
impl R {
    #[doc = "Bit 0 - URCL"]
    #[inline(always)]
    pub fn urcl(&self) -> URCL_R {
        URCL_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - UECL"]
    #[inline(always)]
    pub fn uecl(&self) -> UECL_R {
        UECL_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - URDL"]
    #[inline(always)]
    pub fn urdl(&self) -> URDL_R {
        URDL_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - UEDL"]
    #[inline(always)]
    pub fn uedl(&self) -> UEDL_R {
        UEDL_R::new(((self.bits >> 3) & 1) != 0)
    }
}
// Write accessors: each returns a proxy that sets one bit at the given
// offset (generated code).
impl W {
    #[doc = "Bit 0 - URCL"]
    #[inline(always)]
    #[must_use]
    pub fn urcl(&mut self) -> URCL_W<PUCR_SPEC, 0> {
        URCL_W::new(self)
    }
    #[doc = "Bit 1 - UECL"]
    #[inline(always)]
    #[must_use]
    pub fn uecl(&mut self) -> UECL_W<PUCR_SPEC, 1> {
        UECL_W::new(self)
    }
    #[doc = "Bit 2 - URDL"]
    #[inline(always)]
    #[must_use]
    pub fn urdl(&mut self) -> URDL_W<PUCR_SPEC, 2> {
        URDL_W::new(self)
    }
    #[doc = "Bit 3 - UEDL"]
    #[inline(always)]
    #[must_use]
    pub fn uedl(&mut self) -> UEDL_W<PUCR_SPEC, 3> {
        UEDL_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe: bypasses the typed per-field writers; the caller must ensure
    // the raw pattern is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DSI Host PHY ULPS control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pucr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pucr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PUCR_SPEC;
impl crate::RegisterSpec for PUCR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`pucr::R`](R) reader structure"]
impl crate::Readable for PUCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pucr::W`](W) writer structure"]
impl crate::Writable for PUCR_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PUCR to value 0"]
impl crate::Resettable for PUCR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! DB-backed changes tries storage.
use crate::cache::{
ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType,
};
use crate::utils::{self, meta_keys, Meta};
use crate::{Database, DbHash};
use codec::{Decode, Encode};
use hash_db::Prefix;
use parking_lot::RwLock;
use sc_client_api::backend::PrunableStateChangesTrieStorage;
use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache};
use sp_blockchain::{Error as ClientError, Result as ClientResult};
use sp_core::storage::PrefixedStorageKey;
use sp_core::{convert_hash, ChangesTrieConfiguration, ChangesTrieConfigurationRange};
use sp_database::Transaction;
use sp_runtime::generic::{BlockId, ChangesTrieSignal, DigestItem};
use sp_runtime::traits::{
Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero,
};
use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction};
use sp_trie::MemoryDB;
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
/// Extract the new changes trie configuration (if signalled) from the
/// header's digest.
pub fn extract_new_configuration<Header: HeaderT>(
    header: &Header,
) -> Option<&Option<ChangesTrieConfiguration>> {
    // Find the changes-trie signal in the digest, then pull out a
    // new-configuration payload if that's what it carries.
    let signal = header.digest().log(DigestItem::as_changes_trie_signal)?;
    signal.as_new_configuration()
}
/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is
/// currently guaranteed because import lock is held during block import/finalization.
pub struct DbChangesTrieStorageTransaction<Block: BlockT> {
    /// Cache operations that must be performed after db transaction is committed.
    cache_ops: DbCacheTransactionOps<Block>,
    /// New configuration (if changed at current block); outer `Option` is
    /// "was there a change", inner is the configuration itself (possibly disabled).
    new_config: Option<Option<ChangesTrieConfiguration>>,
}
impl<Block: BlockT> DbChangesTrieStorageTransaction<Block> {
    /// Consume self and return a transaction carrying the given new configuration.
    pub fn with_new_config(self, new_config: Option<Option<ChangesTrieConfiguration>>) -> Self {
        Self { new_config, ..self }
    }
}
impl<Block: BlockT> From<DbCacheTransactionOps<Block>> for DbChangesTrieStorageTransaction<Block> {
    /// Wrap bare cache ops into a storage transaction with no config change.
    fn from(cache_ops: DbCacheTransactionOps<Block>) -> Self {
        Self { cache_ops, new_config: None }
    }
}
/// Changes tries storage.
///
/// Stores all tries in separate DB column.
/// Lock order: meta, tries_meta, cache, build_cache.
pub struct DbChangesTrieStorage<Block: BlockT> {
    /// Backing key-value database shared with the rest of the backend.
    db: Arc<dyn Database<DbHash>>,
    /// Column holding the changes-tries metadata (see `read/write_tries_meta`).
    meta_column: u32,
    /// Column holding the changes trie nodes themselves.
    changes_tries_column: u32,
    /// Column used (together with `header_column`) to resolve block ids to headers.
    key_lookup_column: u32,
    /// Column holding block headers.
    header_column: u32,
    /// Shared chain metadata (finalized hash/number, genesis hash, ...).
    meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
    /// Changes-tries-specific metadata tracking pruning progress.
    tries_meta: RwLock<ChangesTriesMeta<Block>>,
    /// Pruning depth; `None` disables pruning entirely (archive mode).
    min_blocks_to_keep: Option<u32>,
    /// The cache stores all ever existing changes tries configurations.
    cache: DbCacheSync<Block>,
    /// Build cache is a map of block => set of storage keys changed at this block.
    /// They're used to build digest blocks - instead of reading+parsing tries from db
    /// we just use keys sets from the cache.
    build_cache: RwLock<ChangesTrieBuildCache<Block::Hash, NumberFor<Block>>>,
}
/// Persistent struct that contains all the changes tries metadata.
/// Serialized into the meta column under `CHANGES_TRIES_META`
/// (see `read_tries_meta` / `write_tries_meta`).
#[derive(Decode, Encode, Debug)]
struct ChangesTriesMeta<Block: BlockT> {
    /// Oldest unpruned max-level (or skewed) digest trie blocks range.
    /// The range is inclusive from both sides.
    /// Is None only if:
    /// 1) we haven't yet finalized any blocks (except genesis)
    /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are
    /// disabled 3) changes tries pruning is disabled
    pub oldest_digest_range: Option<(NumberFor<Block>, NumberFor<Block>)>,
    /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
    /// It is guaranteed that we have no any changes tries before (and including) this block.
    /// It is guaranteed that all existing changes tries after this block are not yet pruned (if
    /// created).
    pub oldest_pruned_digest_range_end: NumberFor<Block>,
}
impl<Block: BlockT> DbChangesTrieStorage<Block> {
    /// Create new changes trie storage.
    pub fn new(
        db: Arc<dyn Database<DbHash>>,
        header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
        meta_column: u32,
        changes_tries_column: u32,
        key_lookup_column: u32,
        header_column: u32,
        cache_column: u32,
        meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
        min_blocks_to_keep: Option<u32>,
    ) -> ClientResult<Self> {
        // Snapshot chain-head state under a short-lived read lock.
        let (finalized_hash, finalized_number, genesis_hash) = {
            let meta = meta.read();
            (meta.finalized_hash, meta.finalized_number, meta.genesis_hash)
        };
        let tries_meta = read_tries_meta(&*db, meta_column)?;
        Ok(Self {
            db: db.clone(),
            meta_column,
            changes_tries_column,
            key_lookup_column,
            header_column,
            meta,
            min_blocks_to_keep,
            cache: DbCacheSync(RwLock::new(DbCache::new(
                db.clone(),
                header_metadata_cache,
                key_lookup_column,
                header_column,
                cache_column,
                genesis_hash,
                ComplexBlockId::new(finalized_hash, finalized_number),
            ))),
            build_cache: RwLock::new(ChangesTrieBuildCache::new()),
            tries_meta: RwLock::new(tries_meta),
        })
    }
    /// Commit new changes trie.
    pub fn commit(
        &self,
        tx: &mut Transaction<DbHash>,
        mut changes_trie: MemoryDB<HashFor<Block>>,
        parent_block: ComplexBlockId<Block>,
        block: ComplexBlockId<Block>,
        new_header: &Block::Header,
        finalized: bool,
        new_configuration: Option<Option<ChangesTrieConfiguration>>,
        cache_tx: Option<DbChangesTrieStorageTransaction<Block>>,
    ) -> ClientResult<DbChangesTrieStorageTransaction<Block>> {
        // insert changes trie, associated with block, into DB
        for (key, (val, _)) in changes_trie.drain() {
            tx.set(self.changes_tries_column, key.as_ref(), &val);
        }
        // if configuration has not been changed AND block is not finalized => nothing to do here
        let new_configuration = match new_configuration {
            Some(new_configuration) => new_configuration,
            None if !finalized => return Ok(DbCacheTransactionOps::empty().into()),
            None =>
                return self.finalize(
                    tx,
                    parent_block.hash,
                    block.hash,
                    block.number,
                    Some(new_header),
                    cache_tx,
                ),
        };
        // update configuration cache
        let mut cache_at = HashMap::new();
        cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode());
        Ok(DbChangesTrieStorageTransaction::from(match cache_tx {
            Some(cache_tx) => self
                .cache
                .0
                .write()
                .transaction_with_ops(tx, cache_tx.cache_ops)
                .on_block_insert(
                    parent_block,
                    block,
                    cache_at,
                    if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
                )?
                .into_ops(),
            None => self
                .cache
                .0
                .write()
                .transaction(tx)
                .on_block_insert(
                    parent_block,
                    block,
                    cache_at,
                    if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
                )?
                .into_ops(),
        })
        .with_new_config(Some(new_configuration)))
    }
    /// Called when block is finalized.
    pub fn finalize(
        &self,
        tx: &mut Transaction<DbHash>,
        parent_block_hash: Block::Hash,
        block_hash: Block::Hash,
        block_num: NumberFor<Block>,
        new_header: Option<&Block::Header>,
        cache_tx: Option<DbChangesTrieStorageTransaction<Block>>,
    ) -> ClientResult<DbChangesTrieStorageTransaction<Block>> {
        // prune obsolete changes tries
        // (`Option<&Header>` is `Copy`, so the previous `.clone()` here was redundant)
        self.prune(tx, block_hash, block_num, new_header, cache_tx.as_ref())?;
        // if we have inserted the block that we're finalizing in the same transaction
        // => then we have already finalized it from the commit() call
        if cache_tx.is_some() {
            if let Some(new_header) = new_header {
                if new_header.hash() == block_hash {
                    return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed"))
                }
            }
        }
        // and finalize configuration cache entries
        let block = ComplexBlockId::new(block_hash, block_num);
        // (passing `Zero::zero` directly — the `|| Zero::zero()` closure was redundant)
        let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(Zero::zero);
        let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num);
        Ok(match cache_tx {
            Some(cache_tx) => DbChangesTrieStorageTransaction::from(
                self.cache
                    .0
                    .write()
                    .transaction_with_ops(tx, cache_tx.cache_ops)
                    .on_block_finalize(parent_block, block)?
                    .into_ops(),
            )
            .with_new_config(cache_tx.new_config),
            None => DbChangesTrieStorageTransaction::from(
                self.cache
                    .0
                    .write()
                    .transaction(tx)
                    .on_block_finalize(parent_block, block)?
                    .into_ops(),
            ),
        })
    }
    /// When block is reverted.
    pub fn revert(
        &self,
        tx: &mut Transaction<DbHash>,
        block: &ComplexBlockId<Block>,
    ) -> ClientResult<DbChangesTrieStorageTransaction<Block>> {
        Ok(self.cache.0.write().transaction(tx).on_block_revert(block)?.into_ops().into())
    }
    /// When transaction has been committed.
    pub fn post_commit(&self, tx: Option<DbChangesTrieStorageTransaction<Block>>) {
        if let Some(tx) = tx {
            self.cache.0.write().commit(tx.cache_ops).expect(
                "only fails if cache with given name isn't loaded yet;\
                cache is already loaded because there is tx; qed",
            );
        }
    }
    /// Commit changes into changes trie build cache.
    pub fn commit_build_cache(
        &self,
        cache_update: ChangesTrieCacheAction<Block::Hash, NumberFor<Block>>,
    ) {
        self.build_cache.write().perform(cache_update);
    }
    /// Prune obsolete changes tries.
    fn prune(
        &self,
        tx: &mut Transaction<DbHash>,
        block_hash: Block::Hash,
        block_num: NumberFor<Block>,
        new_header: Option<&Block::Header>,
        cache_tx: Option<&DbChangesTrieStorageTransaction<Block>>,
    ) -> ClientResult<()> {
        // never prune on archive nodes
        let min_blocks_to_keep = match self.min_blocks_to_keep {
            Some(min_blocks_to_keep) => min_blocks_to_keep,
            None => return Ok(()),
        };
        let mut tries_meta = self.tries_meta.write();
        let mut next_digest_range_start = block_num;
        loop {
            // prune oldest digest if it is known
            // it could be unknown if:
            // 1) either we're finalizing block#1
            // 2) or we are (or were) in period where changes tries are disabled
            if let Some((begin, end)) = tries_meta.oldest_digest_range {
                if block_num <= end || block_num - end <= min_blocks_to_keep.into() {
                    break
                }
                tries_meta.oldest_pruned_digest_range_end = end;
                sp_state_machine::prune_changes_tries(
                    &*self,
                    begin,
                    end,
                    &sp_state_machine::ChangesTrieAnchorBlockId {
                        hash: convert_hash(&block_hash),
                        number: block_num,
                    },
                    |node| tx.remove(self.changes_tries_column, node.as_ref()),
                );
                next_digest_range_start = end + One::one();
            }
            // proceed to the next configuration range
            let next_digest_range_start_hash = match block_num == next_digest_range_start {
                true => block_hash,
                false => utils::require_header::<Block>(
                    &*self.db,
                    self.key_lookup_column,
                    self.header_column,
                    BlockId::Number(next_digest_range_start),
                )?
                .hash(),
            };
            let config_for_new_block = new_header
                .map(|header| *header.number() == next_digest_range_start)
                .unwrap_or(false);
            let next_config = match cache_tx {
                Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => {
                    let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed");
                    ChangesTrieConfigurationRange {
                        zero: (block_num, block_hash),
                        end: None,
                        config,
                    }
                },
                _ if config_for_new_block => self.configuration_at(&BlockId::Hash(
                    *new_header
                        .expect("config_for_new_block is only true when new_header is passed; qed")
                        .parent_hash(),
                ))?,
                _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?,
            };
            if let Some(config) = next_config.config {
                let mut oldest_digest_range = config
                    .next_max_level_digest_range(next_config.zero.0, next_digest_range_start)
                    .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start));
                if let Some(end) = next_config.end {
                    if end.0 < oldest_digest_range.1 {
                        oldest_digest_range.1 = end.0;
                    }
                }
                tries_meta.oldest_digest_range = Some(oldest_digest_range);
                continue
            }
            tries_meta.oldest_digest_range = None;
            break
        }
        write_tries_meta(tx, self.meta_column, &*tries_meta);
        Ok(())
    }
}
impl<Block: BlockT> PrunableStateChangesTrieStorage<Block> for DbChangesTrieStorage<Block> {
    /// Expose self as a plain changes-trie storage.
    fn storage(
        &self,
    ) -> &dyn sp_state_machine::ChangesTrieStorage<HashFor<Block>, NumberFor<Block>> {
        self
    }
    /// Read the changes trie configuration range active at `at` from the cache.
    fn configuration_at(
        &self,
        at: &BlockId<Block>,
    ) -> ClientResult<ChangesTrieConfigurationRange<NumberFor<Block>, Block::Hash>> {
        self.cache
            .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)?
            .and_then(|(zero, end, encoded)| {
                Decode::decode(&mut &encoded[..]).ok().map(|config| ChangesTrieConfigurationRange {
                    zero,
                    end,
                    config,
                })
            })
            // Constructing a unit-like variant is free, so the lazy `ok_or_else`
            // closure was unnecessary (clippy: unnecessary_lazy_evaluations).
            .ok_or(ClientError::ErrorReadingChangesTriesConfig)
    }
    /// End (inclusive) of the oldest fully-pruned digest range.
    fn oldest_pruned_digest_range_end(&self) -> NumberFor<Block> {
        self.tries_meta.read().oldest_pruned_digest_range_end
    }
}
impl<Block: BlockT> sp_state_machine::ChangesTrieRootsStorage<HashFor<Block>, NumberFor<Block>>
    for DbChangesTrieStorage<Block>
{
    /// Build an anchor (hash + number) for `hash` by reading its header.
    fn build_anchor(
        &self,
        hash: Block::Hash,
    ) -> Result<sp_state_machine::ChangesTrieAnchorBlockId<Block::Hash, NumberFor<Block>>, String> {
        utils::read_header::<Block>(
            &*self.db,
            self.key_lookup_column,
            self.header_column,
            BlockId::Hash(hash),
        )
        .map_err(|e| e.to_string())
        .and_then(|maybe_header| {
            maybe_header
                .map(|header| sp_state_machine::ChangesTrieAnchorBlockId {
                    hash,
                    number: *header.number(),
                })
                .ok_or_else(|| format!("Unknown header: {}", hash))
        })
    }
    /// Resolve the changes trie root of block number `block`, as seen from
    /// the chain containing `anchor`.
    fn root(
        &self,
        anchor: &sp_state_machine::ChangesTrieAnchorBlockId<Block::Hash, NumberFor<Block>>,
        block: NumberFor<Block>,
    ) -> Result<Option<Block::Hash>, String> {
        // check API requirement: we can't get NEXT block(s) based on anchor
        if block > anchor.number {
            return Err(format!(
                "Can't get changes trie root at {} using anchor at {}",
                block, anchor.number
            ))
        }
        // we need to get hash of the block to resolve changes trie root
        let block_id = if block <= self.meta.read().finalized_number {
            // if block is finalized, we could just read canonical hash
            BlockId::Number(block)
        } else {
            // the block is not finalized
            let mut current_num = anchor.number;
            let mut current_hash: Block::Hash = convert_hash(&anchor.hash);
            let maybe_anchor_header: Block::Header = utils::require_header::<Block>(
                &*self.db,
                self.key_lookup_column,
                self.header_column,
                BlockId::Number(current_num),
            )
            .map_err(|e| e.to_string())?;
            if maybe_anchor_header.hash() == current_hash {
                // if anchor is canonicalized, then the block is also canonicalized
                BlockId::Number(block)
            } else {
                // else (block is not finalized + anchor is not canonicalized):
                // => we should find the required block hash by traversing
                // back from the anchor to the block with given number
                while current_num != block {
                    let current_header: Block::Header = utils::require_header::<Block>(
                        &*self.db,
                        self.key_lookup_column,
                        self.header_column,
                        BlockId::Hash(current_hash),
                    )
                    .map_err(|e| e.to_string())?;
                    current_hash = *current_header.parent_hash();
                    current_num = current_num - One::one();
                }
                BlockId::Hash(current_hash)
            }
        };
        // The root (if any) is recorded in the digest of the block's own header.
        Ok(utils::require_header::<Block>(
            &*self.db,
            self.key_lookup_column,
            self.header_column,
            block_id,
        )
        .map_err(|e| e.to_string())?
        .digest()
        .log(DigestItem::as_changes_trie_root)
        .cloned())
    }
}
impl<Block> sp_state_machine::ChangesTrieStorage<HashFor<Block>, NumberFor<Block>>
    for DbChangesTrieStorage<Block>
where
    Block: BlockT,
{
    /// Expose self as a roots-only storage.
    fn as_roots_storage(
        &self,
    ) -> &dyn sp_state_machine::ChangesTrieRootsStorage<HashFor<Block>, NumberFor<Block>> {
        self
    }
    /// Run `functor` over the cached changed-keys set for `root`, if it is
    /// present in the build cache; returns whether the cache had an entry.
    fn with_cached_changed_keys(
        &self,
        root: &Block::Hash,
        functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<Vec<u8>>>),
    ) -> bool {
        self.build_cache.read().with_changed_keys(root, functor)
    }
    /// Fetch a trie node by hash; the prefix is unused by this backend.
    fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result<Option<Vec<u8>>, String> {
        Ok(self.db.get(self.changes_tries_column, key.as_ref()))
    }
}
/// Read changes tries metadata from database.
fn read_tries_meta<Block: BlockT>(
db: &dyn Database<DbHash>,
meta_column: u32,
) -> ClientResult<ChangesTriesMeta<Block>> {
match db.get(meta_column, meta_keys::CHANGES_TRIES_META) {
Some(h) => match Decode::decode(&mut &h[..]) {
Ok(h) => Ok(h),
Err(err) =>
Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))),
},
None => Ok(ChangesTriesMeta {
oldest_digest_range: None,
oldest_pruned_digest_range_end: Zero::zero(),
}),
}
}
/// Write changes tries metadata to the database (via the given transaction).
fn write_tries_meta<Block: BlockT>(
    tx: &mut Transaction<DbHash>,
    meta_column: u32,
    meta: &ChangesTriesMeta<Block>,
) {
    tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode());
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{insert_header, prepare_changes, Block};
use crate::Backend;
use hash_db::EMPTY_PREFIX;
use sc_client_api::backend::{
Backend as ClientBackend, BlockImportOperation, NewBlockState,
PrunableStateChangesTrieStorage,
};
use sp_blockchain::HeaderBackend as BlockchainHeaderBackend;
use sp_core::H256;
use sp_runtime::testing::{Digest, Header};
use sp_runtime::traits::{BlakeTwo256, Hash};
use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage};
fn changes(number: u64) -> Option<Vec<(Vec<u8>, Vec<u8>)>> {
Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())])
}
fn insert_header_with_configuration_change(
backend: &Backend<Block>,
number: u64,
parent_hash: H256,
changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
new_configuration: Option<ChangesTrieConfiguration>,
) -> H256 {
let mut digest = Digest::default();
let mut changes_trie_update = Default::default();
if let Some(changes) = changes {
let (root, update) = prepare_changes(changes);
digest.push(DigestItem::ChangesTrieRoot(root));
changes_trie_update = update;
}
digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(
new_configuration,
)));
let header = Header {
number,
parent_hash,
state_root: BlakeTwo256::trie_root(Vec::new()),
digest,
extrinsics_root: Default::default(),
};
let header_hash = header.hash();
let block_id = if number == 0 {
BlockId::Hash(Default::default())
} else {
BlockId::Number(number - 1)
};
let mut op = backend.begin_operation().unwrap();
backend.begin_state_operation(&mut op, block_id).unwrap();
op.set_block_data(header, None, None, NewBlockState::Best).unwrap();
op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap();
backend.commit_operation(op).unwrap();
header_hash
}
#[test]
fn changes_trie_storage_works() {
let backend = Backend::<Block>::new_test(1000, 100);
backend.changes_tries_storage.meta.write().finalized_number = 1000;
let check_changes = |backend: &Backend<Block>,
block: u64,
changes: Vec<(Vec<u8>, Vec<u8>)>| {
let (changes_root, mut changes_trie_update) = prepare_changes(changes);
let anchor = sp_state_machine::ChangesTrieAnchorBlockId {
hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(),
number: block,
};
assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));
let storage = backend.changes_tries_storage.storage();
for (key, (val, _)) in changes_trie_update.drain() {
assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val)));
}
};
let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())];
let changes1 = vec![
(b"key_at_1".to_vec(), b"val_at_1".to_vec()),
(b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()),
];
let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())];
let block0 = insert_header(
&backend,
0,
Default::default(),
Some(changes0.clone()),
Default::default(),
);
let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default());
let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default());
// check that the storage contains tries for all blocks
check_changes(&backend, 0, changes0);
check_changes(&backend, 1, changes1);
check_changes(&backend, 2, changes2);
}
#[test]
fn changes_trie_storage_works_with_forks() {
    let backend = Backend::<Block>::new_test(1000, 100);

    // Canonical chain: blocks 0, 1, 2, each with its own change set.
    let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())];
    let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())];
    let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())];
    let block0 = insert_header(
        &backend,
        0,
        Default::default(),
        Some(changes0.clone()),
        Default::default(),
    );
    let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default());
    let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default());

    // Fork 1: two blocks (#3, #4) built on top of block2.
    let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())];
    let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())];
    let block2_1_0 =
        insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default());
    let block2_1_1 =
        insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default());

    // Fork 2: a second, competing pair of blocks (#3, #4) also on top of block2.
    let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())];
    let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())];
    let block2_2_0 =
        insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default());
    let block2_2_1 =
        insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default());

    // finalize block1
    backend.changes_tries_storage.meta.write().finalized_number = 1;

    // branch1: when asking for finalized block hash
    let (changes1_root, _) = prepare_changes(changes1);
    let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 };
    assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root)));

    // branch2: when asking for finalized block hash
    let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 };
    assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root)));

    // branch1: when asking for non-finalized block hash (search by traversal)
    let (changes2_1_0_root, _) = prepare_changes(changes2_1_0);
    let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 };
    assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root)));

    // branch2: when asking for non-finalized block hash (search using canonicalized hint)
    let (changes2_2_0_root, _) = prepare_changes(changes2_2_0);
    let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 };
    assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root)));

    // finalize first block of branch2 (block2_2_0)
    backend.changes_tries_storage.meta.write().finalized_number = 3;

    // branch2: when asking for finalized block of this branch
    assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root)));

    // branch1: when asking for finalized block of other branch
    // => result is incorrect (returned for the block of branch1), but this is expected,
    // because the other fork is abandoned (forked before finalized header)
    let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 };
    assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root)));
}
#[test]
fn changes_tries_are_pruned_on_finalization() {
    let mut backend = Backend::<Block>::new_test(1000, 100);
    backend.changes_tries_storage.min_blocks_to_keep = Some(8);

    // Hash of the parent of block `number` (zero hash for the genesis block).
    let parent_hash = |number| {
        if number == 0 {
            Default::default()
        } else {
            backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash()
        }
    };

    // Inserts a header (optionally carrying a change set) on top of the current chain.
    let insert_regular_header = |with_changes, number| {
        insert_header(
            &backend,
            number,
            parent_hash(number),
            if with_changes { changes(number) } else { None },
            Default::default(),
        );
    };

    // A block counts as pruned when it either never had a changes trie root in its
    // digest, or the trie with that root is no longer present in storage.
    let is_pruned = |number| {
        let trie_root = backend
            .blockchain()
            .header(BlockId::Number(number))
            .unwrap()
            .unwrap()
            .digest()
            .log(DigestItem::as_changes_trie_root)
            .cloned();
        match trie_root {
            Some(trie_root) =>
                backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(),
            None => true,
        }
    };

    // Finalizes block `number` directly through the changes tries storage,
    // committing the produced transaction and applying the cache operations.
    let finalize_block = |number| {
        let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap();
        let mut tx = Transaction::new();
        let cache_ops = backend
            .changes_tries_storage
            .finalize(&mut tx, *header.parent_hash(), header.hash(), number, None, None)
            .unwrap();
        backend.storage.db.commit(tx).unwrap();
        backend.changes_tries_storage.post_commit(Some(cache_ops));
    };

    // configuration ranges:
    // (0; 6] - None
    // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17
    // [18; 21] - None
    // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32
    // [33; ... - Some(1)
    let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2));
    let config_at_17 = None;
    let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1));
    let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0));

    // Build the chain described above (blocks 0..50, config changes at 6/17/21/32).
    (0..6).for_each(|number| insert_regular_header(false, number));
    insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6);
    (7..17).for_each(|number| insert_regular_header(true, number));
    insert_header_with_configuration_change(
        &backend,
        17,
        parent_hash(17),
        changes(17),
        config_at_17,
    );
    (18..21).for_each(|number| insert_regular_header(false, number));
    insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21);
    (22..32).for_each(|number| insert_regular_header(true, number));
    insert_header_with_configuration_change(
        &backend,
        32,
        parent_hash(32),
        changes(32),
        config_at_32,
    );
    (33..50).for_each(|number| insert_regular_header(true, number));

    // when only genesis is finalized, nothing is pruned
    (0..=6).for_each(|number| assert!(is_pruned(number)));
    (7..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when blocks [1; 18] are finalized, nothing is pruned
    (1..=18).for_each(|number| finalize_block(number));
    (0..=6).for_each(|number| assert!(is_pruned(number)));
    (7..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 19 is finalized, changes tries for blocks [7; 10] are pruned
    finalize_block(19);
    (0..=10).for_each(|number| assert!(is_pruned(number)));
    (11..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when blocks [20; 22] are finalized, nothing is pruned
    (20..=22).for_each(|number| finalize_block(number));
    (0..=10).for_each(|number| assert!(is_pruned(number)));
    (11..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 23 is finalized, changes tries for blocks [11; 14] are pruned
    finalize_block(23);
    (0..=14).for_each(|number| assert!(is_pruned(number)));
    (15..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when blocks [24; 25] are finalized, nothing is pruned
    (24..=25).for_each(|number| finalize_block(number));
    (0..=14).for_each(|number| assert!(is_pruned(number)));
    (15..=17).for_each(|number| assert!(!is_pruned(number)));
    (18..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 26 is finalized, changes tries for blocks [15; 17] are pruned
    finalize_block(26);
    (0..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when blocks [27; 37] are finalized, nothing is pruned
    (27..=37).for_each(|number| finalize_block(number));
    (0..=21).for_each(|number| assert!(is_pruned(number)));
    (22..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 38 is finalized, changes tries for blocks [22; 29] are pruned
    finalize_block(38);
    (0..=29).for_each(|number| assert!(is_pruned(number)));
    (30..50).for_each(|number| assert!(!is_pruned(number)));

    // when blocks [39; 40] are finalized, nothing is pruned
    (39..=40).for_each(|number| finalize_block(number));
    (0..=29).for_each(|number| assert!(is_pruned(number)));
    (30..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 41 is finalized, changes tries for blocks [30; 32] are pruned
    finalize_block(41);
    (0..=32).for_each(|number| assert!(is_pruned(number)));
    (33..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 42 is finalized, changes trie for block 33 is pruned
    finalize_block(42);
    (0..=33).for_each(|number| assert!(is_pruned(number)));
    (34..50).for_each(|number| assert!(!is_pruned(number)));

    // when block 43 is finalized, changes trie for block 34 is pruned
    finalize_block(43);
    (0..=34).for_each(|number| assert!(is_pruned(number)));
    (35..50).for_each(|number| assert!(!is_pruned(number)));
}
#[test]
fn changes_tries_configuration_is_updated_on_block_insert() {
    let backend = Backend::<Block>::new_test(1000, 100);

    // Configurations activated at blocks 1, 3, 5 and 7.
    let config_at_1 = Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 });
    let config_at_3 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 });
    let config_at_5 = None;
    let config_at_7 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 });

    // Insert blocks 0..=7; odd blocks carry a configuration-change digest.
    let block0 = insert_header(&backend, 0, Default::default(), None, Default::default());
    let block1 =
        insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone());
    let block2 = insert_header(&backend, 2, block1, None, Default::default());
    let block3 =
        insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone());
    let block4 = insert_header(&backend, 4, block3, None, Default::default());
    let block5 =
        insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone());
    let block6 = insert_header(&backend, 6, block5, None, Default::default());
    let block7 =
        insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone());

    // test configuration cache: a change block reports its new configuration and
    // every other block inherits the configuration of the latest change before it.
    let storage = &backend.changes_tries_storage;
    let expectations = [
        (block1, &config_at_1),
        (block2, &config_at_1),
        (block3, &config_at_3),
        (block4, &config_at_3),
        (block5, &config_at_5),
        (block6, &config_at_5),
        (block7, &config_at_7),
    ];
    for (block, expected) in expectations.iter() {
        assert_eq!(
            storage.configuration_at(&BlockId::Hash(*block)).unwrap().config,
            **expected,
        );
    }
}
#[test]
fn test_finalize_several_configuration_change_blocks_in_single_operation() {
    let mut backend = Backend::<Block>::new_test(10, 10);
    backend.changes_tries_storage.min_blocks_to_keep = Some(8);

    // Eight distinct configurations so each block's change is distinguishable.
    let configs =
        (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::<Vec<_>>();

    // insert unfinalized headers
    let block0 = insert_header_with_configuration_change(
        &backend,
        0,
        Default::default(),
        None,
        configs[0].clone(),
    );
    let block1 = insert_header_with_configuration_change(
        &backend,
        1,
        block0,
        changes(1),
        configs[1].clone(),
    );
    let block2 = insert_header_with_configuration_change(
        &backend,
        2,
        block1,
        changes(2),
        configs[2].clone(),
    );

    // A competing fork off block1 with its own configuration changes; it must not
    // break finalization of the canonical branch below.
    let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2));
    let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3));
    let block2_1 = insert_header_with_configuration_change(
        &backend,
        2,
        block1,
        changes(8),
        side_config2_1.clone(),
    );
    let _ = insert_header_with_configuration_change(
        &backend,
        3,
        block2_1,
        changes(9),
        side_config2_2.clone(),
    );

    // insert finalized header => 4 headers are finalized at once
    // NOTE(review): blocks 1, 2 and 3 are marked final below — confirm whether the
    // "4 headers" count includes block 0.
    let header3 = Header {
        number: 3,
        parent_hash: block2,
        state_root: Default::default(),
        digest: Digest {
            logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(
                configs[3].clone(),
            ))],
        },
        extrinsics_root: Default::default(),
    };
    let block3 = header3.hash();
    let mut op = backend.begin_operation().unwrap();
    backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap();
    op.mark_finalized(BlockId::Hash(block1), None).unwrap();
    op.mark_finalized(BlockId::Hash(block2), None).unwrap();
    op.set_block_data(header3, None, None, NewBlockState::Final).unwrap();
    backend.commit_operation(op).unwrap();

    // insert more unfinalized headers
    let block4 = insert_header_with_configuration_change(
        &backend,
        4,
        block3,
        changes(4),
        configs[4].clone(),
    );
    let block5 = insert_header_with_configuration_change(
        &backend,
        5,
        block4,
        changes(5),
        configs[5].clone(),
    );
    let block6 = insert_header_with_configuration_change(
        &backend,
        6,
        block5,
        changes(6),
        configs[6].clone(),
    );

    // insert finalized header => 4 headers are finalized at once
    let header7 = Header {
        number: 7,
        parent_hash: block6,
        state_root: Default::default(),
        digest: Digest {
            logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(
                configs[7].clone(),
            ))],
        },
        extrinsics_root: Default::default(),
    };
    let mut op = backend.begin_operation().unwrap();
    backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap();
    op.mark_finalized(BlockId::Hash(block4), None).unwrap();
    op.mark_finalized(BlockId::Hash(block5), None).unwrap();
    op.mark_finalized(BlockId::Hash(block6), None).unwrap();
    op.set_block_data(header7, None, None, NewBlockState::Final).unwrap();
    backend.commit_operation(op).unwrap();
}
#[test]
fn changes_tries_configuration_is_reverted() {
    let backend = Backend::<Block>::new_test(10, 10);

    // Canonical chain: blocks 0..=2, finalizing block 1 along the way.
    let config0 = Some(ChangesTrieConfiguration::new(2, 5));
    let block0 =
        insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0);
    let config1 = Some(ChangesTrieConfiguration::new(2, 6));
    let block1 =
        insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1);
    backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap();
    let config2 = Some(ChangesTrieConfiguration::new(2, 7));
    let block2 =
        insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2);

    // Two competing forks on top of block2; each block changes the configuration.
    let config2_1 = Some(ChangesTrieConfiguration::new(2, 8));
    let _ =
        insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1);
    let config2_2 = Some(ChangesTrieConfiguration::new(2, 9));
    let block2_2 =
        insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2);
    let config2_3 = Some(ChangesTrieConfiguration::new(2, 10));
    let _ =
        insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3);

    // Helper: block numbers of the heads of all unfinalized configuration forks
    // currently tracked by the changes tries configuration cache.
    let unfinalized_fork_heads = || {
        backend
            .changes_tries_storage
            .cache
            .0
            .write()
            .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG)
            .unwrap()
            .unfinalized()
            .iter()
            .map(|fork| fork.head().valid_from.number)
            .collect::<Vec<_>>()
    };

    // before truncate there are 2 unfinalized forks - block2_1+block2_3
    assert_eq!(unfinalized_fork_heads(), vec![3, 4]);

    // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2
    backend.revert(1, false).unwrap();
    assert_eq!(unfinalized_fork_heads(), vec![3, 3]);

    // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl
    // specifics), the 1st one points to the block #3 because it isn't truncated
    backend.revert(1, false).unwrap();
    assert_eq!(unfinalized_fork_heads(), vec![3, 2]);

    // after truncating block2 - there are no unfinalized forks
    backend.revert(1, false).unwrap();
    assert!(unfinalized_fork_heads().is_empty());
}
}
|
#[doc = "Reader of register TR_CTRL2"]
pub type R = crate::R<u32, super::TR_CTRL2>;
#[doc = "Writer for register TR_CTRL2"]
pub type W = crate::W<u32, super::TR_CTRL2>;
#[doc = "Register TR_CTRL2 `reset()`'s with value 0x3f"]
impl crate::ResetValue for super::TR_CTRL2 {
    type Type = u32;
    // 0x3f = 0b11_11_11: the three 2-bit mode fields (bits 0:1, 2:3 and 4:5)
    // all reset to 3 (NO_CHANGE), matching each field's "Value on reset: 3".
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x3f
    }
}
#[doc = "Determines the effect of a compare match event (COUNTER equals CC register) on the 'line_out' output signals. Note that INVERT is especially useful for center aligned pulse width modulation. To generate a duty cycle of 0 percent, the counter CC register should be set to '0'. For a 100 percent duty cycle, the counter CC register should be set to larger than the counter PERIOD register.\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
// Variant order matches the 2-bit field encoding (SET = 0 .. NO_CHANGE = 3).
pub enum CC_MATCH_MODE_A {
    #[doc = "0: Set to '1'"]
    SET,
    #[doc = "1: Set to '0'"]
    CLEAR,
    #[doc = "2: Invert"]
    INVERT,
    #[doc = "3: No Change"]
    NO_CHANGE,
}
impl From<CC_MATCH_MODE_A> for u8 {
#[inline(always)]
fn from(variant: CC_MATCH_MODE_A) -> Self {
match variant {
CC_MATCH_MODE_A::SET => 0,
CC_MATCH_MODE_A::CLEAR => 1,
CC_MATCH_MODE_A::INVERT => 2,
CC_MATCH_MODE_A::NO_CHANGE => 3,
}
}
}
#[doc = "Reader of field `CC_MATCH_MODE`"]
pub type CC_MATCH_MODE_R = crate::R<u8, CC_MATCH_MODE_A>;
impl CC_MATCH_MODE_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CC_MATCH_MODE_A {
        // The reader only holds the masked 2-bit field, so 0..=3 is exhaustive.
        match self.bits {
            0 => CC_MATCH_MODE_A::SET,
            1 => CC_MATCH_MODE_A::CLEAR,
            2 => CC_MATCH_MODE_A::INVERT,
            3 => CC_MATCH_MODE_A::NO_CHANGE,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `SET`"]
    #[inline(always)]
    pub fn is_set(&self) -> bool {
        *self == CC_MATCH_MODE_A::SET
    }
    #[doc = "Checks if the value of the field is `CLEAR`"]
    #[inline(always)]
    pub fn is_clear(&self) -> bool {
        *self == CC_MATCH_MODE_A::CLEAR
    }
    #[doc = "Checks if the value of the field is `INVERT`"]
    #[inline(always)]
    pub fn is_invert(&self) -> bool {
        *self == CC_MATCH_MODE_A::INVERT
    }
    #[doc = "Checks if the value of the field is `NO_CHANGE`"]
    #[inline(always)]
    pub fn is_no_change(&self) -> bool {
        *self == CC_MATCH_MODE_A::NO_CHANGE
    }
}
#[doc = "Write proxy for field `CC_MATCH_MODE`"]
pub struct CC_MATCH_MODE_W<'a> {
    // Borrow of the register writer this proxy mutates.
    w: &'a mut W,
}
impl<'a> CC_MATCH_MODE_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: CC_MATCH_MODE_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Set to '1'"]
    #[inline(always)]
    pub fn set(self) -> &'a mut W {
        self.variant(CC_MATCH_MODE_A::SET)
    }
    #[doc = "Set to '0'"]
    #[inline(always)]
    pub fn clear(self) -> &'a mut W {
        self.variant(CC_MATCH_MODE_A::CLEAR)
    }
    #[doc = "Invert"]
    #[inline(always)]
    pub fn invert(self) -> &'a mut W {
        self.variant(CC_MATCH_MODE_A::INVERT)
    }
    #[doc = "No Change"]
    #[inline(always)]
    pub fn no_change(self) -> &'a mut W {
        self.variant(CC_MATCH_MODE_A::NO_CHANGE)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 0:1 — clear them, then OR in the new 2-bit value.
        self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03);
        self.w
    }
}
#[doc = "Determines the effect of a counter overflow event (COUNTER reaches PERIOD) on the 'line_out' output signals.\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
// Variant order matches the 2-bit field encoding (SET = 0 .. NO_CHANGE = 3).
pub enum OVERFLOW_MODE_A {
    #[doc = "0: Set to '1'"]
    SET,
    #[doc = "1: Set to '0'"]
    CLEAR,
    #[doc = "2: Invert"]
    INVERT,
    #[doc = "3: No Change"]
    NO_CHANGE,
}
impl From<OVERFLOW_MODE_A> for u8 {
#[inline(always)]
fn from(variant: OVERFLOW_MODE_A) -> Self {
match variant {
OVERFLOW_MODE_A::SET => 0,
OVERFLOW_MODE_A::CLEAR => 1,
OVERFLOW_MODE_A::INVERT => 2,
OVERFLOW_MODE_A::NO_CHANGE => 3,
}
}
}
#[doc = "Reader of field `OVERFLOW_MODE`"]
pub type OVERFLOW_MODE_R = crate::R<u8, OVERFLOW_MODE_A>;
impl OVERFLOW_MODE_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> OVERFLOW_MODE_A {
        // The reader only holds the masked 2-bit field, so 0..=3 is exhaustive.
        match self.bits {
            0 => OVERFLOW_MODE_A::SET,
            1 => OVERFLOW_MODE_A::CLEAR,
            2 => OVERFLOW_MODE_A::INVERT,
            3 => OVERFLOW_MODE_A::NO_CHANGE,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `SET`"]
    #[inline(always)]
    pub fn is_set(&self) -> bool {
        *self == OVERFLOW_MODE_A::SET
    }
    #[doc = "Checks if the value of the field is `CLEAR`"]
    #[inline(always)]
    pub fn is_clear(&self) -> bool {
        *self == OVERFLOW_MODE_A::CLEAR
    }
    #[doc = "Checks if the value of the field is `INVERT`"]
    #[inline(always)]
    pub fn is_invert(&self) -> bool {
        *self == OVERFLOW_MODE_A::INVERT
    }
    #[doc = "Checks if the value of the field is `NO_CHANGE`"]
    #[inline(always)]
    pub fn is_no_change(&self) -> bool {
        *self == OVERFLOW_MODE_A::NO_CHANGE
    }
}
#[doc = "Write proxy for field `OVERFLOW_MODE`"]
pub struct OVERFLOW_MODE_W<'a> {
    // Borrow of the register writer this proxy mutates.
    w: &'a mut W,
}
impl<'a> OVERFLOW_MODE_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: OVERFLOW_MODE_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Set to '1'"]
    #[inline(always)]
    pub fn set(self) -> &'a mut W {
        self.variant(OVERFLOW_MODE_A::SET)
    }
    #[doc = "Set to '0'"]
    #[inline(always)]
    pub fn clear(self) -> &'a mut W {
        self.variant(OVERFLOW_MODE_A::CLEAR)
    }
    #[doc = "Invert"]
    #[inline(always)]
    pub fn invert(self) -> &'a mut W {
        self.variant(OVERFLOW_MODE_A::INVERT)
    }
    #[doc = "No Change"]
    #[inline(always)]
    pub fn no_change(self) -> &'a mut W {
        self.variant(OVERFLOW_MODE_A::NO_CHANGE)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 2:3 — clear them, then OR in the new 2-bit value.
        self.w.bits = (self.w.bits & !(0x03 << 2)) | (((value as u32) & 0x03) << 2);
        self.w
    }
}
#[doc = "Determines the effect of a counter underflow event (COUNTER reaches '0') on the 'line_out' output signals.\n\nValue on reset: 3"]
#[derive(Clone, Copy, Debug, PartialEq)]
// Variant order matches the 2-bit field encoding (SET = 0 .. NO_CHANGE = 3).
pub enum UNDERFLOW_MODE_A {
    #[doc = "0: Set to '1'"]
    SET,
    #[doc = "1: Set to '0'"]
    CLEAR,
    #[doc = "2: Invert"]
    INVERT,
    #[doc = "3: No Change"]
    NO_CHANGE,
}
impl From<UNDERFLOW_MODE_A> for u8 {
#[inline(always)]
fn from(variant: UNDERFLOW_MODE_A) -> Self {
match variant {
UNDERFLOW_MODE_A::SET => 0,
UNDERFLOW_MODE_A::CLEAR => 1,
UNDERFLOW_MODE_A::INVERT => 2,
UNDERFLOW_MODE_A::NO_CHANGE => 3,
}
}
}
#[doc = "Reader of field `UNDERFLOW_MODE`"]
pub type UNDERFLOW_MODE_R = crate::R<u8, UNDERFLOW_MODE_A>;
impl UNDERFLOW_MODE_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> UNDERFLOW_MODE_A {
        // The reader only holds the masked 2-bit field, so 0..=3 is exhaustive.
        match self.bits {
            0 => UNDERFLOW_MODE_A::SET,
            1 => UNDERFLOW_MODE_A::CLEAR,
            2 => UNDERFLOW_MODE_A::INVERT,
            3 => UNDERFLOW_MODE_A::NO_CHANGE,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `SET`"]
    #[inline(always)]
    pub fn is_set(&self) -> bool {
        *self == UNDERFLOW_MODE_A::SET
    }
    #[doc = "Checks if the value of the field is `CLEAR`"]
    #[inline(always)]
    pub fn is_clear(&self) -> bool {
        *self == UNDERFLOW_MODE_A::CLEAR
    }
    #[doc = "Checks if the value of the field is `INVERT`"]
    #[inline(always)]
    pub fn is_invert(&self) -> bool {
        *self == UNDERFLOW_MODE_A::INVERT
    }
    #[doc = "Checks if the value of the field is `NO_CHANGE`"]
    #[inline(always)]
    pub fn is_no_change(&self) -> bool {
        *self == UNDERFLOW_MODE_A::NO_CHANGE
    }
}
#[doc = "Write proxy for field `UNDERFLOW_MODE`"]
pub struct UNDERFLOW_MODE_W<'a> {
    // Borrow of the register writer this proxy mutates.
    w: &'a mut W,
}
impl<'a> UNDERFLOW_MODE_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: UNDERFLOW_MODE_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Set to '1'"]
    #[inline(always)]
    pub fn set(self) -> &'a mut W {
        self.variant(UNDERFLOW_MODE_A::SET)
    }
    #[doc = "Set to '0'"]
    #[inline(always)]
    pub fn clear(self) -> &'a mut W {
        self.variant(UNDERFLOW_MODE_A::CLEAR)
    }
    #[doc = "Invert"]
    #[inline(always)]
    pub fn invert(self) -> &'a mut W {
        self.variant(UNDERFLOW_MODE_A::INVERT)
    }
    #[doc = "No Change"]
    #[inline(always)]
    pub fn no_change(self) -> &'a mut W {
        self.variant(UNDERFLOW_MODE_A::NO_CHANGE)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 4:5 — clear them, then OR in the new 2-bit value.
        self.w.bits = (self.w.bits & !(0x03 << 4)) | (((value as u32) & 0x03) << 4);
        self.w
    }
}
impl R {
    // Each accessor extracts its field's 2-bit slice from the raw register value.
    #[doc = "Bits 0:1 - Determines the effect of a compare match event (COUNTER equals CC register) on the 'line_out' output signals. Note that INVERT is especially useful for center aligned pulse width modulation. To generate a duty cycle of 0 percent, the counter CC register should be set to '0'. For a 100 percent duty cycle, the counter CC register should be set to larger than the counter PERIOD register."]
    #[inline(always)]
    pub fn cc_match_mode(&self) -> CC_MATCH_MODE_R {
        CC_MATCH_MODE_R::new((self.bits & 0x03) as u8)
    }
    #[doc = "Bits 2:3 - Determines the effect of a counter overflow event (COUNTER reaches PERIOD) on the 'line_out' output signals."]
    #[inline(always)]
    pub fn overflow_mode(&self) -> OVERFLOW_MODE_R {
        OVERFLOW_MODE_R::new(((self.bits >> 2) & 0x03) as u8)
    }
    #[doc = "Bits 4:5 - Determines the effect of a counter underflow event (COUNTER reaches '0') on the 'line_out' output signals."]
    #[inline(always)]
    pub fn underflow_mode(&self) -> UNDERFLOW_MODE_R {
        UNDERFLOW_MODE_R::new(((self.bits >> 4) & 0x03) as u8)
    }
}
impl W {
    // Each method returns a write proxy that masks its value into the field's bits.
    #[doc = "Bits 0:1 - Determines the effect of a compare match event (COUNTER equals CC register) on the 'line_out' output signals. Note that INVERT is especially useful for center aligned pulse width modulation. To generate a duty cycle of 0 percent, the counter CC register should be set to '0'. For a 100 percent duty cycle, the counter CC register should be set to larger than the counter PERIOD register."]
    #[inline(always)]
    pub fn cc_match_mode(&mut self) -> CC_MATCH_MODE_W {
        CC_MATCH_MODE_W { w: self }
    }
    #[doc = "Bits 2:3 - Determines the effect of a counter overflow event (COUNTER reaches PERIOD) on the 'line_out' output signals."]
    #[inline(always)]
    pub fn overflow_mode(&mut self) -> OVERFLOW_MODE_W {
        OVERFLOW_MODE_W { w: self }
    }
    #[doc = "Bits 4:5 - Determines the effect of a counter underflow event (COUNTER reaches '0') on the 'line_out' output signals."]
    #[inline(always)]
    pub fn underflow_mode(&mut self) -> UNDERFLOW_MODE_W {
        UNDERFLOW_MODE_W { w: self }
    }
}
|
use anyhow::Result;
use confy::load;
use console::Term;
use protonvpn::{cli::CliOptions, constants::APP_NAME, main as main_cli, vpn::util::Config};
#[paw::main]
fn main(args: CliOptions) -> Result<()> {
    // Load the persisted configuration; the CLI entry point decides how to
    // treat a load failure.
    let config = load::<Config>(APP_NAME);

    // A single buffered stdout handle is threaded through the whole program.
    let mut term = Term::buffered_stdout();
    main_cli(args, config, &mut term)
}
|
//! Replaces the game's broken cheats system with our own system that integrates with the menu.
use std::sync::atomic::{AtomicBool, Ordering};
use crate::{
call_original, gui, hook,
menu::{self, RowData, TabData},
settings::Settings,
};
use lazy_static::lazy_static;
use log::error;
use once_cell::sync::Lazy;
/// A single entry from the game's internal cheat table.
pub struct Cheat {
    // Index into the game's cheat function and state tables (see `get_function`
    // and `get_active_mut`).
    index: usize,
    // Cheat code string; may be empty (code-less cheats are shown as "???").
    code: &'static str,
    // Human-readable description shown in the menu.
    description: &'static str,
}
lazy_static! {
static ref WAITING_CHEATS: std::sync::Mutex<Vec<usize>> = std::sync::Mutex::new(vec![]);
}
impl Cheat {
    const fn new(index: usize, code: &'static str, description: &'static str) -> Cheat {
        Cheat {
            index,
            code,
            description,
        }
    }

    /// Returns the cheat's implementation function, if the game's function
    /// table (slid address 0x10065c358, 8 bytes per entry) has one for it.
    fn get_function(&self) -> Option<fn()> {
        // `self.index` is already a `usize`, so no cast is needed here.
        let entry_address = 0x10065c358 + (self.index * 8);
        let ptr = hook::slide::<*const *const u64>(entry_address);

        // The array pointer shouldn't be null, but we check it just in case.
        // The more important check is the second, which ensures that the function pointer is not 0.
        if ptr.is_null() || unsafe { *ptr }.is_null() {
            None
        } else {
            // Get the value again, but this time as a pointer to a function.
            // The reason we don't get it as a *const fn() the first time is that 'fn' is itself
            // the function pointer, but we can't check if it is null. We use *const *const u64
            // instead because we can check the inner pointer as well.
            let func_ptr = hook::slide::<*const fn()>(entry_address);
            Some(unsafe { *func_ptr })
        }
    }

    /// Returns a mutable reference to the cheat's "active" flag in game memory
    /// (one byte per cheat starting at slid address 0x10072dda8).
    fn get_active_mut(&self) -> &'static mut bool {
        // SAFETY: relies on the game keeping a valid bool at this slid address
        // for every table index.
        unsafe {
            hook::slide::<*mut bool>(0x10072dda8 + self.index)
                .as_mut()
                .unwrap()
        }
    }

    fn is_active(&self) -> bool {
        *self.get_active_mut()
    }

    /// Queues the cheat to be executed on the next `do_cheats` pass.
    fn queue(&self) {
        let mut waiting = WAITING_CHEATS.lock().unwrap();
        waiting.push(self.index);
    }

    /// Removes the cheat from the execution queue.
    fn cancel(&self) {
        let mut waiting = WAITING_CHEATS.lock().unwrap();
        waiting.retain(|cheat_index| *cheat_index != self.index);
    }

    fn is_in_queue(&self) -> bool {
        let waiting = WAITING_CHEATS.lock().unwrap();
        waiting.contains(&self.index)
    }

    /// Executes the cheat: calls its function if it has one, otherwise
    /// toggles its active flag.
    fn run(&self) {
        if let Some(function) = self.get_function() {
            log::info!("Calling cheat function {:?}", function);
            function();
            return;
        }

        // If the cheat has no function pointer, then we need to toggle its active status.
        let active = self.get_active_mut();
        *active = !*active;
    }
}
// (a, b) where a = "check b" and b = "save the cheat states". a is false when a save is in progress.
// This is a weak system for avoiding two saves happening at the same time, but it works well enough.
// Besides, it's practically impossible for a save to be triggered when one is already in progress because
// saving is very fast.
// In other words: SAVE_FLAGS.0 = "saving is currently allowed", SAVE_FLAGS.1 = "a save was requested".
static SAVE_FLAGS: Lazy<(AtomicBool, AtomicBool)> =
    Lazy::new(|| (AtomicBool::new(true), AtomicBool::new(false)));
// CCheat::DoCheats is where cheat codes are checked and then cheats activated (indirectly),
// so we need to do our cheat stuff here to ensure that the cheats don't fuck up the game by
// doing stuff at weird times. The point in CGame::Process where DoCheats is called is where
// every other system in the game expects cheats to be activated.
// Cheats that need textures to be loaded - such as weapon or vehicle cheats - can crash the
// game if they are executed on the wrong thread or at the wrong time, so it is very important
// that we get this right.
fn do_cheats() {
    if let Ok(waiting) = WAITING_CHEATS.lock().as_mut() {
        // Run and remove every queued cheat in a single pass. `drain` yields the
        // indices by value (they are already `usize`) and leaves the queue empty.
        for cheat_index in waiting.drain(..) {
            CHEATS[cheat_index].run();
        }
    } else {
        error!("Unable to lock cheat queue for CCheat::DoCheats!");
    }

    if SAVE_FLAGS.0.load(Ordering::SeqCst) && SAVE_FLAGS.1.load(Ordering::SeqCst) {
        // Ignore further requests to save the cheat states.
        SAVE_FLAGS.0.store(false, Ordering::SeqCst);

        // We need to save the cheat states now. We only do this after the cheat functions have been called because
        // some will clear their "enabled" status when they run.
        // todo: Check that saving cheats after execution is always a good idea.
        // We just save an array of bytes in the order of the cheats, with each being 1 or 0 depending on the status of that cheat.
        // We do this outside of our saving thread because we don't want the statuses to change while we're accessing them.
        let cheat_state_bytes: Vec<u8> = CHEATS
            .iter()
            .map(|cheat| {
                let is_active = cheat.is_active();
                if is_active {
                    log::info!("Saving status ON for cheat '{}'.", cheat.code);
                }
                is_active as u8
            })
            .collect();

        std::thread::spawn(|| {
            if let Err(err) = std::fs::write(
                crate::resources::get_documents_path("cleo_saved_cheats.u8"),
                cheat_state_bytes,
            ) {
                log::error!("Error while saving cheat states: {}", err);
            } else {
                log::info!("Cheat states saved successfully.");
            }

            // Clear save request flag (so we don't keep saving infinitely).
            SAVE_FLAGS.1.store(false, Ordering::SeqCst);

            // Allow new saving requests to be processed.
            SAVE_FLAGS.0.store(true, Ordering::SeqCst);
        });
    }
}
/// Menu row state for a single cheat.
struct CheatData {
    // The cheat this row represents.
    cheat: &'static Cheat,
    // `Some(state)` when a toggle is queued: the active state the cheat will
    // have after the queued toggle runs. `None` when nothing is queued.
    queued_state: Option<bool>,
}
impl CheatData {
    fn new(cheat: &'static Cheat) -> CheatData {
        // A cheat already sitting in the queue is about to toggle, so record
        // the state it will have once the queued toggle runs.
        let queued_state = cheat.is_in_queue().then(|| !cheat.is_active());
        CheatData { cheat, queued_state }
    }

    /// The active state the cheat will have after any queued toggle applies.
    fn will_be_active(&self) -> bool {
        match self.queued_state {
            Some(state) => state,
            None => self.cheat.is_active(),
        }
    }
}
impl RowData for CheatData {
    fn title(&self) -> String {
        // Cheats without a code are displayed as "???".
        let code = self.cheat.code;
        if code.is_empty() {
            String::from("???")
        } else {
            code.into()
        }
    }

    fn detail(&self) -> menu::RowDetail {
        menu::RowDetail::Info(self.cheat.description.into())
    }

    fn value(&self) -> &str {
        /*
            State                   Tint     Status
            In queue, turning on    Blue     "Queued On"
            In queue, turning off   Red      "Queued Off"
            Not in queue, on        Green    "On"
            Not in queue, off       None     "Off"
        */
        match (self.cheat.is_in_queue(), self.will_be_active()) {
            (true, true) => "Queued On",
            (true, false) => "Queued Off",
            (false, true) => "On",
            (false, false) => "Off",
        }
    }

    fn tint(&self) -> Option<(u8, u8, u8)> {
        // Same state table as `value` (see the comment there).
        match (self.cheat.is_in_queue(), self.will_be_active()) {
            (true, true) => Some(gui::colours::BLUE),
            (true, false) => Some(gui::colours::RED),
            (false, true) => Some(gui::colours::GREEN),
            (false, false) => None,
        }
    }

    fn handle_tap(&mut self) -> bool {
        match self.queued_state {
            // Already queued: a second tap cancels the pending toggle.
            Some(_) => {
                self.cheat.cancel();
                self.queued_state = None;
            }
            // Not queued yet: queue a toggle of the current state.
            None => {
                self.queued_state = Some(!self.cheat.is_active());
                self.cheat.queue();
            }
        }

        if Settings::shared().save_cheats.load(Ordering::SeqCst) {
            // Request that the cheats be saved because a status is likely to change.
            SAVE_FLAGS.1.store(true, Ordering::SeqCst);
        }

        true
    }
}
/// Builds the "Cheats" menu tab, with one row per cheat sorted by code.
///
/// Fixes: the original wrapped the sorted vector in a `Lazy` that was
/// dereferenced immediately (so the laziness bought nothing), and called the
/// no-op `.iter().by_ref()`. The vector is now built directly.
pub fn tab_data() -> TabData {
    // Sort by code so the menu is alphabetical. Cheats without a code would
    // otherwise cluster as "???" rows at the top, so they are pushed to the
    // end with a sentinel key.
    let mut sorted_cheats: Vec<&Cheat> = CHEATS.iter().collect();
    sorted_cheats.sort_by_key(|cheat| {
        if cheat.code.is_empty() {
            "ZZZZZ"
        } else {
            cheat.code
        }
    });

    TabData {
        name: "Cheats".to_string(),
        warning: Some(
            r#"Using cheats can lead to a crash and/or loss of progress.
If you don't want to risk breaking your save, back up your progress to a different slot first."#
                .to_string(),
        ),
        row_data: sorted_cheats
            .into_iter()
            .map(|cheat| Box::new(CheatData::new(cheat)) as Box<dyn RowData>)
            .collect(),
    }
}
/// Hook for the game's cheat-reset routine: runs the original, then restores
/// any saved cheat statuses from `cleo_saved_cheats.u8` (one byte per cheat).
fn reset_cheats() {
    log::info!("Resetting cheats");
    call_original!(crate::targets::reset_cheats);

    if !Settings::shared().save_cheats.load(Ordering::SeqCst) {
        log::info!("Cheat saving/loading is disabled.");
        return;
    }

    log::info!("Loading saved cheats.");

    let path = crate::resources::get_documents_path("cleo_saved_cheats.u8");
    if !path.exists() {
        log::info!("No saved cheats file found.");
        return;
    }

    let loaded_bytes = match std::fs::read(path) {
        Ok(bytes) => bytes,
        Err(err) => {
            log::error!("Error loading cheat file: {}", err);
            return;
        }
    };

    // A valid save holds exactly one byte per cheat.
    if loaded_bytes.len() != CHEATS.len() {
        log::error!("Invalid cheat save: byte count must match cheat count.");
        return;
    }

    // Every byte must encode a Boolean (0 or 1).
    if loaded_bytes.iter().any(|byte| *byte > 1) {
        log::error!("Invalid cheat save: found non-Boolean byte.");
        return;
    }

    // Apply the saved statuses in cheat order.
    for (cheat, byte) in CHEATS.iter().zip(loaded_bytes.iter()) {
        *cheat.get_active_mut() = *byte == 1;
    }

    log::info!("Cheats loaded successfully.");
}
/// Installs the cheat-system hooks over the game's own cheat functions.
pub fn init() {
    crate::targets::do_cheats::install(do_cheats);
    crate::targets::reset_cheats::install(reset_cheats);
}
// We have to include the codes because the game doesn't have the array.
// Android does, though, so I copied the codes from there. The order has been preserved.
// The spreadsheet at
// https://docs.google.com/spreadsheets/d/1-rmga12W9reALga7fct22tJ-1thxbbsfGiGltK2qgh0/edit?usp=sharing
// was very helpful during research, and the page at https://gta.fandom.com/wiki/Cheats_in_GTA_San_Andreas
// was really useful for writing cheat descriptions.
/// All supported cheats, in index order.
///
/// The order is significant: the save file (`cleo_saved_cheats.u8`) stores one
/// byte per cheat at the same index, so entries must never be reordered.
static CHEATS: [Cheat; 111] = [
    Cheat::new(0, "THUGSARMOURY", "Weapon set 1"),
    Cheat::new(1, "PROFESSIONALSKIT", "Weapon set 2"),
    Cheat::new(2, "NUTTERSTOYS", "Weapon set 3"),
    Cheat::new(3, "", "Give dildo, minigun and thermal/night-vision goggles"),
    Cheat::new(4, "", "Advance clock by 4 hours"),
    Cheat::new(5, "", "Skip to completion on some missions"),
    Cheat::new(6, "", "Debug (show mappings)"),
    Cheat::new(7, "", "Full invincibility"),
    Cheat::new(8, "", "Debug (show tap to target)"),
    Cheat::new(9, "", "Debug (show targeting)"),
    Cheat::new(10, "INEEDSOMEHELP", "Give health, armour and $250,000"),
    Cheat::new(11, "TURNUPTHEHEAT", "Increase wanted level by two stars"),
    Cheat::new(12, "TURNDOWNTHEHEAT", "Clear wanted level"),
    Cheat::new(13, "PLEASANTLYWARM", "Sunny weather"),
    Cheat::new(14, "TOODAMNHOT", "Very sunny weather"),
    Cheat::new(15, "DULLDULLDAY", "Overcast weather"),
    Cheat::new(16, "STAYINANDWATCHTV", "Rainy weather"),
    Cheat::new(17, "CANTSEEWHEREIMGOING", "Foggy weather"),
    Cheat::new(18, "TIMEJUSTFLIESBY", "Faster time"),
    Cheat::new(19, "SPEEDITUP", "Faster gameplay"),
    Cheat::new(20, "SLOWITDOWN", "Slower gameplay"),
    Cheat::new(21, "ROUGHNEIGHBOURHOOD", "Pedestrians riot, give player golf club"),
    Cheat::new(22, "STOPPICKINGONME", "Pedestrians attack the player"),
    Cheat::new(23, "SURROUNDEDBYNUTTERS", "Give pedestrians weapons"),
    Cheat::new(24, "TIMETOKICKASS", "Spawn Rhino tank"),
    Cheat::new(25, "OLDSPEEDDEMON", "Spawn Bloodring Banger"),
    Cheat::new(26, "", "Spawn stock car"),
    Cheat::new(27, "NOTFORPUBLICROADS", "Spawn Hotring Racer A"),
    Cheat::new(28, "JUSTTRYANDSTOPME", "Spawn Hotring Racer B"),
    Cheat::new(29, "WHERESTHEFUNERAL", "Spawn Romero"),
    Cheat::new(30, "CELEBRITYSTATUS", "Spawn Stretch Limousine"),
    Cheat::new(31, "TRUEGRIME", "Spawn Trashmaster"),
    Cheat::new(32, "18HOLES", "Spawn Caddy"),
    Cheat::new(33, "ALLCARSGOBOOM", "Explode all vehicles"),
    Cheat::new(34, "WHEELSONLYPLEASE", "Invisible cars"),
    Cheat::new(35, "STICKLIKEGLUE", "Improved suspension and handling"),
    Cheat::new(36, "GOODBYECRUELWORLD", "Suicide"),
    Cheat::new(37, "DONTTRYANDSTOPME", "Traffic lights are always green"),
    Cheat::new(38, "ALLDRIVERSARECRIMINALS", "All NPC drivers drive aggressively and have a wanted level"),
    Cheat::new(39, "PINKISTHENEWCOOL", "Pink traffic"),
    Cheat::new(40, "SOLONGASITSBLACK", "Black traffic"),
    Cheat::new(41, "", "Cars have sideways wheels"),
    Cheat::new(42, "FLYINGFISH", "Flying boats"),
    Cheat::new(43, "WHOATEALLTHEPIES", "Maximum fat"),
    Cheat::new(44, "BUFFMEUP", "Maximum muscle"),
    Cheat::new(45, "", "Maximum gambling skill"),
    Cheat::new(46, "LEANANDMEAN", "Minimum fat and muscle"),
    Cheat::new(47, "BLUESUEDESHOES", "All pedestrians are Elvis Presley"),
    Cheat::new(48, "ATTACKOFTHEVILLAGEPEOPLE", "Pedestrians attack the player with guns and rockets"),
    Cheat::new(49, "LIFESABEACH", "Beach party theme"),
    Cheat::new(50, "ONLYHOMIESALLOWED", "Gang wars"),
    Cheat::new(51, "BETTERSTAYINDOORS", "Pedestrians replaced with fighting gang members"),
    Cheat::new(52, "NINJATOWN", "Triad theme"),
    Cheat::new(53, "LOVECONQUERSALL", "Pimp mode"),
    Cheat::new(54, "EVERYONEISPOOR", "Rural traffic"),
    Cheat::new(55, "EVERYONEISRICH", "Sports car traffic"),
    Cheat::new(56, "CHITTYCHITTYBANGBANG", "Flying cars"),
    Cheat::new(57, "CJPHONEHOME", "Very high bunny hops"),
    Cheat::new(58, "JUMPJET", "Spawn Hydra"),
    Cheat::new(59, "IWANTTOHOVER", "Spawn Vortex"),
    Cheat::new(60, "TOUCHMYCARYOUDIE", "Destroy other vehicles on collision"),
    Cheat::new(61, "SPEEDFREAK", "All cars have nitro"),
    Cheat::new(62, "BUBBLECARS", "Cars float away when hit"),
    Cheat::new(63, "NIGHTPROWLER", "Always midnight"),
    Cheat::new(64, "DONTBRINGONTHENIGHT", "Always 9PM"),
    Cheat::new(65, "SCOTTISHSUMMER", "Stormy weather"),
    Cheat::new(66, "SANDINMYEARS", "Sandstorm"),
    Cheat::new(67, "", "Predator?"),
    Cheat::new(68, "KANGAROO", "10x jump height"),
    Cheat::new(69, "NOONECANHURTME", "Infinite health"),
    Cheat::new(70, "MANFROMATLANTIS", "Infinite lung capacity"),
    Cheat::new(71, "LETSGOBASEJUMPING", "Spawn Parachute"),
    Cheat::new(72, "ROCKETMAN", "Spawn Jetpack"),
    Cheat::new(73, "IDOASIPLEASE", "Lock wanted level"),
    Cheat::new(74, "BRINGITON", "Six-star wanted level"),
    Cheat::new(75, "STINGLIKEABEE", "Super punches"),
    Cheat::new(76, "IAMNEVERHUNGRY", "Player never gets hungry"),
    Cheat::new(77, "STATEOFEMERGENCY", "Pedestrians riot"),
    Cheat::new(78, "CRAZYTOWN", "Carnival theme"),
    Cheat::new(79, "TAKEACHILLPILL", "Adrenaline effects"),
    Cheat::new(80, "FULLCLIP", "Everyone has unlimited ammo"),
    Cheat::new(81, "IWANNADRIVEBY", "Full weapon control in vehicles"),
    Cheat::new(82, "GHOSTTOWN", "No pedestrians, reduced live traffic"),
    Cheat::new(83, "HICKSVILLE", "Rural theme"),
    Cheat::new(84, "WANNABEINMYGANG", "Recruit anyone with pistols"),
    Cheat::new(85, "NOONECANSTOPUS", "Recruit anyone with AK-47s"),
    Cheat::new(86, "ROCKETMAYHEM", "Recruit anyone with rocket launchers"),
    Cheat::new(87, "WORSHIPME", "Maximum respect"),
    Cheat::new(88, "HELLOLADIES", "Maximum sex appeal"),
    Cheat::new(89, "ICANGOALLNIGHT", "Maximum stamina"),
    Cheat::new(90, "PROFESSIONALKILLER", "Hitman level for all weapons"),
    Cheat::new(91, "NATURALTALENT", "Maximum vehicle skills"),
    Cheat::new(92, "OHDUDE", "Spawn Hunter"),
    Cheat::new(93, "FOURWHEELFUN", "Spawn Quad"),
    Cheat::new(94, "HITTHEROADJACK", "Spawn Tanker with Tanker Trailer"),
    Cheat::new(95, "ITSALLBULL", "Spawn Dozer"),
    Cheat::new(96, "FLYINGTOSTUNT", "Spawn Stunt Plane"),
    Cheat::new(97, "MONSTERMASH", "Spawn Monster Truck"),
    Cheat::new(98, "", "Prostitutes pay you"),
    Cheat::new(99, "", "Taxis have hydraulics and nitro"),
    // The "slot cheats" are known to crash; kept so indices stay stable.
    Cheat::new(100, "", "CRASHES! Slot cheat 1"),
    Cheat::new(101, "", "CRASHES! Slot cheat 2"),
    Cheat::new(102, "", "CRASHES! Slot cheat 3"),
    Cheat::new(103, "", "CRASHES! Slot cheat 4"),
    Cheat::new(104, "", "CRASHES! Slot cheat 5"),
    Cheat::new(105, "", "CRASHES! Slot cheat 6"),
    Cheat::new(106, "", "CRASHES! Slot cheat 7"),
    Cheat::new(107, "", "CRASHES! Slot cheat 8"),
    Cheat::new(108, "", "CRASHES! Slot cheat 9"),
    Cheat::new(109, "", "CRASHES! Slot cheat 10"),
    Cheat::new(110, "", "Xbox helper"),
];
|
use std::{
env,
fmt::Write,
io::Cursor,
thread,
time::{Duration, Instant},
};
use futures::StreamExt;
use image::io::Reader as ImageReader;
use inline_python::{python, Context};
use serde::Deserialize;
use telegram_bot::{
prelude::*, reply_markup, Api, CanSendMessage, MessageKind, ParseMode, UpdateKind,
};
#[macro_use]
extern crate log;
/// Telegram bot entry point: classifies Pokémon photos with a fastai model.
///
/// Photos are downloaded through the Bot API, resized, classified by the
/// embedded Python learner, and answered with a Pokédex link. Text messages
/// get canned replies.
#[tokio::main]
async fn main() {
    log4rs::init_file("log_config.yml", Default::default()).expect("No se pudo iniciar Log");
    info!("Iniciando...");
    let token = env::var("POKE_TOKEN").expect("Token no encontrado");
    let api = Api::new(&token);
    let mut stream = api.stream();
    let client = reqwest::Client::new();
    // The base URL is kept and truncated back to this length after each request.
    let mut telegram_url = String::from("https://api.telegram.org/");
    let mut ans: Vec<String> = Vec::new();
    let mut text = String::new();
    let telegram_len = telegram_url.len();
    // Load the classifier once up front; reused for every photo.
    let c: Context = python! {
        from fastbook import load_learner
        learn_inf = load_learner("./export.pkl")
    };
    info!("Listo para recibir querys.");
    while let Some(update) = stream.next().await {
        match update {
            Ok(update) => {
                if let UpdateKind::Message(mut message) = update.kind {
                    let now = Instant::now();
                    match message.kind {
                        MessageKind::Photo { ref mut data, .. } => {
                            // Resolve the server-side file path for the photo.
                            write!(
                                telegram_url,
                                "bot{}/getFile?file_id={}",
                                &token, &data[0].file_id
                            )
                            .unwrap();
                            let response = client
                                .get(&telegram_url)
                                .send()
                                .await
                                .unwrap()
                                .json::<Response>()
                                .await
                                .unwrap();
                            telegram_url.truncate(telegram_len);
                            // Download the actual file contents.
                            write!(
                                telegram_url,
                                "file/bot{}/{}",
                                &token, &response.result.file_path
                            )
                            .unwrap();
                            let image_bytes = client
                                .get(&telegram_url)
                                .send()
                                .await
                                .unwrap()
                                .bytes()
                                .await
                                .unwrap();
                            let img = ImageReader::with_format(
                                Cursor::new(image_bytes),
                                image::ImageFormat::Jpeg,
                            )
                            .decode()
                            .unwrap();
                            // BUG FIX: `resize_exact` returns the resized copy
                            // rather than mutating in place; the result was
                            // previously discarded, so the model received the
                            // original-size image.
                            let img =
                                img.resize_exact(680, 680, image::imageops::FilterType::Nearest);
                            img.save_with_format("i", image::ImageFormat::Jpeg).unwrap();
                            c.run(python! {
                                prediction = learn_inf.predict("i")
                                ans = f"{prediction[0]}", f"{max(prediction[-1]):.4}"
                            });
                            ans = c.get::<Vec<String>>("ans");
                            let pokelink = format!("https://www.pokemon.com/us/pokedex/{}", ans[0]);
                            write!(
                                &mut text,
                                "[{}]({}), confidence {}",
                                ans[0], pokelink, ans[1],
                            )
                            .unwrap();
                            // Capitalise the first letter of the name (byte 0 is '[').
                            text[..=1].make_ascii_uppercase();
                            let mut reply = message.text_reply(&text);
                            reply.parse_mode(ParseMode::Markdown);
                            reply.reply_markup(
                                reply_markup!(inline_keyboard, ["Pokédex" url pokelink]),
                            );
                            api.spawn(reply);
                            telegram_url.truncate(telegram_len);
                            text.clear();
                        }
                        MessageKind::Text { ref mut data, .. } => {
                            if data.contains("/h") {
                                api.spawn(message.chat.text(HELP));
                            } else if data.contains("/s") {
                                api.spawn(message.chat.text(START).parse_mode(ParseMode::Markdown));
                            } else {
                                api.spawn(message.chat.text(NO_MATCH));
                            }
                        }
                        _ => {
                            api.spawn(message.chat.text(NO_MATCH));
                        }
                    }
                    // BUG FIX: `ans` is only populated by photo messages, so
                    // indexing it unconditionally panicked on the first
                    // non-photo update.
                    info!(
                        "{} {} {} {:#?}",
                        &message.from.first_name,
                        ans.get(0).map_or("-", String::as_str),
                        ans.get(1).map_or("-", String::as_str),
                        Instant::now().duration_since(now)
                    );
                }
            }
            Err(e) => {
                error!("{}", e);
                // Back off briefly before polling again after an API error.
                thread::sleep(Duration::from_secs(1));
            }
        }
    }
}
/// Deserialized wrapper for Telegram's `getFile` response.
#[derive(Deserialize)]
struct Response {
    result: Result,
}

/// The `result` payload of `getFile`.
///
/// NOTE(review): this type shadows `std::result::Result` in this module;
/// consider renaming (e.g. `FileResult`) to avoid confusion.
#[derive(Deserialize)]
struct Result {
    file_path: String,
}
// Canned, user-facing bot replies.
static START: &str =
    "*PokeClass*\n\nJust send a picture of a Pokémon and I will try to guess which one it is using deep learning";
static HELP: &str = "Just send a picture";
static NO_MATCH: &str = "I didn't understand your message, try again or use /help";
|
use glium::glutin::{self, event::{ElementState, VirtualKeyCode, Event, WindowEvent}, platform::unix::WindowExtUnix};
use glium::{Display, Surface};
use imgui::{Context, FontConfig, FontGlyphRanges, FontSource, Ui};
use imgui_glium_renderer::Renderer;
use imgui_winit_support::{HiDpiMode, WinitPlatform};
use std::time::Instant;
// mod clipboard;
/// Bundles everything needed to drive an imgui UI on top of glium.
pub struct System {
    pub event_loop: glutin::event_loop::EventLoop<()>,
    pub display: Display,
    pub imgui: Context,
    pub platform: WinitPlatform,
    pub renderer: Renderer,
    // Font size in physical pixels (base size scaled by the hidpi factor).
    pub font_size: f32,
}
/// Creates the fullscreen window, GL context, imgui context and renderer.
///
/// NOTE(review): `title` is only used to derive `_title` (the part after the
/// last '/'), which is never applied to the window — the window is created
/// fullscreen on the primary monitor instead. Confirm whether that is intended.
pub fn init(title: &str) -> System {
    let _title = match title.rfind('/') {
        Some(idx) => title.split_at(idx + 1).1,
        None => title,
    };
    let event_loop = glutin::event_loop::EventLoop::new();
    // Prefer OpenGL 3.0, falling back to OpenGL ES 2.0.
    let cb = glutin::ContextBuilder::new()
        .with_gl(glutin::GlRequest::GlThenGles { opengl_version: (3, 0), opengles_version: (2, 0) })
        .with_vsync(true);
    let wb = glutin::window::WindowBuilder::new()
        .with_fullscreen(Some(event_loop.primary_monitor()));
    let gl_window = cb.build_windowed(wb, &event_loop).unwrap();
    // Print every GL debug message — useful while bringing the stack up.
    let display = Display::with_debug(gl_window, glium::debug::DebugCallbackBehavior::PrintAll).unwrap();
    let mut imgui = Context::create();
    // Don't persist imgui window layout to disk.
    imgui.set_ini_filename(None);
    // if let Some(backend) = clipboard::init() {
    //     imgui.set_clipboard_backend(Box::new(backend));
    // } else {
    //     eprintln!("Failed to initialize clipboard");
    // }
    let mut platform = WinitPlatform::init(&mut imgui);
    {
        let gl_window = display.gl_window();
        let window = gl_window.window();
        platform.attach_window(imgui.io_mut(), &window, HiDpiMode::Rounded);
    }
    let hidpi_factor = platform.hidpi_factor();
    // Base font size of 48 px, scaled for the display's DPI.
    let font_size = (48.0 * hidpi_factor) as f32;
    imgui.fonts().add_font(&[
        FontSource::DefaultFontData {
            config: Some(FontConfig {
                size_pixels: font_size,
                ..FontConfig::default()
            }),
        },
        FontSource::TtfData {
            data: include_bytes!("../../../resources/mplus-1p-regular.ttf"),
            // data: include_bytes!("../../../resources/Roboto-Regular.ttf"),
            size_pixels: font_size,
            config: Some(FontConfig {
                // Slightly thicken the rasterized glyphs.
                rasterizer_multiply: 1.75,
                glyph_ranges: FontGlyphRanges::japanese(),
                ..FontConfig::default()
            }),
        },
    ]);
    // Fonts were rasterized at DPI-scaled size; scale the logical size back down.
    imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;
    imgui.io_mut().config_flags |= imgui::ConfigFlags::NAV_ENABLE_GAMEPAD;
    let renderer = Renderer::init(&mut imgui, &display).expect("Failed to initialize renderer");
    System {
        event_loop,
        display,
        imgui,
        platform,
        renderer,
        font_size,
    }
}
impl System {
    /// Runs the render loop until `run_ui` sets its `bool` argument to false.
    ///
    /// NOTE(review): the winit event pump is commented out, so window events
    /// (including close requests) are never processed here — the loop can only
    /// exit via `run_ui`. Presumably intentional for this direct-display
    /// (GBM page-flip) setup; confirm.
    pub fn main_loop<F: FnMut(&mut bool, &mut Ui)>(self, mut run_ui: F) {
        let System {
            event_loop,
            display,
            mut imgui,
            platform,
            mut renderer,
            ..
        } = self;
        let gl_window = display.gl_window();
        let window = gl_window.window();
        let mut last_frame = Instant::now();
        let mut run = true;
        while run {
            // event_loop.poll_events(|event| {
            //     platform.handle_event(imgui.io_mut(), &window, &event);
            //     if let Event::WindowEvent { event, .. } = event {
            //         if let WindowEvent::CloseRequested = event {
            //             run = false;
            //         }
            //     }
            // });
            let io = imgui.io_mut();
            platform
                .prepare_frame(io, &window)
                .expect("Failed to start frame");
            // Feed imgui the time elapsed since the previous frame.
            last_frame = io.update_delta_time(last_frame);
            let mut ui = imgui.frame();
            run_ui(&mut run, &mut ui);
            let mut target = display.draw();
            target.clear_color_srgb(0.1, 0.1, 0.1, 1.0);
            platform.prepare_render(&ui, &window);
            let draw_data = ui.render();
            renderer
                .render(&mut target, draw_data)
                .expect("Rendering failed");
            target.finish().expect("Failed to swap buffers");
            // Present via GBM page flip (Unix/DRM-specific window extension).
            window.gbm_page_flip();
        }
    }
}
|
use crate::io::TwzIOHdr;
use crate::io::{PollStates, ReadFlags, ReadOutput, ReadResult, WriteFlags, WriteOutput, WriteResult};
use std::sync::atomic::{AtomicU32, Ordering};
use twz::event::{Event, EventHdr};
use twz::mutex::TwzMutex;
/// Tag identifying the bstream metadata extension.
pub const BSTREAM_METAEXT_TAG: u64 = 0x00000000bbbbbbbb;

/// Shared header for a byte-stream object: an event header for readiness
/// notification plus the mutex-protected ring-buffer state.
#[repr(C)]
#[derive(Default)]
pub struct BstreamHdr {
    ev: EventHdr,
    lock: TwzMutex<BstreamInternal>,
}

/// Ring-buffer state guarded by `BstreamHdr::lock`.
#[repr(C)]
struct BstreamInternal {
    flags: u32,
    // Producer index: next slot to write.
    head: AtomicU32,
    // Consumer index: next slot to read.
    tail: AtomicU32,
    // log2 of the addressable ring capacity; indices are masked with
    // `(1 << nbits) - 1`.
    // NOTE(review): the default nbits is 12 (4096 slots) while `buffer` is
    // 8192 bytes, so half the buffer is unreachable — confirm whether nbits
    // should be 13.
    nbits: u32,
    io: TwzIOHdr,
    buffer: [u8; 8192],
}
impl crate::io::TwzIO for BstreamHdr {
    /// Builds an event handle for the requested poll states.
    fn poll(&self, events: PollStates) -> Option<twz::event::Event> {
        Some(twz::event::Event::new(&self.ev, events.bits()))
    }

    /// Reads up to `buf.len()` bytes from the ring buffer.
    ///
    /// Returns as soon as at least one byte has been read; blocks while the
    /// stream is empty unless `NONBLOCK` is set.
    fn read(&self, buf: &mut [u8], flags: ReadFlags) -> ReadResult {
        let mut internal = self.lock.lock();
        let mut count = 0;
        while count < buf.len() {
            if internal.head.load(Ordering::SeqCst) == internal.tail.load(Ordering::SeqCst) {
                /* empty */
                // Return a partial read, or fall through to WouldBlock,
                // instead of waiting.
                if count != 0 || flags.contains_any(ReadFlags::NONBLOCK) {
                    break;
                }
                // If READ was still set, data may have raced in between the
                // emptiness check and here — re-check before sleeping.
                if self.ev.clear(PollStates::READ.bits()) != 0 {
                    continue;
                }
                // Release the lock while waiting so writers can make progress.
                drop(internal);
                let event = Event::new(&self.ev, PollStates::READ.bits());
                twz::event::wait(&[&event], None).unwrap();
                internal = self.lock.lock();
                continue;
            }
            let tail = internal.tail.load(Ordering::SeqCst);
            buf[count] = internal.buffer[tail as usize];
            internal
                .tail
                .store((tail + 1) & ((1 << internal.nbits) - 1), Ordering::SeqCst);
            count += 1;
        }
        // Drained everything: clear readability until a writer wakes us again.
        if internal.head.load(Ordering::SeqCst) == internal.tail.load(Ordering::SeqCst) {
            self.ev.clear(PollStates::READ.bits());
        }
        // Space was freed, so wake any blocked writers.
        self.ev.wake(PollStates::WRITE.bits(), None);
        if count == 0 && (flags.contains_any(ReadFlags::NONBLOCK)) {
            Ok(ReadOutput::WouldBlock)
        } else {
            Ok(ReadOutput::Done(count))
        }
    }

    /// Writes up to `buf.len()` bytes into the ring buffer.
    ///
    /// One slot is always kept free so a full ring can be distinguished from
    /// an empty one — hence the `free_space() <= 1` checks.
    fn write(&self, buf: &[u8], flags: WriteFlags) -> WriteResult {
        let mut internal = self.lock.lock();
        let mut count = 0;
        while count < buf.len() {
            if internal.free_space() <= 1 {
                // Block only if nothing has been written yet and blocking is
                // allowed; otherwise report the partial write.
                if count == 0 && !flags.contains_any(WriteFlags::NONBLOCK) {
                    // If WRITE was still set, space may have raced free —
                    // re-check before sleeping.
                    if self.ev.clear(PollStates::WRITE.bits()) != 0 {
                        continue;
                    }
                    // Release the lock while waiting so readers can drain.
                    drop(internal);
                    let event = Event::new(&self.ev, PollStates::WRITE.bits());
                    twz::event::wait(&[&event], None).unwrap();
                    internal = self.lock.lock();
                    continue;
                }
                break;
            }
            let head = internal.head.load(Ordering::SeqCst);
            internal.buffer[head as usize] = buf[count];
            internal
                .head
                .store((head + 1) & ((1 << internal.nbits) - 1), Ordering::SeqCst);
            count += 1;
        }
        // Ring is (effectively) full: clear writability until a reader drains.
        if internal.free_space() <= 1 {
            self.ev.clear(PollStates::WRITE.bits());
        }
        // Data is available, so wake any blocked readers.
        self.ev.wake(PollStates::READ.bits(), None);
        if count == 0 && (flags.contains_any(WriteFlags::NONBLOCK)) {
            Ok(WriteOutput::WouldBlock)
        } else {
            Ok(WriteOutput::Done(count))
        }
    }
}
impl Default for BstreamInternal {
    // Hand-written because the defaults are not all zero: `nbits` starts at 12
    // (4096 addressable slots).
    fn default() -> Self {
        Self {
            flags: 0,
            head: AtomicU32::new(0),
            tail: AtomicU32::new(0),
            nbits: 12,
            io: TwzIOHdr::default(),
            buffer: [0; 8192],
        }
    }
}
impl BstreamInternal {
    /// Number of free slots in the ring.
    ///
    /// When head == tail (empty) this returns the full capacity `1 << nbits`.
    /// Writers treat `free_space() <= 1` as full, keeping one slot reserved so
    /// a full ring is never indistinguishable from an empty one.
    fn free_space(&self) -> usize {
        let head = self.head.load(Ordering::SeqCst);
        let tail = self.tail.load(Ordering::SeqCst);
        (if tail > head {
            tail - head
        } else {
            // Wrapped (or empty): space from head to end plus start to tail.
            (1 << self.nbits) - head + tail
        }) as usize
    }
}
impl BstreamHdr {
    /// Total object size: the header struct plus the ring capacity.
    ///
    /// NOTE(review): `size_of::<BstreamHdr>()` already includes the inline
    /// 8192-byte `buffer` field, so adding `buffer_size()` (derived from
    /// `nbits`) on top may over-count — confirm the intended object layout.
    pub fn total_size(&self) -> usize {
        std::mem::size_of::<BstreamHdr>() + self.buffer_size()
    }

    /// Addressable ring capacity in bytes (`2^nbits`).
    pub fn buffer_size(&self) -> usize {
        1usize << self.lock.lock().nbits
    }
}
|
//! VapourSynth cores.
use std::ffi::{CStr, CString, NulError};
use std::fmt;
use std::marker::PhantomData;
use std::ptr::NonNull;
use vapoursynth_sys as ffi;
use crate::api::API;
use crate::format::{ColorFamily, Format, FormatID, SampleType};
use crate::map::OwnedMap;
use crate::plugin::Plugin;
/// Contains information about a VapourSynth core.
///
/// Obtained via [`CoreRef::info`].
#[derive(Debug, Clone, Copy, Hash)]
pub struct Info {
    /// String containing the name of the library, copyright notice, core and API versions.
    pub version_string: &'static str,
    /// Version of the core.
    pub core_version: i32,
    /// Version of the API.
    pub api_version: i32,
    /// Number of worker threads.
    pub num_threads: usize,
    /// The framebuffer cache will be allowed to grow up to this size (bytes) before memory is
    /// aggressively reclaimed.
    pub max_framebuffer_size: u64,
    /// Current size of the framebuffer cache, in bytes.
    pub used_framebuffer_size: u64,
}
/// A reference to a VapourSynth core.
#[derive(Debug, Clone, Copy)]
pub struct CoreRef<'core> {
    handle: NonNull<ffi::VSCore>,
    // Ties this reference's lifetime to the owner of the core.
    _owner: PhantomData<&'core ()>,
}

// SAFETY: `CoreRef` is only a pointer to a VSCore, and the methods below call
// the VapourSynth C API which is intended to be callable from multiple
// threads. NOTE(review): based on upstream API documentation — confirm for
// the targeted API version.
unsafe impl<'core> Send for CoreRef<'core> {}
unsafe impl<'core> Sync for CoreRef<'core> {}
impl<'core> CoreRef<'core> {
    /// Wraps `handle` in a `CoreRef`.
    ///
    /// # Safety
    /// The caller must ensure `handle` is valid and API is cached.
    #[inline]
    pub(crate) unsafe fn from_ptr(handle: *mut ffi::VSCore) -> Self {
        Self {
            // SAFETY: caller guarantees `handle` is non-null and valid.
            handle: NonNull::new_unchecked(handle),
            _owner: PhantomData,
        }
    }
    /// Returns the underlying pointer.
    #[inline]
    pub(crate) fn ptr(&self) -> *mut ffi::VSCore {
        self.handle.as_ptr()
    }
    /// Returns information about the VapourSynth core.
    pub fn info(self) -> Info {
        // Pre-API-3.6 returns a pointer; API 3.6+ returns the struct by value.
        #[cfg(not(feature = "gte-vapoursynth-api-36"))]
        let raw_info = unsafe {
            // SAFETY: the core handle is valid (struct invariant) and the API
            // returns a pointer to core-owned info.
            API::get_cached()
                .get_core_info(self.handle.as_ptr())
                .as_ref()
                .unwrap()
        };
        #[cfg(feature = "gte-vapoursynth-api-36")]
        let raw_info = unsafe { &API::get_cached().get_core_info(self.handle.as_ptr()) };
        let version_string = unsafe { CStr::from_ptr(raw_info.versionString).to_str().unwrap() };
        // The C struct uses signed fields; negative values would be API bugs.
        debug_assert!(raw_info.numThreads >= 0);
        debug_assert!(raw_info.maxFramebufferSize >= 0);
        debug_assert!(raw_info.usedFramebufferSize >= 0);
        Info {
            version_string,
            core_version: raw_info.core,
            api_version: raw_info.api,
            num_threads: raw_info.numThreads as usize,
            max_framebuffer_size: raw_info.maxFramebufferSize as u64,
            used_framebuffer_size: raw_info.usedFramebufferSize as u64,
        }
    }
    /// Retrieves a registered or preset `Format` by its id. The id can be of a previously
    /// registered format, or one of the `PresetFormat`.
    #[inline]
    pub fn get_format(&self, id: FormatID) -> Option<Format<'core>> {
        let ptr = unsafe { API::get_cached().get_format_preset(id.0, self.handle.as_ptr()) };
        // A null pointer means the id was unknown.
        unsafe { ptr.as_ref().map(|p| Format::from_ptr(p)) }
    }
    /// Registers a custom video format.
    ///
    /// Returns `None` if an invalid format is described.
    ///
    /// Registering compat formats is not allowed. Only certain privileged built-in filters are
    /// allowed to handle compat formats.
    ///
    /// RGB formats are not allowed to be subsampled.
    #[inline]
    pub fn register_format(
        &self,
        color_family: ColorFamily,
        sample_type: SampleType,
        bits_per_sample: u8,
        sub_sampling_w: u8,
        sub_sampling_h: u8,
    ) -> Option<Format<'core>> {
        unsafe {
            API::get_cached()
                .register_format(
                    color_family.into(),
                    sample_type.into(),
                    i32::from(bits_per_sample),
                    i32::from(sub_sampling_w),
                    i32::from(sub_sampling_h),
                    self.handle.as_ptr(),
                )
                .as_ref()
                .map(|p| Format::from_ptr(p))
        }
    }
    /// Returns a plugin with the given identifier.
    #[inline]
    pub fn get_plugin_by_id(&self, id: &str) -> Result<Option<Plugin<'core>>, NulError> {
        // `NulError` if `id` contains an interior NUL byte.
        let id = CString::new(id)?;
        let ptr = unsafe { API::get_cached().get_plugin_by_id(id.as_ptr(), self.handle.as_ptr()) };
        if ptr.is_null() {
            Ok(None)
        } else {
            Ok(Some(unsafe { Plugin::from_ptr(ptr) }))
        }
    }
    /// Returns a plugin with the given namespace.
    ///
    /// `get_plugin_by_id()` should be used instead.
    #[inline]
    pub fn get_plugin_by_namespace(
        &self,
        namespace: &str,
    ) -> Result<Option<Plugin<'core>>, NulError> {
        let namespace = CString::new(namespace)?;
        let ptr =
            unsafe { API::get_cached().get_plugin_by_ns(namespace.as_ptr(), self.handle.as_ptr()) };
        if ptr.is_null() {
            Ok(None)
        } else {
            Ok(Some(unsafe { Plugin::from_ptr(ptr) }))
        }
    }
    /// Returns a map containing a list of all loaded plugins.
    ///
    /// Keys: meaningless unique strings;
    ///
    /// Values: namespace, identifier, and full name, separated by semicolons.
    // TODO: parse the values on the crate side and return a nice struct.
    #[inline]
    pub fn plugins(&self) -> OwnedMap<'core> {
        unsafe { OwnedMap::from_ptr(API::get_cached().get_plugins(self.handle.as_ptr())) }
    }
    /// Sets the maximum size of the framebuffer cache. Returns the new maximum size.
    #[cfg(feature = "gte-vapoursynth-api-36")]
    #[inline]
    pub fn set_max_cache_size(&self, bytes: i64) -> i64 {
        unsafe { API::get_cached().set_max_cache_size(bytes, self.handle.as_ptr()) }
    }
    /// Sets the number of worker threads for the given core.
    ///
    /// If the requested number of threads is zero or lower, the number of hardware threads will be
    /// detected and used.
    ///
    /// Returns the new thread count.
    #[cfg(feature = "gte-vapoursynth-api-36")]
    #[inline]
    pub fn set_thread_count(&self, threads: i32) -> i32 {
        unsafe { API::get_cached().set_thread_count(threads, self.handle.as_ptr()) }
    }
}
impl fmt::Display for Info {
    /// Renders the core info as a small multi-line report. The version string
    /// is emitted first, followed by the thread count and cache sizes.
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}Worker threads: {}\nMax framebuffer cache size: {}\nCurrent framebuffer cache size: {}\n",
            self.version_string,
            self.num_threads,
            self.max_framebuffer_size,
            self.used_framebuffer_size
        )
    }
}
|
use apllodb_immutable_schema_engine_infra::test_support::sqlite_database_cleaner::SqliteDatabaseCleaner;
use apllodb_server::ApllodbServer;
use apllodb_shared_components::{DatabaseName, Session};
use super::{session_with_db, Step, Steps};
/// Declarative integration-test harness: an apllodb server plus an ordered
/// list of SQL steps to run against it.
#[derive(Debug, Default)]
pub struct SqlTest {
    server: ApllodbServer,
    steps: Vec<Step>,
}
impl SqlTest {
    /// Appends one step to the scenario.
    ///
    /// NOTE: do not pass database commands like "CREATE DATABASE" /
    /// "USE DATABASE" / ... — the database is automatically created and used
    /// in run().
    pub fn add_step(mut self, step: Step) -> Self {
        self.steps.push(step);
        self
    }

    /// Appends every step of a predefined sequence.
    #[allow(dead_code)] // seemingly every tests/*.rs must call this func not to be `dead_code`
    pub fn add_steps(mut self, steps: Steps) -> Self {
        let steps: Vec<Step> = steps.into();
        self.steps.extend(steps);
        self
    }

    /// Runs all steps against a freshly-created random database, which is
    /// cleaned up when the test ends.
    #[allow(dead_code)]
    pub async fn run(self) {
        let db_name = DatabaseName::random();
        // Dropped at end of scope, removing the SQLite database file.
        let _db_cleaner = SqliteDatabaseCleaner::new(db_name.clone());
        let mut session = Session::from(session_with_db(&self.server, db_name).await);
        for step in &self.steps {
            session = step.run(&self.server, session).await;
        }
    }

    /// Runs all steps without creating a database — steps themselves must
    /// issue any database commands.
    #[allow(dead_code)]
    pub async fn run_with_manual_db_control(self) {
        let mut session = Session::default();
        for step in &self.steps {
            session = step.run(&self.server, session).await;
        }
    }
}
|
use nostalgia::{Key, Record};
use nostalgia_derive::Storable;
use serde::{Deserialize, Serialize};
/// Minimal struct exercising `#[derive(Storable)]` with a `#[key = "id"]`
/// attribute; successful compilation is the test.
#[derive(Storable, Serialize, Deserialize)]
#[key = "id"]
struct Thing {
    id: u32,
}

// Nothing to run — this is a compile-pass test.
fn main() {}
|
pub mod p0001;
pub mod p0002;
pub mod p0003;
pub mod p0004;
pub mod p0005;
pub mod p0006;
pub mod primes;
|
#![deny(warnings)]
#[macro_use]
extern crate serde_derive;
extern crate futures;
extern crate hyper;
extern crate hyper_rustls;
extern crate rustls;
extern crate tls_client;
extern crate tokio_core;
extern crate serde_json;
use futures::future;
use futures::Stream;
use hyper::rt::Future;
use hyper::{Body, Request, Uri};
use std::env;
use std::str::FromStr;
use std::str;
use tls_client::start_client;
// the same as AdvancedAsynchronousMonotonicCounter
// just for putting it into HTTP body as json format
/// JSON mirror of the server-side counter type.
#[derive(Serialize, Deserialize, Debug)]
pub struct AdvancedAsynchronousMonotonicCounter2 {
    pub current: usize, // current value, initialized with 0, strictly increasing, cannot be rolled back
    // need more fields
    // ToDo
}

/// Request body sent to the server.
#[derive(Serialize, Deserialize, Debug)]
pub struct JsonToServer {
    pub key: usize,
    // Last counter value the client observed.
    pub previous: usize,
}

/// Response body received from the server.
#[derive(Serialize, Deserialize, Debug)]
pub struct JsonFromServer {
    pub key: usize,
    pub counter: AdvancedAsynchronousMonotonicCounter2,
}
/// Usage hint printed whenever a mandatory argument is missing.
const USAGE: &str = "Usage: aamcs_client url certificate private_key ca_store";

/// Reads positional argument `n`.
///
/// On success, prints "`label`: path" and returns the path. On absence, prints
/// "Please provide `missing`" plus the usage string and returns `None`.
fn path_arg(n: usize, label: &str, missing: &str) -> Option<String> {
    match env::args().nth(n) {
        Some(ref path) => {
            println!("{}: {}", label, path);
            Some(path.to_owned())
        }
        None => {
            println!("Please provide {}", missing);
            println!("{}", USAGE);
            None
        }
    }
}

/// Sends a JSON GET request over mutual TLS and prints the response.
///
/// Arguments: URL, client certificate, client private key, Root-CA store —
/// all mandatory. (Argument parsing was previously four near-identical match
/// blocks; it is now factored through `path_arg`, output unchanged.)
fn main() {
    // First parameter is target URL (mandatory).
    let url = match env::args().nth(1) {
        Some(ref url) => Uri::from_str(url).expect("well-formed URI"),
        None => {
            println!("{}", USAGE);
            return;
        }
    };
    // Second parameter is client certificate (mandatory).
    let cert = match path_arg(2, "client certificate", "certificate") {
        Some(path) => path,
        None => return,
    };
    // Third parameter is client private key (mandatory).
    let rsa = match path_arg(3, "private key", "private_key") {
        Some(path) => path,
        None => return,
    };
    // Fourth parameter is custom Root-CA store (mandatory).
    let ca = match path_arg(4, "Root-CA store", "Root-CA store") {
        Some(path) => path,
        None => return,
    };
    // type `hyper::Client<hyper_rustls::HttpsConnector<hyper::client::HttpConnector>>`
    let client = start_client(&cert, &rsa, &ca);
    // a GET request
    // type `hyper::client::ResponseFuture`
    let get = client
        .request(
            Request::get(url.clone())
                .header("Content-Type", "application/json")
                .body({
                    let json_to_server: JsonToServer = JsonToServer {
                        key: 0,
                        previous: 0,
                    };
                    let serialized = serde_json::to_string(&json_to_server).unwrap();
                    Body::from(serialized)
                }).unwrap(),
        ).and_then(|res| {
            println!("received a response :");
            println!("Status: {}", res.status());
            println!("Headers:\n{:#?}", res.headers());
            // Collect the streamed body into a single chunk.
            res.into_body().concat2()
        }).and_then(|body| {
            println!("Body:\n{}", str::from_utf8(&body).unwrap());
            println!("\n");
            future::ok(())
        });
    let mut core = tokio_core::reactor::Core::new().unwrap();
    if let Err(err) = core.run(get) {
        println!("FAILED: {}", err);
        std::process::exit(1)
    }
}
|
// This file is part of rdma-core. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT. No part of rdma-core, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2016 The developers of rdma-core. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/rdma-core/master/COPYRIGHT.
/// Static rate codes, mirroring `enum ibv_rate` from `<infiniband/verbs.h>`.
///
/// The discriminants are the protocol's rate encodings, which is why they are
/// not in ascending rate order (e.g. 10 Gbps = 3 but 30 Gbps = 4).
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum ibv_rate
{
	/// Maximum/unlimited rate.
	IBV_RATE_MAX = 0,
	IBV_RATE_2_5_GBPS = 2,
	IBV_RATE_5_GBPS = 5,
	IBV_RATE_10_GBPS = 3,
	IBV_RATE_20_GBPS = 6,
	IBV_RATE_30_GBPS = 4,
	IBV_RATE_40_GBPS = 7,
	IBV_RATE_60_GBPS = 8,
	IBV_RATE_80_GBPS = 9,
	IBV_RATE_120_GBPS = 10,
	IBV_RATE_14_GBPS = 11,
	IBV_RATE_56_GBPS = 12,
	IBV_RATE_112_GBPS = 13,
	IBV_RATE_168_GBPS = 14,
	IBV_RATE_25_GBPS = 15,
	IBV_RATE_100_GBPS = 16,
	IBV_RATE_200_GBPS = 17,
	IBV_RATE_300_GBPS = 18,
}
|
use crate::{
define_node_command, get_set_swap,
scene::commands::{Command, SceneContext},
};
use rg3d::{
core::{color::Color, pool::Handle},
resource::texture::Texture,
scene::{graph::Graph, node::Node},
};
// Each invocation generates an undo/redo command struct that swaps a sprite
// property with a stored value via the node's getter/setter pair.
define_node_command!(SetSpriteSizeCommand("Set Sprite Size", f32) where fn swap(self, node) {
    get_set_swap!(self, node.as_sprite_mut(), size, set_size);
});
define_node_command!(SetSpriteRotationCommand("Set Sprite Rotation", f32) where fn swap(self, node) {
    get_set_swap!(self, node.as_sprite_mut(), rotation, set_rotation);
});
define_node_command!(SetSpriteColorCommand("Set Sprite Color", Color) where fn swap(self, node) {
    get_set_swap!(self, node.as_sprite_mut(), color, set_color);
});
define_node_command!(SetSpriteTextureCommand("Set Sprite Texture", Option<Texture>) where fn swap(self, node) {
    get_set_swap!(self, node.as_sprite_mut(), texture, set_texture);
});
|
//! The index holds all the data presented to the gui, and all read operations are directed here.
//! Whenever something changes in the repository, the change is reflected in the index.
use notify::{DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher};
use std::path::Path;
use std::sync::mpsc::channel;
use std::time::Duration;
/// Holds all the data presented to the gui; all read operations are
/// directed here.
pub struct Index {
    // Configuration this index was created with.
    config: Config
}
impl Index {
    /// Creates an index backed by the given configuration.
    pub fn new(config: Config) -> Index {
        Index { config }
    }
    /// Grabs a record from the index.
    ///
    /// NOTE(review): currently a stub — it performs no work yet.
    pub fn fetch(&self) {
    }
}
fn watcher() -> notify::Result<()> {
let (tx, rx) = channel();
let watch_delay = Duration::from_secs(2);
let mut watcher: RecommendedWatcher = Watcher::new(tx, watch_delay)?;
watcher.watch(repo.to_string(), RecursiveMode::Recursive);
loop {
match rx.recv() {
Err(error) => println!("watch error: {:?}", error),
Ok(event) => match event {
DebouncedEvent::NoticeWrite(path) => None,
DebouncedEvent::NoticeRemove(path) => None,
DebouncedEvent::Create(path) => readyaml(path),
DebouncedEvent::Write(path) => readyaml(path),
DebouncedEvent::Chmod(path) => readyaml(path),
DebouncedEvent::Remove(path) => readyaml(path),
DebouncedEvent::Rename(from, dest) => {
readyaml(from);
readyaml(dest);
},
DebouncedEvent::Rescan() => None,
DebouncedEvent::Error(error, path) => None
}
}
}
}
|
use ansi_colors::*;
fn main(){
let mut str1 = ColouredStr::new("string1sdaasdsdasda\nsdasdasdasd\nadasds");
str1.green();
str1.bold();
str1.back_black();
println!("{}",str1);
}
|
use super::Pattern;
/// Returns `true` when `s` begins with the Life 1.06 header line
/// (`#Life 1.06`).
pub fn is_life_106_file<S: AsRef<str>>(s: &S) -> bool {
    let text = s.as_ref();
    text.starts_with("#Life 1.06")
}
/// Parses a Life 1.06 file into a `Pattern`.
///
/// The first line (the `#Life 1.06` header) is skipped; every remaining
/// non-empty line must contain two whitespace-separated integers, the x and
/// y coordinates of one live cell.
///
/// # Errors
///
/// Returns a description of the first malformed line: a missing coordinate
/// or a coordinate that does not parse as a number. (The previous version
/// reported a y parse failure as "could not parse x".)
pub fn parse_life_106_file<S: AsRef<str>>(s: &S) -> Result<Pattern, String> {
    let s = s.as_ref();
    // Skip first line, because it is the header.
    let lines = s.lines().skip(1);
    let mut pattern = Pattern::default();
    for line in lines.filter(|s| !s.is_empty()) {
        let mut line_split = line.split_whitespace();
        let x = line_split
            .next()
            .ok_or_else(|| format!("could not find x in `{}`", line))?;
        let y = line_split
            .next()
            .ok_or_else(|| format!("could not find y in `{}`", line))?;
        let x = x
            .trim()
            .parse()
            .map_err(|_| format!("could not parse x as number: `{}`", x))?;
        let y = y
            .trim()
            .parse()
            .map_err(|_| format!("could not parse y as number: `{}`", y))?;
        pattern.cells.push((x, y));
    }
    Ok(pattern)
}
|
use bitflags::bitflags;
use crate::uses::*;
use crate::mem::VirtRange;
/// A loadable region of an ELF image, derived from one PT_LOAD program
/// header (see [`ElfParser::program_headers`]).
#[derive(Debug)]
pub struct Section<'a>
{
	/// Aligned virtual address range the section occupies.
	pub virt_range: VirtRange,
	/// File bytes to copy into the range, or `None` when the segment has no
	/// file data (zero-fill / BSS-like segment).
	pub data: Option<&'a [u8]>,
	// data virtual offset from start of virt_range
	pub data_offset: usize,
	/// Read/write/execute permissions from the program header.
	pub flags: PHdrFlags,
}
/// Zero-copy parser over an ELF executable image held in memory: all
/// accessors borrow directly from the original byte slice.
#[derive(Debug)]
pub struct ElfParser<'a>
{
	/// The raw bytes of the entire ELF file.
	data: &'a [u8],
	/// The file header, overlaid on the start of `data`.
	elf_header: &'a ElfHeader,
	/// The program header table referenced by the file header.
	program_headers: &'a [ProgramHeader],
}
impl<'a> ElfParser<'a>
{
	/// Validates `data` as a supported ELF image (see [`ElfHeader::check`])
	/// and borrows its file header and program header table in place.
	pub fn new(data: &[u8]) -> Result<ElfParser, Err>
	{
		let elf_header = ElfHeader::new(data)?;
		elf_header.check()?;
		// Locate the program header table described by the file header.
		let phdr = elf_header.program_header;
		let phdr_len = elf_header.phdr_len as usize;
		// TODO: figure out if it matters that alignment requirements might not be met
		// (probably not on x86)
		let program_headers = Self::extract_slice(data, phdr, phdr_len)
			.ok_or_else(|| Err::new("invalid program headers"))?;
		Ok(ElfParser {
			data,
			elf_header,
			program_headers,
		})
	}
	/// Converts every loadable (`P_TYPE_LOAD`) program header with a
	/// nonzero memory size into a [`Section`].
	///
	/// NOTE(review): segments whose file data range falls outside `data`
	/// are silently skipped (`continue`) rather than reported — confirm
	/// that is intended.
	pub fn program_headers(&self) -> Vec<Section>
	{
		let mut out = Vec::new();
		for header in self.program_headers.iter() {
			if header.ptype == P_TYPE_LOAD {
				if header.p_memsz == 0 {
					continue;
				}
				let virt_range =
					VirtRange::new_unaligned(VirtAddr::new(header.p_vaddr as u64), header.p_memsz);
				let virt_range_aligned = virt_range.aligned();
				// p_filesz == 0 means the segment has no file data (zero-fill).
				let data = if header.p_filesz == 0 {
					None
				} else {
					Some(match self.extract(header.p_offset, header.p_filesz) {
						Some(data) => data,
						None => continue,
					})
				};
				out.push(Section {
					virt_range: virt_range_aligned,
					data,
					// Offset of the unaligned start inside the aligned range.
					data_offset: virt_range.as_usize() - virt_range_aligned.as_usize(),
					flags: header.flags,
				});
			}
		}
		out
	}
	/// The program entry point virtual address from the ELF header.
	pub fn entry_point(&self) -> usize
	{
		self.elf_header.entry
	}
	/// Reinterprets `len` values of `T` starting at byte `index` of `data`,
	/// or `None` if that byte range is out of bounds.
	///
	/// NOTE(review): `index + len * size_of::<T>()` can overflow on
	/// untrusted input — consider `checked_mul`/`checked_add`.
	fn extract_slice<T>(data: &[u8], index: usize, len: usize) -> Option<&[T]>
	{
		let slice = data.get(index..(index + len * size_of::<T>()))?;
		let ptr = slice.as_ptr() as *const T;
		// SAFETY: the byte range was bounds-checked by `get` above; alignment
		// is assumed sufficient (see the TODO in `new`).
		unsafe { Some(core::slice::from_raw_parts(ptr, len)) }
	}
	/// Same as [`extract_slice`], but reads from this parser's own data.
	fn extract<T>(&self, index: usize, len: usize) -> Option<&[T]>
	{
		let slice = self.data.get(index..(index + len * size_of::<T>()))?;
		let ptr = slice.as_ptr() as *const T;
		// SAFETY: bounds-checked above; alignment assumed (see `new`).
		unsafe { Some(core::slice::from_raw_parts(ptr, len)) }
	}
}
// NOTE: this only applies to little endian architectures
const ELF_MAGIC: u32 = 0x464c457f;
// `ElfHeader::bits` values.
const BIT_32: u8 = 1;
const BIT_64: u8 = 2;
// `ElfHeader::endianness` values.
const LITTLE_ENDIAN: u8 = 1;
const BIG_ENDIAN: u8 = 2;
// `ElfHeader::abi` value.
const SYSTEM_V_ABI: u8 = 0;
// `ElfHeader::info` (object file kind) values.
const RELOCATABLE: u16 = 1;
const EXECUTABLE: u16 = 2;
const SHARED: u16 = 3;
const CORE: u16 = 4;
// `ElfHeader::arch` value for x86-64.
const X64: u16 = 0x3e;
/// In-memory view of the 64-bit ELF file header.
///
/// `repr(C, packed)` so the struct can be overlaid directly on the file
/// bytes (see [`ElfHeader::new`]); field order matches the on-disk layout.
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct ElfHeader
{
	// elf magic number
	magic: u32,
	// 1: 32 bits
	// 2: 64 bits
	bits: u8,
	// 1: little endian
	// 2: big endian
	endianness: u8,
	header_version: u8,
	// 0 for system V
	abi: u8,
	unused: u64,
	// 1: relocatable
	// 2: executable
	// 3: shared
	// 4: core
	info: u16,
	arch: u16,
	elf_version: u32,
	// program entry point virtual address
	entry: usize,
	// byte offset of the program header table within the file
	program_header: usize,
	// byte offset of the section header table within the file
	section_header: usize,
	flags: u32,
	header_size: u16,
	// size of one program header entry
	phdr_entry_size: u16,
	// number of program header entries
	phdr_len: u16,
	// size of one section header entry
	shdr_entry_size: u16,
	// number of section header entries
	shdr_len: u16,
	shdr_names_index: u16,
}
impl ElfHeader
{
	/// Borrows the first `size_of::<ElfHeader>()` bytes of `data` as a
	/// header, failing if `data` is too short.
	fn new(data: &[u8]) -> Result<&Self, Err>
	{
		if data.len() < size_of::<Self>() {
			return Err(Err::new("invalid elf header"));
		}
		// SAFETY: the length was checked above, and `repr(C, packed)` makes
		// field offsets match the file layout. The cast assumes `data` is
		// adequately aligned for `ElfHeader` (see the alignment TODO in
		// `ElfParser::new`).
		unsafe { Ok((data.as_ptr() as *const Self).as_ref().unwrap()) }
	}
	/// Verifies this header describes a binary the loader supports: an x64
	/// little-endian System V executable whose program header entries have
	/// the size this parser expects.
	fn check(&self) -> Result<(), Err>
	{
		if self.magic != ELF_MAGIC {
			Err(Err::new("Binary is not ELF"))
		} else if self.bits != BIT_64 || self.endianness != LITTLE_ENDIAN || self.arch != X64 {
			Err(Err::new("Binary is not an x64 binary"))
		} else if self.abi != SYSTEM_V_ABI {
			Err(Err::new("Binary does not use system V abi"))
		} else if self.info != EXECUTABLE {
			Err(Err::new("Binary is not an executable"))
		} else if self.phdr_entry_size as usize != size_of::<ProgramHeader>() {
			Err(Err::new("Invalid ELF program header sizes"))
		} else {
			Ok(())
		}
	}
}
// `ProgramHeader::ptype` values (segment kinds).
const P_TYPE_NULL: u32 = 0;
const P_TYPE_LOAD: u32 = 1;
const P_TYPE_DYNAMIC: u32 = 2;
const P_TYPE_INTERP: u32 = 3;
const P_TYPE_NOTE: u32 = 4;
bitflags! {
	/// Segment permission flags from a program header.
	pub struct PHdrFlags: u32
	{
		const EXECUTABLE = 1;
		const WRITABLE = 2;
		const READABLE = 4;
	}
}
impl PHdrFlags
{
	/// True if the segment may be read.
	pub fn readable(&self) -> bool
	{
		self.contains(Self::READABLE)
	}
	/// True if the segment may be written.
	pub fn writable(&self) -> bool
	{
		self.contains(Self::WRITABLE)
	}
	/// True if the segment may be executed.
	pub fn executable(&self) -> bool
	{
		self.contains(Self::EXECUTABLE)
	}
}
/// One entry of the ELF program header table (64-bit layout), overlaid on
/// the file bytes (`repr(C, packed)`).
#[derive(Debug, Clone, Copy)]
#[repr(C, packed)]
struct ProgramHeader
{
	// segment kind; see the P_TYPE_* constants
	ptype: u32,
	// read/write/execute permissions
	flags: PHdrFlags,
	// byte offset of the segment's data within the file
	p_offset: usize,
	// virtual address the segment is loaded at
	p_vaddr: usize,
	unused: usize,
	// number of bytes of segment data stored in the file
	p_filesz: usize,
	// number of bytes the segment occupies in memory
	p_memsz: usize,
	align: usize,
}
|
extern crate slow_primes;
/// Returns the prime factorization of `input` (with multiplicity), packed
/// into a fixed-size array and padded with trailing zeroes.
///
/// If `input` is itself prime (per Miller-Rabin), the array contains just
/// `input` followed by zeroes; otherwise the factors are found by trial
/// division against every prime up to `input`.
///
/// # Panics
///
/// Indexing panics if `input` has more than 20 prime factors counted with
/// multiplicity (the smallest such input is 2^21).
pub fn factor(input: usize) -> [usize; 20] {
    let mut number = input;
    let prime_list = slow_primes::Primes::sieve(number);
    let mut divisors = [0usize; 20];
    let mut n = 0;
    if slow_primes::is_prime_miller_rabin(number as u64) {
        // A prime's only factor is itself.
        divisors[n] = number;
    } else {
        for p in prime_list.primes() {
            // Divide out each prime as many times as it divides evenly.
            while number % p == 0 {
                number /= p;
                divisors[n] = p;
                n += 1;
            }
        }
    }
    divisors
}
/// Returns `true` when `input` has exactly two prime factors counted with
/// multiplicity (a semiprime), e.g. 4 (2*2), 6 (2*3), 15 (3*5).
///
/// Relies on [`factor`] padding its result array with zeroes: the number of
/// factors is the length of the leading non-zero prefix.
pub fn prime_with_two_factors(input: usize) -> bool {
    let divisors = factor(input);
    let count = divisors.iter().take_while(|&&d| d != 0).count();
    count == 2
}
|
use std::boxed::Box;
use std::io::BufReader;
use std::fs::File;
use std::path::PathBuf;
use std::collections::HashMap;
use std::io::Read;
use std::io::Cursor;
use rodio::Device;
use rodio::Sink;
use rodio::Source;
/// Identifier for each sound asset loaded by `load_sounds`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum Sound {
    /// "tap-muted.wav" in the sounds directory.
    TapMuted,
    /// "holst.mp3" in the sounds directory.
    Holst,
}
lazy_static! {
    // Default audio output device, opened once on first use.
    static ref SOUND_DEVICE: Device = rodio::default_output_device().unwrap();
    // Raw bytes of every sound file, loaded from disk once on first use.
    static ref SOUNDS: HashMap<Sound, Vec<u8>> = load_sounds();
}
/// Loads every known sound effect from the `sounds` directory into memory.
///
/// # Panics
///
/// Panics if the directory cannot be located or a file cannot be opened or
/// read — sounds are required assets, so failing fast at first use is
/// intentional. (The previous version silently ignored the `read_to_end`
/// result, which could leave a truncated/empty buffer on read errors.)
fn load_sounds() -> HashMap<Sound, Vec<u8>> {
    let sounds_dir = find_folder::Search::ParentsThenKids(3, 3).for_folder("sounds").unwrap();
    let mut sounds: HashMap<Sound, Vec<u8>> = HashMap::new();
    for (sound, filename) in [
        (Sound::TapMuted, "tap-muted.wav"),
        (Sound::Holst, "holst.mp3"),
    ].iter() {
        let mut bytes: Vec<u8> = Vec::new();
        File::open(sounds_dir.join(filename))
            .unwrap()
            .read_to_end(&mut bytes)
            .expect("failed to read sound file");
        sounds.insert(*sound, bytes);
    }
    sounds
}
/// Plays `sound` once at the given volume on the default output device.
///
/// The sink is detached, so playback continues in the background and this
/// function returns immediately.
pub fn play_sound(sound: Sound, volume: f32) {
    // Decode from the preloaded in-memory bytes; panics if `sound` was not
    // loaded or the bytes are not a decodable audio stream.
    let file: Cursor<&[u8]> = Cursor::new(&SOUNDS[&sound]);
    let source = rodio::Decoder::new(BufReader::new(file)).unwrap();
    let sink = Sink::new(&SOUND_DEVICE);
    sink.set_volume(volume);
    sink.append(source);
    sink.play();
    sink.detach();
}
use std::ops::Deref;
use std::thread;
use crossbeam::queue::ArrayQueue;
use reqwest::header::{HeaderMap, HeaderName, HeaderValue, USER_AGENT};
use serde_json::{Map, Value};
use crate::alpaca::account::APCA_SECRET_KEY;
use crate::alpaca::account::{Account, APCA_API_KEY};
use crate::alpaca::positions::{Portfolio, Position};
use crate::alpaca::tick_data::TickData;
use crate::appconfig::{AlpacaConfig, AppConfig};
use std::sync::{Arc, RwLock};
use std::time::Duration;
impl AlpacaConfig {
    /// Builds the default HTTP headers for Alpaca API requests: a user
    /// agent plus the API key/secret authentication headers.
    ///
    /// # Panics
    ///
    /// Panics if the configured key or secret contains characters invalid
    /// in an HTTP header value.
    fn headers(&self) -> HeaderMap {
        let mut headers = HeaderMap::new();
        headers.insert(USER_AGENT, HeaderValue::from_static("reqwest"));
        headers.insert(
            HeaderName::from_static(APCA_API_KEY),
            HeaderValue::from_str(self.key.as_str()).unwrap(),
        );
        headers.insert(
            HeaderName::from_static(APCA_SECRET_KEY),
            HeaderValue::from_str(self.secret.as_str()).unwrap(),
        );
        headers
    }
}
impl Account {
    /// Spawns a background thread that fetches the account from Alpaca's
    /// "/v2/account" endpoint and replaces the value behind
    /// `account_rw_lock` with the response.
    ///
    /// The HTTP client is built (and may panic) on the caller's thread;
    /// request/deserialization failures panic only inside the worker thread.
    pub fn read_alpaca_data(account_rw_lock: Arc<RwLock<Account>>, app_config: AppConfig) {
        let client: reqwest::blocking::Client = reqwest::blocking::Client::builder()
            .default_headers(app_config.alpaca_config.headers())
            .build()
            .unwrap();
        let url = format!("{}{}", app_config.alpaca_config.url, "/v2/account");
        thread::spawn(move || {
            let res = client.get(&url).send().unwrap();
            let account: Account = res.json().unwrap();
            let mut account_guard = account_rw_lock.write().unwrap();
            *account_guard = account;
        });
    }
}
impl Portfolio {
    /// Spawns a background thread that fetches the open positions from
    /// Alpaca and pushes the resulting `Portfolio` onto `queue`.
    ///
    /// Fix: the previous version requested "/v2/account", but the response
    /// is parsed as a JSON array of positions — which is what
    /// "/v2/positions" returns; the account endpoint returns a single
    /// object and would fail deserialization.
    pub fn read_alpaca_data(queue: Arc<ArrayQueue<Portfolio>>, app_config: AppConfig) {
        let client: reqwest::blocking::Client = reqwest::blocking::Client::builder()
            .default_headers(app_config.alpaca_config.headers())
            .build()
            .unwrap();
        let url = format!("{}{}", app_config.alpaca_config.url, "/v2/positions");
        thread::spawn(move || {
            let res = client.get(&url).send().unwrap();
            let portfolio_json: Vec<Map<String, Value>> = res.json().unwrap();
            println!("Portfolio data: {:?}", portfolio_json);
            let portfolio = Portfolio::from_alpaca_json_map(portfolio_json);
            // `ArrayQueue::push` fails when the queue is full; dropping the
            // snapshot is acceptable, but not silently.
            if queue.push(portfolio).is_err() {
                println!("portfolio queue full; dropping snapshot");
            }
        });
    }
    /// Builds a `Portfolio` from a list of JSON objects, one per position.
    pub fn from_alpaca_json_map(vecs: Vec<serde_json::Map<String, Value>>) -> Portfolio {
        let positions = vecs
            .iter()
            .map(|m| Position::from_alpaca_json_map(m.clone()))
            .collect();
        Portfolio { positions }
    }
}
impl Position {
    /// Builds a `Position` from one JSON object as returned by the Alpaca
    /// positions API. Numeric fields arrive as JSON strings (e.g.
    /// `"qty": "5"`) and are parsed into their target types.
    ///
    /// # Panics
    ///
    /// Panics if a field is missing, is not a JSON string, or fails to
    /// parse.
    pub fn from_alpaca_json_map(map: serde_json::Map<String, Value>) -> Position {
        // Fetches `key` as a plain string. (`Value::to_string` would keep
        // the surrounding JSON quotes — `"AAPL"` became "\"AAPL\"" in the
        // previous version — so go through `as_str` instead.)
        fn string_field(map: &serde_json::Map<String, Value>, key: &str) -> String {
            map.get(key)
                .unwrap_or_else(|| panic!("missing field `{}`", key))
                .as_str()
                .unwrap_or_else(|| panic!("field `{}` is not a JSON string", key))
                .to_string()
        }
        // Fetches `key` as a string and parses it into `T`.
        fn parsed_field<T>(map: &serde_json::Map<String, Value>, key: &str) -> T
        where
            T: std::str::FromStr,
            T::Err: std::fmt::Debug,
        {
            string_field(map, key)
                .parse()
                .unwrap_or_else(|e| panic!("field `{}` failed to parse: {:?}", key, e))
        }
        Position {
            symbol: string_field(&map, "symbol"),
            asset_class: string_field(&map, "asset_class"),
            avg_entry_price: parsed_field(&map, "avg_entry_price"),
            qty: parsed_field(&map, "qty"),
            side: parsed_field(&map, "side"),
            market_value: parsed_field(&map, "market_value"),
            cost_basis: parsed_field(&map, "cost_basis"),
            unrealized_pl: parsed_field(&map, "unrealized_pl"),
            unrealized_plpc: parsed_field(&map, "unrealized_plpc"),
            unrealized_intraday_pl: parsed_field(&map, "unrealized_intraday_pl"),
            unrealized_intraday_plpc: parsed_field(&map, "unrealized_intraday_plpc"),
            current_price: parsed_field(&map, "current_price"),
            lastday_price: parsed_field(&map, "lastday_price"),
            change_today: parsed_field(&map, "change_today"),
        }
    }
    /// Parses a single position from a JSON string.
    ///
    /// # Panics
    ///
    /// Panics on invalid JSON or on any field error (see
    /// [`Position::from_alpaca_json_map`]).
    pub fn from_alpaca_json(str: &str) -> Position {
        let map = serde_json::from_str(str).unwrap();
        Position::from_alpaca_json_map(map)
    }
}
/// Exercises `Position::from_alpaca_json_map` against a sample payload in
/// the shape of one element of the Alpaca positions response.
///
/// Ignored by default: this is a scratch test that prints instead of
/// asserting.
#[test]
#[ignore]
fn test_deserialize_position() {
    let str = r##"
    {
      "asset_id": "904837e3-3b76-47ec-b432-046db621571b",
      "symbol": "AAPL",
      "exchange": "NASDAQ",
      "asset_class": "us_equity",
      "avg_entry_price": "100.0",
      "qty": "5",
      "side": "long",
      "market_value": "600.0",
      "cost_basis": "500.0",
      "unrealized_pl": "100.0",
      "unrealized_plpc": "0.20",
      "unrealized_intraday_pl": "10.0",
      "unrealized_intraday_plpc": "0.0084",
      "current_price": "120.0",
      "lastday_price": "119.0",
      "change_today": "0.0084"
    }
    "##;
    let value: f64 = "100.0".parse().unwrap();
    println!("{:?}", value);
    //let map2 = serde_json::Map<String, Value>();
    //map2.insert("test", Value::from("100.0"));
    //println!("{:?}", position);
    let map: Map<String, Value> = serde_json::from_str(str).unwrap();
    let position = Position::from_alpaca_json_map(map);
    println!("{:?}", position);
}
/// Deserializes a one-position payload and builds a `Portfolio` from it.
/// (A redundant duplicate `from_alpaca_json_map(positions.clone())` call,
/// whose result was discarded, has been removed.)
#[test]
fn test_deserialize_portfolio() {
    let str = r##"
    [
    {
      "asset_id": "904837e3-3b76-47ec-b432-046db621571b",
      "symbol": "AAPL",
      "exchange": "NASDAQ",
      "asset_class": "us_equity",
      "avg_entry_price": "100.0",
      "qty": "5",
      "side": "long",
      "market_value": "600.0",
      "cost_basis": "500.0",
      "unrealized_pl": "100.0",
      "unrealized_plpc": "0.20",
      "unrealized_intraday_pl": "10.0",
      "unrealized_intraday_plpc": "0.0084",
      "current_price": "120.0",
      "lastday_price": "119.0",
      "change_today": "0.0084"
    }
    ]
    "##;
    let positions: Vec<Map<String, Value>> = serde_json::from_str(str).unwrap();
    let portfolio = Portfolio::from_alpaca_json_map(positions);
    println!("positions after deserialization {:?}", portfolio);
}
|
//! Sync your Org with your favorite applications.
//!
//! > This project is still in *alpha stage*. Don't forget to backup your
//! > orgmode files before trying!
//!
//! # Installation
//!
//! ```text
//! $ cargo install orgize-sync
//! ```
//!
//! # Subcommand
//!
//! ## `init`
//!
//! Initializes a new configuration file
//!
//! ```text
//! USAGE:
//! orgize-sync init [FLAGS]
//!
//! FLAGS:
//! -h, --help Prints help information
//! -V, --version Prints version information
//! -v, --verbose Increases verbosity
//! ```
//!
//! ## `conf`
//!
//! Prints your configuration file
//!
//! ```text
//! USAGE:
//! orgize-sync conf [FLAGS] [OPTIONS]
//!
//! FLAGS:
//! -h, --help Prints help information
//! -V, --version Prints version information
//! -v, --verbose Increases verbosity
//!
//! OPTIONS:
//! -c, --conf-path <conf-path> Path to configuration file
//! ```
//!
//! ## `sync`
//!
//! Synchronizes org files
//!
//! ```text
//! USAGE:
//! orgize-sync sync [FLAGS] [OPTIONS]
//!
//! FLAGS:
//! -h, --help Prints help information
//! --skip-google-calendar Skips Google Calendar synchronization
//! --skip-toggl Skips Toggl synchronization
//! -V, --version Prints version information
//! -v, --verbose Increases verbosity
//!
//! OPTIONS:
//! -c, --conf-path <conf-path> Path to configuration file
//! ```
//!
//! # Configuration
//!
//! + [General](#general)
//! + [Global](#global)
//! + [Pre-file](#pre-file)
//! + [Google Calendar](#google-calendar)
//! + [Global](#global-1)
//! + [Pre-file](#pre-file-1)
//! + [Toggl](#toggl)
//! + [Global](#global-2)
//! + [Pre-file](#pre-file-2)
//!
//! ## General
//!
//! ### Global
//!
//! ```javascript
//! {
//! // Path to dotenv file.
//! // The default is "${UserCacheDir}/orgize-sync/.env".
//! "env_path": "./.env"
//! }
//! ```
//!
//! ### Pre-file
//!
//! ```javascript
//! {
//! "files": [
//! {
//! // Specifies the name for this orgmode file. Optional.
//! "name": "note",
//! // Specifies the path to orgmode file. Required.
//! "path": "./notes.org"
//! }
//! ]
//! }
//! ```
//!
//! ## Google Calendar
//!
//! ### Global
//!
//! ```javascript
//! {
//! "google_calendar": {
//! // Google OAuth client id. Required.
//! // Specifying here or by setting the "GOOGLE_CLIENT_ID" environment variable.
//! "client_id": "xxx",
//! // Google OAuth client secret. Required.
//! // Specifying here or by setting the "GOOGLE_CLIENT_SECRET" environment variable.
//! "client_secret": "xxx",
//! // Redirect url after authorizing.
//! // The default is "http://localhost"
//! "redirect_uri": "",
//! // Path to store the access token and refresh token.
//! // The default is "${UserCacheDir}/orgize-sync".
//! "token_dir": "",
//! // The default is "google-token.json".
//! "token_filename": ""
//! }
//! }
//! ```
//!
//! ### Pre-file
//!
//! ```javascript
//! {
//! "files": [
//! {
//! "google-calendar": {
//! // Which calendar to sync. Required.
//! "calendar": "",
//! // Whether to append new calendar event to the org mode.
//! // The default is true.
//! "append_new": false,
//! // Where to append new calendar event.
//! // The default is "Sync".
//! "append_headline": "New Headline",
//! // Which property to store event id.
//! // The default is "EVENT_ID".
//! "property": "EVENT_ID",
//! // Number of days to filter headline before today.
//! // The default is 7.
//! "up_days": 1,
//! // Number of days to filter headline after today.
//! // The default is 7.
//! "down_days": 1
//! }
//! }
//! ]
//! }
//! ```
//!
//! ## Toggl
//!
//! ### Global
//!
//! ```javascript
//! {
//! "toggl": {
//! // Toggl Api Token. Required.
//! // Specifying here or by setting the "TOGGL_API_TOKEN" environment variable.
//! "api_token": "xxx"
//! }
//! }
//! ```
//!
//! ### Pre-file
//!
//! ```javascript
//! {
//! "files": [
//! {
//! "toggl": {
//! // Number of days to filter headline before today.
//! // The default is 7.
//! "up_days": 1,
//! // Number of days to filter headline after today.
//! // The default is 7.
//! "down_days": 1
//! }
//! }
//! ]
//! }
//! ```
mod conf;
mod error;
#[cfg(feature = "google_calendar")]
mod google;
mod logger;
#[cfg(feature = "toggl")]
mod toggl;
use log::LevelFilter;
use std::io::stdout;
use std::path::PathBuf;
use std::process;
use structopt::StructOpt;
use crate::{conf::Conf, error::Result};
// Top-level CLI definition. NOTE: `///` doc comments inside StructOpt items
// become the generated --help text, so review notes here use plain `//`
// comments to avoid changing CLI output.
#[derive(StructOpt, Debug)]
#[structopt(name = "orgize-sync")]
struct Opt {
    #[structopt(subcommand)]
    subcommand: Cmd,
}
// Subcommands of orgize-sync. The `///` comments below double as the CLI
// help text emitted by StructOpt, so they are left untouched; additional
// notes use plain `//` comments.
#[derive(StructOpt, Debug)]
enum Cmd {
    /// Initializes a new configuration file
    #[structopt(name = "init")]
    Init {
        /// Increases verbosity
        #[structopt(short, long)]
        verbose: bool,
    },
    /// Synchronizes org files
    #[structopt(name = "sync")]
    Sync {
        // These two flags exist only when their cargo feature is enabled.
        /// Skips Google Calendar synchronization
        #[cfg(feature = "google_calendar")]
        #[structopt(long = "skip-google-calendar")]
        skip_google_calendar: bool,
        /// Skips Toggl synchronization
        #[cfg(feature = "toggl")]
        #[structopt(long = "skip-toggl")]
        skip_toggl: bool,
        /// Increases verbosity
        #[structopt(short, long)]
        verbose: bool,
        /// Path to configuration file
        #[structopt(short, long, parse(from_os_str))]
        conf_path: Option<PathBuf>,
    },
    /// Validates and prints your configuration file
    #[structopt(name = "conf")]
    Conf {
        /// Toggles silent mode (no output)
        #[structopt(short, long)]
        silent: bool,
        /// Increases verbosity
        #[structopt(short, long)]
        verbose: bool,
        /// Path to configuration file
        #[structopt(short, long, parse(from_os_str))]
        conf_path: Option<PathBuf>,
    },
}
/// Entry point: parses the CLI and dispatches the chosen subcommand.
fn main() -> Result<()> {
    match Opt::from_args().subcommand {
        Cmd::Init { verbose } => {
            init_logger(verbose);
            // Write a fresh default configuration file.
            Conf::init()?;
        }
        Cmd::Conf {
            silent,
            verbose,
            conf_path,
        } => {
            init_logger(verbose);
            if !silent {
                // Pretty-print the parsed configuration to stdout.
                serde_json::to_writer_pretty(stdout(), &Conf::new(conf_path)?)?;
            } else if Conf::new(conf_path).is_err() {
                // Silent mode: the exit code alone signals validity.
                process::exit(1);
            }
        }
        Cmd::Sync {
            verbose,
            conf_path,
            skip_google_calendar,
            skip_toggl,
        } => {
            // NOTE(review): `skip_google_calendar` / `skip_toggl` are only
            // declared when their features are enabled, so this pattern
            // likely fails to compile with either feature disabled — confirm.
            init_logger(verbose);
            let _conf = Conf::new(conf_path)?;
            // Sync backends are not wired up yet; these branches are stubs.
            if cfg!(feature = "google_calendar") && !skip_google_calendar {}
            if cfg!(feature = "toggl") && !skip_toggl {}
        }
    }
    Ok(())
}
/// Installs the global logger, then sets the maximum log level: `Trace`
/// when verbose, `Info` otherwise.
fn init_logger(verbose: bool) {
    log::set_logger(&logger::LOGGER).unwrap();
    let level = if verbose {
        LevelFilter::Trace
    } else {
        LevelFilter::Info
    };
    log::set_max_level(level);
}
|
use super::preludes::*;
use std::cell::RefCell;
use std::rc::Rc;
use crate::evaluator::objects;
use vm::bytecode;
pub const MAX_FRAMES: usize = 1024;
/// A single call frame: the closure being executed plus its instruction
/// pointer and the stack index where its locals begin.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct Frame {
    /// The closure whose bytecode this frame runs.
    pub cl: objects::Closure,
    /// Instruction pointer into the closure's bytecode.
    pub pointer: usize,
    /// Stack index marking the base of this frame's locals.
    pub base_pointer: usize,
}
impl Frame {
pub fn new(cl: objects::Closure, base_pointer: usize) -> Self {
Self {
cl,
pointer: 0,
base_pointer,
}
}
pub fn instructions(&self) -> &bytecode::Instructions {
&self.cl.func.instructions
}
}
/// Consumes a frame and extracts its closure's bytecode.
impl From<Frame> for bytecode::Instructions {
    fn from(value: Frame) -> Self {
        value.cl.func.instructions
    }
}
/// A fixed-capacity stack of call frames (capacity `MAX_FRAMES`).
#[derive(Clone, Debug, PartialEq)]
pub struct StackFrame {
    /// Preallocated frame slots; only `frames[..pointer]` are live.
    frames: Vec<Rc<RefCell<Frame>>>,
    /// Number of live frames (one past the index of the top frame).
    pointer: usize,
}
impl Default for StackFrame {
    /// Creates a stack of `MAX_FRAMES` placeholder frames with the frame
    /// pointer at 0 (empty).
    fn default() -> Self {
        // `vec![rc; n]` clones a single Rc into every slot, so all the
        // placeholder slots would alias one shared RefCell. Give each slot
        // its own allocation instead; `push` overwrites slots before they
        // are ever read, but aliased placeholders are a latent trap for any
        // future code that mutates a slot through `current()`.
        let frames = (0..MAX_FRAMES)
            .map(|_| Rc::new(RefCell::new(Frame::default())))
            .collect();
        Self { frames, pointer: 0 }
    }
}
impl StackFrame {
    /// Same as [`StackFrame::default`]: an empty stack with preallocated
    /// slots.
    pub fn new() -> Self {
        Self::default()
    }
    /// Returns a handle to the top frame.
    ///
    /// Panics (index underflow) if the stack is empty.
    pub fn current(&self) -> Rc<RefCell<Frame>> {
        Rc::clone(&self.frames[self.pointer - 1])
    }
    /// Pushes `frame` onto the stack.
    ///
    /// Panics (index out of bounds) if `MAX_FRAMES` frames are already live.
    pub fn push(&mut self, frame: Frame) {
        self.frames[self.pointer] = Rc::new(RefCell::new(frame));
        self.pointer += 1;
    }
    /// Pops and returns the top frame.
    ///
    /// Panics (integer underflow) if the stack is empty. The popped slot
    /// keeps its `Rc` until overwritten by a later `push`.
    pub fn pop(&mut self) -> Rc<RefCell<Frame>> {
        self.pointer -= 1;
        Rc::clone(&self.frames[self.pointer])
    }
}
|
use serde::{Deserialize, Serialize};
use common::event::EventPublisher;
use common::result::Result;
use crate::domain::author::{AuthorId, AuthorRepository};
use crate::domain::category::{CategoryId, CategoryRepository};
use crate::domain::collection::{Collection, CollectionRepository};
use crate::domain::publication::{Header, Image, Name, Synopsis, Tag};
/// Raw input payload for creating a collection; each field is converted to
/// its domain value object (Name, Synopsis, Tag, Image, CategoryId) in
/// [`Create::exec`].
#[derive(Deserialize)]
pub struct CreateCommand {
    pub name: String,
    pub synopsis: String,
    /// Identifier of an existing category.
    pub category_id: String,
    pub tags: Vec<String>,
    /// Cover image reference, validated by `Image::new`.
    pub cover: String,
}
impl CreateCommand {
    /// Validates the raw command fields.
    ///
    /// Currently a no-op: the fields are checked later, when the value
    /// objects are constructed in `Create::exec`.
    fn validate(&self) -> Result<()> {
        Ok(())
    }
}
/// Response payload: the identifier of the newly created collection.
#[derive(Serialize)]
pub struct CreateResponse {
    id: String,
}
/// Use case: create a new collection for an author (see [`Create::exec`]).
/// Holds borrowed references to its collaborating repositories and the
/// event publisher.
pub struct Create<'a> {
    event_pub: &'a dyn EventPublisher,
    author_repo: &'a dyn AuthorRepository,
    category_repo: &'a dyn CategoryRepository,
    collection_repo: &'a dyn CollectionRepository,
}
impl<'a> Create<'a> {
    /// Wires the use case to its repositories and event publisher.
    pub fn new(
        event_pub: &'a dyn EventPublisher,
        author_repo: &'a dyn AuthorRepository,
        category_repo: &'a dyn CategoryRepository,
        collection_repo: &'a dyn CollectionRepository,
    ) -> Self {
        Create {
            event_pub,
            author_repo,
            category_repo,
            collection_repo,
        }
    }
    /// Creates a new collection owned by `author_id` and returns its id.
    ///
    /// Steps: validate the command, build the domain value objects, check
    /// the referenced category and author can be found, persist the
    /// collection, then publish its domain events. Any step failing aborts
    /// the whole operation via `?`.
    pub async fn exec(&self, author_id: String, cmd: CreateCommand) -> Result<CreateResponse> {
        cmd.validate()?;
        let name = Name::new(cmd.name)?;
        let synopsis = Synopsis::new(cmd.synopsis)?;
        let mut tags = Vec::new();
        for tag in cmd.tags.into_iter() {
            tags.push(Tag::new(tag)?);
        }
        let cover = Image::new(cmd.cover)?;
        let category_id = CategoryId::new(cmd.category_id)?;
        // Result is discarded: this lookup only confirms the category
        // exists (an error propagates).
        self.category_repo.find_by_id(&category_id).await?;
        let header = Header::new(name, synopsis, category_id, tags, cover)?;
        let author_id = AuthorId::new(author_id)?;
        // Likewise, confirm the author exists before creating.
        self.author_repo.find_by_id(&author_id).await?;
        let mut collection =
            Collection::new(self.collection_repo.next_id().await?, author_id, header)?;
        self.collection_repo.save(&mut collection).await?;
        self.event_pub
            .publish_all(collection.base().events()?)
            .await?;
        Ok(CreateResponse {
            id: collection.base().id().to_string(),
        })
    }
}
|
pub mod drive;
pub mod dumper;
pub mod intake;
pub mod digital_monitor;
/// Factory that builds a subsystem of type `T`.
///
/// The `ToString` bound gives every factory a human-readable name.
pub trait SubsystemFactory<T>: ToString {
    /// Consumes the boxed factory and returns the constructed subsystem.
    fn produce(self: Box<Self>) -> T;
}
extern crate debug;
/// A singly linked cons list.
///
/// `Debug` is derived (for `T: Debug`) so lists can be printed with `{:?}`;
/// the original relied on the pre-1.0 `extern crate debug` formatter.
#[derive(Debug)]
enum List<T> {
    /// A value followed by the rest of the list.
    Cons(T, Box<List<T>>),
    /// The empty list.
    Nil
}
impl<T: PartialEq> PartialEq for List<T> {
fn eq(&self, ys: &List<T>) -> bool {
match (self, ys) {
(&Nil, &Nil) => true,
(&Cons(ref x, box ref xs_tail), &Cons(ref y, box ref ys_tail))
if x == y => xs_tail == ys_tail,
_ => false
}
}
}
fn main () {
let mut xs = Cons(1i, box Cons(2i, box Nil));
let ys = xs;
xs = Nil;
println!("{:?}", xs == ys);
println!("{:?}", ys);
}
|
use std::collections::HashMap;
/// Maps assembler symbols to their numeric addresses.
pub struct SymbolTable {
    // Symbol name -> address.
    map: HashMap<String, usize>,
}

impl SymbolTable {
    /// Creates an empty symbol table.
    pub fn new() -> SymbolTable {
        SymbolTable {
            map: HashMap::new(),
        }
    }
    /// Records (or overwrites) the address for `symbol`.
    pub fn add_entry(&mut self, symbol: String, address: usize) {
        self.map.insert(symbol, address);
    }
    /// Returns `true` if `symbol` has a recorded address.
    pub fn contains(&self, symbol: String) -> bool {
        self.map.contains_key(&symbol)
    }
    /// Returns the address of `symbol`.
    ///
    /// # Panics
    ///
    /// Panics if `symbol` was never added; check with
    /// [`SymbolTable::contains`] first.
    pub fn get_address(&self, symbol: String) -> usize {
        *self.map.get(&symbol).unwrap()
    }
}

impl Default for SymbolTable {
    /// Same as [`SymbolTable::new`].
    fn default() -> Self {
        Self::new()
    }
}
|
use std::collections::{HashMap, HashSet};
use super::prelude::*;
/// Phase for voting on a mission proposal
/// Phase for voting on a mission proposal
pub struct Voting {
    /// Map from players to whether they up- or down-voted the proposal.
    votes: HashMap<String, bool>,
    /// Whether or not the voting results are obscured (set by Maeve via
    /// `handle_obscure`).
    obscured: bool,
}
impl GameState<Voting> {
    /// Records `player`'s vote on the current proposal.
    ///
    /// Once every player has voted, tallies the result and advances the
    /// game: mission 1 chooses between the two standing proposals, later
    /// missions either send the proposal or pass to the next proposer.
    /// (The only code change from the previous version is removing the
    /// redundant parentheses in `if (self.phase.obscured)`.)
    pub fn handle_vote(mut self, player: &str, is_upvote: bool) -> ActionResult {
        if self.phase.votes.contains_key(player) {
            return self.player_error("You already voted");
        }
        log::debug!(
            "{} {}",
            player,
            if is_upvote { "upvoted" } else { "downvoted" }
        );
        self.phase.votes.insert(player.to_string(), is_upvote);
        let mut effects = vec![Effect::Broadcast(Message::VoteReceived)];
        if self.phase.votes.len() == self.game.size() {
            let mission = self.mission();
            let mut upvotes = HashSet::new();
            let mut downvotes = HashSet::new();
            for (player, vote) in self.phase.votes.drain() {
                let is_arthur = self.game.players.is(&player, Role::Arthur)
                    && self.role_state.arthur.has_declared();
                let collection = if vote { &mut upvotes } else { &mut downvotes };
                if is_arthur {
                    // NOTE(review): this inserts an "(Arthur)" entry *and*
                    // the plain name below, so a declared Arthur contributes
                    // two entries to the tally — confirm that is intended.
                    collection.insert(format!("{} (Arthur)", player));
                }
                collection.insert(player);
            }
            let sent = upvotes.len() > downvotes.len();
            // TODO: This probably could be cleaner, but hacking this for pre-alpha.
            if self.phase.obscured {
                effects.push(Effect::Broadcast(Message::Toast {
                    severity: ToastSeverity::WARN,
                    message: format!(
                        "Mission {}: Maeve has obscured the votes!\nUpvotes: {}\nDownvotes: {}",
                        mission,
                        upvotes.len(),
                        downvotes.len()
                    ),
                }));
            }
            let vote_counts = if self.phase.obscured {
                messages::VoteCounts::Obscured {
                    upvotes: upvotes.len() as u32,
                    downvotes: downvotes.len() as u32,
                }
            } else {
                messages::VoteCounts::Public { upvotes, downvotes }
            };
            effects.push(Effect::Broadcast(Message::VotingResults {
                sent,
                counts: vote_counts,
            }));
            if mission == 1 {
                // Mission 1 votes between two pre-made proposals: index 0 on
                // an upvote majority, index 1 otherwise.
                let proposal_index = if sent { 0 } else { 1 };
                let proposal = &self.proposals[proposal_index];
                log::debug!("Voted to send {} on mission 1", proposal);
                effects.push(Effect::Broadcast(Message::MissionGoing {
                    mission,
                    players: proposal.players.clone(),
                }));
                let next_state = self.with_phase(OnMission::new(proposal_index));
                (GameStateWrapper::OnMission(next_state), effects)
            } else {
                let proposal = self.proposals.last().expect("Voted with no proposals!");
                if sent {
                    log::debug!("Voted to send {} on mission {}", proposal, mission);
                    effects.push(Effect::Broadcast(Message::MissionGoing {
                        mission,
                        players: proposal.players.clone(),
                    }));
                    let proposal_index = self.proposals.len() - 1;
                    let next_state = self.with_phase(OnMission::new(proposal_index));
                    (GameStateWrapper::OnMission(next_state), effects)
                } else {
                    log::debug!("Voted not to send {}", proposal);
                    let next_proposer = self.game.next_proposer(&proposal.proposer).to_string();
                    self.into_proposing(next_proposer, effects)
                }
            }
        } else {
            // If we don't have all the votes yet, there's no state change
            (GameStateWrapper::Voting(self), effects)
        }
    }
    /// Lets Maeve obscure the vote tally for this proposal; usable once per
    /// game, enforced by `role_state.maeve`.
    pub fn handle_obscure(mut self, player: &str) -> ActionResult {
        if self.game.players.by_name(player).unwrap().role == Role::Maeve {
            if self.phase.obscured {
                self.player_error("You already obscured the votes for this proposal")
            } else if self.role_state.maeve.can_obscure() {
                log::debug!("Maeve obscured the votes!");
                self.role_state.maeve.mark_obscure();
                self.phase.obscured = true;
                (GameStateWrapper::Voting(self), vec![])
            } else {
                self.player_error("You can't obscure this round")
            }
        } else {
            self.player_error("You can't obscure votes")
        }
    }
    /// Cancels voting, returning to the player who had been proposing. This is used for Arthur declarations while voting, since
    /// if Arthur were on the proposal it is no longer valid.
    pub fn cancel_vote(mut self, effects: Vec<Effect>) -> ActionResult {
        // Remove the last proposal, since it's getting re-proposed
        let proposal = self
            .proposals
            .pop()
            .expect("In Voting phase with no proposals");
        log::debug!("Cancelling vote on {}", proposal);
        self.into_proposing(proposal.proposer, effects)
    }
}
impl Voting {
/// Create a new `Voting` phase with no votes yet cast.
pub fn new() -> Voting {
Voting {
votes: HashMap::new(),
obscured: false,
}
}
}
impl_phase!(Voting);
|
use std::vec::IntoIter;
use std::iter::IntoIterator;
use std::collections::VecDeque;
/// Wraps a `Vec`'s iterator with an arbitrary-lookahead buffer, so callers
/// can inspect any number of upcoming items before consuming them.
#[derive(Clone)]
pub struct BufferedPeekable<T> {
    /// Source of items not yet pulled into the buffer.
    iter: IntoIter<T>,
    /// Items pulled from `iter` but not yet consumed, oldest at the front.
    buf: VecDeque<T>,
}

impl<T> BufferedPeekable<T> {
    /// Creates a peekable view over the elements of `v`.
    pub fn new(v: Vec<T>) -> BufferedPeekable<T> {
        BufferedPeekable {
            iter: v.into_iter(),
            buf: VecDeque::new(),
        }
    }
    /// Returns `true` when no items remain.
    pub fn is_empty(&mut self) -> bool {
        self.peek().is_none()
    }
    /// Returns the next item without consuming it.
    pub fn peek(&mut self) -> Option<&T> {
        self.peek_ahead(0)
    }
    /// Returns the item `distance` positions ahead (0 = next) without
    /// consuming anything, or `None` if the sequence ends first.
    ///
    /// (The previous version special-cased three ranges of `distance`; the
    /// single loop below covers all of them identically.)
    pub fn peek_ahead(&mut self, distance: usize) -> Option<&T> {
        // Pull items into the buffer until index `distance` exists.
        while self.buf.len() <= distance {
            self.buf.push_back(self.iter.next()?);
        }
        self.buf.get(distance)
    }
    /// Removes and returns the next item, draining the lookahead buffer
    /// before touching the underlying iterator.
    pub fn consume(&mut self) -> Option<T> {
        self.buf.pop_front().or_else(|| self.iter.next())
    }
}
|
use dtn::system;
use clap::Clap;
use log::{debug};
// Command-line options for the dtn daemon. The commented-out flags were
// superseded by the configuration file and are kept for reference.
// NOTE: clap derives help/about text from `///` doc comments, so review
// notes here use plain `//` comments to leave CLI output unchanged.
#[derive(Debug)]
#[derive(Clap)]
struct Opts {
    // #[clap(short = "e", long = "eid", help = "Local EID (ex: dtn://example.com")]
    // local_eid: String,
    // #[clap(long = "stcp", help = "Enable STCP listener ")]
    // stcp_enable: bool,
    // #[clap(long = "stcp-port", help = "STCP listen port", default_value = "4556" )]
    // stcp_port: u16,
    // Path of the configuration file handed to `system::start`.
    #[clap(short = "c", long = "conf", help = "Configuration file")]
    conf_file: String,
}
pub fn main() {
env_logger::init();
let opts: Opts = Opts::parse();
debug!("{:?}", opts);
// let local_eid = EndpointID::with_dtn(&opts.local_eid).unwrap();
// let mut conf = router::Configuration::load().unwrap();
// let conf = router::Configuration {
// local_eid,
// stcp_enable: opts.stcp_enable,
// stcp_port: opts.stcp_port,
// };
system::start(opts.conf_file);
} |
// Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! macOS implementation of features at the application scope.
use super::util;
use crate::clipboard::ClipboardItem;
use cocoa::appkit::{NSApp, NSPasteboardTypeString};
use cocoa::base::{id, nil, BOOL, YES};
use cocoa::foundation::NSInteger;
/// Application-scope operations on macOS: thin wrappers over Cocoa calls.
pub struct Application;
impl Application {
    /// Terminates the application (`NSApplication terminate:`).
    pub fn quit() {
        unsafe {
            let () = msg_send![NSApp(), terminate: nil];
        }
    }
    /// Hide the application this window belongs to. (cmd+H)
    pub fn hide() {
        unsafe {
            let () = msg_send![NSApp(), hide: nil];
        }
    }
    /// Hide all other applications. (cmd+opt+H)
    pub fn hide_others() {
        unsafe {
            let workspace = class!(NSWorkspace);
            let shared: id = msg_send![workspace, sharedWorkspace];
            let () = msg_send![shared, hideOtherApplications];
        }
    }
    /// Returns the contents of the clipboard, if any.
    ///
    /// Walks the general pasteboard's declared types and returns the first
    /// string entry; non-string types are logged and skipped for now.
    pub fn get_clipboard_contents() -> Option<ClipboardItem> {
        unsafe {
            let nspasteboard = class!(NSPasteboard);
            let pasteboard: id = msg_send![nspasteboard, generalPasteboard];
            let data_types: id = msg_send![pasteboard, types];
            let count: usize = msg_send![data_types, count];
            for i in 0..count {
                let dtype: id = msg_send![data_types, objectAtIndex: i];
                let is_string: BOOL = msg_send![dtype, isEqualToString: NSPasteboardTypeString];
                if is_string == YES {
                    let contents: id = msg_send![pasteboard, stringForType: dtype];
                    let contents = util::from_nsstring(contents);
                    return Some(contents.into());
                } else {
                    log::info!("unhandled pasteboard type {}", util::from_nsstring(dtype));
                }
                //TODO: handle other data types
            }
            None
        }
    }
    /// Sets the contents of the system clipboard.
    ///
    /// Clears the general pasteboard first; only text items are handled,
    /// other variants are logged and dropped.
    pub fn set_clipboard_contents(item: ClipboardItem) {
        unsafe {
            let nspasteboard = class!(NSPasteboard);
            let pasteboard: id = msg_send![nspasteboard, generalPasteboard];
            match item {
                ClipboardItem::Text(string) => {
                    let nsstring = util::make_nsstring(&string);
                    let _: NSInteger = msg_send![pasteboard, clearContents];
                    let _: BOOL =
                        msg_send![pasteboard, setString: nsstring forType: NSPasteboardTypeString];
                }
                other => log::warn!("unhandled clipboard data {:?}", other),
            }
        }
    }
}
|
use std::os::raw::c_char;
use crate::error::{check_status, Error, ErrorKind};
/// Initial number of bytes to allocate when copying a string out of a string vector
const INITIAL_SIZE: usize = 128;
/// Maximum number of bytes to allocate when copying a string out of a string vector
const MAX_SIZE: usize = 1024 * 1024;
/// A helper for copying a string from a C API
///
/// operation should be a function that takes a pointer to a buffer of characters and the length
/// of the buffer. It calls a C function that fills the buffer and returns an error code.
///
/// If the error code indicates an error, the error is returned. Otherwise, this function attempts
/// to convert the copied characters into a String.
///
/// This function returns errors in the following cases:
/// * `operation` returned an error code that indicates an error
/// * The string to be copied is longer than the maximum allowed length
/// * The string copied is not valid UTF-8
pub(crate) fn copy_string<F>(mut operation: F) -> Result<String, Error>
where
    F: FnMut(*mut c_char, usize) -> uhd_sys::uhd_error::Type,
{
    let mut buffer: Vec<u8> = Vec::new();
    for size in BufferSizes::new() {
        // Grow (or initialize) the buffer; newly added bytes are zero-filled.
        buffer.resize(size, b'\0');
        // Call into the C code to copy the string
        let status = operation(buffer.as_mut_ptr() as *mut c_char, buffer.len());
        check_status(status)?;
        // Get the part of the buffer before the first null
        if let Some(null_index) = buffer.iter().position(|b| *b == b'\0') {
            buffer.truncate(null_index);
            buffer.shrink_to_fit();
            // Try to convert to UTF-8
            return String::from_utf8(buffer).map_err(|_| Error::new(ErrorKind::Utf8));
        } else {
            // If there is no null, the string did not fit in the current
            // buffer. Try again with the next (doubled) size.
            continue;
        }
    }
    // String is too large to fully copy
    Err(Error::new(ErrorKind::StringLength))
}
/// An iterator over buffer sizes that yields INITIAL_SIZE and then double the previous value
/// up to MAX_SIZE
struct BufferSizes {
    /// The next value to return
    next: usize,
}
impl BufferSizes {
    /// Creates the size sequence, starting at `INITIAL_SIZE`.
    pub fn new() -> Self {
        BufferSizes { next: INITIAL_SIZE }
    }
}
impl Iterator for BufferSizes {
    type Item = usize;

    /// Yields the current size and doubles the stored value for the next call;
    /// the sequence ends once the stored size has grown past `MAX_SIZE`.
    fn next(&mut self) -> Option<Self::Item> {
        if self.next > MAX_SIZE {
            return None;
        }
        let yielded = self.next;
        self.next *= 2;
        Some(yielded)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// The sequence doubles from 128 up to 1 MiB inclusive, then terminates.
    #[test]
    fn buffer_sizes() {
        let mut sizes = BufferSizes::new();
        let mut expected = 128;
        while expected <= 1024 * 1024 {
            assert_eq!(Some(expected), sizes.next());
            expected *= 2;
        }
        assert_eq!(None, sizes.next());
    }
}
|
#[doc = "Register `DDRCTRL_HWLPCTL` reader"]
pub type R = crate::R<DDRCTRL_HWLPCTL_SPEC>;
#[doc = "Register `DDRCTRL_HWLPCTL` writer"]
pub type W = crate::W<DDRCTRL_HWLPCTL_SPEC>;
#[doc = "Field `HW_LP_EN` reader - HW_LP_EN"]
pub type HW_LP_EN_R = crate::BitReader;
#[doc = "Field `HW_LP_EN` writer - HW_LP_EN"]
pub type HW_LP_EN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HW_LP_EXIT_IDLE_EN` reader - HW_LP_EXIT_IDLE_EN"]
pub type HW_LP_EXIT_IDLE_EN_R = crate::BitReader;
#[doc = "Field `HW_LP_EXIT_IDLE_EN` writer - HW_LP_EXIT_IDLE_EN"]
pub type HW_LP_EXIT_IDLE_EN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HW_LP_IDLE_X32` reader - HW_LP_IDLE_X32"]
pub type HW_LP_IDLE_X32_R = crate::FieldReader<u16>;
#[doc = "Field `HW_LP_IDLE_X32` writer - HW_LP_IDLE_X32"]
pub type HW_LP_IDLE_X32_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 12, O, u16>;
impl R {
#[doc = "Bit 0 - HW_LP_EN"]
#[inline(always)]
pub fn hw_lp_en(&self) -> HW_LP_EN_R {
HW_LP_EN_R::new((self.bits & 1) != 0)
}
#[doc = "Bit 1 - HW_LP_EXIT_IDLE_EN"]
#[inline(always)]
pub fn hw_lp_exit_idle_en(&self) -> HW_LP_EXIT_IDLE_EN_R {
HW_LP_EXIT_IDLE_EN_R::new(((self.bits >> 1) & 1) != 0)
}
#[doc = "Bits 16:27 - HW_LP_IDLE_X32"]
#[inline(always)]
pub fn hw_lp_idle_x32(&self) -> HW_LP_IDLE_X32_R {
HW_LP_IDLE_X32_R::new(((self.bits >> 16) & 0x0fff) as u16)
}
}
impl W {
#[doc = "Bit 0 - HW_LP_EN"]
#[inline(always)]
#[must_use]
pub fn hw_lp_en(&mut self) -> HW_LP_EN_W<DDRCTRL_HWLPCTL_SPEC, 0> {
HW_LP_EN_W::new(self)
}
#[doc = "Bit 1 - HW_LP_EXIT_IDLE_EN"]
#[inline(always)]
#[must_use]
pub fn hw_lp_exit_idle_en(&mut self) -> HW_LP_EXIT_IDLE_EN_W<DDRCTRL_HWLPCTL_SPEC, 1> {
HW_LP_EXIT_IDLE_EN_W::new(self)
}
#[doc = "Bits 16:27 - HW_LP_IDLE_X32"]
#[inline(always)]
#[must_use]
pub fn hw_lp_idle_x32(&mut self) -> HW_LP_IDLE_X32_W<DDRCTRL_HWLPCTL_SPEC, 16> {
HW_LP_IDLE_X32_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "DDRCTRL hardware low power control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ddrctrl_hwlpctl::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ddrctrl_hwlpctl::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DDRCTRL_HWLPCTL_SPEC;
impl crate::RegisterSpec for DDRCTRL_HWLPCTL_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`ddrctrl_hwlpctl::R`](R) reader structure"]
impl crate::Readable for DDRCTRL_HWLPCTL_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ddrctrl_hwlpctl::W`](W) writer structure"]
impl crate::Writable for DDRCTRL_HWLPCTL_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DDRCTRL_HWLPCTL to value 0x03"]
impl crate::Resettable for DDRCTRL_HWLPCTL_SPEC {
const RESET_VALUE: Self::Ux = 0x03;
}
|
//! The RFC 959 Print Working Directory (`PWD`) command
//
// This command causes the name of the current working
// directory to be returned in the reply.
use crate::{
auth::UserDetail,
server::controlchan::{
error::ControlChanError,
handler::{CommandContext, CommandHandler},
Reply, ReplyCode,
},
storage::{Metadata, StorageBackend},
};
use async_trait::async_trait;
/// Handler for the RFC 959 `PWD` command: report the current working directory.
#[derive(Debug)]
pub struct Pwd;
#[async_trait]
impl<Storage, User> CommandHandler<Storage, User> for Pwd
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
{
    #[tracing_attributes::instrument]
    async fn handle(&self, args: CommandContext<Storage, User>) -> Result<Reply, ControlChanError> {
        let session = args.session.lock().await;
        // TODO: properly escape double quotes in `cwd`
        // The path is quoted per RFC 959's 257 reply format.
        let result = format!("\"{}\"", session.cwd.as_path().display());
        // On Windows systems, the path will be formatted with Windows style separators ('\')
        // Most FTP clients expect normal UNIX separators ('/'), and they have trouble handling
        // Windows style separators, so if we are on a Windows host, we replace the separators here.
        #[cfg(windows)]
        let result = result.replace(std::path::MAIN_SEPARATOR, "/");
        // ReplyCode::DirCreated is the 257 code, which RFC 959 uses for both
        // MKD and PWD replies.
        Ok(Reply::new_with_string(ReplyCode::DirCreated, result))
    }
}
|
extern crate pest;
#[macro_use] extern crate pest_derive;
mod parser {
use {Entity, Attribute, Relationship, Cardinality};
use pest;
use pest::Parser;
#[cfg(debug_assertions)]
const _GRAMMAR : &'static str = include_str!("grammar.pest");
#[derive(Parser)]
#[grammar = "grammar.pest"]
struct EntityParser;
/// Converts a `card` parse node into a `Cardinality`.
///
/// Panics if `pair` is not a `Rule::card` node or a number inside fails to parse.
fn parse_cardinality(pair : pest::iterators::Pair<Rule, pest::inputs::StrInput>) -> Cardinality {
    use Cardinality::*;
    assert_eq!(Rule::card, pair.as_rule());
    let card = pair.into_inner().next().unwrap();
    match card.as_rule() {
        // `*` — any number of participants.
        Rule::card_any => AtLeast(0),
        Rule::card_exact => Exactly(card.as_str().parse().unwrap()),
        Rule::card_at_least => AtLeast(card.into_inner().next().unwrap().as_str().parse().unwrap()),
        // `n..m` — inclusive range.
        Rule::card_range => {
            let mut iter = card.into_inner();
            let n = iter.next().unwrap().as_str().parse().unwrap();
            let m = iter.next().unwrap().as_str().parse().unwrap();
            Range(n, m)
        },
        _ => unreachable!("{:?}", card.as_rule()),
    }
}
/// Converts a `relationship` parse node into a `Relationship`.
fn parse_relationship(pair : pest::iterators::Pair<Rule, pest::inputs::StrInput>) -> Relationship {
    assert_eq!(Rule::relationship, pair.as_rule());
    let rel = pair.into_inner().next().unwrap();
    match rel.as_rule() {
        // `A "role1" c1:c2 B "role2"` — binary relationship; note the child
        // order is e1, s1, c1, c2, e2, s2.
        Rule::binary_rel => {
            let mut iter = rel.into_inner();
            let e1 = iter.next().unwrap().as_str().to_owned();
            let s1 = iter.next().unwrap().as_str().to_owned();
            let c1 = parse_cardinality(iter.next().unwrap());
            let c2 = parse_cardinality(iter.next().unwrap());
            let e2 = iter.next().unwrap().as_str().to_owned();
            let s2 = iter.next().unwrap().as_str().to_owned();
            Relationship::Binary(e1, s1, c1, e2, s2, c2)
        },
        // `super >: sub + sub ...` — open subtype cluster (see test fixture).
        Rule::subtype_open => {
            let mut iter = rel.into_inner();
            let name = iter.next().unwrap().as_str().to_owned();
            let subs = iter.map(|p| p.as_str().to_owned()).collect();
            Relationship::SubTypeOpen(name, subs)
        },
        // `super =(discriminator) sub + sub ...` — closed subtype cluster.
        Rule::subtype_closed => {
            let mut iter = rel.into_inner();
            let name = iter.next().unwrap().as_str().to_owned();
            let disc = iter.next().unwrap().as_str().to_owned();
            let subs = iter.map(|p| p.as_str().to_owned()).collect();
            Relationship::SubTypeClosed(name, disc, subs)
        },
        _ => unreachable!("{:?}", rel.as_rule()),
    }
}
/// Converts an `entity` parse node (a name plus attribute lines) into an `Entity`.
///
/// An entity is marked dependent (`independent = false`) when any attribute is
/// part of both the primary key and a foreign key.
fn parse_entity(pair: pest::iterators::Pair<Rule, pest::inputs::StrInput>) -> Entity {
    assert_eq!(Rule::entity, pair.as_rule());
    let mut iter = pair.into_inner().peekable();
    assert_eq!(Rule::name, iter.peek().unwrap().as_rule());
    let name = iter.next().unwrap().as_str().to_owned();
    let mut attributes = Vec::new();
    let mut independent = true;
    for p in iter {
        assert_eq!(Rule::attribute, p.as_rule());
        let mut attr = p.into_inner().peekable();
        assert_eq!(Rule::name, attr.peek().unwrap().as_rule());
        let mut name = attr.next().unwrap().as_str().to_owned();
        // A second name node is a role-qualified attribute such as
        // `predator/species_id` (see the test fixture); join with " / ".
        // (Was `Rule:: name` — stray space in the path, now fixed.)
        if attr.peek().map(|p| p.as_rule()) == Some(Rule::name) {
            name += " / ";
            name += attr.next().unwrap().as_str();
        }
        let mut a = Attribute::new(&name);
        // Remaining children are the modifiers (:pk, :fk, :null, :akN).
        for m in attr {
            match m.as_rule() {
                Rule::mod_fk => a = a.fk(),
                Rule::mod_null => a = a.null(),
                Rule::mod_pk => a = a.pk(),
                Rule::mod_ak => {
                    let ak = m.into_inner().next().unwrap().as_str().parse().unwrap();
                    a = a.ak(ak)
                },
                _ => unreachable!("{:?}", m.as_rule()),
            }
        }
        if a.in_fk && a.in_pk {
            independent = false;
        }
        attributes.push(a);
    }
    Entity { name, attributes, independent }
}
/// Parses a complete model source string into its entities and relationships.
pub fn parse_model(input: &str) -> (Vec<Entity>, Vec<Relationship>) {
    let mut entities = Vec::new();
    let mut relationships = Vec::new();
    for pair in EntityParser::parse_str(Rule::model, input).unwrap() {
        match pair.as_rule() {
            Rule::entity => entities.push(parse_entity(pair)),
            Rule::relationship => relationships.push(parse_relationship(pair)),
            // Any other top-level rule is ignored, as before.
            _ => {}
        }
    }
    (entities, relationships)
}
#[test]
fn test_entity_parser() {
use write_graph;
let input = r#"
[species]
species_id :pk
scientific_name :ak1
diet
can_move
[stationary_species]
species_id :pk :fk
[moving_species]
species_id :pk :fk
limbs
species =(can_move) stationary_species + moving_species
[crawling_species]
species_id :pk :fk
ground_speed
can_climb
can_dig
[flying_species]
species_id :pk :fk
flying_speed
max_altitude
[swimming_species]
species_id :pk :fk
swim_speed
max_depth
moving_species >: crawling_species + flying_species + swimming_species
[biome]
biome_id :pk
temperature
biome_type
[surface_biome]
biome_id :pk :fk
altitude
humidity
[ocean_biome]
biome_id :pk :fk
depth
salinity
[underground_biome]
biome_id :pk :fk
biome =(biome_type) surface_biome + ocean_biome + underground_biome
species "lives in" 1+:1+ biome "supports"
[predation]
predator/species_id :pk :fk
prey/species_id :pk :fk
biome_id :pk :fk
species "is predator" 1:* predation "has predator"
species "is prey" 1:* predation "has prey"
biome "facilitates" 1:* predation "occurs in"
"#;
let (entities, relationships) = parse_model(input);
write_graph(&entities, &relationships);
}
}
/// Bit set of alternate keys an attribute belongs to (bit N-1 set = member of AK N).
type KeyFlag = u32;
type Name = String;

/// A single entity attribute together with its key/nullability modifiers.
#[derive(Clone)]
struct Attribute {
    name : Name,
    in_pk : bool,
    in_fk : bool,
    nullable : bool,
    aks : KeyFlag,
}

impl Attribute {
    /// Creates an attribute with no modifiers set.
    fn new(name : &str) -> Attribute {
        Attribute {
            name: name.to_owned(),
            in_pk: false,
            in_fk: false,
            nullable: false,
            aks: 0
        }
    }
    /// Marks the attribute as part of a foreign key.
    fn fk(mut self) -> Self { self.in_fk = true; self }
    /// Marks the attribute as part of the primary key.
    fn pk(mut self) -> Self { self.in_pk = true; self }
    /// Marks the attribute as nullable.
    fn null(mut self) -> Self { self.nullable = true; self }
    /// Adds the attribute to alternate key `id` (1-based).
    ///
    /// Uses `|=` so the operation is idempotent: the previous `^=` (XOR) would
    /// silently *remove* the membership again if the same `:akN` modifier
    /// appeared twice on one attribute.
    fn ak(mut self, id : u8) -> Self { self.aks |= 1 << (id - 1); self }
}
/// A parsed entity: its name, attributes, and whether it is drawn as
/// independent (no attribute that is both PK and FK — see `parse_entity`).
pub struct Entity {
    name : Name,
    attributes : Vec<Attribute>,
    independent : bool,
}
impl Entity {
    /// Creates an empty, independent entity.
    /// NOTE(review): `new` and `att` appear unused by the visible code
    /// (`parse_entity` builds `Entity` literals directly) — confirm before removing.
    fn new(name : &str) -> Entity {
        Entity {
            name: name.to_owned(),
            attributes: Vec::new(),
            independent: true,
        }
    }
    /// Appends an attribute, builder-style.
    fn att(mut self, a : Attribute) -> Self {
        self.attributes.push(a);
        self
    }
}
/// How many times an entity may participate in a relationship.
#[derive(Clone, Copy)]
pub enum Cardinality {
    /// Exactly n participants.
    Exactly(usize),
    /// Between n and m participants (inclusive).
    Range(usize, usize),
    /// n or more participants; `AtLeast(0)` means "any number".
    AtLeast(usize),
}
use Cardinality::*;
/// A relationship between parsed entities.
pub enum Relationship {
    /// Binary(entity1, role1, card1, entity2, role2, card2).
    Binary(Name, String, Cardinality, Name, String, Cardinality),
    /// Open subtype cluster: supertype plus its known subtypes.
    SubTypeOpen(Name, Vec<Name>),
    /// Closed subtype cluster: supertype, discriminator attribute, subtypes.
    SubTypeClosed(Name, Name, Vec<Name>),
}
use Relationship::*;
/// Emits the parsed model as a Graphviz digraph on stdout.
///
/// Entities become HTML-like table nodes (rounded table for dependent
/// entities); relationships become edges, with subtype clusters routed
/// through a circle (open) or double circle (closed, labelled with the
/// discriminator).
fn write_graph(entities : &[Entity], relationships : &[Relationship]) {
    println!("digraph {{");
    println!("node [shape=plain]");
    // Renders one table row per attribute, with its modifiers in small print.
    let print_attributes = |atts : &[Attribute]| {
        for a in atts.iter() {
            print!("<tr><td align=\"left\">{}</td><td align=\"left\">", a.name);
            let mut modifiers = Vec::new();
            if a.in_fk {
                modifiers.push("FK".to_owned());
            }
            {
                // One AK<n> tag per set bit in the alternate-key bit set.
                let aks = a.aks;
                for i in 0..31 {
                    if aks & (1 << i) != 0 {
                        modifiers.push(format!("AK{}", i + 1));
                    }
                }
            }
            if a.nullable {
                modifiers.push("NULL".to_owned());
            }
            if ! modifiers.is_empty() {
                print!("<font point-size=\"8\">");
                for m in &modifiers {
                    print!("{} ", m);
                }
                print!("</font>");
            }
            println!("</td></tr>");
        }
    };
    for e in entities {
        println!(r#"{name} [label=<<table cellspacing="0" border="0"><tr><td align="left">{name}</td></tr><tr><td><table cellborder="0" {style}>"#,
            name = e.name,
            style = if e.independent { "" } else {"style=\"rounded\""}
        );
        // Primary-key attributes go above a horizontal rule, the rest below it.
        let (pk, nk) : (Vec<Attribute>, Vec<Attribute>) = e.attributes.clone().into_iter().partition(|a| a.in_pk);
        print_attributes(&pk);
        if ! nk.is_empty() {
            println!("<hr />");
        }
        print_attributes(&nk);
        println!("</table></td></tr></table>\n>]");
    }
    println!("splines=\"ortho\"");
    println!("edge [arrowhead=\"none\" color=\"gray\"]");
    // Subtype junction nodes need unique graph names; number them sequentially.
    let mut subid = 1;
    for r in relationships {
        match *r {
            Binary(ref e1, ref s1, c1, ref e2, ref s2, c2) => {
                // Cardinality "1" is left implicit on the diagram; "∗" is any number.
                let print_card = |c : Cardinality| -> String { match c {
                    Exactly(1) | Range(1, 1) => String::from(""),
                    Exactly(n) => format!("{}", n),
                    AtLeast(0) => String::from("∗"),
                    AtLeast(n) => format!("{}+", n),
                    Range(n, m) => format!("{}..{}", n, m),
                }};
                println!(r#"{} -> {} [taillabel="{}"] [headlabel="{}"] [label=<<font point-size="10">{} / {}</font>>]"#, e1, e2, print_card(c1), print_card(c2), s1, s2);
            },
            SubTypeClosed(ref sup, ref disc, ref subs) => {
                println!(r#"subtype{} [shape="doublecircle" label=<<font point-size="10">{}</font>> width=0.2 margin=0]"#, subid, disc);
                println!("{} -> subtype{}", sup, subid);
                for sub in subs.iter() {
                    println!("subtype{} -> {}", subid, sub);
                }
                subid += 1;
            },
            SubTypeOpen(ref sup, ref subs) => {
                println!(r#"subtype{} [shape="circle" label="" width=0.2]"#, subid);
                println!("{} -> subtype{}", sup, subid);
                for sub in subs.iter() {
                    println!("subtype{} -> {}", subid, sub);
                }
                subid += 1;
            },
        }
    }
    println!("}}");
}
/// Reads a model description from stdin and writes the Graphviz output to stdout.
fn main() {
    use std::io::Read;
    let mut source = String::new();
    let read_result = std::io::stdin().read_to_string(&mut source);
    if read_result.is_ok() {
        let (entities, relationships) = parser::parse_model(&source);
        write_graph(&entities, &relationships);
    }
}
|
use crate::{InterfaceIpAddr, InterfaceIpv4, InterfaceIpv6};
/// Converts nispor's view of an interface's IPv4 state into nmstate's.
///
/// Returns `Some(disabled config)` when the interface has no IPv4 state and no
/// controller, and `None` when it has no IPv4 state but does have a controller.
pub(crate) fn np_ipv4_to_nmstate(
    np_iface: &nispor::Iface,
) -> Option<InterfaceIpv4> {
    if let Some(np_ip) = &np_iface.ipv4 {
        let mut ip = InterfaceIpv4::default();
        if !np_ip.addresses.is_empty() {
            ip.enabled = true;
        }
        for np_addr in &np_ip.addresses {
            // A finite valid lifetime is treated as evidence of a DHCP lease.
            // NOTE(review): heuristic — confirm against nispor's semantics.
            if np_addr.valid_lft != "forever" {
                ip.dhcp = true;
            }
            ip.addresses.push(InterfaceIpAddr {
                ip: np_addr.address.clone(),
                prefix_length: np_addr.prefix_len as u32,
            });
        }
        Some(ip)
    } else if np_iface.controller.is_none() {
        // IP might just be disabled on a standalone interface.
        Some(InterfaceIpv4 {
            enabled: false,
            ..Default::default()
        })
    } else {
        None
    }
}
/// Converts nispor's view of an interface's IPv6 state into nmstate's.
///
/// Returns `Some(disabled config)` when the interface has no IPv6 state and no
/// controller, and `None` when it has no IPv6 state but does have a controller.
pub(crate) fn np_ipv6_to_nmstate(
    np_iface: &nispor::Iface,
) -> Option<InterfaceIpv6> {
    if let Some(np_ip) = &np_iface.ipv6 {
        let mut ip = InterfaceIpv6::default();
        if !np_ip.addresses.is_empty() {
            ip.enabled = true;
        }
        for np_addr in &np_ip.addresses {
            // A finite valid lifetime is treated as evidence of autoconf (SLAAC).
            // NOTE(review): heuristic — confirm against nispor's semantics.
            if np_addr.valid_lft != "forever" {
                ip.autoconf = true;
            }
            ip.addresses.push(InterfaceIpAddr {
                ip: np_addr.address.clone(),
                prefix_length: np_addr.prefix_len as u32,
            });
        }
        Some(ip)
    } else if np_iface.controller.is_none() {
        // IP might just be disabled on a standalone interface.
        Some(InterfaceIpv6 {
            enabled: false,
            ..Default::default()
        })
    } else {
        None
    }
}
/// Converts nmstate IPv4 address configuration into a nispor `IpConf`.
pub(crate) fn nmstate_ipv4_to_np(
    nms_ipv4: Option<&InterfaceIpv4>,
) -> nispor::IpConf {
    let mut np_ip_conf = nispor::IpConf::default();
    if let Some(conf) = nms_ipv4 {
        let converted = conf.addresses.iter().map(|nms_addr| nispor::IpAddrConf {
            address: nms_addr.ip.to_string(),
            prefix_len: nms_addr.prefix_length as u8,
        });
        np_ip_conf.addresses.extend(converted);
    }
    np_ip_conf
}
/// Converts nmstate IPv6 address configuration into a nispor `IpConf`.
pub(crate) fn nmstate_ipv6_to_np(
    nms_ipv6: Option<&InterfaceIpv6>,
) -> nispor::IpConf {
    let mut np_ip_conf = nispor::IpConf::default();
    if let Some(conf) = nms_ipv6 {
        let converted = conf.addresses.iter().map(|nms_addr| nispor::IpAddrConf {
            address: nms_addr.ip.to_string(),
            prefix_len: nms_addr.prefix_length as u8,
        });
        np_ip_conf.addresses.extend(converted);
    }
    np_ip_conf
}
|
#[cfg(feature = "async_mode")]
use curl::easy::{Easy2, Handler, WriteError};
#[cfg(feature = "async_mode")]
use crate::error::ReturnError;
// TESTED
/// Accumulates the bytes curl writes for a response body.
#[cfg(feature = "async_mode")]
struct Collector(Vec<u8>);
#[cfg(feature = "async_mode")]
impl Handler for Collector {
    /// curl's write callback: append the chunk and report it fully consumed.
    fn write(&mut self, data: &[u8]) -> Result<usize, WriteError> {
        self.0.extend_from_slice(data);
        Ok(data.len())
    }
}
/// requests required data from server via given url in async mode.
///
/// This function is fundamental and at the bottom level of the requesting hierarchy.
///
/// # Errors
/// * `UnableToRequest` / `UnableToSetUrl` — configuring the handle failed
/// * `FailedToApplyRequest` — three attempts to perform the transfer all failed
/// * `RequestDenied` — the server answered with a non-200 status code
/// * `NotFound` — the response code could not be read
#[cfg(feature = "async_mode")]
pub(crate) fn do_request(url_format: &str) -> Result<String, ReturnError> {
    let mut handle = Easy2::new(Collector(Vec::new()));
    // Configure an HTTP GET for the requested URL.
    handle.get(true).map_err(|_| ReturnError::UnableToRequest)?;
    handle.url(url_format).map_err(|_| ReturnError::UnableToSetUrl)?;
    // Retry the transfer up to 3 times, stopping at the first success.
    // (The old version kept the result in a never-read variable; this makes
    // the success/failure state explicit.)
    let mut succeeded = false;
    for _ in 0..3 {
        if handle.perform().is_ok() {
            succeeded = true;
            break;
        }
    }
    if !succeeded {
        return Err(ReturnError::FailedToApplyRequest);
    }
    match handle.response_code() {
        Ok(200) => {}
        Ok(_) => return Err(ReturnError::RequestDenied),
        Err(_) => return Err(ReturnError::NotFound),
    }
    let contents = handle.get_ref();
    Ok(String::from_utf8_lossy(&contents.0).into_owned())
}
|
#![allow(dead_code)]
extern crate rsnl;
extern crate rsgnl;
/// Connects a netlink socket to generic netlink and resolves the `nl80211`
/// family index, printing it to stdout.
fn main() {
    let mut nls = rsnl::socket::alloc().unwrap();
    // Connect the socket to generic netlink. The connect call's side effect
    // matters even though its result is unused; the underscore silences the
    // unused-variable warning the old `p` binding produced.
    let _p = rsgnl::socket::connect(&mut nls);
    let family_name = "nl80211";
    let family = rsgnl::controller::resolve(&nls, family_name);
    println!("Family Index: {}", family);
}
|
use crate::codegen::AatbeModule;
use llvm_sys_wrapper::{LLVMBasicBlockRef, LLVMValueRef};
/// Emits an unconditional branch to `block` at the module builder's current position.
pub fn branch(module: &AatbeModule, block: LLVMBasicBlockRef) -> LLVMValueRef {
    module.llvm_builder_ref().build_br(block)
}
/// Emits a conditional branch on `cond`: to `then_block` when true, otherwise
/// to `else_block`.
pub fn cond_branch(
    module: &AatbeModule,
    cond: LLVMValueRef,
    then_block: LLVMBasicBlockRef,
    else_block: LLVMBasicBlockRef,
) -> LLVMValueRef {
    module
        .llvm_builder_ref()
        .build_cond_br(cond, then_block, else_block)
}
|
#[doc = "Register `D3PCR2H` reader"]
pub type R = crate::R<D3PCR2H_SPEC>;
#[doc = "Register `D3PCR2H` writer"]
pub type W = crate::W<D3PCR2H_SPEC>;
#[doc = "Field `PCS48` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub type PCS48_R = crate::FieldReader<PCS48_A>;
#[doc = "Pending request clear input signal selection on Event input x= truncate ((n+96)/2)\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum PCS48_A {
#[doc = "0: DMA ch6 event selected as D3 domain pendclear source"]
DmaCh6 = 0,
#[doc = "1: DMA ch7 event selected as D3 domain pendclear source"]
DmaCh7 = 1,
#[doc = "2: LPTIM4 out selected as D3 domain pendclear source"]
Lptim4 = 2,
#[doc = "3: LPTIM5 out selected as D3 domain pendclear source"]
Lptim5 = 3,
}
impl From<PCS48_A> for u8 {
#[inline(always)]
fn from(variant: PCS48_A) -> Self {
variant as _
}
}
impl crate::FieldSpec for PCS48_A {
type Ux = u8;
}
impl PCS48_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> PCS48_A {
        match self.bits {
            0 => PCS48_A::DmaCh6,
            1 => PCS48_A::DmaCh7,
            2 => PCS48_A::Lptim4,
            3 => PCS48_A::Lptim5,
            // PCS48 is a 2-bit field: the reader in `R::pcs48` masks `bits`
            // with 3, so values outside 0..=3 cannot occur here.
            _ => unreachable!(),
        }
    }
    #[doc = "DMA ch6 event selected as D3 domain pendclear source"]
    #[inline(always)]
    pub fn is_dma_ch6(&self) -> bool {
        *self == PCS48_A::DmaCh6
    }
    #[doc = "DMA ch7 event selected as D3 domain pendclear source"]
    #[inline(always)]
    pub fn is_dma_ch7(&self) -> bool {
        *self == PCS48_A::DmaCh7
    }
    #[doc = "LPTIM4 out selected as D3 domain pendclear source"]
    #[inline(always)]
    pub fn is_lptim4(&self) -> bool {
        *self == PCS48_A::Lptim4
    }
    #[doc = "LPTIM5 out selected as D3 domain pendclear source"]
    #[inline(always)]
    pub fn is_lptim5(&self) -> bool {
        *self == PCS48_A::Lptim5
    }
}
#[doc = "Field `PCS48` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub type PCS48_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, PCS48_A>;
impl<'a, REG, const O: u8> PCS48_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
REG::Ux: From<u8>,
{
#[doc = "DMA ch6 event selected as D3 domain pendclear source"]
#[inline(always)]
pub fn dma_ch6(self) -> &'a mut crate::W<REG> {
self.variant(PCS48_A::DmaCh6)
}
#[doc = "DMA ch7 event selected as D3 domain pendclear source"]
#[inline(always)]
pub fn dma_ch7(self) -> &'a mut crate::W<REG> {
self.variant(PCS48_A::DmaCh7)
}
#[doc = "LPTIM4 out selected as D3 domain pendclear source"]
#[inline(always)]
pub fn lptim4(self) -> &'a mut crate::W<REG> {
self.variant(PCS48_A::Lptim4)
}
#[doc = "LPTIM5 out selected as D3 domain pendclear source"]
#[inline(always)]
pub fn lptim5(self) -> &'a mut crate::W<REG> {
self.variant(PCS48_A::Lptim5)
}
}
#[doc = "Field `PCS49` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_R as PCS49_R;
#[doc = "Field `PCS50` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_R as PCS50_R;
#[doc = "Field `PCS51` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_R as PCS51_R;
#[doc = "Field `PCS52` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_R as PCS52_R;
#[doc = "Field `PCS53` reader - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_R as PCS53_R;
#[doc = "Field `PCS49` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_W as PCS49_W;
#[doc = "Field `PCS50` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_W as PCS50_W;
#[doc = "Field `PCS51` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_W as PCS51_W;
#[doc = "Field `PCS52` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_W as PCS52_W;
#[doc = "Field `PCS53` writer - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
pub use PCS48_W as PCS53_W;
impl R {
#[doc = "Bits 0:1 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs48(&self) -> PCS48_R {
PCS48_R::new((self.bits & 3) as u8)
}
#[doc = "Bits 2:3 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs49(&self) -> PCS49_R {
PCS49_R::new(((self.bits >> 2) & 3) as u8)
}
#[doc = "Bits 4:5 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs50(&self) -> PCS50_R {
PCS50_R::new(((self.bits >> 4) & 3) as u8)
}
#[doc = "Bits 6:7 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs51(&self) -> PCS51_R {
PCS51_R::new(((self.bits >> 6) & 3) as u8)
}
#[doc = "Bits 8:9 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs52(&self) -> PCS52_R {
PCS52_R::new(((self.bits >> 8) & 3) as u8)
}
#[doc = "Bits 10:11 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
pub fn pcs53(&self) -> PCS53_R {
PCS53_R::new(((self.bits >> 10) & 3) as u8)
}
}
impl W {
#[doc = "Bits 0:1 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs48(&mut self) -> PCS48_W<D3PCR2H_SPEC, 0> {
PCS48_W::new(self)
}
#[doc = "Bits 2:3 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs49(&mut self) -> PCS49_W<D3PCR2H_SPEC, 2> {
PCS49_W::new(self)
}
#[doc = "Bits 4:5 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs50(&mut self) -> PCS50_W<D3PCR2H_SPEC, 4> {
PCS50_W::new(self)
}
#[doc = "Bits 6:7 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs51(&mut self) -> PCS51_W<D3PCR2H_SPEC, 6> {
PCS51_W::new(self)
}
#[doc = "Bits 8:9 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs52(&mut self) -> PCS52_W<D3PCR2H_SPEC, 8> {
PCS52_W::new(self)
}
#[doc = "Bits 10:11 - Pending request clear input signal selection on Event input x= truncate ((n+96)/2)"]
#[inline(always)]
#[must_use]
pub fn pcs53(&mut self) -> PCS53_W<D3PCR2H_SPEC, 10> {
PCS53_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "EXTI D3 pending clear selection register high\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`d3pcr2h::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`d3pcr2h::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct D3PCR2H_SPEC;
impl crate::RegisterSpec for D3PCR2H_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`d3pcr2h::R`](R) reader structure"]
impl crate::Readable for D3PCR2H_SPEC {}
#[doc = "`write(|w| ..)` method takes [`d3pcr2h::W`](W) writer structure"]
impl crate::Writable for D3PCR2H_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets D3PCR2H to value 0"]
impl crate::Resettable for D3PCR2H_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
extern crate rusthub;
extern crate rustc_serialize;
use std::fs::File;
use std::path::Path;
use std::io::Read;
use rusthub::oauth;
use rustc_serialize::json;
/// Exercises creating a new GitHub OAuth authorization.
/// Ignored: requires network access and real credentials — the values below
/// are obvious placeholders.
#[test]
#[ignore]
fn create_authorization() {
    let auth = oauth::AuthBody {
        scopes: vec!["notifications".to_string()],
        note: "gh-notify".to_string(),
        note_url: "https://github.com/brianerikson/gh-notify".to_string(),
        client_id: "aaaaaaaaaxaaaaaaaaax".to_string(),
        client_secret: "aaaaaaaaaxaaaaaaaaaxaaaaaaaaaxaaaaaaaaax".to_string()
    };
    println!("{}", json::as_pretty_json(&auth));
    let result = oauth::create_authorization(String::from("username"), String::from("password"), auth);
    println!("{}", result);
}
/// Exercises the get-or-create flow where the client id is passed separately.
/// Ignored for the same reason as above.
#[test]
#[ignore]
fn get_or_create_authorization() {
    let auth = oauth::AuthBody2 {
        client_secret: "aaaaaaaaaxaaaaaaaaaxaaaaaaaaaxaaaaaaaaax".to_string(),
        scopes: vec!["notifications".to_string()],
        note: "gh-notify".to_string(),
        note_url: "https://github.com/brianerikson/gh-notify".to_string(),
    };
    println!("{}", json::as_pretty_json(&auth));
    let result = oauth::get_authorization(
        String::from("username"), String::from("password"), auth, "aaaaaaaaaxaaaaaaaaax".to_string()
    );
    match result {
        Ok(token_response) => println!("{:#?}", token_response),
        Err(err) => panic!("{}", err)
    }
}
/// Decodes a captured JSON response fixture into an `oauth::TokenResponse`.
/// Ignored: depends on the local fixture file being present.
#[test]
#[ignore]
fn decode_token_response() {
    let mut json_str = String::new();
    let res = File::open(&Path::new("tests/getorcreateauth_response.json")).unwrap().read_to_string(&mut json_str);
    match res {
        Ok(_) => {}
        Err(err) => panic!("{}", err)
    }
    let response: oauth::TokenResponse = match json::decode(&json_str) {
        Ok(response) => response,
        Err(err) => panic!("{}", err)
    };
    println!("{:#?}", response);
}
#![crate_type = "lib"]
extern crate i3ipc;
pub mod focuswatcher;
pub mod sockethandler;
|
use crate::vec3::Vec3;
use crate::ray::Ray;
use std::f64::consts::PI;
use crate::sphere::random_in_unit_disk;
/// A positionable thin-lens camera that produces rays with depth-of-field blur.
pub struct Camera {
    origin: Vec3,
    // Bottom-left corner of the viewport on the focus plane, in world space.
    lower_left_corner: Vec3,
    // Viewport extents along the camera's right (u) and up (v) axes.
    horizontal: Vec3,
    vertical: Vec3,
    // Orthonormal camera basis: w points backwards, u right, v up.
    w: Vec3,
    u: Vec3,
    v: Vec3,
    // Half the aperture; 0 disables the lens-disk jitter.
    lens_radius: f32
}
impl Camera {
    /// Builds a camera.
    ///
    /// * `lookfrom` / `lookat` — eye position and target point
    /// * `vup` — approximate world "up", used to orient the viewport
    /// * `vfov` — vertical field of view in degrees
    /// * `aspect_ratio` — viewport width / height
    /// * `aperture` — lens diameter driving depth-of-field blur
    /// * `focus_dist` — distance to the plane of perfect focus
    pub fn new(lookfrom: Vec3, lookat: Vec3, vup: Vec3, vfov: f32, aspect_ratio: f32,
    aperture: f32, focus_dist: f32) -> Camera {
        // Degrees -> radians, then viewport height from the FOV half-angle.
        let theta = vfov * PI as f32 / 180.0;
        let h = (theta/2.0f32).tan();
        let viewport_height = 2.0 * h;
        let viewport_width = viewport_height * aspect_ratio;
        let w = (lookfrom - lookat).normalize();
        let u = vup.cross(&w).normalize();
        let v = w.cross(&u);
        let origin = lookfrom;
        // Scaled by focus_dist so (u, v) offsets land on the focus plane.
        let horizontal = u * viewport_width * focus_dist;
        let vertical = v * viewport_height * focus_dist;
        let lower_left_corner = origin - horizontal * 0.5 - vertical * 0.5 - w * focus_dist;
        Camera {
            origin, horizontal, vertical, lower_left_corner,
            w, u, v,
            lens_radius: aperture / 2.0
        }
    }
    /// Returns the ray through viewport coordinates (u, v), with its origin
    /// jittered on the lens disk to simulate depth of field.
    pub fn get_ray_at_uv(&self, u: f32, v: f32) -> Ray {
        let rd = random_in_unit_disk() * self.lens_radius;
        let offset = self.u * rd.x + self.v * rd.y;
        Ray::new_filled(
            self.origin + offset,
            self.lower_left_corner + self.horizontal * u + self.vertical * v - self.origin - offset)
    }
}
use std::fmt::Display;
use std::iter::{self, FromIterator};
/// Left-pads the `Display` rendering of `i` with `prefix` characters until it
/// is at least `len` long; renderings already `len` or longer are returned
/// unchanged.
///
/// NOTE: `len` is compared against the *byte* length of the rendered string,
/// so the padding count is only exact for single-byte `prefix` characters and
/// ASCII renderings — fine for the digit padding used by `show_duration`.
pub fn pre_format<T>(i: T, len: usize, prefix: char) -> String
where
    T: Display,
{
    // Render once and reuse the result. The original formatted `i` a second
    // time in the padded branch, invoking its `Display` impl twice.
    let s = i.to_string();
    if s.len() >= len {
        s
    } else {
        let mut padded = String::with_capacity(len);
        padded.extend(iter::repeat(prefix).take(len - s.len()));
        padded.push_str(&s);
        padded
    }
}

/// Formats a millisecond duration as "H:MM:SS" when it spans at least one
/// hour, otherwise "MM:SS". The leading hour figure is not zero-padded.
pub fn show_duration(duration: i32) -> String {
    // Millisecond spans of each unit.
    let hour_duration = 3600000;
    let minute_duration = 60000;
    let second_duration = 1000;
    let hour = duration / hour_duration;
    let minute = (duration % hour_duration) / minute_duration;
    let second = (duration % minute_duration) / second_duration;
    if hour > 0 {
        format!(
            "{}:{}:{}",
            hour,
            pre_format(minute, 2, '0'),
            pre_format(second, 2, '0')
        )
    } else {
        format!(
            "{}:{}",
            pre_format(minute, 2, '0'),
            pre_format(second, 2, '0')
        )
    }
}
|
// q0187_repeated_dna_sequences
struct Solution;
use std::collections::HashMap;
impl Solution {
    /// Returns every distinct 10-character substring that occurs more than
    /// once in `s`. Result order is unspecified (HashMap iteration order).
    pub fn find_repeated_dna_sequences(s: String) -> Vec<String> {
        let len = s.len();
        // With 10 or fewer characters there is at most one 10-mer, which can
        // never repeat; this also guards `len - 10` against underflow.
        if len <= 10 {
            return vec![];
        }
        // Count each 10-mer by borrowing slices of `s` — no per-window
        // allocation. (Also drops the leftover debug `println!` that ran on
        // every window in the original.)
        let mut counts: HashMap<&str, u32> = HashMap::new();
        for i in 0..=len - 10 {
            *counts.entry(&s[i..i + 10]).or_insert(0) += 1;
        }
        // Only substrings seen more than once are materialized as Strings.
        counts
            .into_iter()
            .filter(|&(_, n)| n > 1)
            .map(|(seq, _)| seq.to_string())
            .collect()
    }
}
#[cfg(test)]
mod tests {
    use super::Solution;
    use crate::util;
    // Exercises the repeated-10-mer search; set comparison via `util::vec_2_set`
    // because the result order is unspecified.
    #[test]
    fn it_works() {
        // assert_eq!(
        //     util::vec_2_set(vec![String::from("AAAAACCCCC"), String::from("CCCCCAAAAA")]),
        //     util::vec_2_set(Solution::find_repeated_dna_sequences(String::from(
        //         "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
        //     )))
        // );
        // Eleven A's contain the 10-mer "AAAAAAAAAA" twice (offsets 0 and 1).
        assert_eq!(
            util::vec_2_set(vec![String::from("AAAAAAAAAA")]),
            util::vec_2_set(Solution::find_repeated_dna_sequences(String::from(
                "AAAAAAAAAAA"
            )))
        );
    }
}
|
mod metadata;
pub use self::metadata::*;
|
#[macro_use]
extern crate nom;
use nom::line_ending;
use nom::types::CompleteByteSlice;
use Dir::{ U, D, R, L };
const INPUT: &'static str = include_str!("day2.txt");
/// One keypad move parsed from the input: up, down, right or left.
#[derive(Debug)]
enum Dir { U, D, R, L }
// Single-character parsers, one per direction (nom 4 `named!` style over
// `CompleteByteSlice` so incomplete-input states are not reported).
named!(match_up<CompleteByteSlice, Dir>, map!(char!('U'), |_| U));
named!(match_down<CompleteByteSlice, Dir>, map!(char!('D'), |_| D));
named!(match_left<CompleteByteSlice, Dir>, map!(char!('L'), |_| L));
named!(match_right<CompleteByteSlice, Dir>, map!(char!('R'), |_| R));
// Any one direction character.
named!(match_one_dir<CompleteByteSlice, Dir>,
    alt!(
        match_up |
        match_down |
        match_left |
        match_right
    )
);
// A run of direction characters — one input line's worth of moves.
named!(match_dirs<CompleteByteSlice, Vec<Dir>>,
    many0!(match_one_dir)
);
// One line of moves, consuming the trailing newline if present.
named!(parse_line<CompleteByteSlice, Vec<Dir>>,
    terminated!(
        match_dirs,
        opt!(line_ending)
    )
);
// The whole input file: one Vec<Dir> per line.
named!(parse_input<CompleteByteSlice, Vec<Vec<Dir>>>,
    many0!(parse_line)
);
/// Advent of Code 2016 day 2, part 2: walk a diamond-shaped, 13-key keypad.
///
/// Keys are numbered 1..=13, with 10..=13 printed as the letters 'A'..='D':
///
/// ```text
///         1
///       2 3 4
///     5 6 7 8 9
///      10 11 12
///         13
/// ```
fn main() {
    let vv = parse_input(CompleteByteSlice(INPUT.as_bytes())).ok().unwrap().1;
    let mut res = String::from("");
    // Each line continues from the key reached at the end of the previous
    // line; the very first line starts on key 5.
    let mut digit = 5;
    // Transition tables indexed by the current key (index 0 is unused
    // padding): map_up[k] is the key reached by moving up from k, staying on
    // k when the move would leave the keypad. Same layout for the others.
    let map_up = [ 0, 1, 2, 1, 4, 5, 2, 3, 4, 9, 6, 7, 8, 11 ];
    let map_down = [ 0, 3, 6, 7, 8, 5, 10, 11, 12, 9, 10, 13, 12, 13 ];
    let map_left = [ 0, 1, 2, 2, 3, 5, 5, 6, 7, 8, 10, 10, 11, 13 ];
    let map_right = [ 0, 1, 3, 4, 4, 6, 7, 8, 9, 9, 11, 12, 12, 13 ];
    for v in vv.iter() {
        for dir in v.iter() {
            digit = match dir {
                U => map_up[digit],
                D => map_down[digit],
                L => map_left[digit],
                R => map_right[digit],
            }
        }
        // Render 1..=9 as '1'..'9' (ASCII 48 + d) and 10..=13 as 'A'..'D'
        // (ASCII 55 + d).
        let c = (digit + (if digit >= 10 { 55 } else { 48 })) as u8 as char;
        res.push(c);
    }
    println!("{}", res);
}
|
#![deny(missing_docs)]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(test, feature(core))]
//! # lazylist
//!
//! A lazy, reference counted linked list similar to Haskell's [].
#[macro_use(lazy)]
extern crate lazy;
use lazy::single::Thunk;
use std::rc::Rc;
use std::iter::FromIterator;
/// Wraps an expression in a reference-counted lazy thunk.
/// NOTE: expands unhygienically — `Rc` and `lazy!` must be in scope at the
/// call site.
#[macro_export]
macro_rules! list { ($val:expr) => { Rc::new(lazy!($val)) } }
/// Lazily conses `$val` onto the front of `$list`.
#[macro_export]
macro_rules! pair { ($val:expr, $list:expr) => { list!($crate::List::Cons($val, $list)) } }
/// The lazy empty list.
#[macro_export]
macro_rules! nil { () => { list!($crate::List::Nil) } }
/// A lazy, reference counted, singly linked list.
///
/// See `List` for methods.
pub type RcList<T> = Rc<Thunk<List<T>>>;
use List::{Nil, Cons};
/// A Node in a lazy, reference counted, singly linked list.
#[derive(Clone)]
pub enum List<T: 'static> {
    /// The Empty List
    Nil,
    /// A list with one member and possibly another list.
    ///
    /// The tail is itself an unevaluated `RcList` thunk, which is what makes
    /// infinite lists possible (see `test_fib`).
    Cons(T, RcList<T>)
}
impl<T: 'static> List<T> {
    /// Create a new, empty RcList.
    pub fn new() -> RcList<T> {
        nil!()
    }

    /// Create an RcList containing exactly one member.
    pub fn singleton(val: T) -> RcList<T> {
        pair!(val, nil!())
    }

    /// Get the first element in an RcList, if there is one.
    pub fn head(&self) -> Option<&T> {
        if let Cons(ref val, _) = *self {
            Some(val)
        } else {
            None
        }
    }

    /// Get the tail of the RcList, if there is one.
    pub fn tail(&self) -> Option<RcList<T>> {
        if let Cons(_, ref rest) = *self {
            Some(rest.clone())
        } else {
            None
        }
    }
}
/// A trait providing RcList methods on Rc<Thunk<List<T>>>
///
/// These cannot be inherent methods because `RcList` is an alias for the
/// foreign type `Rc`, so they are supplied via this extension trait instead.
pub trait RcListMethods<T> {
    /// Add a value to the RcList (in the front)
    fn push(self, val: T) -> RcList<T>;
    /// Remove a value from the RcList (at the front)
    fn pop(&self) -> Option<(&T, RcList<T>)>;
    /// Get the length of the RcList (O(n))
    fn len(&self) -> usize;
}
impl<T: 'static> RcListMethods<T> for RcList<T> {
    /// Add a value to the RcList (in the front)
    fn push(self, val: T) -> RcList<T> {
        pair!(val, self)
    }

    /// Remove a value from the RcList (at the front)
    fn pop(&self) -> Option<(&T, RcList<T>)> {
        // `head` and `tail` are both Some on Cons and both None on Nil, so a
        // joint match is equivalent to the original and_then/map chain.
        match (self.head(), self.tail()) {
            (Some(head), Some(rest)) => Some((head, rest)),
            _ => None,
        }
    }

    /// Get the length of the RcList (O(n))
    fn len(&self) -> usize {
        self.count()
    }
}
impl<T> FromIterator<T> for RcList<T> {
    // Builds the list lazily: the iterator is only advanced as cells are
    // forced, so even an infinite iterator works. The `'static` bound exists
    // because the iterator is moved into (and stored inside) the thunk.
    // NOTE(review): signature takes `I: Iterator` rather than the modern
    // `IntoIterator` — pre-1.0-era std trait shape.
    fn from_iter<I>(mut iter: I) -> RcList<T>
        where I: Iterator<Item=T> + 'static {
        list!({
            // Pull one element now; wrap the remainder of the iterator in
            // another lazy cell, recursively.
            match iter.next() {
                Some(val) => Cons(val, FromIterator::from_iter(iter)),
                None => Nil
            }
        })
    }
}
// Iterate by reference: each `next` forces the current thunk and advances the
// borrowed cursor to the tail.
impl<'a, T> Iterator for &'a RcList<T> {
    type Item = &'a T;
    fn next(&mut self) -> Option<&'a T> {
        // Four derefs: &mut Self -> &RcList -> Rc -> Thunk (forcing it) -> List.
        let (value, rest) = match ****self {
            Cons(ref value, ref rest) => (value, rest),
            Nil => return None
        };
        // Step the cursor to the tail; `rest` borrows from the same list, so
        // the returned reference stays valid for 'a.
        *self = rest;
        Some(value)
    }
}
#[test]
fn test_fib() {
    // Iterative reference implementation: the n-th Fibonacci number.
    fn fib(n: u64) -> u64 {
        let mut n0 = 0;
        let mut n1 = 1;
        for _ in 0..n {
            let sum = n0 + n1;
            n0 = n1;
            n1 = sum;
        }
        return n0;
    }
    // An *infinite* lazy list of Fibonacci numbers built by corecursion —
    // only the cells actually forced below are ever evaluated.
    fn fibs() -> RcList<u64> {
        fn fibs_inner(n0: u64, n1: u64) -> RcList<u64> {
            pair!(n0, fibs_inner(n1, n0 + n1))
        }
        fibs_inner(0, 1)
    }
    // Compare the first 100 lazily produced values against the reference.
    for (i, &x) in fibs().take(100).enumerate() {
        assert_eq!(x, fib(i as u64))
    }
}
|
// Learning notes: a tour of Rust's scalar and compound primitive types.
fn main() {
    let greeting = String::from("Hallo, ich bin Alexander!");
    /*
        Integer variants
        Signed: i8..i128; unsigned: u8..u128
        Widths: 8, 16, 32, 64, 128 bits
        Two more things:
        -> Upper/lower limits still apply, as in C.
        -> The width of 'isize'/'usize' depends on the target
           architecture (32- or 64-bit).
    */
    let s1: i8;
    let s2: i128;
    let s3: isize;
    /*
        The types Rust infers are usually good choices.
        A numeric literal may carry an explicit type suffix such as 'i32'
        (byte literals like b'' cannot take one).
    */
    let i1 = 1_000_000_000i32; // 1 billion
    let i2 = 0xff + 0o77; // 318 (hex 255 + octal 63)
    let i3 = 0b1111_1111; // 255 (binary)
    let i4 = b'R'; // 82 (the byte value of ASCII 'R')
    println!("\n{}", greeting);
    println!("[1]: {} [2]: {} [3]: {} [4]: {}",
        i1, i2, i3, i4 );
    // Floats and booleans.
    let ep1 = 2.0; // f64 (the default float type)
    let ep2: f32 = 2.0; // f32
    let ep3 = true;
    let ep4: bool = false;
    // Integer arithmetic: division truncates, % is the remainder.
    let o1 = 10 / 3;
    let o2 = 10 % 3;
    println!("(10, 3) => Div: {}, Mod: {}", o1, o2);
    /*
        '' and "" have different types in Rust: '' is a `char`, a 4-byte
        Unicode scalar value, while "" is a UTF-8 encoded string slice.
        (Not UTF-16 — see the std `char` documentation.)
    */
    let c1 = 'ä';
    let c2 = '😇';
    println!("--- {}, --- {}", c1, c2);
    // The tuple's type annotation here is OPTIONAL.
    let tuple: (bool, i32, f64) = (true, 1, 2.0);
    let (t1, t2, t3) = tuple; // destructuring into three bindings
    let p1 = tuple.0; // access by position index
    let p2 = tuple.1;
    let p3 = tuple.2;
    println!("--- {}, --- {}, --- {}", t1, t2, t3);
    println!("--- {}, --- {}, --- {}", p1, p2, p3);
    /*
        Tuple vs Array:
        => Both have a <fixed> length.
        => A tuple may mix element types; an array may not.
        There is also a growable "vector" type (Vec<T>) that can
        expand and shrink.
    */
    let ary_mth = ["Jan.", "Feb.", "Mar."];
    let ary_num: [i32; 6] = [1, 2, 3, 4, 5, 6];
    // Access the elements
    let m_jan = ary_mth[0];
    let m_feb = ary_mth[1];
    /*
        An out-of-bounds index is caught by Rust at runtime and
        produces a 'panic' (the program exits).
    */
    // let m_dec = ary_mth[11];
}
|
//! Builders for customizing asynchronous Metric Sinks.
use tokio::time::Duration;
use crate::{DEFAULT_BATCH_BUF_SIZE, DEFAULT_MAX_BATCH_DELAY, DEFAULT_QUEUE_CAPACITY};
/// Builder allows you to override various default parameter values before creating an instance
/// of the desired Metric Sink.
#[derive(Debug)]
pub struct Builder<T, S> {
    /// Destination address the sink sends metrics to.
    pub(crate) addr: T,
    /// Socket (or socket-like handle) used for sending.
    pub(crate) sock: S,
    /// Maximum number of queued metrics before callers are back-pressured.
    pub(crate) queue_cap: usize,
    /// Size of the buffer into which metrics are batched before a send.
    pub(crate) buf_size: usize,
    /// Longest a partially filled batch may wait before being flushed.
    pub(crate) max_delay: Duration,
}
impl<T, S> Builder<T, S> {
pub(crate) fn new(addr: T, sock: S) -> Self {
Self {
addr,
sock,
queue_cap: DEFAULT_QUEUE_CAPACITY,
buf_size: DEFAULT_BATCH_BUF_SIZE,
max_delay: DEFAULT_MAX_BATCH_DELAY,
}
}
/// Sets the maximum metric queue capacity (default: [DEFAULT_QUEUE_CAPACITY](crate::DEFAULT_QUEUE_CAPACITY)).
pub fn queue_cap(&mut self, queue_cap: usize) -> &mut Self {
self.queue_cap = queue_cap;
self
}
/// Sets the batch buffer size (default: [DEFAULT_BATCH_BUF_SIZE](crate::DEFAULT_BATCH_BUF_SIZE)).
pub fn buf_size(&mut self, buf_size: usize) -> &mut Self {
self.buf_size = buf_size;
self
}
/// Sets the maximum delay before flushing any buffered metrics (default: [DEFAULT_MAX_BATCH_DELAY](crate::DEFAULT_MAX_BATCH_DELAY)).
pub fn max_delay(&mut self, max_delay: Duration) -> &mut Self {
self.max_delay = max_delay;
self
}
}
|
use crate::{
gui::{make_dropdown_list_option, BuildContext, Ui, UiMessage, UiNode},
scene::commands::{
mesh::{
SetMeshCastShadowsCommand, SetMeshDecalLayerIndexCommand, SetMeshRenderPathCommand,
},
SceneCommand,
},
send_sync_message,
sidebar::{
make_bool_input_field, make_int_input_field, make_section, make_text_mark, COLUMN_WIDTH,
ROW_HEIGHT,
},
Message,
};
use rg3d::{
core::{pool::Handle, scope_profile},
gui::{
button::ButtonBuilder,
dropdown_list::DropdownListBuilder,
grid::{Column, GridBuilder, Row},
message::{
ButtonMessage, CheckBoxMessage, DropdownListMessage, MessageDirection,
NumericUpDownMessage, UiMessageData, WidgetMessage,
},
stack_panel::StackPanelBuilder,
widget::WidgetBuilder,
Thickness,
},
scene::{mesh::RenderPath, node::Node},
};
use std::sync::mpsc::Sender;
/// Sidebar section exposing the editable properties of a mesh node, plus a
/// per-surface sub-section for editing a surface's material.
pub struct MeshSection {
    pub section: Handle<UiNode>,        // root widget of the whole section
    cast_shadows: Handle<UiNode>,       // checkbox
    render_path: Handle<UiNode>,        // dropdown: 0 = Deferred, 1 = Forward
    decal_layer_index: Handle<UiNode>,  // numeric field, valid range 0..=255
    sender: Sender<Message>,            // channel to the editor's message loop
    surfaces_list: Handle<UiNode>,      // dropdown listing the mesh's surfaces
    current_surface: Option<usize>,     // index selected in surfaces_list, if any
    surface_section: Handle<UiNode>,    // "Surface Properties" sub-section
    edit_material: Handle<UiNode>,      // button opening the material editor
}
impl MeshSection {
/// Builds the widget hierarchy for the "Mesh Properties" sidebar section:
/// a grid of controls (cast shadows, render path, decal layer index,
/// surface selector) plus a nested "Surface Properties" section holding the
/// material edit button. Handles to the interactive widgets are captured so
/// messages can later be routed to and from them.
pub fn new(ctx: &mut BuildContext, sender: Sender<Message>) -> Self {
    // Handles are declared up-front and assigned inside the builder chain
    // (the `with_child({ ... })` blocks) so they can be stored in `Self`.
    let cast_shadows;
    let render_path;
    let decal_layer_index;
    let surfaces_list;
    let surface_section;
    let edit_material;
    let section = make_section(
        "Mesh Properties",
        StackPanelBuilder::new(
            WidgetBuilder::new()
                .with_child(
                    GridBuilder::new(
                        WidgetBuilder::new()
                            .with_child(make_text_mark(ctx, "Cast Shadows", 0))
                            .with_child({
                                cast_shadows = make_bool_input_field(ctx, 0);
                                cast_shadows
                            })
                            .with_child(make_text_mark(ctx, "Render Path", 1))
                            .with_child({
                                // Item order must match the index mapping used in
                                // sync_to_model / handle_ui_message:
                                // 0 = Deferred, 1 = Forward.
                                render_path = DropdownListBuilder::new(
                                    WidgetBuilder::new()
                                        .on_row(1)
                                        .on_column(1)
                                        .with_margin(Thickness::uniform(1.0)),
                                )
                                .with_close_on_selection(true)
                                .with_items(vec![
                                    make_dropdown_list_option(ctx, "Deferred"),
                                    make_dropdown_list_option(ctx, "Forward"),
                                ])
                                .build(ctx);
                                render_path
                            })
                            .with_child(make_text_mark(ctx, "Decal Layer Index", 2))
                            .with_child({
                                // Decal layers are a u8, hence the 0..=255 bounds.
                                decal_layer_index = make_int_input_field(ctx, 2, 0, 255, 1);
                                decal_layer_index
                            })
                            .with_child(make_text_mark(ctx, "Surfaces", 3))
                            .with_child({
                                // Items are filled lazily in sync_to_model from
                                // the mesh's actual surfaces.
                                surfaces_list = DropdownListBuilder::new(
                                    WidgetBuilder::new().on_row(3).on_column(1),
                                )
                                .with_close_on_selection(true)
                                .build(ctx);
                                surfaces_list
                            }),
                    )
                    .add_column(Column::strict(COLUMN_WIDTH))
                    .add_column(Column::stretch())
                    .add_row(Row::strict(ROW_HEIGHT))
                    .add_row(Row::strict(ROW_HEIGHT))
                    .add_row(Row::strict(ROW_HEIGHT))
                    .add_row(Row::strict(ROW_HEIGHT))
                    .build(ctx),
                )
                .with_child({
                    // Nested section shown only while a surface is selected
                    // (visibility is driven by sync_to_model).
                    surface_section = make_section(
                        "Surface Properties",
                        GridBuilder::new(
                            WidgetBuilder::new()
                                .with_child(make_text_mark(ctx, "Material", 0))
                                .with_child({
                                    edit_material = ButtonBuilder::new(
                                        WidgetBuilder::new().on_row(0).on_column(1),
                                    )
                                    .with_text("...")
                                    .build(ctx);
                                    edit_material
                                }),
                        )
                        .add_column(Column::strict(COLUMN_WIDTH))
                        .add_column(Column::stretch())
                        .add_row(Row::strict(ROW_HEIGHT))
                        .build(ctx),
                        ctx,
                    );
                    surface_section
                }),
        )
        .build(ctx),
        ctx,
    );
    Self {
        section,
        cast_shadows,
        render_path,
        sender,
        decal_layer_index,
        surfaces_list,
        edit_material,
        surface_section,
        current_surface: None,
    }
}
/// Pushes the state of `node` into this section's widgets.
///
/// The whole section is hidden when `node` is not a mesh. Updates go through
/// `send_sync_message` so they do not echo back as user edits — except the
/// surface selection at the bottom, which deliberately does not (see the
/// comment there).
pub fn sync_to_model(&mut self, node: &Node, ui: &mut Ui) {
    send_sync_message(
        ui,
        WidgetMessage::visibility(self.section, MessageDirection::ToWidget, node.is_mesh()),
    );
    if let Node::Mesh(mesh) = node {
        send_sync_message(
            ui,
            CheckBoxMessage::checked(
                self.cast_shadows,
                MessageDirection::ToWidget,
                Some(mesh.cast_shadows()),
            ),
        );
        // Must stay in sync with the dropdown items built in `new`.
        let variant = match mesh.render_path() {
            RenderPath::Deferred => 0,
            RenderPath::Forward => 1,
        };
        // The surface sub-section is only visible while a surface is selected.
        send_sync_message(
            ui,
            WidgetMessage::visibility(
                self.surface_section,
                MessageDirection::ToWidget,
                self.current_surface.is_some(),
            ),
        );
        send_sync_message(
            ui,
            DropdownListMessage::selection(
                self.render_path,
                MessageDirection::ToWidget,
                Some(variant),
            ),
        );
        send_sync_message(
            ui,
            NumericUpDownMessage::value(
                self.decal_layer_index,
                MessageDirection::ToWidget,
                mesh.decal_layer_index() as f32,
            ),
        );
        // Rebuild the surfaces dropdown only when the surface count changed.
        if mesh.surfaces().len() != ui.node(self.surfaces_list).as_dropdown_list().items().len()
        {
            let items = mesh
                .surfaces()
                .iter()
                .enumerate()
                .map(|(n, _)| {
                    make_dropdown_list_option(&mut ui.build_ctx(), &format!("Surface {}", n))
                })
                .collect::<Vec<_>>();
            // Default to the first surface when the mesh has any.
            let selection = if items.is_empty() { None } else { Some(0) };
            send_sync_message(
                ui,
                DropdownListMessage::items(
                    self.surfaces_list,
                    MessageDirection::ToWidget,
                    items,
                ),
            );
            // This has to be sent without `send_sync_message` because we need to get response message
            // in `handle_ui_message`.
            ui.send_message(DropdownListMessage::selection(
                self.surfaces_list,
                MessageDirection::ToWidget,
                selection,
            ));
        }
    }
}
/// Translates UI messages originating from this section's widgets into
/// scene commands (sent to the editor via `self.sender`).
///
/// Each edit is compared against the mesh's current state first, so sync
/// echoes and no-op edits do not spam the undo stack.
pub fn handle_ui_message(&mut self, message: &UiMessage, node: &Node, handle: Handle<Node>) {
    scope_profile!();
    if let Node::Mesh(mesh) = node {
        match *message.data() {
            UiMessageData::CheckBox(CheckBoxMessage::Check(value)) => {
                let value = value.unwrap_or(false);
                if message.destination() == self.cast_shadows && mesh.cast_shadows().ne(&value)
                {
                    self.sender
                        .send(Message::DoSceneCommand(SceneCommand::SetMeshCastShadows(
                            SetMeshCastShadowsCommand::new(handle, value),
                        )))
                        .unwrap();
                }
            }
            UiMessageData::DropdownList(DropdownListMessage::SelectionChanged(selection)) => {
                if message.destination() == self.render_path {
                    if let Some(selection) = selection {
                        // Index mapping mirrors the items added in `new`.
                        let new_render_path = match selection {
                            0 => RenderPath::Deferred,
                            1 => RenderPath::Forward,
                            _ => unreachable!(),
                        };
                        if new_render_path != mesh.render_path() {
                            self.sender
                                .send(Message::DoSceneCommand(SceneCommand::SetMeshRenderPath(
                                    SetMeshRenderPathCommand::new(handle, new_render_path),
                                )))
                                .unwrap();
                        }
                    }
                } else if message.destination() == self.surfaces_list {
                    // Remember the chosen surface and request a re-sync so the
                    // surface sub-section's visibility is updated.
                    self.current_surface = selection;
                    self.sender.send(Message::SyncToModel).unwrap();
                }
            }
            UiMessageData::Button(ButtonMessage::Click) => {
                if message.destination() == self.edit_material {
                    if let Some(current_surface) = self.current_surface {
                        if let Some(surface) = mesh.surfaces().get(current_surface) {
                            self.sender
                                .send(Message::OpenMaterialEditor(surface.material().clone()))
                                .unwrap();
                        }
                    }
                }
            }
            UiMessageData::NumericUpDown(NumericUpDownMessage::Value(index))
                if message.destination() == self.decal_layer_index =>
            {
                // The widget works in f32; clamp into the u8 range before casting.
                let index = index.clamp(0.0, 255.0) as u8;
                if index != mesh.decal_layer_index() {
                    self.sender
                        .send(Message::DoSceneCommand(
                            SceneCommand::SetMeshDecalLayerIndex(
                                SetMeshDecalLayerIndexCommand::new(handle, index),
                            ),
                        ))
                        .unwrap();
                }
            }
            _ => {}
        }
    }
}
|
use std::{collections::{HashMap, hash_map::Entry}, rc::Rc};
use rand::{distributions::WeightedIndex, prelude::Distribution};
use crate::{abstractions::CustomDistribution};
use crate::algorithm::algorithm::GenHash;
use crate::abstractions::FitFunc;
use std::hash::Hash;
// use crate::Chromosome;
// #[derive(Clone, Copy, Debug)]
/// Boltzmann selection operator: individuals are drawn with weight
/// exp(-(f_max - f(x)) / T), where the temperature T decays with the
/// generation number (see `get_t_boltzmann`).
pub struct Boltzmann {
    // Weighted index over the current population; None until `new` builds it.
    pub distribution: Option<WeightedIndex<f64>>,
    // Base of the temperature schedule raised to a generation-dependent power.
    pub t_coefficient: f64,
    // Reference fitness used to normalise the exponent — presumably the
    // population's maximum fitness; TODO confirm against the caller.
    pub f_max: f64,
    // Current generation number (stored as f64 for the schedule arithmetic).
    pub generation: f64,
    // Total number of generations in the run.
    pub max_generation: f64
}
impl<T> CustomDistribution<T> for Boltzmann
where
    T: Hash + Eq + Clone
{
    /// Draws a population index from the current weighted distribution.
    ///
    /// Panics if `new` has not yet populated `distribution`.
    fn sample(&self) -> usize {
        let mut rng = rand::thread_rng();
        self.distribution.as_ref().unwrap().sample(&mut rng)
    }

    /// Builds the successor selector whose weights follow the Boltzmann
    /// probabilities of `population` under `fitness`; schedule parameters
    /// are carried over unchanged.
    fn new(&self, population: &Vec<std::rc::Rc<T>>, fitness: &FitFunc<T>
        , cache: &mut GenHash<T>) -> Self {
        Boltzmann {
            distribution: Some(self.boltzmann_selection(population, fitness, cache)),
            t_coefficient: self.t_coefficient,
            f_max: self.f_max,
            generation: self.generation,
            max_generation: self.max_generation,
        }
    }
}
impl Boltzmann {
    /// Weighted distribution over population indices, each weight being the
    /// (cached) Boltzmann probability of that individual.
    fn boltzmann_selection<T> (
        &self,
        population: &Vec<Rc<T>>,
        fitness: &Box<dyn Fn(&T) -> f64>,
        cache: &mut GenHash<T>,
    ) -> WeightedIndex<f64>
    where
        T: Hash + Eq
    {
        let weights: Vec<f64> = population
            .iter()
            .map(|member| self.boltzmann_fitnesses(member, fitness, cache))
            .collect();
        WeightedIndex::new(weights).unwrap()
    }

    /// Cached Boltzmann probability of one individual: computed and stored
    /// on first request, read back from `cache` afterwards.
    pub fn boltzmann_fitnesses<T> (
        &self,
        item: &Rc<T>,
        fitness: &Box<dyn Fn(&T) -> f64>,
        cache: &mut HashMap<Rc<T>, f64>,
    ) -> f64
    where
        T: Hash + Eq
    {
        *cache
            .entry(item.clone())
            .or_insert_with(|| self.boltzmann_probability(item, fitness))
    }

    /// Unnormalised Boltzmann weight exp((f(x) - f_max) / T).
    fn boltzmann_probability<T> (
        &self,
        x: &Rc<T>,
        fitness: &Box<dyn Fn(&T) -> f64>,
    ) -> f64 {
        ((fitness(x) - self.f_max) / self.get_t_boltzmann()).exp()
    }

    /// Temperature schedule: t_coefficient ^ ((1 + 100 * generation) / max_generation).
    fn get_t_boltzmann(&self) -> f64 {
        let exponent = (1f64 + 100f64 * self.generation) / self.max_generation;
        self.t_coefficient.powf(exponent)
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.