blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
140
| path
stringlengths 5
183
| src_encoding
stringclasses 6
values | length_bytes
int64 12
5.32M
| score
float64 2.52
4.94
| int_score
int64 3
5
| detected_licenses
listlengths 0
47
| license_type
stringclasses 2
values | text
stringlengths 12
5.32M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0cdd9b10db3d4c21f269b2b918e0748ac88c800f
|
Rust
|
Kixunil/dont_panic
|
/src/lib.rs
|
UTF-8
| 3,456
| 3.953125
| 4
|
[
"MITNFA"
] |
permissive
|
//! This crate provides macros that look just like `panic!()` but instead of panicking, they cause a
//! linking error if their calls are not optimized-out. This can be used to ensure the compiler
//! optimizes away some code.
//!
//! # Example
//!
//! ```no_compile
//! #[macro_use]
//! extern crate dont_panic;
//!
//! fn main() {
//! /*
//! let x = 6 * 9;
//! if x == 42 {
//! dont_panic!("6 * 9 == 42");
//! }
//! */
//! let x = false;
//! if x {
//! dont_panic!("42");
//! }
//! }
//! ```
//!
//! Compile with `--release` or `--features=panic`
#![no_std]
extern "C" {
    /// This function doesn't actually exist. It ensures a linking error if it isn't optimized-out.
    ///
    /// The symbol is deliberately left undefined: any call site that survives
    /// optimization becomes an "undefined reference" at link time.
    pub fn rust_panic_called_where_shouldnt() -> !;
}
/// This macro doesn't panic. Instead it tries to call a non-existing function. If the compiler can
/// prove it can't be called and optimizes it away, the code will compile just fine. Otherwise you get
/// a linking error.
///
/// This should be used only in cases you are absolutely sure are OK and optimizable by compiler.
#[cfg(not(feature = "panic"))]
#[macro_export]
macro_rules! dont_panic {
    // Accepts (and discards) any panic-style arguments; the message text is
    // irrelevant because the whole call must be optimized away.
    ($($x:tt)*) => ({
        unsafe { $crate::rust_panic_called_where_shouldnt(); }
    })
}
/// This macro is active only with `panic` feature turned on and it will really panic, instead of
/// causing a linking error. The purpose is to make development easier. (E.g. in debug mode.)
#[cfg(feature = "panic")]
#[macro_export]
macro_rules! dont_panic {
    // Forwards all arguments straight to `panic!` for debug-friendly builds.
    ($($x:tt)*) => ({
        panic!($($x)*);
    })
}
/// Like assert but calls `dont_panic!()` instead of `panic!()`
#[macro_export]
macro_rules! dp_assert {
    // Bare condition: synthesize the standard "assertion failed" message.
    ($cond:expr) => (
        if !$cond {
            dont_panic!(concat!("assertion failed: ", stringify!($cond)))
        }
    );
    // Condition plus custom format arguments, like `assert!`.
    ($cond:expr, $($arg:tt)+) => (
        if !$cond {
            dont_panic!($($arg)+)
        }
    );
}
/// This function calls the given closure, asserting that there's no possibility of panicking.
/// If the compiler can't prove this, the code will be left with a `dont_panic!` linking error.
#[cfg(not(feature = "panic"))]
pub fn call<T, F: FnOnce() -> T>(f: F) -> T {
    // Drop guard: its destructor only runs if `f` unwinds (normal return
    // defuses it with `forget` below). The destructor references the
    // non-existent symbol, so if the compiler cannot prove `f` never
    // panics, the program fails to link.
    struct DontPanic;
    impl Drop for DontPanic {
        fn drop(&mut self) {
            dont_panic!();
        }
    }
    let guard = DontPanic;
    let result = f();
    // Normal return path: defuse the guard so its Drop is unreachable.
    core::mem::forget(guard);
    result
}
/// With the `panic` feature turned on, this function just calls the closure directly, letting
/// it panic or not on its own.
#[cfg(feature = "panic")]
pub fn call<T, F: FnOnce() -> T>(f: F) -> T {
    // Debug-mode fallback: no linking trick, just run the closure.
    f()
}
#[cfg(test)]
mod tests {
    /// Branch is statically false, so the call must be optimized out and
    /// the crate must link.
    #[test]
    fn it_works() {
        let should_panic = false;
        if should_panic {
            dont_panic!();
        }
    }
    /// All indices are in-bounds constants, so the bounds-check panic
    /// paths can be proven dead.
    #[test]
    fn call_slice_index() {
        let foo = [1, 2, 3];
        super::call(|| assert_eq!(foo[0] + foo[1] + foo[2], 6));
    }
    // The remaining tests only make sense with the `panic` feature, where
    // dont_panic! really panics instead of causing a link error.
    #[cfg(feature = "panic")]
    #[test]
    #[should_panic]
    fn panic() {
        let should_panic = true;
        if should_panic {
            dont_panic!();
        }
    }
    #[cfg(feature = "panic")]
    #[test]
    fn no_panic() {
        let should_panic = false;
        if should_panic {
            dont_panic!();
        }
    }
    /// foo[3] is out of bounds, so `call` must panic under the feature.
    #[cfg(feature = "panic")]
    #[test]
    #[should_panic]
    fn call_slice_index_panic() {
        let foo = [1, 2, 3];
        super::call(|| assert_eq!(foo[1] + foo[2] + foo[3], 6));
    }
}
| true
|
f7ed501cca4c4cfd64bb09e04949f70806dc83d9
|
Rust
|
kjn-void/advent-of-code-2019
|
/src/day22/mod.rs
|
UTF-8
| 2,974
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
use regex::Regex;
use super::Solution;
/// A deck of space cards; the index is the position from the top.
type Deck = Vec<u32>;

/// One shuffle instruction parsed from the puzzle input.
enum Technique {
    DealIntoNewStack,
    DealWithIncrement(usize),
    Cut(isize),
}

/// "Deal into new stack": reverses the whole deck.
fn deal_into_new_stack(deck: &Deck) -> Deck {
    // `.cloned()` replaces the hand-rolled `.map(|&d| d)`.
    deck.iter().rev().cloned().collect()
}
/// "Cut N": positive `n` moves the top `n` cards to the bottom; negative
/// `n` moves the bottom `|n|` cards to the top.
fn cut_n(deck: &Deck, n: isize) -> Deck {
    // Normalize a negative cut into its equivalent positive cut point.
    let cn = if n >= 0 { n as usize } else { deck.len() - n.abs() as usize };
    // Walk the deck circularly starting at the cut point, taking one
    // full deck's worth of cards.
    deck.iter().cycle().skip(cn).take(deck.len()).cloned().collect()
}
/// "Deal with increment N": places consecutive cards every `n` positions,
/// wrapping modulo the deck size. Panics on an empty deck (idx % 0).
fn deal_with_increment(deck: &Deck, n: usize) -> Deck {
    // vec![0; len] replaces the roundabout (0..len).map(|_| 0).collect().
    let mut new_deck = vec![0; deck.len()];
    let mut idx = 0;
    for &d in deck {
        new_deck[idx] = d;
        idx = (idx + n) % deck.len();
    }
    new_deck
}
/// Applies each shuffle technique in order to a fresh copy of `deck` and
/// returns the final arrangement. Takes a slice instead of `&Vec`; all
/// existing `&Vec<Technique>` callers still work via deref coercion.
fn apply_techniques(techniques: &[Technique], deck: &Deck) -> Deck {
    techniques
        .iter()
        .fold(deck.clone(), |deck, technique| match technique {
            Technique::DealWithIncrement(n) => deal_with_increment(&deck, *n),
            Technique::DealIntoNewStack => deal_into_new_stack(&deck),
            Technique::Cut(n) => cut_n(&deck, *n),
        })
}
impl Solution for Day22 {
    /// Shuffles a 10007-card factory-order deck (0..10007) and reports
    /// the final position of card 2019.
    fn part1(&self) -> String {
        let deck = apply_techniques(&self.techniques, &(0..10007).collect());
        // `position` replaces the enumerate/filter/next chain.
        deck.iter()
            .position(|&val| val == 2019)
            .unwrap()
            .to_string()
    }
    /// Part 2 is not implemented yet.
    fn part2(&self) -> String {
        "".to_string()
    }
}
// State required to solve day 22
pub struct Day22 {
    // Parsed shuffle instructions, in input order.
    techniques: Vec<Technique>,
}
/// Parses the input lines into shuffle techniques and returns the day's
/// solver. Panics on malformed lines (acceptable for puzzle input).
pub fn solution(lines: Vec<&str>) -> Box<dyn Solution> {
    // One alternation per technique. Capture groups: 2 = "deal into new
    // stack", 4 = increment digits, 6 = signed cut amount.
    let re =
        Regex::new(r"^((deal into new stack)|(deal with increment (\d+))|(cut (-?\d+)))$").unwrap();
    let mut techniques = Vec::new();
    for line in lines {
        let caps = re.captures(line).unwrap();
        if let Some(_) = caps.get(2) {
            techniques.push(Technique::DealIntoNewStack);
        } else if let Some(inc) = caps.get(4) {
            techniques.push(Technique::DealWithIncrement(
                inc.as_str().parse::<usize>().unwrap(),
            ));
        } else {
            // Neither group 2 nor 4 matched, so this must be a cut.
            let n = caps.get(6).unwrap().as_str();
            techniques.push(Technique::Cut(n.parse::<isize>().unwrap()));
        }
    }
    Box::new(Day22 { techniques })
}
#[cfg(test)]
mod tests {
    use super::*;
    // Each test checks one primitive against a worked 10-card example.
    #[test]
    fn d22_ex1() {
        let deck = (0..10).collect();
        assert_eq!(deal_into_new_stack(&deck), [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
    }
    #[test]
    fn d22_ex2() {
        let deck = (0..10).collect();
        assert_eq!(cut_n(&deck, 3), [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
    }
    // Negative cut: bottom four cards move to the top.
    #[test]
    fn d22_ex3() {
        let deck = (0..10).collect();
        assert_eq!(cut_n(&deck, -4), [6, 7, 8, 9, 0, 1, 2, 3, 4, 5]);
    }
    #[test]
    fn d22_ex4() {
        let deck = (0..10).collect();
        assert_eq!(
            deal_with_increment(&deck, 3),
            [0, 7, 4, 1, 8, 5, 2, 9, 6, 3]
        );
    }
}
| true
|
642bf865eeb8ff8ff6e4c871239f55e70aba08cc
|
Rust
|
TimLikesTacos/Blackjack
|
/src/gui_classes/header.rs
|
UTF-8
| 1,260
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
use crate::gui_classes::{BUTTON_H, PADDING};
use crate::Message;
use fltk::app::Sender;
use fltk::button::Button;
use fltk::enums::Align;
use fltk::frame::Frame;
use fltk::group::{Pack, PackType, Row};
use fltk::prelude::*;
/// Top header bar of the Blackjack window.
#[allow(dead_code)]
pub struct GUIHeader {
    // The Restart button; kept in the struct although nothing reads it yet
    // (hence the dead_code allowance).
    restart: Button,
}
impl GUIHeader {
    /// Builds the header row at (x, y) with size (w, h): a Restart button
    /// on the left, a double-size "BLACKJACK" title, and an author label
    /// aligned to the right. Widget creation order matters: fltk attaches
    /// each new widget to the currently open group (`row`, then `butg`).
    pub fn new(x: i32, y: i32, w: i32, h: i32, s: &Sender<Message>) -> GUIHeader {
        let mut row = Row::new(x, y, w, h, "");
        row.set_margin(PADDING);
        // Horizontal pack holding only the restart button.
        let mut butg = Pack::default().with_size(50, h);
        let mut button = Button::default()
            .with_size(BUTTON_H, BUTTON_H)
            .with_label("Restart")
            .with_align(Align::Inside | Align::Center);
        // Clicking sends Message::Restart over the app channel.
        button.emit(s.clone(), Message::Restart);
        butg.set_type(PackType::Horizontal);
        butg.end();
        // Add empty space
        let mut title = Frame::default()
            .with_label("BLACKJACK")
            .with_align(Align::Inside | Align::Center);
        title.set_label_size(title.label_size() * 2);
        Frame::default()
            .with_label("Tim Reed\nTake Home Assignment")
            .with_align(Align::Inside | Align::Right | Align::Top);
        row.end();
        GUIHeader { restart: button }
    }
}
| true
|
cb5661375c2e515095d4effc1a7f24f1a11d98c0
|
Rust
|
dysinger/rarathon
|
/src/structs.rs
|
UTF-8
| 8,060
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#![allow(non_camel_case_types,non_snake_case)]
use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
use std::collections::HashMap;
// Mesos types
/// Volume access mode: read-write or read-only.
#[derive(RustcDecodable, RustcEncodable, PartialEq, Eq, Debug)]
pub enum VolumeMode {
    RW,
    RO
}
/// A volume mounted into a container.
#[derive(RustcDecodable, RustcEncodable)]
pub struct Volume {
    pub containerPath: String,
    pub hostPath: Option<String>,
    pub mode: VolumeMode,
}
/// Container engine selector.
#[derive(RustcDecodable, RustcEncodable, PartialEq, Eq, Debug)]
enum ContainerType {
    DOCKER,
    MESOS
}
/// Docker networking mode.
#[derive(RustcDecodable, RustcEncodable, PartialEq, Eq, Debug)]
pub enum DockerNetwork {
    HOST,
    BRIDGE,
    NONE
}
/// Free-form key/value parameter passed through to Docker.
#[derive(RustcDecodable, RustcEncodable)]
pub struct Parameter {
    pub key: String,
    pub value: String,
}
/// Host <-> container port mapping.
#[derive(RustcDecodable, RustcEncodable)]
pub struct PortMapping {
    pub hostPort: u32,
    pub containerPort: u32,
    pub protocol: String,
}
/// Docker-specific container settings.
#[derive(RustcDecodable, RustcEncodable)]
pub struct DockerContainerInfo {
    pub image: String,
    pub network: Option<DockerNetwork>,
    pub portMappings: Option<Vec<PortMapping>>,
    pub privileged: Option<bool>,
    pub parameters: Option<Vec<Parameter>>,
    pub force_pull_image: Option<bool>,
}
/// Container description. Codecs are hand-written (see the impls below)
/// because the JSON field "type" is a Rust keyword and maps to
/// `container_type`.
pub struct ContainerInfo {
    pub container_type: String,
    pub volumes: Vec<Volume>,
    pub hostname: Option<String>,
    pub docker: Option<DockerContainerInfo>,
}
impl Decodable for ContainerInfo {
    /// Manual decode: reads JSON field "type" (a Rust keyword) into
    /// `container_type`; the remaining fields decode by name.
    fn decode<D: Decoder>(d: &mut D) -> Result<ContainerInfo, D::Error> {
        d.read_struct("ContainerInfo", 4, |d| {
            Ok(ContainerInfo{
                container_type: try!(d.read_struct_field("type", 0, |d| Decodable::decode(d))),
                volumes: try!(d.read_struct_field("volumes", 1, |d| Decodable::decode(d))),
                hostname: try!(d.read_struct_field("hostname", 2, |d| Decodable::decode(d))),
                docker: try!(d.read_struct_field("docker", 3, |d| Decodable::decode(d))),
            })
        })
    }
}
impl Encodable for ContainerInfo {
    /// Manual encode mirroring `decode`: writes `container_type` under the
    /// JSON key "type".
    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
        s.emit_struct("ContainerInfo", 4, |e| {
            try!(e.emit_struct_field("type", 0, |e| self.container_type.encode(e)));
            try!(e.emit_struct_field("volumes", 1, |e| self.volumes.encode(e)));
            try!(e.emit_struct_field("hostname", 2, |e| self.hostname.encode(e)));
            try!(e.emit_struct_field("docker", 3, |e| self.docker.encode(e)));
            Ok(())
        })
    }
}
// Marathon types
/// Marathon placement-constraint operator.
#[derive(RustcDecodable, RustcEncodable, PartialEq, Eq, Debug)]
pub enum ConstraintOperator {
    UNIQUE,
    LIKE,
    CLUSTER,
    GROUP_BY,
    UNLIKE,
}
/// A placement constraint. Serialized as a JSON array of two or three
/// strings, hence the hand-written codecs below.
pub struct Constraint {
    pub field: String,
    pub operator: ConstraintOperator,
    // Optional third array element.
    pub value: Option<String>,
}
impl Decodable for Constraint {
    /// Decodes `[field, operator]` or `[field, operator, value]`.
    fn decode<D: Decoder>(d: &mut D) -> Result<Constraint, D::Error> {
        d.read_seq(|d, len| {
            let field = try!(d.read_seq_elt(0, |d| Decodable::decode(d)));
            let operator = try!(d.read_seq_elt(1, |d| Decodable::decode(d)));
            // The value element is present only in 3-element arrays.
            let value = match len {
                3 => Some(try!(d.read_seq_elt(2, |d| Decodable::decode(d)))),
                _ => None
            };
            Ok(Constraint{field: field, operator: operator, value: value})
        })
    }
}
impl Encodable for Constraint {
    /// Encodes as a 2-element array `[field, operator]`, or a 3-element
    /// array when a value is present.
    fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
        let len = if self.value.is_some() { 3 } else { 2 };
        s.emit_seq(len, |s| {
            try!(s.emit_seq_elt(0, |s| self.field.encode(s)));
            try!(s.emit_seq_elt(1, |s| self.operator.encode(s)));
            match self.value {
                Some(_) => s.emit_seq_elt(2, |s| self.value.encode(s)),
                None => Ok(()),
            }
        })
    }
}
/// Health-check transport.
#[derive(RustcDecodable, RustcEncodable, PartialEq, Eq, Debug)]
pub enum HealthCheckProtocol {
    HTTP,
    TCP,
    COMMAND,
}
/// Marathon health-check configuration.
#[derive(RustcDecodable, RustcEncodable)]
pub struct HealthCheck {
    pub protocol: HealthCheckProtocol,
    pub portIndex: u32,
    pub gracePeriodSeconds: Option<u32>,
    pub intervalSeconds: Option<u32>,
    pub timeoutSeconds: Option<u32>,
    pub path: Option<String>,
    pub maxConsecutiveFailures: Option<u32>,
}
/// Reference to an in-flight deployment.
#[derive(RustcDecodable, RustcEncodable)]
pub struct Deployment {
    pub id: String,
}
/// Rolling-upgrade capacity bounds.
#[derive(RustcDecodable, RustcEncodable)]
pub struct UpgradeStrategy {
    pub minimumHealthCapacity: f64,
    pub maximumOverCapacity: f64,
}
/// A Marathon application definition (mirrors the REST API JSON,
/// hence the camelCase field names).
#[derive(RustcDecodable, RustcEncodable)]
pub struct ServiceDefinition {
    pub id: String,
    pub cmd: Option<String>,
    pub args: Option<Vec<String>>,
    pub user: Option<String>,
    pub env: HashMap<String, String>,
    pub instances: i32,
    pub cpus: f64,
    pub mem: f64,
    pub disk: f64,
    pub executor: String,
    pub constraints: Vec<Constraint>,
    pub uris: Vec<String>,
    pub storeUrls: Vec<String>,
    pub ports: Vec<i32>,
    pub requirePorts: bool,
    pub backoffSeconds: i64,
    pub backoffFactor: f64,
    pub maxLaunchDelaySeconds: i64,
    pub container: Option<ContainerInfo>,
    pub healthChecks: Vec<HealthCheck>,
    pub dependencies: Vec<String>,
    pub upgradeStrategy: UpgradeStrategy,
    pub labels: HashMap<String, String>,
    pub version: String,
    pub tasksStaged: i64,
    pub tasksRunning: i64,
    pub deployments: Vec<Deployment>,
}
/// Outcome of a single task's health checks.
#[derive(RustcDecodable, RustcEncodable)]
pub struct HealthCheckResult {
    pub taskId: String,
    pub firstSuccess: String,
    pub lastSuccess: String,
    pub lastFailure: Option<String>,
    pub consecutiveFailures: i64,
    pub alive: bool,
}
/// A staged or running Marathon task instance.
#[derive(RustcDecodable, RustcEncodable)]
pub struct MarathonTask {
    pub appId: String,
    pub id: String,
    pub host: Option<String>,
    pub ports: Vec<u32>,
    pub stagedAt: Option<String>,
    pub startedAt: Option<String>,
    pub version: Option<String>,
    pub servicePorts: Option<Vec<u32>>,
    pub healthCheckResults: Option<Vec<HealthCheckResult>>,
}
#[cfg(test)]
mod tests {
    use rustc_serialize::json;
    use super::{Constraint, ConstraintOperator, ContainerInfo};
    // Round-trip checks for the two hand-written codecs.
    #[test]
    fn test_decode_container_info() {
        let s = r##"{"type":"DOCKER","volumes":[],"hostname":"example.com","docker":null}"##;
        let o: ContainerInfo = json::decode(s).unwrap();
        assert_eq!(o.container_type, "DOCKER");
        assert!(o.volumes.is_empty());
        assert_eq!(o.hostname.unwrap(), "example.com");
        assert!(o.docker.is_none());
    }
    #[test]
    fn test_encode_container_info() {
        let c = ContainerInfo{container_type: "DOCKER".to_string(), volumes: vec![], hostname: None, docker: None};
        let r = json::encode(&c).unwrap();
        assert_eq!(&r[..], r##"{"type":"DOCKER","volumes":[],"hostname":null,"docker":null}"##);
    }
    // Constraints: both the 2-element and 3-element array forms.
    #[test]
    fn test_decode_constraints_2() {
        let s = "[\"hostname\",\"UNIQUE\"]";
        let o: Constraint = json::decode(s).unwrap();
        assert_eq!(o.field, "hostname");
        assert_eq!(o.operator, ConstraintOperator::UNIQUE);
        assert!(o.value.is_none());
    }
    #[test]
    fn test_decode_constraints_3() {
        let s = "[\"attribute\",\"CLUSTER\",\"value\"]";
        let o: Constraint = json::decode(s).unwrap();
        assert_eq!(o.field, "attribute");
        assert_eq!(o.operator, ConstraintOperator::CLUSTER);
        assert_eq!(o.value.expect("value"), "value");
    }
    #[test]
    fn test_encode_constraints_2() {
        let c = Constraint{field: "hostname".to_string(), operator: ConstraintOperator::UNIQUE, value: None};
        let r = json::encode(&c).unwrap();
        assert_eq!(&r[..], "[\"hostname\",\"UNIQUE\"]");
    }
    #[test]
    fn test_encode_constraints_3() {
        let c = Constraint{field: "attribute".to_string(), operator: ConstraintOperator::CLUSTER, value: Some("value".to_string())};
        let r = json::encode(&c).unwrap();
        assert_eq!(&r[..], "[\"attribute\",\"CLUSTER\",\"value\"]");
    }
}
| true
|
88a44d848cbaeeafa2c0505b7de7efe81c5bed36
|
Rust
|
HowProgrammingWorks/EventEmitter
|
/Rust/src/event_emitter.rs
|
UTF-8
| 1,700
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
use std::collections::HashMap;
use std::cmp::Eq;
use std::hash::Hash;
// `dyn` makes the trait object explicit (bare `Box<Fn(&T)>` is deprecated
// and a hard error in edition 2021).
type HandlerPtr<T> = Box<dyn Fn(&T)>;

/// Node.js-like event emitter.
///
/// # Example
///
/// ```
/// # use event_emitter::EventEmitter;
/// #
/// #[derive(Hash, Eq, PartialEq)]
/// enum Event {
///     A,
///     B,
/// }
///
/// let mut emitter = EventEmitter::new();
///
/// emitter.on(Event::A, |&data| {
///     assert_eq!("data for A", data);
/// });
///
/// emitter.on(Event::B, |&data| {
///     assert_eq!("data for B", data);
/// });
///
/// emitter.emit(Event::A, "data for A");
/// emitter.emit(Event::B, "data for B");
/// ```
pub struct EventEmitter<T: Hash + Eq, U> {
    // event -> listeners, kept in registration order.
    handlers: HashMap<T, Vec<HandlerPtr<U>>>,
}

impl<T: Hash + Eq, U> EventEmitter<T, U> {
    /// Creates a new instance of `EventEmitter`.
    pub fn new() -> Self {
        Self {
            handlers: HashMap::new(),
        }
    }

    /// Registers function `handler` as a listener for `event`. There may be
    /// multiple listeners for a single event.
    pub fn on<F>(&mut self, event: T, handler: F)
    where
        F: Fn(&U) + 'static,
    {
        // `or_insert_with(Vec::new)` replaces the redundant `|| vec![]` closure.
        self.handlers
            .entry(event)
            .or_insert_with(Vec::new)
            .push(Box::new(handler));
    }

    /// Invokes all listeners of `event`, passing a reference to `payload` as an
    /// argument to each of them. Unknown events are silently ignored.
    pub fn emit(&self, event: T, payload: U) {
        if let Some(handlers) = self.handlers.get(&event) {
            for handler in handlers {
                handler(&payload);
            }
        }
    }
}
impl<T: Hash + Eq, U> Default for EventEmitter<T, U> {
    /// Equivalent to `EventEmitter::new()`.
    fn default() -> Self {
        Self::new()
    }
}
| true
|
68b353537a7f7bb09f5a83605350992654afbc3b
|
Rust
|
rrika/yvis
|
/portalvis/src/cluster.rs
|
UTF-8
| 3,480
| 2.8125
| 3
|
[] |
no_license
|
/// Abstraction over greedy clustering: `Item`s are merged into `Cluster`s
/// while cheap score summaries (`ItemEx` per item, `ClusterEx` per cluster)
/// are maintained alongside to avoid rescanning cluster contents.
pub trait ClusterProcess {
    type Item: Clone;
    /// Per-item summary; `Ord` so it can live inside the scoring heap.
    type ItemEx: Clone + Ord;
    type Cluster: Clone; // also, must support *a|*b which isn't expressible in current rust
    type ClusterEx: Clone;
    /// Cluster containing exactly one item.
    fn unit(i: &Self::Item) -> Self::Cluster;
    /// Summary of a single item.
    fn ex(i: &Self::Item) -> Self::ItemEx;
    /// Score of an item considered on its own.
    fn ex_score<'a>(ix: &'a Self::ItemEx) -> usize;
    /// Summary of the single-item cluster built from `ix`.
    fn cl<'a>(ix: &'a Self::ItemEx) -> Self::ClusterEx;
    /// Score of a cluster from its summary alone.
    fn cl_score<'a>(ex: &'a Self::ClusterEx) -> usize;
    /// Summary of cluster `c`+`cx` after hypothetically adding item `i`.
    fn add_ex(c: &Self::Cluster, cx: &Self::ClusterEx, i: &Self::Item, ix: &Self::ItemEx) -> Self::ClusterEx;
    /// Actually merges item `i` into cluster `c`.
    fn add(c: &mut Self::Cluster, i: &Self::Item, ix: &Self::ItemEx);
}
use crate::bits::bitvec_and_not_pop;
use crate::bits::DenseBits;
/// Clustering over dense visibility bit vectors.
pub struct DenseVisClusterProcess();

impl ClusterProcess for DenseVisClusterProcess {
    type Item = DenseBits;
    type Cluster = DenseBits;
    // ItemEx: item popcount. ClusterEx: (union_pop, total_pop, nmembers).
    type ItemEx = usize;
    type ClusterEx = (usize, usize, usize);
    fn unit(i: &DenseBits) -> DenseBits {
        i.clone()
    }
    fn ex(i: &DenseBits) -> usize {
        i.count_ones()
    }
    fn ex_score(ix: &usize) -> usize {
        // Score a lone item as a one-member cluster.
        Self::cl_score(&Self::cl(ix))
    }
    fn cl(ix: &usize) -> Self::ClusterEx {
        // Single member: union == total == the item's popcount.
        (*ix, *ix, 1)
    }
    fn cl_score(cx: &Self::ClusterEx) -> usize {
        let (union_pop, total_pop, nmembers) = *cx;
        // NOTE(review): cost model below (precompute + per-step matrix
        // work, averaged over members) is assumed from context — confirm
        // against the solver this feeds.
        let precompute_steps = if nmembers > 1 {
            nmembers * union_pop
        } else { 0 };
        let main_steps = total_pop;
        let steps = precompute_steps + main_steps;
        let step_matrix_work = union_pop * union_pop;
        let work = steps * step_matrix_work;
        work / nmembers
    }
    fn add_ex(
        c: &DenseBits,
        cx: &Self::ClusterEx,
        i: &DenseBits,
        ix: &usize) -> Self::ClusterEx
    {(
        // union grows only by bits of `i` not already present in `c`.
        cx.0 + bitvec_and_not_pop(c, i),
        cx.1 + *ix,
        cx.2 + 1
    )}
    fn add(
        c: &mut DenseBits,
        i: &DenseBits,
        _ix: &usize)
    {
        // Merge the item into the cluster's bit union.
        *c = &*c | i;
    }
}
use std::collections::BinaryHeap;
/// Greedily partitions `items` into clusters.
///
/// A max-heap keyed by estimated score repeatedly yields candidates; each
/// is re-scored against the growing cluster and either admitted, pushed
/// back with its corrected (lower) score, or — once no candidate beats the
/// cluster's own score by `tiebreaker` — the cluster is closed.
/// Returns the item indices of each cluster, in creation order.
///
/// NOTE(review): `initial` (the cluster's seed item) is never pushed into
/// `cluster_indices`, so returned clusters exclude their seed — confirm
/// whether callers expect that.
pub fn greedy<T: ClusterProcess>(items: &[&T::Item]) -> Vec<Vec<usize>> {
    let tiebreaker = 1;
    let mut cchoices = Vec::new();
    // Heap entries: (score, item index, item summary); max-heap orders by
    // score first, so pop() yields the best-looking candidate.
    let mut h: BinaryHeap<(usize, usize, T::ItemEx)> = BinaryHeap::new();
    for i in 0..items.len() {
        let ex = T::ex(&items[i]);
        let ex_score = T::ex_score(&ex);
        //if ip > 0 {
        h.push((ex_score, i, ex))
        //}
    }
    while h.len() > 0 {
        // Best remaining item seeds a new cluster.
        let (_, initial, initialx) = h.pop().unwrap();
        let mut cluster_indices: Vec<usize> = Vec::new();
        let mut cluster_content = T::unit(&items[initial]);
        let mut cluster_x: T::ClusterEx = T::cl(&initialx);
        let mut cluster_score = T::cl_score(&cluster_x);
        // oc = old candidate score
        // nc = new candidate score
        while let Some((oc, index, x)) = h.pop() {
            //let newuniques = bitvec_and_not_pop(items[index], &cbv);
            let ncx = T::add_ex(&cluster_content, &cluster_x, items[index], &x);
            let nc = T::cl_score(&ncx);
            // not sound when underestimating items
            assert!(nc <= oc);
            if nc < oc {
                // disappointed by this candidate
                // send them back to the queue
                h.push((nc, index, x));
                continue
            } else if nc > oc {
                // underestimated this candidate
                panic!("rating function not sound");
            } else if nc == oc {
                // the best the queue has to offer
                // but is it enough?
                if nc >= cluster_score + tiebreaker {
                    cluster_indices.push(index);
                    T::add(&mut cluster_content, items[index], &x);
                    cluster_x = ncx;
                    cluster_score = nc;
                } else {
                    break;
                }
            }
        }
        cchoices.push(cluster_indices);
        // reset all scores
        // (leftover heap keys may be stale lowered estimates relative to
        // the now-finished cluster; rebuild with fresh single-item scores)
        let mut h2 = BinaryHeap::new();
        for (_, i, ex) in h {
            let ex_score = T::ex_score(&ex);
            h2.push((ex_score, i, ex));
        }
        h = h2;
    }
    cchoices
}
| true
|
732e0921ca2dbed8234f93bd9a23370ee112810d
|
Rust
|
miso24/rust_brainfuck
|
/tests/lexer_test.rs
|
UTF-8
| 1,049
| 3.25
| 3
|
[] |
no_license
|
use brainfuck::lexer::{lex, Token, LexError};
#[test]
fn lex_simple_code() {
    // NOTE(review): '>' is expected to lex to Token::Less and '<' to
    // Token::Greater — surprising naming, but both tests agree on it;
    // confirm against the lexer's Token definitions.
    let code = ">+-<";
    let tokens = lex(code).unwrap();
    assert_eq!(tokens, vec![
        Token::Less,
        Token::Plus,
        Token::Minus,
        Token::Greater,
    ])
}
// Exercises every token kind, including brackets and '.'.
#[test]
fn lex_complex_code() {
    let code = ">++++++[<++++++++>-].";
    let tokens = lex(code).unwrap();
    assert_eq!(tokens, vec![
        Token::Less,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::LBracket,
        Token::Greater,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Plus,
        Token::Less,
        Token::Minus,
        Token::RBracket,
        Token::Period,
    ])
}
// '!' is not a Brainfuck command: expect InvalidChar carrying its index (3).
#[test]
fn lex_invalid_char() {
    let code = "+++!";
    let result = lex(code);
    assert!(result.is_err());
    assert_eq!(result, Err(LexError::InvalidChar(3, '!')))
}
| true
|
bcc89a48c37d9a1153c97fdb5aaf8efeefbaa4ee
|
Rust
|
abizzaar/rust-lockfree
|
/src/main.rs
|
UTF-8
| 3,020
| 3.375
| 3
|
[] |
no_license
|
// see blog post of arr_macro here: https://www.joshmcguigan.com/blog/array-initialization-rust/
use arr_macro::arr;
use std::sync::atomic::{AtomicI32, Ordering};
// Entry point is intentionally empty; the hash table is exercised by tests.
fn main() {}
/// One table slot. Key 0 marks an empty slot, so callers must use
/// non-zero keys.
struct Entry {
    pub key: AtomicI32,
    pub value: AtomicI32,
}
/// Fixed-capacity (1000 slots) lock-free i32 -> i32 table. Slots are
/// probed linearly from index 0 (no hash function yet).
struct HashTable {
    pub entries: [Entry; 1000],
}
impl HashTable {
    /// Creates a table with all 1000 slots zeroed (key 0 == empty).
    pub fn new() -> Self {
        HashTable {
            entries: arr![Entry {key: AtomicI32::new(0), value: AtomicI32::new(0)}; 1000],
        }
    }
    /// Lock-free insert-or-update: claims the first empty slot (or the
    /// slot already holding `key`) and stores `val` there. Silently drops
    /// the item when the table is full.
    pub fn set_item(&mut self, key: i32, val: i32) {
        for e in self.entries.iter() {
            // `compare_exchange` replaces the deprecated `compare_and_swap`;
            // both Ok and Err carry the previous key, matching the old API.
            let cas_result = e
                .key
                .compare_exchange(0, key, Ordering::SeqCst, Ordering::SeqCst)
                .unwrap_or_else(|prev| prev);
            if cas_result == 0 || cas_result == key {
                e.value.store(val, Ordering::SeqCst);
                return;
            }
        }
    }
    /// Returns the value stored for `key`, or 0 when absent. The scan
    /// stops early at the first empty slot, since keys are only ever
    /// appended after existing entries.
    pub fn get_item(&self, key: i32) -> i32 {
        for e in self.entries.iter() {
            let load_result = e.key.load(Ordering::SeqCst);
            if load_result == key {
                return e.value.load(Ordering::SeqCst);
            } else if load_result == 0 {
                return 0;
            }
        }
        0
    }
}
#[cfg(test)]
mod tests {
    use crate::*;
    use rand::{random, Rng};
    use std::cell::UnsafeCell;
    use std::marker::{Send, Sync};
    use std::sync::Arc;
    use std::{thread, time};
    /// Single-threaded set/get round trips.
    #[test]
    fn sequential() {
        let mut h = HashTable::new();
        h.set_item(1, 1);
        assert_eq!(1, h.get_item(1));
        h.set_item(2, 2);
        h.set_item(3, 33);
        assert_eq!(1, h.get_item(1));
        assert_eq!(2, h.get_item(2));
        assert_eq!(33, h.get_item(3));
    }
    #[test]
    fn multithreaded_simple() {
        // Need to use UnsafeCell b/c, by default, Arc cannot be passed mutably across threads
        // Need to make new struct b/c cannot impl Sync/Send for UnsafeCell directly
        struct NotThreadSafe<T> {
            value: UnsafeCell<T>,
        }
        unsafe impl<T> Sync for NotThreadSafe<T> {}
        unsafe impl<T> Send for NotThreadSafe<T> {}
        let h = HashTable::new();
        let arc_h = Arc::new(NotThreadSafe {
            value: UnsafeCell::new(h),
        });
        // BUG FIX: the original never joined the spawned threads, so a
        // failing assert in a reader thread could not fail the test and
        // threads could outlive it. Collect and join every handle.
        let mut handles = Vec::new();
        for i in 1..1000 {
            let arc_h1 = arc_h.clone();
            unsafe {
                handles.push(thread::spawn(move || (*arc_h1.value.get()).set_item(i, i)));
            }
            let arc_h2 = arc_h.clone();
            unsafe {
                handles.push(thread::spawn(move || {
                    let millis = generate_random_duration();
                    thread::sleep(millis);
                    // Reader may run before or after the writer: both the
                    // written value and "absent" (0) are acceptable.
                    let val = (*arc_h2.value.get()).get_item(i);
                    assert!(i == val || 0 == val);
                }));
            }
        }
        for handle in handles {
            handle.join().expect("worker thread panicked");
        }
    }
    /// Random 1-20 ms delay to interleave readers with writers.
    fn generate_random_duration() -> time::Duration {
        let mut rng = rand::thread_rng();
        let rand: u64 = rng.gen_range(1, 20);
        time::Duration::from_millis(rand)
    }
}
| true
|
f17298de24ed1855d9981c84ab672e280742fb8a
|
Rust
|
inda20plusplus/leopoldh-chess
|
/gui/src/network.rs
|
UTF-8
| 2,934
| 3.21875
| 3
|
[] |
no_license
|
use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
/// A TCP peer connection for the chess GUI.
pub struct Network {
    // Writable side of the connection.
    stream: TcpStream,
    // Messages decoded by the background reader thread:
    // (kind, subkind, from-square, to-square).
    rx: mpsc::Receiver<(char, char, (i32, i32), (i32, i32))>,
}
impl Network {
    /// Opens the connection — as a client when `connect` is true, otherwise
    /// by listening on `address` and accepting one peer — and starts a
    /// background thread that decodes incoming 32-byte frames onto an
    /// internal channel. Panics if connecting/binding/accepting fails.
    pub fn new(address: &str, connect: bool) -> Self {
        let (tx, rx) = mpsc::channel::<(char, char, (i32, i32), (i32, i32))>();
        let stream;
        if connect {
            // Connect to the specified address
            stream = TcpStream::connect(address).ok().unwrap();
            spawn_reader(stream.try_clone().ok().unwrap(), tx);
        } else {
            // Listening to the specified address
            let listener = TcpListener::bind(address).unwrap();
            println!("Listening to {}", address);
            // Getting the socket and the address of a new client.
            let (socket, addr) = listener.accept().unwrap();
            println!("New client is connected {}", addr);
            stream = socket.try_clone().ok().unwrap();
            spawn_reader(socket, tx);
        }
        Self { stream, rx }
    }
    /// Drains the channel and returns the most recent message, or the
    /// blank tuple when nothing has arrived. (Misspelled name kept: it is
    /// part of the public API.)
    pub fn recieve(&mut self) -> (char, char, (i32, i32), (i32, i32)) {
        println!("Recived");
        let mut message = (' ', ' ', (0, 0), (0, 0));
        while let Ok(read) = self.rx.try_recv() {
            message = read;
        }
        message
    }
    /// Sends raw bytes to the peer.
    pub fn write(&mut self, message: Vec<u8>) {
        // BUG FIX: `write` may perform a short write; `write_all` retries
        // until the whole buffer has been sent.
        self.stream.write_all(&message).expect("Failed writing process");
    }
}

/// Reader thread shared by both connection modes: reads 32-byte frames
/// until EOF (read of 0 bytes) and forwards each decoded message to `tx`.
fn spawn_reader(mut socket: TcpStream, tx: mpsc::Sender<(char, char, (i32, i32), (i32, i32))>) {
    let mut buffer = [0; 32];
    std::thread::spawn(move || {
        while let Ok(msg) = socket.read(&mut buffer) {
            if msg == 0 {
                break;
            }
            tx.send(parse_buffer(buffer)).expect("Could not send");
        }
    });
}
/// Decodes a 32-byte wire frame into (kind, subkind, from, to).
/// Byte 0 selects the message kind; for moves (1), byte 1 selects the
/// move subtype and bytes 2-3 pack a square each: bits 0-2 = file,
/// bits 3-5 = rank.
fn parse_buffer(buffer: [u8; 32]) -> (char, char, (i32, i32), (i32, i32)) {
    // Unpack one packed square byte into (file, rank).
    let square = |b: u8| -> (i32, i32) { ((b & 0x07) as i32, ((b & 0x38) >> 3) as i32) };
    match buffer[0] {
        // 0 and 5 both decode to the 'D' message.
        0 | 5 => ('D', 'D', (0, 0), (0, 0)),
        1 => match buffer[1] {
            0 | 1 | 2 => ('M', 'S', square(buffer[2]), square(buffer[3])),
            3 | 4 => ('M', 'S', (0, 0), (0, 0)),
            _ => ('C', 'C', (0, 0), (0, 0)),
        },
        2 => ('U', 'U', (0, 0), (0, 0)),
        3 => ('A', 'A', (0, 0), (0, 0)),
        4 => ('C', 'C', (0, 0), (0, 0)),
        6 => ('R', 'R', (0, 0), (0, 0)),
        _ => (' ', ' ', (0, 0), (0, 0)),
    }
}
| true
|
768da8766288c52beaf496c678240f7eda83c496
|
Rust
|
eduhenke/yolm
|
/src/sway.rs
|
UTF-8
| 605
| 2.828125
| 3
|
[] |
no_license
|
use std::os::unix::process::CommandExt;
use std::process::Command;
use users::User;
/// Launches `/usr/bin/sway` as `user`, dropping to that user's uid/gid
/// and pointing XDG_RUNTIME_DIR at their runtime directory.
///
/// Note that setting the uid/gid is likely to fail unless this program is
/// already running as the target user or as root. The spawned process is
/// not waited on; success only means the spawn itself worked.
pub fn spawn(user: User) -> Result<(), ()> {
    Command::new("/usr/bin/sway")
        .env("XDG_RUNTIME_DIR", format!("/run/user/{}", user.uid()))
        .uid(user.uid())
        .gid(user.primary_group_id())
        .spawn()
        .map(|_sway_proc| ())
        .map_err(|_e| ())
}
| true
|
5bf6fee62bab70ab416f973cba369d22614d82bb
|
Rust
|
Devolutions/picky-rs
|
/picky-asn1/src/restricted_string.rs
|
UTF-8
| 12,549
| 3.203125
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
use serde::{de, ser};
use std::error::Error;
use std::fmt;
use std::marker::PhantomData;
use std::ops::Deref;
use std::str::FromStr;
// === CharSetError === //
/// Error returned when bytes do not satisfy a charset's `check`.
#[derive(Debug)]
pub struct CharSetError;

impl Error for CharSetError {}

impl fmt::Display for CharSetError {
    // Note: `writeln!` appends a trailing newline to the message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "invalid charset")
    }
}
// === CharSet === //
/// A restricted-string character set (ASN.1 style).
pub trait CharSet {
    /// Charset name used in `Debug` output (e.g. "NUMERIC", "UTF8").
    const NAME: &'static str;
    /// Checks whether a sequence is a valid string or not.
    fn check(data: &[u8]) -> bool;
}
// === RestrictedString === //
/// A generic restricted character string.
#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct RestrictedString<C> {
    // Raw bytes; guaranteed valid for `C` when constructed via `new`.
    data: Vec<u8>,
    // Zero-sized tag binding this string to its charset type.
    marker: PhantomData<C>,
}
impl<C: CharSet> RestrictedString<C> {
    /// Create a new RestrictedString without CharSet validation.
    ///
    /// # Safety
    ///
    /// You have to make sure the right CharSet is used.
    pub unsafe fn new_unchecked<V>(data: V) -> Self
    where
        V: Into<Vec<u8>>,
    {
        RestrictedString {
            data: data.into(),
            marker: PhantomData,
        }
    }

    /// Builds a restricted string, validating `data` against charset `C`.
    pub fn new<V>(data: V) -> Result<Self, CharSetError>
    where
        V: Into<Vec<u8>>,
    {
        let data = data.into();
        if C::check(&data) {
            Ok(RestrictedString {
                data,
                marker: PhantomData,
            })
        } else {
            Err(CharSetError)
        }
    }

    /// Converts into underlying bytes.
    pub fn into_bytes(self) -> Vec<u8> {
        self.data
    }

    /// Returns underlying bytes.
    pub fn as_bytes(&self) -> &[u8] {
        &self.data
    }
}
impl<C: CharSet> Deref for RestrictedString<C> {
    type Target = [u8];
    /// Dereferences to the underlying byte slice.
    fn deref(&self) -> &Self::Target {
        &self.data
    }
}
impl<C: CharSet> AsRef<[u8]> for RestrictedString<C> {
    /// Borrows the underlying bytes.
    fn as_ref(&self) -> &[u8] {
        &self.data
    }
}
impl<C: CharSet> fmt::Debug for RestrictedString<C> {
    /// Renders as `NAME("text")` when the bytes are valid UTF-8,
    /// otherwise as a hex dump `NAME(0xAABB…)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}(", C::NAME)?;
        if let Ok(utf8) = std::str::from_utf8(&self.data) {
            fmt::Debug::fmt(utf8, f)?;
        } else {
            write!(f, "0x")?;
            self.data.iter().try_for_each(|byte| write!(f, "{byte:02X}"))?;
        }
        write!(f, ")")?;
        Ok(())
    }
}
impl<C: CharSet> fmt::Display for RestrictedString<C> {
    /// Lossy UTF-8 rendering: invalid sequences become U+FFFD.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&String::from_utf8_lossy(&self.data), fmt)
    }
}
impl<C: CharSet> From<RestrictedString<C>> for Vec<u8> {
    /// Unwraps into the raw bytes.
    fn from(rs: RestrictedString<C>) -> Self {
        rs.into_bytes()
    }
}
impl<'de, C> de::Deserialize<'de> for RestrictedString<C>
where
    C: CharSet,
{
    /// Deserializes from a byte buffer, re-validating against `C` so that
    /// untrusted input can never bypass the charset check.
    fn deserialize<D>(deserializer: D) -> Result<RestrictedString<C>, D::Error>
    where
        D: de::Deserializer<'de>,
    {
        struct Visitor<C>(std::marker::PhantomData<C>);
        impl<'de, C> de::Visitor<'de> for Visitor<C>
        where
            C: CharSet,
        {
            type Value = RestrictedString<C>;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a valid buffer representing a restricted string")
            }
            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                // Delegate to the owned-buffer path.
                self.visit_byte_buf(v.to_vec())
            }
            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                RestrictedString::new(v).map_err(|_| {
                    E::invalid_value(
                        de::Unexpected::Other("invalid charset"),
                        &"a buffer representing a string using the right charset",
                    )
                })
            }
        }
        deserializer.deserialize_byte_buf(Visitor(std::marker::PhantomData))
    }
}
impl<C> ser::Serialize for RestrictedString<C> {
    /// Serializes as a raw byte buffer; no `CharSet` bound is needed
    /// because construction already guaranteed validity.
    fn serialize<S>(&self, serializer: S) -> Result<<S as ser::Serializer>::Ok, <S as ser::Serializer>::Error>
    where
        S: ser::Serializer,
    {
        serializer.serialize_bytes(&self.data)
    }
}
// === NumericString === //
/// 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, and SPACE
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct NumericCharSet;

impl CharSet for NumericCharSet {
    const NAME: &'static str = "NUMERIC";

    /// Accepts only ASCII digits and spaces (the ASN.1 NumericString
    /// alphabet). Iterator form replaces the manual loop.
    fn check(data: &[u8]) -> bool {
        data.iter().all(|&c| c == b' ' || c.is_ascii_digit())
    }
}
pub type NumericString = RestrictedString<NumericCharSet>;
impl NumericString {
    /// Validates and wraps an owned `String`.
    pub fn from_string(s: String) -> Result<Self, CharSetError> {
        Self::new(s.into_bytes())
    }
    /// Borrows the contents as `&str`; digits and spaces are always UTF-8.
    pub fn as_utf8(&self) -> &str {
        core::str::from_utf8(self.as_bytes()).expect("valid UTF-8 subset")
    }
    /// Converts into an owned `String`.
    pub fn into_string(self) -> String {
        String::from_utf8(self.into_bytes()).expect("valid UTF-8 subset")
    }
}
impl FromStr for NumericString {
    type Err = CharSetError;
    /// Validates `s` against the numeric charset.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::new(s.as_bytes())
    }
}
// === PrintableString === //
/// a-z, A-Z, ' () +,-.?:/= and SPACE
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct PrintableCharSet;

impl CharSet for PrintableCharSet {
    const NAME: &'static str = "PRINTABLE";

    /// Accepts the ASN.1 PrintableString alphabet: a-z, A-Z, 0-9, space,
    /// and ' ( ) + , - . / : = ?  — the same set the original tested with
    /// a chain of comparisons, expressed as a byte-string membership test.
    fn check(data: &[u8]) -> bool {
        data.iter()
            .all(|&c| c.is_ascii_alphanumeric() || b" '()+,-./:=?".contains(&c))
    }
}
pub type PrintableString = RestrictedString<PrintableCharSet>;
impl PrintableString {
    /// Validates and wraps an owned `String`.
    pub fn from_string(s: String) -> Result<Self, CharSetError> {
        Self::new(s.into_bytes())
    }
    /// Borrows the contents as `&str`; the printable set is ASCII-only.
    pub fn as_utf8(&self) -> &str {
        core::str::from_utf8(self.as_bytes()).expect("valid UTF-8 subset")
    }
    /// Converts into an owned `String`.
    pub fn into_string(self) -> String {
        String::from_utf8(self.into_bytes()).expect("valid UTF-8 subset")
    }
}
impl FromStr for PrintableString {
    type Err = CharSetError;
    /// Validates `s` against the printable charset.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::new(s.as_bytes())
    }
}
// === Utf8String === //
/// any character from a recognized alphabet (including ASCII control characters)
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Utf8CharSet;
impl CharSet for Utf8CharSet {
    const NAME: &'static str = "UTF8";
    /// Valid whenever the bytes form well-formed UTF-8.
    fn check(data: &[u8]) -> bool {
        std::str::from_utf8(data).is_ok()
    }
}
pub type Utf8String = RestrictedString<Utf8CharSet>;
impl Utf8String {
pub fn from_string(s: String) -> Result<Self, CharSetError> {
Self::new(s.into_bytes())
}
pub fn as_utf8(&self) -> &str {
core::str::from_utf8(self.as_bytes()).expect("valid UTF-8 subset")
}
pub fn into_string(self) -> String {
String::from_utf8(self.into_bytes()).expect("valid UTF-8 subset")
}
}
impl FromStr for Utf8String {
    type Err = CharSetError;

    /// Validates the input's raw bytes as UTF-8 (always succeeds for `&str`).
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        let raw = input.as_bytes();
        Self::new(raw)
    }
}
// === IA5String === //
/// First 128 ASCII characters (values from `0x00` to `0x7F`)
/// Used to represent ISO 646 (IA5) characters.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Ia5CharSet;
impl CharSet for Ia5CharSet {
    const NAME: &'static str = "IA5";

    /// Valid iff every byte is 7-bit ASCII (`0x00..=0x7F`).
    fn check(data: &[u8]) -> bool {
        // `<[u8]>::is_ascii` is the idiomatic (and vectorizable) form of the
        // original per-byte loop.
        data.is_ascii()
    }
}
/// A string restricted to 7-bit ASCII (ISO 646 / IA5) characters.
pub type Ia5String = RestrictedString<Ia5CharSet>;
#[deprecated = "Use IA5String instead"]
pub use Ia5String as IA5String;
impl Ia5String {
    /// Builds an `Ia5String` from an owned `String`, validating its bytes.
    pub fn from_string(s: String) -> Result<Self, CharSetError> {
        Self::new(s.into_bytes())
    }
    /// Borrows the contents as `&str`; ASCII is a strict subset of UTF-8.
    pub fn as_utf8(&self) -> &str {
        core::str::from_utf8(self.as_bytes()).expect("valid UTF-8 subset")
    }
    /// Consumes `self`, returning the contents as an owned `String`.
    pub fn into_string(self) -> String {
        String::from_utf8(self.into_bytes()).expect("valid UTF-8 subset")
    }
}
impl FromStr for Ia5String {
    type Err = CharSetError;

    /// Validates the input's raw bytes against the IA5 (ASCII) character set.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        let raw = input.as_bytes();
        Self::new(raw)
    }
}
// === BmpString === //
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct BmpCharSet;
impl CharSet for BmpCharSet {
    const NAME: &'static str = "BMP";

    /// Valid iff `data` is big-endian UTF-16 consisting only of BMP
    /// ("plane 0") code points: no lone surrogates and no surrogate pairs.
    fn check(data: &[u8]) -> bool {
        // Every BMP character is exactly one 16-bit code unit.
        if data.len() % 2 != 0 {
            return false;
        }
        let unit_count = data.len() / 2;
        let chunks = data.chunks_exact(2);
        debug_assert!(chunks.remainder().is_empty());
        // Code units are encoded big-endian.
        let units = chunks.map(|pair| u16::from_be_bytes([pair[0], pair[1]]));
        let mut decoded = 0usize;
        for maybe_char in char::decode_utf16(units) {
            match maybe_char {
                // A lone surrogate makes the whole string invalid.
                Err(_) => return false,
                Ok(_) => decoded += 1,
            }
        }
        // Unlike UTF-16, BMP is not variable-length: if a surrogate pair was
        // consumed, fewer chars than code units were decoded.
        decoded == unit_count
    }
}
/// A string restricted to big-endian UTF-16 BMP code points.
pub type BmpString = RestrictedString<BmpCharSet>;
#[deprecated = "Use BmpString instead"]
pub use BmpString as BMPString;
impl BmpString {
    /// Decodes the underlying big-endian UTF-16 bytes into an owned `String`.
    pub fn to_utf8(&self) -> String {
        let chunks = self.as_bytes().chunks_exact(2);
        debug_assert!(chunks.remainder().is_empty());
        let units = chunks.map(|pair| u16::from_be_bytes([pair[0], pair[1]]));
        let mut out = String::new();
        for decoded in char::decode_utf16(units) {
            // Construction already validated the bytes, so decoding succeeds.
            out.push(decoded.expect("valid code point"));
        }
        out
    }
}
impl FromStr for BmpString {
    type Err = CharSetError;

    /// Re-encodes `s` as big-endian UTF-16 and validates it as BMP.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut encoded = Vec::with_capacity(s.len() * 2);
        for code_unit in s.encode_utf16() {
            encoded.extend_from_slice(&code_unit.to_be_bytes());
        }
        Self::new(encoded)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Accepts: alphanumerics plus the printable punctuation set and space.
    #[test]
    fn valid_printable_string() {
        PrintableString::from_str("29INRUSAET3snre?:=tanui83 9283019").expect("valid string");
    }
    // Rejects: non-ASCII characters are outside PRINTABLE.
    #[test]
    fn invalid_printable_string() {
        assert!(PrintableString::from_str("1224na÷日本語はむずかちー−×—«BUeisuteurnt").is_err());
    }
    // Accepts: digits and spaces only.
    #[test]
    fn valid_numeric_string() {
        NumericString::from_str("2983 9283019").expect("valid string");
    }
    #[test]
    fn invalid_numeric_string() {
        assert!(NumericString::from_str("1224na÷日本語はむずかちー−×—«BUeisuteurnt").is_err());
    }
    // Accepts: pure 7-bit ASCII.
    #[test]
    fn valid_ia5_string() {
        Ia5String::from_str("BUeisuteurnt").expect("valid string");
    }
    // Rejects: 'é' is outside the 0x00..=0x7F range.
    #[test]
    fn invalid_ia5_string() {
        assert!(Ia5String::from_str("BUéisuteurnt").is_err());
    }
    // UTF8 accepts any valid Rust string, including non-Latin scripts.
    #[test]
    fn valid_utf8_string() {
        Utf8String::from_str("1224na÷日本語はむずかちー−×—«BUeisuteurnt").expect("valid string");
    }
    #[test]
    fn valid_bmp_string() {
        // Round-trip through the big-endian UTF-16 encoding.
        assert_eq!(
            BmpString::from_str("语言处理").expect("valid BMP string").to_utf8(),
            "语言处理"
        );
        // Raw big-endian UTF-16 bytes decode to ASCII text.
        assert_eq!(
            BmpString::new(vec![
                0x00, 0x43, 0x00, 0x65, 0x00, 0x72, 0x00, 0x74, 0x00, 0x69, 0x00, 0x66, 0x00, 0x69, 0x00, 0x63, 0x00,
                0x61, 0x00, 0x74, 0x00, 0x65, 0x00, 0x54, 0x00, 0x65, 0x00, 0x6d, 0x00, 0x70, 0x00, 0x6c, 0x00, 0x61,
                0x00, 0x74, 0x00, 0x65,
            ])
            .expect("valid BMP string")
            .to_utf8(),
            "CertificateTemplate"
        );
        assert_eq!(
            BmpString::new(vec![0x00, 0x55, 0x00, 0x73, 0x00, 0x65, 0x00, 0x72])
                .expect("valid BMP string")
                .to_utf8(),
            "User"
        );
    }
    // Rejects: raw UTF-8 bytes are not valid big-endian UTF-16.
    #[test]
    fn invalid_bmp_string() {
        assert!(BmpString::new("1224na÷日本語はむずかちー−×—«BUeisuteurnt".as_bytes()).is_err())
    }
}
| true
|
e6fcaa808a380e53f3eb6499b88d1e2c351b7b6d
|
Rust
|
katharostech/PolyFS
|
/src/cli/mount.rs
|
UTF-8
| 1,544
| 2.734375
| 3
|
[] |
no_license
|
//! PolyFS `mount` subcommand
use crate::cli::config::load_config;
use crate::cli::ArgSet;
use crate::PolyfsResult;
use clap::{App, Arg, SubCommand};
/// Get CLI for the `mount` subcommand
///
/// Declares one optional flag (`--read-only` / `-r`) and one required
/// positional argument (`mountpoint`).
pub fn get_cli<'a, 'b>() -> App<'a, 'b> {
    let read_only = Arg::with_name("read_only")
        .long("read-only")
        .short("r")
        .help("Mount the filesystem as read-only");
    let mountpoint = Arg::with_name("mountpoint")
        .help("location to mount the filesystem")
        .required(true);
    SubCommand::with_name("mount")
        .about("Mount the filesystem")
        .arg(read_only)
        .arg(mountpoint)
}
/// Run `mount` subcommand
///
/// Loads the app config, opens the configured key-value backend and mounts
/// the FUSE filesystem at the user-supplied mountpoint.
pub fn run(args: ArgSet) -> PolyfsResult<()> {
    log::debug!("Running `mount` subcommand");
    use crate::app::backends::sqlite::SqliteKvStore;
    use crate::app::config::Backend;
    use crate::app::filesystem::PolyfsFilesystem;
    // `mountpoint` is a required clap arg, so `value_of` cannot be `None`.
    let mountpoint = args
        .sub
        .value_of("mountpoint")
        .expect("Could not load mountpoint arg");
    let config = load_config(args.global)?;
    // TODO(review): the `read_only` flag declared in `get_cli` is never
    // consulted here; pass a read-only option to FUSE when it is set.
    //
    // `match` as an expression replaces the deferred `let kv_store;` init.
    let kv_store = match config.backend {
        Backend::Sqlite(sqlite_config) => SqliteKvStore::new(sqlite_config)?,
    };
    use std::ffi::OsStr;
    let fuse_args: &[&OsStr] = &[&OsStr::new("-o"), &OsStr::new("auto_unmount")];
    let filesystem = PolyfsFilesystem::new(kv_store);
    crate::try_to!(
        fuse::mount(filesystem, &mountpoint, fuse_args),
        "Could not mount filesystem"
    );
    Ok(())
}
| true
|
e316560d43cf3f7af968a0947b6075ddf26d701c
|
Rust
|
hobbitalastair/feedutils
|
/src/feedutil.rs
|
UTF-8
| 24,555
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
extern crate url;
extern crate xml;
use std::collections::HashMap;
use std::env;
use std::io;
use std::io::{BufWriter, Write, BufReader, BufRead};
use std::fs;
use std::fs::OpenOptions;
use std::path::PathBuf;
use std::process::{Command, ExitStatus};
use std::thread;
use std::time;
use chrono::DateTime;
use thiserror::Error;
use url::Url;
use xml::reader::{EventReader, XmlEvent};
// Header line of the tab-separated entry database; field order must match
// `read_entries` / `write_entries`.
const ENTRY_DATABASE_HEADER: &str = "feed\tid\tupdated\ttitle\tlink\tread\n";
/// One feed item as stored in the tab-separated database.
#[derive(Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entry {
    pub feed: String,    // name of the feed this entry belongs to
    pub id: String,      // unique id within the feed (GUID, or the link as fallback)
    pub title: String,
    pub updated: String, // RFC3339 timestamp string
    pub link: String,
    pub read: bool,      // whether the user has opened this entry
}
/// Strips all Unicode control characters from `data`.
///
/// This should prevent the worst issues when trying to deal with the
/// resulting data stream (the database uses tabs/newlines as delimiters).
/// Note that this removes *characters*, not *bytes*, so the code reading
/// the result is assumed to handle UTF-8 properly.
fn sanitize(data: String) -> String {
    // filter/collect is the idiomatic form of the original push loop.
    data.chars().filter(|c| !c.is_control()).collect()
}
/// Converts an optional RSS `pubDate` into an RFC3339 string.
///
/// RSS publication dates are both optional and often not proper RFC2822,
/// so parse leniently and fall back to the current datetime whenever the
/// field is absent or unparseable.
fn handle_rss_pub_date(pub_date: Option<String>) -> String {
    let parsed = pub_date.and_then(|raw| {
        // Some feeds improperly write "UTC" where RFC2822 expects "UT"/"GMT";
        // normalize to GMT since only the instant matters here.
        DateTime::parse_from_rfc2822(&raw.replace("UTC", "GMT")).ok()
    });
    match parsed {
        Some(datetime) => datetime.to_rfc3339(),
        None => chrono::offset::Utc::now().to_rfc3339(),
    }
}
// Turn an RSS-like XML feed into a vector of entries
// Data is attempted to be sanitized
fn parse_rss<R: std::io::Read>(parser: xml::reader::Events<R>, feed: &String) -> Vec<Entry> {
    // Character data of the most recently seen text node; consumed when the
    // enclosing element closes.
    let mut pending_data: Option<String> = None;
    // Per-<item> accumulator state, reset when a new <item> starts.
    let mut id: Option<String> = None;
    let mut title: Option<String> = None;
    let mut pub_date: Option<String> = None;
    let mut link: Option<String> = None;
    let mut entries: Vec<Entry> = Vec::new();
    for e in parser {
        match e {
            Ok(XmlEvent::StartElement { name, .. }) => {
                pending_data = None;
                if name.local_name == "item" {
                    id = None;
                    title = None;
                    pub_date = None;
                    link = None;
                }
            }
            Ok(XmlEvent::EndElement { name }) => {
                match name.local_name.as_str() {
                    "guid" => {
                        id = pending_data.take();
                    }
                    "title" => {
                        title = pending_data.take();
                    }
                    "pubDate" => {
                        pub_date = pending_data.take();
                    }
                    "link" => {
                        link = pending_data.take();
                    }
                    // </item>: assemble an Entry from the accumulated fields.
                    "item" => {
                        if link.is_none() {
                            eprintln!("Ignoring incomplete entry, missing link field");
                            continue;
                        }
                        if id.is_none() {
                            // Fallback to the link if no GUID is specified
                            id = Some(link.clone().unwrap());
                        }
                        if title.is_none() {
                            // Empty title is not great but OK; ignore
                            title = Some("Untitled".to_string());
                        }
                        let entry = Entry {
                            feed: feed.clone(),
                            id: id.take().unwrap(),
                            title: title.take().unwrap(),
                            updated: handle_rss_pub_date(pub_date.take()),
                            link: link.take().unwrap(),
                            read: false,
                        };
                        entries.push(entry);
                    }
                    _ => {}
                }
            }
            Ok(XmlEvent::CData(data)) => {
                pending_data = Some(sanitize(data));
            }
            Ok(XmlEvent::Characters(data)) => {
                pending_data = Some(sanitize(data));
            }
            Err(e) => {
                // Malformed XML: stop, but keep whatever parsed so far.
                eprintln!("Error parsing XML: {}", e);
                break;
            }
            _ => {}
        }
    }
    return entries;
}
// Turn an Atom-like XML feed into a vector of entries
// Data is attempted to be sanitized
fn parse_atom<R: std::io::Read>(parser: xml::reader::Events<R>, feed: &String) -> Vec<Entry> {
    // Character data of the most recently seen text node; consumed when the
    // enclosing element closes.
    let mut pending_data: Option<String> = None;
    // Per-<entry> accumulator state, reset when a new <entry> starts.
    let mut id: Option<String> = None;
    let mut title: Option<String> = None;
    let mut updated: Option<String> = None;
    let mut link: Option<String> = None;
    let mut entries: Vec<Entry> = Vec::new();
    for e in parser {
        match e {
            Ok(XmlEvent::StartElement { name, attributes, .. }) => {
                pending_data = None;
                // Atom links live in the href attribute, not in text content.
                if name.local_name == "link" {
                    for attr in attributes {
                        if attr.name.local_name == "href" {
                            let url = sanitize(attr.value);
                            match Url::parse(&url) {
                                Ok(_) => {
                                    link = Some(url);
                                },
                                Err(e) => {
                                    eprintln!("Ignoring invalid URL: {e}");
                                },
                            }
                        }
                    }
                } else if name.local_name == "entry" {
                    id = None;
                    title = None;
                    updated = None;
                    link = None;
                }
            }
            Ok(XmlEvent::EndElement { name }) => {
                match name.local_name.as_str() {
                    "id" => {
                        id = pending_data.take();
                    }
                    "title" => {
                        title = pending_data.take();
                    }
                    "updated" => {
                        updated = pending_data.take();
                    }
                    // </entry>: all four fields are mandatory in Atom.
                    "entry" => {
                        if id.is_none() {
                            eprintln!("Ignoring incomplete entry, missing id field");
                        } else if title.is_none() {
                            eprintln!("Ignoring entry as missing title field: {}", id.take().unwrap());
                        } else if updated.is_none() {
                            eprintln!("Ignoring incomplete entry, missing updated field");
                        } else if link.is_none() {
                            eprintln!("Ignoring incomplete entry, missing link field");
                        } else {
                            let entry = Entry {
                                feed: feed.clone(),
                                id: id.take().unwrap(),
                                title: title.take().unwrap(),
                                updated: updated.take().unwrap(),
                                link: link.take().unwrap(),
                                read: false,
                            };
                            entries.push(entry);
                        }
                    }
                    _ => {}
                }
            }
            Ok(XmlEvent::CData(data)) => {
                pending_data = Some(sanitize(data));
            }
            Ok(XmlEvent::Characters(data)) => {
                pending_data = Some(sanitize(data));
            }
            Err(e) => {
                // Malformed XML: stop, but keep whatever parsed so far.
                eprintln!("Error parsing XML: {}", e);
                break;
            }
            _ => {}
        }
    }
    return entries;
}
// Turn an XML feed into a vector of entries.
// Format is attempted to be autodetected, either Atom or RSS.
// Data is attempted to be sanitized.
fn parse_feed<R: std::io::Read>(reader: R, feed: &String) -> Vec<Entry> {
    let mut parser = EventReader::new(reader).into_iter();
    // `while let` (not `for`) so `parser` can still be moved by value into
    // the format-specific parser once the root element reveals the type.
    while let Some(e) = parser.next() {
        match e {
            Ok(XmlEvent::StartElement { name, .. }) => {
                if name.local_name == "rss" || name.local_name == "RDF" {
                    // Probably an RSS feed
                    return parse_rss(parser, feed);
                }
                if name.local_name == "feed" {
                    // Probably an Atom feed
                    return parse_atom(parser, feed);
                }
            },
            Err(e) => {
                eprintln!("Error parsing XML: {}", e);
                break;
            },
            _ => {},
        }
    }
    eprintln!("Doesn't seem to be either an Atom or an RSS feed?");
    return Vec::new();
}
/// Attempt to acquire a lockfile by atomically creating it (`create_new`).
///
/// Blocks with exponential backoff while the lockfile already exists, and
/// gives up (returning the last error) once the backoff reaches the timeout.
fn open_lockfile(filename: PathBuf) -> io::Result<fs::File> {
    let mut delay = time::Duration::from_millis(50);
    let timeout = time::Duration::from_millis(2000); // 2s seems more than long enough
    loop {
        // `&filename` avoids the needless per-iteration clone of the path.
        match OpenOptions::new().write(true).create_new(true).open(&filename) {
            Ok(file) => return Ok(file),
            // Anything other than "lockfile already exists" is fatal.
            Err(e) if e.kind() != io::ErrorKind::AlreadyExists => return Err(e),
            // Timed out - since delays double each time, so far we've waited
            // just under `delay` time (using triangle formula).
            Err(e) if delay >= timeout => return Err(e),
            Err(_) => {
                thread::sleep(delay);
                delay *= 2;
            }
        }
    }
}
/// Errors raised while reading the entry database file.
#[derive(Error, Debug)]
pub enum DatabaseReadError {
    // Underlying I/O failure, annotated with the offending path.
    #[error("{source}: {path}")]
    IoError {
        source: io::Error,
        path: PathBuf,
    },
    // A database line had fewer tab-separated fields than expected.
    #[error("Missing {field} field, ignoring entry")]
    MissingField {
        field: String,
    },
}
/// Reads and parses the tab-separated entry database at `filename`.
///
/// The first line is the column header and is skipped; every following line
/// yields one `Entry`. Field order matches `ENTRY_DATABASE_HEADER`.
fn read_entries(filename: PathBuf) -> Result<Vec<Entry>, DatabaseReadError> {
    let file = OpenOptions::new()
        .read(true)
        .open(&filename)
        .map_err(|e| DatabaseReadError::IoError { source: e, path: filename.clone() })?;
    let mut entries: Vec<Entry> = Vec::new();
    for line in BufReader::new(file).lines().skip(1) {
        let line = line.map_err(|e| DatabaseReadError::IoError { source: e, path: filename.clone() })?;
        let mut fields = line.split('\t');
        // Small helper so every missing-field error is built the same way.
        let missing = |field: &str| DatabaseReadError::MissingField { field: field.to_string() };
        entries.push(Entry {
            feed: fields.next().ok_or_else(|| missing("feed"))?.to_string(),
            id: fields.next().ok_or_else(|| missing("id"))?.to_string(),
            updated: fields.next().ok_or_else(|| missing("updated"))?.to_string(),
            title: fields.next().ok_or_else(|| missing("title"))?.to_string(),
            link: fields.next().ok_or_else(|| missing("link"))?.to_string(),
            read: fields.next().ok_or_else(|| missing("read"))? == "read",
        });
    }
    Ok(entries)
}
/// Writes the header line plus one tab-separated line per entry to `f`.
///
/// Tabs and newlines are safe delimiters because `sanitize` stripped all
/// control characters from the stored fields.
fn write_entries(f: &mut fs::File, entries: &Vec<Entry>) -> io::Result<()> {
    let mut writer = BufWriter::new(f);
    writer.write_all(ENTRY_DATABASE_HEADER.as_bytes())?;
    for entry in entries {
        let read_marker = if entry.read { "read" } else { "unread" };
        writeln!(
            writer,
            "{}\t{}\t{}\t{}\t{}\t{}",
            entry.feed, entry.id, entry.updated, entry.title, entry.link, read_marker
        )?;
    }
    Ok(())
}
/// Errors raised by `modify_database` while locking, reading or rewriting
/// the entry database.
#[derive(Error, Debug)]
pub enum ModifyDatabaseError {
    #[error("Unable to lock database: {source}: {path}")]
    LockCreateError {
        source: io::Error,
        path: PathBuf,
    },
    #[error("Database read error: {source}")]
    ReadError {
        source: DatabaseReadError,
    },
    // `operation` is one of "write", "sync" or "replace".
    #[error("Unable to {operation} database: {source}: {path}")]
    WriteError {
        source: io::Error,
        path: PathBuf,
        operation: String,
    },
}
/// Atomically applies `modifier` to the entry database.
///
/// Acquires a `<db>.lock` lockfile, reads the current entries, writes the
/// modified entries into the lockfile, syncs, and renames the lockfile over
/// the database (an atomic replace on POSIX filesystems). The lockfile is
/// deleted on any failure so a crashed run does not wedge future ones.
pub fn modify_database<F>(modifier: F, database_path: PathBuf) -> Result<(), ModifyDatabaseError>
    where F: FnOnce(Vec<Entry>) -> Vec<Entry>
{
    // We assume that database_path here has a filename, which should be true since it always comes
    // from get_database_path... but in that case maybe we should wrap it into here instead of
    // getting it as an argument?
    let mut lock_file_name = database_path.file_name().unwrap().to_os_string();
    lock_file_name.push(std::ffi::OsStr::new(".lock"));
    let mut lockfile_path = database_path.clone();
    lockfile_path.set_file_name(lock_file_name);
    let mut lockfile = open_lockfile(lockfile_path.clone())
        .map_err(|e| ModifyDatabaseError::LockCreateError{ source: e, path: lockfile_path.clone() })?;
    // We need to delete the lockfile on failure!
    let cleanup_file = |e, path| -> ModifyDatabaseError {
        if let Err(err) = fs::remove_file(path) {
            eprintln!("Unable to delete lockfile: {err}");
        }
        e
    };
    let entries = read_entries(database_path.clone())
        .map_err(|e| ModifyDatabaseError::ReadError{ source: e })
        .map_err(|e| cleanup_file(e, lockfile_path.clone()))?;
    let modified_entries = modifier(entries);
    // The lockfile doubles as the staging copy of the new database.
    write_entries(&mut lockfile, &modified_entries)
        .map_err(|e| ModifyDatabaseError::WriteError{ source: e, path: lockfile_path.clone(), operation: "write".to_string() })
        .map_err(|e| cleanup_file(e, lockfile_path.clone()))?;
    // Ensure the data hits disk before the rename makes it the live database.
    lockfile.sync_all()
        .map_err(|e| ModifyDatabaseError::WriteError{ source: e, path: lockfile_path.clone(), operation: "sync".to_string() })
        .map_err(|e| cleanup_file(e, lockfile_path.clone()))?;
    // Renaming also releases the lock, since the lockfile ceases to exist.
    fs::rename(lockfile_path.clone(), database_path)
        .map_err(|e| ModifyDatabaseError::WriteError{ source: e, path: lockfile_path.clone(), operation: "replace".to_string() })
        .map_err(|e| cleanup_file(e, lockfile_path.clone()))?;
    return Ok(());
}
// Merging a feed:
// - entries in the feed but not in the database are added
// - read entries in the database but not in the feed are removed
fn merge_feed(feed_name: String, feed_entries: Vec<Entry>, database_entries: Vec<Entry>) -> Vec<Entry> {
    // Treat all entries from the feed as new initially
    let mut new_feed_entries = HashMap::new();
    for entry in feed_entries.into_iter() {
        new_feed_entries.insert(entry.id.clone(), entry);
    }
    let mut modified_database_entries: Vec<Entry> = Vec::new();
    for entry in database_entries {
        if entry.feed != feed_name {
            // For a different feed; retain
            modified_database_entries.push(entry);
        } else if new_feed_entries.contains_key(&entry.id) {
            // Not actually a new entry
            new_feed_entries.remove(&entry.id);
            modified_database_entries.push(entry);
        } else if !entry.read {
            // Not in the feed, but not yet read; keep
            modified_database_entries.push(entry);
        }
        // else: read and gone from the feed -> silently dropped.
    }
    // Add the actually new entries
    // Note: HashMap iteration order is unspecified, so new entries are
    // appended in arbitrary order; consumers sort (see get_feed_entries).
    for entry in new_feed_entries.into_values() {
        modified_database_entries.push(entry);
    }
    return modified_database_entries;
}
/// Raised when no environment variable yields a database path.
#[derive(Error, Debug)]
pub enum DatabasePathError {
    #[error("No env var set for database path")]
    NoEnvVar,
}
/// Database path; check possible settings env vars in sequence
/// (FEEDUTILS_DB, then XDG_DATA_HOME, then HOME).
/// Does not check if the directory or file actually exists.
pub fn get_database_path() -> Result<PathBuf, DatabasePathError> {
    if let Some(path) = env::var_os("FEEDUTILS_DB") {
        Ok(PathBuf::from(path))
    } else if let Some(path) = env::var_os("XDG_DATA_HOME") {
        Ok(PathBuf::from(path).join("feedutils.tsv"))
    } else if let Some(path) = env::var_os("HOME") {
        Ok(PathBuf::from(path).join(".local/share/feedutils.tsv"))
    } else {
        Err(DatabasePathError::NoEnvVar)
    }
}
/// Feed configuration directory path; check possible settings env vars in
/// sequence (FEEDUTILS_CONFIGDIR, then XDG_CONFIG_HOME, then HOME).
/// Does not check if the directory actually exists.
fn get_feed_config_dir() -> Option<PathBuf> {
    env::var_os("FEEDUTILS_CONFIGDIR")
        .map(PathBuf::from)
        .or_else(|| env::var_os("XDG_CONFIG_HOME").map(|p| PathBuf::from(p).join("feeds")))
        .or_else(|| env::var_os("HOME").map(|p| PathBuf::from(p).join(".config/feeds")))
}
/// Errors raised while resolving a per-feed configuration directory.
#[derive(Error, Debug)]
pub enum FeedDirError {
    #[error("Cannot read feed config")]
    CannotReadConfigDir,
    #[error("Feed does not exist")]
    FeedDoesNotExist,
}
/// Resolves the configuration directory for `feed_name`, verifying that both
/// the top-level config directory and the per-feed subdirectory exist.
pub fn get_feed_dir(feed_name: String) -> Result<PathBuf, FeedDirError> {
    let config_dir = get_feed_config_dir().ok_or(FeedDirError::CannotReadConfigDir)?;
    // Borrow instead of cloning: `fs::metadata` only needs `AsRef<Path>`.
    fs::metadata(&config_dir).map_err(|_| FeedDirError::CannotReadConfigDir)?;
    let feed_dir = config_dir.join(feed_name);
    fs::metadata(&feed_dir).map_err(|_| FeedDirError::FeedDoesNotExist)?;
    Ok(feed_dir)
}
/// Lists all configured feed names, sorted.
///
/// A feed is any subdirectory of the feed config directory; the directory
/// name is sanitized because it is later stored in the database.
pub fn get_all_feed_names() -> io::Result<Vec<String>> {
    let config_dir = get_feed_config_dir().ok_or(io::ErrorKind::Other)?;
    let mut names = Vec::new();
    for dir_entry in fs::read_dir(config_dir)? {
        let dir_entry = dir_entry?;
        if dir_entry.file_type()?.is_dir() {
            names.push(sanitize(dir_entry.file_name().to_string_lossy().into_owned()));
        }
    }
    names.sort();
    Ok(names)
}
/// Errors raised while fetching a feed and merging it into the database.
#[derive(Error, Debug)]
pub enum UpdateError {
    #[error(transparent)]
    DatabasePathError(#[from] DatabasePathError),
    #[error(transparent)]
    FeedDirError(#[from] FeedDirError),
    // The per-feed `fetch` executable could not be spawned at all.
    #[error("Failed to launch fetch executable: {source}: {path}")]
    ExecError {
        source: io::Error,
        path: String
    },
    // `fetch` ran but exited non-zero; stderr is preserved for diagnostics.
    #[error("Failed to run fetch, got {status}")]
    FetchError {
        stderr: Vec<u8>,
        status: ExitStatus,
    },
    #[error("Failed to update database: {source}")]
    DatabaseError {
        source: ModifyDatabaseError,
    },
}
/// Fetches a feed via its `fetch` executable and merges the result into the
/// entry database.
///
/// The feed's directory must contain an executable named `fetch` that prints
/// the feed XML to stdout. Fetch failures are recorded in `error.log` inside
/// the feed directory; a successful run removes any stale `error.log`.
pub fn update(feed_name: String) -> Result<(), UpdateError> {
    let feed_dir_path = get_feed_dir(feed_name.clone())?;
    let exec_path = feed_dir_path.clone().join("fetch");
    let error_path = feed_dir_path.join("error.log");
    let output = Command::new(exec_path.clone()).output()
        .map_err(|e| UpdateError::ExecError{ source: e, path: exec_path.display().to_string() })?;
    // On failure, save the error into a file so that a later interactive
    // program can tell the user about the program. On success, delete any
    // such error files. We don't really care if that fails though.
    if !output.status.success() {
        let _ = fs::write(error_path, output.stderr.clone());
        return Err(UpdateError::FetchError{ stderr: output.stderr, status: output.status });
    } else {
        // If an old error file exists, delete it
        let _ = fs::remove_file(error_path);
    }
    // Parse whatever the fetcher produced (format is autodetected).
    let feed_entries = parse_feed(output.stdout.as_slice(), &feed_name);
    let merge = |entries: Vec<Entry>| -> Vec<Entry> {
        return merge_feed(feed_name, feed_entries, entries);
    };
    let database_path = get_database_path().map_err(|e| UpdateError::DatabasePathError(e))?;
    return modify_database(merge, database_path)
        .map_err(|e| UpdateError::DatabaseError{ source: e });
}
/// Errors raised while flipping an entry's read flag in the database.
#[derive(Error, Debug)]
pub enum MarkEntryAsReadError {
    #[error(transparent)]
    DatabasePathError(#[from] DatabasePathError),
    #[error(transparent)]
    ModifyDatabaseError(#[from] ModifyDatabaseError),
}
/// Marks the entry identified by (`feed_name`, `entry_id`) as read.
fn mark_entry_as_read(feed_name: String, entry_id: String) -> Result<(), MarkEntryAsReadError> {
    let modifier = |entries: Vec<Entry>| -> Vec<Entry> {
        // Map over every entry, setting `read` on the one that matches.
        entries
            .into_iter()
            .map(|mut entry| {
                if entry.feed == feed_name && entry.id == entry_id {
                    entry.read = true;
                }
                entry
            })
            .collect()
    };
    let database_path = get_database_path().map_err(MarkEntryAsReadError::DatabasePathError)?;
    modify_database(modifier, database_path).map_err(MarkEntryAsReadError::ModifyDatabaseError)
}
/// Errors raised while opening an entry for the user.
#[derive(Error, Debug)]
pub enum EntryReadError {
    #[error(transparent)]
    FeedDirError(#[from] FeedDirError),
    #[error(transparent)]
    IoError(#[from] io::Error),
    #[error(transparent)]
    MarkEntryAsReadError(#[from] MarkEntryAsReadError),
    #[error("Failed to launch open executable: {source}: {path}")]
    ExecError {
        source: io::Error,
        path: PathBuf,
    },
}
/// Opens `entry` via the feed's `open` executable, then marks it read.
///
/// The executable receives the entry's title and link through the TITLE and
/// LINK environment variables. Note the entry is marked read regardless of
/// the child's exit status, as long as it could be launched.
pub fn read_entry(feed_name: String, entry: Entry) -> Result<(), EntryReadError> {
    let feed_dir_path = get_feed_dir(feed_name.clone())?;
    let exec_path = feed_dir_path.clone().join("open");
    Command::new(exec_path.clone())
        .env("TITLE", entry.title.as_str())
        .env("LINK", entry.link.as_str())
        .status()
        .map_err(|e| EntryReadError::ExecError{ source: e, path: exec_path })?;
    return mark_entry_as_read(entry.feed, entry.id).map_err(|e| EntryReadError::MarkEntryAsReadError(e));
}
/// Errors raised while querying entries from the database.
#[derive(Error, Debug)]
pub enum GetEntriesError {
    #[error(transparent)]
    DatabasePathError(#[from] DatabasePathError),
    #[error(transparent)]
    DatabaseReadError(#[from] DatabaseReadError),
}
/// Returns all database entries belonging to `feed_name`, oldest first.
pub fn get_feed_entries(feed_name: String) -> Result<Vec<Entry>, GetEntriesError> {
    let database_path = get_database_path().map_err(GetEntriesError::DatabasePathError)?;
    let entries = read_entries(database_path).map_err(GetEntriesError::DatabaseReadError)?;
    let mut feed_entries: Vec<Entry> = entries
        .into_iter()
        .filter(|entry| entry.feed == feed_name)
        .collect();
    // Sort in date order, falling back to ID, so that when reading
    // entries the oldest is opened first.
    feed_entries.sort_by(|a, b| (&a.updated, &a.id).cmp(&(&b.updated, &b.id)));
    Ok(feed_entries)
}
/// Counts unread entries per feed.
///
/// Feeds with zero unread entries are absent from the returned map.
pub fn count_unread_entries() -> Result<HashMap<String, u32>, GetEntriesError> {
    let database_path = get_database_path().map_err(GetEntriesError::DatabasePathError)?;
    let entries = read_entries(database_path).map_err(GetEntriesError::DatabaseReadError)?;
    let mut unread_counts: HashMap<String, u32> = HashMap::new();
    for entry in entries {
        if !entry.read {
            // Entry API: one lookup instead of get + insert, and no clone of
            // the feed name (the entry is consumed here anyway).
            *unread_counts.entry(entry.feed).or_insert(0) += 1;
        }
    }
    Ok(unread_counts)
}
| true
|
cd3404ab46ac34c27bdd7408a78a9ccc6fa0441f
|
Rust
|
Mic92/cntr
|
/src/dotcntr.rs
|
UTF-8
| 2,419
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
use libc::pid_t;
use nix::fcntl::{self, OFlag};
use nix::sys::stat;
use nix::unistd::Pid;
use simple_error::try_with;
use std::fs::{self, File};
use std::io::prelude::*;
use std::os::unix::prelude::*;
use std::{
fs::{set_permissions, Permissions},
os::unix::fs::PermissionsExt,
};
use crate::capabilities;
use crate::procfs::ProcStatus;
use crate::result::Result;
use crate::tmp;
/// Hidden directory with CAP_CHROOT enabled cntr-exec binary
pub struct DotcntrDir {
    // Open O_RDONLY handle on the directory itself (usable with *at syscalls).
    pub file: File,
    // Owning handle; dropping it removes the temporary directory.
    pub dir: tmp::TempDir,
}
impl DotcntrDir {
    /// Writes `target_pid` (as a decimal string) into `<dir>/pid`.
    pub fn write_pid_file(&self, target_pid: Pid) -> Result<()> {
        let path = self.dir.path().join("pid");
        let mut file = try_with!(File::create(&path), "failed to create {}", path.display());
        let raw_pid: pid_t = target_pid.into();
        try_with!(
            file.write_all(format!("{}", raw_pid).as_bytes()),
            "failed to write {}",
            path.display()
        );
        Ok(())
    }
    /// Copies the current executable to `<dir>/cntr-exec` and grants it the
    /// CAP_SYS_CHROOT file capability.
    pub fn write_setcap_exe(&self) -> Result<()> {
        let path = self.dir.path().join("cntr-exec");
        // /proc/self/exe is the running cntr binary itself.
        try_with!(
            fs::copy("/proc/self/exe", &path),
            "failed to copy /proc/self/exe to {}",
            path.display()
        );
        try_with!(
            capabilities::set_chroot_capability(&path),
            "Failed set file capability CAP_SYS_CHROOT on {}",
            path.display()
        );
        Ok(())
    }
}
/// Creates and populates the `.cntr` temp directory: world-readable, holding
/// the setcap'd `cntr-exec` binary and the target process's pid file.
pub fn create(process_status: &ProcStatus) -> Result<DotcntrDir> {
    let dotcntr_dir = try_with!(tmp::tempdir(), "failed to create temporary directory");
    // 0o755 so processes inside the container can read/exec the contents.
    let permissions = Permissions::from_mode(0o755);
    try_with!(
        set_permissions(dotcntr_dir.path(), permissions),
        "cannot change permissions of '{}'",
        dotcntr_dir.path().display()
    );
    // Keep an O_RDONLY handle on the directory itself (O_CLOEXEC so child
    // processes don't inherit it).
    let dotcntr_fd = try_with!(
        fcntl::open(
            dotcntr_dir.path(),
            OFlag::O_RDONLY | OFlag::O_CLOEXEC,
            stat::Mode::all(),
        ),
        "failed to open '{}' directory",
        dotcntr_dir.path().display()
    );
    // SAFETY: `dotcntr_fd` was just returned by a successful `fcntl::open`,
    // so it is a valid open descriptor not owned elsewhere; `File` takes
    // sole ownership and will close it on drop.
    let dotcntr_file = unsafe { File::from_raw_fd(dotcntr_fd) };
    let d = DotcntrDir {
        file: dotcntr_file,
        dir: dotcntr_dir,
    };
    try_with!(d.write_setcap_exe(), "failed to create setcap executable");
    try_with!(
        d.write_pid_file(process_status.local_pid),
        "failed to create pid file"
    );
    Ok(d)
}
| true
|
8f00aa5836fd9993cd9c12c0f6a69226114564d8
|
Rust
|
feifeigd/rust
|
/slog_demo/src/player.rs
|
UTF-8
| 707
| 2.765625
| 3
|
[] |
no_license
|
use super::weapon::PlasmaCannon;
use slog::Logger;
use crate::PlayingCharacter;
// use weapon::PlasmaCannon;
/// A playing character that logs its actions through a structured logger.
pub struct Player {
    name: String,
    logger: Logger, // child logger carrying this player's name as context
    weapon: PlasmaCannon,
}
impl Player {
    /// Creates a player whose log records carry the player's name, plus a
    /// child logger scoped to its plasma cannon.
    pub fn new(logger: &Logger, name: &str) -> Self {
        // `to_string()` instead of `format!("{}", name)` (clippy: useless_format).
        let player_log = logger.new(o!("Player"=>name.to_string()));
        let weapon_log = player_log.new(o!("PlasmaCannon"=>"M435"));
        Self {
            logger: player_log,
            name: name.to_string(),
            weapon: PlasmaCannon(weapon_log),
        }
    }
}
impl PlayingCharacter for Player {
    /// Logs the shot and fires the weapon.
    fn shoot(&self) {
        // NOTE(review): the message ends with a dangling "with " — a weapon
        // description appears to be missing from the format string; confirm
        // intended wording before changing it.
        info!(self.logger, "{} shooting with ", self.name);
        self.weapon.fire();
    }
}
| true
|
a37c5eb4035693d5f5f336b42db290c4ff230180
|
Rust
|
emberian/spaceapi-server-rs
|
/tests/lib.rs
|
UTF-8
| 1,937
| 2.703125
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
extern crate spaceapi_server;
use std::net::Ipv4Addr;
use std::net::TcpStream;
use std::io::ErrorKind;
use spaceapi_server::SpaceapiServer;
use spaceapi_server::api;
use spaceapi_server::api::optional::Optional;
/// Create a new status object containing test data.
fn get_status() -> api::Status {
    api::Status::new(
        "ourspace",
        "https://example.com/logo.png",
        "https://example.com/",
        api::Location {
            address: Optional::Value("Street 1, Zürich, Switzerland".into()),
            lat: 47.123,
            lon: 8.88,
        },
        // Only email contact is provided; the other channels stay absent.
        api::Contact {
            irc: Optional::Absent,
            twitter: Optional::Absent,
            foursquare: Optional::Absent,
            email: Optional::Value("hi@example.com".into()),
        },
        // Issue-report channels advertised by the space.
        vec![
            "email".into(),
            "twitter".into(),
        ],
    )
}
/// Create a new SpaceapiServer instance listening on the specified port.
///
/// Panics (via `unwrap`) if construction fails — acceptable in test setup,
/// e.g. when the local Redis at 127.0.0.1 is unavailable.
fn get_server(ip: Ipv4Addr, port: u16, status: api::Status) -> SpaceapiServer {
    // Start and return a server instance
    SpaceapiServer::new(ip, port, status, "redis://127.0.0.1/", vec![]).unwrap()
}
#[test]
fn server_starts() {
    //! Test that the spaceapi server starts at all.
    // Ip / port for test server
    // NOTE(review): the port is hard-coded; the test fails spuriously if
    // 3344 is already in use on the machine running the tests.
    let ip = Ipv4Addr::new(127, 0, 0, 1);
    let port = 3344;
    // Test data
    let status = get_status();
    // Connection to port should fail right now
    let connect_result = TcpStream::connect((ip, port));
    assert!(connect_result.is_err());
    assert_eq!(connect_result.unwrap_err().kind(), ErrorKind::ConnectionRefused);
    // Instantiate and start server
    let server = get_server(ip, port, status);
    let mut listening = server.serve().unwrap();
    // Connecting to server should work now
    let connect_result = TcpStream::connect((ip, port));
    assert!(connect_result.is_ok());
    // Close server
    listening.close().unwrap();
}
| true
|
ebc0bb4a2d2d00e3054ce0053f5654c5228d9a8d
|
Rust
|
pete21/tickgrinder
|
/util/src/transport/command_server.rs
|
UTF-8
| 18,300
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
//! Internal server that accepts raw commands, queues them up, and transmits
//! them to the Tick Processor asynchronously. Commands are re-transmitted
//! if a response isn't received in a timout period.
//!
//! Responses from the Tick Processor are sent back over the commands channel
//! and are sent to worker processes that register interest in them over channels.
//! Workers register interest after sending a command so that they can be notified
//! of the successful reception of the command.
//!
//! TODO: Ensure that commands aren't processed twice by storing Uuids or most
//! recent 200 commands or something and checking that list before executing (?)
//!
//! TODO: Use different channel for responses than for commands
extern crate test;
use std::collections::VecDeque;
use std::thread::{self, Thread};
use std::time::Duration;
use std::sync::{Arc, Mutex};
use std::str::FromStr;
use futures::{Stream, Canceled};
use futures::sync::mpsc::{unbounded, UnboundedSender, UnboundedReceiver};
use futures::Future;
use futures::sync::oneshot::{channel as oneshot, Sender, Receiver};
use uuid::Uuid;
use redis;
use transport::redis::{get_client, sub_channel};
use transport::commands::*;
use conf::CONF;
/// A command waiting to be sent plus a Sender to send the Response/Error String
/// through and the channel on which to broadcast the Command.
struct CommandRequest {
    cmd: Command,
    // Oneshot resolved with the eventual Response (or error string).
    future: Sender<Result<Response, String>>,
    // Redis pub/sub channel name to broadcast the command on.
    channel: String,
}
/// Contains a `CommandRequest` for a worker and a Sender that resolves when the worker
/// becomes idle.
type WorkerTask = (CommandRequest, Sender<()>);
/// Threadsafe queue containing handles to idle command-sender threads in the form of `UnboundedSender`s
type UnboundedSenderQueue = Arc<Mutex<VecDeque<UnboundedSender<WorkerTask>>>>;
/// Threadsafe queue containing commands waiting to be sent
type CommandQueue = Arc<Mutex<VecDeque<CommandRequest>>>;
/// A `Vec` containing a `Uuid` of a `Response` that's expected and a `UnboundedSender` to send the
/// response through once it arrives
type RegisteredList = Vec<(Uuid, UnboundedSender<Result<Response, ()>>)>;
/// A message to be sent to the timeout thread containing how long to time out for,
/// a oneshot that resolves to a handle to the Timeout's thread as soon as the timeout begins,
/// and a oneshot that resolves to `Err(())` if the timeout completes.
///
/// The thread handle can be used to end the timeout early to make the timeout thread
/// useable again.
struct TimeoutRequest {
    dur: Duration,
    // Resolves with the timeout thread's handle once the timeout starts.
    thread_future: Sender<Thread>,
    // Resolves with `Err(())` if the full duration elapses.
    timeout_future: Sender<Result<Response, ()>>,
}
/// A list of `UnboundedSender`s over which Results from the Tick Processor will be sent if they
/// match the ID of the request the command `UnboundedSender` thread sent.
struct AlertList {
    // Vec to hold the ids of responses we're waiting for and `Sender`s
    // to send the result back to the worker thread
    // Wrapped in Arc<Mutex<>> so that it can be accessed from within futures
    pub list: RegisteredList,
}
/// Send out the Response to a worker that has registered interest in its Uuid.
///
/// Looks the Uuid up in the `AlertList`; if no listener is registered the
/// Response is silently dropped (the listener may already have timed out).
fn send_messages(res: WrappedResponse, al: &Mutex<AlertList>) {
    let mut al_inner = al.lock().expect("Unable to lock al in send_messages");
    // `if let` instead of is_some()/unwrap(): single lookup, no panic path.
    if let Some(entry) = al_inner.list.iter_mut().find(|x| x.0 == res.uuid) {
        entry.1.send(Ok(res.res)).expect("Unable to send through subscribed future");
    }
}
/// Utility methods for keeping track of the UUIDs of Responses that workers are
/// interested in, and for notifying them when those Responses are received.
impl AlertList {
    pub fn new() -> AlertList {
        AlertList {
            list: Vec::new(),
        }
    }

    /// Register interest in Results with a specified Uuid and send
    /// the Result over the specified channel when it's received.
    pub fn register(&mut self, response_uuid: &Uuid, c: UnboundedSender<Result<Response, ()>>) {
        self.list.push((*response_uuid, c));
    }

    /// Deregisters a listener, e.g. after its request timed out or its
    /// Response has been delivered.
    pub fn deregister(&mut self, uuid: &Uuid) {
        // `if let` instead of a match on Option: only one interesting arm.
        if let Some(pos) = self.list.iter().position(|x| &x.0 == uuid) {
            self.list.remove(pos);
        } else {
            println!("Error deregistering element from interest list; it's not in it");
        }
    }
}
#[derive(Clone)]
pub struct CommandServer {
    // Registry of pending Response Uuids, shared with the response-listener thread.
    al: Arc<Mutex<AlertList>>,
    command_queue: CommandQueue, // internal command queue
    conn_queue: UnboundedSenderQueue, // UnboundedSenders for idle command-sender threads
    client: redis::Client, // client used for direct sends (send_forget/broadcast)
    instance: Instance, // The instance that owns this CommandServer
}
/// Locks the `CommandQueue` and pops the oldest queued command, if any.
fn try_get_new_command(command_queue: CommandQueue) -> Option<CommandRequest> {
    // The MutexGuard is a temporary here, so the lock is released as soon as
    // the expression finishes evaluating.
    command_queue
        .lock()
        .expect("Unable to unlock qq_inner in try_get_new_command")
        .pop_front()
}
/// Sends `command` on `commands_channel` and races the matching Response
/// against a `CONF.cs_timeout` timeout, retrying up to `CONF.cs_max_retries`
/// times on timeout. The final outcome is delivered to the caller via `res_c`.
/// Blocks until the command resolves one way or the other.
fn send_command_outer(
    al: &Mutex<AlertList>, command: &Command, client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, res_c: Sender<Result<Response, String>>,
    command_queue: CommandQueue, mut attempts: usize, commands_channel: String
) {
    let wr_cmd = command.wrap();
    let _ = send_command(&wr_cmd, client, commands_channel.as_str());
    let (sleepy_c, sleepy_o) = oneshot::<Thread>();
    let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
    // start the timeout timer on a separate thread
    let dur = Duration::from_millis(CONF.cs_timeout as u64);
    let timeout_msg = TimeoutRequest {
        dur: dur,
        thread_future: sleepy_c,
        timeout_future: awake_c
    };
    sleeper_tx.send(timeout_msg).unwrap();
    // sleepy_o fulfills immediately to a handle to the sleeper thread
    let sleepy_handle = sleepy_o.wait();
    // UnboundedSender for giving to the AlertList and sending the response back
    let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
    // register interest in new Responses coming in with our Command's Uuid
    {
        al.lock().expect("Unlock to lock al in send_command_outer #1")
            .register(&wr_cmd.uuid, res_recvd_c);
    }
    // Race the first received Response against the timeout oneshot via
    // `select`; whichever resolves first decides `status` below.
    res_recvd_o.into_future().map(|(item_opt, _)| {
        item_opt.expect("item_opt was None")
    }).map_err(|_| Canceled ).select(awake_o).and_then(move |res| {
        let (status, _) = res;
        match status {
            Ok(wrapped_res) => { // command received
                {
                    // deregister since we're only waiting on one message
                    al.lock().expect("Unlock to lock al in send_command_outer #2")
                        .deregister(&wr_cmd.uuid);
                }
                // end the timeout now so that we can re-use sleeper thread
                sleepy_handle.expect("Couldn't unwrap handle to sleeper thread").unpark();
                // resolve the Response future
                res_c.complete(Ok(wrapped_res));
                return Ok(sleeper_tx)
            },
            Err(_) => { // timed out
                {
                    al.lock().expect("Couldn't lock al in Err(_)")
                        .deregister(&wr_cmd.uuid);
                }
                attempts += 1;
                if attempts >= CONF.cs_max_retries {
                    // Let the main thread know it's safe to use the UnboundedSender again
                    // This essentially indicates that the worker thread is idle
                    let err_msg = String::from_str("Timed out too many times!").unwrap();
                    res_c.complete(Err(err_msg));
                    return Ok(sleeper_tx)
                } else { // re-send the command
                    // we can do this recursively since it's only a few retries
                    send_command_outer(al, &wr_cmd.cmd, client, sleeper_tx, res_c,
                        command_queue, attempts, commands_channel)
                }
            }
        }
        Ok(sleeper_tx)
    }).wait().ok().unwrap(); // block until a response is received or the command times out
}
/// Manually loop over the converted Stream of commands: executes the task it
/// was handed, drains any commands that queued up in the meantime, then
/// signals idleness through `idle_c`.
fn dispatch_worker(
    work: WorkerTask, al: &Mutex<AlertList>, mut client: &mut redis::Client,
    mut sleeper_tx: &mut UnboundedSender<TimeoutRequest>, command_queue: CommandQueue
) -> Option<()> {
    let (cr, idle_c) = work;
    // completes initial command and internally iterates until queue is empty
    send_command_outer(al, &cr.cmd, &mut client, sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
    // keep trying to get queued commands to execute until the queue is empty;
    while let Some(cr) = try_get_new_command(command_queue.clone()) {
        send_command_outer(al, &cr.cmd, client, &mut sleeper_tx, cr.future, command_queue.clone(), 0, cr.channel);
    }
    // tell the owning CommandServer this worker can accept new tasks
    idle_c.complete(());
    Some(())
}
/// Blocks the current thread until a `TimeoutRequest` is received, then sleeps
/// for the requested Duration and completes the timeout oneshot upon awakening.
/// A handle to this thread is sent back first so the requester can `unpark()`
/// it to end the timeout early and reuse the sleeper thread.
fn init_sleeper(rx: UnboundedReceiver<TimeoutRequest>) {
    for res in rx.wait() {
        // Destructure directly: a `match` with a single irrefutable arm is noise.
        let TimeoutRequest { dur, thread_future, timeout_future } = res.unwrap();
        // send a handle to this thread so the timeout can be cancelled early
        thread_future.complete(thread::current());
        thread::park_timeout(dur);
        // fires whether the park timed out or we were unparked early
        timeout_future.complete(Err(()));
    }
}
/// Creates a command processor that awaits requests. Owns its own redis client
/// and sleeper thread, and serially executes every `WorkerTask` received over
/// `cmd_rx` until the channel closes or a dispatch signals shutdown.
fn init_command_processor(
    cmd_rx: UnboundedReceiver<WorkerTask>, command_queue: CommandQueue, al: &Mutex<AlertList>
) {
    let mut client = get_client(CONF.redis_host);
    // channel for communicating with the sleeper thread
    let (mut sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
    thread::spawn(move || init_sleeper(sleeper_rx) );
    for task in cmd_rx.wait() {
        let res = dispatch_worker(
            task.unwrap(), al, &mut client, &mut sleeper_tx, command_queue.clone()
        );
        // exit if we're in the process of collapse
        if res.is_none() {
            break;
        }
    }
}
impl CommandServer {
    /// Creates a new CommandServer: spawns one response-listener thread,
    /// `CONF.conn_senders` worker threads (each with its own redis client and
    /// sleeper), and a redis client for direct sends.
    pub fn new(instance_uuid: Uuid, instance_type: &str) -> CommandServer {
        let mut conn_queue = VecDeque::with_capacity(CONF.conn_senders);
        let command_queue = Arc::new(Mutex::new(VecDeque::new()));
        let al = Arc::new(Mutex::new(AlertList::new()));
        let al_clone = al.clone();
        // Handle newly received Responses
        let rx = sub_channel(CONF.redis_host, CONF.redis_responses_channel);
        thread::spawn(move || {
            for raw_res_res in rx.wait() {
                let raw_res = raw_res_res.expect("Res was error in CommandServer response UnboundedReceiver thread.");
                let parsed_res = parse_wrapped_response(raw_res);
                // route the Response to whichever worker registered its Uuid
                send_messages(parsed_res, &*al_clone);
            }
        });
        for _ in 0..CONF.conn_senders {
            let al_clone = al.clone();
            let qq_copy = command_queue.clone();
            // channel for getting the UnboundedSender back from the worker thread
            let (tx, rx) = unbounded::<WorkerTask>();
            thread::spawn(move || init_command_processor(rx, qq_copy, &*al_clone) );
            // store the UnboundedSender which can be used to send queries
            // to the worker in the connection queue
            conn_queue.push_back(tx);
        }
        let client = get_client(CONF.redis_host);
        CommandServer {
            al: al,
            command_queue: command_queue,
            conn_queue: Arc::new(Mutex::new(conn_queue)),
            client: client,
            instance: Instance{ uuid: instance_uuid, instance_type: String::from(instance_type), },
        }
    }

    /// Queues up a command to be sent. Returns a future that resolves to
    /// the returned response.
    pub fn execute(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Result<Response, String>> {
        let temp_lock_res = self.conn_queue.lock().unwrap().is_empty();
        // Force the guard locking conn_queue to go out of scope
        // this prevents the lock from being held through the entire if/else
        let copy_res = temp_lock_res;
        // future for handing back to the caller that resolves to Response/Error
        let (res_c, res_o) = oneshot::<Result<Response, String>>();
        // future for notifying main thread when command is done and worker is idle
        let (idle_c, idle_o) = oneshot::<()>();
        let cr = CommandRequest {
            cmd: command,
            future: res_c,
            channel: commands_channel,
        };
        if copy_res {
            // no idle worker available: park the request; a worker drains the
            // queue via try_get_new_command once it finishes its current task
            self.command_queue.lock().unwrap().push_back(cr);
        }else{
            // type WorkerTask
            let req = (cr, idle_c);
            let tx;
            {
                tx = self.conn_queue.lock().unwrap().pop_front().unwrap();
                tx.send(req).unwrap();
            }
            let cq_clone = self.conn_queue.clone();
            thread::spawn(move || {
                // Wait until the worker thread signals that it is idle
                let _ = idle_o.wait();
                // Put the UnboundedSender for the newly idle worker into the connection queue
                cq_clone.lock().unwrap().push_back(tx);
            });
        }
        res_o
    }

    /// Broadcasts a Command on `commands_channel` and collects every Response
    /// that arrives before one `CONF.cs_timeout` window expires; the returned
    /// future resolves to all collected Responses.
    pub fn broadcast(
        &mut self, command: Command, commands_channel: String
    ) -> Receiver<Vec<Response>> {
        // spawn a new timeout thread just for this request
        let (sleeper_tx, sleeper_rx) = unbounded::<TimeoutRequest>();
        let dur = Duration::from_millis(CONF.cs_timeout as u64);
        let (sleepy_c, _) = oneshot::<Thread>();
        // awake_o fulfills when the timeout expires
        let (awake_c, awake_o) = oneshot::<Result<Response, ()>>();
        let wr_cmd = command.wrap();
        // Oneshot for sending received responses back with.
        let (all_responses_c, all_responses_o) = oneshot::<Vec<Response>>();
        let alc = self.al.clone();
        let (res_recvd_c, res_recvd_o) = unbounded::<Result<Response, ()>>();
        {
            // oneshot triggered with matching message received
            let mut al_inner = alc.lock().expect("Unable to unlock to lock al in broadcast");
            al_inner.register(&wr_cmd.uuid, res_recvd_c);
        }
        let responses_container = Arc::new(Mutex::new(Vec::new()));
        let responses_container_clone = responses_container.clone();
        // collector thread: accumulates matching Responses until channel close
        thread::spawn(move || {
            for response in res_recvd_o.wait() {
                match response {
                    Ok(res) => {
                        let mut responses = responses_container_clone.lock().unwrap();
                        responses.push(res.expect("Inner error in responses iterator"))
                    },
                    Err(err) => println!("Got error from response iterator: {:?}", err),
                }
            }
        });
        let wr_cmd_c = wr_cmd.clone();
        thread::spawn(move || { // timer waiter thread
            // when a timeout happens, poll all the pending interest listners and send results back
            let _ = awake_o.wait();
            // deregister interest
            {
                let mut al_inner = alc.lock().expect("Unable to unlock to lock al in broadcast");
                al_inner.deregister(&wr_cmd_c.uuid);
            }
            let responses;
            {
                responses = responses_container.lock().unwrap().clone();
            }
            all_responses_c.complete(responses);
        });
        thread::spawn(move || init_sleeper(sleeper_rx) ); // timer thread
        // actually send the Command
        let _ = send_command(&wr_cmd, &self.client, commands_channel.as_str());
        let timeout_msg = TimeoutRequest {
            dur: dur,
            thread_future: sleepy_c,
            timeout_future: awake_c
        };
        // initiate timeout
        sleeper_tx.send(timeout_msg).unwrap();
        all_responses_o
    }

    /// Sends a command asynchronously without bothering to wait for responses.
    pub fn send_forget(&self, cmd: &Command, channel: &str) {
        let _ = send_command(&cmd.wrap(), &self.client, channel);
    }

    /// Sends a message to the logger with the specified severity
    pub fn log(&mut self, message_type_opt: Option<&str>, message: &str, level: LogLevel) {
        let message_type = match message_type_opt {
            Some(t) => t,
            None => "General",
        };
        let line = LogMessage {
            level: level,
            message_type: String::from(message_type),
            message: String::from(message),
            sender: self.instance.clone(),
        };
        self.send_forget(&Command::Log{msg: line}, CONF.redis_log_channel);
    }

    /// Shortcut method for logging a debug-level message.
    pub fn debug(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Debug);
    }

    /// Shortcut method for logging a notice-level message.
    pub fn notice(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Notice);
    }

    /// Shortcut method for logging a warning-level message.
    pub fn warning(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Warning);
    }

    /// Shortcut method for logging a error-level message.
    pub fn error(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Error);
    }

    /// Shortcut method for logging a critical-level message.
    pub fn critical(&mut self, message_type: Option<&str>, message: &str) {
        self.log(message_type, message, LogLevel::Critical);
    }
}
// Micro-benchmark for the cost of a bare `thread::spawn`. NOTE(review):
// `#[bench]`/`test::Bencher` require the nightly-only `test` crate —
// presumably `extern crate test` appears elsewhere in this file; confirm.
#[bench]
fn thread_spawn(b: &mut test::Bencher) {
    b.iter(|| thread::spawn(|| {}))
}
| true
|
ec02366102dfc606a33a98441c03539482543944
|
Rust
|
icefoxen/lang
|
/rust/old/match.rs
|
UTF-8
| 319
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
/// A value that may be present or missing — a hand-rolled analogue of `Option<i32>`.
/// (Modernized from pre-1.0 Rust: `int` → `i32`, `{:d}` → `{}`, and enum
/// variants qualified with their type, none of which compile on Rust 1.x.)
enum OptionalInt {
    Value(i32),
    Missing,
}

fn main() {
    let x = OptionalInt::Value(5);
    let y = OptionalInt::Missing;
    // Exhaustive match over both variants.
    match x {
        OptionalInt::Value(n) => println!("Value is {}", n),
        OptionalInt::Missing => println!("Value is missing"),
    }
    match y {
        OptionalInt::Value(n) => println!("Value is {}", n),
        OptionalInt::Missing => println!("Value is missing"),
    }
}
| true
|
0c736e11a4aa034d5690a1631ca48893cb1b3b83
|
Rust
|
dalalsunil1986/Methylenix
|
/src/kernel/drivers/acpi/table/xsdt.rs
|
UTF-8
| 8,202
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
//!
//! Extended System Description Table
//!
//! This manager contains the information about Extended System Description Table(XSDT).
//! XSDT is the list of tables like MADT.
use super::bgrt::BgrtManager;
use super::dsdt::DsdtManager;
use super::fadt::FadtManager;
use super::madt::MadtManager;
use crate::kernel::drivers::acpi::INITIAL_MMAP_SIZE;
use crate::kernel::manager_cluster::get_kernel_manager_cluster;
use crate::kernel::memory_manager::data_type::{
Address, MSize, MemoryOptionFlags, MemoryPermissionFlags, PAddress, VAddress,
};
pub struct XsdtManager {
    // Virtual address the XSDT header was remapped to (set by `init`).
    base_address: VAddress,
    /* Essential Managers */
    fadt_manager: FadtManager,
    dsdt_manager: DsdtManager,
}
impl XsdtManager {
    /// Const constructor with a zero base address; call [`Self::init`] before use.
    pub const fn new() -> Self {
        Self {
            base_address: VAddress::new(0),
            fadt_manager: FadtManager::new(),
            dsdt_manager: DsdtManager::new(),
        }
    }

    /// Maps and validates the XSDT at `xsdt_physical_address`, walks its entry
    /// list to initialize the essential FADT/DSDT managers, and falls back to
    /// the FADT's DSDT pointer if no DSDT entry was listed.
    /// Returns false on any mapping or validation failure.
    pub fn init(&mut self, xsdt_physical_address: PAddress) -> bool {
        // Map a provisional window first; the real table size is only known
        // after reading the header's length field below.
        let xsdt_vm_address = if let Ok(a) = get_kernel_manager_cluster()
            .memory_manager
            .lock()
            .unwrap()
            .mmap(
                xsdt_physical_address,
                MSize::new(INITIAL_MMAP_SIZE),
                MemoryPermissionFlags::rodata(),
                MemoryOptionFlags::PRE_RESERVED
                    | MemoryOptionFlags::MEMORY_MAP
                    | MemoryOptionFlags::DO_NOT_FREE_PHYSICAL_ADDRESS,
            ) {
            a
        } else {
            pr_err!("Cannot map XSDT.");
            return false;
        };
        // The table must begin with the ASCII signature "XSDT".
        if unsafe { *(xsdt_vm_address.to_usize() as *const [u8; 4]) } != *b"XSDT" {
            pr_err!("Invalid XSDT Signature");
            return false;
        }
        // Revision byte at offset 8; only revision 1 is supported.
        if unsafe { *((xsdt_vm_address.to_usize() + 8) as *const u8) } != 1 {
            pr_err!("Not supported XSDT version");
            return false;
        }
        // Length field at offset 4 gives the full table size; remap to fit.
        let xsdt_size = unsafe { *((xsdt_vm_address.to_usize() + 4) as *const u32) };
        let xsdt_vm_address = if let Ok(a) = get_kernel_manager_cluster()
            .memory_manager
            .lock()
            .unwrap()
            .mremap_dev(
                xsdt_vm_address,
                MSize::new(INITIAL_MMAP_SIZE),
                MSize::new(xsdt_size as usize),
            ) {
            a
        } else {
            pr_err!("Cannot remap XSDT.");
            return false;
        };
        self.base_address = xsdt_vm_address;
        // Walk every entry pointer, mapping each table and initializing the
        // managers whose signature matches.
        let mut index = 0;
        while let Some(entry_physical_address) = self.get_entry(index) {
            let v_address = if let Ok(a) = get_kernel_manager_cluster()
                .memory_manager
                .lock()
                .unwrap()
                .mmap(
                    entry_physical_address,
                    MSize::new(INITIAL_MMAP_SIZE),
                    MemoryPermissionFlags::rodata(),
                    MemoryOptionFlags::PRE_RESERVED
                        | MemoryOptionFlags::MEMORY_MAP
                        | MemoryOptionFlags::DO_NOT_FREE_PHYSICAL_ADDRESS,
                ) {
                a
            } else {
                pr_err!("Cannot map ACPI Table.");
                return false;
            };
            drop(entry_physical_address); /* Avoid using it */
            match unsafe { *(v_address.to_usize() as *const [u8; 4]) } {
                FadtManager::SIGNATURE => {
                    if !self.fadt_manager.init(v_address) {
                        pr_err!("Cannot init FADT Manager.");
                        return false;
                    }
                }
                DsdtManager::SIGNATURE => {
                    if !self.dsdt_manager.init(v_address) {
                        pr_err!("Cannot init DSDT Manager.");
                        return false;
                    }
                }
                _ => { /* Skip */ }
            };
            // Log each table's 4-byte signature (or "----" if not UTF-8).
            pr_info!(
                "{}",
                core::str::from_utf8(unsafe { &*(v_address.to_usize() as *const [u8; 4]) })
                    .unwrap_or("----")
            );
            index += 1;
        }
        // The DSDT may not be listed in the XSDT; fall back to the pointer
        // recorded in the FADT.
        if !self.dsdt_manager.is_inited() {
            let v_address = if let Ok(a) = get_kernel_manager_cluster()
                .memory_manager
                .lock()
                .unwrap()
                .mmap(
                    self.fadt_manager.get_dsdt_address(),
                    MSize::new(INITIAL_MMAP_SIZE),
                    MemoryPermissionFlags::rodata(),
                    MemoryOptionFlags::PRE_RESERVED
                        | MemoryOptionFlags::MEMORY_MAP
                        | MemoryOptionFlags::DO_NOT_FREE_PHYSICAL_ADDRESS,
                ) {
                a
            } else {
                pr_err!("Cannot reserve memory area of DSDT.");
                return false;
            };
            if !self.dsdt_manager.init(v_address) {
                pr_err!("Cannot init DSDT Manager.");
                return false;
            }
        }
        return true;
    }

    /// Finds and initializes the BGRT table, if present; the mapping is freed
    /// again if initialization fails.
    pub fn get_bgrt_manager(&self) -> Option<BgrtManager> {
        if let Some(v_address) = self.search_entry(&BgrtManager::SIGNATURE) {
            let mut bgrt_manager = BgrtManager::new();
            if bgrt_manager.init(v_address) {
                return Some(bgrt_manager);
            }
            pr_err!("Cannot init BGRT Manager.");
            if let Err(e) = get_kernel_manager_cluster()
                .memory_manager
                .lock()
                .unwrap()
                .free(v_address)
            {
                pr_warn!("Cannot free memory map of BGRT. Error: {:?}", e);
            }
        }
        return None;
    }

    pub fn get_fadt_manager(&self) -> &FadtManager {
        &self.fadt_manager
    }

    /// Finds and initializes the MADT table, if present; the mapping is freed
    /// again if initialization fails.
    pub fn get_madt_manager(&self) -> Option<MadtManager> {
        if let Some(v_address) = self.search_entry(&MadtManager::SIGNATURE) {
            let mut madt_manager = MadtManager::new();
            if madt_manager.init(v_address) {
                return Some(madt_manager);
            }
            pr_err!("Cannot init MADT Manager.");
            if let Err(e) = get_kernel_manager_cluster()
                .memory_manager
                .lock()
                .unwrap()
                .free(v_address)
            {
                pr_warn!("Cannot free memory map of MADT. Error: {:?}", e);
            }
        }
        return None;
    }

    pub fn get_dsdt_manager(&self) -> &DsdtManager {
        &self.dsdt_manager
    }

    // Total table length in bytes, read from the header's length field (offset 4).
    fn get_length(&self) -> usize {
        unsafe { *((self.base_address.to_usize() + 4) as *const u32) as usize }
    }

    // Physical address of entry `index`: the entries are 64-bit pointers
    // packed after the 0x24-byte header.
    fn get_entry(&self, index: usize) -> Option<PAddress> {
        if (self.get_length() - 0x24) >> 3 > index {
            Some(PAddress::from(unsafe {
                *((self.base_address.to_usize() + 0x24 + index * 8) as *const u64)
            } as usize))
        } else {
            None
        }
    }

    // Maps each entry in turn and returns the mapping whose table signature
    // matches; mismatched mappings are freed before moving on.
    fn search_entry(&self, signature: &[u8; 4]) -> Option<VAddress> {
        let mut memory_manager = get_kernel_manager_cluster().memory_manager.lock().unwrap();
        let mut index = 0;
        while let Some(entry_physical_address) = self.get_entry(index) {
            if let Ok(v_address) = memory_manager.mmap(
                entry_physical_address,
                MSize::new(INITIAL_MMAP_SIZE),
                MemoryPermissionFlags::rodata(),
                MemoryOptionFlags::PRE_RESERVED
                    | MemoryOptionFlags::MEMORY_MAP
                    | MemoryOptionFlags::DO_NOT_FREE_PHYSICAL_ADDRESS,
            ) {
                if unsafe { &*(v_address.to_usize() as *const [u8; 4]) } == signature {
                    return Some(v_address);
                }
                if let Err(e) = memory_manager.free(v_address) {
                    pr_warn!(
                        "Freeing memory map of ACPI Table was failed. Error: {:?}",
                        e
                    )
                }
            } else {
                pr_err!("Cannot map ACPI Table.");
                return None;
            };
            index += 1;
        }
        return None;
    }
}
| true
|
2bc8ffb7d90c2ab6ff60138d445b7d1d0c7b69c0
|
Rust
|
tomgrean/stardict
|
/src/main.rs
|
UTF-8
| 18,755
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
extern crate regex;
pub mod dict;
pub mod dictionary;
pub mod idx;
pub mod ifo;
pub mod reformat;
pub mod result;
pub mod syn;
//pub mod web;
use self::regex::bytes::Regex;
use std::cmp::Ordering;
use std::io::prelude::*;
use std::iter::Iterator;
use std::mem;
use std::net::TcpListener;
use std::net::TcpStream;
use std::{env, fs, path, str};
//use self::regex::Error;
/// StarDict contains all dictionary found within the specified file system directory.
pub struct StarDict {
    // One entry per dictionary subdirectory, sorted by directory name (see `new`).
    directories: Vec<dictionary::Dictionary>,
}
/// An iterator that merges several underlying iterators, dropping one
/// duplicated word from each iterator as it goes.
pub struct WordMergeIter<T: Iterator<Item = Vec<u8>>> {
    // The underlying word iterators being merged.
    wordit: Vec<T>,
    // Current (not yet yielded) head of each iterator; `None` once exhausted.
    cur: Vec<Option<Vec<u8>>>,
}
impl<'a, T: Iterator<Item = Vec<u8>>> Iterator for WordMergeIter<T> {
    type Item = Vec<u8>;
    /// Yields the smallest current head word across all underlying iterators
    /// (per `idx::Idx::dict_cmp`), advancing any iterator whose head compares
    /// equal so duplicates are emitted only once.
    fn next(&mut self) -> Option<Self::Item> {
        let l = self.cur.len();
        if l == 0 {
            return None;
        }
        // `x` tracks the slot holding the smallest head seen so far.
        let mut x = 0usize;
        let mut i = 1usize;
        while i < l {
            x = match (&self.cur[x], &self.cur[i]) {
                (None, _) => i,
                (_, None) => x,
                (Some(a), Some(b)) => match idx::Idx::dict_cmp(&a, &b, false) {
                    Ordering::Greater => i,
                    Ordering::Equal => {
                        // duplicate head: advance iterator `i` past it, keep `x`
                        self.cur[i] = self.wordit[i].next();
                        x
                    }
                    _ => x,
                },
            };
            i += 1;
        }
        // Hand out the winner and refill its slot from its iterator.
        mem::replace(&mut self.cur[x], self.wordit[x].next())
    }
}
impl StarDict {
    /// Create a StarDict struct from a system path. in the path,
    /// there should be some directories. each directory contains
    /// the dict files, like .ifo, .idx, .dict, etc.
    /// The dictionary will be sorted by its directory name.
    pub fn new(root: &path::Path) -> Result<StarDict, result::DictError> {
        let mut sort_dirs = Vec::new();
        let mut items = Vec::new();
        if root.is_dir() {
            for it in fs::read_dir(root)? {
                //println!("push direc: {:?}", it);
                let it = it?.path();
                if it.is_dir() {
                    sort_dirs.push(it.into_boxed_path());
                }
            }
        }
        sort_dirs.sort();
        for it in sort_dirs.iter() {
            match dictionary::Dictionary::new(&**it, root) {
                Ok(d) => {
                    items.push(d);
                }
                Err(e) => {
                    // a directory that isn't a valid dictionary is skipped, not fatal
                    eprintln!("ignore reason: {:?}", e);
                }
            }
        }
        Ok(StarDict { directories: items })
    }

    /// Get the Ifo struct, which is parsed from the .ifo file.
    pub fn info(&self) -> Vec<&ifo::Ifo> {
        let mut items = Vec::with_capacity(self.directories.len());
        for it in &self.directories {
            items.push(&it.ifo);
        }
        items
    }

    /// List the following neighbor words of `word`, from `off`.
    /// If `off` is a negative number, list from before `-off`.
    pub fn neighbors(&self, word: &[u8], off: i32) -> WordMergeIter<dictionary::DictNeighborIter> {
        // two iterators per dictionary: one over the idx, one over the syn
        let mut wordit = Vec::with_capacity(2 * self.directories.len());
        let mut cur = Vec::with_capacity(2 * self.directories.len());
        for d in self.directories.iter() {
            let mut x = d.neighbors(word, off);
            let mut s = d.neighbors_syn(word, off);
            cur.push(x.next());
            cur.push(s.next());
            wordit.push(x);
            wordit.push(s);
        }
        WordMergeIter { wordit, cur }
    }

    /// Search from all dictionaries. using the specified regular expression.
    /// to match the beginning of a word, use `^`, the ending of a word, use `$`.
    pub fn search<'a>(&'a self, reg: &'a Regex) -> WordMergeIter<dictionary::IdxIter> {
        // two iterators per dictionary: one over the idx, one over the syn
        let mut wordit = Vec::with_capacity(2 * self.directories.len());
        let mut cur = Vec::with_capacity(2 * self.directories.len());
        for d in self.directories.iter() {
            //println!("in for {}", d.ifo.name.as_str());
            let mut x = d.search_regex(reg);
            let mut s = d.search_syn(reg);
            //println!("created inner iter");
            cur.push(x.next());
            cur.push(s.next());
            //println!("created 1st value");
            wordit.push(x);
            wordit.push(s);
        }
        WordMergeIter { wordit, cur }
    }

    /// Lookup the word. Find in the Idx case-sensitively, if not found then try to do
    /// case-insensitive search. Also find all case-insensitive matching words in Syn.
    pub fn lookup(&self, word: &[u8]) -> Result<Vec<dictionary::LookupResult>, result::DictError> {
        let mut ret: Vec<dictionary::LookupResult> = Vec::with_capacity(self.directories.len());
        for d in self.directories.iter() {
            // per-dictionary misses are simply skipped
            if let Ok(x) = d.lookup(word) {
                ret.extend(x);
            }
        }
        Ok(ret)
    }
}
/// Decoded request URL: a short path selector, the (percent-decoded) word,
/// and the numeric `o` (offset) / `l` (length) query arguments.
struct StardictUrl {
    path: [u8; 4usize],
    word: Vec<u8>,
    offset: i32, // args for offset and length, may use BTreeMap, but it cost too much.
    length: usize,
}

impl StardictUrl {
    /// Fresh URL with zeroed path/offset/length and room for a short word.
    fn new() -> StardictUrl {
        StardictUrl {
            path: [0; 4],
            word: Vec::with_capacity(16),
            offset: 0,
            length: 0,
        }
    }

    /// Map an ASCII hex digit to its numeric value; other bytes pass through
    /// unchanged.
    fn byte_to_u8(b: u8) -> u8 {
        if b.is_ascii_digit() {
            b - b'0'
        } else if (b'A'..=b'F').contains(&b) {
            b - (b'A' - 10)
        } else if (b'a'..=b'f').contains(&b) {
            b - (b'a' - 10)
        } else {
            b
        }
    }

    /// Store a path byte; indices past the fixed-size buffer are ignored.
    fn add_path(&mut self, c: u8, idx: usize) {
        if let Some(slot) = self.path.get_mut(idx) {
            *slot = c;
        }
    }

    /// Append one (already decoded) byte to the word.
    fn add_byte(&mut self, c: u8) {
        self.word.push(c);
    }

    /// Fold one decimal digit into the offset argument.
    fn add_arg_offset(&mut self, c: i32) {
        self.offset = 10 * self.offset + c;
    }

    /// Fold one decimal digit into the length argument.
    fn add_arg_length(&mut self, c: usize) {
        self.length = 10 * self.length + c;
    }
}
/// Entry point: parses `-h host:port`, `-r dict-root-dir` and `-d` options,
/// loads all dictionaries under the root directory, then serves HTTP requests
/// sequentially on the listening socket.
fn main() {
    let mut host = String::from("0.0.0.0:8888");
    //let mut host = String::from("[::]:8888");
    let mut dictdir = String::from("/usr/share/stardict/dic");
    let dict;
    {
        let mut _daemon = false;
        // Option letter seen last; the NEXT argument is consumed as its value.
        let mut pendarg = 0u8;
        for arg in env::args().skip(1) {
            //parse options.
            println!("cmd args: {}", &arg);
            let a = arg.as_bytes();
            match pendarg {
                b'h' => {
                    host.clear();
                    host.push_str(&arg);
                    pendarg = 0;
                }
                b'd' => {
                    _daemon = true;
                    pendarg = 0;
                }
                b'r' => {
                    dictdir.clear();
                    dictdir.push_str(&arg);
                    pendarg = 0;
                }
                0 => (),
                _ => {
                    // unknown option: print usage and quit
                    println!("parameter: [-d] [-h host:port] [-r dict-root-dir]");
                    return;
                }
            }
            if a[0] == b'-' {
                pendarg = a[1];
            }
        }
        //println!("get arg host={}, daemon={}", host, daemon);
        //if daemon {
        //}
        dict = StarDict::new(&path::PathBuf::from(&dictdir)).unwrap();
    }
    println!("dict size={}", dict.directories.len());
    //for d in dict.info().iter() {
    //    println!("dict: wordcount:{} {}", d.word_count, d.name);
    //}
    //webs
    let listener = TcpListener::bind(&host).expect("Bind Socket failed!");
    //let pool = web::ThreadPool::new(4);
    // content-reformat rules are read from <dictdir>/rformat.conf
    let cr = {
        let mut fmtp = path::PathBuf::from(&dictdir);
        fmtp.push("rformat.conf");
        reformat::ContentReformat::from_config_file(&fmtp)
    };
    // serve connections one at a time on this thread
    for stream in listener.incoming() {
        let stream = stream.expect("accept TCP failed!");
        //pool.execute(
        if let Err(_) = handle_connection(stream, &dict, &cr, &dictdir) {
            println!("communication failed!");
        }
        //);
    }
    println!("Shutting down.");
}
/// Reads one HTTP GET request from `stream`, dispatches on the first path
/// byte — `W` word lookup, `n` neighbor listing, `s` regex search, `r` static
/// resource from the dict root, `w` (or empty word) home page — and writes a
/// minimal HTTP/1.0 response.
fn handle_connection(
    mut stream: TcpStream,
    dict: &StarDict,
    cr: &reformat::ContentReformat,
    dictdir: &str,
) -> std::io::Result<()> {
    //stream.set_nonblocking(false)?;
    //stream.set_nodelay(false)?;
    let mut buffer = vec![0u8; 512];
    {
        // Read until the header-terminating blank line (\r\n\r\n), growing the
        // buffer in 512-byte steps; reject EOF, tiny, or oversized requests.
        let mut sz = 0usize;
        while let Ok(bn) = stream.read(&mut buffer[sz..]) {
            sz += bn;
            if bn == 0 || sz <= 4 || sz > 4096 {
                stream.write(b"HTTP/1.0 417 Expectation Failed\r\n\r\nFail")?;
                return Ok(());
            }
            if buffer[sz - 4] == b'\r'
                && buffer[sz - 3] == b'\n'
                && buffer[sz - 2] == b'\r'
                && buffer[sz - 1] == b'\n'
            {
                buffer.resize(sz, 0);
                break;
            }
            if sz >= buffer.len() {
                buffer.resize(buffer.len() + 512, 0);
            }
        }
    }
    let get = b"GET /";
    //("HTTP/1.0 200 OK\r\nConnection: close\r\n", "index.html");
    let mut content: Vec<u8> = Vec::new();
    let mut surl = StardictUrl::new();
    if buffer.starts_with(get) {
        // Single-pass URL parser over the request path (up to the first space).
        let mut state = 0i16; //>=0 path, -1 w, -2 p0w, -3 p1w, -4 argKey, -5 argVal
        let mut w = 0u8;
        buffer[5..]
            .iter()
            .take_while(|c| **c != b' ')
            .for_each(|c| {
                if state < 0 {
                    if *c == b'%' {
                        // start of a %XX percent-escape
                        state = -2;
                    } else if *c == b'?' {
                        // parse args.
                        state = -4;
                    } else {
                        if state == -2 {
                            // high nibble of the escaped byte
                            w = StardictUrl::byte_to_u8(*c) << 4;
                            state = -3;
                        } else if state == -3 {
                            // low nibble: emit the decoded byte
                            w |= StardictUrl::byte_to_u8(*c);
                            surl.add_byte(w);
                            state = -1;
                        } else if state == -4 {
                            // reading an argument key (single letter before '=')
                            if *c == b'=' {
                                state = -5;
                            } else {
                                w = *c;
                            }
                        } else if state == -5 {
                            // reading an argument value, digit by digit
                            match *c {
                                b'&' => {
                                    state = -4;
                                }
                                b'-' => {
                                    // leading minus only valid for the offset arg
                                    if w == b'o' {
                                        w = b'O';
                                    } else {
                                        state = -32768;
                                    }
                                }
                                b'0'..=b'9' => {
                                    let v: i32 = (*c - b'0') as i32;
                                    if w == b'o' {
                                        surl.add_arg_offset(v);
                                    } else if w == b'O' {
                                        // negative offset
                                        surl.add_arg_offset(-v);
                                    } else if w == b'l' {
                                        // length
                                        surl.add_arg_length(v as usize);
                                    }
                                }
                                _ => {
                                    // anything else poisons the parser state
                                    state = -32768;
                                }
                            }
                        } else {
                            surl.add_byte(*c);
                        }
                    }
                } else if *c == b'/' {
                    // end of path component: switch to word parsing
                    state = -1;
                } else {
                    surl.add_path(*c, state as usize);
                    state += 1;
                }
            });
        //println!("get from url path={}, word={}, off={}, len={}", str::from_utf8(&surl.path).unwrap(), str::from_utf8(&surl.word).unwrap(), surl.offset, surl.length);
        if surl.length == 0 {
            surl.length = 10;
        }
        if surl.word.len() > 0 {
            if surl.path[0] == b'W' {
                //word lookup
                match dict.lookup(&surl.word) {
                    Ok(x) => {
                        // index of anchors, then one definition block per hit
                        content.extend(b"<ol>");
                        for (i, e) in x.iter().enumerate() {
                            content.extend(b"<li><a href='#word_");
                            content.extend(i.to_string().as_bytes());
                            content.extend(b"'>");
                            content.extend(&e.word);
                            content.extend(b" : ");
                            content.extend(e.dictionary.name.as_bytes());
                            content.extend(b"</a></li>");
                        }
                        content.extend(b"</ol>\n");
                        for (i, e) in x.iter().enumerate() {
                            content.extend(b"<div id='word_");
                            content.extend(i.to_string().as_bytes());
                            content.extend(b"' class='res_word'>");
                            content.extend(e.dictionary.name.as_bytes());
                            content.extend(b" (");
                            content.extend(&e.word);
                            content.extend(b") </div><div class='res_definition'>".iter());
                            // reformat each NUL-separated field per its type char
                            for (a, b) in e
                                .dictionary
                                .same_type_sequence
                                .as_bytes()
                                .iter()
                                .zip(e.result.split(|c| *c == 0))
                            {
                                content.extend(&cr.replace_all(
                                    *a,
                                    e.dictionary.dict_path.as_bytes(),
                                    b,
                                ));
                            }
                            content.extend(b"</div>\n");
                        }
                    }
                    Err(e) => println!("err: {:?}", e),
                }
            } else if surl.path[0] == b'n' {
                //neighbor words reference
                for s in dict.neighbors(&surl.word, surl.offset).take(surl.length) {
                    content.extend(s);
                    content.extend(b"\n");
                }
            } else if surl.path[0] == b's' {
                //search with regex
                match str::from_utf8(&surl.word) {
                    Ok(x) => match Regex::new(x) {
                        Ok(v) => {
                            content.extend(b"/~/:<ol>");
                            dict.search(&v).take(surl.length).for_each(|e| {
                                content.extend(b"<li><a>");
                                content.extend(e);
                                content.extend(b"</a></li>\n");
                            });
                            content.extend(b"</ol>");
                        }
                        Err(e) => println!("err: {:?}", e),
                    },
                    Err(e) => println!("err: {:?}", e),
                }
            } else if surl.path[0] == b'r' {
                //html js css page etc.
                if let Ok(fname) = str::from_utf8(&surl.word) {
                    let mut pfile = path::PathBuf::from(dictdir);
                    pfile.push(fname);
                    if let Ok(mut f) = fs::File::open(pfile) {
                        // partial reads are discarded so we never serve a torso
                        if f.read_to_end(&mut content).is_err() {
                            content.clear();
                        }
                    }
                }
            } else if surl.path[0] == b'w' {
                content.extend(HOME_PAGE.as_bytes());
            }
        } else {
            content.extend(HOME_PAGE.as_bytes());
        }
    }
    // Content-Type guess from the requested file's extension (static resources).
    fn map_by_file(f: &[u8]) -> &'static [u8] {
        if let Some(s) = f.rsplit(|c| *c == b'.').next() {
            match s {
                b"js" => return b"application/javascript",
                b"css" => return b"text/css",
                b"jpg" => return b"image/jpeg",
                b"png" => return b"image/png",
                _ => (),
            }
        }
        b"text/html"
    }
    if content.len() > 0 {
        //let mut cg = 0;
        //content.iter_mut().for_each(|x|{ *x = if cg % 10 == 0 {b'\n'} else {b'a'}; cg = cg + 1;});
        stream.write(b"HTTP/1.0 200 OK\r\nContent-Type: ")?;
        if surl.path[0] == b'n' {
            stream.write(b"text/plain")?;
        } else if surl.path[0] == b'r' {
            stream.write(map_by_file(&surl.word))?;
        } else {
            stream.write(b"text/html")?;
        }
        stream.write(b"\r\nContent-Length: ")?;
        stream.write(content.len().to_string().as_bytes())?;
        stream.write(b"\r\nConnection: close\r\n\r\n")?;
        //stream.write(b"\r\n\r\n")?;
        /*
        for blk in content.chunks(1024) {
            stream.write(blk)?;
        }
        */
        stream.write(&content)?;
    } else {
        stream.write(b"HTTP/1.0 404 NOT FOUND\r\n\r\nnot found")?;
    }
    stream.flush()?;
    //stream.shutdown(std::net::Shutdown::Both)?;
    Ok(())
}
// Static single-page UI served for `/` and the `w` path; it queries the
// endpoints above via jQuery, with scripts/styles served under `/r/rhtm/`.
const HOME_PAGE: &'static str = r"<html><head>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Star Dictionary</title>
<style>
.res_definition{
table-layout: fixed;
border-left: thin dashed black;
border-right: thin dashed black;
padding: 5px;
}
.res_word{
table-layout: fixed;
border: thin solid black;
padding: 5px;
}
.numi{
width:5em;
}
span{
color:green;
}
a{
color:blue;
text-decoration:underline;
cursor:pointer;
}
blockquote{
margin:0em 0em 0em 1em;
padding:0em 0em 0em 0em;
}
</style>
<link href='/r/rhtm/jquery-ui.css' rel='stylesheet'>
<script src='/r/rhtm/jquery.js'></script>
<script src='/r/rhtm/jquery-ui.js'></script>
<script src='/r/rhtm/autohint.js'></script>
</head><body>
<form id='qwFORM' action='/' method='GET'>
<input id='qwt' type='text' name='w' class='ui-autocomplete-input' placeholder='input word' required='required' value=''/>/<input id='chkreg' type='checkbox'/>/
<input type='submit' value='='/> <input type='button' id='backwardbtn' value='<'/> <input type='button' id='forwardbtn' value='>'/>
(<input type='number' class='numi' id='hint_offset' value='0' disabled/>, <input type='number' class='numi' id='result_length' value='10'/>)
</form><hr/>
<div id='dict_content'></div></body></html>";
| true
|
d311a0c03217f8e1b597cf3c089f5f285895632b
|
Rust
|
gvissers/babs
|
/src/ubig/cmp.rs
|
UTF-8
| 2,712
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
// Copyright, 2021, Gé Vissers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::digit::Digit;
/// Compare two big numbers by their digits `nr0` and `nr1`.
///
/// Digits are stored least significant first and the numbers are assumed to
/// be normalized (no leading zero digits), so a longer slice always denotes a
/// greater number; equal-length numbers are compared from the most
/// significant digit down (hence the reversed iterators).
#[inline]
pub fn cmp<T>(nr0: &[T], nr1: &[T]) -> std::cmp::Ordering
where T: Ord
{
    nr0.len().cmp(&nr1.len()).then_with(|| nr0.iter().rev().cmp(nr1.iter().rev()))
}
/// Compare two big numbers by their digits, returning whether `nr0 < nr1`.
///
/// The bound is relaxed from `T: Digit` to `T: Ord`: only ordering is used
/// (the call to `cmp` above), so this stays backward compatible for every
/// digit type while also working for any other ordered representation.
#[inline]
pub fn lt<T>(nr0: &[T], nr1: &[T]) -> bool
where T: Ord
{
    cmp(nr0, nr1) == std::cmp::Ordering::Less
}
#[cfg(test)]
mod tests
{
    use super::*;
    use crate::digit::{BinaryDigit, DecimalDigit};
    // Exercises cmp() over empty, unequal-length, single-digit and
    // multi-digit operands. Digits are stored least significant first.
    #[test]
    fn test_cmp()
    {
        // Empty vs. empty compares equal.
        let n: &[BinaryDigit<u8>; 0] = &[];
        let m: &[BinaryDigit<u8>; 0] = &[];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Equal);
        // A shorter (normalized) number is always the smaller one.
        let n: &[BinaryDigit<u8>; 0] = &[];
        let m = &[BinaryDigit(1u8)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Less);
        assert_eq!(cmp(m, n), std::cmp::Ordering::Greater);
        // Equal length: single digits compare directly.
        let n = &[DecimalDigit(2u64)];
        let m = &[DecimalDigit(1u64)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Greater);
        assert_eq!(cmp(m, n), std::cmp::Ordering::Less);
        // Identical digit sequences compare equal.
        let n = &[BinaryDigit(2u64), BinaryDigit(1), BinaryDigit(3)];
        let m = &[BinaryDigit(2u64), BinaryDigit(1), BinaryDigit(3)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Equal);
        // Difference only in the least significant digit.
        let n = &[BinaryDigit(1u64), BinaryDigit(1), BinaryDigit(3)];
        let m = &[BinaryDigit(2u64), BinaryDigit(1), BinaryDigit(3)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Less);
        assert_eq!(cmp(m, n), std::cmp::Ordering::Greater);
        // The most significant digit dominates the lower ones.
        let n = &[BinaryDigit(1u64), BinaryDigit(1), BinaryDigit(3)];
        let m = &[BinaryDigit(0u64), BinaryDigit(0), BinaryDigit(4)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Less);
        assert_eq!(cmp(m, n), std::cmp::Ordering::Greater);
        // Length comparison applies to decimal digits as well.
        let n = &[DecimalDigit(1u32), DecimalDigit(1), DecimalDigit(3)];
        let m = &[DecimalDigit(0u32), DecimalDigit(0), DecimalDigit(0), DecimalDigit(1)];
        assert_eq!(cmp(n, m), std::cmp::Ordering::Less);
        assert_eq!(cmp(m, n), std::cmp::Ordering::Greater);
    }
}
| true
|
5b2d3888fbe7e7752c1625b091896aba603d9d6e
|
Rust
|
joshhansen/Fil
|
/src/in_/term.rs
|
UTF-8
| 6,883
| 2.796875
| 3
|
[] |
no_license
|
use std::collections::{HashMap,VecDeque};
use std::io::{Write,stdout};
use cv::Mat;
use cv::highgui::{WindowFlags,highgui_named_window};
use cv::videoio::{CapProp,VideoCapture};
use rand;
use rand::distributions::{IndependentSample, Range};
use super::super::util::{MovingAvg,MostFrequent};
const FPS: u8 = 60;
const SAMPLES: u64 = 1000;
/// Anything that can recover a bit stream from successive video frames.
pub trait VideoDecoder {
    /// Decode one frame. Returns `Some(bits)` when the frame yielded newly
    /// decoded bits, or `None` when it carried no new data.
    /// FIXME: When const generics happen, use a parameterized array rather than vector
    fn decode_video(&mut self, image: &Mat) -> Option<&Vec<bool>>;
}
/// Estimate the average colour inside the axis-aligned rectangle
/// `[x_min, x_max) x [y_min, y_max)` of `image` by randomly sampling
/// `SAMPLES` pixels. Returns the per-channel means as `[r, g, b]`.
fn sample_color(image: &Mat, x_min: usize, x_max: usize, y_min: usize, y_max: usize) -> [u8; 3] {
    let x_range = Range::new(x_min, x_max);
    let y_range = Range::new(y_min, y_max);
    let mut rng = rand::thread_rng();
    let mut sums: [u64; 3] = [0, 0, 0];
    for _ in 0..SAMPLES {
        let x = x_range.ind_sample(&mut rng);
        let y = y_range.ind_sample(&mut rng);
        // cv-rs returns pixels in BGR order and indexes as (row, col),
        // i.e. (y, x); unpack accordingly so `sums` stays in RGB order.
        // (See the upstream FIXME about cv-rs reversing RGB triples.)
        let (b, g, r): (u8, u8, u8) = image.at2(y as isize, x as isize);
        sums[0] += r as u64;
        sums[1] += g as u64;
        sums[2] += b as u64;
    }
    // Integer mean per channel; each mean of u8 samples fits back in a u8.
    [
        (sums[0] / SAMPLES) as u8,
        (sums[1] / SAMPLES) as u8,
        (sums[2] / SAMPLES) as u8,
    ]
}
/// Classification of a frame by which indicator channels are lit
/// (see `TimedColorCodedOneBitDecoder::decode_video`).
#[derive(Clone,Copy,Debug,Eq,Hash,PartialEq)]
enum Desc {
    /// Only the clock channel is bright.
    Clock,
    /// Only the signal (data) channel is bright.
    Signal,
    /// Both channels are bright.
    Both,
    /// Neither channel is bright.
    Neither
}
/// Decodes a colour-coded one-bit-per-clock-edge video signal: the left
/// screen edge carries the data bit in its green channel, the right edge
/// the clock in its red channel (see `decode_video`).
pub struct TimedColorCodedOneBitDecoder {
    /// Single-element buffer holding the most recently latched bit;
    /// handed out by reference from `decode_video`.
    bits: Vec<bool>,
    /// Clock level observed on the previous frame, for edge detection.
    prev_clock: bool,
    /// Moving average of the left strip's green channel.
    left_g_avg: MovingAvg,
    /// Moving average of the right strip's red channel.
    right_r_avg: MovingAvg,
    /// Majority filter over recent frame classifications.
    most_freq_desc: MostFrequent<Desc>
}
impl TimedColorCodedOneBitDecoder {
    /// Create a decoder with averaging windows of 1 (presumably no
    /// smoothing — confirm `MovingAvg` semantics) and a 5-entry majority
    /// filter for the frame classification.
    pub fn new() -> Self {
        Self {
            bits: vec![false],
            prev_clock: false,
            left_g_avg: MovingAvg::new(1),
            right_r_avg: MovingAvg::new(1),
            most_freq_desc: MostFrequent::new(5)
        }
    }
}
impl VideoDecoder for TimedColorCodedOneBitDecoder {
    /// Classify the frame by the colour of its left (green) and right (red)
    /// edge strips, smooth the classification, and emit one bit whenever
    /// the clock channel toggles.
    fn decode_video(&mut self, image: &Mat) -> Option<&Vec<bool>> {
        let size = image.size();
        // Average colour of a 100-pixel-wide strip on each side of the frame.
        let left_rgb = sample_color(&image, 0, 100, 0, size.height as usize);
        let right_rgb = sample_color(&image, (size.width - 100) as usize, size.width as usize, 0, size.height as usize);
        // Smoothed green level of the left strip and red level of the right.
        let g_avg = self.left_g_avg.push(left_rgb[1] as f64);
        let r_avg = self.right_r_avg.push(right_rgb[0] as f64);
        // Threshold the two channels into the four strip states.
        // NOTE(review): 20/60 look like empirical brightness thresholds —
        // confirm against the transmitter's colour levels.
        let desc = if r_avg < 20f64 && g_avg < 20f64 {
            Desc::Neither
        } else if r_avg > 60f64 && g_avg > 60f64 {
            Desc::Both
        } else if r_avg > g_avg {
            Desc::Clock
        } else {
            Desc::Signal
        };
        // Debounce: keep the most frequent classification of recent frames.
        let desc = self.most_freq_desc.push(desc);
        // eprintln!("{:?} {:?} {:?} {:?} {:?} {:?}", desc, rgb, top_rgb, bottom_rgb, left_rgb, right_rgb);
        let (signal, clock) = match desc {
            Desc::Signal => (true, false),
            Desc::Clock => (false, true),
            Desc::Both => (true, true),
            Desc::Neither => (false, false)
        };
        // A clock edge (either direction) latches the current signal level
        // as one decoded bit; otherwise this frame carries no data.
        let result = if clock != self.prev_clock {
            // eprintln!("{} {}", signal, clock);
            self.bits[0] = signal;
            Some(&self.bits)
        } else {
            None
        };
        self.prev_clock = clock;
        result
    }
}
/// Work-in-progress decoder for a grid-of-cells encoding; its
/// `VideoDecoder` impl below is a stub that decodes nothing yet.
struct GridDecoder {
    /// Decoded bit buffer, capacity-sized for one grid of cells.
    bits: Vec<bool>,
    /// NOTE(review): never read or written by the stub impl — placeholder.
    prev_clock: bool,
}
impl GridDecoder {
    /// Create a decoder for a `grid_height` x `grid_width` cell grid.
    fn new(grid_height: usize, grid_width: usize) -> Self {
        Self {
            // Capacity only; the vector starts with length 0.
            bits: Vec::with_capacity(grid_height * grid_width),
            prev_clock: false
        }
    }
}
/// From https://en.wikipedia.org/w/index.php?title=Connected-component_labeling&oldid=801482060#One_component_at_a_time
// NOTE(review): unfinished. `component_num` is never incremented, the queue
// `q` is filled but never drained (no flood-fill of neighbours happens), and
// the computed `components` map is dropped on return. As written this only
// scans the image for strongly-red pixels (r > 200, g < 50, b < 50) and
// labels them all with component 0, discarding the result.
// NOTE(review): the destructuring order `(g,b,r)` here disagrees with
// `sample_color`'s `(b,g,r)` for the same `at2` call — one of the two is
// mislabeled; verify against cv-rs.
fn connected_components(image: &Mat) {
    let mut components: HashMap<(isize,isize),usize> = HashMap::new();
    let mut q: VecDeque<(isize,isize)> = VecDeque::new();
    let size = image.size();
    let mut component_num = 0;
    for x in (0 as isize)..(size.width as isize) {
        for y in (0 as isize)..(size.height as isize) {
            let (g,b,r): (u8,u8,u8) = image.at2(x,y);
            if r > 200 && g < 50 && b < 50 {
                components.insert((x,y), component_num);
                q.push_back((x,y));
            }
        }
    }
}
impl VideoDecoder for GridDecoder {
    /// Stub: grid decoding is not implemented yet, so no frame ever
    /// produces bits.
    fn decode_video(&mut self, image: &Mat) -> Option<&Vec<bool>> {
        None
    }
}
/// Capture frames from camera 1 and decode them into bytes with
/// `TimedColorCodedOneBitDecoder`, invoking `callback` with `Some(bytes)`
/// for every completed byte. Runs until the capture device stops producing
/// frames. Each decoded byte is also echoed to stderr/stdout.
pub fn decode<F: Fn(Option<&Vec<u8>>)>(callback: F) {
    // One-byte scratch buffer handed to the callback.
    // Bug fix: the original used `Vec::with_capacity(1)`, which allocates
    // capacity but leaves the length at 0, so `result[0] = byte` below
    // panicked on the first completed byte. `vec![0u8; 1]` actually
    // contains an element.
    let mut result: Vec<u8> = vec![0u8; 1];
    let cap = VideoCapture::new(1);
    assert!(cap.is_open());
    cap.set(CapProp::FrameWidth, 320f64);
    cap.set(CapProp::FrameHeight, 240f64);
    cap.set(CapProp::Fps, FPS as f64);
    eprintln!("Width: {}", cap.get(CapProp::FrameWidth).unwrap());
    eprintln!("Height: {}", cap.get(CapProp::FrameHeight).unwrap());
    eprintln!("FPS: {}", cap.get(CapProp::Fps).unwrap());
    highgui_named_window("Window", WindowFlags::WindowAutosize);
    let mut decoder = TimedColorCodedOneBitDecoder::new();
    // Bits accumulated toward the byte currently being assembled.
    let mut byte_in_progress: Vec<bool> = Vec::new();
    while let Some(image) = cap.read() {
        image.show("Window", 1).unwrap();
        if let Some(bits) = decoder.decode_video(&image) {
            for bit in bits {
                byte_in_progress.push(*bit);
                if byte_in_progress.len() == 8 {
                    // Reversing makes the first-received bit the least
                    // significant bit of the assembled byte.
                    byte_in_progress.reverse();
                    let mut byte = 0u8;
                    for idx in 0..8 {
                        byte <<= 1;
                        if byte_in_progress[idx] {
                            byte |= 1;
                        }
                    }
                    result[0] = byte;
                    callback(Some(&result));
                    let c = char::from(byte);
                    eprintln!("Byte: {} {}", byte, c);
                    print!("{}", c);
                    stdout().flush().unwrap();
                    byte_in_progress.clear();
                }
            }
        }
        // Liveness signal: "frame processed, no byte ready right now".
        // NOTE(review): this fires every frame, including frames that did
        // complete a byte — confirm that is the intended callback contract.
        callback(None);
    }
}
| true
|
141a29bb2391384d1f0b6fac82a00eab5484c033
|
Rust
|
huin/accountmerge
|
/src/rules/cmd.rs
|
UTF-8
| 1,433
| 2.75
| 3
|
[] |
no_license
|
use anyhow::Result;
use clap::{Args, Subcommand};
use crate::filespec::{self, FileSpec};
use crate::internal::TransactionPostings;
use crate::rules::processor::TransactionProcessorFactory;
// Top-level arguments for the rules-processing command: an engine
// subcommand plus input/output file specs. Field `///` comments below are
// deliberate: clap turns them into CLI help text, so only the fields that
// should show help carry doc comments.
#[derive(Debug, Args)]
pub struct Command {
    // The engine to interpret the rules as.
    // (Kept as a line comment: a `///` here would change clap's generated
    // help output for the subcommand.)
    #[command(subcommand)]
    engine: Engine,
    /// The Ledger journal to read.
    input_journal: FileSpec,
    /// The ledger file to write to (overwrites any existing file). "-" writes
    /// to stdout.
    #[arg(short = 'o', long = "output", default_value = "-")]
    output: FileSpec,
}
// The available rule-interpretation engines, one clap subcommand each,
// wrapping that engine's own option struct. Line comments are used on
// purpose: `///` doc comments would alter clap's generated help text.
#[derive(Debug, Subcommand)]
enum Engine {
    // Rhai-script based rules engine.
    #[command(name = "rhai")]
    Rhai(crate::rules::rhai::Command),
    // Table-driven rules engine.
    #[command(name = "table")]
    Table(crate::rules::table::Command),
}
impl Engine {
fn get_factory(&self) -> &dyn TransactionProcessorFactory {
use Engine::*;
match self {
Rhai(cmd) => cmd,
Table(cmd) => cmd,
}
}
}
impl Command {
    /// Read the input journal, run every transaction through the selected
    /// rules engine, and write the transformed ledger to the output spec.
    /// Errors from parsing, processing or I/O are propagated to the caller.
    pub fn run(&self) -> Result<()> {
        let processor = self.engine.get_factory().make_processor()?;
        let ledger = filespec::read_ledger_file(&self.input_journal)?;
        let trns = TransactionPostings::from_ledger(ledger)?;
        let new_trns = processor.update_transactions(trns)?;
        let ledger = TransactionPostings::into_ledger(new_trns);
        filespec::write_ledger_file(&self.output, &ledger)?;
        Ok(())
    }
}
| true
|
fa464abe0d42c0466c522f97c76fd4c48c666031
|
Rust
|
justinpombrio/synless
|
/language/src/ast/ast_forest.rs
|
UTF-8
| 2,501
| 3.171875
| 3
|
[] |
no_license
|
use super::ast::{Ast, Id, NodeData};
use super::ast_ref::AstRef;
use super::text::Text;
use crate::language::LanguageSet;
use crate::language::{Arity, ConstructId, Grammar};
use forest::Forest;
/// All [`Asts`] belong to an `AstForest`.
///
/// It is your responsibility to ensure that `Ast`s are kept with the forest they came from. The
/// methods on `Ast`s may panic or worse if you use them on a different forest.
pub struct AstForest<'l> {
    /// The language set whose grammars nodes in this forest reference.
    pub(super) lang: LanguageSet<'l>,
    /// Underlying tree storage; branches carry `NodeData`, leaves hold `Text`.
    forest: Forest<NodeData<'l>, Text>,
    /// Source of fresh node ids; see `next_id()` below.
    next_id: Id,
}
impl<'l> AstForest<'l> {
    /// Construct a new, empty, forest.
    pub fn new(language_set: LanguageSet<'l>) -> AstForest<'l> {
        AstForest {
            lang: language_set,
            forest: Forest::new(),
            next_id: Id(0),
        }
    }
    /// Create a new `hole` node in this forest.
    /// A hole is represented as a childless branch carrying the language
    /// set's builtin hole construct.
    // TODO: 'cept for Id, this can take &self! Is that useful?
    pub fn new_hole(&mut self) -> Ast<'l> {
        let (grammar, construct_id) = self.lang.builtin_hole_info();
        let node = NodeData {
            grammar,
            construct_id,
            id: self.next_id(),
        };
        Ast::new(self.forest.new_branch(node, vec![]))
    }
    /// Create a new tree rooted at the given construct of `grammar`.
    /// Shape depends on the construct's arity: texty constructs become
    /// leaves with inactive text; fixed-arity constructs get one hole child
    /// per sort; listy constructs start with no children.
    pub fn new_tree(&mut self, grammar: &'l Grammar, construct_id: ConstructId) -> Ast<'l> {
        let construct = grammar.construct(construct_id);
        let node = NodeData {
            grammar,
            construct_id,
            id: self.next_id(),
        };
        match &construct.arity {
            Arity::Texty => Ast::new(self.forest.new_leaf(node, Text::new_inactive())),
            Arity::Fixed(sorts) => {
                // Pre-fill every fixed child position with a fresh hole.
                let children = (0..sorts.len())
                    .map(|_| self.new_hole().tree)
                    .collect::<Vec<_>>();
                Ast::new(self.forest.new_branch(node, children))
            }
            Arity::Listy(_) => Ast::new(self.forest.new_branch(node, vec![])),
        }
    }
    /// Borrow `ast` for the duration of `func`, giving it an `AstRef` view
    /// that pairs the tree reference with this forest's language set.
    pub fn borrow<R>(&self, ast: &Ast<'l>, func: impl FnOnce(AstRef<'_, 'l>) -> R) -> R {
        ast.tree.borrow(|tree_ref| {
            func(AstRef {
                lang: &self.lang,
                tree_ref: tree_ref,
            })
        })
    }
    /*
    pub fn borrow<'f>(&'f self, ast: &'f Ast<'l>) -> AstRef<'f, 'l> {
        AstRef {
            lang: &self.lang,
            tree_ref: ast.tree.borrow(),
        }
    }
    */
    // Hand out a fresh node id. Note: increments first, so the first id
    // issued is Id(1) and the stored value is the last id handed out.
    fn next_id(&mut self) -> Id {
        self.next_id.0 += 1;
        Id(self.next_id.0)
    }
}
| true
|
7d78a8eff148c5cce26528d09151268fafa19df8
|
Rust
|
tcharding/self_learning
|
/rust/rust-book/add/add-one/src/lib.rs
|
UTF-8
| 405
| 3.9375
| 4
|
[] |
no_license
|
use rand;
/// Adds one to the given number.
///
/// # Example
///
/// ```
/// let x = 5;
/// let result = add_one::add_one(x);
///
/// assert_eq!(result, 6);
/// ```
pub fn add_one(x: i32) -> i32 {
    // Successor of `x`; overflow behaviour is that of the built-in `+`.
    let incremented = x + 1;
    incremented
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: the arithmetic the doc example relies on holds.
    #[test]
    fn it_works() {
        let four = 2 + 2;
        assert_eq!(four, 4);
    }

    // Negative inputs are handled like any other i32.
    #[test]
    fn it_adds_one() {
        let expected = -2;
        assert_eq!(add_one(-3), expected);
    }
}
| true
|
970e4bfc4cd6c634e6b4462dd274ef28822ac1b3
|
Rust
|
Velrok/the-rust-programming-language-book
|
/ownership/src/main.rs
|
UTF-8
| 4,406
| 3.984375
| 4
|
[
"Unlicense"
] |
permissive
|
// Keep these rules in mind as we work through the examples that illustrate them:
// -----------------------
// 1. Each value in Rust has a variable that’s called its owner.
// 2. There can be only one owner at a time.
// 3. When the owner goes out of scope, the value will be dropped.
fn main() {
simple_example();
move_example();
clone_example();
copy_trait();
ownership_and_functions();
ownership_and_return_values();
ref_borrow();
}
fn simple_example() {
{
// "hello" -> string litteral with known size at compile time
// and therefore allocated on the stack
// also immutable
let _s = "stack-allocated"; // valid from this point
} // _s no longer valid
}
fn move_example() {
{
// String type -> currently of known size, but theoretically mutable so unknown at compile
// time so allocated on the heap.
// - The memory must be requested from the operating system at runtime.
// - We need a way of returning this memory to the operating system when
// we’re done with our String.
// Rust takes a different path: the memory is automatically returned
// once the variable that owns it goes out of scope.
let _ss = String::from("heap-allocated");
let s1 = String::from("heap-allocated"); // this is a pointer to memory on the heap
let _s2 = s1; // different pointer to the same heap space
// ownership of memory is MOVED to s2
// s1 no longer valid! So we don't call free twice.
// println!("{} {}", s1, s2); // will not compile
} // free _ss
// automatically calls 'drop' which is implemented by String
// calls drop on _s2 only
}
fn clone_example() {
let s1 = String::from("heap-allocated"); // this is a pointer to memory on the heap
let s2 = s1.clone(); // deep copy
println!("{} {}", s1, s2);
}
fn copy_trait() {
let x = 2;
let _y = x; // implicit copy, because Integer implements the Copy trait
// because it's stack only
// You are not allowed to implement Copy if you also implement Drop
}
fn ownership_and_functions() {
let s = String::from("hello"); // s comes into scope
takes_ownership(s); // s's value moves into the function...
// ... and so is no longer valid here
let x = 5; // x comes into scope
makes_copy(x); // x would move into the function,
// but i32 is Copy, so it's okay to
// still use x afterward
} // Here, x goes out of scope, then s. But because s's value was moved,
// nothing special happens.
fn takes_ownership(some_string: String) {
// some_string comes into scope
println!("{}", some_string);
} // Here, some_string goes out of scope and `drop` is called. The backing
// memory is freed.
fn makes_copy(some_integer: i32) {
// some_integer comes into scope
println!("{}", some_integer);
} // Here, some_integer goes out of scope. Nothing special happens.
fn ownership_and_return_values() {
let _s1 = gives_ownership(); // gives_ownership moves its return
// value into s1
let s2 = String::from("hello"); // s2 comes into scope
let _s3 = takes_and_gives_back(s2); // s2 is moved into
// takes_and_gives_back, which also
// moves its return value into s3
} // Here, s3 goes out of scope and is dropped. s2 goes out of scope but was
// moved, so nothing happens. s1 goes out of scope and is dropped.
fn gives_ownership() -> String {
// gives_ownership will move its
// return value into the function
// that calls it
let some_string = String::from("hello"); // some_string comes into scope
some_string // some_string is returned and
// moves out to the calling
// function
}
// takes_and_gives_back will take a String and return one
fn takes_and_gives_back(a_string: String) -> String {
// a_string comes into
// scope
a_string // a_string is returned and moves out to the calling function
}
fn ref_borrow() {
let s1 = String::from("hello");
let len = calculate_length(&s1);
println!("The length of '{}' is {}", s1, len);
}
fn calculate_length(s: &String) -> usize {
s.len()
}
| true
|
8239396f3faa8a8316266eed195d92660eb0acaf
|
Rust
|
stec-zcps/rperf
|
/src/server_udp.rs
|
UTF-8
| 2,424
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
/*<copyright file="server_udp.rs" company="Fraunhofer Institute for Manufacturing Engineering and Automation IPA">
Copyright 2021 Fraunhofer Institute for Manufacturing Engineering and Automation IPA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
</copyright>*/
pub mod server {
    use std::net::UdpSocket;
    use std::time::{SystemTime, UNIX_EPOCH};
    /// A simple UDP responder for network performance measurements: echoes
    /// each request's 8-byte header back together with the server's current
    /// UNIX time.
    pub struct ServerUdp {
        // Receive-loop flag, checked once per received packet.
        run: bool
    }
    impl ServerUdp {
        /// Create a server whose receive loop is initially enabled.
        pub fn new() -> ServerUdp {
            ServerUdp {
                run: true
            }
        }
        /// Bind to `0.0.0.0:port` and answer every incoming datagram.
        ///
        /// The reply carries the first 8 bytes of the request unchanged
        /// (bytes 0..=7, e.g. a sequence number) followed by the current
        /// UNIX time as a big-endian f64 (bytes 8..=15). With
        /// `symmetric_network_load` set and a request of at least 16 bytes,
        /// the reply is padded to the request's size; otherwise it is
        /// exactly 16 bytes.
        ///
        /// NOTE(review): `start` borrows `self` immutably and blocks in
        /// `recv_from`, while `stop` requires `&mut self` — as written the
        /// loop cannot be stopped while `start` is running on the same
        /// instance; confirm intended usage.
        pub fn start(&self, port: u16, symmetric_network_load: bool) -> std::io::Result<()> {
            // Open UDP socket
            let server_address = format!("{}:{}", "0.0.0.0", port);
            let socket = UdpSocket::bind(server_address)?;
            println!("Started UDP server on port '{}'", port);
            // Wait for packets
            while self.run {
                let mut buf = [0u8; 1500];
                let (_amt, src) = socket.recv_from(&mut buf)?;
                let mut payload: Vec<u8>;
                if symmetric_network_load && _amt >= 16
                {
                    payload = vec![1u8; _amt];
                } else {
                    payload = vec![1u8; 16];
                }
                // Echo the request header (first 8 bytes) back unchanged.
                payload[0..=7].copy_from_slice(&buf[0..=7]);
                let current_system_time_unix_epoch = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                // NOTE(review): despite the `_ms` suffix this value is in
                // seconds (whole seconds plus fractional nanoseconds).
                let current_system_time_unix_epoch_ms = current_system_time_unix_epoch.as_secs() as f64
                    + current_system_time_unix_epoch.subsec_nanos() as f64 * 1e-9;
                payload[8..=15].copy_from_slice(&current_system_time_unix_epoch_ms.to_be_bytes());
                socket.send_to(&payload, &src)?;
            }
            Ok(())
        }
        /// Request loop termination; takes effect before the next receive.
        #[allow(dead_code)]
        pub fn stop(&mut self)
        {
            self.run = false;
        }
    }
}
| true
|
36c814398e869cce47d7a1203241b5a889369a7f
|
Rust
|
ChristophHaag/wyvern
|
/src/algebra/vector.rs
|
UTF-8
| 16,059
| 3.015625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
// Copyright (c) 2016-2017 Bruce Stenning. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
// AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
#![allow(dead_code)]
use std::ops::*;
use std::mem;
use num::*;
use std::cmp::PartialEq;
use std::fmt;
/// A generic 2-component vector.
#[derive(Clone, Copy)]
pub struct Vec2<T> {
    pub x: T,
    pub y: T,
}

/// Component-wise addition of two 2-component vectors.
impl<T> Add for Vec2<T>
    where T: Add<T, Output = T>
{
    type Output = Vec2<T>;

    fn add(self, other: Vec2<T>) -> Vec2<T> {
        Vec2 {
            x: self.x + other.x,
            y: self.y + other.y,
        }
    }
}

/// Add a scalar to both components in place (note: the RHS is a scalar,
/// not another vector).
impl<T> AddAssign<T> for Vec2<T>
    where T: AddAssign<T> + Copy
{
    fn add_assign(&mut self, addition: T) {
        self.x += addition;
        self.y += addition;
    }
}

/// Component-wise subtraction of two 2-component vectors.
impl<T> Sub for Vec2<T>
    where T: Sub<T, Output = T>
{
    type Output = Vec2<T>;

    fn sub(self, other: Vec2<T>) -> Vec2<T> {
        Vec2 {
            x: self.x - other.x,
            y: self.y - other.y,
        }
    }
}

/// Component-wise subtraction by reference, yielding an owned result.
impl<'a, T> Sub for &'a Vec2<T>
    where T: Sub<T, Output = T> + Copy
{
    type Output = Vec2<T>;

    fn sub(self, other: &'a Vec2<T>) -> Vec2<T> {
        Vec2 {
            x: self.x - other.x,
            y: self.y - other.y,
        }
    }
}

/// Subtract a scalar from both components in place.
impl<T> SubAssign<T> for Vec2<T>
    where T: SubAssign<T> + Copy
{
    fn sub_assign(&mut self, subtraction: T) {
        self.x -= subtraction;
        self.y -= subtraction;
    }
}

/// Two vectors are equal exactly when both corresponding components are equal.
impl<T> PartialEq for Vec2<T>
    where T: PartialEq
{
    fn eq(&self, other: &Vec2<T>) -> bool {
        self.x == other.x && self.y == other.y
    }
}
/// Approximate equivalence for 2-component vectors
///
/// other: Vector for comparison
/// ulps: How many units in the last place to compare to (approximately)
impl Vec2<f32> {
    pub fn approx_eq_ulps(&self, other: &Vec2<f32>, ulps: i32) -> bool {
        // Decimal tolerance derived from `ulps`: 10^(ulps - 7), i.e. 1.0 at
        // ulps == 7. This is a digits-of-precision test rather than true
        // ULP counting.
        let pe = 10.0f32.powf((ulps - 7) as f32);
        if self.x < other.x - pe {
            return false;
        };
        if self.x > other.x + pe {
            return false;
        };
        if self.y < other.y - pe {
            return false;
        };
        if self.y > other.y + pe {
            return false;
        };
        // NOTE(review): NaN components fail every `<`/`>` test above, so
        // vectors containing NaN are reported approximately equal.
        return true;
    }
}
/// How to display a 2-component vector
impl<T: fmt::Display> fmt::Display for Vec2<T> {
    /// Formats as "(x, y)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}, {})", self.x, self.y)
    }
}
/// How to display a 2-component vector of f32 for debugging purposes
impl fmt::Debug for Vec2<f32> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
write!(f,
"({:x}, {:x})",
mem::transmute::<f32, i32>(self.x),
mem::transmute::<f32, i32>(self.y))
}
}
}
/// A generic 3-component vector.
#[derive(Clone, Copy)]
pub struct Vec3<T> {
    pub x: T,
    pub y: T,
    pub z: T,
}

/// Component-wise addition of two 3-component vectors.
impl<T> Add for Vec3<T>
    where T: Add<T, Output = T>
{
    type Output = Vec3<T>;

    fn add(self, other: Vec3<T>) -> Vec3<T> {
        Vec3 {
            x: self.x + other.x,
            y: self.y + other.y,
            z: self.z + other.z,
        }
    }
}

/// Add a scalar to every component in place (note: the RHS is a scalar,
/// not another vector).
impl<T> AddAssign<T> for Vec3<T>
    where T: AddAssign<T> + Copy
{
    fn add_assign(&mut self, addition: T) {
        self.x += addition;
        self.y += addition;
        self.z += addition;
    }
}

/// Component-wise subtraction of two 3-component vectors.
impl<T> Sub for Vec3<T>
    where T: Sub<T, Output = T>
{
    type Output = Vec3<T>;

    fn sub(self, other: Vec3<T>) -> Vec3<T> {
        Vec3 {
            x: self.x - other.x,
            y: self.y - other.y,
            z: self.z - other.z,
        }
    }
}

/// Component-wise subtraction by reference, yielding an owned result.
impl<'a, T> Sub for &'a Vec3<T>
    where T: Sub<T, Output = T> + Copy
{
    type Output = Vec3<T>;

    fn sub(self, other: &'a Vec3<T>) -> Vec3<T> {
        Vec3 {
            x: self.x - other.x,
            y: self.y - other.y,
            z: self.z - other.z,
        }
    }
}

/// Subtract a scalar from every component in place.
impl<T> SubAssign<T> for Vec3<T>
    where T: SubAssign<T> + Copy
{
    fn sub_assign(&mut self, subtraction: T) {
        self.x -= subtraction;
        self.y -= subtraction;
        self.z -= subtraction;
    }
}

/// Divide every component by a scalar.
impl<T> Div<T> for Vec3<T>
    where T: Div<T, Output = T> + Copy
{
    type Output = Vec3<T>;

    fn div(self, divisor: T) -> Vec3<T> {
        Vec3 {
            x: self.x / divisor,
            y: self.y / divisor,
            z: self.z / divisor,
        }
    }
}

/// Divide every component by a scalar in place.
impl<T> DivAssign<T> for Vec3<T>
    where T: DivAssign<T> + Copy
{
    fn div_assign(&mut self, divisor: T) {
        self.x /= divisor;
        self.y /= divisor;
        self.z /= divisor;
    }
}

/// Multiply every component by a scalar.
impl<T> Mul<T> for Vec3<T>
    where T: Mul<T, Output = T> + Copy
{
    type Output = Vec3<T>;

    fn mul(self, multiplicand: T) -> Vec3<T> {
        Vec3 {
            x: self.x * multiplicand,
            y: self.y * multiplicand,
            z: self.z * multiplicand,
        }
    }
}

/// Multiply every component by a scalar, by reference.
impl<'a, T> Mul<T> for &'a Vec3<T>
    where T: Mul<T, Output = T> + Copy
{
    type Output = Vec3<T>;

    fn mul(self, multiplicand: T) -> Vec3<T> {
        Vec3 {
            x: self.x * multiplicand,
            y: self.y * multiplicand,
            z: self.z * multiplicand,
        }
    }
}

/// Multiply every component by a scalar in place.
impl<T> MulAssign<T> for Vec3<T>
    where T: MulAssign<T> + Copy
{
    fn mul_assign(&mut self, multiplicand: T) {
        self.x *= multiplicand;
        self.y *= multiplicand;
        self.z *= multiplicand;
    }
}
/// Construct new zero 3-component vector
impl<T: Zero> Vec3<T> {
    /// Every component is initialised to `T::zero()`.
    pub fn new() -> Vec3<T> {
        Vec3 {
            x: T::zero(),
            y: T::zero(),
            z: T::zero(),
        }
    }
}
impl Vec3<f32> {
    /// Calculate the magnitude (Euclidean length) of a 3-component vector
    pub fn magnitude(&self) -> f32 {
        let magsq = self.x * self.x + self.y * self.y + self.z * self.z;
        magsq.sqrt()
    }
    /// Calculate the magnitude squared of a 3-component vector
    /// (avoids the square root when only relative lengths matter)
    pub fn magnitude_squared(&self) -> f32 {
        self.x * self.x + self.y * self.y + self.z * self.z
    }
    /// Normalise a 3-component vector
    ///
    /// Note: no zero-length guard — a zero vector yields NaN components.
    pub fn normalise(&self) -> Vec3<f32> {
        let magsq = self.x * self.x + self.y * self.y + self.z * self.z;
        let invmag = 1.0f32 / magsq.sqrt();
        Vec3::<f32> {
            x: self.x * invmag,
            y: self.y * invmag,
            z: self.z * invmag,
        }
    }
    /// Normalise a 3-component vector using the "fast inverse square root"
    /// bit hack with one Newton-Raphson refinement step.
    ///
    /// https://en.wikipedia.org/wiki/Fast_inverse_square_root
    ///
    /// Rewritten with `f32::to_bits`/`f32::from_bits`, which are guaranteed
    /// to perform the same bit-level reinterpretation as the old
    /// `mem::transmute` calls — the result is bit-identical, with no
    /// `unsafe` required.
    pub fn normalise_evil(&self) -> Vec3<f32> {
        let magsq = self.x * self.x + self.y * self.y + self.z * self.z;
        let x2 = magsq * 0.5f32;
        // Reinterpret the float bits as an i32 and apply the magic constant
        // to obtain a first approximation of 1/sqrt(magsq).
        let i = 0x5f3759df - ((magsq.to_bits() as i32) >> 1);
        let mut y = f32::from_bits(i as u32);
        // One Newton-Raphson iteration improves the approximation.
        y = y * (1.5f32 - (x2 * y * y));
        let invmag = y;
        Vec3::<f32> {
            x: self.x * invmag,
            y: self.y * invmag,
            z: self.z * invmag,
        }
    }
}
impl<T: AddAssign<T> + Copy> Vec3<T> {
    /// 3-component vector add to 3-component vector
    /// (unlike the `+=` operator above, whose RHS is a scalar)
    ///
    /// other: The RHS of the addition
    pub fn add_assign_vec(&mut self, other: &Vec3<T>) {
        self.x += other.x;
        self.y += other.y;
        self.z += other.z;
    }
}
impl<T: Add<T, Output = T> + Mul<T, Output = T> + Copy> Vec3<T> {
    /// Calculate the dot product of two 3-component vectors
    ///
    /// other: The RHS of the dot-product
    pub fn dot(&self, other: &Vec3<T>) -> T {
        self.x * other.x + self.y * other.y + self.z * other.z
    }
}
impl<T: Sub<T, Output = T> + Mul<T, Output = T> + Copy> Vec3<T> {
    /// Calculate the cross product of two 3-component vectors
    /// (associated function: call as `Vec3::cross(&a, &b)`);
    /// uses the standard component formula, producing a vector
    /// perpendicular to both inputs.
    ///
    /// other: The RHS of the cross-product
    pub fn cross(a: &Vec3<T>, b: &Vec3<T>) -> Vec3<T> {
        Vec3::<T> {
            x: a.y * b.z - b.y * a.z,
            y: a.z * b.x - b.z * a.x,
            z: a.x * b.y - b.x * a.y,
        }
    }
}
impl<T: One + Copy> Vec3<T> {
    /// Convert a 3-component vector to a 4-component vector in homogeneous coordinates
    /// (the w component is set to one).
    pub fn to_homogeneous(&self) -> Vec4<T> {
        Vec4 {
            x: self.x,
            y: self.y,
            z: self.z,
            w: T::one(),
        }
    }
}
/// Linear interpolation of two vectors
///
/// This version should guarantee a at t = 0.0 and b at t = 1.0
///
/// a: First vector
/// b: Second vector
/// t: Interpolation value in range [0, 1]
impl<T: One + Mul<T, Output = T> + Sub<T, Output = T> + Add<T, Output = T> + Copy> Vec3<T> {
    pub fn lerp(a: &Vec3<T>, b: &Vec3<T>, t: T) -> Vec3<T> {
        // a*(1-t) + b*t per component: the symmetric form reproduces the
        // endpoints exactly, unlike the a + (b-a)*t formulation.
        Vec3 {
            x: a.x * (T::one() - t) + b.x * t,
            y: a.y * (T::one() - t) + b.y * t,
            z: a.z * (T::one() - t) + b.z * t,
        }
    }
}
/// Equivalence operator for 3-component vectors
/// (equal exactly when every corresponding component is equal)
///
/// other: Vector for comparison
impl<T: PartialEq> PartialEq for Vec3<T> {
    fn eq(&self, other: &Vec3<T>) -> bool {
        (self.x == other.x) && (self.y == other.y) && (self.z == other.z)
    }
}
/// Approximate equivalence for 3-component vectors
///
/// other: Vector for comparison
/// ulps: How many units in the last place to compare to (approximately)
impl Vec3<f32> {
    pub fn approx_eq_ulps(&self, other: &Vec3<f32>, ulps: i32) -> bool {
        // Decimal tolerance 10^(ulps - 7); a digits-of-precision test
        // rather than true ULP counting (see Vec2::approx_eq_ulps).
        let pe = 10.0f32.powf((ulps - 7) as f32);
        if self.x < other.x - pe {
            return false;
        };
        if self.x > other.x + pe {
            return false;
        };
        if self.y < other.y - pe {
            return false;
        };
        if self.y > other.y + pe {
            return false;
        };
        if self.z < other.z - pe {
            return false;
        };
        if self.z > other.z + pe {
            return false;
        };
        // NOTE(review): NaN components fail every `<`/`>` test above, so
        // vectors containing NaN are reported approximately equal.
        return true;
    }
}
/// How to display a 3-component vector
impl<T: fmt::Display> fmt::Display for Vec3<T> {
    /// Formats as "(x, y, z)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}, {}, {})", self.x, self.y, self.z)
    }
}
/// How to display a 3-component vector of f32 for debugging purposes
impl fmt::Debug for Vec3<f32> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
write!(f,
"({:x}, {:x}, {:x})",
mem::transmute::<f32, i32>(self.x),
mem::transmute::<f32, i32>(self.y),
mem::transmute::<f32, i32>(self.z))
}
}
}
/// A generic 4-component vector: x, y, z plus the homogeneous coordinate w
/// (see `project` and `Vec3::to_homogeneous`).
#[derive(Clone, Copy)]
pub struct Vec4<T> {
    pub x: T,
    pub y: T,
    pub z: T,
    pub w: T,
}
/// Construct a 4-component zero vector
impl<T: Zero> Vec4<T> {
    /// Every component is initialised to `T::zero()`.
    pub fn new() -> Vec4<T> {
        Vec4 {
            x: T::zero(),
            y: T::zero(),
            z: T::zero(),
            w: T::zero(),
        }
    }
}
/// Project a 4-component vector in homogeneous coordinates to 3-space
impl<T> Vec4<T>
    where T: Div<T, Output = T> + Copy
{
    /// Divide x, y and z by the homogeneous coordinate w.
    pub fn project(&self) -> Vec3<T> {
        Vec3 {
            x: self.x / self.w,
            y: self.y / self.w,
            z: self.z / self.w,
        }
    }
}

/// Component-wise addition of two 4-component vectors.
impl<T> Add for Vec4<T>
    where T: Add<T, Output = T>
{
    type Output = Vec4<T>;

    fn add(self, other: Vec4<T>) -> Vec4<T> {
        Vec4 {
            x: self.x + other.x,
            y: self.y + other.y,
            z: self.z + other.z,
            w: self.w + other.w,
        }
    }
}

/// Equivalence operator for 4-component vectors: equal exactly when every
/// corresponding component is equal.
impl<T> PartialEq for Vec4<T>
    where T: PartialEq
{
    fn eq(&self, other: &Vec4<T>) -> bool {
        self.x == other.x && self.y == other.y && self.z == other.z && self.w == other.w
    }
}
/// Approximate equivalence for 4-component vectors
///
/// other: Vector for comparison
/// ulps: How many units in the last place to compare to (approximately)
impl Vec4<f32> {
    pub fn approx_eq_ulps(&self, other: &Vec4<f32>, ulps: i32) -> bool {
        // Decimal tolerance 10^(ulps - 7); a digits-of-precision test
        // rather than true ULP counting (see Vec2::approx_eq_ulps).
        let pe = 10.0f32.powf((ulps - 7) as f32);
        if self.x < other.x - pe {
            return false;
        };
        if self.x > other.x + pe {
            return false;
        };
        if self.y < other.y - pe {
            return false;
        };
        if self.y > other.y + pe {
            return false;
        };
        if self.z < other.z - pe {
            return false;
        };
        if self.z > other.z + pe {
            return false;
        };
        if self.w < other.w - pe {
            return false;
        };
        if self.w > other.w + pe {
            return false;
        };
        // NOTE(review): NaN components fail every `<`/`>` test above, so
        // vectors containing NaN are reported approximately equal.
        return true;
    }
}
/// How to display a 4-component vector
impl<T: fmt::Display> fmt::Display for Vec4<T> {
    /// Formats as "(x, y, z, w)".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "({}, {}, {}, {})", self.x, self.y, self.z, self.w)
    }
}
/// How to display a 4-component vector of f32 for debugging purposes
impl fmt::Debug for Vec4<f32> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
write!(f,
"({:x}, {:x}, {:x}, {:x})",
mem::transmute::<f32, i32>(self.x),
mem::transmute::<f32, i32>(self.y),
mem::transmute::<f32, i32>(self.z),
mem::transmute::<f32, i32>(self.w))
}
}
}
| true
|
6b424a4a26a6937d3fea616049e9d2bd4fb51aa1
|
Rust
|
noocene/core-futures-io
|
/src/ext/read/take.rs
|
UTF-8
| 1,681
| 2.546875
| 3
|
[] |
no_license
|
use crate::AsyncRead;
use _futures::ready;
use core::{
mem::MaybeUninit,
pin::Pin,
task::{Context, Poll},
};
use pin_project_lite::pin_project;
pin_project! {
    /// An `AsyncRead` adapter that limits how many bytes can be read from
    /// the wrapped reader before EOF is reported (see `poll_read` below).
    #[derive(Debug)]
    #[must_use = "streams do nothing unless you `.await` or poll them"]
    #[cfg_attr(docsrs, doc(cfg(feature = "io-util")))]
    pub struct Take<R> {
        // The wrapped reader; structurally pinned via pin_project.
        #[pin]
        inner: R,
        // Remaining byte budget; decremented on every successful read.
        limit_: u64,
    }
}
/// Create a `Take` adapter that reads at most `limit` bytes from `inner`.
pub(super) fn take<R: AsyncRead>(inner: R, limit: u64) -> Take<R> {
    Take {
        inner,
        limit_: limit,
    }
}
impl<R: AsyncRead> Take<R> {
    /// Number of bytes that may still be read before EOF is reported.
    pub fn limit(&self) -> u64 {
        self.limit_
    }
    /// Reset the remaining byte budget.
    pub fn set_limit(&mut self, limit: u64) {
        self.limit_ = limit
    }
    /// Shared access to the wrapped reader.
    pub fn get_ref(&self) -> &R {
        &self.inner
    }
    /// Exclusive access to the wrapped reader.
    pub fn get_mut(&mut self) -> &mut R {
        &mut self.inner
    }
    /// Pinned exclusive access to the wrapped reader.
    pub fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut R> {
        self.project().inner
    }
    /// Consume the adapter, returning the wrapped reader.
    pub fn into_inner(self) -> R {
        self.inner
    }
}
impl<R: AsyncRead> AsyncRead for Take<R> {
    type Error = R::Error;
    /// Buffer preparation is delegated unchanged to the wrapped reader.
    unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [MaybeUninit<u8>]) -> bool {
        self.inner.prepare_uninitialized_buffer(buf)
    }
    /// Read from the inner reader, capped at the remaining limit.
    ///
    /// Reports `Ok(0)` (EOF) once the limit is exhausted; otherwise reads
    /// into at most `min(buf.len(), limit_)` bytes of `buf` and decrements
    /// the limit by the number of bytes actually read.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut [u8],
    ) -> Poll<Result<usize, Self::Error>> {
        if self.limit_ == 0 {
            return Poll::Ready(Ok(0));
        }
        let me = self.project();
        // Shrink the destination slice so the inner reader cannot overrun
        // the remaining budget.
        let max = core::cmp::min(buf.len() as u64, *me.limit_) as usize;
        let n = ready!(me.inner.poll_read(cx, &mut buf[..max]))?;
        *me.limit_ -= n as u64;
        Poll::Ready(Ok(n))
    }
}
| true
|
39366e8179ad610d5aa3ed932528c83d1d1a946f
|
Rust
|
nolanderc/advent-of-code
|
/2020/day-5/part-1/src/main.rs
|
UTF-8
| 997
| 3.453125
| 3
|
[] |
no_license
|
/// Reads boarding passes from stdin, decodes each into a seat id
/// (`row * 8 + col`), and prints the highest id seen. Nothing is printed
/// when the input is empty — the same as the original iterator version.
fn main() {
    let stdin = std::io::stdin();
    let mut best: Option<u32> = None;
    for line in std::io::BufRead::lines(stdin.lock()) {
        let line = line.unwrap();
        let mut splits = line.chars().map(|ch| match ch {
            'F' | 'L' => Split::Low,
            'B' | 'R' => Split::High,
            _ => panic!("not a valid partition: {:?}", ch),
        });
        // First 7 characters select the row (0..=127), last 3 the column (0..=7).
        let row = binary_partition(0, 127, splits.by_ref().take(7));
        let col = binary_partition(0, 7, splits.by_ref().take(3));
        let seat = row * 8 + col;
        best = Some(best.map_or(seat, |b| b.max(seat)));
    }
    if let Some(max) = best {
        println!("{}", max);
    }
}
/// One step of a binary space partition: keep the lower or the upper half.
enum Split {
    Low,
    High,
}

/// Narrows the inclusive range `low..=high` by halving it once per split and
/// returns the single remaining value.
///
/// The midpoint is computed as `low + (high - low) / 2` rather than
/// `(low + high) / 2`, so the sum cannot overflow `u32` for large ranges.
///
/// # Panics
/// Panics if the splits do not narrow the range down to exactly one value.
fn binary_partition(mut low: u32, mut high: u32, splits: impl Iterator<Item = Split>) -> u32 {
    for split in splits {
        let mid = low + (high - low) / 2;
        match split {
            Split::Low => high = mid,
            Split::High => low = mid + 1,
        }
    }
    assert_eq!(low, high, "could not partition {}..={}", low, high);
    low
}
| true
|
ad29a21d0934341fc588eaf9cd4d31ad49f1972f
|
Rust
|
primitiv/primitiv-rust
|
/primitiv-derive/src/lib.rs
|
UTF-8
| 15,148
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
extern crate proc_macro;
extern crate proc_macro2;
extern crate syn;
#[macro_use]
extern crate quote;
use proc_macro2::{Span, TokenStream};
use syn::punctuated::Punctuated;
use syn::token::Comma;
use syn::*;
// TODO(chantera): support generics
//
// ```rust
// struct ModelImpl<T>(T);
//
// impl Model for ModelImpl<Parameter> {
//     fn register_parameters(&mut self) {
//         ...
//     }
// }
//
// impl<M: Model> Model for ModelImpl<M> {
//     fn register_parameters(&mut self) {
//         ...
//     }
// }
// ```
/// Entry point for `#[derive(Model)]`.
///
/// Parses the annotated item and expands it into `Model`/`Drop` impls.
/// Unsupported inputs abort compilation via `panic!`, which the proc-macro
/// machinery reports as a compile error.
#[proc_macro_derive(Model, attributes(primitiv))]
pub fn derive_model(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    let input: DeriveInput = syn::parse(input).unwrap();
    // The old `.into()` on the match scrutinee was an identity conversion;
    // also, `panic!(msg)` with a non-literal is deprecated (non_fmt_panics),
    // so an explicit format string is used.
    match expand_derive_model(&input) {
        Ok(expanded) => expanded.into(),
        Err(msg) => panic!("{}", msg),
    }
}
/// Expands `#[derive(Model)]` for `input` into `Model` and `Drop` impls,
/// wrapped in a dummy const so the `extern crate primitiv` import stays
/// hygienic. Returns `Err` for unions, which the derive does not support.
fn expand_derive_model(input: &DeriveInput) -> Result<TokenStream, &'static str> {
    let ident = &input.ident;
    let name = ident.to_string();
    let generics = input.generics.clone();
    let (_, ty_generics, _) = input.generics.split_for_impl();
    let (impl_generics, _, where_clause) = generics.split_for_impl();
    // Body of `register_parameters`, built from the item's shape;
    // `None` means there is nothing to register.
    let impl_body = match input.data {
        Data::Struct(ref data) => impl_body_from_struct(ident, &data.fields),
        Data::Enum(ref data) => impl_body_from_enum(ident, &data.variants),
        Data::Union(_) => {
            return Err("primitiv does not support derive for unions");
        }
    };
    let impl_model = match impl_body {
        Some(body) => quote! {
            impl #impl_generics _primitiv::Model for #ident #ty_generics #where_clause {
                fn register_parameters(&mut self) {
                    // Raw-pointer round trip so the generated body can use
                    // `model` mutably while also walking `self`'s fields.
                    let handle: *mut _ = self;
                    unsafe {
                        let model = &mut *handle;
                        #body
                    }
                }
                fn identifier(&self) -> u64 {
                    use std::collections::hash_map::DefaultHasher;
                    use std::hash::Hasher;
                    let mut hasher = DefaultHasher::new();
                    // Identity = type name + address of this value.
                    hasher.write(format!("{}-{:p}", #name, self).as_bytes());
                    hasher.finish()
                }
            }
        },
        None => quote! {
            impl #impl_generics _primitiv::Model for #ident #ty_generics #where_clause {
                fn register_parameters(&mut self) {}
            }
        },
    };
    let impl_drop = quote! {
        impl #impl_generics Drop for #ident #ty_generics #where_clause {
            fn drop(&mut self) {
                _primitiv::Model::invalidate(self);
            }
        }
    };
    let dummy_const = Ident::new(&format!("_IMPL_MODEL_FOR_{}", ident), Span::call_site());
    let generated = quote! {
        #[allow(non_upper_case_globals)]
        const #dummy_const: () = {
            extern crate primitiv as _primitiv;
            #impl_model
            #impl_drop
        };
    };
    Ok(generated)
}
/// Builds the `register_parameters` body for a struct, or `None` when
/// nothing needs registering (unit structs, or every field filtered out).
fn impl_body_from_struct(_name: &Ident, fields: &Fields) -> Option<TokenStream> {
    match fields {
        Fields::Named(ref f) => Some(map_fields(
            &f.named,
            true,
            Some(&Ident::new("self", Span::call_site())),
            None,
        )),
        Fields::Unnamed(ref f) => Some(map_fields(
            &f.unnamed,
            false,
            Some(&Ident::new("self", Span::call_site())),
            None,
        )),
        Fields::Unit => None,
    }.and_then(|tokens| {
        // Keep only fields that produced registration code; with none left
        // the caller emits an empty `register_parameters`.
        let stmts: Vec<TokenStream> = tokens.into_iter().filter_map(|token| token).collect();
        if stmts.len() > 0 {
            Some(quote!(#(#stmts)*))
        } else {
            None
        }
    })
}
/// Builds the `register_parameters` body for an enum: a `match` over `self`
/// with one arm per variant. Returns `None` when no variant holds anything
/// registrable. Fields that produce no code are bound to `_`-prefixed names
/// so the generated arm does not trigger unused-variable warnings.
fn impl_body_from_enum(name: &Ident, variants: &Punctuated<Variant, Comma>) -> Option<TokenStream> {
    // Each entry: (match arm tokens, whether the arm registers anything).
    let stmts: Vec<(TokenStream, bool)> = variants
        .iter()
        .map(|variant| {
            let variant_ident = &variant.ident;
            let variant_name = variant_ident.to_string();
            match variant.fields {
                Fields::Named(ref f) => {
                    let tokens = map_fields(&f.named, true, None, Some(&variant_name[..]));
                    if tokens.iter().any(|token| token.is_some()) {
                        let mut fields = Vec::with_capacity(f.named.len());
                        let mut stmts = Vec::with_capacity(tokens.len());
                        f.named
                            .iter()
                            .zip(tokens)
                            .for_each(|(field, token)| match token {
                                Some(stmt) => {
                                    let ident = field.ident.as_ref().unwrap();
                                    fields.push(quote!(ref mut #ident));
                                    stmts.push(stmt);
                                }
                                None => {
                                    // Unregistered field: bind to `_name` to
                                    // silence unused warnings in the arm.
                                    let ident = field.ident.as_ref().unwrap();
                                    let unused_ident = Ident::new(
                                        &format!("_{}", ident.to_string()),
                                        Span::call_site(),
                                    );
                                    fields.push(quote!(#ident: ref mut #unused_ident));
                                }
                            });
                        (
                            quote! {
                                #name::#variant_ident{#(#fields),*} => {
                                    #(#stmts)*
                                }
                            },
                            true,
                        )
                    } else {
                        (quote!(#name::#variant_ident{..} => {}), false)
                    }
                }
                Fields::Unnamed(ref f) => {
                    let tokens = map_fields(&f.unnamed, false, None, Some(&variant_name[..]));
                    if tokens.iter().any(|token| token.is_some()) {
                        let mut fields = Vec::with_capacity(f.unnamed.len());
                        let mut stmts = Vec::with_capacity(tokens.len());
                        tokens
                            .iter()
                            .enumerate()
                            .for_each(|(i, token)| match token {
                                Some(stmt) => {
                                    // Positional fields are bound as `attrN`,
                                    // matching the names map_fields generates.
                                    let ident =
                                        Ident::new(&format!("attr{}", i), Span::call_site());
                                    fields.push(quote!(ref mut #ident));
                                    stmts.push(stmt);
                                }
                                None => {
                                    let ident =
                                        Ident::new(&format!("_attr{}", i), Span::call_site());
                                    fields.push(quote!(ref mut #ident));
                                }
                            });
                        (
                            quote! {
                                #name::#variant_ident(#(#fields),*) => {
                                    #(#stmts)*
                                }
                            },
                            true,
                        )
                    } else {
                        (quote!(#name::#variant_ident(_) => {}), false)
                    }
                }
                Fields::Unit => (quote!(#name::#variant_ident => {}), false),
            }
        })
        .collect();
    // Emit the match only if at least one arm actually registers something.
    if stmts.len() > 0 && stmts.iter().any(|stmt| stmt.1) {
        let stmts: Vec<_> = stmts.into_iter().map(|stmt| stmt.0).collect();
        Some(quote! { match &mut *self {
            #(#stmts),*
        }})
    } else {
        None
    }
}
/// Produces, per field, an optional registration snippet wrapped in a block
/// that binds `name` to the field's dotted path (e.g. `"Variant.0"`).
///
/// * `named` — whether the fields have identifiers or are positional.
/// * `root_ident` — access root (`self`) for struct fields; `None` for enum
///   variants, where fields are already bound as locals by the match arm.
/// * `root_name` — name prefix for enum variants.
fn map_fields(
    fields: &Punctuated<Field, Comma>,
    named: bool,
    root_ident: Option<&Ident>,
    root_name: Option<&str>,
) -> Vec<Option<TokenStream>> {
    let iter = fields.iter().enumerate();
    let stmts = if named {
        iter.map(|(_i, field)| {
            let field_ident = field.ident.as_ref().unwrap();
            // With a root, access is `root.field` by value; otherwise the
            // field is a `ref mut` local bound by the enclosing match arm.
            let (field_token, is_ref) = match root_ident {
                Some(ident) => (quote!(#ident.#field_ident), false),
                None => (quote!(#field_ident), true),
            };
            parse_field(&field_token, &field.ty, &parse_attrs(&field.attrs), is_ref).map(|stmt| {
                let mut field_name = field_ident.to_string();
                if let Some(root) = root_name {
                    field_name = format!("{}.{}", root, field_name);
                }
                quote!({
                    let name = #field_name;
                    #stmt
                })
            })
        }).collect()
    } else {
        iter.map(|(i, field)| {
            let (field_token, is_ref) = match root_ident {
                Some(ident) => {
                    // Tuple-struct access: `self.0`, `self.1`, ...
                    let index = Index::from(i);
                    (quote!(#ident.#index), false)
                }
                None => {
                    // Enum tuple variant: locals named `attrN` by the arm.
                    let field_ident = Ident::new(&format!("attr{}", i), Span::call_site());
                    (quote!(#field_ident), true)
                }
            };
            parse_field(&field_token, &field.ty, &parse_attrs(&field.attrs), is_ref).map(|stmt| {
                let mut field_name = i.to_string();
                if let Some(root) = root_name {
                    field_name = format!("{}.{}", root, field_name);
                }
                quote!({
                    let name = #field_name;
                    #stmt
                })
            })
        }).collect()
    };
    stmts
}
/// Produces the registration snippet for a single field access, recursing
/// through containers (arrays, `Vec`, tuples, `Option`) down to `Parameter`
/// or sub-`Model` leaves. Returns `None` for types that need no registration.
///
/// `is_ref` — whether `field` already denotes a mutable reference (enum arm
/// binding / loop variable) or a place expression needing `&mut`.
fn parse_field(
    field: &TokenStream,
    ty: &Type,
    attrs: &[FieldAttr],
    is_ref: bool,
) -> Option<TokenStream> {
    match FieldType::from_ty(ty, attrs) {
        FieldType::Array(sub_type) | FieldType::Vec(sub_type) => {
            // Register each element under "<name>.<index>".
            parse_field(&quote!(f), sub_type, attrs, true).map(|stmt| {
                quote! {
                    for (i, f) in #field.iter_mut().enumerate() {
                        let name = format!("{}.{}", name, i);
                        #stmt
                    }
                }
            })
        }
        FieldType::Tuple(sub_types) => {
            // Register each tuple element under "<name>.<position>".
            let stmts: Vec<_> = sub_types
                .iter()
                .enumerate()
                .filter_map(|(i, sub_type)| {
                    let index = Index::from(i);
                    parse_field(&quote!(#field.#index), sub_type, attrs, false).map(|stmt| {
                        quote!({
                            let name = format!("{}.{}", name, #i);
                            #stmt
                        })
                    })
                })
                .collect();
            if stmts.len() > 0 {
                Some(quote!(#(#stmts)*))
            } else {
                None
            }
        }
        FieldType::Option(sub_type) => parse_field(&quote!(f), sub_type, attrs, true).map(|stmt| {
            quote! {
                if let Some(f) = #field.as_mut() {
                    #stmt
                }
            }
        }),
        FieldType::Parameter => {
            // Leaf: register the parameter itself with the model.
            if is_ref {
                Some(quote! {
                    model.add_parameter(&name[..], #field);
                })
            } else {
                Some(quote! {
                    model.add_parameter(&name[..], &mut #field);
                })
            }
        }
        FieldType::Model => {
            // Leaf: recurse into the submodel, then attach it.
            if is_ref {
                Some(quote! {
                    #field.register_parameters();
                    model.add_submodel(&name[..], #field);
                })
            } else {
                Some(quote! {
                    #field.register_parameters();
                    model.add_submodel(&name[..], &mut #field);
                })
            }
        }
        FieldType::Other => None,
    }
}
/// Classification of a field's type, driving how `parse_field` registers it.
enum FieldType<'a> {
    /// `[T; N]` — recurse into the element type per index.
    Array(&'a Type),
    /// `Vec<T>` — recurse into the element type per index.
    Vec(&'a Type),
    /// `(A, B, ...)` — recurse into each element type.
    Tuple(Vec<&'a Type>),
    /// `Option<T>` — recurse only when the value is `Some`.
    Option(&'a Type),
    /// A primitiv `Parameter` leaf.
    Parameter,
    /// A sub-`Model` leaf (flagged via `#[primitiv(submodel)]`).
    Model,
    /// Anything else — nothing to register.
    Other,
}
impl<'a> FieldType<'a> {
    /// Classifies `ty`, consulting the last path segment's identifier for
    /// well-known containers and falling back to the field's `#[primitiv]`
    /// attributes (last one wins) for everything else.
    fn from_ty(ty: &'a Type, attrs: &[FieldAttr]) -> Self {
        match ty {
            Type::Array(ref t) => FieldType::Array(&t.elem),
            Type::Tuple(ref t) => FieldType::Tuple(t.elems.iter().collect()),
            Type::Path(ref t) => match t
                .path
                .segments
                .iter()
                .last()
                .unwrap()
                .ident
                .to_string()
                .as_str()
            {
                "Vec" => FieldType::Vec(FieldType::generic_subtype(ty).unwrap()),
                "Option" => FieldType::Option(FieldType::generic_subtype(ty).unwrap()),
                "Parameter" => FieldType::Parameter,
                _ => match attrs.last() {
                    Some(FieldAttr::Parameter) => FieldType::Parameter,
                    Some(FieldAttr::Model) => FieldType::Model,
                    None => FieldType::Other,
                },
            },
            _ => FieldType::Other,
        }
    }
    /// Extracts the single generic type argument of a path type such as
    /// `Vec<T>` or `Option<T>`; `None` unless exactly one type arg exists.
    fn generic_subtype(ty: &Type) -> Option<&Type> {
        match ty {
            Type::Path(ref t) => match t.path.segments.iter().last().unwrap() {
                PathSegment {
                    arguments:
                        PathArguments::AngleBracketed(AngleBracketedGenericArguments {
                            ref args, ..
                        }),
                    ..
                } if args.len() == 1 =>
                {
                    if let GenericArgument::Type(ref t) = args[0] {
                        Some(t)
                    } else {
                        None
                    }
                }
                _ => None,
            },
            _ => None,
        }
    }
}
/// Field-level override supplied via `#[primitiv(parameter)]` or
/// `#[primitiv(submodel)]`.
enum FieldAttr {
    Parameter,
    Model,
}
/// Collects `#[primitiv(...)]` field attributes into [`FieldAttr`]s.
///
/// Recognizes `parameter` and `submodel`; any other `primitiv` option or
/// malformed syntax aborts the derive with a panic.
fn parse_attrs(attrs: &[Attribute]) -> Vec<FieldAttr> {
    let iter = attrs
        .iter()
        .filter_map(|attr| {
            let path = &attr.path;
            // Only `#[primitiv(...)]` attributes are ours; skip the rest.
            if quote!(#path).to_string() == "primitiv" {
                Some(attr.interpret_meta().unwrap_or_else(|| {
                    // BUG FIX: this used to interpolate `quote!(attr)`, which
                    // rendered the literal identifier `attr` instead of the
                    // offending attribute's tokens. Also made lazy so the
                    // message is only formatted on failure.
                    panic!("invalid primitiv syntax: {}", quote!(#attr))
                }))
            } else {
                None
            }
        })
        .flat_map(|m| match m {
            Meta::List(l) => l.nested,
            tokens => panic!("unsupported syntax: {}", quote!(#tokens).to_string()),
        })
        .map(|m| match m {
            NestedMeta::Meta(m) => m,
            ref tokens => panic!("unsupported syntax: {}", quote!(#tokens).to_string()),
        });
    iter.filter_map(|attr| match attr {
        Meta::Word(ref w) if w == "parameter" => Some(FieldAttr::Parameter),
        Meta::Word(ref w) if w == "submodel" => Some(FieldAttr::Model),
        ref v @ Meta::NameValue(..) | ref v @ Meta::List(..) | ref v @ Meta::Word(..) => {
            panic!("unsupported option: {}", quote!(#v))
        }
    }).collect()
}
| true
|
6b9474831a255ec47cf27f937e36d4a57efa0c45
|
Rust
|
rune-rs/rune
|
/crates/rune/src/runtime/guarded_args.rs
|
UTF-8
| 1,504
| 2.890625
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use crate::runtime::{Stack, UnsafeToValue, VmResult};
/// Trait for converting arguments onto the stack.
///
/// This can take references, because it is unsafe to call. And should only be
/// implemented in contexts where it can be guaranteed that the references will
/// not outlive the call.
pub trait GuardedArgs {
    /// Guard that when dropped will invalidate any values encoded.
    type Guard;
    /// Encode arguments onto a stack.
    ///
    /// # Safety
    ///
    /// This is implemented for and allows encoding references on the stack.
    /// The returned guard must be dropped before any used references are
    /// invalidated.
    unsafe fn unsafe_into_stack(self, stack: &mut Stack) -> VmResult<Self::Guard>;
    /// The number of arguments this value encodes onto the stack.
    fn count(&self) -> usize;
}
// Generates a `GuardedArgs` impl for a tuple of arity `$count` where every
// element is `UnsafeToValue`. Invoked below via `repeat_macro!` once per
// supported tuple size (including the empty tuple).
macro_rules! impl_into_args {
    ($count:expr $(, $ty:ident $value:ident $_:expr)*) => {
        impl<$($ty,)*> GuardedArgs for ($($ty,)*)
        where
            $($ty: UnsafeToValue,)*
        {
            // One guard per element, keeping each converted value alive.
            type Guard = ($($ty::Guard,)*);
            #[allow(unused)]
            unsafe fn unsafe_into_stack(self, stack: &mut Stack) -> VmResult<Self::Guard> {
                let ($($value,)*) = self;
                // All conversions happen before any push, so a failed
                // conversion (vm_try! early return) leaves the stack
                // untouched by this call.
                $(let $value = vm_try!($value.unsafe_to_value());)*
                $(stack.push($value.0);)*
                VmResult::Ok(($($value.1,)*))
            }
            fn count(&self) -> usize {
                $count
            }
        }
    };
}
// Instantiate the impl for every tuple arity the runtime supports.
repeat_macro!(impl_into_args);
| true
|
5f049777e884b9c507b45678a15d558e8f05f1c7
|
Rust
|
Tamiyo/Mango
|
/src/bytecode/constant.rs
|
UTF-8
| 496
| 2.796875
| 3
|
[] |
no_license
|
/// Defines the 'Constants' to be used within the Compiler.
///
/// These are constants that the Compiler encounters during its compilation
/// phase; they are stored in the Constant Pool for later use.
use crate::bytecode::distance::Distance;
use string_interner::Sym;
use crate::compiler::class::Class;
use crate::compiler::function::Closure;
/// A literal value collected by the Compiler and stored in the constant pool.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
pub enum Constant {
    /// Numeric literal (wrapped in `Distance`, which supplies Eq/Hash).
    Number(Distance),
    /// Interned string symbol.
    String(Sym),
    /// Compiled function object.
    Closure(Closure),
    /// Compiled class object.
    Class(Class),
}
| true
|
20237af79e3c6ac49a3a71c2a1310049b4b215cc
|
Rust
|
inferiorhumanorgans/crc-rs
|
/benches/bench.rs
|
UTF-8
| 818
| 2.546875
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#![feature(test)]
extern crate crc;
extern crate test;
use crc::{crc32, crc64};
use test::Bencher;
/// Measures the cost of building the CRC-32 (IEEE, reflected) lookup table.
#[bench]
fn bench_crc32_make_table(b: &mut Bencher) {
    b.iter(|| crc32::make_table(crc32::IEEE, true));
}
/// Throughput benchmark: CRC-32 over a 1 MB zero-filled buffer.
/// Table construction happens once, outside the measured closure.
#[bench]
fn bench_crc32_update_megabytes(b: &mut Bencher) {
    let table = crc32::make_table(crc32::IEEE, true);
    let bytes = Box::new([0u8; 1_000_000]);
    // Seed with !0 and XOR at the end — the usual CRC-32 finalization.
    b.iter(|| crc32::update(0xFFFFFFFF, &table, &*bytes, &crc32::CalcType::Reverse) ^ 0xFFFFFFFF);
}
/// Measures the cost of building the CRC-64 (ECMA, reflected) lookup table.
#[bench]
fn bench_crc64_make_table(b: &mut Bencher) {
    b.iter(|| crc64::make_table(crc64::ECMA, true));
}
/// Throughput benchmark: CRC-64 over a 1 MB zero-filled buffer.
#[bench]
fn bench_crc64_update_megabytes(b: &mut Bencher) {
    let table = crc64::make_table(crc64::ECMA, true);
    let bytes = Box::new([0u8; 1_000_000]);
    b.iter(|| crc64::update(0, &table, &*bytes, &crc64::CalcType::Reverse));
}
| true
|
a379b362dcacb86df8a66092865dd2698276d2b7
|
Rust
|
lucadonnoh/pathfinder
|
/crates/pathfinder/src/rpc/types.rs
|
UTF-8
| 58,973
| 2.78125
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! Data structures used by the JSON-RPC API methods.
use crate::core::{StarknetBlockHash, StarknetBlockNumber};
use serde::{Deserialize, Serialize};
/// Special tag used when specifying the `latest` or `pending` block.
///
/// Serde maps both variants to their lowercase names on the wire.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub enum Tag {
    /// The most recent fully constructed block
    ///
    /// Represented as the JSON string `"latest"` when passed as an RPC method argument,
    /// for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["latest"]}`
    #[serde(rename = "latest")]
    Latest,
    /// Currently constructed block
    ///
    /// Represented as the JSON string `"pending"` when passed as an RPC method argument,
    /// for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["pending"]}`
    #[serde(rename = "pending")]
    Pending,
}
impl std::fmt::Display for Tag {
    /// Writes the same lowercase keyword the serde renames use
    /// (`latest` / `pending`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let keyword = match self {
            Tag::Latest => "latest",
            Tag::Pending => "pending",
        };
        f.write_str(keyword)
    }
}
/// A wrapper that contains either a [Hash](self::BlockHashOrTag::Hash) or a [Tag](self::BlockHashOrTag::Tag).
///
/// `#[serde(untagged)]` tries the hash representation first, then the tag.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum BlockHashOrTag {
    /// Hash of a block
    ///
    /// Represented as a `0x`-prefixed hex JSON string of length from 1 up to 64 characters
    /// when passed as an RPC method argument, for example:
    /// `{"jsonrpc":"2.0","id":"0","method":"starknet_getBlockWithTxsByHash","params":["0x7d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"]}`
    Hash(StarknetBlockHash),
    /// Special [Tag](crate::rpc::types::Tag) describing a block
    Tag(Tag),
}
impl std::fmt::Display for BlockHashOrTag {
    /// Hashes are written via `to_hex_str()`; tags defer to [`Tag`]'s
    /// `Display` implementation.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Hash(StarknetBlockHash(hash)) => f.write_str(&hash.to_hex_str()),
            Self::Tag(tag) => std::fmt::Display::fmt(tag, f),
        }
    }
}
/// A wrapper that contains either a block [Number](self::BlockNumberOrTag::Number) or a [Tag](self::BlockNumberOrTag::Tag).
///
/// `#[serde(untagged)]` tries the numeric representation first, then the tag.
#[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
#[serde(untagged)]
#[serde(deny_unknown_fields)]
pub enum BlockNumberOrTag {
    /// Number (height) of a block
    Number(StarknetBlockNumber),
    /// Special [Tag](crate::rpc::types::Tag) describing a block
    Tag(Tag),
}
impl std::fmt::Display for BlockNumberOrTag {
    /// Numbers are written in decimal; tags defer to [`Tag`]'s `Display`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Number(StarknetBlockNumber(number)) => std::fmt::Display::fmt(number, f),
            Self::Tag(tag) => std::fmt::Display::fmt(tag, f),
        }
    }
}
/// Groups all strictly input types of the RPC API.
pub mod request {
    use crate::{
        core::{
            CallParam, CallSignatureElem, ContractAddress, EntryPoint, EventKey, Fee,
            TransactionVersion,
        },
        rpc::serde::{CallSignatureElemAsDecimalStr, FeeAsHexStr, TransactionVersionAsHexStr},
    };
    use serde::Deserialize;
    use serde_with::{serde_as, skip_serializing_none};
    /// Contains parameters passed to `starknet_call`.
    #[serde_as]
    #[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
    #[serde(deny_unknown_fields)]
    pub struct Call {
        pub contract_address: ContractAddress,
        pub calldata: Vec<CallParam>,
        pub entry_point_selector: EntryPoint,
        /// Added hastily for fee estimation: signature elements arrive as
        /// decimal strings, unlike the hex used elsewhere.
        #[serde(default)]
        #[serde_as(as = "Vec<CallSignatureElemAsDecimalStr>")]
        pub signature: Vec<CallSignatureElem>,
        /// Added hastily for fee estimation: a max fee is needed whenever a
        /// signature is present. Defaults to zero when absent.
        #[serde_as(as = "FeeAsHexStr")]
        #[serde(default = "call_default_max_fee")]
        pub max_fee: Fee,
        /// Added hastily for fee estimation: the transaction version; this
        /// field may not survive future revisions. Defaults to zero.
        #[serde_as(as = "TransactionVersionAsHexStr")]
        #[serde(default = "call_default_version")]
        pub version: TransactionVersion,
    }
    // serde `default = "..."` requires a function path, hence these shims.
    const fn call_default_max_fee() -> Fee {
        Call::DEFAULT_MAX_FEE
    }
    const fn call_default_version() -> TransactionVersion {
        Call::DEFAULT_VERSION
    }
    impl Call {
        pub const DEFAULT_MAX_FEE: Fee = Fee(web3::types::H128::zero());
        pub const DEFAULT_VERSION: TransactionVersion =
            TransactionVersion(web3::types::H256::zero());
    }
    /// This is what [`Call`] used to be, but is used in
    /// [`crate::rpc::api::RpcApi::add_invoke_transaction`] for example.
    ///
    /// It might be that [`Call`] and arguments of `addInvokeTransaction` could be unified in the
    /// future when the dust has settled on the implementation.
    #[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
    #[serde(deny_unknown_fields)]
    pub struct ContractCall {
        pub contract_address: ContractAddress,
        pub calldata: Vec<CallParam>,
        pub entry_point_selector: EntryPoint,
    }
    /// Contains event filter parameters passed to `starknet_getEvents`.
    #[skip_serializing_none]
    #[derive(Clone, Debug, Deserialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Serialize))]
    #[serde(deny_unknown_fields)]
    pub struct EventFilter {
        #[serde(default, rename = "fromBlock")]
        pub from_block: Option<crate::core::BlockId>,
        #[serde(default, rename = "toBlock")]
        pub to_block: Option<crate::core::BlockId>,
        #[serde(default)]
        pub address: Option<ContractAddress>,
        #[serde(default)]
        pub keys: Vec<EventKey>,
        // These are inlined here because serde flatten and deny_unknown_fields
        // don't work together.
        pub page_size: usize,
        pub page_number: usize,
    }
}
/// Groups all strictly output types of the RPC API.
pub mod reply {
// At the moment both reply types are the same for get_code, hence the re-export
use crate::{
core::{
CallParam, ClassHash, ConstructorParam, ContractAddress, ContractAddressSalt,
EntryPoint, EventData, EventKey, Fee, GlobalRoot, SequencerAddress, StarknetBlockHash,
StarknetBlockNumber, StarknetBlockTimestamp, StarknetTransactionHash, TransactionNonce,
TransactionSignatureElem, TransactionVersion,
},
rpc::{
api::{BlockResponseScope, RawBlock},
serde::{FeeAsHexStr, TransactionVersionAsHexStr},
},
sequencer,
};
use serde::Serialize;
use serde_with::{serde_as, skip_serializing_none};
use stark_hash::StarkHash;
use std::convert::From;
    /// L2 Block status as returned by the RPC API.
    ///
    /// Serialized as the SCREAMING_SNAKE_CASE strings below.
    #[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    pub enum BlockStatus {
        #[serde(rename = "PENDING")]
        Pending,
        #[serde(rename = "ACCEPTED_ON_L2")]
        AcceptedOnL2,
        #[serde(rename = "ACCEPTED_ON_L1")]
        AcceptedOnL1,
        #[serde(rename = "REJECTED")]
        Rejected,
    }
    /// Collapses the sequencer's richer status set onto the four RPC
    /// statuses: anything not accepted/pending is reported as `Rejected`.
    impl From<sequencer::reply::Status> for BlockStatus {
        fn from(status: sequencer::reply::Status) -> Self {
            match status {
                // TODO verify this mapping with Starkware
                sequencer::reply::Status::AcceptedOnL1 => BlockStatus::AcceptedOnL1,
                sequencer::reply::Status::AcceptedOnL2 => BlockStatus::AcceptedOnL2,
                sequencer::reply::Status::NotReceived => BlockStatus::Rejected,
                sequencer::reply::Status::Pending => BlockStatus::Pending,
                sequencer::reply::Status::Received => BlockStatus::Pending,
                sequencer::reply::Status::Rejected => BlockStatus::Rejected,
                sequencer::reply::Status::Reverted => BlockStatus::Rejected,
                sequencer::reply::Status::Aborted => BlockStatus::Rejected,
            }
        }
    }
    /// Wrapper for transaction data returned in block related queries,
    /// chosen variant depends on [crate::rpc::api.BlockResponseScope](crate::rpc::api::BlockResponseScope).
    ///
    /// `untagged`: on the wire this is either a list of full transaction
    /// objects or a list of bare hashes.
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    #[serde(untagged)]
    pub enum Transactions {
        Full(Vec<Transaction>),
        HashesOnly(Vec<StarknetTransactionHash>),
    }
    /// L2 Block as returned by the RPC API.
    ///
    /// `block_hash`, `block_number` and `new_root` are `None` for pending
    /// blocks (see `from_sequencer_scoped`), and `skip_serializing_none`
    /// omits them from the JSON in that case.
    #[serde_as]
    #[skip_serializing_none]
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    pub struct Block {
        pub status: BlockStatus,
        pub block_hash: Option<StarknetBlockHash>,
        pub parent_hash: StarknetBlockHash,
        pub block_number: Option<StarknetBlockNumber>,
        pub new_root: Option<GlobalRoot>,
        pub timestamp: StarknetBlockTimestamp,
        pub sequencer_address: SequencerAddress,
        pub transactions: Transactions,
    }
    /// Minimal block identifier pair; serialized with the spec's
    /// `block_hash`/`block_number` field names.
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    pub struct BlockHashAndNumber {
        #[serde(rename = "block_hash")]
        pub hash: StarknetBlockHash,
        #[serde(rename = "block_number")]
        pub number: StarknetBlockNumber,
    }
    impl Block {
        /// Constructs [Block] from [RawBlock]
        pub fn from_raw(block: RawBlock, transactions: Transactions) -> Self {
            Self {
                status: block.status,
                block_hash: Some(block.hash),
                parent_hash: block.parent_hash,
                block_number: Some(block.number),
                new_root: Some(block.root),
                timestamp: block.timestamp,
                sequencer_address: block.sequencer,
                transactions,
            }
        }
        /// Constructs [Block] from [sequencer's block representation](crate::sequencer::reply::Block)
        ///
        /// `scope` decides whether the transactions are embedded in full or
        /// only as hashes. Pending blocks have no hash/number/root yet, so
        /// those fields become `None`.
        pub fn from_sequencer_scoped(
            block: sequencer::reply::MaybePendingBlock,
            scope: BlockResponseScope,
        ) -> Self {
            let transactions = match scope {
                BlockResponseScope::TransactionHashes => {
                    let hashes = block.transactions().iter().map(|t| t.hash()).collect();
                    Transactions::HashesOnly(hashes)
                }
                BlockResponseScope::FullTransactions => {
                    let transactions = block.transactions().iter().map(|t| t.into()).collect();
                    Transactions::Full(transactions)
                }
            };
            use sequencer::reply::MaybePendingBlock;
            match block {
                MaybePendingBlock::Block(block) => Self {
                    status: block.status.into(),
                    block_hash: Some(block.block_hash),
                    parent_hash: block.parent_block_hash,
                    block_number: Some(block.block_number),
                    new_root: Some(block.state_root),
                    timestamp: block.timestamp,
                    sequencer_address: block
                        .sequencer_address
                        // Default value for cairo <0.8.0 is 0
                        .unwrap_or(SequencerAddress(StarkHash::ZERO)),
                    transactions,
                },
                MaybePendingBlock::Pending(pending) => Self {
                    status: pending.status.into(),
                    block_hash: None,
                    parent_hash: pending.parent_hash,
                    block_number: None,
                    new_root: None,
                    timestamp: pending.timestamp,
                    sequencer_address: pending.sequencer_address,
                    transactions,
                },
            }
        }
    }
/// Starkware specific RPC error codes.
// TODO verify with Starkware how `sequencer::reply::starknet::ErrorCode` should
// map to the values below in all JSON-RPC API methods. Also verify if
// the mapping should be method-specific or common for all methods.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ErrorCode {
FailedToReceiveTransaction = 1,
ContractNotFound = 20,
InvalidMessageSelector = 21,
InvalidCallData = 22,
InvalidBlockId = 24,
InvalidTransactionHash = 25,
InvalidTransactionIndex = 27,
InvalidContractClassHash = 28,
PageSizeTooBig = 31,
NoBlocks = 32,
ContractError = 40,
}
/// We can have this equality and should have it in order to use it for tests. It is meant to
/// be used when expecting that the rpc result is an error. The rpc result should first be
/// accessed with [`Result::unwrap_err`], then compared to the expected [`ErrorCode`] with
/// [`assert_eq!`].
#[cfg(test)]
impl PartialEq<jsonrpsee::core::error::Error> for ErrorCode {
fn eq(&self, other: &jsonrpsee::core::error::Error) -> bool {
use jsonrpsee::core::error::Error;
use jsonrpsee::types::error::CallError;
if let Error::Call(CallError::Custom(custom)) = other {
// this is quite ackward dance to go back to error level then come back to the
// custom error object. it however allows not having the json structure in two
// places, and leaning on ErrorObject partialeq impl.
let repr = match self {
ErrorCode::PageSizeTooBig => {
Error::from(crate::storage::EventFilterError::PageSizeTooBig(
crate::storage::StarknetEventsTable::PAGE_SIZE_LIMIT,
))
}
other => Error::from(*other),
};
let repr = match repr {
Error::Call(CallError::Custom(repr)) => repr,
unexpected => unreachable!("using pathfinders ErrorCode to create jsonrpsee did not create a custom error: {unexpected:?}")
};
&repr == custom
} else {
false
}
}
}
impl TryFrom<i32> for ErrorCode {
type Error = i32;
fn try_from(code: i32) -> Result<ErrorCode, Self::Error> {
use ErrorCode::*;
Ok(match code {
1 => FailedToReceiveTransaction,
20 => ContractNotFound,
21 => InvalidMessageSelector,
22 => InvalidCallData,
24 => InvalidBlockId,
25 => InvalidTransactionHash,
27 => InvalidTransactionIndex,
28 => InvalidContractClassHash,
31 => PageSizeTooBig,
32 => NoBlocks,
40 => ContractError,
x => return Err(x),
})
}
}
    impl ErrorCode {
        /// Returns the message specified in the openrpc api spec.
        fn as_str(&self) -> &'static str {
            match self {
                ErrorCode::FailedToReceiveTransaction => "Failed to write transaction",
                ErrorCode::ContractNotFound => "Contract not found",
                ErrorCode::InvalidMessageSelector => "Invalid message selector",
                ErrorCode::InvalidCallData => "Invalid call data",
                ErrorCode::InvalidBlockId => "Invalid block id",
                ErrorCode::InvalidTransactionHash => "Invalid transaction hash",
                ErrorCode::InvalidTransactionIndex => "Invalid transaction index in a block",
                ErrorCode::InvalidContractClassHash => {
                    "The supplied contract class hash is invalid or unknown"
                }
                ErrorCode::PageSizeTooBig => "Requested page size is too big",
                ErrorCode::ContractError => "Contract error",
                ErrorCode::NoBlocks => "There are no blocks",
            }
        }
    }
impl std::string::ToString for ErrorCode {
fn to_string(&self) -> String {
self.as_str().to_owned()
}
}
    /// Turns an [`ErrorCode`] into a jsonrpsee custom call error carrying the
    /// numeric code and spec message (but no extra data payload).
    impl From<ErrorCode> for jsonrpsee::core::error::Error {
        fn from(ecode: ErrorCode) -> Self {
            use jsonrpsee::core::error::Error;
            use jsonrpsee::types::error::{CallError, ErrorObject};
            if ecode == ErrorCode::PageSizeTooBig {
                // Debug-only guard: PageSizeTooBig needs its data payload, so
                // it must be converted via EventFilterError instead.
                #[cfg(debug_assertions)]
                panic!("convert jsonrpsee::...::Error from EventFilterError to get error data");
            }
            let error = ecode as i32;
            Error::Call(CallError::Custom(ErrorObject::owned(
                error,
                ecode.to_string(),
                // this is insufficient in every situation (PageSizeTooBig)
                None::<()>,
            )))
        }
    }
    /// L2 state update as returned by the [RPC API v0.1.0](https://github.com/starkware-libs/starknet-specs/blob/30e5bafcda60c31b5fb4021b4f5ddcfc18d2ff7d/api/starknet_api_openrpc.json#L846).
    ///
    /// # Serialization
    ///
    /// This structure derives [serde::Deserialize] without depending
    /// on the `rpc-full-serde` feature because state updates are
    /// stored in the DB as compressed raw JSON bytes.
    #[skip_serializing_none]
    #[derive(Clone, Debug, serde::Deserialize, Serialize, PartialEq, Eq)]
    #[serde(deny_unknown_fields)]
    pub struct StateUpdate {
        /// None for `pending`
        #[serde(default)]
        pub block_hash: Option<StarknetBlockHash>,
        pub new_root: GlobalRoot,
        pub old_root: GlobalRoot,
        pub state_diff: state_update::StateDiff,
    }
    /// Straightforward field-by-field conversion from the sequencer's state
    /// update; the state diff is reshaped by its own `From` impl.
    impl From<sequencer::reply::StateUpdate> for StateUpdate {
        fn from(x: sequencer::reply::StateUpdate) -> Self {
            Self {
                block_hash: x.block_hash,
                new_root: x.new_root,
                old_root: x.old_root,
                state_diff: x.state_diff.into(),
            }
        }
    }
    /// State update related substructures.
    ///
    /// # Serialization
    ///
    /// All structures in this module derive [serde::Deserialize] without depending
    /// on the `rpc-full-serde` feature because state updates are
    /// stored in the DB as compressed raw JSON bytes.
    pub mod state_update {
        use crate::core::{
            ClassHash, ContractAddress, ContractNonce, StorageAddress, StorageValue,
        };
        use crate::sequencer;
        use serde::{Deserialize, Serialize};
        /// L2 state diff.
        #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
        #[serde(deny_unknown_fields)]
        pub struct StateDiff {
            pub storage_diffs: Vec<StorageDiff>,
            pub declared_contracts: Vec<DeclaredContract>,
            pub deployed_contracts: Vec<DeployedContract>,
            pub nonces: Vec<Nonce>,
        }
        impl From<sequencer::reply::state_update::StateDiff> for StateDiff {
            fn from(x: sequencer::reply::state_update::StateDiff) -> Self {
                Self {
                    // The sequencer groups storage diffs per contract; the
                    // RPC shape is a flat list with the address repeated on
                    // every entry.
                    storage_diffs: x
                        .storage_diffs
                        .into_iter()
                        .flat_map(|(contract_address, storage_diffs)| {
                            storage_diffs.into_iter().map(move |x| StorageDiff {
                                address: contract_address,
                                key: x.key,
                                value: x.value,
                            })
                        })
                        .collect(),
                    declared_contracts: x
                        .declared_contracts
                        .into_iter()
                        .map(|class_hash| DeclaredContract { class_hash })
                        .collect(),
                    deployed_contracts: x
                        .deployed_contracts
                        .into_iter()
                        .map(|deployed_contract| DeployedContract {
                            address: deployed_contract.address,
                            class_hash: deployed_contract.class_hash,
                        })
                        .collect(),
                    // FIXME once the sequencer API provides the nonces
                    nonces: vec![],
                }
            }
        }
        /// L2 storage diff of a contract.
        #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
        #[serde(deny_unknown_fields)]
        pub struct StorageDiff {
            pub address: ContractAddress,
            pub key: StorageAddress,
            pub value: StorageValue,
        }
        // impl From<sequencer::reply::state_update::StorageDiff> for StorageItem {
        //     fn from(x: sequencer::reply::state_update::StorageDiff) -> Self {
        //         Self {
        //             key: x.key,
        //             value: x.value,
        //         }
        //     }
        // }
        /// L2 state diff declared contract item.
        #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
        #[serde(deny_unknown_fields)]
        pub struct DeclaredContract {
            pub class_hash: ClassHash,
        }
        /// L2 state diff deployed contract item.
        #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
        #[serde(deny_unknown_fields)]
        pub struct DeployedContract {
            pub address: ContractAddress,
            pub class_hash: ClassHash,
        }
        /// L2 state diff nonce item.
        #[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq)]
        #[serde(deny_unknown_fields)]
        pub struct Nonce {
            pub contract_address: ContractAddress,
            pub nonce: ContractNonce,
        }
    }
    /// L2 transaction as returned by the RPC API.
    ///
    /// The variant is selected by the JSON `type` field
    /// (`DECLARE` / `INVOKE` / `DEPLOY`).
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(tag = "type")]
    pub enum Transaction {
        #[serde(rename = "DECLARE")]
        Declare(DeclareTransaction),
        #[serde(rename = "INVOKE")]
        Invoke(InvokeTransaction),
        #[serde(rename = "DEPLOY")]
        Deploy(DeployTransaction),
    }
impl Transaction {
pub fn hash(&self) -> StarknetTransactionHash {
match self {
Transaction::Declare(declare) => declare.common.hash,
Transaction::Invoke(invoke) => invoke.common.hash,
Transaction::Deploy(deploy) => deploy.hash,
}
}
}
/// Fields shared by `DeclareTransaction` and `InvokeTransaction`;
/// flattened into them via `#[serde(flatten)]`.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonTransactionProperties {
    #[serde(rename = "transaction_hash")]
    pub hash: StarknetTransactionHash,
    #[serde_as(as = "FeeAsHexStr")]
    pub max_fee: Fee,
    #[serde_as(as = "TransactionVersionAsHexStr")]
    pub version: TransactionVersion,
    pub signature: Vec<TransactionSignatureElem>,
    pub nonce: TransactionNonce,
}
/// L2 DECLARE transaction.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeclareTransaction {
    #[serde(flatten)]
    pub common: CommonTransactionProperties,
    pub class_hash: ClassHash,
    pub sender_address: ContractAddress,
}
/// L2 INVOKE transaction.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct InvokeTransaction {
    #[serde(flatten)]
    pub common: CommonTransactionProperties,
    pub contract_address: ContractAddress,
    pub entry_point_selector: EntryPoint,
    pub calldata: Vec<CallParam>,
}
/// L2 DEPLOY transaction.
///
/// Does not embed `CommonTransactionProperties`: DEPLOY has no max fee,
/// signature or nonce, so only hash and version are carried directly.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeployTransaction {
    // This part is a subset of CommonTransactionProperties
    #[serde(rename = "transaction_hash")]
    pub hash: StarknetTransactionHash,
    #[serde_as(as = "TransactionVersionAsHexStr")]
    pub version: TransactionVersion,
    pub contract_address: ContractAddress,
    pub contract_address_salt: ContractAddressSalt,
    pub class_hash: ClassHash,
    pub constructor_calldata: Vec<ConstructorParam>,
}
impl TryFrom<sequencer::reply::Transaction> for Transaction {
    type Error = anyhow::Error;

    /// Fails when the sequencer reply carries no transaction payload.
    fn try_from(txn: sequencer::reply::Transaction) -> Result<Self, Self::Error> {
        match txn.transaction {
            Some(inner) => Ok(inner.into()),
            None => Err(anyhow::anyhow!("Transaction not found.")),
        }
    }
}
impl From<sequencer::reply::transaction::Transaction> for Transaction {
    /// By-value conversion simply delegates to the by-reference one.
    fn from(txn: sequencer::reply::transaction::Transaction) -> Self {
        (&txn).into()
    }
}
impl From<&sequencer::reply::transaction::Transaction> for Transaction {
    /// Maps a sequencer transaction to the RPC representation, field by field.
    /// Fields the sequencer does not supply for a given variant are filled
    /// with zero/default values.
    fn from(txn: &sequencer::reply::transaction::Transaction) -> Self {
        match txn {
            sequencer::reply::transaction::Transaction::Invoke(txn) => {
                Self::Invoke(InvokeTransaction {
                    common: CommonTransactionProperties {
                        hash: txn.transaction_hash,
                        max_fee: txn.max_fee,
                        // no `version` in invoke transactions
                        version: TransactionVersion(Default::default()),
                        signature: txn.signature.clone(),
                        // no `nonce` in invoke transactions
                        nonce: TransactionNonce(Default::default()),
                    },
                    contract_address: txn.contract_address,
                    entry_point_selector: txn.entry_point_selector,
                    calldata: txn.calldata.clone(),
                })
            }
            sequencer::reply::transaction::Transaction::Declare(txn) => {
                Self::Declare(DeclareTransaction {
                    common: CommonTransactionProperties {
                        hash: txn.transaction_hash,
                        max_fee: txn.max_fee,
                        version: txn.version,
                        signature: txn.signature.clone(),
                        nonce: txn.nonce,
                    },
                    class_hash: txn.class_hash,
                    sender_address: txn.sender_address,
                })
            }
            sequencer::reply::transaction::Transaction::Deploy(txn) => {
                Self::Deploy(DeployTransaction {
                    hash: txn.transaction_hash,
                    // no `version` in deploy transactions
                    version: TransactionVersion(Default::default()),
                    contract_address: txn.contract_address,
                    contract_address_salt: txn.contract_address_salt,
                    class_hash: txn.class_hash,
                    constructor_calldata: txn.constructor_calldata.clone(),
                })
            }
        }
    }
}
/// L2 transaction receipt as returned by the RPC API.
///
/// Untagged: variants are distinguished purely by which JSON fields are
/// present, so the variant order below matters to deserialization.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(untagged)]
pub enum TransactionReceipt {
    Invoke(InvokeTransactionReceipt),
    // We can't differentiate between declare and deploy in an untagged enum: they
    // have the same properties in the JSON.
    DeclareOrDeploy(DeclareOrDeployTransactionReceipt),
    // Pending receipts don't have status, status_data, block_hash, block_number fields
    PendingInvoke(PendingInvokeTransactionReceipt),
    PendingDeclareOrDeploy(PendingDeclareOrDeployTransactionReceipt),
}
impl TransactionReceipt {
    /// Transaction hash of the receipt, regardless of variant.
    pub fn hash(&self) -> StarknetTransactionHash {
        match self {
            Self::Invoke(receipt) => receipt.common.transaction_hash,
            Self::DeclareOrDeploy(receipt) => receipt.common.transaction_hash,
            Self::PendingInvoke(receipt) => receipt.common.transaction_hash,
            Self::PendingDeclareOrDeploy(receipt) => receipt.common.transaction_hash,
        }
    }
}
/// Receipt for an INVOKE transaction included in a mined block.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct InvokeTransactionReceipt {
    #[serde(flatten)]
    pub common: CommonTransactionReceiptProperties,
    pub messages_sent: Vec<transaction_receipt::MessageToL1>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub l1_origin_message: Option<transaction_receipt::MessageToL2>,
    pub events: Vec<transaction_receipt::Event>,
}
/// Receipt fields shared by all mined-block receipt variants.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonTransactionReceiptProperties {
    pub transaction_hash: StarknetTransactionHash,
    #[serde_as(as = "FeeAsHexStr")]
    pub actual_fee: Fee,
    pub status: TransactionStatus,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status_data: Option<String>,
    pub block_hash: StarknetBlockHash,
    pub block_number: StarknetBlockNumber,
}
/// Receipt for DECLARE and DEPLOY transactions — indistinguishable in JSON
/// (see the comment on `TransactionReceipt`).
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct DeclareOrDeployTransactionReceipt {
    #[serde(flatten)]
    pub common: CommonTransactionReceiptProperties,
}
/// Receipt for an INVOKE transaction in the pending block (no status/block fields).
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct PendingInvokeTransactionReceipt {
    #[serde(flatten)]
    pub common: CommonPendingTransactionReceiptProperties,
    pub messages_sent: Vec<transaction_receipt::MessageToL1>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub l1_origin_message: Option<transaction_receipt::MessageToL2>,
    pub events: Vec<transaction_receipt::Event>,
}
/// Receipt fields shared by the pending receipt variants; pending receipts
/// carry no status/block data yet.
#[serde_as]
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct CommonPendingTransactionReceiptProperties {
    pub transaction_hash: StarknetTransactionHash,
    #[serde_as(as = "FeeAsHexStr")]
    pub actual_fee: Fee,
}
/// Receipt for a pending DECLARE or DEPLOY transaction (indistinguishable in JSON).
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
pub struct PendingDeclareOrDeployTransactionReceipt {
    #[serde(flatten)]
    pub common: CommonPendingTransactionReceiptProperties,
}
impl TransactionReceipt {
    /// Builds a *pending* receipt: the transaction's variant decides the
    /// receipt variant, and no status/block fields are available yet.
    pub fn pending_from(
        receipt: sequencer::reply::transaction::Receipt,
        transaction: &sequencer::reply::transaction::Transaction,
    ) -> Self {
        match transaction {
            sequencer::reply::transaction::Transaction::Declare(_)
            | sequencer::reply::transaction::Transaction::Deploy(_) => {
                Self::PendingDeclareOrDeploy(PendingDeclareOrDeployTransactionReceipt {
                    common: CommonPendingTransactionReceiptProperties {
                        transaction_hash: receipt.transaction_hash,
                        // A missing fee is reported as zero.
                        actual_fee: receipt
                            .actual_fee
                            .unwrap_or_else(|| Fee(Default::default())),
                    },
                })
            }
            sequencer::reply::transaction::Transaction::Invoke(_) => {
                Self::PendingInvoke(PendingInvokeTransactionReceipt {
                    common: CommonPendingTransactionReceiptProperties {
                        transaction_hash: receipt.transaction_hash,
                        actual_fee: receipt
                            .actual_fee
                            .unwrap_or_else(|| Fee(Default::default())),
                    },
                    messages_sent: receipt
                        .l2_to_l1_messages
                        .into_iter()
                        .map(transaction_receipt::MessageToL1::from)
                        .collect(),
                    l1_origin_message: receipt
                        .l1_to_l2_consumed_message
                        .map(transaction_receipt::MessageToL2::from),
                    events: receipt
                        .events
                        .into_iter()
                        .map(transaction_receipt::Event::from)
                        .collect(),
                })
            }
        }
    }

    /// Builds a mined-block receipt, attaching the block's status, hash and
    /// number supplied by the caller.
    pub fn with_block_data(
        receipt: sequencer::reply::transaction::Receipt,
        status: BlockStatus,
        block_hash: StarknetBlockHash,
        block_number: StarknetBlockNumber,
        transaction: &sequencer::reply::transaction::Transaction,
    ) -> Self {
        match transaction {
            sequencer::reply::transaction::Transaction::Declare(_)
            | sequencer::reply::transaction::Transaction::Deploy(_) => {
                Self::DeclareOrDeploy(DeclareOrDeployTransactionReceipt {
                    common: CommonTransactionReceiptProperties {
                        transaction_hash: receipt.transaction_hash,
                        actual_fee: receipt
                            .actual_fee
                            .unwrap_or_else(|| Fee(Default::default())),
                        status: status.into(),
                        // TODO: at the moment not available in sequencer replies
                        status_data: None,
                        block_hash,
                        block_number,
                    },
                })
            }
            sequencer::reply::transaction::Transaction::Invoke(_) => {
                Self::Invoke(InvokeTransactionReceipt {
                    common: CommonTransactionReceiptProperties {
                        transaction_hash: receipt.transaction_hash,
                        actual_fee: receipt
                            .actual_fee
                            .unwrap_or_else(|| Fee(Default::default())),
                        status: status.into(),
                        // TODO: at the moment not available in sequencer replies
                        status_data: None,
                        block_hash,
                        block_number,
                    },
                    messages_sent: receipt
                        .l2_to_l1_messages
                        .into_iter()
                        .map(transaction_receipt::MessageToL1::from)
                        .collect(),
                    l1_origin_message: receipt
                        .l1_to_l2_consumed_message
                        .map(transaction_receipt::MessageToL2::from),
                    events: receipt
                        .events
                        .into_iter()
                        .map(transaction_receipt::Event::from)
                        .collect(),
                })
            }
        }
    }
}
/// Transaction receipt related substructures.
pub mod transaction_receipt {
    use crate::{
        core::{
            ContractAddress, EthereumAddress, EventData, EventKey, L1ToL2MessagePayloadElem,
            L2ToL1MessagePayloadElem,
        },
        rpc::serde::EthereumAddressAsHexStr,
        sequencer::reply::transaction::{L1ToL2Message, L2ToL1Message},
    };
    use serde::Serialize;
    use serde_with::serde_as;
    use std::convert::From;

    /// Message sent from L2 to L1.
    #[serde_as]
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    pub struct MessageToL1 {
        #[serde_as(as = "EthereumAddressAsHexStr")]
        pub to_address: EthereumAddress,
        pub payload: Vec<L2ToL1MessagePayloadElem>,
    }

    impl From<L2ToL1Message> for MessageToL1 {
        fn from(msg: L2ToL1Message) -> Self {
            Self {
                to_address: msg.to_address,
                payload: msg.payload,
            }
        }
    }

    /// Message sent from L1 to L2.
    #[serde_as]
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    pub struct MessageToL2 {
        #[serde_as(as = "EthereumAddressAsHexStr")]
        pub from_address: EthereumAddress,
        pub payload: Vec<L1ToL2MessagePayloadElem>,
    }

    impl From<L1ToL2Message> for MessageToL2 {
        fn from(msg: L1ToL2Message) -> Self {
            Self {
                from_address: msg.from_address,
                payload: msg.payload,
            }
        }
    }

    /// Event emitted as a part of a transaction.
    #[derive(Clone, Debug, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    #[serde(deny_unknown_fields)]
    pub struct Event {
        pub from_address: ContractAddress,
        pub keys: Vec<EventKey>,
        pub data: Vec<EventData>,
    }

    impl From<crate::sequencer::reply::transaction::Event> for Event {
        fn from(e: crate::sequencer::reply::transaction::Event) -> Self {
            Self {
                from_address: e.from_address,
                keys: e.keys,
                data: e.data,
            }
        }
    }
}
/// Represents transaction status.
///
/// Serialized as SCREAMING_SNAKE_CASE string constants.
#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub enum TransactionStatus {
    #[serde(rename = "PENDING")]
    Pending,
    #[serde(rename = "ACCEPTED_ON_L2")]
    AcceptedOnL2,
    #[serde(rename = "ACCEPTED_ON_L1")]
    AcceptedOnL1,
    #[serde(rename = "REJECTED")]
    Rejected,
}
impl From<BlockStatus> for TransactionStatus {
    /// A transaction inherits the status of the block containing it.
    fn from(status: BlockStatus) -> Self {
        match status {
            BlockStatus::Pending => Self::Pending,
            BlockStatus::AcceptedOnL2 => Self::AcceptedOnL2,
            BlockStatus::AcceptedOnL1 => Self::AcceptedOnL1,
            BlockStatus::Rejected => Self::Rejected,
        }
    }
}
/// Describes Starknet's syncing status RPC reply.
///
/// Untagged: serializes either as a bare boolean or as a `Status` object.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(untagged)]
pub enum Syncing {
    // The bool is expected to always be `false`, but `true` round-trips too
    // (see the note in `roundtrip_syncing`).
    False(bool),
    Status(syncing::Status),
}
impl std::fmt::Display for Syncing {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Syncing::False(_) => f.write_str("false"),
Syncing::Status(status) => {
write!(f, "{}", status)
}
}
}
}
/// Starknet's syncing status substructures.
pub mod syncing {
    use crate::{
        core::{StarknetBlockHash, StarknetBlockNumber},
        rpc::serde::StarknetBlockNumberAsHexStr,
    };
    use serde::Serialize;
    use serde_with::serde_as;

    /// Represents Starknet node syncing status.
    ///
    /// Each `NumberedBlock` is flattened with a field-name prefix, yielding
    /// `starting_block_hash`, `current_block_num`, etc. in the JSON.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    pub struct Status {
        #[serde(flatten, with = "prefix_starting")]
        pub starting: NumberedBlock,
        #[serde(flatten, with = "prefix_current")]
        pub current: NumberedBlock,
        #[serde(flatten, with = "prefix_highest")]
        pub highest: NumberedBlock,
    }

    // Prefix helpers used by the `with = …` attributes above.
    serde_with::with_prefix!(prefix_starting "starting_");
    serde_with::with_prefix!(prefix_current "current_");
    serde_with::with_prefix!(prefix_highest "highest_");

    impl std::fmt::Display for Status {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(
                f,
                "starting: {:?}, current: {:?}, highest: {:?}",
                self.starting, self.current, self.highest,
            )
        }
    }

    /// Block hash and a number, for `starknet_syncing` response only.
    #[serde_as]
    #[derive(Clone, Copy, Serialize, PartialEq, Eq)]
    #[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
    pub struct NumberedBlock {
        #[serde(rename = "block_hash")]
        pub hash: StarknetBlockHash,
        #[serde_as(as = "StarknetBlockNumberAsHexStr")]
        #[serde(rename = "block_num")]
        pub number: StarknetBlockNumber,
    }

    impl std::fmt::Debug for NumberedBlock {
        fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(fmt, "({}, {})", self.hash.0, self.number.0)
        }
    }

    impl From<(StarknetBlockHash, StarknetBlockNumber)> for NumberedBlock {
        fn from((hash, number): (StarknetBlockHash, StarknetBlockNumber)) -> Self {
            NumberedBlock { hash, number }
        }
    }

    /// Helper to make it a bit less painful to write examples.
    #[cfg(test)]
    impl<'a> From<(&'a str, u64)> for NumberedBlock {
        fn from((h, n): (&'a str, u64)) -> Self {
            use stark_hash::StarkHash;
            NumberedBlock {
                hash: StarknetBlockHash(StarkHash::from_hex_str(h).unwrap()),
                number: StarknetBlockNumber(n),
            }
        }
    }
}
// Verifies that `Syncing` round-trips through JSON byte-for-byte for every
// variant; `line!()` tags each example so a failure points at its row.
#[test]
fn roundtrip_syncing() {
    use syncing::NumberedBlock;
    let examples = [
        (line!(), "false", Syncing::False(false)),
        // this shouldn't exist but it exists now
        (line!(), "true", Syncing::False(true)),
        (
            line!(),
            r#"{"starting_block_hash":"0xa","starting_block_num":"0x1","current_block_hash":"0xb","current_block_num":"0x2","highest_block_hash":"0xc","highest_block_num":"0x3"}"#,
            Syncing::Status(syncing::Status {
                starting: NumberedBlock::from(("a", 1)),
                current: NumberedBlock::from(("b", 2)),
                highest: NumberedBlock::from(("c", 3)),
            }),
        ),
    ];
    for (line, input, expected) in examples {
        let parsed = serde_json::from_str::<Syncing>(input).unwrap();
        let output = serde_json::to_string(&parsed).unwrap();
        assert_eq!(parsed, expected, "example from line {}", line);
        assert_eq!(&output, input, "example from line {}", line);
    }
}
/// Describes an emitted event returned by starknet_getEvents
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct EmittedEvent {
    pub data: Vec<EventData>,
    pub keys: Vec<EventKey>,
    pub from_address: ContractAddress,
    /// [None] for pending events.
    pub block_hash: Option<StarknetBlockHash>,
    /// [None] for pending events.
    pub block_number: Option<StarknetBlockNumber>,
    pub transaction_hash: StarknetTransactionHash,
}
impl From<crate::storage::StarknetEmittedEvent> for EmittedEvent {
    /// Storage events always belong to a mined block, so the optional block
    /// fields are always populated here.
    fn from(ev: crate::storage::StarknetEmittedEvent) -> Self {
        Self {
            data: ev.data,
            keys: ev.keys,
            from_address: ev.from_address,
            block_hash: Some(ev.block_hash),
            block_number: Some(ev.block_number),
            transaction_hash: ev.transaction_hash,
        }
    }
}
// Result type for starknet_getEvents: one page of events plus paging state.
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct GetEventsResult {
    pub events: Vec<EmittedEvent>,
    pub page_number: usize,
    pub is_last_page: bool,
}
// Result type for starknet_addInvokeTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct InvokeTransactionResult {
    pub transaction_hash: StarknetTransactionHash,
}
// Result type for starknet_addDeclareTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct DeclareTransactionResult {
    pub transaction_hash: StarknetTransactionHash,
    pub class_hash: ClassHash,
}
// Result type for starknet_addDeployTransaction
#[derive(Clone, Debug, Serialize, PartialEq, Eq)]
#[cfg_attr(any(test, feature = "rpc-full-serde"), derive(serde::Deserialize))]
#[serde(deny_unknown_fields)]
pub struct DeployTransactionResult {
    pub transaction_hash: StarknetTransactionHash,
    pub contract_address: ContractAddress,
}
/// Return type of transaction fee estimation
///
/// All three values serialize as hex strings (`H256AsHexStr`).
#[serde_as]
#[derive(Clone, Debug, serde::Deserialize, Serialize, PartialEq, Eq)]
#[serde(deny_unknown_fields)]
pub struct FeeEstimate {
    /// The Ethereum gas cost of the transaction
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    #[serde(rename = "gas_consumed")]
    pub consumed: web3::types::H256,
    /// The gas price (in gwei) that was used in the cost estimation (input to fee estimation)
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    pub gas_price: web3::types::H256,
    /// The estimated fee for the transaction (in gwei), product of gas_consumed and gas_price
    #[serde_as(as = "crate::rpc::serde::H256AsHexStr")]
    #[serde(rename = "overall_fee")]
    pub fee: web3::types::H256,
}
#[cfg(test)]
mod tests {
    // Loads a JSON fixture and strips whitespace so it can be compared to
    // `serde_json::to_string` output (which is compact).
    macro_rules! fixture {
        ($file_name:literal) => {
            include_str!(concat!("../../fixtures/rpc/0.31.0/", $file_name))
                .replace(&[' ', '\n'], "")
        };
    }

    /// The aim of these tests is to check if serialization works correctly
    /// **without resorting to deserialization to prepare the test data**,
    /// which in itself could contain an "opposite phase" bug that cancels out.
    ///
    /// Deserialization is tested btw, because the fixture and the data is already available.
    ///
    /// These tests were added due to recurring regressions stemming from, among others:
    /// - `serde(flatten)` and it's side-effects (for example when used in conjunction with `skip_serializing_none`),
    /// - `*AsDecimalStr*` creeping in from `sequencer::reply` as opposed to spec.
    mod serde {
        use super::super::*;
        use crate::starkhash;
        use pretty_assertions::assert_eq;

        #[test]
        fn block_and_transaction() {
            // Test-only constructor; declared inside the test so it can't
            // leak into production code.
            impl Block {
                pub fn test_data() -> Self {
                    let common = CommonTransactionProperties {
                        hash: StarknetTransactionHash(starkhash!("04")),
                        max_fee: Fee(web3::types::H128::from_low_u64_be(0x5)),
                        version: TransactionVersion(web3::types::H256::from_low_u64_be(0x6)),
                        signature: vec![TransactionSignatureElem(starkhash!("07"))],
                        nonce: TransactionNonce(starkhash!("08")),
                    };
                    Self {
                        status: BlockStatus::AcceptedOnL1,
                        block_hash: Some(StarknetBlockHash(starkhash!("00"))),
                        parent_hash: StarknetBlockHash(starkhash!("01")),
                        block_number: Some(StarknetBlockNumber::GENESIS),
                        new_root: Some(GlobalRoot(starkhash!("02"))),
                        timestamp: StarknetBlockTimestamp(1),
                        sequencer_address: SequencerAddress(starkhash!("03")),
                        transactions: Transactions::Full(vec![
                            Transaction::Declare(DeclareTransaction {
                                common: common.clone(),
                                class_hash: ClassHash(starkhash!("09")),
                                sender_address: ContractAddress(starkhash!("0a")),
                            }),
                            Transaction::Invoke(InvokeTransaction {
                                common,
                                contract_address: ContractAddress(starkhash!("0b")),
                                entry_point_selector: EntryPoint(starkhash!("0c")),
                                calldata: vec![CallParam(starkhash!("0d"))],
                            }),
                            Transaction::Deploy(DeployTransaction {
                                hash: StarknetTransactionHash(starkhash!("0e")),
                                version: TransactionVersion(
                                    web3::types::H256::from_low_u64_be(1),
                                ),
                                contract_address: ContractAddress(starkhash!("0f")),
                                contract_address_salt: ContractAddressSalt(starkhash!("ee")),
                                class_hash: ClassHash(starkhash!("10")),
                                constructor_calldata: vec![ConstructorParam(starkhash!("11"))],
                            }),
                        ]),
                    }
                }
            }
            let data = vec![
                // All fields populated
                Block::test_data(),
                // All optional are None
                Block {
                    block_hash: None,
                    block_number: None,
                    new_root: None,
                    transactions: Transactions::HashesOnly(vec![StarknetTransactionHash(
                        starkhash!("04"),
                    )]),
                    ..Block::test_data()
                },
            ];
            assert_eq!(
                serde_json::to_string(&data).unwrap(),
                fixture!("block.json")
            );
            assert_eq!(
                serde_json::from_str::<Vec<Block>>(&fixture!("block.json")).unwrap(),
                data
            );
        }

        #[test]
        fn receipt() {
            // Test-only constructors for receipt building blocks.
            impl CommonTransactionReceiptProperties {
                pub fn test_data() -> Self {
                    Self {
                        transaction_hash: StarknetTransactionHash(starkhash!("00")),
                        actual_fee: Fee(web3::types::H128::from_low_u64_be(0x1)),
                        status: TransactionStatus::AcceptedOnL1,
                        status_data: Some("blah".to_string()),
                        block_hash: StarknetBlockHash(starkhash!("0aaa")),
                        block_number: StarknetBlockNumber(3),
                    }
                }
            }
            impl CommonPendingTransactionReceiptProperties {
                pub fn test_data() -> Self {
                    Self {
                        transaction_hash: StarknetTransactionHash(starkhash!("01")),
                        actual_fee: Fee(web3::types::H128::from_low_u64_be(0x2)),
                    }
                }
            }
            impl InvokeTransactionReceipt {
                pub fn test_data() -> Self {
                    Self {
                        common: CommonTransactionReceiptProperties::test_data(),
                        messages_sent: vec![transaction_receipt::MessageToL1 {
                            to_address: crate::core::EthereumAddress(
                                web3::types::H160::from_low_u64_be(0x2),
                            ),
                            payload: vec![crate::core::L2ToL1MessagePayloadElem(starkhash!(
                                "03"
                            ))],
                        }],
                        l1_origin_message: Some(transaction_receipt::MessageToL2 {
                            from_address: crate::core::EthereumAddress(
                                web3::types::H160::from_low_u64_be(0x4),
                            ),
                            payload: vec![crate::core::L1ToL2MessagePayloadElem(starkhash!(
                                "05"
                            ))],
                        }),
                        events: vec![transaction_receipt::Event {
                            from_address: ContractAddress(starkhash!("06")),
                            keys: vec![EventKey(starkhash!("07"))],
                            data: vec![EventData(starkhash!("08"))],
                        }],
                    }
                }
            }
            impl PendingInvokeTransactionReceipt {
                pub fn test_data() -> Self {
                    Self {
                        common: CommonPendingTransactionReceiptProperties::test_data(),
                        messages_sent: vec![transaction_receipt::MessageToL1 {
                            to_address: crate::core::EthereumAddress(
                                web3::types::H160::from_low_u64_be(0x5),
                            ),
                            payload: vec![crate::core::L2ToL1MessagePayloadElem(starkhash!(
                                "06"
                            ))],
                        }],
                        l1_origin_message: Some(transaction_receipt::MessageToL2 {
                            from_address: crate::core::EthereumAddress(
                                web3::types::H160::from_low_u64_be(0x77),
                            ),
                            payload: vec![crate::core::L1ToL2MessagePayloadElem(starkhash!(
                                "07"
                            ))],
                        }),
                        events: vec![transaction_receipt::Event {
                            from_address: ContractAddress(starkhash!("a6")),
                            keys: vec![EventKey(starkhash!("a7"))],
                            data: vec![EventData(starkhash!("a8"))],
                        }],
                    }
                }
            }
            let data = vec![
                // All fields populated
                TransactionReceipt::Invoke(InvokeTransactionReceipt::test_data()),
                // All optional are None
                TransactionReceipt::Invoke(InvokeTransactionReceipt {
                    common: CommonTransactionReceiptProperties {
                        status_data: None,
                        ..CommonTransactionReceiptProperties::test_data()
                    },
                    l1_origin_message: None,
                    events: vec![],
                    ..InvokeTransactionReceipt::test_data()
                }),
                // Somewhat redundant, but want to exhaust the variants
                TransactionReceipt::DeclareOrDeploy(DeclareOrDeployTransactionReceipt {
                    common: CommonTransactionReceiptProperties::test_data(),
                }),
                TransactionReceipt::PendingInvoke(PendingInvokeTransactionReceipt::test_data()),
                TransactionReceipt::PendingDeclareOrDeploy(
                    PendingDeclareOrDeployTransactionReceipt {
                        common: CommonPendingTransactionReceiptProperties::test_data(),
                    },
                ),
            ];
            assert_eq!(
                serde_json::to_string(&data).unwrap(),
                fixture!("receipt.json")
            );
            assert_eq!(
                serde_json::from_str::<Vec<TransactionReceipt>>(&fixture!("receipt.json"))
                    .unwrap(),
                data
            );
        }
    }
}
}
| true
|
0e60fa8c90e85d9107ac2c3fa3c8e925936dd1d7
|
Rust
|
hamadakafu/kyopro
|
/src/bin/abc210c.rs
|
UTF-8
| 671
| 2.515625
| 3
|
[] |
no_license
|
use std::cmp::{max, min};
use std::collections::{HashMap, HashSet};
use itertools::Itertools;
use whiteread::parse_line;
fn main() {
    // n colors, window width k.
    let (n, k): (usize, usize) = parse_line().unwrap();
    let cc: Vec<usize> = parse_line().unwrap();
    // Multiset of colors in the current window of width k.
    let mut counts: HashMap<usize, usize> = HashMap::new();
    for &c in &cc[..k] {
        *counts.entry(c).or_insert(0) += 1;
    }
    let mut best = counts.len();
    // Slide the window one step at a time; the answer is the maximum number
    // of distinct colors seen in any window.
    for i in k..n {
        let leaving = cc[i - k];
        if let Some(cnt) = counts.get_mut(&leaving) {
            *cnt -= 1;
            if *cnt == 0 {
                counts.remove(&leaving);
            }
        }
        *counts.entry(cc[i]).or_insert(0) += 1;
        best = max(best, counts.len());
    }
    println!("{}", best);
}
| true
|
828ab097cc6979e51c904d6ae6bbcce9c9b50502
|
Rust
|
amgarrett09/discord-chatbot
|
/src/types/module_status.rs
|
UTF-8
| 1,082
| 3.125
| 3
|
[] |
no_license
|
use std::error;
use std::fmt;
use std::str::FromStr;
/// Whether a bot module is currently enabled.
#[derive(Clone, Copy)]
pub enum ModuleStatus {
    Enabled,
    Disabled,
}

// Implementing `Display` (instead of `ToString` directly) is the idiomatic
// route: the std blanket impl `impl<T: Display> ToString for T` still provides
// `to_string()`, and the type additionally works in `format!`/`write!`.
// The match is exhaustive so a new variant is a compile error, not a silent
// fallthrough to "disabled".
impl fmt::Display for ModuleStatus {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ModuleStatus::Enabled => write!(f, "enabled"),
            ModuleStatus::Disabled => write!(f, "disabled"),
        }
    }
}
impl FromStr for ModuleStatus {
    type Err = ParseStatusError;

    /// Parses exactly "enabled" or "disabled" (lowercase); anything else errors.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "enabled" {
            Ok(ModuleStatus::Enabled)
        } else if s == "disabled" {
            Ok(ModuleStatus::Disabled)
        } else {
            Err(ParseStatusError)
        }
    }
}
/// Error returned when a string is not a valid `ModuleStatus`.
#[derive(Debug, Clone)]
pub struct ParseStatusError;

impl fmt::Display for ParseStatusError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "failed to parse string slice: it needs to be either \"enabled\" \
             or \"disabled\""
        )
    }
}

// The trait's default `source()` already returns `None`, so the previous
// hand-written override was redundant; an empty impl is equivalent.
impl error::Error for ParseStatusError {}
| true
|
4f43f26eb5bd6d797ea7c2b8248a812a6d5ced36
|
Rust
|
davhogan/RestaurantTheGame
|
/src/ui/simulator/restaurant/menu_item.rs
|
UTF-8
| 2,444
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
// Copyright © 2019 David Hogan
// [This program is licensed under the "MIT License"]
// Please see the file COPYING in the source
// distribution of this software for license terms.
// The following code is used to represent a menu item at the restaurant.
// A menu item has a name, price, quality and an inventory.
// Every menu item has an inventory of 100 upon creation.
// A menu item doesn't have many functions, it just manages its own data.
use std::borrow::Borrow;
/// A single item on the restaurant's menu: name, price, quality and stock.
#[derive(Clone)]
pub struct MenuItem {
    name: String,
    price: f64,
    quality: i64,
    inv: i64,
}

impl MenuItem {
    /// Creates a menu item. Every new item starts with an inventory of 100.
    pub fn new(name: String, price: f64, quality: i64) -> MenuItem {
        MenuItem {
            name,
            price,
            quality,
            inv: 100,
        }
    }

    // --- Getters ---

    /// Returns an owned copy of the item's name.
    pub fn get_name(&self) -> String {
        self.name.to_owned()
    }

    pub fn get_price(&self) -> f64 {
        self.price
    }

    pub fn get_quality(&self) -> i64 {
        self.quality
    }

    pub fn get_inv(&self) -> i64 {
        self.inv
    }

    // --- Setters ---

    pub fn set_quality(&mut self, new_quality: i64) {
        self.quality = new_quality;
    }

    pub fn set_price(&mut self, new_price: f64) {
        self.price = new_price;
    }

    pub fn set_inv(&mut self, new_inv: i64) {
        self.inv = new_inv;
    }

    /// Increases inventory by the given amount.
    /// Allows the user to order more inventory for a specific item.
    pub fn inc_inv(&mut self, inc_amount: i64) {
        self.inv += inc_amount;
    }

    /// Decreases inventory (used when a customer orders this item).
    ///
    /// BUG FIX: the old guard only bailed out when inventory was already
    /// zero or less, so decrementing by more than the stock on hand drove
    /// the inventory negative (e.g. 5 - 25 = -20). Clamp at zero instead,
    /// which matches the behaviour the zero-inventory test expects.
    pub fn dec_inv(&mut self, dec_amount: i64) {
        self.inv = (self.inv - dec_amount).max(0);
    }
}
#[test]
fn inc_inv_test() {
    let mut menu_test = MenuItem::new("Test".to_owned(), 9.99, 1);
    menu_test.set_inv(0);
    menu_test.inc_inv(25);
    // BUG FIX: this assertion referenced an undefined `emp_test` local
    // (a copy-paste from an employee test) and did not compile; the local
    // under test is `menu_test`.
    assert_eq!(25, menu_test.get_inv());
}
#[test]
fn dec_inv_test() {
    let mut menu_test = MenuItem::new("Test".to_owned(), 9.99, 1);
    menu_test.set_inv(25);
    menu_test.dec_inv(25);
    // BUG FIX: asserted on undefined `emp_test`; the local is `menu_test`.
    assert_eq!(0, menu_test.get_inv());
}
#[test]
fn dec_inv_test_zero() {
    let mut menu_test = MenuItem::new("Test".to_owned(), 9.99, 1);
    menu_test.set_inv(0);
    menu_test.dec_inv(25);
    // BUG FIX: asserted on undefined `emp_test`; the local is `menu_test`.
    assert_eq!(0, menu_test.get_inv());
}
| true
|
65d3d76117e10c6b4dcfc88aa1b8e7c5f03712a9
|
Rust
|
ThomasZumsteg/project-euler
|
/common.rs
|
UTF-8
| 8,393
| 3.1875
| 3
|
[] |
no_license
|
extern crate clap;
use clap::ArgMatches;
use env_logger::Builder;
use log::{info, LevelFilter};
use std::collections::HashSet;
use std::io::Write;
/// Initializes the global logger from the number of `-v` flags and returns
/// the level that was set. Five or more `-v`s enable full tracing.
pub fn set_log_level(args: &ArgMatches) -> LevelFilter {
    let level = match args.occurrences_of("verbose") {
        0 => LevelFilter::Off,
        1 => LevelFilter::Error,
        2 => LevelFilter::Warn,
        3 => LevelFilter::Info,
        4 => LevelFilter::Debug,
        _ => LevelFilter::Trace,
    };
    let mut builder = Builder::new();
    builder.filter_level(level);
    // Custom format: "[LEVEL:module::path] message".
    builder.format(|buf, record| {
        writeln!(
            buf,
            "[{}:{}] {}",
            record.level(),
            record.module_path().unwrap_or("None"),
            record.args()
        )
    });
    builder.init();
    info!("Set log level {}", level);
    level
}
/// Returns `Some(r)` when `number` is a perfect square (`r * r == number`),
/// otherwise `None`.
///
/// BUG FIX: the old version trusted `(number as f64).sqrt() as usize`
/// exactly. For numbers above 2^53 the f64 conversion loses precision and
/// the truncated root can be off by one, producing a false `None` for a
/// genuine perfect square; `root * root` could also overflow near
/// `usize::MAX`. Probe the neighbouring candidates with `checked_mul`.
pub fn integer_square_root(number: usize) -> Option<usize> {
    let guess = (number as f64).sqrt() as usize;
    for root in guess.saturating_sub(1)..=guess.saturating_add(1) {
        if let Some(square) = root.checked_mul(root) {
            if square == number {
                return Some(root);
            }
        }
    }
    None
}
#[cfg(test)]
mod test {
    use super::integer_square_root;

    #[test]
    fn test_integer_square_root() {
        // Perfect squares yield their root; everything else yields None.
        let cases: &[(usize, Option<usize>)] =
            &[(12, None), (4, Some(2)), (1, Some(1)), (7, None)];
        for &(input, expected) in cases {
            assert_eq!(integer_square_root(input), expected);
        }
    }
}
pub mod primes {
    use std::collections::HashMap;

    /// Lazily extensible prime source.
    ///
    /// Seeds itself from a fixed-size sieve of Eratosthenes, then grows the
    /// prime list on demand by trial division against the known primes.
    pub struct Primes {
        // Ascending list of all primes found so far.
        primes: Vec<usize>,
        // Iterator cursor: index of the next prime `next()` will hand out.
        pub current: usize,
    }

    impl Primes {
        pub fn new() -> Primes {
            const SIZE: usize = 1000000;
            // NOTE(review): this 1 MB `[bool; SIZE]` lives on the stack —
            // works with default thread stacks, but a `vec![true; SIZE]`
            // would be safer.
            let mut block = [true; SIZE];
            let mut primes = Vec::new();
            log::debug!("Starting seive {}", SIZE);
            for i in 2..SIZE {
                if block[i] {
                    log::debug!("Seived Prime: {} {}", primes.len(), i);
                    primes.push(i);
                    // Mark every multiple of the new prime as composite.
                    for i in ((2*i)..SIZE).step_by(i) {
                        block[i] = false;
                    }
                }
            }
            log::debug!("Finished seive {}", SIZE);
            Primes {
                primes: primes,
                current: 0,
            }
        }

        /// Trial-division primality test; extends the prime list until it
        /// covers sqrt(number).
        pub fn is_prime(&mut self, number: usize) -> bool {
            if number < 2 {
                return false;
            }
            // Make sure we know every prime up to sqrt(number).
            let mut last_prime: usize = *self.primes.last().unwrap();
            while last_prime * last_prime <= number {
                last_prime = self.next_prime();
                self.primes.push(last_prime);
            }
            for p in &self.primes {
                if number % p == 0 && &number != p {
                    return false
                } else if number < p * p {
                    // No divisor found up to sqrt(number) => prime.
                    return true
                }
            }
            panic!("We sould never run out of primes");
        }

        /// Finds the next prime above the largest known one by trial-dividing
        /// odd candidates (the sieve guarantees the list already passed 2).
        fn next_prime(&self) -> usize {
            let mut num: usize = *self.primes.last().unwrap();
            loop {
                num += 2;
                for prime in &self.primes {
                    if num < prime * prime {
                        return num;
                    } else if num % prime == 0 {
                        break;
                    }
                }
            }
        }

        /// Returns the prime at index `nth` (0-based: nth_prime(0) == 2),
        /// extending the list as needed.
        pub fn nth_prime(&mut self, nth: usize) -> usize {
            while nth >= self.primes.len() {
                self.primes.push(self.next_prime());
            }
            self.primes[nth]
        }

        /// Returns `number`'s prime factorization as prime -> multiplicity.
        pub fn prime_factors(&mut self, number: usize) -> HashMap<usize, usize> {
            let mut result = HashMap::new();
            let mut remainer = number;
            for n in 0.. {
                if self.is_prime(remainer) {
                    // Remaining cofactor is itself prime: record and stop.
                    *result.entry(remainer).or_insert(0) += 1;
                    break;
                } else if remainer == 1 {
                    break;
                }
                // Divide out the n-th prime as many times as it fits.
                let prime = self.nth_prime(n);
                while remainer % prime == 0 {
                    *result.entry(prime).or_insert(0) += 1;
                    remainer /= prime;
                }
            }
            result
        }
    }

    impl Iterator for Primes {
        type Item = usize;

        /// Yields primes in ascending order, extending the list on demand.
        fn next(&mut self) -> Option<usize> {
            while self.current >= self.primes.len() {
                let next_prime = self.next_prime();
                log::debug!("Found Prime {}: {}", self.current, next_prime);
                self.primes.push(next_prime);
            }
            self.current += 1;
            Some(self.primes[self.current-1])
        }
    }

    #[cfg(test)]
    mod test {
        use super::*;
        use maplit::hashmap;

        #[test]
        fn test_primes() {
            let mut primes = Primes::new();
            assert!(primes.is_prime(2));
            assert!(primes.is_prime(3));
            assert!(primes.is_prime(5));
            assert!(primes.is_prime(7));
            assert!(!primes.is_prime(0));
            assert!(!primes.is_prime(1));
            assert!(!primes.is_prime(4));
            assert!(!primes.is_prime(9));
            assert!(!primes.is_prime(100));
            assert!(!primes.is_prime(68341));
            assert!(!primes.is_prime(41683));
        }

        #[test]
        fn test_prime_factors() {
            let mut primes = Primes::new();
            assert_eq!(primes.prime_factors(4), hashmap!{2 => 2});
            assert_eq!(primes.prime_factors(7), hashmap!{7 => 1});
            assert_eq!(primes.prime_factors(14), hashmap!{2 => 1, 7 => 1});
            assert_eq!(primes.prime_factors(15), hashmap!{3 => 1, 5 => 1});
            assert_eq!(primes.prime_factors(644), hashmap!{2 => 2, 7 => 1, 23 => 1});
            assert_eq!(primes.prime_factors(645), hashmap!{3 => 1, 5 => 1, 43 => 1});
            assert_eq!(primes.prime_factors(646), hashmap!{2 => 1, 17 => 1, 19 => 1});
        }
    }
}
/// Returns every positive divisor of `num` as a set.
///
/// Walks candidates `n` while `n * n <= num`; each divisor found
/// contributes both itself and its cofactor `num / n`. For `num == 0`
/// the loop never runs and the result is empty.
pub fn find_divisors(num: usize) -> HashSet<usize> {
    let mut divisors = HashSet::new();
    let mut n = 1;
    while n * n <= num {
        if num % n == 0 {
            divisors.insert(n);
            divisors.insert(num / n);
        }
        n += 1;
    }
    divisors
}
/// Infinite Fibonacci iterator. Starting from the seed pair (1, 1) it
/// yields 1, 2, 3, 5, 8, … (the leading duplicate 1 is not emitted).
pub struct Fibonacci {
    curr: usize,
    next: usize,
}

impl Fibonacci {
    /// Creates the iterator positioned at the start of the sequence.
    pub fn new() -> Fibonacci {
        Fibonacci { curr: 1, next: 1 }
    }
}

impl Iterator for Fibonacci {
    type Item = usize;

    fn next(&mut self) -> Option<usize> {
        let sum = self.curr + self.next;
        // Advance the window: `curr` takes the old `next`, `next` the sum.
        self.curr = std::mem::replace(&mut self.next, sum);
        Some(self.curr)
    }
}
pub mod digits {
    use num::bigint::BigInt;

    /// A non-negative number represented as its base-10 digits, most
    /// significant digit first.
    #[derive(Debug, Clone)]
    pub struct Digits {
        pub digits: Vec<usize>
    }

    impl Digits {
        /// Returns a copy with every occurrence of `digit` removed.
        pub fn filter(&self, digit: &usize) -> Digits {
            Digits {
                digits: self.digits.iter().copied().filter(|d| d != digit).collect()
            }
        }
    }

    impl From<usize> for Digits {
        fn from(number: usize) -> Self {
            // Zero is special-cased to `[0]` so the representation matches
            // `From<BigInt>` (which renders "0" as one digit); the loop
            // below would otherwise produce an empty digit list.
            if number == 0 {
                return Digits { digits: vec![0] };
            }
            let mut digits = Vec::new();
            let mut remainer = number;
            while remainer > 0 {
                digits.insert(0, remainer % 10);
                remainer /= 10;
            }
            Digits { digits }
        }
    }

    impl From<BigInt> for Digits {
        // NOTE(review): `to_digit(10).unwrap()` panics on the '-' sign, so
        // this only supports non-negative values — confirm callers never
        // pass negative BigInts.
        fn from(number: BigInt) -> Self {
            Digits {
                digits: number.to_string().chars().map(|d| d.to_digit(10).unwrap() as usize).collect()
            }
        }
    }

    impl From<Digits> for BigInt {
        /// Folds the digits back into a `BigInt` (Horner's scheme).
        fn from(number: Digits) -> Self {
            let mut result = BigInt::from(0);
            for d in number.digits {
                result = result * BigInt::from(10) + BigInt::from(d);
            }
            result
        }
    }

    impl From<Digits> for usize {
        /// Folds the digits back into a `usize`; overflows for values that
        /// do not fit.
        fn from(number: Digits) -> Self {
            let mut result = 0;
            for d in number.digits {
                result = result * 10 + d;
            }
            result
        }
    }

    impl From<Vec<usize>> for Digits {
        fn from(digits: Vec<usize>) -> Self {
            Digits { digits }
        }
    }

    #[cfg(test)]
    mod test {
        use super::*;

        #[test]
        fn test_from_big_int() {
            assert_eq!(Digits::from(BigInt::from(123456)).digits, vec![1,2,3,4,5,6]);
            assert_eq!(Digits::from(BigInt::from(0)).digits, vec![0]);
            assert_eq!(Digits::from(BigInt::from(10)).digits, vec![1, 0]);
        }

        // Guards the zero special case in `From<usize>`.
        #[test]
        fn test_from_usize_zero() {
            assert_eq!(Digits::from(0usize).digits, vec![0]);
        }
    }
}
| true
|
fcd4d23562836037891d2dc5f50ef322efc9e3be
|
Rust
|
oOBoomberOo/rna
|
/src/util.rs
|
UTF-8
| 2,554
| 2.984375
| 3
|
[] |
no_license
|
use std::path::PathBuf;
use std::fs;
use serde_json as js;
use serde::{Serialize, Deserialize};
// Shape of the parts of `pack.mcmeta` this module inspects; all other JSON
// fields are ignored during deserialization.
#[derive(Serialize, Deserialize)]
struct MetaFormat {
    // `None` when the `compiler_options` key is absent from the JSON.
    compiler_options: Option<Vec<CompilerOption>>
}

// A single entry of the `compiler_options` array.
#[derive(Serialize, Deserialize)]
struct CompilerOption {
    name: String
}
/// Shorthand for `Result<(), MetaError>`
pub type MetaResult = Result<(), MetaError>;
/// Check `pack.mcmeta` file for `compiler_options` field
pub fn check_meta(path: impl Into<PathBuf>) -> MetaResult {
let path: PathBuf = path.into();
if !path.exists() {
return Err(MetaError::NotExist(path))
}
if path.is_dir() {
return Err(MetaError::NotAFile(path));
}
let content = match fs::read(&path) {
Ok(value) => value,
Err(error) => return Err(MetaError::Io((path, error)))
};
let interpret: MetaFormat = match js::from_slice(&content) {
Ok(value) => value,
Err(error) => return Err(MetaError::Serde((path, error)))
};
if interpret.compiler_options.is_none() {
return Err(MetaError::NoCompilerOptions(path));
}
Ok(())
}
use std::io;
/// General error type for `check_meta()` function
#[derive(Debug)]
pub enum MetaError {
    /// Emitted when `path` does not exist
    NotExist(PathBuf),
    /// Emitted when `path` is a directory rather than a file
    NotAFile(PathBuf),
    /// Emitted when `serde_json` cannot parse the file as JSON
    Serde((PathBuf, serde_json::Error)),
    /// Emitted when an I/O error occurs while reading `path`
    Io((PathBuf, io::Error)),
    /// Emitted when `compiler_options` is not found inside `path`
    NoCompilerOptions(PathBuf)
}
use colored::*;
use std::fmt;
impl fmt::Display for MetaError {
    /// Renders a user-facing description of the error; paths are
    /// colorized via the `colored` crate.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Grammar fixed: "is directory" -> "is a directory",
            // "does not exists" -> "does not exist".
            MetaError::NotAFile(path) => write!(f, "'{}' is a directory.", path.display().to_string().cyan()),
            MetaError::NotExist(path) => write!(f, "'{}' does not exist.", path.display().to_string().cyan()),
            MetaError::Serde((path, error)) => write!(f, "[{}] {}", path.display().to_string().green(), error),
            MetaError::Io((path, error)) => write!(f, "[{}] {}", path.display().to_string().green(), error),
            MetaError::NoCompilerOptions(path) => write!(f, "'{}' does not have {} field.", path.display().to_string().cyan(), "compiler_options".white().on_blue()),
        }
    }
}
/// Check if `path` has the correct extension for loot table script
///
/// Recognised extensions: `ult`, `megu`, and the compound `json.merge`.
pub fn is_loot_table_script(path: impl Into<PathBuf>) -> bool {
    let path: PathBuf = path.into();
    // `Path::extension` only yields the text after the *last* dot, so the
    // compound `.json.merge` extension must be checked against the whole
    // file name (the original `"json.merge"` match arm was unreachable).
    if path
        .file_name()
        .and_then(|name| name.to_str())
        .map_or(false, |name| name.ends_with(".json.merge"))
    {
        return true;
    }
    match path.extension().and_then(|ext| ext.to_str()) {
        Some("ult") | Some("megu") => true,
        _ => false,
    }
}
| true
|
400e486cd678372482c6aeed96ca6ef1cefc183b
|
Rust
|
flada-auxv/graph-node
|
/graph/src/components/query.rs
|
UTF-8
| 299
| 2.640625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
use futures::sync::mpsc::Sender;
use data::query::Query;
/// Common trait for query runners that run queries against a [Store](../store/trait.Store.html).
pub trait QueryRunner {
    /// Sender to which others can write queries that need to be run.
    fn query_sink(&mut self) -> Sender<Query>;
}
| true
|
6331af272cba2251681c7db445461046d333517b
|
Rust
|
Byron/gitoxide
|
/gix/src/reference/log.rs
|
UTF-8
| 1,274
| 2.78125
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
//!
use gix_object::commit::MessageRef;
use gix_ref::file::ReferenceExt;
use crate::{
bstr::{BStr, BString, ByteVec},
Reference,
};
impl<'repo> Reference<'repo> {
    /// Return a platform for obtaining iterators over reference logs.
    // Thin delegation to the underlying `gix_ref` file store.
    pub fn log_iter(&self) -> gix_ref::file::log::iter::Platform<'_, '_> {
        self.inner.log_iter(&self.repo.refs)
    }
    /// Return true if a reflog is present for this reference.
    pub fn log_exists(&self) -> bool {
        self.inner.log_exists(&self.repo.refs)
    }
}
/// Generate a message typical for git commit logs based on the given `operation`, commit `message` and `num_parents` of the commit.
///
/// The result has the shape `"<operation>[ (<commit-type>)]: <summary>"`,
/// where the parenthesized commit type is omitted for ordinary
/// single-parent commits (see `commit_type_by_parents`).
pub fn message(operation: &str, message: &BStr, num_parents: usize) -> BString {
    let mut out = BString::from(operation);
    if let Some(commit_type) = commit_type_by_parents(num_parents) {
        out.push_str(b" (");
        out.extend_from_slice(commit_type.as_bytes());
        out.push_byte(b')');
    }
    out.push_str(b": ");
    // Only the summary of the commit message, as computed by
    // `MessageRef::summary`, is appended.
    out.extend_from_slice(&MessageRef::from_bytes(message).summary());
    out
}
/// Classifies a commit by parent count: the root commit is "initial",
/// multi-parent commits are "merge", and an ordinary single-parent commit
/// gets no special label (`None`).
pub(crate) fn commit_type_by_parents(count: usize) -> Option<&'static str> {
    match count {
        0 => Some("initial"),
        1 => None,
        _ => Some("merge"),
    }
}
| true
|
4d13611f3077cc271e786c6cac3f75e9cd531500
|
Rust
|
Nazek42/krisp
|
/src/builtins.rs
|
UTF-8
| 11,522
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#![allow(non_snake_case)]
use bacon_rajan_cc::Cc;
use std::ops::{Add, BitAnd, BitOr, BitXor, Div, Mul, Rem, Shl, Shr, Sub};
use std::iter::once;
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use crate::expr::*;
use crate::interpreter::Namespace;
use crate::parse::parse_source_string;
// Generates one binary arithmetic builtin per `method => name` pair. The
// `$method` identifier does double duty: it names the generated wrapper
// function AND the numeric method invoked on the operands (resolved via
// the `std::ops` traits imported at the top of the file).
macro_rules! arithmetic {
    ($($method:ident => $kname:expr),+) => {
        $(
            fn $method(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
                wrong_args_check!($kname, args, 2);
                let a = Cc::clone(&args[0]);
                let b = Cc::clone(&args[1]);
                Ok(Cc::new(match (&*a, &*b) {
                    (SExpr::Atom(Atom::Native(na)), SExpr::Atom(Atom::Native(nb)))
                        => SExpr::Atom(Atom::Native({
                            use NativeAtom::{Int, Float};
                            // Mixed int/float operands promote to float.
                            match (na, nb) {
                                (Int(ia),   Int(ib)  ) => Int(ia.$method(ib)),
                                (Int(ia),   Float(fb)) => Float((*ia as f64).$method(fb)),
                                (Float(fa), Int(ib)  ) => Float(fa.$method(*ib as f64)),
                                (Float(fa), Float(fb)) => Float(fa.$method(fb)),
                                _ => { return Err(format!("{} expects int or float (got {} . {})", $kname, a, b)) }
                            }
                        })),
                    _ => { return Err(format!("{} expects int or float (got {} . {})", $kname, a, b)) }
                }))
            }
        )+
    }
}
// Builds a constructor function returning a `Namespace` mapping each
// Krisp-visible name (`$kname`) to an extern-function wrapper around the
// Rust function `$func`.
macro_rules! namespace {
    ($nsname:ident; $($func:ident => $kname:expr),*) => {
        pub fn $nsname() -> Namespace {
            let mut module = Namespace::new();
            $(module.insert($kname.to_owned(), Cc::new(SExpr::extern_fn(ExternFunction{ func: $func })));)*
            module
        }
    }
}
// Instantiate the arithmetic builtins and their Krisp-visible names.
arithmetic!{ add => "+", sub => "-", mul => "*", div => "/", rem => "mod" }
// Three-way comparison builtin "=": returns Int(-1 | 0 | 1) for
// less / greater / equal. Numbers compare cross-type via f64 promotion;
// strings and identifiers compare lexicographically and interchangeably.
fn cmp(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("=", args, 2);
    let a = Cc::clone(&args[0]);
    let b = Cc::clone(&args[1]);
    Ok(Cc::new(match (&*a, &*b) {
        (SExpr::Atom(Atom::Native(na)), SExpr::Atom(Atom::Native(nb)))
            => SExpr::Atom(Atom::Native({
                use NativeAtom::{Int, Float, Str, Ident};
                Int(match (na, nb) {
                    (Int(ia),   Int(ib)  ) => if ia < ib { -1 } else if ia > ib { 1 } else { 0 },
                    (Int(ia),   Float(fb)) => if (*ia as f64) < *fb { -1 } else if (*ia as f64) > *fb { 1 } else { 0 },
                    (Float(fa), Int(ib)  ) => if *fa < (*ib as f64) { -1 } else if *fa > (*ib as f64) { 1 } else { 0 },
                    (Float(fa), Float(fb)) => if fa < fb { -1 } else if fa > fb { 1 } else { 0 },
                    (Str(sa),   Str(sb)  ) => if sa < sb { -1 } else if sa > sb { 1 } else { 0 },
                    (Str(sa),   Ident(sb)) => if sa < sb { -1 } else if sa > sb { 1 } else { 0 },
                    (Ident(sa), Str(sb)  ) => if sa < sb { -1 } else if sa > sb { 1 } else { 0 },
                    (Ident(sa), Ident(sb)) => if sa < sb { -1 } else if sa > sb { 1 } else { 0 },
                    _ => { return Err(format!("cmp expects int, float, or string (got {} . {})", a, b)) }
                })
            })),
        _ => { return Err(format!("cmp expects int, float, or string (got {} . {})", a, b)) }
    }))
}
/// Builtin `head`: first element of a list argument.
fn head(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("head", args, 1);
    match args[0].get_list() {
        Some(list) => Ok(Cc::clone(&list[0])),
        None => Err(format!("head expected list, got {}", args[0])),
    }
}
/// Builtin `tail`: everything after the first element. A single remaining
/// element is returned bare rather than as a one-element list.
fn tail(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("tail", args, 1);
    let list = args[0].get_list().ok_or_else(|| format!("tail expected list, got {}", args[0]))?;
    let mut rest: Vec<_> = Vec::with_capacity(list.len().saturating_sub(1));
    for item in &list[1..] {
        rest.push(Cc::clone(item));
    }
    if rest.len() == 1 {
        Ok(Cc::clone(&rest[0]))
    } else {
        Ok(Cc::new(SExpr::List(rest)))
    }
}
/// Builtin `cons`: prepends the first argument to the second. A non-list
/// second argument produces a two-element list (dotted-pair style).
fn cons(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("cons", args, 2);
    let first = Cc::clone(&args[0]);
    let rest = Cc::clone(&args[1]);
    if let Some(list) = rest.get_list() {
        let mut items = Vec::with_capacity(list.len() + 1);
        items.push(first);
        items.extend(list.iter().map(Cc::clone));
        Ok(Cc::new(SExpr::List(items)))
    } else {
        Ok(Cc::new(SExpr::List(vec![first, rest])))
    }
}
/// Builtin `cat`: concatenates a list with a second list, or appends a
/// single non-list value to the first list.
fn append(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("append", args, 2);
    let first = Cc::clone(&args[0]);
    let first_items = match first.get_list() {
        Some(items) => items,
        None => return Err("append expects a list as its first argument".to_owned()),
    };
    let rest = Cc::clone(&args[1]);
    let combined: Vec<_> = if let Some(list) = rest.get_list() {
        first_items.iter().chain(list.iter()).map(Cc::clone).collect()
    } else {
        first_items.iter().chain(once(&rest)).map(Cc::clone).collect()
    };
    Ok(Cc::new(SExpr::List(combined)))
}
/// Builtin `len`: length of a list argument as an Int.
fn len(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("len", args, 1);
    match args[0].get_list() {
        Some(list) => Ok(Cc::new(SExpr::int(list.len() as i64))),
        None => Err(format!("len expected list, got {}", args[0])),
    }
}
// Generates one type-predicate builtin per `getter => name` pair: each
// wrapper calls the named accessor on its single argument and returns
// Int(1) if it yielded `Some`, Int(0) otherwise.
macro_rules! type_check {
    ($($getter:ident => $kname:expr),+) => {
        $(
            fn $getter(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
                wrong_args_check!($kname, args, 1);
                Ok(Cc::new(SExpr::int(args[0].$getter().is_some() as i64)))
            }
        )+
    }
}

// Instantiate the predicate builtins and their Krisp-visible names.
type_check!{
    get_int => "int?",
    get_float => "float?",
    get_string => "string?",
    get_ident => "ident?",
    get_native_fn => "native-fn?",
    get_extern_fn => "extern-fn?",
    get_extern_obj => "extern-obj?",
    get_list => "list?"
}
// Builtin `list`: wraps its (variadic) arguments in a list expression.
fn make_list(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    Ok(Cc::new(SExpr::list(args)))
}
/// Builtin `print`: writes each argument on its own line. Strings print
/// their raw contents; everything else uses the expression's `Display`.
/// Always evaluates to null.
fn println_fn(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    for expr in &args {
        match expr.get_string() {
            Some(s) => println!("{}", s),
            None => println!("{}", expr),
        }
    }
    Ok(Cc::new(SExpr::null()))
}
/// Builtin `get`: indexes a list (second argument) by an Int (first
/// argument). The index type is validated before the list type, matching
/// the original error ordering.
fn lookup(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("get", args, 2);
    let index = match args[0].get_int() {
        Some(i) => *i,
        None => return Err(format!("get expected an int (got {})", args[0])),
    };
    match args[1].get_list() {
        Some(list) => Ok(Cc::clone(&list[index as usize])),
        None => Err(format!("get expected a list (got {})", args[1])),
    }
}
/// Builtin `parse`: parses a source string into expressions. Zero parsed
/// expressions yield null, exactly one is returned bare, and several are
/// wrapped in a list.
fn parse_fn(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("parse", args, 1);
    let source = args[0].get_string().ok_or_else(|| "parse expects a string".to_owned())?;
    let parsed = parse_source_string(source)?;
    let result = match parsed.len() {
        0 => Cc::new(SExpr::null()),
        1 => Cc::new(parsed[0].clone()),
        _ => Cc::new(SExpr::List(parsed.iter().map(|e| Cc::new(e.clone())).collect())),
    };
    Ok(result)
}
// Krisp hash maps are keyed by a 64-bit hash of the key expression
// (computed by `TryHash`), not by the expression itself.
type KrispHashMap = HashMap<u64, Cc<SExpr>>;
impl ExternObj for KrispHashMap {
    fn box_clone(&self) -> Box<ExternObj> {
        Box::new(self.clone())
    }
    fn name(&self) -> String {
        "hashmap".to_owned()
    }
    // NOTE(review): this reports `true` for an *empty* map, which looks
    // inverted for a truthiness test — confirm against the `ExternObj`
    // `as_bool` contract before relying on it.
    fn as_bool(&self) -> bool {
        self.is_empty()
    }
}
// Best-effort hashing of s-expressions: only ints, strings, identifiers,
// and lists composed entirely of hashable elements hash to `Some`.
trait TryHash {
    fn try_hash(&self) -> Option<u64>;
}
impl TryHash for SExpr {
    fn try_hash(&self) -> Option<u64> {
        match self {
            SExpr::Atom(atom) => if let Atom::Native(native) = atom {
                use NativeAtom::*;
                match native {
                    Int(x) => {
                        let mut h = DefaultHasher::new();
                        x.hash(&mut h);
                        Some(h.finish())
                    },
                    Str(x) | Ident(x) => {
                        let mut h = DefaultHasher::new();
                        x.hash(&mut h);
                        Some(h.finish())
                    },
                    // Floats and any other native atoms are not hashable.
                    _ => None
                }
            } else { None },
            SExpr::List(list) => {
                // A list hashes only if every element does; the element
                // hashes are then folded into one hasher in order.
                let hash_attempt: Option<Vec<u64>> = list.iter().map(|expr| expr.try_hash()).collect();
                hash_attempt.map(|hashes| {
                    let mut h = DefaultHasher::new();
                    for &hash in hashes.iter() {
                        h.write_u64(hash);
                    }
                    h.finish()
                })
            }
        }
    }
}
// Builtin `#new`: builds a hash map from a list of `(key value)` pairs.
// Keys must be hashable (see `TryHash`); later duplicate keys overwrite
// earlier ones via `HashMap::insert`.
fn hash_constructor(args_outer: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("#new", args_outer, 1);
    if let Some(args) = args_outer[0].get_list() {
        // First pass: validate that every entry is a two-element list.
        let pairs = args.iter().enumerate().map(|(i, arg)| {
            if let Some(list) = arg.get_list() {
                if list.len() == 2 {
                    Ok((Cc::clone(&list[0]), Cc::clone(&list[1])))
                } else {
                    Err(format!("#new expected a list of pairs (length of index {} was {})", i, list.len()))
                }
            } else {
                Err(format!("#new expected a list of pairs (index {} was {})", i, arg))
            }
        }).collect::<Result<Vec<(Cc<SExpr>, Cc<SExpr>)>, String>>()?;
        // Second pass: hash every key, rejecting unhashable ones.
        let hash_pairs = pairs.iter().map(|(k, v)| {
            if let Some(hash) = k.try_hash() {
                Ok((hash, Cc::clone(v)))
            } else {
                Err(format!("keys must be hashable ({} is not hashable)", k))
            }
        }).collect::<Result<Vec<(u64, Cc<SExpr>)>, String>>()?;
        Ok(Cc::new(SExpr::extern_obj({
            let mut hm = KrispHashMap::new();
            for (hash, v) in hash_pairs {
                hm.insert(hash, Cc::clone(&v));
            }
            Box::new(hm)
        })))
    } else {
        Err(format!("#new expected a list of pairs (got {})", args_outer[0]))
    }
}
// Builtin `#get`: looks a hashable key up in a `#new`-created map.
// Errors, in order: first argument not an extern object / not a hash map,
// unhashable key, missing key.
fn hash_lookup(args: Vec<Cc<SExpr>>) -> Result<Cc<SExpr>, String> {
    wrong_args_check!("#get", args, 2);
    if let Some(eo) = args[0].get_extern_obj() {
        // Downcast the opaque extern object back to our map type.
        if let Some(hm) = eo.downcast_ref::<KrispHashMap>() {
            if let Some(key) = args[1].try_hash() {
                if let Some(val) = hm.get(&key) {
                    Ok(Cc::clone(val))
                } else {
                    Err(format!("key {} not found", args[1]))
                }
            } else {
                Err(format!("keys must be hashable ({} is not hashable)", args[1]))
            }
        } else {
            Err(format!("#get expected a hash map (got {:?})", eo))
        }
    } else {
        Err(format!("#get expected a hash map (got {})", args[0]))
    }
}
// The default builtin namespace: maps each Krisp-visible name to the Rust
// function implementing it (see the `namespace!` macro above).
namespace! { BUILTINS;
    add => "+",
    sub => "-",
    mul => "*",
    div => "/",
    rem => "mod",
    cmp => "cmp",
    head => "head",
    tail => "tail",
    cons => "cons",
    append => "cat",
    len => "len",
    lookup => "get",
    make_list => "list",
    println_fn => "print",
    parse_fn => "parse",
    hash_constructor => "#new",
    hash_lookup => "#get",
    get_int => "int?",
    get_float => "float?",
    get_string => "string?",
    get_ident => "ident?",
    get_native_fn => "native-fn?",
    get_extern_fn => "extern-fn?",
    get_extern_obj => "extern-obj?",
    get_list => "list?"
}
| true
|
6262cde677e34eabaccfa9a375d455ac26960991
|
Rust
|
dtamai/aoc
|
/2020/src/bin/05.rs
|
UTF-8
| 1,507
| 3.546875
| 4
|
[
"Unlicense"
] |
permissive
|
/// AoC 2020 day 5: decode boarding passes from `data/05.txt` into seat IDs.
fn main() -> eyre::Result<()> {
    let passes = passes()?;
    let ids = passes.into_iter().map(|p| seat_id(&p));
    // first part: the highest seat ID on any pass.
    // NOTE(review): `unwrap()` panics if the input file is empty.
    let max = ids.clone().max().unwrap();
    println!("Max seatID = {}", max);
    // second part: my seat is the single gap in the sorted ID sequence.
    let mut sorted = ids.collect::<Vec<i32>>();
    sorted.sort_unstable();
    let first = sorted[0];
    for (idx, id) in sorted.iter().enumerate() {
        // While the list is contiguous, `first + idx` tracks each entry;
        // the first mismatch marks the missing ID.
        let maybe = first + idx as i32;
        if maybe == *id {
            continue;
        }
        println!("My seatID = {}", maybe);
        break;
    }
    Ok(())
}
/// Decodes a boarding pass into its seat ID (`row * 8 + col`).
///
/// The first 7 characters encode the row in binary ('B' = 1, 'F' = 0);
/// the remaining characters encode the column ('R' = 1, 'L' = 0).
fn seat_id(pass: &str) -> i32 {
    let row: i32 = pass[0..7]
        .chars()
        .enumerate()
        .map(|(idx, c)| match c {
            'B' => 1 << (6 - idx),
            'F' => 0,
            _ => unreachable!(),
        })
        .sum();
    let col: i32 = pass[7..]
        .chars()
        .enumerate()
        .map(|(idx, c)| match c {
            'R' => 1 << (2 - idx),
            'L' => 0,
            _ => unreachable!(),
        })
        .sum();
    row * 8 + col
}
/// Reads one boarding pass per line from `data/05.txt`.
fn passes() -> eyre::Result<Vec<String>> {
    use std::fs::File;
    use std::io::{BufRead, Lines};
    let lines: Lines<_> = std::io::BufReader::new(File::open("data/05.txt")?).lines();
    // NOTE(review): `l.unwrap()` panics on a line-level read error instead
    // of propagating it through the returned Result.
    Ok(lines.map(|l| l.unwrap()).collect())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Known pass -> seat-ID pairs.
    #[test]
    fn examples() {
        assert_eq!(567, seat_id("BFFFBBFRRR"));
        assert_eq!(119, seat_id("FFFBBBFRRR"));
        assert_eq!(820, seat_id("BBFFBBFRLL"));
    }
}
| true
|
7b87494e9dc1cdc938afd86f2753c3727f4f8273
|
Rust
|
rust-lang/rust
|
/src/tools/unicode-table-generator/src/cascading_map.rs
|
UTF-8
| 3,497
| 2.578125
| 3
|
[
"Apache-2.0",
"LLVM-exception",
"NCSA",
"BSD-2-Clause",
"LicenseRef-scancode-unicode",
"MIT",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
use crate::fmt_list;
use crate::raw_emitter::RawEmitter;
use std::collections::HashMap;
use std::fmt::Write as _;
use std::ops::Range;
impl RawEmitter {
    /// Emits a compact whitespace lookup: a 256-byte bitmap indexed by the
    /// code point's low byte, one bit per distinct high byte, plus a
    /// generated `lookup` function dispatching on the high byte.
    ///
    /// Returns `true` to signal that this strategy handled the ranges.
    pub fn emit_cascading_map(&mut self, ranges: &[Range<u32>]) -> bool {
        // Replaces a 256-element literal of zeros with an array initializer.
        let mut map = [0u8; 256];
        // Expand the ranges into individual code points.
        let points = ranges
            .iter()
            .flat_map(|r| r.clone())
            .collect::<Vec<u32>>();
        println!("there are {} points", points.len());
        // how many distinct ranges need to be counted?
        let mut codepoints_by_high_bytes = HashMap::<usize, Vec<u32>>::new();
        for point in points {
            // assert that there is no whitespace over the 0x3000 range.
            assert!(point <= 0x3000, "the highest unicode whitespace value has changed");
            let high_bytes = point as usize >> 8;
            codepoints_by_high_bytes.entry(high_bytes).or_default().push(point);
        }
        let mut bit_for_high_byte = 1u8;
        let mut arms = Vec::<String>::new();
        // Sort for deterministic match-arm ordering in the generated code.
        let mut high_bytes: Vec<usize> = codepoints_by_high_bytes.keys().copied().collect();
        high_bytes.sort_unstable();
        for high_byte in high_bytes {
            let codepoints = codepoints_by_high_bytes.get_mut(&high_byte).unwrap();
            if codepoints.len() == 1 {
                // A lone code point compares directly; no bitmap bit needed.
                let ch = codepoints.pop().unwrap();
                arms.push(format!("{} => c as u32 == {:#04x}", high_byte, ch));
                continue;
            }
            // more than 1 codepoint in this arm
            for codepoint in codepoints {
                map[(*codepoint & 0xff) as usize] |= bit_for_high_byte;
            }
            arms.push(format!(
                "{} => WHITESPACE_MAP[c as usize & 0xff] & {} != 0",
                high_byte, bit_for_high_byte
            ));
            bit_for_high_byte <<= 1;
        }
        writeln!(&mut self.file, "static WHITESPACE_MAP: [u8; 256] = [{}];", fmt_list(map.iter()))
            .unwrap();
        self.bytes_used += 256;
        writeln!(&mut self.file, "#[inline]").unwrap();
        writeln!(&mut self.file, "pub fn lookup(c: char) -> bool {{").unwrap();
        writeln!(&mut self.file, "    match c as u32 >> 8 {{").unwrap();
        for arm in arms {
            writeln!(&mut self.file, "        {},", arm).unwrap();
        }
        writeln!(&mut self.file, "        _ => false,").unwrap();
        writeln!(&mut self.file, "    }}").unwrap();
        writeln!(&mut self.file, "}}").unwrap();
        true
    }
}
| true
|
331cd57826ee0411246facf691b5ed13deb55a51
|
Rust
|
WisartArfun/Divisionaries
|
/src/logger/mod.rs
|
UTF-8
| 1,644
| 3.234375
| 3
|
[] |
no_license
|
//! Configures and manages the logging.
use log;
use log4rs;
/// Initializes the root logger from the rust log crate using a config file.
///
/// This is done with a config file. It uses the `log` crate,
/// which is a lightweight logging facade, and `log4rs` crate,
/// which is inspired by the java log4j library.
///
/// # Arguments
///
/// * src: `&str` - path to a config file
///
/// # Panics
///
/// Panics if the config file cannot be read or parsed (the
/// `log4rs::init_file` result is unwrapped).
///
/// # Examples
///
/// Importing, initializing and logging:
///
/// ```rust
/// use log;
/// use bucketer::logger;
///
/// logger::init("tests/log4rs.yaml");
/// log::debug!("This is a log message on the debug level.");
/// ```
///
/// Here is an example config file,
/// more details can be found in the documentation for `log4rs`.
///
/// ```yaml
/// refresh_rate: 30 seconds
/// appenders:
///   stdout_detailed:
///     kind: console
///     encoder:
///       pattern: "{f}{n}{M} - l: {L}{n}{d(%Y-%m-%d %H:%M:%S %Z)(utc)} [{h({l})}] - {m}{n}"
///   stdout:
///     kind: console
///     encoder:
///       pattern: "{d(%H:%M:%S %Z)(utc)} [{h({l})}] - {m}{n}"
///   file:
///     kind: file
///     path: "logs/log.log"
///     encoder:
///       pattern: "{f}{n}{M} - l: {L}{n}{d(%Y-%m-%d %H:%M:%S %Z)(utc)} [{h({l})}] - {m}{n}"
/// root:
///   level: debug
///   appenders:
///     - stdout
///     - file
/// loggers:
///   app::backend::db:
///     level: debug
///   app::requests:
///     level: debug
///     appenders:
///       - file
///     additive: false
/// ```
pub fn init(src: &str) {
    log4rs::init_file(src, Default::default()).unwrap(); // this is global
    log::info!("log4rs initialized from config file at src: {}", src);
}
| true
|
293b2e8a246a2d0edf2959c791b6a3a79c4cd0ad
|
Rust
|
thibault-ml/lfw-solver
|
/src/lfw/graph.rs
|
UTF-8
| 5,852
| 3.125
| 3
|
[
"Unlicense"
] |
permissive
|
extern crate serde_json;
use self::serde_json::Error as JsonError;
use self::serde_json::Value as JsonValue;
use self::serde_json::from_value as parse_json_value;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::error;
use std::fmt;
use std::iter::FromIterator;
use std::ops::Index;
// Board locations are numbered 1..=195 (see the range checks below).
pub const MIN_INDEX: usize = 1;
pub const MAX_INDEX: usize = 195;
// JSON keys holding each location's outgoing connections.
pub const REG_MOVE_KEY: &'static str = "regular_moves";
pub const ALLEY_MOVE_KEY: &'static str = "alleyway_moves";
/// Errors produced while building a `Graph` from JSON.
#[derive(Debug)]
pub enum Error {
    /// The top-level JSON value was not an object.
    GraphNotAnObject,
    /// A location index was missing from the JSON object.
    LocationNotFound(u32),
    /// A location's value could not be deserialized; carries the JSON error.
    LocationNotParsable(u32, JsonError),
    /// A location entry lacked a required key.
    MissingKey(String),
    /// A connection referenced a location outside 1..=195.
    InvalidLocationValue(u32),
}
impl error::Error for Error {
    // Implements the legacy `description`/`cause` API (pre-`source`),
    // matching the rest of this file's older-edition style (`try!`, `...`).
    fn description(&self) -> &str {
        match *self {
            Error::GraphNotAnObject => "invalid graph argument",
            Error::LocationNotFound(_) => "location not found",
            Error::LocationNotParsable(_, ref error) => error.description(),
            Error::MissingKey(_) => "missing key",
            Error::InvalidLocationValue(_) => "invalid location value",
        }
    }
    fn cause(&self) -> Option<&error::Error> {
        match *self {
            // Only the parse failure wraps an underlying error.
            Error::LocationNotParsable(_, ref error) => Some(error),
            _ => None,
        }
    }
}
impl fmt::Display for Error {
    // Human-readable, per-variant error messages.
    // NOTE(review): "does not exists" is a grammar typo in the emitted
    // message; left as-is here since it is runtime output.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Error::GraphNotAnObject => {
                write!(fmt, "Graph argument is not a JSON Object")
            },
            Error::LocationNotFound(ref idx) => {
                write!(fmt, "Could not find expected location: {}", idx)
            },
            Error::LocationNotParsable(ref idx, ref error) => {
                write!(fmt, "Value for location {} not parsable: {}", idx, error)
            },
            Error::MissingKey(ref key) => {
                write!(fmt, "Could not find key {}", key)
            },
            Error::InvalidLocationValue(ref loc) => {
                write!(fmt, "Invalid location value {} (must be 1...195)", loc)
            },
        }
    }
}
/// Adjacency lists for the board: index `i` holds the connections of
/// location `i + 1` (locations are 1-based externally).
#[derive(Debug)]
pub struct Graph {
    // locations: Vec<Vec<u32>>,
    regular_connections: Vec<Vec<u32>>,
    alleyway_connections: Vec<Vec<u32>>,
}
impl Graph {
    // Extracts and validates one connection list from a parsed location
    // entry; every referenced location must lie in 1..=195.
    fn verified_connections(all_connections: &BTreeMap<String, Vec<u32>>, key: &str) -> Result<Vec<u32>, Error> {
        let connections = try!(all_connections
            .get(key)
            .ok_or(Error::MissingKey(key.to_string())));
        connections.iter().map(|cnx| {
            match *cnx {
                c @ 1 ... 195 => Ok(c),
                _ => Err(Error::InvalidLocationValue(*cnx))
            }
        }).collect::<Result<Vec<u32>, Error>>()
    }
    // Builds the graph from a JSON object keyed by location number
    // ("1" .. "195"); every location must be present.
    pub fn from_json(json: JsonValue) -> Result<Graph, Error> {
        // Not using `as_object` since it gives us a ref, and we'd have to clone the values.
        // We're not interested in the JSON object once we parse it, so it's fine to move values.
        let mut location_map = try!(match json {
            JsonValue::Object(obj) => Some(obj),
            _ => None
        }.ok_or(Error::GraphNotAnObject));
        let mut all_regular_connections: Vec<Vec<u32>> = Vec::new();
        let mut all_alleyway_connections: Vec<Vec<u32>> = Vec::new();
        for x in 0..195 {
            let idx = x + 1;
            // `remove` moves the value out of the map, avoiding a clone.
            let location = try!(location_map
                .remove( &(idx.to_string()) )
                .ok_or(Error::LocationNotFound(idx)));
            let all_connections = try!(parse_json_value::<BTreeMap<String, Vec<u32>>>(location).or_else(|e| {
                Err(Error::LocationNotParsable(idx, e))
            }));
            let regular_cnx = try!(Graph::verified_connections(&all_connections, REG_MOVE_KEY));
            all_regular_connections.push(regular_cnx);
            let alleyway_cnx = try!(Graph::verified_connections(&all_connections, ALLEY_MOVE_KEY));
            all_alleyway_connections.push(alleyway_cnx);
        }
        Ok(Graph {
            regular_connections: all_regular_connections,
            alleyway_connections: all_alleyway_connections,
        })
    }
    // Regular-move neighbors of a 1-based location; None if out of range.
    pub fn connections_for_location(&self, location: &u32) -> Option<&Vec<u32>> {
        if let loc @ 1 ... 195 = *location {
            self.regular_connections.get((loc as usize) - 1)
        } else {
            None
        }
    }
    // Alleyway-move neighbors of a 1-based location; None if out of range.
    pub fn alleyway_connections_for_location(&self, location: &u32) -> Option<&Vec<u32>> {
        if let loc @ 1 ... 195 = *location {
            self.alleyway_connections.get((loc as usize) - 1)
        } else {
            None
        }
    }
    // Union of regular and alleyway neighbors, deduplicated via a set;
    // the returned order is therefore unspecified.
    pub fn all_connections_for_location(&self, location: &u32) -> Option<Vec<&u32>> {
        if let loc @ 1 ... 195 = *location {
            let mut all_cnx_set = HashSet::new();
            if let Some(regular_cnx) = self.regular_connections.get(loc as usize - 1) {
                for cnx in regular_cnx {
                    all_cnx_set.insert(cnx);
                }
            }
            if let Some(alleyway_cnx) = self.alleyway_connections.get(loc as usize - 1) {
                for cnx in alleyway_cnx {
                    all_cnx_set.insert(cnx);
                }
            }
            let all_cnx = Vec::from_iter(all_cnx_set.into_iter());
            return Some(all_cnx);
        }
        None
    }
}
impl Index<usize> for Graph {
    type Output = Vec<u32>;
    /// Indexes the *regular* connections for a 1-based location; panics on
    /// out-of-range input (unlike `connections_for_location`, which
    /// returns `None`).
    fn index(&self, location: usize) -> &Vec<u32> {
        match location {
            MIN_INDEX...MAX_INDEX => &(self.regular_connections[location - 1]),
            loc => {
                panic!("Invalid location {} (must be between {} and {})",
                       loc, MIN_INDEX, MAX_INDEX)
            }
        }
    }
}
| true
|
4a177d41d0c1b2e4e7e4b76180488d4f1bd6c384
|
Rust
|
stuij/thalgar
|
/src/common.rs
|
UTF-8
| 1,948
| 3.828125
| 4
|
[] |
no_license
|
/// Big-endian reads and writes of an integer type against a byte buffer.
/// `addr` is a byte offset; out-of-range accesses panic via slice indexing.
pub trait MemAccess {
    fn read_mem(src: &[u8], addr: usize) -> Self;
    fn write_mem(src: &mut [u8], addr: usize, val: Self);
}

impl MemAccess for u8 {
    fn read_mem(src: &[u8], addr: usize) -> u8 {
        src[addr]
    }

    fn write_mem(src: &mut [u8], addr: usize, val: u8) {
        src[addr] = val;
    }
}

impl MemAccess for u16 {
    fn read_mem(src: &[u8], addr: usize) -> u16 {
        u16::from_be_bytes([src[addr], src[addr + 1]])
    }

    fn write_mem(src: &mut [u8], addr: usize, val: u16) {
        let bytes = val.to_be_bytes();
        src[addr] = bytes[0];
        src[addr + 1] = bytes[1];
    }
}

impl MemAccess for u32 {
    fn read_mem(src: &[u8], addr: usize) -> u32 {
        u32::from_be_bytes([src[addr], src[addr + 1], src[addr + 2], src[addr + 3]])
    }

    fn write_mem(src: &mut [u8], addr: usize, val: u32) {
        let bytes = val.to_be_bytes();
        for (offset, byte) in bytes.iter().enumerate() {
            src[addr + offset] = *byte;
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Big-endian round-trip checks for each supported width.
    #[test]
    fn read_u16_from_vec() {
        let word = vec!(0x12, 0x34, 0x56, 0x78);
        let half_word = u16::read_mem(&word, 0);
        assert_eq!(0x1234, half_word);
    }
    #[test]
    fn write_u16_to_vec() {
        let mut vec = vec!(1, 2, 3, 4, 5, 6, 7, 8);
        u16::write_mem(&mut vec, 0, 0xEEFF);
        assert_eq!(vec, vec!(0xEE, 0xFF, 3, 4, 5, 6, 7, 8));
    }
    #[test]
    fn read_u32_from_vec() {
        let first_word = vec!(0x12, 0x34, 0x56, 0x78);
        let word = u32::read_mem(&first_word, 0);
        assert_eq!(0x12345678, word);
    }
    #[test]
    fn write_u32_to_vec() {
        let mut vec = vec!(1, 2, 3, 4, 5, 6, 7, 8);
        u32::write_mem(&mut vec, 0, 0xCCDDEEFF);
        assert_eq!(vec, vec!(0xCC, 0xDD, 0xEE, 0xFF, 5, 6, 7, 8));
    }
}
| true
|
284ca9970884b44c3691ad75d28731887debf1fe
|
Rust
|
lallotta/chip8-wasm
|
/src/cpu.rs
|
UTF-8
| 10,423
| 2.875
| 3
|
[] |
no_license
|
use crate::display::{Display, FONT_SET};
use crate::keypad::Keypad;
/// CHIP-8 interpreter state.
pub struct Cpu {
    // 4 KB address space; the font sprites are loaded at offset 0 by reset().
    pub memory: [u8; 4096],
    // general-purpose registers V0..VF (VF doubles as a flag register)
    pub v: [u8; 16],
    // index register
    pub i: u16,
    // program counter
    pub pc: u16,
    // call stack of return addresses
    pub stack: [u16; 16],
    // stack pointer
    pub sp: u8,
    pub display: Display,
    pub keypad: Keypad,
    // set when the display changed and needs re-rendering
    pub draw_flag: bool,
    // delay timer
    pub dt: u8,
    //sound timer
    pub st: u8,
}
impl Cpu {
// Zero-initialized CPU. NOTE(review): `pc` starts at 0 here; `reset()`
// sets it to 0x200 — callers appear expected to call `reset()` before
// running. Confirm against the wasm entry point.
pub fn new() -> Cpu {
    Cpu {
        memory: [0; 4096],
        v: [0; 16],
        i: 0,
        pc: 0,
        stack: [0; 16],
        sp: 0,
        display: Display::new(),
        keypad: Keypad::new(),
        draw_flag: false,
        dt: 0,
        st: 0,
    }
}
// Fetches the 16-bit opcode at `pc` (big-endian byte pair).
fn read_opcode(&self) -> u16 {
    (self.memory[self.pc as usize] as u16) << 8 |
        self.memory[self.pc as usize + 1] as u16
}
// Decodes and dispatches one opcode. `pc` is advanced by 2 *before*
// dispatch, so jump/call/skip handlers overwrite or further adjust it.
fn execute_opcode(&mut self, opcode: u16) {
    // Standard CHIP-8 operand fields.
    let nnn = opcode & 0x0FFF;               // 12-bit address
    let kk = (opcode & 0x00FF) as u8;        // 8-bit immediate
    let n = (opcode & 0x000F) as usize;      // 4-bit immediate
    let x = ((opcode & 0x0F00) >> 8) as usize; // register index
    let y = ((opcode & 0x00F0) >> 4) as usize; // register index
    let nibbles = (
        opcode >> 12,
        (opcode & 0x0F00) >> 8,
        (opcode & 0x00F0) >> 4,
        opcode & 0x000F
    );
    self.pc += 2;
    match nibbles {
        (0, 0, 0xE, 0) => self.op_00e0(),
        (0, 0, 0xE, 0xE) => self.op_00ee(),
        (0x1, _, _, _) => self.op_1nnn(nnn),
        (0x2, _, _, _) => self.op_2nnn(nnn),
        (0x3, _, _, _) => self.op_3xkk(x, kk),
        (0x4, _, _, _) => self.op_4xkk(x, kk),
        (0x5, _, _, 0) => self.op_5xy0(x, y),
        (0x6, _, _, _) => self.op_6xkk(x, kk),
        (0x7, _, _, _) => self.op_7xkk(x, kk),
        (0x8, _, _, 0) => self.op_8xy0(x, y),
        (0x8, _, _, 0x1) => self.op_8xy1(x, y),
        (0x8, _, _, 0x2) => self.op_8xy2(x, y),
        (0x8, _, _, 0x3) => self.op_8xy3(x, y),
        (0x8, _, _, 0x4) => self.op_8xy4(x, y),
        (0x8, _, _, 0x5) => self.op_8xy5(x, y),
        (0x8, _, _, 0x6) => self.op_8xy6(x),
        (0x8, _, _, 0x7) => self.op_8xy7(x, y),
        (0x8, _, _, 0xE) => self.op_8xye(x),
        (0x9, _, _, 0) => self.op_9xy0(x, y),
        (0xA, _, _, _) => self.op_annn(nnn),
        (0xB, _, _, _) => self.op_bnnn(nnn),
        (0xC, _, _, _) => self.op_cxkk(x, kk),
        (0xD, _, _, _) => self.op_dxyn(x, y, n),
        (0xE, _, 0x9, 0xE) => self.op_ex9e(x),
        (0xE, _, 0xA, 0x1) => self.op_exa1(x),
        (0xF, _, 0, 0x7) => self.op_fx07(x),
        (0xF, _, 0, 0xA) => self.op_fx0a(x),
        (0xF, _, 0x1, 0x5) => self.op_fx15(x),
        (0xF, _, 0x1, 0x8) => self.op_fx18(x),
        (0xF, _, 0x1, 0xE) => self.op_fx1e(x),
        (0xF, _, 0x2, 0x9) => self.op_fx29(x),
        (0xF, _, 0x3, 0x3) => self.op_fx33(x),
        (0xF, _, 0x5, 0x5) => self.op_fx55(x),
        (0xF, _, 0x6, 0x5) => self.op_fx65(x),
        _ => panic!("unknown opcode: {:#x}", opcode)
    }
}
// Decrements both timers toward zero; called once per emulated cycle.
fn update_timers(&mut self) {
    if self.dt > 0 {
        self.dt -= 1;
    }
    if self.st > 0 {
        self.st -= 1;
    }
}
// One fetch/execute/timer step.
pub fn emulate_cycle(&mut self) {
    let opcode = self.read_opcode();
    self.execute_opcode(opcode);
    self.update_timers();
}
// Restores power-on state: clears memory/registers/display, sets `pc` to
// 0x200 (the conventional CHIP-8 program start address), and reloads the
// font sprites at the bottom of memory.
pub fn reset(&mut self) {
    self.memory = [0; 4096];
    self.v = [0; 16];
    self.i = 0;
    self.pc = 0x200;
    self.stack = [0; 16];
    self.sp = 0;
    self.display.clear();
    self.draw_flag = false;
    for i in 0..FONT_SET.len() {
        self.memory[i] = FONT_SET[i];
    }
}
// Acknowledge that the pending frame has been rendered.
pub fn unset_draw_flag(&mut self) {
    self.draw_flag = false;
}
// True when the display changed since the last acknowledgment.
pub fn draw_pending(&self) -> bool {
    self.draw_flag
}
    /// 00E0 — CLS: clear the display and mark it for redraw.
    fn op_00e0(&mut self) {
        self.display.clear();
        self.draw_flag = true;
    }
    /// 00EE — RET: pop the return address off the stack.
    /// Note: an empty stack (`sp == 0`) would underflow and panic here.
    fn op_00ee(&mut self) {
        self.sp -= 1;
        self.pc = self.stack[self.sp as usize];
    }
    /// 1nnn — JP addr: jump to `nnn`.
    fn op_1nnn(&mut self, nnn: u16) {
        self.pc = nnn;
    }
    /// 2nnn — CALL addr: push the (already advanced) pc and jump to `nnn`.
    fn op_2nnn(&mut self, nnn: u16) {
        self.stack[self.sp as usize] = self.pc;
        self.sp += 1;
        self.pc = nnn;
    }
    /// 3xkk — SE Vx, kk: skip the next instruction if Vx == kk.
    /// (pc was already advanced past this instruction, so +2 skips one more.)
    fn op_3xkk(&mut self, x: usize, kk: u8) {
        if self.v[x] == kk {
            self.pc += 2;
        }
    }
    /// 4xkk — SNE Vx, kk: skip the next instruction if Vx != kk.
    fn op_4xkk(&mut self, x: usize, kk: u8) {
        if self.v[x] != kk {
            self.pc += 2;
        }
    }
    /// 5xy0 — SE Vx, Vy: skip the next instruction if Vx == Vy.
    fn op_5xy0(&mut self, x: usize, y: usize) {
        if self.v[x] == self.v[y] {
            self.pc += 2;
        }
    }
    /// 6xkk — LD Vx, kk.
    fn op_6xkk(&mut self, x: usize, kk: u8) {
        self.v[x] = kk;
    }
    /// 7xkk — ADD Vx, kk: wrapping add, carry flag is NOT touched.
    fn op_7xkk(&mut self, x: usize, kk: u8) {
        self.v[x] = self.v[x].wrapping_add(kk);
    }
    /// 8xy0 — LD Vx, Vy.
    fn op_8xy0(&mut self, x: usize, y: usize) {
        self.v[x] = self.v[y];
    }
    /// 8xy1 — OR Vx, Vy.
    fn op_8xy1(&mut self, x: usize, y: usize) {
        self.v[x] |= self.v[y];
    }
    /// 8xy2 — AND Vx, Vy.
    fn op_8xy2(&mut self, x: usize, y: usize) {
        self.v[x] &= self.v[y];
    }
    /// 8xy3 — XOR Vx, Vy.
    fn op_8xy3(&mut self, x: usize, y: usize) {
        self.v[x] ^= self.v[y];
    }
    /// 8xy4 — ADD Vx, Vy: VF = 1 on carry (overflow), else 0.
    fn op_8xy4(&mut self, x: usize, y: usize) {
        let (sum, overflow) = self.v[x].overflowing_add(self.v[y]);
        self.v[0xF] = if overflow { 1 } else { 0 };
        self.v[x] = sum;
    }
    /// 8xy5 — SUB Vx, Vy: VF = 1 when there is NO borrow, else 0.
    fn op_8xy5(&mut self, x: usize, y: usize) {
        let (res, overflow) = self.v[x].overflowing_sub(self.v[y]);
        self.v[0xF] = if overflow { 0 } else { 1 };
        self.v[x] = res;
    }
    /// 8xy6 — SHR Vx: VF = least-significant bit of Vx before the shift.
    fn op_8xy6(&mut self, x: usize) {
        self.v[0xF] = self.v[x] & 1;
        self.v[x] >>= 1;
    }
    /// 8xy7 — SUBN Vx, Vy: Vx = Vy - Vx, VF = 1 when there is NO borrow.
    fn op_8xy7(&mut self, x: usize, y: usize) {
        let (res, overflow) = self.v[y].overflowing_sub(self.v[x]);
        self.v[0xF] = if overflow { 0 } else { 1 };
        self.v[x] = res;
    }
fn op_8xye(&mut self, x: usize) {
self.v[0xF] = self.v[x] & 0x80;
self.v[x] <<= 1;
}
    /// 9xy0 — SNE Vx, Vy: skip the next instruction if Vx != Vy.
    fn op_9xy0(&mut self, x: usize, y: usize) {
        if self.v[x] != self.v[y] {
            self.pc += 2;
        }
    }
    /// Annn — LD I, addr.
    fn op_annn(&mut self, nnn: u16) {
        self.i = nnn;
    }
    /// Bnnn — JP V0, addr: jump to nnn + V0.
    fn op_bnnn(&mut self, nnn: u16) {
        self.pc = self.v[0] as u16 + nnn;
    }
    /// Cxkk — RND Vx, kk: Vx = (random byte) AND kk.
    fn op_cxkk(&mut self, x: usize, kk: u8) {
        self.v[x] = (js_sys::Math::random() * 256f64) as u8 & kk;
    }
    /// Dxyn — DRW Vx, Vy, n: draw the n-byte sprite at memory[I..I+n] at
    /// (Vx, Vy); VF = 1 if any pixel was erased (collision).
    fn op_dxyn(&mut self, x: usize, y: usize, n: usize) {
        let col = self.v[x] as usize;
        let row = self.v[y] as usize;
        let sprite = &self.memory[self.i as usize..self.i as usize + n];
        let collision = self.display.draw_sprite(col, row, sprite);
        self.v[0xF] = if collision { 1 } else { 0 };
        self.draw_flag = true;
    }
    /// Ex9E — SKP Vx: skip if the key named by Vx is pressed.
    fn op_ex9e(&mut self, x: usize) {
        if self.keypad.is_pressed(self.v[x]) {
            self.pc += 2;
        }
    }
    /// ExA1 — SKNP Vx: skip if the key named by Vx is NOT pressed.
    fn op_exa1(&mut self, x: usize) {
        if !self.keypad.is_pressed(self.v[x]) {
            self.pc += 2;
        }
    }
    /// Fx07 — LD Vx, DT.
    fn op_fx07(&mut self, x: usize) {
        self.v[x] = self.dt;
    }
    /// Fx0A — LD Vx, K: wait for a key press. If no key is down, rewind pc
    /// so this instruction executes again next cycle.
    fn op_fx0a(&mut self, x: usize) {
        for (i, pressed) in (0u8..).zip(&self.keypad.keys) {
            if *pressed {
                self.v[x] = i;
                return;
            }
        }
        self.pc -= 2;
    }
    /// Fx15 — LD DT, Vx.
    fn op_fx15(&mut self, x: usize) {
        self.dt = self.v[x];
    }
    /// Fx18 — LD ST, Vx.
    fn op_fx18(&mut self, x: usize) {
        self.st = self.v[x];
    }
    /// Fx1E — ADD I, Vx. No overflow flag is set here; a u16 overflow
    /// would panic in debug builds.
    fn op_fx1e(&mut self, x: usize) {
        self.i += self.v[x] as u16;
    }
    /// Fx29 — LD F, Vx: point I at the font sprite for digit Vx
    /// (sprites are 5 bytes each, starting at address 0).
    fn op_fx29(&mut self, x: usize) {
        self.i = self.v[x] as u16 * 5;
    }
    /// Fx33 — LD B, Vx: store the BCD digits of Vx at I, I+1, I+2.
    fn op_fx33(&mut self, x: usize) {
        self.memory[self.i as usize] = self.v[x] / 100;
        self.memory[self.i as usize + 1] = (self.v[x] / 10) % 10;
        self.memory[self.i as usize + 2] = self.v[x] % 10;
    }
    /// Fx55 — LD [I], Vx: store V0..=Vx into memory starting at I.
    fn op_fx55(&mut self, x: usize) {
        let start = self.i as usize;
        self.memory[start..=start+x].copy_from_slice(&self.v[0..=x]);
    }
    /// Fx65 — LD Vx, [I]: load V0..=Vx from memory starting at I.
    fn op_fx65(&mut self, x: usize) {
        let start = self.i as usize;
        self.v[0..=x].copy_from_slice(&self.memory[start..=start+x]);
    }
}
#[cfg(test)]
mod tests {
    use super::Cpu;
    // NOTE: `execute_opcode` advances `pc` by 2 before decoding, so a
    // "skip" opcode that fires moves `pc` by 4 in total.
    #[test]
    fn test_op_00e0() {
        let mut cpu = Cpu::new();
        cpu.display.gfx[1] = 1;
        cpu.display.gfx[2] = 1;
        cpu.execute_opcode(0x00E0);
        assert_eq!(cpu.display.gfx[1], 0);
        assert_eq!(cpu.display.gfx[2], 0);
        assert!(cpu.draw_flag);
    }
    #[test]
    fn test_op_00ee() {
        let mut cpu = Cpu::new();
        let addr = 0x220;
        cpu.stack[0] = addr;
        cpu.sp = 1;
        cpu.pc = 0x300;
        cpu.execute_opcode(0x00EE);
        assert_eq!(cpu.pc, addr);
        assert_eq!(cpu.sp, 0);
    }
    #[test]
    fn test_op_1nnn() {
        let mut cpu = Cpu::new();
        cpu.pc = 0x220;
        cpu.execute_opcode(0x1210);
        assert_eq!(cpu.pc, 0x210);
    }
    #[test]
    fn test_op_2nnn() {
        let mut cpu = Cpu::new();
        cpu.pc = 0x300;
        cpu.execute_opcode(0x21AF);
        // The pushed return address is pc + 2 (past the CALL itself).
        assert_eq!(cpu.stack[0], 0x302);
        assert_eq!(cpu.sp, 1);
        assert_eq!(cpu.pc, 0x1AF);
    }
    #[test]
    fn test_op_3xkk() {
        let mut cpu = Cpu::new();
        let pc = 0x400;
        cpu.v[3] = 5;
        cpu.pc = pc;
        // No match: only the automatic +2 fetch advance.
        cpu.execute_opcode(0x3306);
        assert_eq!(cpu.pc, pc + 2);
        // Match: +2 fetch advance plus +2 skip.
        cpu.execute_opcode(0x3305);
        assert_eq!(cpu.pc, pc + 6);
    }
    #[test]
    fn test_op_4xkk() {
        let mut cpu = Cpu::new();
        let pc = 0x400;
        cpu.v[3] = 5;
        cpu.pc = pc;
        cpu.execute_opcode(0x4305);
        assert_eq!(cpu.pc, pc + 2);
        cpu.execute_opcode(0x4306);
        assert_eq!(cpu.pc, pc + 6);
    }
    #[test]
    fn test_op_5xy0() {
        let mut cpu = Cpu::new();
        let pc = 0x2AF;
        cpu.v[2] = 30;
        cpu.v[3] = 40;
        cpu.pc = pc;
        cpu.execute_opcode(0x5230);
        assert_eq!(cpu.pc, pc + 2);
        cpu.v[2] = 40;
        cpu.execute_opcode(0x5230);
        assert_eq!(cpu.pc, pc + 6);
    }
    #[test]
    fn test_op_6xkk() {
        let mut cpu = Cpu::new();
        cpu.v[4] = 10;
        cpu.execute_opcode(0x6455);
        assert_eq!(cpu.v[4], 0x55);
    }
}
| true
|
0b979349557d3f40b7431b233f596976fa7659ce
|
Rust
|
jwarwick/aoc_2018
|
/device/src/lib.rs
|
UTF-8
| 13,330
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
// (opcode, A, B, C) — A/B/C are register indices or immediates,
// depending on the opcode variant.
type Instruction = (OpCode, i64, i64, i64);
// The device has six general-purpose registers.
type Registers = [i64; 6];
use std::collections::HashSet;
/// The wristwatch-device instruction set: register (R) and immediate (I)
/// variants of add/mul/bit-ops/set, plus greater-than and equality tests.
/// `Nop` is the fallback for unknown mnemonics.
#[derive(Debug, Clone)]
enum OpCode {
    Nop,
    AddR, AddI,
    MulR, MulI,
    BanR, BanI,
    BorR, BorI,
    SetR, SetI,
    GtRI, GtIR, GtRR,
    EqRI, EqIR, EqRR,
}
/// A loaded program plus machine state.
#[derive(Debug)]
pub struct Device {
    // The six general-purpose registers.
    registers: Registers,
    // Current instruction pointer.
    ip: i64,
    // Index of the register bound to the instruction pointer (`#ip N`).
    ip_register: usize,
    // The decoded program.
    instructions: Vec<Instruction>,
}
impl Device {
    /// Parse a program listing into a `Device` with the given initial
    /// registers.
    ///
    /// Lines of the form `#ip N` select the register bound to the
    /// instruction pointer; every other line is `<mnemonic> A B C`.
    /// Panics (via `expect`) on blank lines or non-numeric operands.
    pub fn load(content: &str, registers: &Registers) -> Device {
        let ip = 0;
        let mut ip_register = 0;
        let mut instructions = Vec::new();
        for l in content.lines() {
            let s: Vec<_> = l.split_whitespace().collect();
            let opcode: String = s.get(0).expect("Opcode string").to_string();
            if opcode == "#ip" {
                ip_register = Device::num_at_index(&s, 1) as usize;
            } else {
                instructions.push((Device::op_string_to_code(&opcode),
                                   Device::num_at_index(&s, 1) as i64,
                                   Device::num_at_index(&s, 2) as i64,
                                   Device::num_at_index(&s, 3) as i64));
            }
        }
        Device {registers: *registers, ip, ip_register, instructions}
    }
pub fn execute_max(&mut self, max_steps: &usize) -> usize{
let mut s: HashSet<i64> = HashSet::new();
let mut cnt: usize = 0;
while self.ip_is_valid() && cnt < *max_steps {
cnt = cnt + 1;
self.registers[self.ip_register] = self.ip;
let inst = self.instructions[self.ip as usize].clone();
//if self.ip == 28 {
// let c = self.registers[2];
// if !s.contains(&c) {
// s.insert(c);
// println!("{}", c);
//
// }
// println!("{}\t{:?}\t{:?}", self.ip, inst, self.registers);
//}
self.op(&inst);
self.ip = self.registers[self.ip_register];
self.ip += 1;
//println!("\t\t=>\t{}\t{:?}", self.ip, self.registers);
}
cnt
}
pub fn execute(&mut self) -> usize {
self.execute_max(&std::usize::MAX)
}
pub fn registers(&self) -> Registers {
self.registers.clone()
}
fn num_at_index(v: &Vec<&str>, i: usize) -> isize {
let val_str: String = v.get(i).expect("Num at index").to_string();
val_str.parse().expect("Number in string")
}
fn op_string_to_code(s: &String) -> OpCode {
match s.as_str() {
"addr" => OpCode::AddR,
"addi" => OpCode::AddI,
"mulr" => OpCode::MulR,
"muli" => OpCode::MulI,
"banr" => OpCode::BanR,
"bani" => OpCode::BanI,
"borr" => OpCode::BorR,
"bori" => OpCode::BorI,
"setr" => OpCode::SetR,
"seti" => OpCode::SetI,
"gtri" => OpCode::GtRI,
"gtir" => OpCode::GtIR,
"gtrr" => OpCode::GtRR,
"eqri" => OpCode::EqRI,
"eqir" => OpCode::EqIR,
"eqrr" => OpCode::EqRR,
c => {println!("Unknown opcode: {}", c); OpCode::Nop},
}
}
    /// True while the instruction pointer addresses a real instruction.
    fn ip_is_valid(&self) -> bool {
        self.ip >= 0 && self.ip < self.instructions.len() as i64
    }
    /// Dispatch one decoded instruction to its implementation.
    /// `Nop` (and any future unhandled variant) falls through to a warning.
    fn op(&mut self, instruction: &Instruction) {
        let op = &instruction.0;
        match op {
            OpCode::AddR => self.addr(&instruction),
            OpCode::AddI => self.addi(&instruction),
            OpCode::MulR => self.mulr(&instruction),
            OpCode::MulI => self.muli(&instruction),
            OpCode::BanR => self.banr(&instruction),
            OpCode::BanI => self.bani(&instruction),
            OpCode::BorR => self.borr(&instruction),
            OpCode::BorI => self.bori(&instruction),
            OpCode::SetR => self.setr(&instruction),
            OpCode::SetI => self.seti(&instruction),
            OpCode::GtRR => self.gtrr(&instruction),
            OpCode::GtRI => self.gtri(&instruction),
            OpCode::GtIR => self.gtir(&instruction),
            OpCode::EqRR => self.eqrr(&instruction),
            OpCode::EqRI => self.eqri(&instruction),
            OpCode::EqIR => self.eqir(&instruction),
            c => println!("Unknown instruction {:?}", c),
        }
    }
    /// addr: reg[C] = reg[A] + reg[B].
    fn addr(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] +
            self.registers[instruction.2 as usize];
    }
    /// addi: reg[C] = reg[A] + B.
    fn addi(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] +
            instruction.2;
    }
    /// mulr: reg[C] = reg[A] * reg[B].
    fn mulr(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] *
            self.registers[instruction.2 as usize];
    }
    /// muli: reg[C] = reg[A] * B.
    fn muli(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] *
            instruction.2;
    }
    /// banr: reg[C] = reg[A] & reg[B].
    fn banr(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] &
            self.registers[instruction.2 as usize];
    }
    /// bani: reg[C] = reg[A] & B.
    fn bani(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] &
            instruction.2;
    }
    /// borr: reg[C] = reg[A] | reg[B].
    fn borr(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] |
            self.registers[instruction.2 as usize];
    }
    /// bori: reg[C] = reg[A] | B.
    fn bori(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] =
            self.registers[instruction.1 as usize] |
            instruction.2;
    }
    /// setr: reg[C] = reg[A] (B is ignored).
    fn setr(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] = self.registers[instruction.1 as usize]
    }
    /// seti: reg[C] = A (B is ignored).
    fn seti(&mut self, instruction: &Instruction) {
        self.registers[instruction.3 as usize] = instruction.1;
    }
fn gtir(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if instruction.1 > self.registers[instruction.2 as usize] {
1
} else {
0
};
}
fn gtri(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if self.registers[instruction.1 as usize] > instruction.2 {
1
} else {
0
};
}
fn gtrr(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if self.registers[instruction.1 as usize] > self.registers[instruction.2 as usize] {
1
} else {
0
};
}
fn eqir(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if instruction.1 == self.registers[instruction.2 as usize] {
1
} else {
0
};
}
fn eqri(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if self.registers[instruction.1 as usize] == instruction.2 {
1
} else {
0
};
}
fn eqrr(&mut self, instruction: &Instruction) {
self.registers[instruction.3 as usize] =
if self.registers[instruction.1 as usize] == self.registers[instruction.2 as usize] {
1
} else {
0
};
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Each single-instruction test binds register 5 as the ip register;
    // the program runs exactly once and then the ip leaves the program.
    // End-to-end sample from the problem statement.
    #[test]
    fn sample_input() {
        let contents = "#ip 0
seti 5 0 1
seti 6 0 2
addi 0 1 0
addr 1 2 3
setr 1 0 0
seti 8 0 4
seti 9 0 5";
        let mut d = Device::load(contents, &[0; 6]);
        d.execute();
        let r = d.registers();
        assert_eq!(r[0], 6);
    }
    #[test]
    fn test_addi() {
        let r: Registers = [3, 0, 7, 1, 0, 0];
        let i: Instruction = (OpCode::AddI, 0, 7, 1);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 10, 7, 1, 0, 0]);
    }
    #[test]
    fn test_addr() {
        let r: Registers = [3, 0, 7, 1, 0, 0];
        let i: Instruction = (OpCode::AddR, 0, 2, 1);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 10, 7, 1, 0, 0]);
    }
    #[test]
    fn test_muli() {
        let r: Registers = [3, 0, 7, 1, 0, 0];
        let i: Instruction = (OpCode::MulI, 0, 2, 1);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 6, 7, 1, 0, 0]);
    }
    #[test]
    fn test_mulr() {
        let r: Registers = [3, 0, 7, 1, 0, 0];
        let i: Instruction = (OpCode::MulR, 0, 2, 3);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 0, 7, 21, 0, 0]);
    }
    #[test]
    fn test_bani() {
        let r: Registers = [3, 0, 255, 16, 0, 0];
        let i: Instruction = (OpCode::BanI, 2, 1, 3);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 0, 255, 1, 0, 0]);
    }
    #[test]
    fn test_banr() {
        let r: Registers = [3, 1, 255, 16, 0, 0];
        let i: Instruction = (OpCode::BanR, 2, 1, 3);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 1, 255, 1, 0, 0]);
    }
    #[test]
    fn test_bori() {
        let r: Registers = [3, 0, 254, 16, 0, 0];
        let i: Instruction = (OpCode::BorI, 2, 1, 1);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 255, 254, 16, 0, 0]);
    }
    #[test]
    fn test_borr() {
        let r: Registers = [3, 254, 1, 16, 0, 0];
        let i: Instruction = (OpCode::BorR, 2, 1, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [255, 254, 1, 16, 0, 0]);
    }
    #[test]
    fn test_seti() {
        let r: Registers = [3, 0, 254, 16, 0, 0];
        let i: Instruction = (OpCode::SetI, 2, 1, 1);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 2, 254, 16, 0, 0]);
    }
    #[test]
    fn test_setr() {
        let r: Registers = [3, 254, 1, 16, 0, 0];
        let i: Instruction = (OpCode::SetR, 1, 1, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [254, 254, 1, 16, 0, 0]);
    }
    #[test]
    fn test_gtir() {
        let r: Registers = [3, 6, 1, 16, 0, 0];
        let i: Instruction = (OpCode::GtIR, 7, 1, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [1, 6, 1, 16, 0, 0]);
    }
    #[test]
    fn test_gtri() {
        let r: Registers = [3, 6, 1, 16, 0, 0];
        let i: Instruction = (OpCode::GtRI, 1, 7, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [0, 6, 1, 16, 0, 0]);
    }
    #[test]
    fn test_gtrr() {
        let r: Registers = [3, 6, 1, 16, 0, 0];
        let i: Instruction = (OpCode::GtRR, 2, 1, 2);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 6, 0, 16, 0, 0]);
    }
    #[test]
    fn test_eqir() {
        let r: Registers = [3, 6, 1, 16, 0, 0];
        let i: Instruction = (OpCode::EqIR, 7, 1, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [0, 6, 1, 16, 0, 0]);
    }
    #[test]
    fn test_eqri() {
        let r: Registers = [3, 6, 1, 16, 0, 0];
        let i: Instruction = (OpCode::EqRI, 1, 7, 0);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [0, 6, 1, 16, 0, 0]);
    }
    #[test]
    fn test_eqrr() {
        let r: Registers = [3, 6, 6, 16, 0, 0];
        let i: Instruction = (OpCode::EqRR, 2, 1, 2);
        let mut device = Device {ip: 0, ip_register: 5, registers: r, instructions: vec![i]};
        device.execute();
        assert_eq!(device.registers, [3, 6, 1, 16, 0, 0]);
    }
}
| true
|
e86593eaa5a8c911dd0f02193c86d3658dc76dab
|
Rust
|
studylessshape/qcpro
|
/src/addition/string_addition.rs
|
UTF-8
| 1,482
| 3.328125
| 3
|
[] |
no_license
|
use std::fs;
/// Byte index of the first occurrence of `pattern` in `s`, if any.
fn first_index(s: &str, pattern: &str) -> Option<usize> {
    // `str::find` is exactly "byte index of the first match"; the old
    // `match_indices(..).next()` + destructure did the same by hand.
    // Takes `&str` instead of `&String` (call sites coerce).
    s.find(pattern)
}
/// Get the project name either from the last component of a directory
/// path (`is_directory == true`) or from a `project(<name>)` marker in
/// the file at `source`.
///
/// Returns `None` when no name can be determined: empty last component,
/// unreadable or non-UTF-8 file, or no `project(` marker found.
pub fn get_project_name(source: &String, is_directory: bool) -> Option<String> {
    let project_name = if is_directory {
        // Everything after the last path separator ('/' or '\'); the
        // whole string when there is no separator.
        match source.rfind(|c| c == '\\' || c == '/') {
            Some(idx) => source[idx + 1..].to_string(),
            None => source.clone(),
        }
    } else {
        // Bug fix: the old code looked up a *byte* index with
        // `match_indices` and then used it to index a `Vec<char>`, which
        // goes wrong when non-ASCII text precedes the marker. Plain byte
        // slicing keeps the index spaces consistent. It also panicked on
        // invalid UTF-8; now that simply yields `None`.
        let contents = String::from_utf8(fs::read(source).ok()?).ok()?;
        let pat = "project(";
        let start = contents.find(pat)? + pat.len();
        let rest = &contents[start..];
        // Name runs up to the closing ')' (or end of file if unclosed).
        let end = rest.find(')').unwrap_or(rest.len());
        rest[..end].to_string()
    };
    if project_name.is_empty() {
        None
    } else {
        Some(project_name)
    }
}
| true
|
4d7de8e1c5346b16992c14779822e237d114a73f
|
Rust
|
PsypherPunk/advent-of-code
|
/2017/08/src/main.rs
|
UTF-8
| 413
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
use std::fs;
use ::day08::*;
/// Entry point: load the puzzle input and print both answers.
fn main() {
    let input = fs::read_to_string("input.txt").expect("Error reading input.txt");
    let cpu = get_cpu(&input);
    let largest_final = cpu.registers.values().max().unwrap();
    println!(
        "What is the largest value in any register…? {}",
        largest_final,
    );
    println!(
        "…the highest value held in any register during this process… {}",
        cpu.highest,
    );
}
| true
|
1d2aebde7cbe81b575f1c642fd4c875f9a1fb054
|
Rust
|
niconicoj/amethyst-physics
|
/src/resources/context.rs
|
UTF-8
| 724
| 2.609375
| 3
|
[] |
no_license
|
use serde::Deserialize;
use ron::de::from_reader;
use std::error::Error;
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
/// Map/world parameters loaded from a RON configuration file.
#[derive(Clone, Copy, Deserialize)]
pub struct Context {
    /// Map width (units are whatever the consuming systems expect).
    pub map_width: f32,
    /// Map height.
    pub map_height: f32,
    /// Global scale factor.
    pub scale: f32,
}
impl Default for Context {
fn default() -> Self {
Context {
map_width: 0.,
map_height: 0.,
scale: 1.,
}
}
}
impl Context {
    /// Load a `Context` from the RON file at `path`.
    ///
    /// Errors if the file cannot be opened or fails to deserialize.
    pub fn from_config_file<P: AsRef<Path>>(path: P) -> Result<Context, Box<dyn Error>> {
        let file = File::open(path)?;
        let reader = BufReader::new(file);
        let context = from_reader(reader)?;
        Ok(context)
    }
}
| true
|
c2d084763900cca75d0adf0df38fbe673e997090
|
Rust
|
mmstick/redox
|
/libstd/src/path.rs
|
UTF-8
| 1,396
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
use fmt;
use mem;
use core_collections::borrow::{Cow, IntoCow};
use string::String;
/// Borrowed view returned by `PathBuf::display`; formats the path text.
pub struct Display<'a> {
    string: &'a str
}
impl<'a> fmt::Display for Display<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.string)
    }
}
/// A borrowed path slice (unsized), analogous to `str`.
pub struct Path {
    pub inner: str,
}
impl Path {
    /// Create a new path
    /// # Safety
    /// This uses the same logic in libstd, it should be safe for valid &str
    pub fn new<S: AsRef<str> + ?Sized>(s: &S) -> &Path {
        // SAFETY-NOTE(review): reinterprets `&str` as `&Path`, relying on
        // the single-field wrapper having the same layout as `str`. This
        // mirrors libstd's approach, but the struct is not marked
        // `#[repr(transparent)]` — confirm the layout guarantee holds on
        // this toolchain.
        unsafe { mem::transmute(s.as_ref()) }
    }
}
// Blanket conversions so APIs can take `impl AsRef<Path>`.
impl AsRef<Path> for str {
    fn as_ref(&self) -> &Path {
        Path::new(self)
    }
}
impl AsRef<Path> for String {
    fn as_ref(&self) -> &Path {
        Path::new(self)
    }
}
impl AsRef<Path> for PathBuf {
    fn as_ref(&self) -> &Path {
        Path::new(&self.inner)
    }
}
/// An owned, growable path, backed by a UTF-8 `String`.
#[derive(Debug)]
pub struct PathBuf {
    pub inner: String,
}
impl From<String> for PathBuf {
    /// Wrap an owned `String` as a `PathBuf` without copying.
    fn from(inner: String) -> PathBuf {
        PathBuf { inner: inner }
    }
}
impl PathBuf {
    /// The path as UTF-8; always `Some` since paths here are `String`-backed.
    pub fn to_str(&self) -> Option<&str> {
        Some(&self.inner)
    }
    /// Lossy conversion kept for std-API parity; never actually lossy here,
    /// but it does clone the backing string.
    pub fn to_string_lossy(&self) -> Cow<str> {
        self.inner.clone().into_cow()
    }
    /// An owned copy of the path text.
    pub fn to_string(&self) -> String {
        self.inner.clone()
    }
    /// A displayable view of the path (std-API parity).
    pub fn display(&self) -> Display {
        Display {
            string: &self.inner
        }
    }
}
| true
|
febe9f59017fb1bf705b2ef4e2881e1a8cf5a9ef
|
Rust
|
creativcoder/forest
|
/utils/bitfield/src/rleplus/mod.rs
|
UTF-8
| 7,733
| 2.875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
//! # RLE+ Bitset Encoding
//!
//! (from https://github.com/filecoin-project/specs/blob/master/src/listings/data_structures.md)
//!
//! RLE+ is a lossless compression format based on [RLE](https://en.wikipedia.org/wiki/Run-length_encoding).
//! Its primary goal is to reduce the size in the case of many individual bits, where RLE breaks down quickly,
//! while keeping the same level of compression for large sets of contiguous bits.
//!
//! In tests it has been shown to be more compact than RLE itself, as well as [Concise](https://arxiv.org/pdf/1004.0403.pdf) and [Roaring](https://roaringbitmap.org/).
//!
//! ## Format
//!
//! The format consists of a header, followed by a series of blocks, of which there are three different types.
//!
//! The format can be expressed as the following [BNF](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) grammar.
//!
//! ```text
//! <encoding> ::= <header> <blocks>
//! <header> ::= <version> <bit>
//! <version> ::= "00"
//! <blocks> ::= <block> <blocks> | ""
//! <block> ::= <block_single> | <block_short> | <block_long>
//! <block_single> ::= "1"
//! <block_short> ::= "01" <bit> <bit> <bit> <bit>
//! <block_long> ::= "00" <unsigned_varint>
//! <bit> ::= "0" | "1"
//! ```
//!
//! An `<unsigned_varint>` is defined as specified [here](https://github.com/multiformats/unsigned-varint).
//!
//! ### Header
//!
//! The header indicates the very first bit of the bit vector to encode. This means the first bit is always
//! the same for the encoded and non encoded form.
//!
//! ### Blocks
//!
//! The blocks represent how many bits, of the current bit type there are. As `0` and `1` alternate in a bit vector
//! the initial bit, which is stored in the header, is enough to determine if a length is currently referencing
//! a set of `0`s, or `1`s.
//!
//! #### Block Single
//!
//! If the running length of the current bit is only `1`, it is encoded as a single set bit.
//!
//! #### Block Short
//!
//! If the running length is less than `16`, it can be encoded into up to four bits, which a short block
//! represents. The length is encoded into a 4 bits, and prefixed with `01`, to indicate a short block.
//!
//! #### Block Long
//!
//! If the running length is `16` or larger, it is encoded into a varint, and then prefixed with `00` to indicate
//! a long block.
//!
//!
//! > **Note:** The encoding is unique, so no matter which algorithm for encoding is used, it should produce
//! > the same encoding, given the same input.
//!
mod reader;
mod writer;
pub use reader::BitReader;
pub use writer::BitWriter;
use super::{BitField, Result};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::borrow::Cow;
impl Serialize for BitField {
    /// Serializes as the compact RLE+ byte encoding (via `serde_bytes`).
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let bytes = self.to_bytes();
        serde_bytes::serialize(&bytes, serializer)
    }
}
impl<'de> Deserialize<'de> for BitField {
    /// Deserializes from the RLE+ byte encoding produced by `serialize`.
    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let bytes: Cow<'de, [u8]> = serde_bytes::deserialize(deserializer)?;
        Self::from_bytes(&bytes).map_err(serde::de::Error::custom)
    }
}
impl BitField {
    /// Decodes RLE+ encoded bytes into a bit field.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self> {
        let mut reader = BitReader::new(bytes);
        // Only version 0 of the encoding exists; reject anything else.
        let version = reader.read(2);
        if version != 0 {
            return Err("incorrect version");
        }
        // The header stores the value of the first run; runs alternate
        // between 0s and 1s from there, so only run lengths are encoded.
        let mut next_value = reader.read(1) == 1;
        let mut ranges = Vec::new();
        let mut index = 0;
        while let Some(len) = reader.read_len()? {
            let start = index;
            index += len;
            let end = index;
            // Only runs of 1s become ranges; runs of 0s just advance.
            if next_value {
                ranges.push(start..end);
            }
            next_value = !next_value;
        }
        Ok(Self {
            ranges,
            ..Default::default()
        })
    }
    /// Turns a bit field into its RLE+ encoded form.
    pub fn to_bytes(&self) -> Vec<u8> {
        let mut iter = self.ranges();
        // An empty bit field encodes to an empty byte string.
        let first_range = match iter.next() {
            Some(range) => range,
            None => return Default::default(),
        };
        let mut writer = BitWriter::new();
        writer.write(0, 2); // version 00
        if first_range.start == 0 {
            writer.write(1, 1); // the first bit is a 1
        } else {
            writer.write(0, 1); // the first bit is a 0
            writer.write_len(first_range.start); // the number of leading 0s
        }
        writer.write_len(first_range.len());
        let mut index = first_range.end;
        // for each range of 1s we first encode the number of 0s that came prior
        // before encoding the number of 1s
        for range in iter {
            writer.write_len(range.start - index); // zeros
            writer.write_len(range.len()); // ones
            index = range.end;
        }
        writer.finish()
    }
}
#[cfg(test)]
mod tests {
    use super::{
        super::{bitfield, ranges_from_bits},
        BitField, BitWriter,
    };
    use rand::{Rng, SeedableRng};
    use rand_xorshift::XorShiftRng;
    // Each case feeds raw bits through the writer and checks the decoded
    // bit field against the expected one.
    #[test]
    fn test() {
        for (bits, expected) in vec![
            (vec![], bitfield![]),
            (
                vec![
                    0, 0, // version
                    1, // starts with 1
                    0, 1, // fits into 4 bits
                    0, 0, 0, 1, // 8 - 1
                ],
                bitfield![1, 1, 1, 1, 1, 1, 1, 1],
            ),
            (
                vec![
                    0, 0, // version
                    1, // starts with 1
                    0, 1, // fits into 4 bits
                    0, 0, 1, 0, // 4 - 1
                    1, // 1 - 0
                    0, 1, // fits into 4 bits
                    1, 1, 0, 0, // 3 - 1
                ],
                bitfield![1, 1, 1, 1, 0, 1, 1, 1],
            ),
            (
                vec![
                    0, 0, // version
                    1, // starts with 1
                    0, 0, // does not fit into 4 bits
                    1, 0, 0, 1, 1, 0, 0, 0, // 25 - 1
                ],
                bitfield![
                    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
                ],
            ),
            // when a length of 0 is encountered, the rest of the encoded bits should be ignored
            (
                vec![
                    0, 0, // version
                    1, // starts with 1
                    1, // 1 - 1
                    0, 1, // fits into 4 bits
                    0, 0, 0, 0, // 0 - 0
                    1, // 1 - 1
                ],
                bitfield![1],
            ),
        ] {
            let mut writer = BitWriter::new();
            for bit in bits {
                writer.write(bit, 1);
            }
            let bf = BitField::from_bytes(&writer.finish()).unwrap();
            assert_eq!(bf, expected);
        }
    }
    // Random bit sets must survive a ranges -> BitField -> ranges round trip.
    #[test]
    fn roundtrip() {
        let mut rng = XorShiftRng::seed_from_u64(1);
        for _i in 0..1000 {
            let len: usize = rng.gen_range(0, 1000);
            let bits: Vec<_> = (0..len).filter(|_| rng.gen::<bool>()).collect();
            let ranges: Vec<_> = ranges_from_bits(bits.clone()).collect();
            let bf = BitField::from_ranges(ranges_from_bits(bits));
            assert_eq!(bf.ranges().collect::<Vec<_>>(), ranges);
        }
    }
}
| true
|
9d324fa13610dda80e0e602b3686f5a6385dec8d
|
Rust
|
amadeusine/pdbtbx
|
/src/structs/atom.rs
|
UTF-8
| 28,144
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#![allow(dead_code)]
use crate::reference_tables;
use crate::structs::*;
use crate::transformation::*;
use doc_cfg::doc_cfg;
use std::cmp::Ordering;
use std::fmt;
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
/// Process-wide source of the unique `counter` value handed to every
/// `Atom` at construction time (see `Atom::new`).
static ATOM_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// A struct to represent a single Atom in a protein
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[derive(Debug)]
pub struct Atom {
    /// The unique serial number given to this atom
    counter: usize,
    /// Determines if this atom is an hetero atom (true), a non standard atom, or a normal atom (false)
    hetero: bool,
    /// The serial number of the Atom, should be unique within its model
    serial_number: usize,
    /// The name of the Atom, can only use the standard allowed characters
    name: String,
    /// The X position of the Atom (Å)
    x: f64,
    /// The Y position of the Atom (Å)
    y: f64,
    /// The Z position of the Atom (Å)
    z: f64,
    /// The occupancy of the Atom
    occupancy: f64,
    /// The B-factor (or temperature factor) of the Atom
    b_factor: f64,
    /// The element of the Atom, can only use the standard allowed characters
    element: String,
    /// The charge of the Atom
    charge: isize,
    /// The anisotropic temperature factors, if applicable
    atf: Option<[[f64; 3]; 3]>,
}
impl Atom {
/// Create a new Atom
#[allow(clippy::too_many_arguments)]
pub fn new(
hetero: bool,
serial_number: usize,
atom_name: &str,
x: f64,
y: f64,
z: f64,
occupancy: f64,
b_factor: f64,
element: &str,
charge: isize,
) -> Option<Atom> {
if valid_identifier(atom_name)
&& valid_identifier(element)
&& x.is_finite()
&& y.is_finite()
&& z.is_finite()
&& occupancy.is_finite()
&& b_factor.is_finite()
{
Some(Atom {
counter: ATOM_COUNTER.fetch_add(1, AtomicOrdering::SeqCst),
hetero,
serial_number,
name: atom_name.trim().to_ascii_uppercase(),
x,
y,
z,
occupancy,
b_factor,
element: element.trim().to_ascii_uppercase(),
charge,
atf: None,
})
} else {
None
}
}
    /// Get the unique immutable counter for this atom
    pub(crate) fn counter(&self) -> usize {
        // Assigned once from the global ATOM_COUNTER at construction; never reused.
        self.counter
    }
    /// Get if this atom is an hetero atom (`true`), a non standard atom, or a normal atom (`false`)
    pub fn hetero(&self) -> bool {
        self.hetero
    }
    /// Set if this atom is an hetero atom (`true`), a non standard atom, or a normal atom (`false`)
    pub fn set_hetero(&mut self, new_hetero: bool) {
        self.hetero = new_hetero
    }
    /// Get the position of the atom as a tuple of `f64`, in the following order: (x, y, z).
    /// Given in Aͦ as defined by PDB, to be specific in the orthogonal coordinate system.
    pub fn pos(&self) -> (f64, f64, f64) {
        (self.x, self.y, self.z)
    }
    /// Get the position of the atom as an array of `f64`, in the following order: [x, y, z].
    /// Given in Aͦ as defined by PDB, to be specific in the orthogonal coordinate system.
    /// This function is only included for use with `Point` from rstar, so it will be removed
    /// as soon as rstar implements `Point` for Tuples (<https://github.com/georust/rstar/pull/57>).
    #[doc_cfg(feature = "rstar")]
    pub fn pos_array(&self) -> [f64; 3] {
        [self.x, self.y, self.z]
    }
/// Set the position of the atom as a tuple of `f64`, in the following order: (x, y, z).
/// It fails if one or more of the numbers is not finite (`f64.is_finite()`).
pub fn set_pos(&mut self, new_pos: (f64, f64, f64)) -> Result<(), String> {
if new_pos.0.is_finite() && new_pos.1.is_finite() && new_pos.2.is_finite() {
self.x = new_pos.0;
self.y = new_pos.1;
self.z = new_pos.2;
Ok(())
} else {
Err(format!(
"One (or more) of values of the new position is not finite for atom {} values {:?}",
self.serial_number, new_pos
))
}
}
/// Get the X position of the atom.
/// Given in Aͦ as defined by PDB, to be specific in the orthogonal coordinate system.
/// This number has a precision of 8.3 in PDB files and 5 decimal places of precision in mmCIF files.
pub fn x(&self) -> f64 {
self.x
}
/// Set the X position of the atom in Aͦ.
/// It fails if `new_pos` is not finite (`f64.is_finite()`).
pub fn set_x(&mut self, new_pos: f64) -> Result<(), String> {
if new_pos.is_finite() {
self.x = new_pos;
Ok(())
} else {
Err(format!(
"The value of the new x position is not finite for atom {} value {}",
self.serial_number, new_pos
))
}
}
/// Get the Y position of the atom.
/// Given in Aͦ as defined by PDB, to be specific in the orthogonal coordinate system.
/// This number has a precision of 8.3 in PDB files and 5 decimal places of precision in mmCIF files.
pub fn y(&self) -> f64 {
self.y
}
/// Set the Y position of the atom.
/// It fails if `new_pos` is not finite (`f64.is_finite()`).
pub fn set_y(&mut self, new_pos: f64) -> Result<(), String> {
if new_pos.is_finite() {
self.y = new_pos;
Ok(())
} else {
Err(format!(
"The value of the new y position is not finite for atom {} value {}",
self.serial_number, new_pos
))
}
}
/// Get the Z position of the atom.
/// Given in Aͦ as defined by PDB, to be specific in the orthogonal coordinate system.
/// This number has a precision of 8.3 in PDB files and 5 decimal places of precision in mmCIF files.
pub fn z(&self) -> f64 {
self.z
}
/// Set the Z position of the atom.
/// It fails if `new_pos` is not finite (`f64.is_finite()`).
pub fn set_z(&mut self, new_pos: f64) -> Result<(), String> {
if new_pos.is_finite() {
self.z = new_pos;
Ok(())
} else {
Err(format!(
"The value of the new z position is not finite for atom {} value {}",
self.serial_number, new_pos
))
}
}
/// Get the serial number of the atom.
/// This number combined with the alt_loc from the Conformer of this Atom is defined to be unique in the containing model, which is not enforced.
/// THe precision of this number is 5 digits in PDB files.
pub fn serial_number(&self) -> usize {
self.serial_number
}
/// Set the serial number of the atom.
/// This number combined with the alt_loc from the Conformer of this Atom is defined to be unique in the containing model, which is not enforced.
pub fn set_serial_number(&mut self, new_serial_number: usize) {
self.serial_number = new_serial_number;
}
/// Get the name of the atom. The name will be trimmed (whitespace removed) and changed to ASCII uppercase.
/// For PDB files the name is max 4 characters.
pub fn name(&self) -> &str {
&self.name
}
/// Set the name of the atom. The name will be trimmed (whitespace removed) and changed to ASCII uppercase as requested by PDB/PDBx standard.
/// For PDB files the name can at most contain 4 characters.
/// If the name is invalid an error message is provided.
/// The name can only contain valid characters, the ASCII graphic characters (`char.is_ascii_graphic() || char == ' '`).
pub fn set_name(&mut self, new_name: &str) -> Result<(), String> {
if !valid_identifier(new_name) {
Err(format!(
"New name has invalid characters for atom {} name {}",
self.serial_number, new_name
))
} else {
self.name = new_name.trim().to_ascii_uppercase();
Ok(())
}
}
    /// Get the occupancy or Q factor of the atom. This indicates the fraction of unit cells in which this atom is present, in the normal case this will be one (1) and it can range between 1 and 0 (inclusive).
    /// This number has a precision of 6.2 in PDB files and 5 decimal places of precision in mmCIF files.
    /// (Only non-negative finite values can be stored; see `set_occupancy`.)
    pub fn occupancy(&self) -> f64 {
        self.occupancy
    }
/// Set the occupancy or Q factor of the atom.
/// It fails if `new_occupancy` is not finite (`f64.is_finite()`) or if it is negative.
pub fn set_occupancy(&mut self, new_occupancy: f64) -> Result<(), String> {
if new_occupancy.is_finite() {
if new_occupancy >= 0.0 {
self.occupancy = new_occupancy;
Ok(())
} else {
Err(format!(
"The value of the new occupancy is negative for atom {} value {}",
self.serial_number, new_occupancy
))
}
} else {
Err(format!(
"The value of the new occupancy is not finite for atom {} value {}",
self.serial_number, new_occupancy
))
}
}
    /// Get the B factor or temperature factor of the atom.
    /// This indicates the uncertainty in the position of the atom as seen over all unit cells in the whole crystal.
    /// A low uncertainty is modelled with a low B factor, with zero uncertainty being equal to a B factor of 0. A higher uncertainty is modelled by a high B factor.
    /// This number has a precision of 6.2 in PDB files and 5 decimal places of precision in mmCIF files.
    /// (Only non-negative finite values can be stored; see `set_b_factor`.)
    pub fn b_factor(&self) -> f64 {
        self.b_factor
    }
/// Set the B factor or temperature factor of the atom.
/// It fails if `new_b_factor` is not finite (`f64.is_finite()`) or if it is negative.
pub fn set_b_factor(&mut self, new_b_factor: f64) -> Result<(), String> {
if new_b_factor.is_finite() {
if new_b_factor >= 0.0 {
self.b_factor = new_b_factor;
Ok(())
} else {
Err(format!(
"The value of the new b_factor is negative for atom {} value {}",
self.serial_number, new_b_factor
))
}
} else {
Err(format!(
"The value of the new b_factor is not finite for atom {} value {}",
self.serial_number, new_b_factor
))
}
}
    /// Get the element of this atom.
    /// In PDB files the element can at most contain 2 characters.
    /// (May be the empty string if no element was provided; see `atomic_number` for the fallback.)
    pub fn element(&self) -> &str {
        &self.element
    }
/// Set the element of this atom. The element will be trimmed (whitespace removed) and changed to ASCII uppercase as requested by PDB/PDBx standard.
/// For PDB files the element can at most contain 2 characters.
/// If the element is invalid an error message is provided.
/// The element can only contain valid characters, the ASCII graphic characters (`char.is_ascii_graphic() || char == ' '`).
pub fn set_element(&mut self, new_element: &str) -> Result<(), String> {
if !valid_identifier(new_element) {
Err(format!(
"New element has invalid characters for atom {} name {}",
self.serial_number, new_element
))
} else {
self.element = new_element.trim().to_ascii_uppercase();
Ok(())
}
}
/// Get the atomic number of this Atom. If defined it uses `self.element()`, otherwise it uses `self.name()`.
/// It fails when `self.element()` is not an element in the periodic table, or if `self.element()` is undefined and `self.name()` is not an element in the periodic table.
pub fn atomic_number(&self) -> Option<usize> {
if !self.element.is_empty() {
reference_tables::get_atomic_number(&self.element())
} else {
reference_tables::get_atomic_number(&self.name())
}
}
/// Get the atomic radius of this Atom in Å. The radius is defined up to element 'Cm' or atomic number 96.
/// Source: Martin Rahm, Roald Hoffmann, and N. W. Ashcroft. Atomic and Ionic Radii of Elements 1-96.
/// Chemistry - A European Journal, 22(41):14625–14632, oct 2016. URL: <http://doi.org/10.1002/chem.201602949>.
/// Updated to the corrigendum: <https://doi.org/10.1002/chem.201700610>.
///
/// It fails if the atomic number of this Atom is not defined (see `self.atomic_number()`).
/// It also fails when the atomic radius is not defined for the given atomic number, so if the atomic
/// number is higher than 96.
pub fn atomic_radius(&self) -> Option<f64> {
if let Some(s) = self.atomic_number() {
reference_tables::get_atomic_radius(s)
} else {
None
}
}
/// Get the van der Waals radius for this Atom in Å. The radius is defined up to element 'Es' or atomic number 99.
/// Source: Alvarez, S. (2013). A cartography of the van der Waals territories. Dalton Transactions, 42(24), 8617. <https://doi.org/10.1039/c3dt50599e>.
///
/// It fails if the atomic number of this Atom is not defined (see `self.atomic_number()`).
/// It also fails when the atomic radius is not defined for the given atomic number, so if the atomic
/// number is higher than 99.
pub fn vanderwaals_radius(&self) -> Option<f64> {
if let Some(s) = self.atomic_number() {
reference_tables::get_vanderwaals_radius(s)
} else {
None
}
}
/// Gets the covalent bond radii for this Atom.
/// The result is the radius for a single, double and triple bond, where the last two are optional. If the radius for a double bond is unknown the radius for a triple bond is also unknown.
/// All values are given in picometers.
/// Sources:
/// * P. Pyykkö; M. Atsumi (2009). "Molecular Single-Bond Covalent Radii for Elements 1-118". Chemistry: A European Journal. 15 (1): 186–197. <https://doi.org/10.1002/chem.200800987>
/// * P. Pyykkö; M. Atsumi (2009). "Molecular Double-Bond Covalent Radii for Elements Li–E112". Chemistry: A European Journal. 15 (46): 12770–12779. <https://doi.org/10.1002/chem.200901472>
/// * P. Pyykkö; S. Riedel; M. Patzschke (2005). "Triple-Bond Covalent Radii". Chemistry: A European Journal. 11 (12): 3511–3520. <https://doi.org/10.1002/chem.200401299>
///
/// It fails if the atomic number of this Atom is not defined (see `self.atomic_number()`).
pub fn covalent_bond_radii(&self) -> Option<(usize, Option<usize>, Option<usize>)> {
self.atomic_number()
.map(reference_tables::get_covalent_bond_radii)
}
    /// Get the charge of the atom.
    /// In PDB files the charge is one digit with a sign.
    pub fn charge(&self) -> isize {
        self.charge
    }
    /// Set the charge of this atom.
    /// No validation is performed here; `pdb_charge` handles out-of-range values on output.
    pub fn set_charge(&mut self, new_charge: isize) {
        self.charge = new_charge;
    }
/// Get the charge in the PDB format `[0-9][-+]`. If the charge is 0 or outside bounds (below -9 or above 9) it returns an empty string.
#[allow(clippy::cast_possible_truncation)]
pub fn pdb_charge(&self) -> String {
if self.charge == 0 || self.charge < -9 || self.charge > 9 {
String::new()
} else {
let mut sign = '+';
let charge = (48 + self.charge.abs() as u8) as char;
if self.charge < 0 {
sign = '-';
}
let mut output = String::new();
output.push(charge);
output.push(sign);
output
}
}
    /// Get the anisotropic temperature factors, if available.
    /// This number has a precision of 8.3 in PDB files and 5 decimal places of precision in mmCIF files.
    /// Returns `None` when no ANISOU record was set for this atom.
    pub fn anisotropic_temperature_factors(&self) -> Option<[[f64; 3]; 3]> {
        self.atf
    }
    /// Set the anisotropic temperature factors.
    /// The values are stored as-is; no finiteness checks are applied here.
    pub fn set_anisotropic_temperature_factors(&mut self, factors: [[f64; 3]; 3]) {
        self.atf = Some(factors);
    }
    /// Get if this atom is likely to be a part of the backbone of a protein.
    /// This is based on this Atom only, for a more precise definition use [AtomWithHierarchy]`.is_backbone()`.
    /// The lookup is purely name-based via the reference tables.
    pub fn is_backbone(&self) -> bool {
        reference_tables::is_backbone(self.name())
    }
    /// Apply a transformation to the position of this atom, the new position is immediately set.
    /// Panics (via `expect`) if the transformed coordinates are not finite,
    /// since `set_pos` rejects non-finite values.
    pub fn apply_transformation(&mut self, transformation: &TransformationMatrix) {
        self.set_pos(transformation.apply(self.pos()))
            .expect("Some numbers were invalid in applying a transformation");
    }
/// See if the `other` Atom corresponds with this Atom.
/// Which means that the Atoms are equal except for the position, occupancy, and b_factor.
/// Used to validate that multiple models contain the same atoms, but with different positional data.
pub fn corresponds(&self, other: &Atom) -> bool {
self.serial_number == other.serial_number
&& self.name() == other.name()
&& self.element() == other.element()
&& self.charge() == other.charge()
&& ((self.atf.is_none() && other.atf.is_none())
|| (self.atf.is_some() && other.atf.is_some()))
}
/// Gives the distance between the centers of two atoms in Aͦ.
pub fn distance(&self, other: &Atom) -> f64 {
((other.x - self.x).powi(2) + (other.y - self.y).powi(2) + (other.z - self.z).powi(2))
.sqrt()
}
/// Gives the distance between the centers of two atoms in Aͦ.
/// Wrapping around the unit cell if needed.
/// Meaning it will give the shortest distance between the two atoms or any of their copies given a crystal of the size of the given unit cell stretching out to all sides.
pub fn distance_wrapping(&self, other: &Atom, cell: &UnitCell) -> f64 {
let mut x = other.x;
if (self.x - other.x).abs() > cell.a() / 2.0 {
if self.x > other.x {
x += cell.a();
} else {
x -= cell.a();
}
}
let mut y = other.y;
if (self.y - other.y).abs() > cell.b() / 2.0 {
if self.y > other.y {
y += cell.b();
} else {
y -= cell.b();
}
}
let mut z = other.z;
if (self.z - other.z).abs() > cell.c() / 2.0 {
if self.z > other.z {
z += cell.c();
} else {
z -= cell.c();
}
}
((x - self.x).powi(2) + (y - self.y).powi(2) + (z - self.z).powi(2)).sqrt()
}
/// Checks if this Atom overlaps with the given atom. It overlaps if the distance between the atoms is
/// less then the sum of the radius from this atom and the other atom. The used radius is (`atom.atomic_radius()`).
///
/// It fails if for any one of the two atoms the radius (`atom.atomic_radius()`) is not defined.
pub fn overlaps(&self, other: &Atom) -> Option<bool> {
self.atomic_radius()
.map(|self_rad| {
other
.atomic_radius()
.map(|other_rad| self.distance(other) <= self_rad + other_rad)
})
.flatten()
}
/// Checks if this Atom overlaps with the given atom. It overlaps if the distance between the atoms is
/// less then the sum of the radius from this atom and the other atom. The used radius is (`atom.atomic_radius()`).
/// Wrapping around the unit cell if needed. Meaning it will give the shortest distance between the two
/// atoms or any of their copies given a crystal of the size of the given unit cell stretching out to
/// all sides.
///
/// It fails if for any one of the two atoms the radius (`atom.atomic_radius()`) is not defined.
pub fn overlaps_wrapping(&self, other: &Atom, cell: &UnitCell) -> Option<bool> {
self.atomic_radius()
.map(|self_rad| {
other
.atomic_radius()
.map(|other_rad| self.distance_wrapping(other, cell) <= self_rad + other_rad)
})
.flatten()
}
}
impl fmt::Display for Atom {
    /// One-line human-readable summary. Note: given the argument order below,
    /// "ID" prints the atom *name* and "Number" the serial number.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "ATOM ID: {}, Number: {}, Element: {}, X: {}, Y: {}, Z: {}, OCC: {}, B: {}, ANISOU: {}",
            self.name(),
            self.serial_number(),
            self.element(),
            self.x(),
            self.y(),
            self.z(),
            self.occupancy(),
            self.b_factor(),
            self.atf.is_some()
        )
    }
}
impl Clone for Atom {
    /// The clone implementation needs to use the constructor to guarantee the uniqueness of the counter
    fn clone(&self) -> Self {
        let mut atom = Atom::new(
            self.hetero,
            self.serial_number,
            &self.name,
            self.x,
            self.y,
            self.z,
            self.occupancy,
            self.b_factor,
            &self.element,
            self.charge,
        )
        // Safe: `self` was already validated, so re-validating the same
        // values through the constructor cannot fail.
        .expect("Invalid Atom properties in a clone");
        // The ANISOU matrix is not a constructor argument; copy it manually.
        atom.atf = self.atf;
        atom
    }
}
impl PartialEq for Atom {
    // Full value equality including position, occupancy and b_factor —
    // stricter than `Atom::corresponds`, which ignores the positional data.
    fn eq(&self, other: &Self) -> bool {
        self.serial_number == other.serial_number
            && self.name() == other.name()
            && self.element() == other.element()
            && self.charge() == other.charge()
            && self.atf == other.atf
            && self.pos() == other.pos()
            && self.occupancy == other.occupancy
            && self.b_factor == other.b_factor
    }
}
/// As there are a lot of checks to make sure only 'normal' f64 values are used
/// Atom satisfies the properties needed for Eq while having f64 values.
/// (The setters reject NaN and infinities, so reflexivity holds.)
impl Eq for Atom {}
impl PartialOrd for Atom {
    /// Delegates to `Ord::cmp` (canonical form expected by clippy's
    /// `non_canonical_partial_ord_impl`); ordering is by serial number.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Atom {
    /// Atoms are totally ordered by serial number alone.
    fn cmp(&self, other: &Self) -> Ordering {
        self.serial_number.cmp(&other.serial_number)
    }
}
#[cfg(feature = "rstar")]
use rstar::{PointDistance, RTreeObject, AABB};
#[cfg(feature = "rstar")]
impl RTreeObject for &Atom {
    type Envelope = AABB<[f64; 3]>;
    // A degenerate (point) bounding box at the atom center; the atomic
    // radius is not included in the envelope.
    fn envelope(&self) -> Self::Envelope {
        AABB::from_point([self.x(), self.y(), self.z()])
    }
}
#[cfg(feature = "rstar")]
impl PointDistance for &Atom {
    // Returns the *squared* Euclidean distance to `other`.
    fn distance_2(&self, other: &[f64; 3]) -> f64 {
        // No square root as that is required by the package
        (other[0] - self.x).powi(2) + (other[1] - self.y).powi(2) + (other[2] - self.z).powi(2)
    }
}
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::Atom;
    use super::UnitCell;

    #[test]
    fn set_name() {
        let mut a = Atom::new(false, 0, "", 0.0, 0.0, 0.0, 0.0, 0.0, "", 0).unwrap();
        // Non-ASCII characters are rejected; ASCII graphic names are accepted.
        assert!(a.set_name("Å").is_err());
        assert!(a.set_name("ATOMS").is_ok());
        a.set_name("ATOM").unwrap();
        a.set_name("HOH").unwrap();
        a.set_name("RK").unwrap();
        a.set_name("R").unwrap();
        a.set_name("").unwrap();
    }

    #[test]
    fn set_element() {
        let mut a = Atom::new(false, 0, "", 0.0, 0.0, 0.0, 0.0, 0.0, "", 0).unwrap();
        assert!(a.set_element("R̈").is_err());
        assert!(a.set_element("HOH").is_ok());
        a.set_element("RK").unwrap();
        a.set_element("R").unwrap();
        a.set_element("").unwrap();
    }

    #[test]
    fn distance() {
        let a = Atom::new(false, 0, "", 1.0, 0.0, 0.0, 0.0, 0.0, "C", 0).unwrap();
        let b = Atom::new(false, 0, "", 9.0, 0.0, 0.0, 0.0, 0.0, "C", 0).unwrap();
        let cell = UnitCell::new(10.0, 10.0, 10.0, 90.0, 90.0, 90.0);
        assert!(!a.overlaps(&b).unwrap());
        assert!(a.overlaps_wrapping(&b, &cell).unwrap());
        assert_eq!(a.distance(&b), 8.0);
        // Wrapping around a 10 Å cell brings the images within 2 Å.
        assert_eq!(a.distance_wrapping(&b, &cell), 2.0);
    }

    #[test]
    fn distance_all_axes() {
        let a = Atom::new(false, 0, "", 1.0, 1.0, 1.0, 0.0, 0.0, "C", 0).unwrap();
        let b = Atom::new(false, 0, "", 9.0, 9.0, 9.0, 0.0, 0.0, "C", 0).unwrap();
        let cell = UnitCell::new(10.0, 10.0, 10.0, 90.0, 90.0, 90.0);
        assert!(!a.overlaps(&b).unwrap());
        assert!(a.overlaps_wrapping(&b, &cell).unwrap());
    }

    #[test]
    fn check_equality() {
        let a = Atom::new(false, 0, "", 1.0, 0.0, 0.0, 0.0, 0.0, "C", 0).unwrap();
        let b = Atom::new(false, 0, "", 9.0, 0.0, 0.0, 0.0, 0.0, "C", 0).unwrap();
        let c = Atom::new(false, 0, "", 9.0, 0.0, 0.0, 0.0, 0.0, "C", 0).unwrap();
        assert_ne!(a, b);
        assert_eq!(b, c);
        assert_ne!(a, c);
    }

    #[test]
    fn invalid_new_values() {
        let mut a = Atom::new(false, 0, "", 1.0, 1.0, 1.0, 0.0, 0.0, "C", 0).unwrap();
        assert!(Atom::new(false, 0, "Rͦ", 1.0, 1.0, 1.0, 0.0, 0.0, "C", 0).is_none());
        assert!(Atom::new(false, 0, "R", 1.0, 1.0, 1.0, 0.0, 0.0, "Cͦ", 0).is_none());
        assert!(a.set_x(f64::INFINITY).is_err());
        assert!(a.set_x(f64::NAN).is_err());
        assert!(a.set_x(f64::NEG_INFINITY).is_err());
        assert!(a.set_y(f64::INFINITY).is_err());
        assert!(a.set_z(f64::INFINITY).is_err());
        assert!(a.set_pos((f64::INFINITY, 0., 0.)).is_err());
        assert!(a.set_pos((0., f64::INFINITY, 0.)).is_err());
        assert!(a.set_pos((0., 0., f64::INFINITY)).is_err());
        assert!(a.set_b_factor(f64::INFINITY).is_err());
        assert!(a.set_b_factor(-1.0).is_err());
        assert!(a.set_occupancy(f64::INFINITY).is_err());
        assert!(a.set_occupancy(-1.).is_err());
    }

    #[test]
    fn check_setters() {
        let mut a = Atom::new(false, 0, "C", 1.0, 1.0, 1.0, 0.0, 0.0, "", 0).unwrap();
        assert!(a.set_x(2.0).is_ok());
        assert_eq!(a.x(), 2.0);
        assert!(a.set_y(2.0).is_ok());
        assert_eq!(a.y(), 2.0);
        assert!(a.set_z(2.0).is_ok());
        assert_eq!(a.z(), 2.0);
        assert!(a.set_pos((3.0, 3.0, 3.0)).is_ok());
        assert_eq!(a.x(), 3.0);
        assert_eq!(a.y(), 3.0);
        assert_eq!(a.z(), 3.0);
        assert_eq!(a.pos(), (3.0, 3.0, 3.0));
        assert!(a.set_b_factor(2.0).is_ok());
        assert_eq!(a.b_factor(), 2.0);
        assert!(a.set_occupancy(2.0).is_ok());
        assert_eq!(a.occupancy(), 2.0);
        assert!(a.set_occupancy(0.0).is_ok());
        assert!(a.set_b_factor(0.0).is_ok());
        a.set_hetero(true);
        // `assert!` instead of `assert_eq!(.., true)` (clippy::bool_assert_comparison).
        assert!(a.hetero());
        a.set_serial_number(42);
        assert_eq!(a.serial_number(), 42);
        assert_eq!(a.atomic_number(), Some(6));
        assert!(a.set_name("HOH").is_ok());
        assert!(a.atomic_radius().is_none());
        assert!(a.vanderwaals_radius().is_none());
        assert!(a.covalent_bond_radii().is_none());
        a.set_charge(-1);
        assert_eq!(a.charge(), -1);
        assert_eq!(a.pdb_charge(), "1-");
    }

    #[test]
    fn check_radii() {
        let a = Atom::new(false, 0, "H", 1.0, 1.0, 1.0, 0.0, 0.0, "", 0).unwrap();
        assert_eq!(a.atomic_radius(), Some(1.54));
        assert_eq!(a.vanderwaals_radius(), Some(1.20));
        assert_eq!(a.covalent_bond_radii(), Some((32, None, None)));
        let a = Atom::new(false, 0, "Cl", 1.0, 1.0, 1.0, 0.0, 0.0, "", 0).unwrap();
        assert_eq!(a.atomic_radius(), Some(2.06));
        assert_eq!(a.vanderwaals_radius(), Some(1.82));
        assert_eq!(a.covalent_bond_radii(), Some((99, Some(95), Some(93))));
    }

    #[test]
    fn check_display() {
        let a = Atom::new(false, 0, "C", 1.0, 1.0, 1.0, 0.0, 0.0, "", 0).unwrap();
        // Previously the results were silently discarded; assert that both
        // Debug and Display produce non-empty output.
        assert!(!format!("{:?}", a).is_empty());
        assert!(!format!("{}", a).is_empty());
    }
}
| true
|
54863df2e06c1ececb137785d93d84ba7487321f
|
Rust
|
osenft/manticore
|
/src/crypto/ring/rsa.rs
|
UTF-8
| 5,882
| 2.65625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
//! Implementations of [`crypto::rsa`] based on `ring`.
//!
//! Requires the `std` feature flag to be enabled.
use ring::error::Unspecified;
use ring::signature::KeyPair as _;
use ring::signature::RsaPublicKeyComponents;
use crate::crypto::rsa;
#[cfg(doc)]
use crate::crypto;
/// A `ring`-based [`rsa::PublicKey`].
#[derive(Clone)]
pub struct PublicKey {
    // Raw big-endian modulus (`n`) and public exponent (`e`) as owned buffers.
    key: RsaPublicKeyComponents<Box<[u8]>>,
}
impl PublicKey {
    /// Creates a new `PublicKey` with the given modulus and exponent, both of
    /// which should be given in big-endian, padded with zeroes out to the
    /// desired bit length.
    ///
    /// Returns `None` if the key modulus is not of one of the sanctioned sizes
    /// in [`rsa::ModulusLength`].
    pub fn new(modulus: Box<[u8]>, exponent: Box<[u8]>) -> Option<Self> {
        // Bail out early on unsanctioned modulus sizes; the length value
        // itself is not needed beyond the check.
        rsa::ModulusLength::from_byte_len(modulus.len())?;
        Some(Self {
            key: RsaPublicKeyComponents {
                n: modulus,
                e: exponent,
            },
        })
    }
}
impl rsa::PublicKey for PublicKey {
    // Safe to `expect`: `PublicKey::new` only admits sanctioned modulus sizes.
    fn len(&self) -> rsa::ModulusLength {
        rsa::ModulusLength::from_byte_len(self.key.n.len())
            .expect("the keypair should already be a sanctioned size!")
    }
}
/// A `ring`-based [`rsa::Keypair`].
pub struct Keypair {
    // The underlying ring private/public keypair.
    keypair: ring::signature::RsaKeyPair,
}
impl Keypair {
    /// Creates a new `Keypair` from the given PKCS#8-encoded private key.
    ///
    /// This function will return `None` if parsing fails or if it is not one
    /// of the sanctioned sizes in [`rsa::ModulusLength`].
    pub fn from_pkcs8(pkcs8: &[u8]) -> Option<Self> {
        // Bug fix: this previously `unwrap()`ed the parse result, panicking on
        // malformed input even though the documented contract is to return
        // `None` on parse failure.
        let keypair = ring::signature::RsaKeyPair::from_pkcs8(pkcs8).ok()?;
        rsa::ModulusLength::from_byte_len(keypair.public_modulus_len())
            .map(|_| Self { keypair })
    }
}
impl rsa::Keypair for Keypair {
    type Pub = PublicKey;
    fn public(&self) -> Self::Pub {
        // NOTE(review): `big_endian_without_leading_zero()` strips leading
        // zero bytes, so `n.len()` could in principle fall below the
        // sanctioned byte length for moduli with a zero top byte — the
        // `expect` below assumes that cannot happen; verify.
        let n = self
            .keypair
            .public_key()
            .modulus()
            .big_endian_without_leading_zero()
            .to_vec()
            .into_boxed_slice();
        let e = self
            .keypair
            .public_key()
            .exponent()
            .big_endian_without_leading_zero()
            .to_vec()
            .into_boxed_slice();
        PublicKey::new(n, e)
            .expect("the keypair should already be a sanctioned size!")
    }
    // Safe to `expect`: `from_pkcs8` only admits sanctioned modulus sizes.
    fn pub_len(&self) -> rsa::ModulusLength {
        rsa::ModulusLength::from_byte_len(self.keypair.public_modulus_len())
            .expect("the keypair should already be a sanctioned size!")
    }
}
/// A `ring`-based [`rsa::Builder`] and [`rsa::SignerBuilder`].
pub struct Builder {
    // Private zero-sized field: forces construction through `Builder::new`.
    _priv: (),
}
impl Builder {
/// Creates a new `Builder`.
pub fn new() -> Self {
Builder { _priv: () }
}
}
impl Default for Builder {
    // Delegates to `new()`; the builder carries no state.
    fn default() -> Self {
        Self::new()
    }
}
impl rsa::Builder for Builder {
    type Engine = Engine;
    // Every sanctioned modulus length is accepted unconditionally.
    fn supports_modulus(&self, _: rsa::ModulusLength) -> bool {
        true
    }
    // Engine construction is infallible here; the `Result` is required by
    // the trait signature.
    fn new_engine(
        &self,
        key: PublicKey,
    ) -> Result<Engine, rsa::Error<Unspecified>> {
        Ok(Engine { key })
    }
}
impl rsa::SignerBuilder for Builder {
    type Signer = Signer;
    // Signer construction is infallible here; the `Result` is required by
    // the trait signature.
    fn new_signer(
        &self,
        keypair: Keypair,
    ) -> Result<Signer, rsa::Error<Unspecified>> {
        Ok(Signer { keypair })
    }
}
/// A `ring`-based [`rsa::Engine`].
pub struct Engine {
    // Public key used for signature verification.
    key: PublicKey,
}
impl rsa::Engine for Engine {
    type Error = Unspecified;
    type Key = PublicKey;
    fn verify_signature(
        &mut self,
        signature: &[u8],
        message: &[u8],
    ) -> Result<(), rsa::Error<Unspecified>> {
        // The verification scheme is fixed to PKCS#1 v1.5 with SHA-256,
        // accepting key sizes from 2048 to 8192 bits (per the ring constant).
        let scheme = &ring::signature::RSA_PKCS1_2048_8192_SHA256;
        self.key
            .key
            .verify(scheme, message, signature)
            .map_err(rsa::Error::Custom)
    }
}
/// A `ring`-based [`rsa::Signer`].
pub struct Signer {
    // Private keypair used to produce signatures.
    keypair: Keypair,
}
impl rsa::Signer for Signer {
    type Engine = Engine;
    type Keypair = Keypair;
    fn pub_len(&self) -> rsa::ModulusLength {
        use crate::crypto::rsa::Keypair as _;
        self.keypair.pub_len()
    }
    fn sign(
        &mut self,
        message: &[u8],
        signature: &mut [u8],
    ) -> Result<(), rsa::Error<Unspecified>> {
        // Signing scheme is fixed to PKCS#1 v1.5 with SHA-256; a fresh system
        // RNG is used per call (ring requires one even for deterministic
        // padding schemes).
        let scheme = &ring::signature::RSA_PKCS1_SHA256;
        let rng = ring::rand::SystemRandom::new();
        self.keypair
            .keypair
            .sign(scheme, &rng, message, signature)
            .map_err(rsa::Error::Custom)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::rsa;
    use crate::crypto::rsa::Builder as _;
    use crate::crypto::rsa::Engine as _;
    use crate::crypto::rsa::Keypair as _;
    use crate::crypto::rsa::Signer as _;
    use crate::crypto::rsa::SignerBuilder as _;
    use crate::crypto::testdata;
    // End-to-end roundtrip: verify a known-good signature, then sign the
    // same plaintext ourselves and verify the freshly produced signature.
    #[test]
    fn rsa() {
        let keypair =
            Keypair::from_pkcs8(testdata::RSA_2048_PRIV_PKCS8).unwrap();
        assert_eq!(keypair.pub_len(), rsa::ModulusLength::Bits2048);
        let rsa = Builder::new();
        let mut engine = rsa.new_engine(keypair.public()).unwrap();
        engine
            .verify_signature(
                testdata::RSA_2048_SHA256_SIG_PKCS1,
                testdata::PLAIN_TEXT,
            )
            .unwrap();
        let mut signer = rsa.new_signer(keypair).unwrap();
        // The signature buffer must be exactly the modulus length.
        let mut generated_sig = vec![0; signer.pub_len().byte_len()];
        signer
            .sign(testdata::PLAIN_TEXT, &mut generated_sig)
            .unwrap();
        engine
            .verify_signature(&generated_sig, testdata::PLAIN_TEXT)
            .unwrap();
    }
}
| true
|
777be5e5fa569791a232954e07606437afdd8ba6
|
Rust
|
pnowojski/gravity
|
/src/models/player.rs
|
UTF-8
| 3,021
| 2.859375
| 3
|
[] |
no_license
|
use graphics::{Context, rectangle, polygon, Transformed};
use opengl_graphics::GlGraphics;
use color;
use geom;
use geom::Direction;
use piston::window::Size;
use super::GameObject;
use super::PhysicalObject;
// Magnitude of the movement input fed to `PhysicalObject::apply` each update
// (units depend on how `apply` interprets it — presumably a force; confirm).
const PLAYER_SPEED: f64 = 200_000_000.0;
// Side length of the player's square sprite, in drawing units.
const PLAYER_SIZE: f64 = 20.0;
// Drift for this long after movement key is released.
// You don't come to a hard stop in space!
const PLAYER_DRIFT: f64 = 0.2;
pub struct Player {
    // Physics state (mass, position, ...) driving the player's motion.
    pub physical_object: PhysicalObject,
    // Edge length of the rendered square (see `render`).
    pub size: f64,
    // Remaining drift time after a movement key is released
    // (time units presumably match `dt` in `update` — confirm).
    pub drift_ttl: f64,
    // Movement input currently held, applied to the physical object each update.
    move_offset: geom::Vector2,
}
impl Player {
    /// Creates a player at `(x, y)` with default mass, at rest and with no
    /// movement input held.
    pub fn new(x: f64, y: f64) -> Player {
        // `return` keyword dropped (clippy::needless_return); digit grouping
        // fixed from `1000_000.0` (clippy::inconsistent_digit_grouping).
        Player {
            physical_object: PhysicalObject::new(1_000_000.0, geom::Vector2::new(x, y)),
            drift_ttl: 0.0,
            move_offset: geom::Vector2::new(0.0, 0.0),
            size: PLAYER_SIZE,
        }
    }

    /// Starts applying movement input in the given direction until the
    /// matching `stop_move` call.
    pub fn start_move(&mut self, dir: Direction) {
        match dir {
            Direction::WEST => self.move_offset.x = -PLAYER_SPEED,
            Direction::NORTH => self.move_offset.y = -PLAYER_SPEED,
            Direction::EAST => self.move_offset.x = PLAYER_SPEED,
            Direction::SOUTH => self.move_offset.y = PLAYER_SPEED,
        }
    }

    /// Clears the movement input on the axis of the given direction.
    pub fn stop_move(&mut self, dir: Direction) {
        // East/west share the x axis and north/south the y axis, so the
        // four original arms collapse pairwise.
        match dir {
            Direction::WEST | Direction::EAST => self.move_offset.x = 0.0,
            Direction::NORTH | Direction::SOUTH => self.move_offset.y = 0.0,
        }
    }
}
impl GameObject for Player {
    fn render(&self, ctxt: &Context, gl: &mut GlGraphics) {
        // Draw the player as a red square, shifted back by the collision
        // radius on both axes so the sprite is centered on the position.
        let square = rectangle::square(0.0, 0.0, self.size);
        let radius = self.get_physical_object().radius;
        let transform = ctxt.transform.trans(self.get_position().x, self.get_position().y)
            .trans(-radius, -radius);
        rectangle(color::RED, square, transform, gl);
    }
    fn render_dbg(&self, ctxt: &Context, gl: &mut GlGraphics) {
        // Render collision box as a white circle outline.
        let radius = self.radius();
        let diam = radius * 2.0;
        let circle = rectangle::Rectangle::new_round_border(color::WHITE, radius, 1.0);
        // Center on x/y
        let transform = ctxt.transform
            .trans(self.get_position().x, self.get_position().y)
            .trans(-radius, -radius);
        circle.draw([0.0, 0.0, diam, diam], &ctxt.draw_state, transform, gl);
    }
    fn update(&mut self, dt: f64, size: Size) {
        // TODO: Prevent movement outside of boundaries.
        let radius = self.radius();
        // Apply the held movement input, then integrate the physics state.
        self.physical_object.apply(dt, &self.move_offset);
        self.physical_object.update(dt);
        // NOTE(review): if `position()` returns the position *by value*, this
        // clamps a temporary and has no lasting effect — verify it returns a
        // mutable reference into the physical object.
        geom::restrict_to_bounds(
            &mut self.position(),
            [radius, radius, size.width as f64, size.height as f64]
        );
    }
    fn physical_object(&mut self) -> &mut PhysicalObject {
        &mut self.physical_object
    }
    fn get_physical_object(&self) -> &PhysicalObject {
        &self.physical_object
    }
}
| true
|
76e97b4341fb1b1340686d4cd87a0a865bca0a24
|
Rust
|
kroeckx/ruma
|
/crates/ruma-common/src/directory.rs
|
UTF-8
| 9,496
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
//! Common types for room directory endpoints.
use std::fmt;
use js_int::UInt;
use ruma_identifiers::{MxcUri, RoomAliasId, RoomId};
use ruma_serde::Outgoing;
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use serde_json::Value as JsonValue;
/// A chunk of a room list response, describing one room.
///
/// To create an instance of this type, first create a `PublicRoomsChunkInit` and convert it via
/// `PublicRoomsChunk::from` / `.into()`.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct PublicRoomsChunk {
    /// Aliases of the room.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub aliases: Vec<RoomAliasId>,

    /// The canonical alias of the room, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub canonical_alias: Option<RoomAliasId>,

    /// The name of the room, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,

    /// The number of members joined to the room.
    pub num_joined_members: UInt,

    /// The ID of the room.
    pub room_id: RoomId,

    /// The topic of the room, if any.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub topic: Option<String>,

    /// Whether the room may be viewed by guest users without joining.
    pub world_readable: bool,

    /// Whether guest users may join the room and participate in it.
    ///
    /// If they can, they will be subject to ordinary power level rules like any other user.
    pub guest_can_join: bool,

    /// The URL for the room's avatar, if one is set.
    ///
    /// If you activate the `compat` feature, this field being an empty string in JSON will give
    /// you `None` here.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[cfg_attr(
        feature = "compat",
        serde(default, deserialize_with = "ruma_serde::empty_string_as_none")
    )]
    pub avatar_url: Option<MxcUri>,
}
/// Initial set of mandatory fields of `PublicRoomsChunk`.
///
/// This struct will not be updated even if additional fields are added to `PublicRoomsChunk` in a
/// new (non-breaking) release of the Matrix specification.
#[derive(Debug)]
#[allow(clippy::exhaustive_structs)]
pub struct PublicRoomsChunkInit {
    /// The number of members joined to the room.
    pub num_joined_members: UInt,

    /// The ID of the room.
    pub room_id: RoomId,

    /// Whether the room may be viewed by guest users without joining.
    pub world_readable: bool,

    /// Whether guest users may join the room and participate in it.
    ///
    /// If they can, they will be subject to ordinary power level rules like any other user.
    pub guest_can_join: bool,
}
impl From<PublicRoomsChunkInit> for PublicRoomsChunk {
fn from(init: PublicRoomsChunkInit) -> Self {
let PublicRoomsChunkInit { num_joined_members, room_id, world_readable, guest_can_join } =
init;
Self {
aliases: Vec::new(),
canonical_alias: None,
name: None,
num_joined_members,
room_id,
topic: None,
world_readable,
guest_can_join,
avatar_url: None,
}
}
}
/// A filter for public rooms lists
#[derive(Clone, Debug, Default, Outgoing, Serialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(Default)]
pub struct Filter<'a> {
    /// A string to search for in the room metadata, e.g. name, topic, canonical alias etc.
    /// `None` means no search term, i.e. an empty filter.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub generic_search_term: Option<&'a str>,
}
impl Filter<'_> {
    /// Creates an empty `Filter`.
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns `true` if the filter is empty, i.e. no search term is set.
    pub fn is_empty(&self) -> bool {
        self.generic_search_term.is_none()
    }
}
/// Information about which networks/protocols from application services on the
/// homeserver from which to request rooms.
///
/// (On the wire this is a flattened map — see the `Serialize` impl below.)
#[derive(Clone, Debug, PartialEq, Eq, Outgoing)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
#[incoming_derive(Clone, PartialEq, Eq, !Deserialize)]
pub enum RoomNetwork<'a> {
    /// Return rooms from the Matrix network.
    Matrix,

    /// Return rooms from all the networks/protocols the homeserver knows about.
    All,

    /// Return rooms from a specific third party network/protocol.
    ThirdParty(&'a str),
}
impl<'a> Default for RoomNetwork<'a> {
fn default() -> Self {
RoomNetwork::Matrix
}
}
impl<'a> Serialize for RoomNetwork<'a> {
    /// Serializes as a flattened map: empty for `Matrix`,
    /// `include_all_networks: true` for `All`, and
    /// `third_party_instance_id` for `ThirdParty`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Self::Matrix => serializer.serialize_struct("RoomNetwork", 0)?.end(),
            Self::All => {
                let mut map = serializer.serialize_struct("RoomNetwork", 1)?;
                map.serialize_field("include_all_networks", &true)?;
                map.end()
            }
            Self::ThirdParty(network) => {
                let mut map = serializer.serialize_struct("RoomNetwork", 1)?;
                map.serialize_field("third_party_instance_id", network)?;
                map.end()
            }
        }
    }
}
impl<'de> Deserialize<'de> for IncomingRoomNetwork {
    // Hand-written because the wire format is a flattened map with mutually
    // exclusive keys; see `RoomNetworkVisitor` below.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_map(RoomNetworkVisitor)
    }
}
/// Visitor decoding the flattened `RoomNetwork` wire format.
struct RoomNetworkVisitor;
impl<'de> Visitor<'de> for RoomNetworkVisitor {
    type Value = IncomingRoomNetwork;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("Network selection")
    }

    /// Collects the two recognized keys (unknown keys are ignored), then
    /// maps the combination onto the enum, rejecting the contradictory
    /// `include_all_networks = true` + `third_party_instance_id` pair.
    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        let mut include_all_networks = false;
        let mut third_party_instance_id = None;
        while let Some((key, value)) = access.next_entry::<String, JsonValue>()? {
            match key.as_str() {
                "include_all_networks" => {
                    // Non-boolean values are treated as `false`
                    // (`unwrap_or` replaces the manual match — clippy idiom).
                    include_all_networks = value.as_bool().unwrap_or(false);
                }
                "third_party_instance_id" => {
                    third_party_instance_id = value.as_str().map(ToOwned::to_owned);
                }
                _ => {}
            }
        }

        if include_all_networks {
            if third_party_instance_id.is_none() {
                Ok(IncomingRoomNetwork::All)
            } else {
                Err(M::Error::custom(
                    "`include_all_networks = true` and `third_party_instance_id` are mutually exclusive.",
                ))
            }
        } else {
            Ok(match third_party_instance_id {
                Some(network) => IncomingRoomNetwork::ThirdParty(network),
                None => IncomingRoomNetwork::Matrix,
            })
        }
    }
}
#[cfg(test)]
mod tests {
    use serde_json::{from_value as from_json_value, json, to_value as to_json_value};
    use super::{IncomingRoomNetwork, RoomNetwork};
    // Round-trip checks for each RoomNetwork variant's flattened wire format.
    #[test]
    fn serialize_matrix_network_only() {
        let json = json!({});
        assert_eq!(to_json_value(RoomNetwork::Matrix).unwrap(), json);
    }
    #[test]
    fn deserialize_matrix_network_only() {
        let json = json!({ "include_all_networks": false });
        assert_eq!(
            from_json_value::<IncomingRoomNetwork>(json).unwrap(),
            IncomingRoomNetwork::Matrix
        );
    }
    #[test]
    fn serialize_default_network_is_empty() {
        let json = json!({});
        assert_eq!(to_json_value(RoomNetwork::default()).unwrap(), json);
    }
    #[test]
    fn deserialize_empty_network_is_default() {
        let json = json!({});
        assert_eq!(
            from_json_value::<IncomingRoomNetwork>(json).unwrap(),
            IncomingRoomNetwork::Matrix
        );
    }
    #[test]
    fn serialize_include_all_networks() {
        let json = json!({ "include_all_networks": true });
        assert_eq!(to_json_value(RoomNetwork::All).unwrap(), json);
    }
    #[test]
    fn deserialize_include_all_networks() {
        let json = json!({ "include_all_networks": true });
        assert_eq!(from_json_value::<IncomingRoomNetwork>(json).unwrap(), IncomingRoomNetwork::All);
    }
    #[test]
    fn serialize_third_party_network() {
        let json = json!({ "third_party_instance_id": "freenode" });
        assert_eq!(to_json_value(RoomNetwork::ThirdParty("freenode")).unwrap(), json);
    }
    #[test]
    fn deserialize_third_party_network() {
        let json = json!({ "third_party_instance_id": "freenode" });
        assert_eq!(
            from_json_value::<IncomingRoomNetwork>(json).unwrap(),
            IncomingRoomNetwork::ThirdParty("freenode".into())
        );
    }
    // The contradictory key combination must be rejected with a clear error.
    #[test]
    fn deserialize_include_all_networks_and_third_party_exclusivity() {
        let json = json!({ "include_all_networks": true, "third_party_instance_id": "freenode" });
        assert_eq!(
            from_json_value::<IncomingRoomNetwork>(json).unwrap_err().to_string().as_str(),
            "`include_all_networks = true` and `third_party_instance_id` are mutually exclusive."
        );
    }
}
| true
|
39f6275d17a30edb3da3989ec7636ceda4cbe596
|
Rust
|
xpeerchain/xpeerchain
|
/network/src/protocols/direct_send/mod.rs
|
UTF-8
| 11,703
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
// Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Protocol for fire-and-forget style message delivery to a peer
//!
//! DirectSend protocol takes advantage of [muxers] and [substream negotiation] to build a simple
//! best effort message delivery protocol. Concretely,
//!
//! 1. Every message runs in its own ephemeral substream. The substream is directional in the way
//! that only the dialer sends a message to the listener, but no messages or acknowledgements
//! sending back on the other direction. So the message delivery is best effort and not
//! guaranteed. Because the substreams are independent, there is no guarantee on the ordering
//! of the message delivery either.
//! 2. An DirectSend call negotiates which protocol to speak using [`protocol-select`]. This
//! allows simple versioning of message delivery and negotiation of which message types are
//! supported. In the future, we can potentially support multiple backwards-incompatible
//! versions of any messages.
//! 3. The actual structure of the wire messages is left for higher layers to specify. The
//! DirectSend protocol is only concerned with shipping around opaque blobs. Current xpeer
//! DirectSend clients (consensus, mempool) mostly send protobuf enums around over a single
//! DirectSend protocol, e.g., `/xpeer/consensus/direct_send/0.1.0`.
//!
//! ## Wire Protocol (dialer):
//!
//! To send a message to a remote peer, the dialer
//!
//! 1. Requests a new outbound substream from the muxer.
//! 2. Negotiates the substream using [`protocol-select`] to the protocol they
//! wish to speak, e.g., `/xpeer/mempool/direct_send/0.1.0`.
//! 3. Sends the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! ## Wire Protocol (listener):
//!
//! To receive a message from remote peers, the listener
//!
//! 1. Polls for new inbound substreams on the muxer.
//! 2. Negotiates inbound substreams using [`protocol-select`]. The negotiation
//! must only succeed if the requested protocol is actually supported.
//! 3. Awaits the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! Note: negotiated substreams are currently framed with the
//! [muiltiformats unsigned varint length-prefix](https://github.com/multiformats/unsigned-varint)
//!
//! [muxers]: ../../../netcore/multiplexing/index.html
//! [substream negotiation]: ../../../netcore/negotiate/index.html
//! [`protocol-select`]: ../../../netcore/negotiate/index.html
use crate::{
counters,
error::NetworkError,
peer_manager::{PeerManagerNotification, PeerManagerRequestSender},
ProtocolId,
};
use bytes::Bytes;
use channel;
use futures::{
compat::Sink01CompatExt,
future::{FutureExt, TryFutureExt},
io::{AsyncRead, AsyncReadExt, AsyncWrite},
sink::SinkExt,
stream::StreamExt,
};
use logger::prelude::*;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
};
use tokio::{codec::Framed, runtime::TaskExecutor};
use types::PeerId;
use unsigned_varint::codec::UviBytes;
#[cfg(test)]
mod test;
/// Requests accepted by the DirectSend actor from upstream clients.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendRequest {
    /// A request to send out a message.
    SendMessage(PeerId, Message),
}
/// Notifications the DirectSend actor emits towards upstream clients.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendNotification {
    /// A notification that a DirectSend message is received.
    RecvMessage(PeerId, Message),
}
/// A single DirectSend message: an opaque serialized payload tagged with
/// the protocol it belongs to.
#[derive(Clone, Eq, PartialEq)]
pub struct Message {
    /// Message type.
    pub protocol: ProtocolId,
    /// Serialized message data.
    pub mdata: Bytes,
}
impl Debug for Message {
    /// Debug-formats the message, truncating payloads longer than ten bytes
    /// so log lines stay short.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mdata_str = match self.mdata.len() {
            len if len <= 10 => format!("{:?}", self.mdata),
            _ => format!("{:?}...", self.mdata.slice_to(10)),
        };
        write!(
            f,
            "Message {{ protocol: {:?}, mdata: {} }}",
            self.protocol, mdata_str
        )
    }
}
/// The DirectSend actor.
///
/// Owns one outbound message queue per (peer, protocol) pair and the
/// channels connecting it to upstream actors and the `PeerManager`.
pub struct DirectSend<TSubstream> {
    /// A handle to a tokio executor.
    executor: TaskExecutor,
    /// Channel to receive requests from other upstream actors.
    ds_requests_rx: channel::Receiver<DirectSendRequest>,
    /// Channels to send notifications to upstream actors.
    ds_notifs_tx: channel::Sender<DirectSendNotification>,
    /// Channel to receive notifications from PeerManager.
    peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
    /// Channel to send requests to PeerManager.
    peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
    /// Outbound message queues for each (PeerId, ProtocolId) pair.
    message_queues: HashMap<(PeerId, ProtocolId), channel::Sender<Bytes>>,
}
impl<TSubstream> DirectSend<TSubstream>
where
    TSubstream: AsyncRead + AsyncWrite + Send + Unpin + Debug + 'static,
{
    /// Creates a new DirectSend actor wired to the given channels.
    pub fn new(
        executor: TaskExecutor,
        ds_requests_rx: channel::Receiver<DirectSendRequest>,
        ds_notifs_tx: channel::Sender<DirectSendNotification>,
        peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
        peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
    ) -> Self {
        Self {
            executor,
            ds_requests_rx,
            ds_notifs_tx,
            peer_mgr_notifs_rx,
            peer_mgr_reqs_tx,
            message_queues: HashMap::new(),
        }
    }
    /// Main event loop: multiplexes upstream send requests and PeerManager
    /// notifications until both input channels are exhausted.
    pub async fn start(mut self) {
        loop {
            futures::select! {
                req = self.ds_requests_rx.select_next_some() => {
                    self.handle_direct_send_request(req).await;
                }
                notif = self.peer_mgr_notifs_rx.select_next_some() => {
                    self.handle_peer_mgr_notification(notif);
                }
                complete => {
                    crit!("Direct send actor terminated");
                    break;
                }
            }
        }
    }
    // Handle PeerManagerNotification, which can only be NewInboundSubstream for now.
    fn handle_peer_mgr_notification(&self, notif: PeerManagerNotification<TSubstream>) {
        trace!("PeerManagerNotification::{:?}", notif);
        match notif {
            PeerManagerNotification::NewInboundSubstream(peer_id, substream) => {
                // Each inbound substream runs in its own task, so a slow
                // peer cannot block the main event loop.
                self.executor.spawn(
                    Self::handle_inbound_substream(
                        peer_id,
                        substream.protocol,
                        substream.substream,
                        self.ds_notifs_tx.clone(),
                    )
                    .boxed()
                    .unit_error()
                    .compat(),
                );
            }
            _ => unreachable!("Unexpected PeerManagerNotification"),
        }
    }
    // Handle a new inbound substream. Keep forwarding the messages to the NetworkProvider.
    async fn handle_inbound_substream(
        peer_id: PeerId,
        protocol: ProtocolId,
        substream: TSubstream,
        mut ds_notifs_tx: channel::Sender<DirectSendNotification>,
    ) {
        // Frame the raw substream with an unsigned-varint length prefix.
        let mut substream =
            Framed::new(substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
        while let Some(item) = substream.next().await {
            match item {
                Ok(data) => {
                    let notif = DirectSendNotification::RecvMessage(
                        peer_id,
                        Message {
                            protocol: protocol.clone(),
                            mdata: data.freeze(),
                        },
                    );
                    ds_notifs_tx
                        .send(notif)
                        .await
                        .expect("DirectSendNotification send error");
                }
                Err(e) => {
                    warn!(
                        "DirectSend substream with peer {} receives error {}",
                        peer_id.short_str(),
                        e
                    );
                    break;
                }
            }
        }
        warn!(
            "DirectSend inbound substream with peer {} closed",
            peer_id.short_str()
        );
    }
    // Create a new message queue and spawn a task to forward the messages from the queue to the
    // corresponding substream.
    async fn start_message_queue_handler(
        executor: TaskExecutor,
        mut peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
        peer_id: PeerId,
        protocol: ProtocolId,
    ) -> Result<channel::Sender<Bytes>, NetworkError> {
        // Create a channel for the (PeerId, ProtocolId) pair.
        let (msg_tx, msg_rx) = channel::new::<Bytes>(
            1024,
            &counters::OP_COUNTERS.peer_gauge(
                &counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
                &peer_id.short_str(),
            ),
        );
        // Open a new substream for the (PeerId, ProtocolId) pair
        let raw_substream = peer_mgr_reqs_tx.open_substream(peer_id, protocol).await?;
        let substream =
            Framed::new(raw_substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
        // Spawn a task to forward the messages from the queue to the substream.
        let f_substream = async move {
            if let Err(e) = msg_rx.map(Ok).forward(substream).await {
                warn!(
                    "Forward messages to peer {} error {:?}",
                    peer_id.short_str(),
                    e
                );
            }
            // The messages in queue will be dropped
            counters::DIRECT_SEND_MESSAGES_DROPPED.inc_by(
                counters::OP_COUNTERS
                    .peer_gauge(
                        &counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
                        &peer_id.short_str(),
                    )
                    .get(),
            );
        };
        executor.spawn(f_substream.boxed().unit_error().compat());
        Ok(msg_tx)
    }
    // Try to send a message to the message queue.
    async fn try_send_msg(
        &mut self,
        peer_id: PeerId,
        msg: Message,
        peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
    ) -> Result<(), NetworkError> {
        let protocol = msg.protocol.clone();
        // Lazily create the per-(peer, protocol) queue and its forwarding
        // task on first use.
        let substream_queue_tx = match self.message_queues.entry((peer_id, protocol.clone())) {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => {
                let msg_tx = Self::start_message_queue_handler(
                    self.executor.clone(),
                    peer_mgr_reqs_tx,
                    peer_id,
                    protocol.clone(),
                )
                .await?;
                entry.insert(msg_tx)
            }
        };
        substream_queue_tx.send(msg.mdata).await.map_err(|e| {
            // Drop the stale queue so the next send re-opens a substream.
            self.message_queues.remove(&(peer_id, protocol));
            e.into()
        })
    }
    // Handle DirectSendRequest, which can only be SendMessage request for now.
    async fn handle_direct_send_request(&mut self, req: DirectSendRequest) {
        trace!("DirectSendRequest::{:?}", req);
        match req {
            DirectSendRequest::SendMessage(peer_id, msg) => {
                // `msg` is moved into `try_send_msg` directly: the previous
                // `msg.clone()` here was redundant since `msg` is never used
                // after this call.
                if let Err(e) = self
                    .try_send_msg(peer_id, msg, self.peer_mgr_reqs_tx.clone())
                    .await
                {
                    counters::DIRECT_SEND_MESSAGES_DROPPED.inc();
                    warn!("DirectSend to peer {} failed: {}", peer_id.short_str(), e);
                }
            }
        }
    }
}
| true
|
7ee172d6456d981848e047e78e7d91d07986b42f
|
Rust
|
yutopp/erl_tokenize
|
/examples/tokenize.rs
|
UTF-8
| 824
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
extern crate clap;
extern crate erl_tokenize;
#[macro_use]
extern crate trackable;
use std::fs::File;
use std::io::Read;
use clap::{App, Arg};
use erl_tokenize::Tokenizer;
/// Reads the Erlang source file named on the command line, tokenizes it,
/// and prints each token prefixed with its (1-based) source line number.
fn main() {
    let matches = App::new("tokenize")
        .arg(Arg::with_name("SOURCE_FILE").index(1).required(true))
        .get_matches();
    // `unwrap` is safe: the argument is declared `required` above.
    let src_file = matches.value_of("SOURCE_FILE").unwrap();
    let mut src = String::new();
    let mut file = File::open(src_file).expect("Cannot open file");
    file.read_to_string(&mut src).expect("Cannot read file");
    let mut line = 1;
    let tokenizer = Tokenizer::new(&src);
    for token in tokenizer {
        let token = track_try_unwrap!(token, "line={}", line);
        println!("[line:{}] {:?}", line, token);
        // Newline tokens advance the reported line counter.
        if token.text() == "\n" {
            line += 1;
        }
    }
}
| true
|
b21d4c387182f2d7844bb98ff05d7c7f3c879726
|
Rust
|
BrianOn99/matasano-crypto-challenges
|
/src/set1/challenges.rs
|
UTF-8
| 1,216
| 3.140625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
use super::*;
use hex::FromHex;
extern crate base64;
// challenge1
#[test]
fn test_hex_to_base64() {
    // Local helper: decode the hex string, then re-encode the raw bytes
    // as base64.
    fn hex_to_base64(src: &str) -> String {
        let decoded = Vec::from_hex(src).expect("invalid hex string");
        base64::encode(&decoded)
    }
    assert_eq!(
        hex_to_base64("49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d"),
        "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t");
}
// challenge2
#[test]
fn test_xor() {
    // XOR of two equal-length buffers, all operands given as hex.
    assert_eq!(
        xor_buffers(&Vec::from_hex("1c0111001f010100061a024b53535009181c").unwrap(),
                    &Vec::from_hex("686974207468652062756c6c277320657965").unwrap()),
        Vec::from_hex("746865206b696420646f6e277420706c6179").unwrap());
}
#[test]
fn test_short_key() {
    // A one-byte key cycles over both input bytes: 0xe9^0x59, 0x29^0x59.
    let ans: Vec<u8> = vec![0xb0, 0x70];
    assert_eq!(xor_buffers_cycle(&vec![0xe9, 0x29], &vec![0x59]), ans)
}
// challenge5, almost same as challenge2
#[test]
fn test_repeating_key_xor() {
    // The byte-string literal spans two source lines; the embedded newline
    // is part of the plaintext, so the continuation line stays at column 0.
    let plain_text =
        b"Burning 'em, if you ain't quick and nimble
I go crazy when I hear a cymbal";
    let key = b"ICE";
    // Expected ciphertext (hex); `\` joins the two halves without
    // introducing whitespace.
    let ans = Vec::from_hex(
        "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272a282b2f20430a652e2c65\
         2a3124333a653e2b2027630c692b20283165286326302e27282f").unwrap();
    let ciphertext = xor_buffers_cycle(plain_text, key);
    assert_eq!(ciphertext, ans)
}
| true
|
95be0256183ca283d1631075a1365f001a68c5c6
|
Rust
|
exonum/exonum
|
/components/merkledb/tests/migration.rs
|
UTF-8
| 14,175
| 2.734375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
//! This test checks that migration works properly:
//!
//! - Migrated indexes are properly aggregated during and after migration
//! - Migrated data is correctly added / replaced / removed after merge
//! - Migration rollbacks work properly
//!
//! **NB.** For performance, some tests initialize the database outside the test. This should
//! be fine if the test passes, but can lead to weird errors if it fails. In this case,
//! move database initialization inside the test to extract the sequence of actions failing the test.
use exonum_crypto::Hash;
use proptest::{
bool,
collection::vec,
prop_assert_eq, prop_oneof, proptest, sample, strategy,
strategy::Strategy,
test_runner::{Config, TestCaseResult},
};
use std::collections::{HashMap, HashSet};
use exonum_merkledb::{
access::{Access, AccessExt},
migration::{flush_migration, rollback_migration, Migration},
Database, HashTag, IndexAddress, IndexType, ObjectHash, Snapshot, SystemSchema, TemporaryDB,
};
mod work;
use self::work::*;
/// Upper bound on the number of generated migration actions per test case.
const ACTIONS_MAX_LEN: usize = 15;
// NOTE(review): `"tes"` is a prefix of `"test"` — presumably chosen to
// exercise namespace-prefix edge cases; confirm with the test intent.
const NAMESPACES: Strings = &["test", "other", "tes"];
// `"other_"` likewise shares a prefix with the `"other"` namespace above.
const UNRELATED_NAMESPACES: Strings = &["other_", "unrelated"];
/// Shorthand for a static list of static strings.
type Strings = &'static [&'static str];
/// Expected contents of migrated indexes, keyed by (namespace, address).
type NewIndexes = HashMap<(&'static str, IndexAddress), IndexData>;
/// Constituent action applied to the DB during migration.
#[derive(Debug, Clone)]
enum MigrationAction {
    /// Do some work on a certain index. The index may be in the migration, or outside of it
    /// (including the case when the index will be replaced / removed by the migration).
    WorkOnIndex {
        /// Migration namespace. Empty for indexes outside a migration.
        namespace: &'static str,
        /// Index address.
        addr: IndexAddress,
        /// Type to initialize index to if it doesn't exist.
        index_type: IndexType,
        /// Value to insert into the index. If `None`, the index will be cleared instead.
        value: Option<Vec<u8>>,
    },
    /// Create a tombstone for the specified address.
    CreateTombstone {
        namespace: &'static str,
        addr: IndexAddress,
    },
    /// Roll back the specified migration, discarding all of its data.
    Rollback(&'static str),
    /// Flush the fork (persist pending changes into the fork's patch).
    FlushFork,
    /// Merge the fork into the DB, checking intermediate consistency first.
    MergeFork,
}
/// Generates an atomic migration action.
///
/// `namespaces` denotes a list of namespaces in which migrations will be performed. Namespaces
/// should not intersect with `UNRELATED_NAMESPACES`.
fn generate_action(namespaces: Strings) -> impl Strategy<Value = MigrationAction> {
    let work_args = (
        sample::select(namespaces),
        generate_address(),
        generate_index_type(),
        generate_value(),
        bool::ANY,
    );
    // Work tied to a migrated namespace: either inside the migration, or on
    // the namespaced index outside of it (the final boolean decides which).
    let related_work =
        work_args.prop_map(|(namespace, addr, index_type, value, is_in_migration)| {
            if is_in_migration {
                MigrationAction::WorkOnIndex {
                    namespace,
                    addr,
                    index_type,
                    value,
                }
            } else {
                // Outside a migration the namespace becomes part of the
                // address itself, and the action's namespace is empty.
                let addr = addr.prepend_name(namespace);
                MigrationAction::WorkOnIndex {
                    namespace: "",
                    addr,
                    index_type,
                    value,
                }
            }
        });
    let unrelated_work_args = (
        sample::select(UNRELATED_NAMESPACES),
        generate_address(),
        generate_index_type(),
        generate_value(),
    );
    // Work on indexes that no migration should ever touch.
    let unrelated_work = unrelated_work_args.prop_map(|(ns, addr, index_type, value)| {
        let addr = addr.prepend_name(ns);
        MigrationAction::WorkOnIndex {
            namespace: "",
            addr,
            index_type,
            value,
        }
    });
    prop_oneof![
        related_work,
        unrelated_work,
        (sample::select(namespaces), generate_address())
            .prop_map(|(namespace, addr)| MigrationAction::CreateTombstone { namespace, addr }),
        strategy::Just(MigrationAction::FlushFork),
        strategy::Just(MigrationAction::MergeFork),
    ]
}
/// Like [`generate_action`], but with a weighted 1-in-10 chance of rolling
/// back one of the migration namespaces instead.
fn generate_action_with_rollbacks(namespaces: Strings) -> impl Strategy<Value = MigrationAction> {
    prop_oneof![
        9 => generate_action(namespaces),
        1 => sample::select(namespaces).prop_map(MigrationAction::Rollback),
    ]
}
/// Computes the `object_hash` of the Merkelized index `name` within `snapshot`.
///
/// # Panics
///
/// Hits `unreachable!` for non-Merkelized index types; callers only pass
/// `ProofEntry`, `ProofList` or `ProofMap`.
fn get_object_hash<S>(snapshot: &S, name: &str, index_type: IndexType) -> Hash
where
    S: Access,
{
    match index_type {
        IndexType::ProofEntry => snapshot.get_proof_entry::<_, ()>(name).object_hash(),
        IndexType::ProofList => snapshot.get_proof_list::<_, ()>(name).object_hash(),
        IndexType::ProofMap => snapshot.get_proof_map::<_, (), ()>(name).object_hash(),
        _ => unreachable!(),
    }
}
/// Checks the state of a particular state aggregator. `single_indexes` are the expected single
/// indexes in the DB within the `namespace`, together with their types.
fn check_namespace_aggregator<'a>(
    snapshot: &dyn Snapshot,
    namespace: &str,
    single_indexes: impl Iterator<Item = (&'a str, IndexType)>,
) -> TestCaseResult {
    let migration = Migration::new(namespace, snapshot);
    let aggregator = migration.state_aggregator();
    let mut expected_names = HashSet::new();
    for (name, index_type) in single_indexes {
        // Aggregator entries are keyed as "{namespace}.{name}".
        let aggregated_name = format!("{}.{}", namespace, name);
        // Only Merkelized indexes participate in state aggregation.
        let maybe_hash = if index_type.is_merkelized() {
            expected_names.insert(aggregated_name.clone());
            Some(get_object_hash(&migration, name, index_type))
        } else {
            None
        };
        prop_assert_eq!(aggregator.get(&aggregated_name), maybe_hash);
    }
    // The aggregator must contain exactly the expected keys — no extras.
    prop_assert_eq!(aggregator.keys().collect::<HashSet<_>>(), expected_names);
    Ok(())
}
/// Same check as [`check_namespace_aggregator`], but for the default
/// (non-namespaced) system state aggregator.
fn check_default_aggregator<'a>(
    snapshot: &dyn Snapshot,
    single_indexes: impl Iterator<Item = (&'a str, IndexType)>,
) -> TestCaseResult {
    let aggregator = SystemSchema::new(snapshot).state_aggregator();
    let mut expected_names = HashSet::new();
    for (name, index_type) in single_indexes {
        // Only Merkelized indexes participate in state aggregation.
        let maybe_hash = if index_type.is_merkelized() {
            expected_names.insert(name.to_owned());
            Some(get_object_hash(&snapshot, name, index_type))
        } else {
            None
        };
        prop_assert_eq!(aggregator.get(name), maybe_hash);
    }
    prop_assert_eq!(aggregator.keys().collect::<HashSet<_>>(), expected_names);
    Ok(())
}
/// Yields the name and type of every single (non-grouped) index recorded
/// under `namespace`.
fn single_indexes<'a>(
    indexes: &'a NewIndexes,
    namespace: &'static str,
) -> impl Iterator<Item = (&'a str, IndexType)> {
    indexes
        .iter()
        .filter(move |((ns, addr), _)| *ns == namespace && addr.id_in_group().is_none())
        .map(|((_, addr), data)| (addr.name(), data.ty))
}
/// Verifies DB state while migrations are still in flight: the default
/// aggregator must reflect the old indexes, each namespace aggregator the
/// migrated ones, and all migrated contents must match expectations.
fn check_intermediate_consistency(
    snapshot: &dyn Snapshot,
    namespaces: Strings,
    old_single_indexes: &HashMap<String, IndexType>,
    new_indexes: &NewIndexes,
) -> TestCaseResult {
    check_default_aggregator(
        snapshot,
        old_single_indexes
            .iter()
            .map(|(name, ty)| (name.as_str(), *ty)),
    )?;
    for &namespace in namespaces {
        let indexes = single_indexes(new_indexes, namespace);
        check_namespace_aggregator(snapshot, namespace, indexes)?;
    }
    // Every migrated index must contain exactly the recorded values.
    for ((ns, addr), data) in new_indexes {
        let migration = Migration::new(*ns, snapshot);
        data.check(migration, addr.to_owned())?;
    }
    Ok(())
}
/// Verifies DB state after all migrations were flushed: migrated data has
/// replaced the old indexes, every namespace aggregator is empty, and
/// tombstoned indexes leave no trace behind.
fn check_final_consistency(
    snapshot: &dyn Snapshot,
    namespaces: Strings,
    aggregated_indexes: &HashMap<String, IndexType>,
    new_indexes: &HashMap<IndexAddress, IndexData>,
) -> TestCaseResult {
    for (addr, data) in new_indexes {
        data.check(snapshot, addr.to_owned())?;
    }
    check_default_aggregator(
        snapshot,
        aggregated_indexes
            .iter()
            .map(|(name, ty)| (name.as_str(), *ty)),
    )?;
    // After `flush_migration`, namespace aggregators must be fully drained.
    for &namespace in namespaces {
        let ns_aggregator = Migration::new(namespace, snapshot).state_aggregator();
        prop_assert_eq!(ns_aggregator.keys().count(), 0);
    }
    for (name, ty) in aggregated_indexes {
        if *ty == IndexType::Tombstone {
            // The index should be fully removed; thus, creating a `ProofMapIndex` on its place
            // should succeed and it should have a default `object_hash`.
            prop_assert_eq!(
                get_object_hash(&snapshot, name, IndexType::ProofMap),
                HashTag::empty_map_hash()
            );
        }
    }
    Ok(())
}
/// Applies a generated sequence of [`MigrationAction`]s to `db` while
/// tracking the expected state on the side, then flushes every namespace
/// and verifies final consistency (both on the patch and on a fresh
/// snapshot after merge).
fn apply_actions(
    db: &TemporaryDB,
    actions: Vec<MigrationAction>,
    namespaces: Strings,
) -> TestCaseResult {
    // Original single indexes together with their type.
    let mut original_indexes = HashMap::new();
    // All indexes in the migration together with type and expected contents.
    let mut new_indexes: NewIndexes = HashMap::new();
    let mut fork = db.fork();
    for action in actions {
        match action {
            MigrationAction::WorkOnIndex {
                namespace,
                addr,
                index_type,
                value,
            } => {
                let is_in_group = addr.id_in_group().is_some();
                // An empty namespace means the index lives outside any migration.
                let real_type = if namespace.is_empty() {
                    work_on_index(&fork, addr.clone(), index_type, value.clone())
                } else {
                    let migration = Migration::new(namespace, &fork);
                    work_on_index(migration.clone(), addr.clone(), index_type, value.clone())
                };
                if !namespace.is_empty() {
                    // Mirror the expected contents: a `Some` value appends,
                    // `None` clears the index.
                    let entry = new_indexes
                        .entry((namespace, addr))
                        .or_insert_with(|| IndexData {
                            ty: real_type,
                            values: vec![],
                        });
                    if let Some(value) = value {
                        entry.values.push(value);
                    } else {
                        entry.values.clear();
                    }
                } else if !is_in_group {
                    original_indexes.insert(addr.name().to_owned(), real_type);
                }
            }
            MigrationAction::CreateTombstone { namespace, addr } => {
                let migration = Migration::new(namespace, &fork);
                // Tombstones are only created where no index exists yet.
                if migration.index_type(addr.clone()).is_none() {
                    migration.create_tombstone(addr.clone());
                    new_indexes.insert(
                        (namespace, addr),
                        IndexData {
                            ty: IndexType::Tombstone,
                            values: vec![],
                        },
                    );
                }
            }
            MigrationAction::Rollback(namespace) => {
                rollback_migration(&mut fork, namespace);
                // Rolling back discards all expected data for that namespace.
                new_indexes.retain(|(ns, _), _| *ns != namespace);
            }
            MigrationAction::FlushFork => {
                fork.flush();
            }
            MigrationAction::MergeFork => {
                let patch = fork.into_patch();
                check_intermediate_consistency(
                    &patch,
                    namespaces,
                    &original_indexes,
                    &new_indexes,
                )?;
                db.merge(patch).unwrap();
                fork = db.fork();
            }
        }
    }
    for &namespace in namespaces {
        flush_migration(&mut fork, namespace);
    }
    // Compute the final list of indexes. Note that indexes removed in the migration
    // will have `Tombstone` type.
    let new_indexes: HashMap<_, _> = new_indexes
        .into_iter()
        .map(|((ns, addr), data)| {
            let new_addr = addr.prepend_name(ns);
            (new_addr, data)
        })
        .collect();
    let mut aggregated_indexes = original_indexes;
    aggregated_indexes.extend(new_indexes.iter().filter_map(|(addr, data)| {
        if addr.id_in_group().is_none() {
            Some((addr.name().to_owned(), data.ty))
        } else {
            None
        }
    }));
    let patch = fork.into_patch();
    check_final_consistency(&patch, namespaces, &aggregated_indexes, &new_indexes)?;
    db.merge(patch).unwrap();
    // The same invariants must hold on a snapshot taken after the merge.
    let snapshot = db.snapshot();
    check_final_consistency(&snapshot, namespaces, &aggregated_indexes, &new_indexes)?;
    Ok(())
}
// Creates a fresh `TemporaryDB` inside each generated case (slow, hence the
// reduced case count); see the file-level note about moving DB init inside
// a test when debugging failures.
#[test]
fn single_migration_with_honest_db_initialization() {
    const SINGLE_NAMESPACE: Strings = &["test"];
    let config = Config::with_cases(Config::default().cases / 4);
    proptest!(config, |(actions in vec(generate_action(SINGLE_NAMESPACE), 1..ACTIONS_MAX_LEN))| {
        let db = TemporaryDB::new();
        apply_actions(&db, actions, SINGLE_NAMESPACE)?;
    });
}
/// All migration actions are in a single namespace `test`.
// For performance, the DB is shared across cases and cleared after each one.
#[test]
fn single_migration() {
    const SINGLE_NAMESPACE: Strings = &["test"];
    let db = TemporaryDB::new();
    proptest!(|(actions in vec(generate_action(SINGLE_NAMESPACE), 1..ACTIONS_MAX_LEN))| {
        apply_actions(&db, actions, SINGLE_NAMESPACE)?;
        db.clear().unwrap();
    });
}
// Single namespace, but the action stream may also contain rollbacks.
#[test]
fn single_migration_with_rollbacks() {
    const SINGLE_NAMESPACE: Strings = &["test"];
    let db = TemporaryDB::new();
    let action = generate_action_with_rollbacks(SINGLE_NAMESPACE);
    proptest!(|(actions in vec(action, 1..ACTIONS_MAX_LEN))| {
        apply_actions(&db, actions, SINGLE_NAMESPACE)?;
        db.clear().unwrap();
    });
}
// Multiple concurrent namespaces; all migrations are flushed together at
// the end of `apply_actions`.
#[test]
fn multiple_migrations_with_synced_end() {
    let db = TemporaryDB::new();
    proptest!(|(actions in vec(generate_action(NAMESPACES), 1..ACTIONS_MAX_LEN))| {
        apply_actions(&db, actions, NAMESPACES)?;
        db.clear().unwrap();
    });
}
// Same as above, but the action stream may also contain rollbacks.
#[test]
fn multiple_migrations_with_synced_end_and_rollbacks() {
    let db = TemporaryDB::new();
    let action = generate_action_with_rollbacks(NAMESPACES);
    proptest!(|(actions in vec(action, 1..ACTIONS_MAX_LEN))| {
        apply_actions(&db, actions, NAMESPACES)?;
        db.clear().unwrap();
    });
}
| true
|
5e37fca649dc5ec29b43f6019c2f61664f0d5b5f
|
Rust
|
DLR-FT/a653rs
|
/src/apex/types.rs
|
UTF-8
| 7,426
| 2.921875
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
/// ARINC653 types
pub mod basic {
    /// According to ARINC653-P1, the maximum name length is always 32
    pub const MAX_NAME_LENGTH: usize = 32;
    /// Apex internal ReturnCode Type
    pub type ReturnCode = u32;
    /// Fixed-size name buffer; unused trailing bytes are NUL (see
    /// `Name::to_str` in the abstraction module).
    pub type ApexName = [u8; MAX_NAME_LENGTH];
    // Base Types
    pub type ApexByte = u8;
    pub type ApexInteger = i32;
    pub type ApexUnsigned = u32;
    pub type ApexLongInteger = i64;
    pub type MessageSize = ApexUnsigned;
    pub type MessageRange = ApexUnsigned;
    pub type WaitingRange = ApexInteger;
    /// The normal APEX Return Codes without the non-error variant
    #[repr(u32)]
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "strum", derive(strum::FromRepr))]
    pub enum ErrorReturnCode {
        /// status of system unaffected by request
        NoAction = 1,
        /// resource required by request unavailable
        NotAvailable = 2,
        /// invalid parameter specified in request
        InvalidParam = 3,
        /// parameter incompatible with configuration
        InvalidConfig = 4,
        /// request incompatible with current mode
        InvalidMode = 5,
        /// time-out tied up with request has expired
        TimedOut = 6,
    }
    impl ErrorReturnCode {
        /// Convenience function for gaining a Result from a given [ReturnCode]
        ///
        /// # Return Values for given [ReturnCode]
        ///
        /// - `0` => `Ok(())`
        /// - `1..=6` => `Err(Self)`
        /// - `7..` => `panic`
        pub fn from(from: ReturnCode) -> Result<(), Self> {
            use ErrorReturnCode::*;
            match from {
                0 => Ok(()),
                1 => Err(NoAction),
                2 => Err(NotAvailable),
                3 => Err(InvalidParam),
                4 => Err(InvalidConfig),
                5 => Err(InvalidMode),
                6 => Err(TimedOut),
                // Codes above 6 are outside the ARINC653 range; treat as a bug.
                unexpected => panic!("{unexpected}"),
            }
        }
    }
    /// Direction of a port, as a raw `u32` per the APEX ABI.
    #[repr(u32)]
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "strum", derive(strum::FromRepr))]
    pub enum PortDirection {
        Source = 0,
        Destination = 1,
    }
    impl TryFrom<ApexUnsigned> for PortDirection {
        type Error = ApexUnsigned;
        /// Errors with the original value when it is neither 0 nor 1.
        fn try_from(value: ApexUnsigned) -> Result<Self, Self::Error> {
            match value {
                0 => Ok(PortDirection::Source),
                1 => Ok(PortDirection::Destination),
                _ => Err(value),
            }
        }
    }
    /// Queuing discipline of a queuing port, as a raw `u32` per the APEX ABI.
    #[repr(u32)]
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "strum", derive(strum::FromRepr))]
    pub enum QueuingDiscipline {
        /// First in/first out queue
        Fifo = 0,
        /// Priority queue
        Priority = 1,
    }
    impl TryFrom<ApexUnsigned> for QueuingDiscipline {
        type Error = ApexUnsigned;
        /// Errors with the original value when it is neither 0 nor 1.
        fn try_from(value: ApexUnsigned) -> Result<Self, Self::Error> {
            match value {
                0 => Ok(QueuingDiscipline::Fifo),
                1 => Ok(QueuingDiscipline::Priority),
                _ => Err(value),
            }
        }
    }
    /// Identifier of a processor core.
    pub type ProcessorCoreId = ApexInteger;
    /// Sentinel core id meaning "no core affinity preference".
    pub const CORE_AFFINITY_NO_PREFERENCE: ProcessorCoreId = -1;
}
/// Ergonomic abstractions over the raw ARINC653 basic types.
pub mod abstraction {
    use core::str::{FromStr, Utf8Error};
    // Reexport important basic-types for downstream-user
    pub use super::basic::{
        ApexByte, ApexUnsigned, MessageRange, MessageSize, QueuingDiscipline, MAX_NAME_LENGTH,
    };
    use crate::bindings::*;
    /// Error Type used by abstracted functions.
    /// Includes all Variants of [ErrorReturnCode] plus a `WriteError` and `ReadError` variant
    #[derive(Clone, Debug, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    pub enum Error {
        /// status of system unaffected by request
        NoAction,
        /// resource required by request unavailable
        NotAvailable,
        /// invalid parameter specified in request
        InvalidParam,
        /// parameter incompatible with configuration
        InvalidConfig,
        /// request incompatible with current mode
        InvalidMode,
        /// time-out tied up with request has expired
        TimedOut,
        /// buffer has zero length or is too long (see [BufferExt::validate_write])
        WriteError,
        /// buffer is too small (see [BufferExt::validate_read])
        ReadError,
    }
    impl From<ErrorReturnCode> for Error {
        /// Maps each raw APEX error code 1:1 onto the abstracted [Error].
        fn from(rc: ErrorReturnCode) -> Self {
            use Error::*;
            match rc {
                ErrorReturnCode::NoAction => NoAction,
                ErrorReturnCode::NotAvailable => NotAvailable,
                ErrorReturnCode::InvalidParam => InvalidParam,
                ErrorReturnCode::InvalidConfig => InvalidConfig,
                ErrorReturnCode::InvalidMode => InvalidMode,
                ErrorReturnCode::TimedOut => TimedOut,
            }
        }
    }
    /// Convenient Abstraction Name Type
    /// Uses [ApexName] internally
    #[derive(Clone, Debug, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    pub struct Name(ApexName);
    impl Name {
        /// Wraps a raw, NUL-padded APEX name buffer.
        pub const fn new(name: ApexName) -> Self {
            Name(name)
        }
        /// Returns the name as a `&str`, cut off at the first NUL byte
        /// (or spanning the whole buffer if no NUL is present).
        pub fn to_str(&self) -> Result<&str, Utf8Error> {
            let nul_range_end = self
                .0
                .iter()
                .position(|&c| c == b'\0')
                .unwrap_or(self.0.len());
            core::str::from_utf8(&self.0[0..nul_range_end])
        }
        /// Consumes `self` and returns the raw name buffer.
        pub fn into_inner(self) -> ApexName {
            self.0
        }
    }
    impl From<Name> for ApexName {
        fn from(val: Name) -> Self {
            val.0
        }
    }
    impl FromStr for Name {
        type Err = ApexUnsigned;
        /// Builds a [Name] from a string slice; errors with the string's
        /// byte length if it exceeds [MAX_NAME_LENGTH].
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            if s.len() > MAX_NAME_LENGTH {
                return Err(s.len() as ApexUnsigned);
            }
            // Copy the bytes; the remainder of the buffer stays NUL-padded.
            let mut array_name = [0; MAX_NAME_LENGTH];
            array_name[..s.len()].copy_from_slice(s.as_bytes());
            Ok(Self(array_name))
        }
    }
    /// Validation helpers for the raw byte buffers passed to APEX calls.
    pub trait BufferExt {
        /// Validates that the buffer can hold at least `size` bytes.
        /// Returns [Error::ReadError] if the buffer is too small or `size`
        /// does not fit into `usize`.
        fn validate_read(&mut self, size: MessageSize) -> Result<&mut Self, Error>;
        /// Validates that the buffer is non-empty and at most `size` bytes
        /// long. Returns [Error::WriteError] otherwise.
        fn validate_write(&self, size: MessageSize) -> Result<&Self, Error>;
    }
    impl BufferExt for [ApexByte] {
        fn validate_read(&mut self, size: MessageSize) -> Result<&mut Self, Error> {
            // `unwrap_or(true)` treats a `size` that cannot be converted to
            // `usize` as a validation failure.
            if usize::try_from(size)
                .map(|ss| self.len() < ss)
                .unwrap_or(true)
            {
                return Err(Error::ReadError);
            }
            Ok(self)
        }
        fn validate_write(&self, size: MessageSize) -> Result<&Self, Error> {
            // `unwrap_or(false)` lets an unconvertible `size` pass the length
            // check; an empty buffer is always rejected.
            if usize::try_from(size)
                .map(|ss| self.len() > ss)
                .unwrap_or(false)
                || self.is_empty()
            {
                return Err(Error::WriteError);
            }
            Ok(self)
        }
    }
}
| true
|
5033765e20b5e84d23fa41aaa548875a3d8abfdf
|
Rust
|
AntyMew/livesplit-core
|
/src/run/run_metadata.rs
|
UTF-8
| 3,444
| 3.59375
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use indexmap::map::{IndexMap, Iter};
/// The Run Metadata stores additional information about a run, like the
/// platform and region of the game. All of this information is optional.
#[derive(Default, Clone, Debug, PartialEq)]
pub struct RunMetadata {
    /// speedrun.com Run ID this run is associated with ("" = none).
    run_id: String,
    /// Name of the platform the game runs on ("" = unspecified).
    platform_name: String,
    /// Whether the run uses an emulator (`false` may also mean "unknown").
    uses_emulator: bool,
    /// Name of the game's region ("" = unspecified).
    region_name: String,
    /// Arbitrary key/value variables with extra category information.
    variables: IndexMap<String, String>,
}
impl RunMetadata {
    /// Creates Run Metadata where every field is left unspecified.
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }
    /// Returns the speedrun.com Run ID, which identifies the record on
    /// speedrun.com this run is tied to. Empty when there is no association;
    /// it should be updated once the Personal Best no longer matches that
    /// record.
    #[inline]
    pub fn run_id(&self) -> &str {
        self.run_id.as_str()
    }
    /// Stores the speedrun.com Run ID. The caller is responsible for keeping
    /// the speedrun.com record in sync with the Personal Best. Pass an empty
    /// string to drop the association.
    #[inline]
    pub fn set_run_id<S>(&mut self, id: S)
    where
        S: AsRef<str>,
    {
        let id = id.as_ref();
        // Clear + push reuses the field's existing allocation.
        self.run_id.clear();
        self.run_id.push_str(id);
    }
    /// Returns the name of the platform the game runs on, or an empty
    /// string when unspecified.
    #[inline]
    pub fn platform_name(&self) -> &str {
        self.platform_name.as_str()
    }
    /// Stores the platform name; an empty string means unspecified.
    #[inline]
    pub fn set_platform_name<S>(&mut self, name: S)
    where
        S: AsRef<str>,
    {
        let name = name.as_ref();
        self.platform_name.clear();
        self.platform_name.push_str(name);
    }
    /// Reports whether this speedrun was done on an emulator. A `false` can
    /// equally mean the information is simply unknown.
    #[inline]
    pub fn uses_emulator(&self) -> bool {
        self.uses_emulator
    }
    /// Records whether this speedrun was done on an emulator; `false` may
    /// also stand for "unknown".
    #[inline]
    pub fn set_emulator_usage(&mut self, uses_emulator: bool) {
        self.uses_emulator = uses_emulator;
    }
    /// Returns the name of the game's region, or an empty string when
    /// unspecified.
    #[inline]
    pub fn region_name(&self) -> &str {
        self.region_name.as_str()
    }
    /// Stores the region name; an empty string means unspecified.
    #[inline]
    pub fn set_region_name<S>(&mut self, region_name: S)
    where
        S: AsRef<str>,
    {
        let region_name = region_name.as_ref();
        self.region_name.clear();
        self.region_name.push_str(region_name);
    }
    /// Inserts (or overwrites) a variable — an arbitrary key/value pair
    /// carrying extra category information, e.g. whether Amiibos are used.
    pub fn add_variable<N, V>(&mut self, name: N, value: V)
    where
        N: Into<String>,
        V: Into<String>,
    {
        let (key, val) = (name.into(), value.into());
        self.variables.insert(key, val);
    }
    /// Iterates over all specified variables and their values, in insertion
    /// order.
    pub fn variables(&self) -> Iter<String, String> {
        self.variables.iter()
    }
}
| true
|
67575a6f6f1ad10645144cdc920e9d294a7f5c15
|
Rust
|
bk-rs/rust-io-peek
|
/futures-util-io-peek/tests/cursor.rs
|
UTF-8
| 731
| 2.84375
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use futures_executor::block_on;
use futures_util::io::{AsyncReadExt as _, Cursor};
use futures_util_io_peek::AsyncPeekExt as _;
#[test]
fn sample() -> Result<(), Box<dyn std::error::Error>> {
    block_on(async {
        let mut cursor = Cursor::new(vec![1, 2, 3]);
        let mut buf = vec![0; 5];
        // Peek copies the available bytes into `buf` without advancing the
        // cursor's read position.
        let n = cursor.peek_async(&mut buf).await?;
        assert_eq!(buf, vec![1, 2, 3, 0, 0]);
        assert_eq!(n, 3);
        // Data appended after the first peek is visible to the next peek.
        cursor.get_mut().push(4);
        let n = cursor.peek_async(&mut buf).await?;
        assert_eq!(buf, vec![1, 2, 3, 4, 0]);
        assert_eq!(n, 4);
        // A real read still returns everything from position 0, proving the
        // earlier peeks consumed nothing.
        let n = cursor.read(&mut buf).await?;
        assert_eq!(buf, vec![1, 2, 3, 4, 0]);
        assert_eq!(n, 4);
        Ok(())
    })
}
| true
|
f338169dc30e89dcbbaf434cfa476ad10039193b
|
Rust
|
pfeyz/learners
|
/src/domain.rs
|
UTF-8
| 9,316
| 2.65625
| 3
|
[] |
no_license
|
extern crate csv;
extern crate rand;
use std::mem;
use rand::{Rng};
use rand::distributions::{Range, Sample};
use std::error::Error;
use std::collections::{HashSet, HashMap};
use sentence::{SurfaceForm, Illoc};
/// Number of binary syntactic parameters in the Colag domain.
pub const NUM_PARAMS: usize = 13;
/// A grammar: `NUM_PARAMS` parameter bits packed into a `u16`.
pub type Grammar = u16;
/// Sentences are referred to by numeric id.
pub type Sentence = u32;
/// Per-parameter trigger classification for one sentence.
pub type TriggerVec = [Trigger; NUM_PARAMS];
/// Operations over a parametric language domain: maps grammars to the
/// sentences they generate and classifies sentences as parameter triggers.
///
/// The anonymous trait parameters of the original (`fn triggers(&self,
/// &Sentence)`) were deprecated 2015-edition syntax and are a hard error in
/// the 2018 edition; all parameters are now named.
pub trait LanguageDomain {
    /// Returns the set of sentences generated by grammar `g`.
    fn language(&self, g: &Grammar) -> Result<&HashSet<Sentence>, IllegalGrammar>;
    /// Same contents as `language`, but as a `Vec` (useful for sampling).
    fn language_vec(&self, g: &Grammar) -> Result<&Vec<Sentence>, IllegalGrammar>;
    /// Returns the surface form of sentence `s`.
    fn surface_form(&self, s: &Sentence) -> &SurfaceForm;
    /// Returns the per-parameter trigger classification of sentence `s`.
    fn triggers(&self, s: &Sentence) -> &TriggerVec;
    /// Whether grammar `g` generates sentence `s`.
    fn parses(&self, g: &Grammar, s: &Sentence) -> Result<bool, IllegalGrammar>;
    /// Picks a grammar from the domain at random.
    fn random_grammar<T: Rng>(&self, rng: &mut T) -> &Grammar;
}
/// Error returned when a `Grammar` id has no associated language in the
/// domain.
#[derive(Debug)]
pub struct IllegalGrammar {
    /// The offending grammar id.
    grammar: Grammar
}
/// How a sentence relates to a single syntactic parameter.
///
/// `Clone`/`Copy`/`PartialEq`/`Eq` are derived so trigger values can be
/// duplicated and compared directly; the enum is a plain fieldless tag, so
/// `Copy` is free and backward compatible.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Trigger {
    /// Unambiguous evidence the parameter is on.
    On,
    /// Unambiguous evidence the parameter is off.
    Off,
    /// Evidence is ambiguous between parameter settings.
    Ambiguous,
    /// The parameter is irrelevant to this sentence.
    Irrelevant
}
/// One row of the grammar/sentence TSV: (grammar id, sentence id, tree id).
type ColagTsvLine = (u16, u32, u32);
/// The Colag language domain, loaded from on-disk data files.
pub struct Colag {
    /// Grammar id -> set of sentence ids it generates.
    pub language: HashMap<Grammar, HashSet<u32>>,
    /// Same data as `language` but as vectors (for random indexing).
    language_vec: HashMap<Grammar, Vec<Sentence>>,
    /// All grammar ids present in the domain.
    grammars: Vec<Grammar>,
    /// Sentence id -> per-parameter trigger classification.
    trigger: HashMap<Sentence, TriggerVec>,
    /// Sentence id -> surface form.
    surface_form: HashMap<Sentence, SurfaceForm>
}
impl LanguageDomain for Colag {
fn language(&self, g: &Grammar) -> Result<&HashSet<Sentence>, IllegalGrammar> {
self.language.get(g).ok_or_else({|| IllegalGrammar {grammar: *g } })
}
fn language_vec(&self, g: &Grammar) -> Result<&Vec<Sentence>, IllegalGrammar> {
self.language_vec.get(g).ok_or_else({|| IllegalGrammar {grammar: *g } })
}
fn triggers(&self, s: &Sentence) -> &TriggerVec {
self.trigger.get(s).unwrap()
}
fn parses(&self, g: &Grammar, s: &Sentence) -> Result<bool, IllegalGrammar> {
match self.language.get(g) {
None => Err(IllegalGrammar{ grammar: *g }),
Some(sents) => Ok(sents.contains(s))
}
}
fn surface_form(&self, s: &Sentence) -> &SurfaceForm {
self.surface_form.get(s).unwrap()
}
fn random_grammar<T: Rng>(&self, rng: &mut T) -> &Grammar {
rng.choose(&self.grammars).unwrap()
}
}
impl Colag {
pub fn new() -> Colag {
let lang = HashMap::new();
Colag { language: lang,
language_vec: HashMap::new(),
grammars: Vec::new(),
trigger: HashMap::new(),
surface_form: HashMap::new()
}
}
pub fn default() -> Colag {
Colag::from_file("./data/COLAG_2011_ids.txt")
.unwrap()
.read_triggers("./data/irrelevance-output.txt")
.unwrap()
.read_surface_forms("./data/COLAG_2011_sents.txt")
.unwrap()
}
pub fn random_weighted_grammar<T: Rng>(rng: &mut T,
weights: &[f64; NUM_PARAMS]) -> Grammar {
let mut grammar = 0;
for param in 0..NUM_PARAMS {
if weighted_coin_flip(rng, weights[param]) {
grammar = set_param(grammar, param);
}
}
grammar
}
pub fn from_file(filename: &str) -> Result<Colag, Box<Error>> {
let mut rdr = csv::ReaderBuilder::new()
.delimiter(b'\t')
.has_headers(false)
.from_path(filename)
.expect(filename);
let mut domain = Colag::new();
for result in rdr.deserialize() {
let (grammar, sentence, _tree): ColagTsvLine = result?;
if domain.language.contains_key(&grammar){
domain.language.get_mut(&grammar).map(|set| set.insert(sentence));
} else {
let mut set = HashSet::new();
set.insert(sentence);
domain.language.insert(grammar, set);
}
}
for (grammar, sentences) in domain.language.iter() {
domain.language_vec.insert(*grammar,
sentences.clone().into_iter().collect());
}
domain.grammars = domain.language.keys().map(|x| *x).collect();
assert!(domain.language.len() == 3072, "Expected 3072 languages in Colag");
assert!(domain.language_vec.len() == 3072, "Expected 3072 languages in Colag");
{
let english = domain.language.get(&611).unwrap();
assert!(english.len() == 360, "Expected 360 sentences in Colag English");
for s in vec![3138, 1970, 5871, 6923, 1969].iter() {
assert!(english.contains(&s), format!("Expected sentence {} in Colag English", &s))
}
}
Ok(domain)
}
pub fn read_triggers(mut self, filename: &str) -> Result<Self, Box<Error>> {
let mut rdr = csv::ReaderBuilder::new()
.delimiter(b' ')
.has_headers(false)
.from_path(filename)
.expect(filename);
for result in rdr.deserialize() {
let (sentence, trigger_str): (Sentence, String) = result?;
assert!(trigger_str.len() == NUM_PARAMS);
let mut trigger_vec: Vec<Trigger> = trigger_str
.as_bytes()
.iter()
.map(|b| {
match *b {
b'0' => Trigger::Off,
b'1' => Trigger::On,
b'*' => Trigger::Ambiguous,
b'~' => Trigger::Irrelevant,
_ => panic!("illegal char in irrel str")
}
}).collect();
unsafe {
let mut array: TriggerVec = mem::uninitialized();
for i in 0..NUM_PARAMS {
array[i] = trigger_vec.remove(0);
}
self.trigger.insert(sentence, array);
}
}
Ok(self)
}
fn sentence_generators(&self) -> HashMap<&Sentence, &Grammar> {
unimplemented!();
}
fn all_sentences(&self) -> HashSet<Sentence> {
let mut all_sents: HashSet<Sentence> = HashSet::new();
for sents in self.language.values() {
all_sents.extend(sents);
}
all_sents
}
fn unambiguous_trigger(&self, sent: &Sentence, param: usize) -> Result<bool, Vec<Grammar>> {
unimplemented!();
}
fn illegal_grammar(&self, g: &Grammar) -> bool {
unimplemented!();
}
fn ambig_or_irrel(&self, generators: Vec<Grammar>, param: usize) -> Trigger {
for generator in generators.iter() {
let min_pair = toggled(&generator, param);
if !generators.contains(&&min_pair) && !self.illegal_grammar(&&min_pair){
return Trigger::Ambiguous
}
}
Trigger::Irrelevant
}
pub fn gen_triggers(&mut self) {
let sentences = self.all_sentences().into_iter();
for sentence in sentences {
let mut triggers = unsafe {
let mut triggers: TriggerVec = mem::uninitialized();
for param in 0..NUM_PARAMS {
triggers[param] = match self.unambiguous_trigger(&sentence, param) {
Ok(true) => Trigger::On,
Ok(false) => Trigger::Off,
Err(generators) => self.ambig_or_irrel(generators, param)
};
}
triggers
};
self.trigger.insert(sentence, triggers);
}
}
pub fn read_surface_forms(mut self, filename: &str) -> Result<Self, Box<Error>> {
let mut rdr = csv::ReaderBuilder::new()
.delimiter(b'\t')
.has_headers(false)
.from_path(filename)
.expect(filename);
for result in rdr.deserialize() {
let (sentence, illoc, form): (Sentence, String, String) = result?;
let illoc: Illoc = illoc.trim().into();
let mut form: SurfaceForm = form.trim().into();
form.illoc = illoc;
self.surface_form.insert(sentence, form);
}
Ok(self)
}
}
/// Returns `grammar` with parameter `param_num` flipped (not implemented).
fn toggled(grammar: &Grammar, param_num: usize) -> Grammar {
    unimplemented!();
}
/// Extracts the bit for parameter `param_num` from `grammar` (0 or 1).
/// Parameter 0 occupies the most significant of the `NUM_PARAMS` bits.
pub fn get_param(grammar: &Grammar, param_num: usize) -> Grammar {
    let shift = NUM_PARAMS - param_num - 1;
    (grammar >> shift) & 1
}
/// Returns `grammar` with parameter `param_num` turned on.
///
/// Fixed to use bitwise OR instead of addition: the old `+` carried into
/// neighboring parameter bits whenever the bit was already set, corrupting
/// the grammar. OR is idempotent and correct in all cases (unchanged result
/// for callers that only set fresh bits, e.g. `random_weighted_grammar`).
fn set_param(grammar: Grammar, param_num: usize) -> Grammar {
    grammar | (1 << (NUM_PARAMS - param_num - 1))
}
/// Returns true with probability `weight` (a value in [0, 1], not a
/// percentage): a Bernoulli draw.
fn weighted_coin_flip<T: Rng>(rng: &mut T, weight: f64) -> bool {
    debug_assert!((weight >= 0.) & (weight <= 1.));
    // Sample uniformly from [0, 1) and compare against the threshold.
    let mut range = Range::new(0., 1.);
    range.sample(rng) < weight
}
/// Micro-benchmarks (nightly only; run with `cargo bench`).
///
/// Gated behind `#[cfg(test)]` so `extern crate test` — which requires a
/// nightly compiler — is no longer compiled into regular builds.
#[cfg(test)]
mod bench {
    extern crate test;
    use domain::{LanguageDomain, Colag, NUM_PARAMS};
    use self::test::Bencher;
    use rand;
    /// Benchmarks uniform random grammar selection.
    #[bench]
    fn random_grammar(b: &mut Bencher) {
        let colag = Colag::default();
        let ref mut rng = rand::weak_rng();
        b.iter(|| colag.random_grammar(rng));
    }
    /// Benchmarks weighted random grammar construction.
    #[bench]
    fn random_weighted_grammar(b: &mut Bencher) {
        let colag = Colag::default();
        let ref mut rng = rand::weak_rng();
        let ref weights = [0.5; NUM_PARAMS];
        b.iter(|| Colag::random_weighted_grammar(rng, weights));
    }
}
| true
|
d823b99b6f413711fe728464945bad67296834be
|
Rust
|
Journeycorner/advent-of-code
|
/aoc18/src/main.rs
|
UTF-8
| 6,375
| 3.21875
| 3
|
[
"MIT",
"Unlicense"
] |
permissive
|
use std::error::Error;
use std::fmt;
use std::io::{self, Read, Write};
use std::mem;
use std::result;
use std::str::{self, FromStr};
// Builds a boxed string error from format-style arguments and wraps it in
// `Err`, so parse/validation failures read like `return err!("bad {}", x)`.
macro_rules! err {
    ($($tt:tt)*) => { Err(Box::<Error>::from(format!($($tt)*))) }
}
// Shorthand result type used throughout this binary.
type Result<T> = result::Result<T, Box<Error>>;
/// AoC 2018 day 18: read the lumber area from stdin, print the resource
/// value after 10 minutes (part 1), then exploit the simulation's 28-minute
/// cycle to answer part 2 without running 1_000_000_000 steps.
fn main() -> Result<()> {
    let mut input = String::new();
    io::stdin().read_to_string(&mut input)?;
    // Part 1: direct simulation of 10 minutes.
    let minutes = 10;
    let mut area: Area = input.parse()?;
    for _ in 0..minutes {
        area.step();
    }
    writeln!(
        io::stdout(),
        "resource value after {} minutes: {}",
        minutes,
        area.resource_value(),
    )?;
    // Doing 1000000000 will take way too long. Instead, print out resource
    // values at a lower number. It is easy to notice that it is periodic.
    // Specifically, it is periodic over 28 values. Namely,
    // 1_000_000_000 % 28 == 20. The period is active, at minimum, after 1000
    // minutes. Therefore, 1028 % 28 == 20 implies that the resource value
    // after 1028 minutes is the same as the resource value after 1_000_000_000
    // minutes.
    let minutes = 1028;
    let mut area: Area = input.parse()?;
    for _ in 0..minutes {
        area.step();
    }
    writeln!(
        io::stdout(),
        "resource value after {} minutes: {}",
        minutes,
        area.resource_value(),
    )?;
    Ok(())
}
/// A 2-D grid coordinate.
///
/// NOTE(review): this type appears unreferenced in this file — confirm it is
/// used elsewhere before removing.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct Coordinate {
    x: i64,
    y: i64,
}
/// The lumber-collection area: a row-major grid of acres.
///
/// `acres2` is a scratch grid kept alongside `acres` so each simulation step
/// can double-buffer into it instead of reallocating (see `Area::step`).
#[derive(Clone, Debug)]
struct Area {
    acres: Vec<Vec<Acre>>,
    acres2: Vec<Vec<Acre>>,
}
impl Area {
    /// The puzzle's "resource value": number of wooded acres multiplied by
    /// the number of lumberyards.
    fn resource_value(&self) -> usize {
        let (mut wooded, mut lumber) = (0, 0);
        for row in &self.acres {
            for acre in row {
                match acre {
                    Acre::Open => {}
                    Acre::Trees => wooded += 1,
                    Acre::Lumberyard => lumber += 1,
                }
            }
        }
        wooded * lumber
    }
    // I foolishly tried to optimize the code below before realizing it was
    // futile and started looking for a pattern in the output. ---AG
    /// Advances the simulation one minute. Double-buffers through `acres2`
    /// so no allocation happens per step: the new state is written into the
    /// scratch grid, then the grids are swapped.
    fn step(&mut self) {
        let mut new = mem::replace(&mut self.acres2, vec![]);
        for y in 0..self.height() {
            for x in 0..self.width() {
                self.step_cell(x, y, &mut new);
            }
        }
        // The old grid becomes the scratch buffer for the next step.
        self.acres2 = mem::replace(&mut self.acres, vec![]);
        self.acres = new;
    }
    /// Computes the next state of cell (x, y) from `self.acres` and writes
    /// it into `new`, applying the open -> trees -> lumberyard rules.
    fn step_cell(
        &self,
        x: usize,
        y: usize,
        new: &mut Vec<Vec<Acre>>,
    ) {
        use self::Acre::*;
        // Default: the acre keeps its current contents.
        new[y][x] = self.acres[y][x];
        match self.acres[y][x] {
            Open => {
                // Open ground fills with trees if >= 3 neighbors are wooded.
                let count = self.neighbors(
                    x, y, 0, |count, n| {
                        if n == Trees { count + 1 } else { count }
                    },
                );
                if count >= 3 {
                    new[y][x] = Trees;
                }
            }
            Trees => {
                // Trees become a lumberyard if >= 3 neighbors are lumberyards.
                let count = self.neighbors(
                    x, y, 0, |count, n| {
                        if n == Lumberyard { count + 1 } else { count }
                    },
                );
                if count >= 3 {
                    new[y][x] = Lumberyard;
                }
            }
            Lumberyard => {
                // A lumberyard persists only when adjacent to at least one
                // other lumberyard AND at least one acre of trees.
                let (has_lumber, has_trees) = self.neighbors(
                    x, y, (false, false),
                    |(lumber, trees), n| {
                        (lumber || n == Lumberyard, trees || n == Trees)
                    },
                );
                if !has_lumber || !has_trees {
                    new[y][x] = Open;
                }
            }
        }
    }
    /// Folds `f` over the up-to-8 in-bounds neighbors of (ox, oy).
    /// `saturating_sub` clamps the window at the left/top edges (so `0 - 1`
    /// cannot underflow); the width/height check clamps right/bottom.
    fn neighbors<T>(
        &self,
        ox: usize,
        oy: usize,
        init: T,
        mut f: impl FnMut(T, Acre) -> T,
    ) -> T {
        let mut ret = init;
        for y in oy.saturating_sub(1)..=oy.saturating_add(1) {
            for x in ox.saturating_sub(1)..=ox.saturating_add(1) {
                if x == ox && y == oy {
                    continue;
                }
                if x >= self.width() || y >= self.height() {
                    continue;
                }
                ret = f(ret, self.acres[y][x]);
            }
        }
        ret
    }
    /// Grid width in acres (all rows have equal length; enforced by the
    /// `FromStr` impl).
    fn width(&self) -> usize {
        self.acres[0].len()
    }
    /// Grid height in acres.
    fn height(&self) -> usize {
        self.acres.len()
    }
}
impl FromStr for Area {
    type Err = Box<Error>;
    /// Parses the puzzle grid: one character per acre, every row the same
    /// length. The ASCII check makes the per-character byte slicing below
    /// (`line[x..x+1]`) safe.
    fn from_str(s: &str) -> Result<Area> {
        if !s.is_ascii() {
            return err!("area must be in ASCII");
        }
        let ylen = s.lines().count();
        if ylen == 0 {
            return err!("area cannot be empty");
        }
        let xlen = s.lines().next().unwrap().len();
        // Pre-size both the live grid and the step() scratch grid.
        let mut area = Area {
            acres: vec![vec![Acre::Open; xlen]; ylen],
            acres2: vec![vec![Acre::Open; xlen]; ylen],
        };
        for (y, line) in s.lines().enumerate() {
            if line.len() != xlen {
                return err!(
                    "all rows expected to have length {}, but found {}",
                    xlen, line.len()
                );
            }
            for x in 0..line.len() {
                area.acres[y][x] = line[x..x+1].parse()?;
            }
        }
        Ok(area)
    }
}
/// The contents of a single acre of the area.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Acre {
    /// Open ground (`.`).
    Open,
    /// Wooded (`|`).
    Trees,
    /// A lumberyard (`#`).
    Lumberyard,
}
impl FromStr for Acre {
    type Err = Box<Error>;
    /// Parses an acre from the first character of `s`: `.` is open, `|` is
    /// trees, `#` is a lumberyard; anything else (or empty) is an error.
    fn from_str(s: &str) -> Result<Acre> {
        let c = match s.chars().next() {
            Some(c) => c,
            None => return err!("cannot parse acre from empty string"),
        };
        match c {
            '.' => Ok(Acre::Open),
            '|' => Ok(Acre::Trees),
            '#' => Ok(Acre::Lumberyard),
            other => err!("invalid acre: '{}'", other),
        }
    }
}
impl fmt::Display for Area {
    /// Renders the grid: one character per acre, one line per row.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for row in self.acres.iter() {
            for acre in row.iter() {
                write!(f, "{}", acre)?;
            }
            writeln!(f)?;
        }
        Ok(())
    }
}
impl fmt::Display for Acre {
    /// Maps each acre back to its puzzle-input character.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let symbol = match *self {
            Acre::Open => ".",
            Acre::Trees => "|",
            Acre::Lumberyard => "#",
        };
        write!(f, "{}", symbol)
    }
}
| true
|
d5c03ebf4f14123685a9f4b5a40ac7b25da41ed1
|
Rust
|
xaviripo/aoc2020
|
/src/day22.rs
|
UTF-8
| 522
| 2.5625
| 3
|
[] |
no_license
|
pub mod part1;
pub mod part2;
use std::collections::VecDeque;
/// Location of the puzzle input on disk.
pub const INPUT_FILE: &str = "input/22.txt";
/// Splits the puzzle input into the two players' decks.
///
/// Each section is a "Player N:" header line followed by one card value per
/// line; a blank line separates the sections. `take_while` stops at (and
/// consumes) the blank separator; `skip(1)` drops the header.
fn parse<T: Iterator<Item=String>>(mut lines: T) -> (VecDeque<usize>, VecDeque<usize>) {
    let first_deck: VecDeque<usize> = lines
        .by_ref()
        .take_while(|line| !line.is_empty())
        .skip(1)
        .map(|line| line.parse().unwrap())
        .collect();
    // `by_ref` above left the iterator positioned at the second section.
    let second_deck: VecDeque<usize> = lines
        .take_while(|line| !line.is_empty())
        .skip(1)
        .map(|line| line.parse().unwrap())
        .collect();
    (first_deck, second_deck)
}
| true
|
66c32c8f5595e5cfe0c341b86f158570597b4b1e
|
Rust
|
Joey9801/igc-rs
|
/src/util/manufacturer.rs
|
UTF-8
| 4,632
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(
feature = "serde",
derive(Deserialize, Serialize),
serde(rename_all = "lowercase")
)]
pub enum Manufacturer<'a> {
Aircotec,
CambridgeAeroInstruments,
ClearNavInstruments,
DataSwan,
EwAvionics,
Filser,
Flarm,
Flytech,
Garrecht,
ImiGlidingEquipment,
Logstream,
LxNavigation,
LxNav,
Naviter,
NewTechnologies,
NielsenKellerman,
Peschges,
PressFinishElectronics,
PrintTechnik,
Scheffel,
StreamlineDataInstruments,
TriadisEngineering,
Zander,
UnknownSingle(u8),
UnknownTriple(&'a str),
}
impl<'a> Manufacturer<'a> {
pub fn parse_single_char(character: u8) -> Self {
use self::Manufacturer::*;
match character {
b'I' => Aircotec,
b'C' => CambridgeAeroInstruments,
b'D' => DataSwan,
b'E' => EwAvionics,
b'F' => Filser,
b'G' => Flarm,
b'A' => Garrecht,
b'M' => ImiGlidingEquipment,
b'L' => LxNavigation,
b'V' => LxNav,
b'N' => NewTechnologies,
b'K' => NielsenKellerman,
b'P' => Peschges,
b'R' => PrintTechnik,
b'H' => Scheffel,
b'S' => StreamlineDataInstruments,
b'T' => TriadisEngineering,
b'Z' => Zander,
unknown => UnknownSingle(unknown),
}
}
pub fn parse_triple_char(triple: &'a str) -> Self {
use self::Manufacturer::*;
match triple {
"ACT" => Aircotec,
"CAM" => CambridgeAeroInstruments,
"CNI" => ClearNavInstruments,
"DSX" => DataSwan,
"EWA" => EwAvionics,
"FIL" => Filser,
"FLA" => Flarm,
"FLY" => Flytech,
"GCS" => Garrecht,
"IMI" => ImiGlidingEquipment,
"LGS" => Logstream,
"LXN" => LxNavigation,
"LXV" => LxNav,
"NAV" => Naviter,
"NTE" => NewTechnologies,
"NKL" => NielsenKellerman,
"PES" => Peschges,
"PFE" => PressFinishElectronics,
"PRT" => PrintTechnik,
"SCH" => Scheffel,
"SDI" => StreamlineDataInstruments,
"TRI" => TriadisEngineering,
"ZAN" => Zander,
_ => UnknownTriple(triple),
}
}
pub fn to_single_char(&self) -> Option<u8> {
use self::Manufacturer::*;
// It's sad that rustfmt currently nukes the alignment on these match arms
match self {
Aircotec => Some(b'I'),
CambridgeAeroInstruments => Some(b'C'),
DataSwan => Some(b'D'),
EwAvionics => Some(b'E'),
Filser => Some(b'F'),
Flarm => Some(b'G'),
Garrecht => Some(b'A'),
ImiGlidingEquipment => Some(b'M'),
LxNavigation => Some(b'L'),
LxNav => Some(b'V'),
NewTechnologies => Some(b'N'),
NielsenKellerman => Some(b'K'),
Peschges => Some(b'P'),
PrintTechnik => Some(b'R'),
Scheffel => Some(b'H'),
StreamlineDataInstruments => Some(b'S'),
TriadisEngineering => Some(b'T'),
Zander => Some(b'Z'),
UnknownSingle(s) => Some(*s),
_ => None,
}
}
pub fn to_triple_char(&self) -> Option<&'a str> {
use self::Manufacturer::*;
match self {
Aircotec => Some("ACT"),
CambridgeAeroInstruments => Some("CAM"),
ClearNavInstruments => Some("CNI"),
DataSwan => Some("DSX"),
EwAvionics => Some("EWA"),
Filser => Some("FIL"),
Flarm => Some("FLA"),
Flytech => Some("FLY"),
Garrecht => Some("GCS"),
ImiGlidingEquipment => Some("IMI"),
Logstream => Some("LGS"),
LxNavigation => Some("LXN"),
LxNav => Some("LXV"),
Naviter => Some("NAV"),
NewTechnologies => Some("NTE"),
NielsenKellerman => Some("NKL"),
Peschges => Some("PES"),
PressFinishElectronics => Some("PFE"),
PrintTechnik => Some("PRT"),
Scheffel => Some("SCH"),
StreamlineDataInstruments => Some("SDI"),
TriadisEngineering => Some("TRI"),
Zander => Some("ZAN"),
UnknownTriple(t) => Some(t),
_ => None,
}
}
}
| true
|
977f0a0bd42482a2df8797b8b173b3f691b91972
|
Rust
|
alexmeli100/pbrt-rust
|
/src/core/pbrt.rs
|
UTF-8
| 6,814
| 2.78125
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::ops::{Sub, Add, Mul, BitAnd};
use num::{One, Zero};
use num::traits::Pow;
use std::sync::{Arc, Weak};
use std::path::PathBuf;
use lazy_static::lazy_static;
use indicatif::ProgressBar;
use parking_lot::RwLock;
lazy_static! {
static ref PB: RwLock<Option<Weak<ProgressBar>>> = RwLock::new(None);
}
pub fn set_progress_bar(pb: Option<Weak<ProgressBar>>) {
*PB.write() = pb;
}
pub fn get_progress_bar() -> Option<Arc<ProgressBar>> {
PB.read().as_ref()?.upgrade()
}
pub type Float = f32;
pub const PI : Float = 3.14159265358979323846;
pub const PI_OVER2 : Float = 1.57079632679489661923;
pub const PI_OVER4 : Float = 0.78539816339744830961;
pub const INV_PI : Float = 0.31830988618379067154;
pub const INV2_PI : Float = 0.15915494309189533577;
pub const INV4_PI : Float = 0.07957747154594766788;
pub const INFINITY : Float = std::f32::INFINITY;
pub const SHADOW_EPSILON : Float = 0.0001;
pub const MACHINE_EPSILON : Float = std::f32::EPSILON * 0.5;
pub const SQRT2 : Float = 1.41421356237309504880;
#[derive(Default, Clone)]
pub struct Options {
pub quick_render : bool,
pub quiet : bool,
pub cat : bool,
pub to_ply : bool,
pub image_file : PathBuf,
pub integrator_name : String,
pub crop_window : [[Float; 2]; 2]
}
impl Options {
pub fn new() -> Self {
Self {
crop_window: [[0.0, 1.0], [0.0, 1.0]],
..Default::default()
}
}
}
#[inline(always)]
pub fn float_to_bits(f: f32) -> u32 {
let ui: u32;
unsafe {
let res: u32 = std::mem::transmute_copy(&f);
ui = res;
}
ui
}
#[inline(always)]
pub fn bits_to_float(ui: u32) -> f32 {
let f: f32;
unsafe {
let res: f32 = std::mem::transmute_copy(&ui);
f = res;
}
f
}
pub fn next_float_up(v: f32) -> f32 {
if v.is_infinite() && v > 0.0 { return v; }
let mut i = v;
if i == -0.0 { i = 0.0; }
let mut ui = float_to_bits(i);
if i >= 0.0 {
ui += 1;
} else {
ui -= 1;
}
bits_to_float(ui)
}
pub fn next_float_down(v: f32) -> f32 {
if v.is_infinite() && v < 0.0 { return v; }
let mut i = v;
if i == 0.0 { i = -0.0; }
let mut ui = float_to_bits(i);
if i > 0.0 {
ui -= 1;
} else {
ui += 1;
}
bits_to_float(ui)
}
pub fn log2(x: Float) -> Float {
let inv_log2 = 1.442695040888963387004650940071;
x.ln() * inv_log2
}
pub fn log2_uint(v: u32) -> i32 {
31_i32 - v.leading_zeros() as i32
}
pub fn log2_int(v: i32) -> i32 {
log2_uint(v as u32)
}
pub fn log2_uint64(v: u64) -> i64 {
63 - v.leading_zeros() as i64
}
pub fn log2_int64(v: i64) -> i64 {
log2_uint64(v as u64)
}
pub fn lerp<T, S>(t: S, x: T, y: T) -> T
where
S: Copy + num::One + Sub<S, Output=S>,
T: Add<T, Output=T> + Mul<S, Output=T>
{
let one: S = One::one();
x * (one - t) + y * t
}
#[inline]
pub fn quadratic(a: Float, b: Float, c: Float, t0: &mut Float, t1: &mut Float) -> bool {
// Find quadratic discriminant
let discrim = b as f64 * b as f64 - 4.0 * a as f64 * c as f64;
if discrim < 0.0 { return false; }
let root_discrim = discrim.sqrt();
// Compute quadratic t values
let q = if b < 0.0 {
-0.5 * (b as f64 - root_discrim)
} else {
-0.5 * (b as f64 + root_discrim)
};
*t0 = (q / a as f64) as Float;
*t1 = (c as f64 / q) as Float;
if *t0 > *t1 { std::mem::swap(t0, t1); }
true
}
#[inline(always)]
pub fn radians(deg: Float) -> Float {
(PI / 180.0) as Float * deg
}
pub fn clamp<T>(val: T, low: T, high: T) -> T
where T: PartialOrd
{
if val < low {
low
} else if val > high {
high
} else {
val
}
}
pub fn find_interval<F>(size: i32, pred: F) -> i32
where F: Fn(i32) -> bool
{
let mut first = 0;
let mut len = size;
while len > 0 {
let half = len >> 1;
let middle = first + half;
// Bisect range based on value of pred at middle
if pred(middle) {
first = middle + 1;
len -= half + 1;
} else {
len = half;
}
}
clamp(first - 1, 0, size - 2)
}
pub fn gamma(n: isize) -> Float {
(n as Float * MACHINE_EPSILON) / (1.0 - n as Float * MACHINE_EPSILON)
}
pub fn gamma_correct(value: Float) -> Float {
if value <= 0.0031308 {
return 12.92 * value;
}
1.055 * value.pow(1.0 / 2.4) - 0.055
}
pub fn inverse_gamma_correct(value: Float) -> Float {
if value <= 0.04045 { return value * 1.0 / 12.92 }
((value + 0.055) * 1.0 / 1.055).pow(2.4)
}
pub fn mod_<T>(a: T, b: T) -> T
where
T: Copy + Zero + PartialOrd + num::Num
{
let result = a - (a / b) * b;
match result < Zero::zero() {
true => result + b,
_ => result
}
}
pub fn is_power_of2<T>(v: T) -> bool
where T: Copy + num::One + num::Zero + PartialOrd + BitAnd<T, Output=T> + Sub<T, Output=T>
{
(v > T::zero()) && !((v & (v - T::one())) > T::zero())
}
pub fn round_up_pow2_32(mut v: i32) -> i32 {
v -= 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v + 1
}
pub fn round_up_pow2_64(mut v: i64) -> i64 {
v -= 1;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v |= v >> 32;
v + 1
}
pub fn erf(mut x: Float) -> Float {
// constants
let a1 = 0.254829592;
let a2 = -0.284496736;
let a3 = 1.421413741;
let a4 = -1.453152027;
let a5 = 1.061405429;
let p = 0.3275911;
// Save the sign of x
let sign = if x < 0.0 { -1 } else { 1 };
x = x.abs();
// A&S formula 7.1.26
let t = 1.0 / (1.0 + p * x);
let y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * (-x * x).exp();
sign as Float * y
}
pub fn erf_inv(mut x: Float) -> Float {
let mut p: Float;
x = clamp(x, -0.99999, 0.99999);
let mut w = -((1.0 - x) * (1.0 + x)).ln();
if w < 5.0 {
w = w - 2.5;
p = 2.81022636e-08;
p = 3.43273939e-07 + p * w;
p = -3.5233877e-06 + p * w;
p = -4.39150654e-06 + p * w;
p = 0.00021858087 + p * w;
p = -0.00125372503 + p * w;
p = -0.00417768164 + p * w;
p = 0.246640727 + p * w;
p = 1.50140941 + p * w;
} else {
w = w.sqrt() - 3.0;
p = -0.000200214257;
p = 0.000100950558 + p * w;
p = 0.00134934322 + p * w;
p = -0.00367342844 + p * w;
p = 0.00573950773 + p * w;
p = -0.0076224613 + p * w;
p = 0.00943887047 + p * w;
p = 1.00167406 + p * w;
p = 2.83297682 + p * w;
}
p * x
}
| true
|
2953aef49047961bc513f3565f6a3e4a34343cf4
|
Rust
|
brayden-marshall/Logo
|
/src/lib.rs
|
UTF-8
| 2,435
| 3.5
| 4
|
[] |
no_license
|
mod command;
mod error;
mod evaluator;
mod lexer;
mod parser;
use error::LogoError;
use evaluator::Evaluator;
use lexer::Lexer;
use parser::Parser;
// re-exports
pub use evaluator::Instruction;
pub use command::Command;
/// Exposed type that acts as the interface to the library.
pub struct Interpreter {
evaluator: Evaluator,
}
impl Interpreter {
pub fn new() -> Self {
Interpreter {
evaluator: Evaluator::new(),
}
}
/// # Args
/// - self
/// - source: program source code to be run
///
/// Goes through all phases of the interpreter (lexing, parsing, and evaluation) and
/// returns a set instructions to be run by the frontend.
///
/// # Return
/// Returns a Vec of Instruction objects if the program runs successfully. The
/// instructions being returned correspond to the turtle commands that will be
/// run by the frontend. This includes things such as
/// - Movement commands (forward, left, setxy ...)
/// - Console output (show)
/// - Misc. turtle commands (penup, hideturtle, setscreencolor ...)
/// - Exit command
///
/// Returns a LogoError if an error is encountered during execution.
///
/// # Side effects
/// Not all valid programs will return a set of instructions. Some programs will
/// modify the state of the evaluator without having to send instructions to the
/// frontend. Some examples include the following:
/// - Declaring variables
/// - Modifying variables
/// - Declaring procedures
///
/// If one of these programs runs successfully, it will return a Vec of length 0
/// as the instructions set. If it fails, it will return an error as usual.
pub fn run_program(&mut self, source: &str) -> Result<Vec<Instruction>, LogoError> {
// lexing phase
let mut lexer = Lexer::new(&source);
let tokens = match lexer.collect_tokens() {
Ok(t) => Ok(t),
Err(e) => Err(LogoError::Lex(e)),
}?;
// parsing phase
let mut parser = Parser::new(&tokens);
let ast = match parser.build_ast() {
Ok(ast) => Ok(ast),
Err(e) => Err(LogoError::Parse(e)),
}?;
// evaluation phase
match self.evaluator.evaluate_ast(&ast) {
Ok(instructions) => Ok(instructions),
Err(e) => Err(LogoError::Runtime(e)),
}
}
}
| true
|
6a2e32a64c567108c82bce065b2e701ecbb5025a
|
Rust
|
adriano-moreira/benchmark
|
/rust_project/src/nothing.rs
|
UTF-8
| 99
| 2.8125
| 3
|
[] |
no_license
|
/// Benchmark workload: computes a trivial sum and prints only when it equals
/// five — which `2 + 2` never does, so this is effectively a no-op body that
/// the optimizer cannot fully discard (the branch depends on the sum).
pub fn exec() {
    let sum = 2 + 2;
    if sum == 5 {
        print!("value is five")
    }
}
| true
|
40a75e1d0b640fc2443db87d3ecf2862476a4328
|
Rust
|
bevyengine/bevy
|
/examples/stress_tests/many_glyphs.rs
|
UTF-8
| 2,571
| 2.984375
| 3
|
[
"Apache-2.0",
"MIT",
"Zlib",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
//! Simple text rendering benchmark.
//!
//! Creates a `Text` with a single `TextSection` containing `100_000` glyphs,
//! and renders it with the UI in a white color and with Text2d in a red color.
//!
//! To recompute all text each frame run
//! `cargo run --example many_glyphs --release recompute-text`
use bevy::{
diagnostic::{FrameTimeDiagnosticsPlugin, LogDiagnosticsPlugin},
prelude::*,
text::{BreakLineOn, Text2dBounds},
window::{PresentMode, WindowPlugin},
};
fn main() {
    let mut app = App::new();
    app.add_plugins((
        DefaultPlugins.set(WindowPlugin {
            primary_window: Some(Window {
                // Uncap the frame rate so the benchmark is not vsync-limited.
                present_mode: PresentMode::AutoNoVsync,
                ..default()
            }),
            ..default()
        }),
        // Log frame-time diagnostics so throughput can be measured.
        FrameTimeDiagnosticsPlugin,
        LogDiagnosticsPlugin::default(),
    ))
    .add_systems(Startup, setup);
    // Optional stress mode: mark all text changed every frame so glyph
    // layout is recomputed continuously.
    if std::env::args().any(|arg| arg == "recompute-text") {
        app.add_systems(Update, force_text_recomputation);
    }
    app.run();
}
/// Spawns the camera plus two copies of a 100_000-glyph text: one UI
/// `TextBundle` (white) and one `Text2dBundle` (red).
fn setup(mut commands: Commands) {
    warn!(include_str!("warning_string.txt"));
    commands.spawn(Camera2dBundle::default());
    // Single section of 100_000 glyphs at a tiny font size.
    let mut text = Text {
        sections: vec![TextSection {
            value: "0123456789".repeat(10_000),
            style: TextStyle {
                font_size: 4.,
                color: Color::WHITE,
                ..default()
            },
        }],
        alignment: TextAlignment::Left,
        linebreak_behavior: BreakLineOn::AnyCharacter,
    };
    // UI copy (white), centered inside a full-size flex node, 1000 px wide.
    commands
        .spawn(NodeBundle {
            style: Style {
                flex_basis: Val::Percent(100.),
                align_items: AlignItems::Center,
                justify_content: JustifyContent::Center,
                ..default()
            },
            ..default()
        })
        .with_children(|commands| {
            commands.spawn(TextBundle {
                text: text.clone(),
                style: Style {
                    width: Val::Px(1000.),
                    ..Default::default()
                },
                ..Default::default()
            });
        });
    // Text2d copy of the same content, recolored red, bounded to 1000 px
    // wide with unbounded height.
    text.sections[0].style.color = Color::RED;
    commands.spawn(Text2dBundle {
        text,
        text_anchor: bevy::sprite::Anchor::Center,
        text_2d_bounds: Text2dBounds {
            size: Vec2::new(1000., f32::INFINITY),
        },
        ..Default::default()
    });
}
/// Marks every `Text` component changed each frame so its glyph layout is
/// recomputed (stress mode; enabled by the `recompute-text` CLI argument).
fn force_text_recomputation(mut text_query: Query<&mut Text>) {
    text_query
        .iter_mut()
        .for_each(|mut text| text.set_changed());
}
| true
|
31b949bef1c67805c2c6c3d862b2e5abdae85a9a
|
Rust
|
dgreid/crosvm
|
/fuse/src/filesystem.rs
|
UTF-8
| 51,652
| 2.640625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use std::convert::TryInto;
use std::ffi::CStr;
use std::fs::File;
use std::io;
use std::mem;
use std::time::Duration;
use crate::sys;
use crate::server::Mapper;
pub use crate::sys::{
FsOptions, IoctlFlags, IoctlIovec, OpenOptions, RemoveMappingOne, SetattrValid, ROOT_ID,
};
// 1 MiB. NOTE(review): usage is not visible in this chunk — presumably caps
// request/reply buffer sizes; confirm against the rest of the module.
const MAX_BUFFER_SIZE: u32 = 1 << 20;
/// Information about a path in the filesystem.
///
/// Converted to the FUSE wire format via `From<Entry> for sys::EntryOut`,
/// which splits each timeout `Duration` into seconds plus nanoseconds.
pub struct Entry {
    /// An `Inode` that uniquely identifies this path. During `lookup`, setting this to `0` means a
    /// negative entry. Returning `ENOENT` also means a negative entry but setting this to `0`
    /// allows the kernel to cache the negative result for `entry_timeout`. The value should be
    /// produced by converting a `FileSystem::Inode` into a `u64`.
    pub inode: u64,
    /// The generation number for this `Entry`. Typically used for network file systems. An `inode`
    /// / `generation` pair must be unique over the lifetime of the file system (rather than just
    /// the lifetime of the mount). In other words, if a `FileSystem` implementation re-uses an
    /// `Inode` after it has been deleted then it must assign a new, previously unused generation
    /// number to the `Inode` at the same time.
    pub generation: u64,
    /// Inode attributes. Even if `attr_timeout` is zero, `attr` must be correct. For example, for
    /// `open()`, FUSE uses `attr.st_size` from `lookup()` to determine how many bytes to request.
    /// If this value is not correct, incorrect data will be returned.
    pub attr: libc::stat64,
    /// How long the values in `attr` should be considered valid. If the attributes of the `Entry`
    /// are only modified by the FUSE client, then this should be set to a very large value.
    pub attr_timeout: Duration,
    /// How long the name associated with this `Entry` should be considered valid. If directory
    /// entries are only changed or deleted by the FUSE client, then this should be set to a very
    /// large value.
    pub entry_timeout: Duration,
}
impl From<Entry> for sys::EntryOut {
    /// Converts to the FUSE wire-format `EntryOut`, splitting each timeout
    /// `Duration` into whole seconds plus a nanosecond remainder.
    fn from(entry: Entry) -> sys::EntryOut {
        sys::EntryOut {
            nodeid: entry.inode,
            generation: entry.generation,
            entry_valid: entry.entry_timeout.as_secs(),
            attr_valid: entry.attr_timeout.as_secs(),
            entry_valid_nsec: entry.entry_timeout.subsec_nanos(),
            attr_valid_nsec: entry.attr_timeout.subsec_nanos(),
            attr: entry.attr.into(),
        }
    }
}
/// Represents information about an entry in a directory.
///
/// The lifetime `'a` ties `name` to the buffer it was read from.
pub struct DirEntry<'a> {
    /// The inode number for this entry. This does NOT have to be the same as the `Inode` for this
    /// directory entry. However, it must be the same as the `attr.st_ino` field of the `Entry` that
    /// would be returned by a `lookup` request in the parent directory for `name`.
    pub ino: libc::ino64_t,
    /// Any non-zero value that the kernel can use to identify the current point in the directory
    /// entry stream. It does not need to be the actual physical position. A value of `0` is
    /// reserved to mean "from the beginning" and should never be used. The `offset` value of the
    /// first entry in a stream should point to the beginning of the second entry and so on.
    pub offset: u64,
    /// The type of this directory entry. Valid values are any of the `libc::DT_*` constants.
    pub type_: u32,
    /// The name of this directory entry. There are no requirements for the contents of this field
    /// and any sequence of bytes is considered valid.
    pub name: &'a CStr,
}
/// A reply to a `getxattr` method call.
///
/// Which variant to return depends on the `size` parameter of the request; see the
/// documentation of each variant below.
pub enum GetxattrReply {
    /// The value of the requested extended attribute. This can be arbitrary textual or binary data
    /// and does not need to be nul-terminated.
    Value(Vec<u8>),
    /// The size of the buffer needed to hold the value of the requested extended attribute. Should
    /// be returned when the `size` parameter is 0. Callers should note that it is still possible
    /// for the size of the value to change in between `getxattr` calls and should not assume that a
    /// subsequent call to `getxattr` with the returned count will always succeed.
    Count(u32),
}
/// A reply to a `listxattr` method call.
///
/// Which variant to return depends on the `size` parameter of the request; see the
/// documentation of each variant below.
pub enum ListxattrReply {
    /// A buffer containing a nul-separated list of the names of all the extended attributes
    /// associated with this `Inode`. This list of names may be unordered and includes a namespace
    /// prefix. There may be several disjoint namespaces associated with a single `Inode`.
    Names(Vec<u8>),
    /// The size of the buffer needed to hold the full list of extended attribute names associated
    /// with this `Inode`. Should be returned when the `size` parameter is 0. Callers should note
    /// that it is still possible for the set of extended attributes to change between `listxattr`
    /// calls and so should not assume that a subsequent call to `listxattr` with the returned count
    /// will always succeed.
    Count(u32),
}
/// A reply to an `ioctl` method call.
///
/// `Retry` is only meaningful for unrestricted ioctls; ordinary ioctls should reply
/// with `Done`.
pub enum IoctlReply {
    /// Indicates that the ioctl should be retried. This is only a valid reply when the `flags`
    /// field of the ioctl request contains `IoctlFlags::UNRESTRICTED`. The kernel will read in data
    /// and prepare output buffers as specified in the `input` and `output` fields before re-sending
    /// the ioctl message.
    Retry {
        /// Data that should be read by the kernel module and sent to the server when the ioctl is
        /// retried.
        input: Vec<IoctlIovec>,
        /// Buffer space that should be prepared so that the server can send back the response to
        /// the ioctl.
        output: Vec<IoctlIovec>,
    },
    /// Indicates that the ioctl was processed.
    Done(io::Result<Vec<u8>>),
}
/// A trait for directly copying data from the fuse transport into a `File` without first storing it
/// in an intermediate buffer.
pub trait ZeroCopyReader {
    /// Copies at most `count` bytes from `self` directly into `f` at offset `off` without storing
    /// it in any intermediate buffers. If the return value is `Ok(n)` then it must be guaranteed
    /// that `0 <= n <= count`. If `n` is `0`, then it can indicate one of 3 possibilities:
    ///
    /// 1. There is no more data left in `self`.
    /// 2. There is no more space in `f`.
    /// 3. `count` was `0`.
    ///
    /// # Errors
    ///
    /// If any error is returned then the implementation must guarantee that no bytes were copied
    /// from `self`. If the underlying write to `f` returns `0` then the implementation must return
    /// an error of the kind `io::ErrorKind::WriteZero`.
    fn read_to(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<usize>;
    /// Copies exactly `count` bytes of data from `self` into `f` at offset `off`. `off + count`
    /// must be less than `u64::MAX`.
    ///
    /// Implemented by repeatedly calling `read_to`, retrying on `Interrupted` errors and
    /// treating a zero-byte copy as a `WriteZero` error.
    ///
    /// # Errors
    ///
    /// If an error is returned then the number of bytes copied from `self` is unspecified but it
    /// will never be more than `count`.
    fn read_exact_to(&mut self, f: &mut File, mut count: usize, mut off: u64) -> io::Result<()> {
        // Reject requests whose end position would not fit in a u64.
        let count_u64: u64 = count
            .try_into()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
        if count_u64.checked_add(off).is_none() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "`off` + `count` must be less than u64::MAX",
            ));
        }
        while count > 0 {
            let copied = match self.read_to(f, count, off) {
                Ok(0) => {
                    return Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to fill whole buffer",
                    ))
                }
                Ok(n) => n,
                // Interrupted copies are transient; just try again.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
                Err(e) => return Err(e),
            };
            // `copied <= count` per the `read_to` contract, and the overflow check above
            // guarantees `off + copied` cannot wrap.
            count -= copied;
            off += copied as u64;
        }
        Ok(())
    }
    /// Copies all remaining bytes from `self` into `f` at offset `off`. Equivalent to repeatedly
    /// calling `read_to` until it returns either `Ok(0)` or a non-`ErrorKind::Interrupted` error.
    ///
    /// # Errors
    ///
    /// If an error is returned then the number of bytes copied from `self` is unspecified.
    fn copy_to_end(&mut self, f: &mut File, mut off: u64) -> io::Result<usize> {
        let mut total = 0;
        loop {
            let copied = match self.read_to(f, ::std::usize::MAX, off) {
                // A zero-byte copy means the source is exhausted (or the sink is full).
                Ok(0) => return Ok(total),
                Ok(n) => n,
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
                Err(e) => return Err(e),
            };
            total += copied;
            off = off.saturating_add(copied as u64);
        }
    }
}
// Forwarding impl: a `&mut R` is itself a `ZeroCopyReader`. Every method (including the
// provided ones) delegates to `R` so that any specialized overrides on `R` are preserved.
impl<'a, R: ZeroCopyReader> ZeroCopyReader for &'a mut R {
    fn read_to(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<usize> {
        (**self).read_to(f, count, off)
    }
    fn read_exact_to(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<()> {
        (**self).read_exact_to(f, count, off)
    }
    fn copy_to_end(&mut self, f: &mut File, off: u64) -> io::Result<usize> {
        (**self).copy_to_end(f, off)
    }
}
/// A trait for directly copying data from a `File` into the fuse transport without first storing
/// it in an intermediate buffer.
pub trait ZeroCopyWriter {
    /// Copies at most `count` bytes from `f` at offset `off` directly into `self` without storing
    /// it in any intermediate buffers. If the return value is `Ok(n)` then it must be guaranteed
    /// that `0 <= n <= count`. If `n` is `0`, then it can indicate one of 3 possibilities:
    ///
    /// 1. There is no more data left in `f`.
    /// 2. There is no more space in `self`.
    /// 3. `count` was `0`.
    ///
    /// # Errors
    ///
    /// If any error is returned then the implementation must guarantee that no bytes were copied
    /// from `f`. If the underlying read from `f` returns `0` then the implementation must return an
    /// error of the kind `io::ErrorKind::UnexpectedEof`.
    fn write_from(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<usize>;
    /// Copies exactly `count` bytes of data from `f` at offset `off` into `self`. `off + count`
    /// must be less than `u64::MAX`.
    ///
    /// # Errors
    ///
    /// If an error is returned then the number of bytes copied from `f` is unspecified but it
    /// will never be more than `count`.
    fn write_all_from(&mut self, f: &mut File, mut count: usize, mut off: u64) -> io::Result<()> {
        let c = count
            .try_into()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?;
        if off.checked_add(c).is_none() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "`off` + `count` must be less than u64::MAX",
            ));
        }
        while count > 0 {
            match self.write_from(f, count, off) {
                Ok(0) => {
                    return Err(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        "failed to write whole buffer",
                    ))
                }
                Ok(n) => {
                    // No need for checked math here because we verified that `off + count` will not
                    // overflow and `n` must be <= `count`.
                    count -= n;
                    off += n as u64;
                }
                // Interrupted writes are transient; retry.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
        Ok(())
    }
    /// Copies all remaining bytes from `f` at offset `off` into `self`. Equivalent to repeatedly
    /// calling `write_from` until it returns either `Ok(0)` or a non-`ErrorKind::Interrupted`
    /// error.
    ///
    /// # Errors
    ///
    /// If an error is returned then the number of bytes copied from `f` is unspecified.
    fn copy_to_end(&mut self, f: &mut File, mut off: u64) -> io::Result<usize> {
        let mut out = 0;
        loop {
            match self.write_from(f, ::std::usize::MAX, off) {
                Ok(0) => return Ok(out),
                Ok(n) => {
                    off = off.saturating_add(n as u64);
                    out += n;
                }
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
    }
}
// Forwarding impl: a `&mut W` is itself a `ZeroCopyWriter`. Every method (including the
// provided ones) delegates to `W` so that any specialized overrides on `W` are preserved.
impl<'a, W: ZeroCopyWriter> ZeroCopyWriter for &'a mut W {
    fn write_from(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<usize> {
        (**self).write_from(f, count, off)
    }
    fn write_all_from(&mut self, f: &mut File, count: usize, off: u64) -> io::Result<()> {
        (**self).write_all_from(f, count, off)
    }
    fn copy_to_end(&mut self, f: &mut File, off: u64) -> io::Result<usize> {
        (**self).copy_to_end(f, off)
    }
}
/// Additional context associated with requests.
///
/// Identifies the process that triggered a FUSE request; mirrors the credential
/// fields of the request header.
#[derive(Clone, Copy, Debug)]
pub struct Context {
    /// The user ID of the calling process.
    pub uid: libc::uid_t,
    /// The group ID of the calling process.
    pub gid: libc::gid_t,
    /// The thread group ID of the calling process.
    pub pid: libc::pid_t,
}
impl From<sys::InHeader> for Context {
    /// Extracts the caller's credentials from a raw FUSE request header.
    fn from(source: sys::InHeader) -> Self {
        Context {
            uid: source.uid,
            gid: source.gid,
            // The header carries the pid unsigned while `libc::pid_t` is `i32`; this `as` cast
            // reinterprets the bits. NOTE(review): assumes the kernel never sends a pid that
            // exceeds i32::MAX — confirm against the FUSE ABI.
            pid: source.pid as i32,
        }
    }
}
/// A trait for iterating over the contents of a directory. This trait is needed because rust
/// doesn't support generic associated types, which means that it's not possible to implement a
/// regular iterator that yields a `DirEntry` due to its generic lifetime parameter.
pub trait DirectoryIterator {
    /// Returns the next entry in the directory or `None` if there are no more.
    ///
    /// The returned `DirEntry` borrows from `self`, which is why this cannot be `Iterator::next`.
    fn next(&mut self) -> Option<DirEntry>;
}
/// The main trait that connects a file system with a transport.
#[allow(unused_variables)]
pub trait FileSystem {
    /// Represents a location in the filesystem tree and can be used to perform operations that act
    /// on the metadata of a file/directory (e.g., `getattr` and `setattr`). Can also be used as the
    /// starting point for looking up paths in the filesystem tree. An `Inode` may support operating
    /// directly on the content of the path to which it points. `FileSystem` implementations
    /// that support this should set the `FsOptions::ZERO_MESSAGE_OPEN` option in the return value
    /// of the `init` function. On linux based systems, an `Inode` is equivalent to opening a file
    /// or directory with the `libc::O_PATH` flag.
    ///
    /// # Lookup Count
    ///
    /// The `FileSystem` implementation is required to keep a "lookup count" for every `Inode`.
    /// Every time an `Entry` is returned by a `FileSystem` trait method, this lookup count should
    /// increase by 1. The lookup count for an `Inode` decreases when the kernel sends a `forget`
    /// request. `Inode`s with a non-zero lookup count may receive requests from the kernel even
    /// after calls to `unlink`, `rmdir` or (when overwriting an existing file) `rename`.
    /// `FileSystem` implementations must handle such requests properly and it is recommended to
    /// defer removal of the `Inode` until the lookup count reaches zero. Calls to `unlink`, `rmdir`
    /// or `rename` will be followed closely by `forget` unless the file or directory is open, in
    /// which case the kernel issues `forget` only after the `release` or `releasedir` calls.
    ///
    /// Note that if a file system will be exported over NFS the `Inode`'s lifetime must extend even
    /// beyond `forget`. See the `generation` field in `Entry`.
    type Inode: From<u64> + Into<u64>;
    /// Represents a file or directory that is open for reading/writing.
    type Handle: From<u64> + Into<u64>;
    /// An iterator over the entries of a directory. See the documentation for `readdir` for more
    /// details.
    type DirIter: DirectoryIterator;
    /// Maximum size of the buffer that the filesystem can generate data to, including the header.
    /// This corresponds to max_write in the initialization.
    fn max_buffer_size(&self) -> u32 {
        MAX_BUFFER_SIZE
    }
    /// Initialize the file system.
    ///
    /// This method is called when a connection to the FUSE kernel module is first established. The
    /// `capable` parameter indicates the features that are supported by the kernel module. The
    /// implementation should return the options that it supports. Any options set in the returned
    /// `FsOptions` that are not also set in `capable` are silently dropped.
    fn init(&self, capable: FsOptions) -> io::Result<FsOptions> {
        // Default: opt in to no optional features.
        Ok(FsOptions::empty())
    }
    /// Clean up the file system.
    ///
    /// Called when the filesystem exits. All open `Handle`s should be closed and the lookup count
    /// for all open `Inode`s implicitly goes to zero. At this point the connection to the FUSE
    /// kernel module may already be gone so implementations should not rely on being able to
    /// communicate with the kernel.
    fn destroy(&self) {}
    /// Look up a directory entry by name and get its attributes.
    ///
    /// If this call is successful then the lookup count of the `Inode` associated with the returned
    /// `Entry` must be increased by 1.
    fn lookup(&self, ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Forget about an inode.
    ///
    /// Called when the kernel removes an inode from its internal caches. `count` indicates the
    /// amount by which the lookup count for the inode should be decreased. If reducing the lookup
    /// count by `count` causes it to go to zero, then the implementation may delete the `Inode`.
    fn forget(&self, ctx: Context, inode: Self::Inode, count: u64) {}
    /// Forget about multiple inodes.
    ///
    /// `requests` is a vector of `(inode, count)` pairs. See the documentation for `forget` for
    /// more information.
    fn batch_forget(&self, ctx: Context, requests: Vec<(Self::Inode, u64)>) {
        // Default: handle each request as an individual `forget`.
        for (inode, count) in requests {
            self.forget(ctx, inode, count)
        }
    }
    /// Get attributes for a file / directory.
    ///
    /// If `handle` is not `None`, then it contains the handle previously returned by the
    /// implementation after a call to `open` or `opendir`. However, implementations should still
    /// take care to verify the handle if they do not trust the client (e.g., virtio-fs).
    ///
    /// If writeback caching is enabled (`FsOptions::WRITEBACK_CACHE`), then the kernel module
    /// likely has a better idea of the length of the file than the file system (for
    /// example, if there was a write that extended the size of the file but has not yet been
    /// flushed). In this case, the `st_size` field of the returned struct is ignored.
    ///
    /// The returned `Duration` indicates how long the returned attributes should be considered
    /// valid by the client. If the attributes are only changed via the FUSE kernel module (i.e.,
    /// the kernel module has exclusive access), then this should be a very large value.
    fn getattr(
        &self,
        ctx: Context,
        inode: Self::Inode,
        handle: Option<Self::Handle>,
    ) -> io::Result<(libc::stat64, Duration)> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Set attributes for a file / directory.
    ///
    /// If `handle` is not `None`, then it contains the handle previously returned by the
    /// implementation after a call to `open` or `opendir`. However, implementations should still
    /// take care to verify the handle if they do not trust the client (e.g., virtio-fs).
    ///
    /// The `valid` parameter indicates the fields of `attr` that may be considered valid and should
    /// be set by the file system. The content of all other fields in `attr` is undefined.
    ///
    /// If the `FsOptions::HANDLE_KILLPRIV` was set during `init`, then the implementation is
    /// expected to reset the setuid and setgid bits if the file size or owner is being changed.
    ///
    /// This method returns the new attributes after making the modifications requested by the
    /// client. The returned `Duration` indicates how long the returned attributes should be
    /// considered valid by the client. If the attributes are only changed via the FUSE kernel
    /// module (i.e., the kernel module has exclusive access), then this should be a very large
    /// value.
    fn setattr(
        &self,
        ctx: Context,
        inode: Self::Inode,
        attr: libc::stat64,
        handle: Option<Self::Handle>,
        valid: SetattrValid,
    ) -> io::Result<(libc::stat64, Duration)> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Read a symbolic link.
    fn readlink(&self, ctx: Context, inode: Self::Inode) -> io::Result<Vec<u8>> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Create a symbolic link.
    ///
    /// The file system must create a symbolic link named `name` in the directory represented by
    /// `parent`, which contains the string `linkname`. Returns an `Entry` for the newly created
    /// symlink.
    ///
    /// If this call is successful then the lookup count of the `Inode` associated with the returned
    /// `Entry` must be increased by 1.
    fn symlink(
        &self,
        ctx: Context,
        linkname: &CStr,
        parent: Self::Inode,
        name: &CStr,
        security_ctx: Option<&CStr>,
    ) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Create a file node.
    ///
    /// Create a regular file, character device, block device, fifo, or socket node named `name` in
    /// the directory represented by `inode`. Valid values for `mode` and `rdev` are the same as
    /// those accepted by the `mknod(2)` system call. Returns an `Entry` for the newly created node.
    ///
    /// When the `FsOptions::DONT_MASK` feature is set, the file system is responsible for setting
    /// the permissions of the created node to `mode & !umask`.
    ///
    /// If this call is successful then the lookup count of the `Inode` associated with the returned
    /// `Entry` must be increased by 1.
    fn mknod(
        &self,
        ctx: Context,
        inode: Self::Inode,
        name: &CStr,
        mode: u32,
        rdev: u32,
        umask: u32,
        security_ctx: Option<&CStr>,
    ) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Create a directory.
    ///
    /// When the `FsOptions::DONT_MASK` feature is set, the file system is responsible for setting
    /// the permissions of the created directory to `mode & !umask`. Returns an `Entry` for the
    /// newly created directory.
    ///
    /// If this call is successful then the lookup count of the `Inode` associated with the returned
    /// `Entry` must be increased by 1.
    fn mkdir(
        &self,
        ctx: Context,
        parent: Self::Inode,
        name: &CStr,
        mode: u32,
        umask: u32,
        security_ctx: Option<&CStr>,
    ) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Create an unnamed temporary file.
    fn chromeos_tmpfile(
        &self,
        ctx: Context,
        parent: Self::Inode,
        mode: u32,
        umask: u32,
        security_ctx: Option<&CStr>,
    ) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Remove a file.
    ///
    /// If the file's inode lookup count is non-zero, then the file system is expected to delay
    /// removal of the inode until the lookup count goes to zero. See the documentation of the
    /// `forget` function for more information.
    fn unlink(&self, ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Remove a directory.
    ///
    /// If the directory's inode lookup count is non-zero, then the file system is expected to delay
    /// removal of the inode until the lookup count goes to zero. See the documentation of the
    /// `forget` function for more information.
    fn rmdir(&self, ctx: Context, parent: Self::Inode, name: &CStr) -> io::Result<()> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Rename a file / directory.
    ///
    /// If the destination exists, it should be atomically replaced. If the destination's inode
    /// lookup count is non-zero, then the file system is expected to delay removal of the inode
    /// until the lookup count goes to zero. See the documentation of the `forget` function for more
    /// information.
    ///
    /// `flags` may be `libc::RENAME_EXCHANGE` or `libc::RENAME_NOREPLACE`. If
    /// `libc::RENAME_NOREPLACE` is specified, the implementation must not overwrite `newname` if it
    /// exists and must return an error instead. If `libc::RENAME_EXCHANGE` is specified, the
    /// implementation must atomically exchange the two files, i.e., both must exist and neither may
    /// be deleted.
    fn rename(
        &self,
        ctx: Context,
        olddir: Self::Inode,
        oldname: &CStr,
        newdir: Self::Inode,
        newname: &CStr,
        flags: u32,
    ) -> io::Result<()> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Create a hard link.
    ///
    /// Create a hard link from `inode` to `newname` in the directory represented by `newparent`.
    ///
    /// If this call is successful then the lookup count of the `Inode` associated with the returned
    /// `Entry` must be increased by 1.
    fn link(
        &self,
        ctx: Context,
        inode: Self::Inode,
        newparent: Self::Inode,
        newname: &CStr,
    ) -> io::Result<Entry> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Open a file.
    ///
    /// Open the file associated with `inode` for reading / writing. All values accepted by the
    /// `open(2)` system call are valid values for `flags` and must be handled by the file system.
    /// However, there are some additional rules:
    ///
    /// * Creation flags (`libc::O_CREAT`, `libc::O_EXCL`, `libc::O_NOCTTY`) will be filtered out
    ///   and handled by the kernel.
    ///
    /// * The file system should check the access modes (`libc::O_RDONLY`, `libc::O_WRONLY`,
    ///   `libc::O_RDWR`) to determine if the operation is permitted. If the file system was mounted
    ///   with the `-o default_permissions` mount option, then this check will also be carried out
    ///   by the kernel before sending the open request.
    ///
    /// * When writeback caching is enabled (`FsOptions::WRITEBACK_CACHE`) the kernel may send read
    ///   requests even for files opened with `libc::O_WRONLY`. The file system should be prepared
    ///   to handle this.
    ///
    /// * When writeback caching is enabled, the kernel will handle the `libc::O_APPEND` flag.
    ///   However, this will not work reliably unless the kernel has exclusive access to the file.
    ///   In this case the file system may either ignore the `libc::O_APPEND` flag or return an
    ///   error to indicate that reliable `libc::O_APPEND` handling is not available.
    ///
    /// * When writeback caching is disabled, the file system is expected to properly handle
    ///   `libc::O_APPEND` and ensure that each write is appended to the end of the file.
    ///
    /// The file system may choose to return a `Handle` to refer to the newly opened file. The
    /// kernel will then use this `Handle` for all operations on the content of the file (`read`,
    /// `write`, `flush`, `release`, `fsync`). If the file system does not return a
    /// `Handle` then the kernel will use the `Inode` for the file to operate on its contents. In
    /// this case the file system may wish to enable the `FsOptions::ZERO_MESSAGE_OPEN` feature if
    /// it is supported by the kernel (see below).
    ///
    /// The returned `OpenOptions` allow the file system to change the way the opened file is
    /// handled by the kernel. See the documentation of `OpenOptions` for more information.
    ///
    /// If the `FsOptions::ZERO_MESSAGE_OPEN` feature is enabled by both the file system
    /// implementation and the kernel, then the file system may return an error of `ENOSYS`. This
    /// will be interpreted by the kernel as success and future calls to `open` and `release` will
    /// be handled by the kernel without being passed on to the file system.
    fn open(
        &self,
        ctx: Context,
        inode: Self::Inode,
        flags: u32,
    ) -> io::Result<(Option<Self::Handle>, OpenOptions)> {
        // Matches the behavior of libfuse: succeed with no handle and no special options.
        Ok((None, OpenOptions::empty()))
    }
    /// Create and open a file.
    ///
    /// If the file does not already exist, the file system should create it with the specified
    /// `mode`. When the `FsOptions::DONT_MASK` feature is set, the file system is responsible for
    /// setting the permissions of the created file to `mode & !umask`.
    ///
    /// If the file system returns an `ENOSYS` error, then the kernel will treat this method as
    /// unimplemented and all future calls to `create` will be handled by calling the `mknod` and
    /// `open` methods instead.
    ///
    /// See the documentation for the `open` method for more information about opening the file. In
    /// addition to the optional `Handle` and the `OpenOptions`, the file system must also return an
    /// `Entry` for the file. This increases the lookup count for the `Inode` associated with the
    /// file by 1.
    fn create(
        &self,
        ctx: Context,
        parent: Self::Inode,
        name: &CStr,
        mode: u32,
        flags: u32,
        umask: u32,
        security_ctx: Option<&CStr>,
    ) -> io::Result<(Entry, Option<Self::Handle>, OpenOptions)> {
        // Default: not implemented; the kernel falls back to `mknod` + `open`.
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Read data from a file.
    ///
    /// Returns `size` bytes of data starting from offset `off` from the file associated with
    /// `inode` or `handle`.
    ///
    /// `flags` contains the flags used to open the file. Similarly, `handle` is the `Handle`
    /// returned by the file system from the `open` method, if any. If the file system
    /// implementation did not return a `Handle` from `open` then the contents of `handle` are
    /// undefined.
    ///
    /// This method should return exactly the number of bytes requested by the kernel, except in the
    /// case of error or EOF. Otherwise, the kernel will substitute the rest of the data with
    /// zeroes. An exception to this rule is if the file was opened with the "direct I/O" option
    /// (`libc::O_DIRECT`), in which case the kernel will forward the return code from this method
    /// to the userspace application that made the system call.
    fn read<W: io::Write + ZeroCopyWriter>(
        &self,
        ctx: Context,
        inode: Self::Inode,
        handle: Self::Handle,
        w: W,
        size: u32,
        offset: u64,
        lock_owner: Option<u64>,
        flags: u32,
    ) -> io::Result<usize> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Write data to a file.
    ///
    /// Writes `size` bytes of data starting from offset `off` to the file associated with `inode`
    /// or `handle`.
    ///
    /// `flags` contains the flags used to open the file. Similarly, `handle` is the `Handle`
    /// returned by the file system from the `open` method, if any. If the file system
    /// implementation did not return a `Handle` from `open` then the contents of `handle` are
    /// undefined.
    ///
    /// If the `FsOptions::HANDLE_KILLPRIV` feature is not enabled then the file system is
    /// expected to clear the setuid and setgid bits.
    ///
    /// If `delayed_write` is true then it indicates that this is a write for buffered data.
    ///
    /// This method should return exactly the number of bytes requested by the kernel, except in the
    /// case of error. An exception to this rule is if the file was opened with the "direct I/O"
    /// option (`libc::O_DIRECT`), in which case the kernel will forward the return code from this
    /// method to the userspace application that made the system call.
    fn write<R: io::Read + ZeroCopyReader>(
        &self,
        ctx: Context,
        inode: Self::Inode,
        handle: Self::Handle,
        r: R,
        size: u32,
        offset: u64,
        lock_owner: Option<u64>,
        delayed_write: bool,
        flags: u32,
    ) -> io::Result<usize> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Flush the contents of a file.
    ///
    /// This method is called on every `close()` of a file descriptor. Since it is possible to
    /// duplicate file descriptors there may be many `flush` calls for one call to `open`.
    ///
    /// File systems should not make any assumptions about when `flush` will be
    /// called or even if it will be called at all.
    ///
    /// `handle` is the `Handle` returned by the file system from the `open` method, if any. If the
    /// file system did not return a `Handle` from `open` then the contents of `handle` are
    /// undefined.
    ///
    /// Unlike `fsync`, the file system is not required to flush pending writes. One reason to flush
    /// data is if the file system wants to return write errors during close. However, this is not
    /// portable because POSIX does not require `close` to wait for delayed I/O to complete.
    ///
    /// If the `FsOptions::POSIX_LOCKS` feature is enabled, then the file system must remove all
    /// locks belonging to `lock_owner`.
    ///
    /// If this method returns an `ENOSYS` error then the kernel will treat it as success and all
    /// subsequent calls to `flush` will be handled by the kernel without being forwarded to the
    /// file system.
    fn flush(
        &self,
        ctx: Context,
        inode: Self::Inode,
        handle: Self::Handle,
        lock_owner: u64,
    ) -> io::Result<()> {
        // Default: not implemented; the kernel treats ENOSYS here as success.
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Synchronize file contents.
    ///
    /// File systems must ensure that the file contents have been flushed to disk before returning
    /// from this method. If `datasync` is true then only the file data (but not the metadata) needs
    /// to be flushed.
    ///
    /// `handle` is the `Handle` returned by the file system from the `open` method, if any. If the
    /// file system did not return a `Handle` from `open` then the contents of
    /// `handle` are undefined.
    ///
    /// If this method returns an `ENOSYS` error then the kernel will treat it as success and all
    /// subsequent calls to `fsync` will be handled by the kernel without being forwarded to the
    /// file system.
    fn fsync(
        &self,
        ctx: Context,
        inode: Self::Inode,
        datasync: bool,
        handle: Self::Handle,
    ) -> io::Result<()> {
        // Default: not implemented; the kernel treats ENOSYS here as success.
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Allocate requested space for file data.
    ///
    /// If this function returns success, then the file system must guarantee that it is possible to
    /// write up to `length` bytes of data starting at `offset` without failing due to a lack of
    /// free space on the disk.
    ///
    /// `handle` is the `Handle` returned by the file system from the `open` method, if any. If the
    /// file system did not return a `Handle` from `open` then the contents of `handle` are
    /// undefined.
    ///
    /// If this method returns an `ENOSYS` error then the kernel will treat that as a permanent
    /// failure: all future calls to `fallocate` will fail with `EOPNOTSUPP` without being forwarded
    /// to the file system.
    fn fallocate(
        &self,
        ctx: Context,
        inode: Self::Inode,
        handle: Self::Handle,
        mode: u32,
        offset: u64,
        length: u64,
    ) -> io::Result<()> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Release an open file.
    ///
    /// This method is called when there are no more references to an open file: all file
    /// descriptors are closed and all memory mappings are unmapped.
    ///
    /// For every `open` call there will be exactly one `release` call (unless the file system is
    /// force-unmounted).
    ///
    /// The file system may reply with an error, but error values are not returned to the `close()`
    /// or `munmap()` which triggered the release.
    ///
    /// `handle` is the `Handle` returned by the file system from the `open` method, if any. If the
    /// file system did not return a `Handle` from `open` then the contents of
    /// `handle` are undefined.
    ///
    /// If `flush` is `true` then the contents of the file should also be flushed to disk.
    fn release(
        &self,
        ctx: Context,
        inode: Self::Inode,
        flags: u32,
        handle: Self::Handle,
        flush: bool,
        flock_release: bool,
        lock_owner: Option<u64>,
    ) -> io::Result<()> {
        // Default: not implemented (ENOSYS).
        Err(io::Error::from_raw_os_error(libc::ENOSYS))
    }
    /// Get information about the file system.
    ///
    /// The default implementation returns an otherwise-zeroed statvfs with `f_namemax = 255` and
    /// `f_bsize = 512`, matching what libfuse reports for unimplemented filesystems.
    fn statfs(&self, ctx: Context, inode: Self::Inode) -> io::Result<libc::statvfs64> {
        // Safe because we are zero-initializing a struct with only POD fields.
        let mut st: libc::statvfs64 = unsafe { mem::zeroed() };
        // This matches the behavior of libfuse as it returns these values if the
        // filesystem doesn't implement this method.
        st.f_namemax = 255;
        st.f_bsize = 512;
        Ok(st)
    }
/// Set an extended attribute.
///
/// If this method fails with an `ENOSYS` error, then the kernel will treat that as a permanent
/// failure. The kernel will return `EOPNOTSUPP` for all future calls to `setxattr` without
/// forwarding them to the file system.
///
/// Valid values for flags are the same as those accepted by the `setxattr(2)` system call and
/// have the same behavior.
fn setxattr(
&self,
ctx: Context,
inode: Self::Inode,
name: &CStr,
value: &[u8],
flags: u32,
) -> io::Result<()> {
Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Get an extended attribute.
///
/// If `size` is 0, then the file system should respond with `GetxattrReply::Count` and the
/// number of bytes needed to hold the value. If `size` is large enough to hold the value, then
/// the file system should reply with `GetxattrReply::Value` and the value of the extended
/// attribute. If `size` is not 0 but is also not large enough to hold the value, then the file
/// system should reply with an `ERANGE` error.
///
/// If this method fails with an `ENOSYS` error, then the kernel will treat that as a permanent
/// failure. The kernel will return `EOPNOTSUPP` for all future calls to `getxattr` without
/// forwarding them to the file system.
fn getxattr(
&self,
ctx: Context,
inode: Self::Inode,
name: &CStr,
size: u32,
) -> io::Result<GetxattrReply> {
Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// List extended attribute names.
///
/// If `size` is 0, then the file system should respond with `ListxattrReply::Count` and the
/// number of bytes needed to hold a `\0` byte separated list of the names of all the extended
/// attributes. If `size` is large enough to hold the `\0` byte separated list of the attribute
/// names, then the file system should reply with `ListxattrReply::Names` and the list. If
/// `size` is not 0 but is also not large enough to hold the list, then the file system should
/// reply with an `ERANGE` error.
///
/// If this method fails with an `ENOSYS` error, then the kernel will treat that as a permanent
/// failure. The kernel will return `EOPNOTSUPP` for all future calls to `listxattr` without
/// forwarding them to the file system.
fn listxattr(&self, ctx: Context, inode: Self::Inode, size: u32) -> io::Result<ListxattrReply> {
    // Default: unsupported; ENOSYS tells the kernel to stop forwarding listxattr.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Remove an extended attribute.
///
/// If this method fails with an `ENOSYS` error, then the kernel will treat that as a permanent
/// failure. The kernel will return `EOPNOTSUPP` for all future calls to `removexattr` without
/// forwarding them to the file system.
fn removexattr(&self, ctx: Context, inode: Self::Inode, name: &CStr) -> io::Result<()> {
    // Default: unsupported; ENOSYS tells the kernel to stop forwarding removexattr.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Open a directory for reading.
///
/// The file system may choose to return a `Handle` to refer to the newly opened directory. The
/// kernel will then use this `Handle` for all operations on the content of the directory
/// (`readdir`, `readdirplus`, `fsyncdir`, `releasedir`). If the file system does not return a
/// `Handle` then the kernel will use the `Inode` for the directory to operate on its contents.
/// In this case the file system may wish to enable the `FsOptions::ZERO_MESSAGE_OPENDIR`
/// feature if it is supported by the kernel (see below).
///
/// The returned `OpenOptions` allow the file system to change the way the opened directory is
/// handled by the kernel. See the documentation of `OpenOptions` for more information.
///
/// If the `FsOptions::ZERO_MESSAGE_OPENDIR` feature is enabled by both the file system
/// implementation and the kernel, then the file system may return an error of `ENOSYS`. This
/// will be interpreted by the kernel as success and future calls to `opendir` and `releasedir`
/// will be handled by the kernel without being passed on to the file system.
fn opendir(
    &self,
    ctx: Context,
    inode: Self::Inode,
    flags: u32,
) -> io::Result<(Option<Self::Handle>, OpenOptions)> {
    // Matches the behavior of libfuse.
    // Default: succeed without producing a handle, so subsequent directory
    // operations are keyed on the inode instead.
    Ok((None, OpenOptions::empty()))
}
/// Read a directory.
///
/// `handle` is the `Handle` returned by the file system from the `opendir` method, if any. If
/// the file system did not return a `Handle` from `opendir` then the contents of `handle` are
/// undefined.
///
/// `size` indicates the maximum number of bytes that should be returned by this method.
///
/// If `offset` is non-zero then it corresponds to one of the `offset` values from a `DirEntry`
/// that was previously returned by a call to `readdir` for the same handle. In this case the
/// file system should skip over the entries before the position defined by the `offset` value.
/// If entries were added or removed while the `Handle` is open then the file system may still
/// include removed entries or skip newly created entries. However, adding or removing entries
/// should never cause the file system to skip over unrelated entries or include an entry more
/// than once. This means that `offset` cannot be a simple index and must include sufficient
/// information to uniquely determine the next entry in the list even when the set of entries is
/// being changed.
///
/// The file system may return entries for the current directory (".") and parent directory
/// ("..") but is not required to do so. If the file system does not return these entries, then
/// they are implicitly added by the kernel.
///
/// The lookup count for `Inode`s associated with the returned directory entries is **NOT**
/// affected by this method.
///
fn readdir(
    &self,
    ctx: Context,
    inode: Self::Inode,
    handle: Self::Handle,
    size: u32,
    offset: u64,
) -> io::Result<Self::DirIter> {
    // Default: directory listing is not implemented.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Synchronize the contents of a directory.
///
/// File systems must ensure that the directory contents have been flushed to disk before
/// returning from this method. If `datasync` is true then only the directory data (but not the
/// metadata) needs to be flushed.
///
/// `handle` is the `Handle` returned by the file system from the `opendir` method, if any. If
/// the file system did not return a `Handle` from `opendir` then the contents of
/// `handle` are undefined.
///
/// If this method returns an `ENOSYS` error then the kernel will treat it as success and all
/// subsequent calls to `fsyncdir` will be handled by the kernel without being forwarded to the
/// file system.
fn fsyncdir(
    &self,
    ctx: Context,
    inode: Self::Inode,
    datasync: bool,
    handle: Self::Handle,
) -> io::Result<()> {
    // Default: unsupported; per the doc comment above the kernel treats ENOSYS
    // as success and stops forwarding fsyncdir.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Release an open directory.
///
/// For every `opendir` call there will be exactly one `releasedir` call (unless the file system
/// is force-unmounted).
///
/// `handle` is the `Handle` returned by the file system from the `opendir` method, if any. If
/// the file system did not return a `Handle` from `opendir` then the contents of `handle` are
/// undefined.
///
/// `flags` contains used the flags used to open the directory in `opendir`.
fn releasedir(
    &self,
    ctx: Context,
    inode: Self::Inode,
    flags: u32,
    handle: Self::Handle,
) -> io::Result<()> {
    // Default: nothing to release; report not-implemented.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Check file access permissions.
///
/// This method is called when a userspace process in the client makes an `access()` or
/// `chdir()` system call. If the file system was mounted with the `-o default_permissions`
/// mount option, then the kernel will perform these checks itself and this method will not be
/// called.
///
/// If this method returns an `ENOSYS` error, then the kernel will treat it as a permanent
/// success: all future calls to `access` will return success without being forwarded to the
/// file system.
fn access(&self, ctx: Context, inode: Self::Inode, mask: u32) -> io::Result<()> {
    // Default: unsupported; per the doc comment above the kernel treats ENOSYS
    // as a permanent success and stops forwarding access checks.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Perform an ioctl on a file or directory.
///
/// `handle` is the `Handle` returned by the file system from the `open` or `opendir` methods,
/// if any. If the file system did not return a `Handle` from then the contents of `handle` are
/// undefined.
///
/// If `flags` contains `IoctlFlags::UNRESTRICTED` then the file system may retry the ioctl
/// after informing the kernel about the input and output areas. If `flags` does not contain
/// `IoctlFlags::UNRESTRICTED` then the kernel will prepare the input and output areas according
/// to the encoding in the ioctl command. In that case the ioctl cannot be retried.
///
/// `cmd` is the ioctl request made by the calling process, truncated to 32 bits.
///
/// `arg` is the argument provided by the calling process.
///
/// `in_size` is the length of the additional data that accompanies the request. The file system
/// may fetch this data from `reader`.
///
/// `out_size` is the length of the output area prepared by the kernel to hold the response to
/// the ioctl.
fn ioctl<R: io::Read>(
    &self,
    ctx: Context,
    handle: Self::Handle,
    flags: IoctlFlags,
    cmd: u32,
    arg: u64,
    in_size: u32,
    out_size: u32,
    reader: R,
) -> io::Result<IoctlReply> {
    // Default: no ioctls are supported.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn getlk(&self) -> io::Result<()> {
    // Placeholder: POSIX lock queries are not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn setlk(&self) -> io::Result<()> {
    // Placeholder: non-blocking POSIX locking is not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn setlkw(&self) -> io::Result<()> {
    // Placeholder: blocking POSIX locking is not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn bmap(&self) -> io::Result<()> {
    // Placeholder: block mapping is not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn poll(&self) -> io::Result<()> {
    // Placeholder: polling is not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn notify_reply(&self) -> io::Result<()> {
    // Placeholder: notification replies are not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// TODO: support this
fn lseek(&self) -> io::Result<()> {
    // Placeholder: seek support is not wired up yet (see TODO above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Copy a range of data from one file to another
///
/// Performs an optimized copy between two file descriptors without the additional cost of
/// transferring data through the kernel module to user space (glibc) and then back into
/// the file system again.
///
/// In case this method is not implemented, glibc falls back to reading data from the source and
/// writing to the destination.
///
/// If this method fails with an `ENOSYS` error, then the kernel will treat that as a permanent
/// failure. The kernel will return `EOPNOTSUPP` for all future calls to `copy_file_range`
/// without forwarding them to the file system.
///
/// All values accepted by the `copy_file_range(2)` system call are valid values for `flags` and
/// must be handled by the file system.
fn copy_file_range(
    &self,
    ctx: Context,
    inode_src: Self::Inode,
    handle_src: Self::Handle,
    offset_src: u64,
    inode_dst: Self::Inode,
    handle_dst: Self::Handle,
    offset_dst: u64,
    length: u64,
    flags: u64,
) -> io::Result<usize> {
    // Default: unsupported; per the doc comment above, glibc then falls back
    // to an ordinary read/write copy.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Set up memory mappings.
///
/// Used to set up file mappings in DAX window.
///
/// # Arguments
///
/// * `file_offset` - Offset into the file to start the mapping.
/// * `mem_offset` - Offset in Memory Window.
/// * `size` - Length of mapping required.
/// * `flags` - Bit field of `FUSE_SETUPMAPPING_FLAGS_*`.
/// * `mapper` - Mapper object which performs the mapping.
fn set_up_mapping<M: Mapper>(
    &self,
    ctx: Context,
    inode: Self::Inode,
    handle: Self::Handle,
    file_offset: u64,
    mem_offset: u64,
    size: usize,
    flags: u32,
    mapper: M,
) -> io::Result<()> {
    // Default: DAX window mappings are not supported.
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
/// Remove memory mappings.
///
/// Used to tear down file mappings in DAX window. This method must be supported when
/// `set_up_mapping` is supported.
fn remove_mapping<M: Mapper>(&self, msgs: &[RemoveMappingOne], mapper: M) -> io::Result<()> {
    // Default: unsupported. Note implementors of `set_up_mapping` must also
    // override this method (see the doc comment above).
    Err(io::Error::from_raw_os_error(libc::ENOSYS))
}
}
| true
|
8de04fd4c6c48f64fdb89648ea0561cc57aa24c7
|
Rust
|
JRAndreassen/graph-rs
|
/examples/rocket_open_id_code.rs
|
UTF-8
| 3,423
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#![feature(proc_macro_hygiene, decl_macro)]
#![feature(plugin)]
#[macro_use]
extern crate rocket;
#[allow(unused_imports)]
#[macro_use]
extern crate serde_json;
extern crate reqwest;
use from_as::*;
use graph_rs::oauth::{IdToken, OAuth};
use rocket::Data;
use rocket_codegen::routes;
use std::convert::TryFrom;
use std::io::Read;
use std::thread;
use std::time::Duration;
// Create an OAuth struct with the needed credentials.
// See the following link for more info on open ID connect:
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-protocols-oidc
fn oauth_open_id() -> OAuth {
    // NOTE: replace the client id/secret placeholders with real app
    // registration values before running this example.
    let mut oauth = OAuth::new();
    oauth
        .client_id("<YOUR_CLIENT_ID>")
        .client_secret("<YOUR_CLIENT_SECRET>")
        .authorize_url("https://login.microsoftonline.com/common/oauth2/v2.0/authorize")
        .redirect_uri("http://localhost:8000/redirect")
        .access_token_url("https://login.microsoftonline.com/common/oauth2/v2.0/token")
        .refresh_token_url("https://login.microsoftonline.com/common/oauth2/v2.0/token")
        // Request both an id_token and an authorization code, delivered via a
        // form POST to the redirect URI above.
        .response_type("id_token code")
        .response_mode("form_post")
        .add_scope("openid")
        .add_scope("Files.Read")
        .add_scope("Files.ReadWrite")
        .add_scope("Files.Read.All")
        .add_scope("Files.ReadWrite.All")
        .add_scope("offline_access")
        // Nonce and state are echoed back by the provider; a real application
        // should generate fresh random values per request and verify them.
        .nonce("7362CAEA-9CA5")
        .prompt("login")
        .state("12345");
    oauth
}
fn main() {
    // Spawn the browser to sign in within a different thread that waits until
    // rocket has started. Otherwise, the redirect from sign in may happen
    // before rocket has started.
    let handle = thread::spawn(|| {
        // Block the new thread and give enough time for rocket to completely start.
        thread::sleep(Duration::from_secs(2));
        // Use the OpenId trait from OAuth to request an access code.
        // The full name syntax is used here so it does not clash with methods
        // in the other grant types.
        let mut oauth = oauth_open_id();
        let mut request = oauth.build().open_id_connect();
        request.browser_authorization().open().unwrap();
    });
    // Serve the /redirect route; launch() blocks until the server shuts down.
    rocket::ignite().mount("/", routes![redirect]).launch();
    handle.join().unwrap();
}
// Receives the identity provider's form POST containing the id_token and code.
#[post("/redirect", data = "<id_token>")]
fn redirect(id_token: Data) -> String {
    // Read in the response body to a String
    let mut s = String::new();
    id_token.open().read_to_string(&mut s).unwrap();
    // Print the string for debugging in case the attempt to deserialize the response
    // in the TryFrom method below does not work..
    println!("Token response:\n{:#?}\n", s);
    // Use the TryFrom impl to get an IdToken from a string
    // and pass the IdToken to OAuth.
    let token: IdToken = IdToken::try_from(s).unwrap();
    println!("IdToken:\n{:#?}\n", token);
    let mut oauth = oauth_open_id();
    oauth.id_token(token);
    // Exchange the authorization code we just received for an access token.
    access_token(&mut oauth);
    String::from("Successfully Logged In! You can close your browser.")
}
/// Exchange the authorization code held by `oauth` for an access token and
/// persist the resulting OAuth configuration to a JSON file on disk.
pub fn access_token(oauth: &mut OAuth) {
    let mut request = oauth.build().code_flow();
    let access_token = request.access_token().send().unwrap();
    oauth.access_token(access_token);
    // If all went well here we can print out the OAuth config with the Access Token.
    println!("OAuth:\n{:#?}\n", &oauth);
    oauth
        .as_file("./examples/example_files/web_oauth.json")
        .unwrap();
}
| true
|
e4cba87328af31e822bd467ed96ac8718710745c
|
Rust
|
iotaledger/iota.rs
|
/types/src/api/plugins/participation/error.rs
|
UTF-8
| 667
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
// Copyright 2023 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
/// Errors produced by the participation plugin API.
#[derive(Debug)]
pub enum Error {
    /// Invalid participations error
    InvalidParticipations,
    /// IO error
    Io(std::io::Error),
}
impl core::fmt::Display for Error {
    /// Human-readable error text: fixed message for invalid participations,
    /// the inner error's own message for I/O failures.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::InvalidParticipations => f.write_str("invalid participations"),
            Self::Io(inner) => core::fmt::Display::fmt(inner, f),
        }
    }
}
impl From<std::io::Error> for Error {
fn from(error: std::io::Error) -> Self {
Self::Io(error)
}
}
// The std `Error` trait only exists with the standard library, so this impl is
// gated on the `std` feature (the crate is otherwise no_std-compatible).
#[cfg(feature = "std")]
impl std::error::Error for Error {}
| true
|
94f6cac5c52c9f69485e758eb2ae8dd36bc23cbb
|
Rust
|
rnestler/rustfest-2019-nannou-workshop
|
/src/main.rs
|
UTF-8
| 1,050
| 2.703125
| 3
|
[] |
no_license
|
use nannou::color::rgba;
use nannou::prelude::*;
fn main() {
    // Wire up the nannou app: `model` builds initial state, `update` advances
    // it each frame, and `view` draws into a default window.
    nannou::app(model).update(update).simple_window(view).run();
}
// Per-frame animation state.
struct Model {
    x: f32,    // current ellipse center, x (window coordinates)
    y: f32,    // current ellipse center, y
    size: f32, // ellipse width/height
    t: f32,    // accumulated animation time, advanced in `update`
}
// Initial state: ellipse at the origin, fixed size, clock at zero.
fn model(_app: &App) -> Model {
    Model {
        x: 0.0,
        y: 0.0,
        size: 20.0,
        t: 0.0,
    }
}
fn update(app: &App, model: &mut Model, _update: Update) {
    // Advance time by roughly one frame at 60 fps (1/60 s).
    model.t += 0.01667;
    let mouse_pos = app.mouse.position();
    // Oscillate each axis at a different rate, scaled by the mouse position so
    // the user controls the amplitude of the motion.
    model.x = model.t.sin() * mouse_pos.x;
    model.y = (model.t * 1.5).sin() * mouse_pos.y;
    // Pulse the size between 10 and 30.
    model.size = (model.t * 2.0).sin() * 10.0 + 20.0;
}
fn view(app: &App, model: &Model, frame: &Frame) {
    let draw = app.draw();
    //draw.background().color(PLUM);
    //draw.background().color(rgba(0.0, 0.0, 0.0, 0.9));
    // Instead of clearing, blend a nearly-transparent black rectangle over the
    // whole window each frame, so previous frames fade out as trails.
    draw.rect()
        .wh(app.window_rect().wh())
        .color(rgba(0.0, 0.0, 0.0, 0.03));
    draw.ellipse()
        .color(rgba(0.0, 0.0, 1.0, 0.9))
        .w(model.size)
        .h(model.size)
        .x_y(model.x, model.y);
    draw.to_frame(app, &frame).unwrap();
}
| true
|
5e2222d641fe9229222be7da22cb0db26fe786a5
|
Rust
|
rfdonnelly/lift-rs
|
/src/main.rs
|
UTF-8
| 6,843
| 3.140625
| 3
|
[] |
no_license
|
use std::cmp;
use std::fmt;
use structopt::StructOpt;
const MAX_SETS: u32 = 6;
const MAX_REPS: u32 = 5;
/// structopt value parser for `--sets`: parse a base-10 `u32` and reject
/// anything above `MAX_SETS`.
///
/// Returns the parsed count, or a human-readable message when the input is not
/// a valid `u32` or exceeds the limit.
fn parse_sets(s: &str) -> Result<u32, String> {
    // `str::parse::<u32>` is the idiomatic equivalent of
    // `u32::from_str_radix(s, 10)` and yields the same error messages.
    let value = s.parse::<u32>().map_err(|e| e.to_string())?;
    if value > MAX_SETS {
        return Err(format!("Must be {} or less", MAX_SETS));
    }
    Ok(value)
}
// CLI options parsed by structopt. The `///` doc comments below double as the
// generated `--help` text, so their wording is user-visible output.
#[derive(StructOpt, Debug)]
#[structopt(about, author)]
struct Options {
    /// The bar weight.
    #[structopt(short, long, default_value = "45")]
    bar: u32,
    /// The number of sets.
    #[structopt(short, long, default_value = "5", parse(try_from_str = parse_sets))]
    sets: u32,
    /// Sets the weight of the work set. Must be great than or equal to the bar weight.
    work_set: u32,
}
// One line of the workout plan, printed as `WEIGHTxREPSxSETS`.
struct Set {
    weight: u32, // total bar weight for this set (bar included)
    reps: u32,   // repetitions per set
    sets: u32,   // number of times this set is performed
}
impl fmt::Display for Set {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}x{}x{}", self.weight, self.reps, self.sets)
}
}
impl fmt::Debug for Set {
    /// Debug output is identical to Display (`WEIGHTxREPSxSETS`); delegate to
    /// the Display impl instead of duplicating the format string.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
/// Entry point: parse CLI options (bar weight, set count, work-set weight),
/// build the warm-up progression, and print each set with its plate breakdown.
fn main() {
    let options = Options::from_args();
    let sets = get_sets(options.bar, options.work_set, options.sets);
    print_sets(options.bar, &sets);
}
/// Print each set as `WEIGHTxREPSxSETS` followed by the per-side plates needed
/// on top of `base` (the bar weight).
///
/// Takes a slice rather than `&Vec<Set>` (clippy `ptr_arg`); existing callers
/// passing `&Vec<Set>` still work via deref coercion.
fn print_sets(base: u32, sets: &[Set]) {
    for set in sets {
        println!("{:>7} {:?}", set.to_string(), get_plates(set.weight - base));
    }
}
/// Build a warm-up progression of `sets` sets from `min` (the bar) up to the
/// `max` work set.
///
/// The per-set weight delta is rounded up to a multiple of 5, and every set
/// before the last is capped at `max - 5` so no warm-up equals the work set.
fn get_sets(min: u32, max: u32, sets: u32) -> Vec<Set> {
    let mut rv = Vec::with_capacity(sets as usize);
    let delta = round_up_5((max - min) / (sets - 1));
    for set in 0..sets {
        // This ensures weight for second to last set != last set
        let set_max = if set == sets - 1 { max } else { max - 5 };
        let weight = cmp::min(min + delta * set, set_max);
        rv.push(Set {
            weight,
            reps: get_reps(set, sets),
            sets: get_sub_sets(set, sets),
        });
    }
    rv
}
/// Round `x` up to the next multiple of 5; exact multiples are unchanged.
fn round_up_5(x: u32) -> u32 {
    let fives = (x + 4) / 5;
    fives * 5
}
fn get_reps(set: u32, sets: u32) -> u32 {
let max = MAX_REPS;
let upper_bound = sets - 1;
match set {
n if n == upper_bound => max,
n => cmp::max(max - n, 1),
}
}
/// Number of times a set is performed: 3 for the final work set, 2 for the
/// first warm-up set, 1 for everything in between. When there is only one set
/// it counts as the work set.
fn get_sub_sets(set: u32, sets: u32) -> u32 {
    let last = sets - 1;
    if set == last {
        3
    } else if set == 0 {
        2
    } else {
        1
    }
}
/// Compute the plates for ONE side of the bar that add `weight` in total.
///
/// Greedily picks from a single standard plate set (each plate usable at most
/// once per side). Panics when `weight / 2` cannot be formed exactly.
fn get_plates(weight: u32) -> Vec<f64> {
    if weight == 0 {
        return Vec::new();
    }
    let available_plates = vec![45.0, 35.0, 25.0, 10.0, 5.0, 5.0, 2.5];
    let mut required_plates: Vec<f64> = Vec::new();
    // The iterator persists across outer iterations, so plates skipped or
    // consumed earlier are never reconsidered.
    let mut available_plates_iter = available_plates.iter();
    // Plates load symmetrically, so solve for half the requested weight.
    let weight = weight as f64 / 2.0;
    let mut next_sum: f64 = 0.0;
    // Cap iterations to prevent infinite loop in case of no solution
    for _ in 0..10 {
        let sum = next_sum;
        // Eliminate available plates until we find one that doesn't exceed our desired weight
        while let Some(plate) = available_plates_iter.next() {
            next_sum = sum + plate;
            if next_sum <= weight {
                required_plates.push(*plate);
                break;
            }
        }
        // Are we done?
        if next_sum == weight {
            return required_plates;
        } else if next_sum > weight {
            // Reached when the plate iterator runs dry with the last tried
            // sum above target (e.g. target smaller than the smallest plate).
            panic!("sum exceeds weight");
        }
    }
    panic!("no solution found");
}
#[cfg(test)]
mod tests {
    // Unit tests, one submodule per function under test.
    mod round_up_5 {
        use super::super::*;
        #[test]
        fn compare() {
            assert_eq!(round_up_5(0), 0);
            assert_eq!(round_up_5(1), 5);
            assert_eq!(round_up_5(2), 5);
            assert_eq!(round_up_5(3), 5);
            assert_eq!(round_up_5(4), 5);
            assert_eq!(round_up_5(5), 5);
            assert_eq!(round_up_5(6), 10);
        }
    }
    mod get_sets {
        use super::super::*;
        #[test]
        fn typ() {
            assert_eq!(
                format!("{:?}", get_sets(45, 85, 5)),
                "[45x5x2, 55x4x1, 65x3x1, 75x2x1, 85x5x3]"
            );
            assert_eq!(
                format!("{:?}", get_sets(45, 105, 5)),
                "[45x5x2, 60x4x1, 75x3x1, 90x2x1, 105x5x3]"
            );
        }
        // Deltas that don't divide evenly are rounded up, then clamped.
        #[test]
        fn fractional_delta() {
            assert_eq!(
                format!("{:?}", get_sets(45, 90, 5)),
                "[45x5x2, 60x4x1, 75x3x1, 85x2x1, 90x5x3]"
            );
            assert_eq!(
                format!("{:?}", get_sets(45, 95, 5)),
                "[45x5x2, 60x4x1, 75x3x1, 90x2x1, 95x5x3]"
            );
            assert_eq!(
                format!("{:?}", get_sets(45, 100, 5)),
                "[45x5x2, 60x4x1, 75x3x1, 90x2x1, 100x5x3]"
            );
        }
    }
    mod get_reps {
        use super::super::*;
        #[test]
        fn min() {
            assert_eq!(get_reps(0, 5), 5);
        }
        #[test]
        fn mid_nominal() {
            assert_eq!(get_reps(1, 5), 4);
            assert_eq!(get_reps(2, 5), 3);
            assert_eq!(get_reps(3, 5), 2);
            assert_eq!(get_reps(4, 6), 1);
        }
        // Rep count never drops below 1 for middle sets.
        #[test]
        fn mid_min() {
            assert_eq!(get_reps(5, 7), 1);
            assert_eq!(get_reps(5, 9), 1);
        }
        #[test]
        fn max() {
            assert_eq!(get_reps(4, 5), 5);
        }
    }
    mod get_sub_sets {
        use super::super::*;
        #[test]
        fn min() {
            assert_eq!(get_sub_sets(0, 5), 2);
        }
        #[test]
        fn max() {
            assert_eq!(get_sub_sets(4, 5), 3);
            assert_eq!(get_sub_sets(0, 1), 3);
        }
        #[test]
        fn mid() {
            assert_eq!(get_sub_sets(3, 5), 1);
        }
    }
    mod get_plates {
        use super::super::*;
        #[test]
        fn min() {
            assert_eq!(get_plates(5), vec!(2.5));
        }
        #[test]
        fn max() {
            assert_eq!(get_plates(255), vec!(45.0, 35.0, 25.0, 10.0, 5.0, 5.0, 2.5));
        }
        #[test]
        fn mid() {
            assert_eq!(get_plates(90), vec!(45.0));
            assert_eq!(get_plates(30), vec!(10.0, 5.0));
        }
        // Weights with no exact plate combination must panic loudly.
        #[test]
        #[should_panic(expected = "sum exceeds weight")]
        fn too_small() {
            get_plates(4);
        }
        #[test]
        #[should_panic(expected = "no solution found")]
        fn not_multiple_of_five() {
            get_plates(6);
        }
        #[test]
        #[should_panic(expected = "no solution found")]
        fn too_large() {
            get_plates(301);
        }
    }
}
| true
|
797d55dd8b6ae993cc66a0962c07ed6eb2f8c240
|
Rust
|
davidkern/holder
|
/benches/ledger.rs
|
UTF-8
| 3,287
| 2.71875
| 3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use criterion::{
black_box,
criterion_group,
criterion_main,
measurement::Measurement,
BatchSize,
BenchmarkGroup,
BenchmarkId,
Criterion,
Throughput,
};
use holder::{Ledger, Direction};
use std::num::NonZeroUsize;
// comprehensive:
//const INCREMENTAL_ALLOCATION_COUNTS: [usize; 4] = [1_000, 10_000, 100_000, 1_000_000];
// quick(er):
const INCREMENTAL_ALLOCATION_COUNTS: [usize; 2] = [1_000, 10_000];
const INCREMENTAL_ALLOCATION_CAPACITY: usize = 10_000_000; // allocation never fails
/// Allocate a `count` of elements one at a time.
///
/// Benchmarks four incremental-allocation strategies for each configured
/// element count: the unchecked `Limit` baseline (forward and reverse) and the
/// `Ledger` sequential allocator (forward and reverse).
pub fn incremental_allocation(c: &mut Criterion) {
    let mut group = c.benchmark_group("incremental-allocation");
    for count in INCREMENTAL_ALLOCATION_COUNTS.iter() {
        // Throughput is reported per element so results compare across counts.
        group.throughput(Throughput::Elements(*count as u64));
        bench_incremental_allocation(
            &mut group,
            *count,
            "limit-forward",
            Limit::new(),
            |x| { black_box(x.forward()); }
        );
        group.throughput(Throughput::Elements(*count as u64));
        bench_incremental_allocation(
            &mut group,
            *count,
            "limit-reverse",
            Limit::new(),
            |x| { black_box(x.reverse()); }
        );
        group.throughput(Throughput::Elements(*count as u64));
        bench_incremental_allocation(
            &mut group,
            *count,
            "sequential-forward",
            Ledger::new(INCREMENTAL_ALLOCATION_CAPACITY),
            |x| { x.allocate(black_box(Direction::Forward), NonZeroUsize::new(1).unwrap()).unwrap(); }
        );
        group.throughput(Throughput::Elements(*count as u64));
        bench_incremental_allocation(
            &mut group,
            *count,
            "sequential-reverse",
            Ledger::new(INCREMENTAL_ALLOCATION_CAPACITY),
            |x| { x.allocate(black_box(Direction::Reverse), NonZeroUsize::new(1).unwrap()).unwrap(); }
        );
    }
    group.finish();
}
criterion_group!(benches, incremental_allocation);
criterion_main!(benches);
/// Register one benchmark: clone `initial` for each batch and apply `func` to
/// it `count` times, timing the whole loop.
fn bench_incremental_allocation<M: Measurement, S: Into<String>, T: Clone, F: Fn(&mut T)>(
    group: &mut BenchmarkGroup<M>,
    count: usize,
    name: S,
    initial: T,
    func: F)
{
    group.bench_with_input(
        BenchmarkId::new(name, count),
        &count,
        |b, &count| {
            b.iter_batched_ref(
                // Fresh subject per batch so state never carries over between
                // timed iterations.
                || initial.clone(),
                |subject| {
                    for _ in 0..count {
                        (func)(subject);
                    }
                },
                BatchSize::SmallInput);
        });
}
/// Limit case: increment/decrement indexes with no checking.
///
/// This is the theoretical-best baseline the `Ledger` benchmarks are measured
/// against: a bare cursor bump with no capacity or overlap checks.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Limit {
    forward: usize,
    _padding: u128, // ensure forward and reverse don't share a cacheline
    reverse: usize,
}

impl Limit {
    /// Fresh cursors: `forward` counts up from 0, `reverse` counts down from
    /// `usize::MAX`.
    pub fn new() -> Self {
        Self {
            forward: 0,
            _padding: 0,
            reverse: usize::MAX,
        }
    }

    /// Advance the forward cursor and return its new (post-increment) value.
    // Fix: the previous version bound the pre-increment value to an unused
    // local (`let value = self.forward;`); the returned value is unchanged.
    #[inline]
    pub fn forward(&mut self) -> usize {
        self.forward += 1;
        self.forward
    }

    /// Retreat the reverse cursor and return its new (post-decrement) value.
    #[inline]
    pub fn reverse(&mut self) -> usize {
        self.reverse -= 1;
        self.reverse
    }
}
| true
|
f0269085f90bd2691585ec8cc8047c85bae5856b
|
Rust
|
halvko/tide-handlebars
|
/src/lib.rs
|
UTF-8
| 9,694
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
//! # Tide-Handlebars integration
//!
//! This crate exposes [an extension trait](TideHandlebarsExt) that adds two
//! methods to [`handlebars::Handlebars`]:
//! [`render_response`](TideHandlebarsExt::render_response) and
//! [`render_body`](TideHandlebarsExt::render_body).
use handlebars::Handlebars;
use serde::Serialize;
use std::path::PathBuf;
use tide::{http::Mime, Body, Response, Result};
/// This extension trait adds two methods to [`handlebars::Handlebars`]:
/// [`render_response`](TideHandlebarsExt::render_response) and
/// [`render_body`](TideHandlebarsExt::render_body), plus `_ext` variants that
/// take an explicit file extension for content-type detection.
pub trait TideHandlebarsExt {
    /// `render_body` returns a fully-rendered [`tide::Body`] with mime
    /// type set based on the template name file extension using the
    /// logic at [`tide::http::Mime::from_extension`]. This will
    /// return an `Err` variant if the render was unsuccessful.
    ///
    /// ```rust
    /// use handlebars::Handlebars;
    /// use tide_handlebars::prelude::*;
    /// use std::collections::BTreeMap;
    /// let mut handlebars = Handlebars::new();
    /// handlebars
    ///     .register_template_file("simple.html", "./tests/templates/simple.html")
    ///     .unwrap();
    ///
    /// let mut data0 = BTreeMap::new();
    /// data0.insert("title".to_string(), "hello tide!".to_string());
    /// let mut body = handlebars.render_body("simple.html", &data0).unwrap();
    /// assert_eq!(body.mime(), &tide::http::mime::HTML);
    /// ```
    fn render_body<T>(&self, template_name: &str, context: &T) -> Result<Body>
    where
        T: Serialize;

    /// `render_body_ext` returns a fully-rendered [`tide::Body`] with mime
    /// type set based on the extension using the
    /// logic at [`tide::http::Mime::from_extension`]. This will
    /// return an `Err` variant if the render was unsuccessful.
    ///
    /// ```rust
    /// use handlebars::Handlebars;
    /// use tide_handlebars::prelude::*;
    /// use std::collections::BTreeMap;
    /// let mut handlebars = Handlebars::new();
    /// handlebars
    ///     .register_template_file("simple.hbs", "./tests/templates/simple.hbs")
    ///     .unwrap();
    ///
    /// let mut data0 = BTreeMap::new();
    /// data0.insert("title".to_string(), "hello tide!".to_string());
    /// let mut body = handlebars.render_body_ext("simple.hbs", &data0, "html").unwrap();
    /// assert_eq!(body.mime(), &tide::http::mime::HTML);
    /// ```
    fn render_body_ext<T>(&self, template_name: &str, context: &T, extension: &str) -> Result<Body>
    where
        T: Serialize;

    /// `render_response` returns a tide Response with a body rendered
    /// with [`render_body`](TideHandlebarsExt::render_body). This will
    /// return an `Err` variant if the render was unsuccessful.
    ///
    /// ```rust
    /// use handlebars::Handlebars;
    /// use tide_handlebars::prelude::*;
    /// use std::collections::BTreeMap;
    /// let mut handlebars = Handlebars::new();
    /// handlebars
    ///     .register_template_file("simple.html", "./tests/templates/simple.html")
    ///     .unwrap();
    /// let mut data0 = BTreeMap::new();
    /// data0.insert("title".to_string(), "hello tide!".to_string());
    /// let mut response = handlebars.render_response("simple.html", &data0).unwrap();
    /// assert_eq!(response.content_type(), Some(tide::http::mime::HTML));
    /// ```
    fn render_response<T>(&self, template_name: &str, context: &T) -> Result
    where
        T: Serialize;

    /// `render_response_ext` returns a tide Response with a body rendered
    /// with [`render_body`](TideHandlebarsExt::render_body). This will
    /// return an `Err` variant if the render was unsuccessful.
    ///
    /// ```rust
    /// use handlebars::Handlebars;
    /// use tide_handlebars::prelude::*;
    /// use std::collections::BTreeMap;
    /// let mut handlebars = Handlebars::new();
    /// handlebars
    ///     .register_template_file("simple.hbs", "./tests/templates/simple.hbs")
    ///     .unwrap();
    /// let mut data0 = BTreeMap::new();
    /// data0.insert("title".to_string(), "hello tide!".to_string());
    /// let mut response = handlebars.render_response_ext("simple.hbs", &data0, "html").unwrap();
    /// assert_eq!(response.content_type(), Some(tide::http::mime::HTML));
    /// ```
    fn render_response_ext<T>(&self, template_name: &str, context: &T, extension: &str) -> Result
    where
        T: Serialize;
}
impl TideHandlebarsExt for Handlebars<'_> {
    fn render_body_ext<T>(&self, template_name: &str, context: &T, extension: &str) -> Result<Body>
    where
        T: Serialize,
    {
        // Render first so template errors surface before a body is built.
        let rendered = self.render(template_name, context)?;
        let mut body = Body::from_string(rendered);
        // The caller-supplied extension drives content-type detection.
        if let Some(mime) = Mime::from_extension(extension) {
            body.set_mime(mime);
        }
        Ok(body)
    }

    fn render_body<T>(&self, template_name: &str, context: &T) -> Result<Body>
    where
        T: Serialize,
    {
        let rendered = self.render(template_name, context)?;
        let mut body = Body::from_string(rendered);
        // Derive the mime type from the template name's file extension, if any;
        // otherwise the body keeps its default mime type.
        let path = PathBuf::from(template_name);
        let guessed = path
            .extension()
            .and_then(|ext| Mime::from_extension(ext.to_string_lossy()));
        if let Some(mime) = guessed {
            body.set_mime(mime);
        }
        Ok(body)
    }

    fn render_response<T>(&self, template_name: &str, context: &T) -> Result
    where
        T: Serialize,
    {
        let body = self.render_body(template_name, context)?;
        let mut response = Response::new(200);
        response.set_body(body);
        Ok(response)
    }

    fn render_response_ext<T>(&self, template_name: &str, context: &T, extension: &str) -> Result
    where
        T: Serialize,
    {
        let body = self.render_body_ext(template_name, context, extension)?;
        let mut response = Response::new(200);
        response.set_body(body);
        Ok(response)
    }
}
pub mod prelude {
    //! Glob-import target: `use tide_handlebars::prelude::*;` brings the
    //! extension trait into scope.
    pub use super::TideHandlebarsExt;
}
#[cfg(test)]
mod tests {
    use super::*;
    use async_std::prelude::*;
    use std::collections::BTreeMap;
    // Rendering to a Body picks up the mime type from the template name.
    #[async_std::test]
    async fn test_body() {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_template_file("simple.html", "./tests/templates/simple.html")
            .unwrap();
        let mut data0 = BTreeMap::new();
        data0.insert("title".to_string(), "hello tide!".to_string());
        let mut body = handlebars.render_body("simple.html", &data0).unwrap();
        assert_eq!(body.mime(), &tide::http::mime::HTML);
        let mut body_string = String::new();
        body.read_to_string(&mut body_string).await.unwrap();
        assert_eq!(body_string, "<h1>hello tide!</h1>\n");
    }
    #[async_std::test]
    async fn response() {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_template_file("simple.html", "./tests/templates/simple.html")
            .unwrap();
        let mut data0 = BTreeMap::new();
        data0.insert("title".to_string(), "hello tide!".to_string());
        let mut response = handlebars.render_response("simple.html", &data0).unwrap();
        assert_eq!(response.content_type(), Some(tide::http::mime::HTML));
        let http_response: &mut tide::http::Response = response.as_mut();
        let body_string = http_response.body_string().await.unwrap();
        assert_eq!(body_string, "<h1>hello tide!</h1>\n");
    }
    // Template names registered without an extension fall back to text/plain.
    #[test]
    fn unknown_content_type() {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_templates_directory(".hbs", "./tests/templates")
            .unwrap();
        let mut data0 = BTreeMap::new();
        data0.insert("title".to_string(), "hello tide!".to_string());
        let body = handlebars.render_body("simple", &data0).unwrap();
        assert_eq!(body.mime(), &tide::http::mime::PLAIN);
    }
    // The _ext variants let the caller supply the extension explicitly.
    #[test]
    fn body_with_extension() {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_templates_directory(".hbs", "./tests/templates")
            .unwrap();
        let mut data0 = BTreeMap::new();
        data0.insert("title".to_string(), "hello tide!".to_string());
        let body = handlebars
            .render_body_ext("simple", &data0, "html")
            .unwrap();
        assert_eq!(body.mime(), &tide::http::mime::HTML);
    }
    #[async_std::test]
    async fn response_with_extension() {
        let mut handlebars = Handlebars::new();
        handlebars
            .register_templates_directory(".hbs", "./tests/templates")
            .unwrap();
        let mut data0 = BTreeMap::new();
        data0.insert("title".to_string(), "hello tide!".to_string());
        let mut response = handlebars
            .render_response_ext("simple", &data0, "html")
            .unwrap();
        assert_eq!(response.content_type(), Some(tide::http::mime::HTML));
        let http_response: &mut tide::http::Response = response.as_mut();
        let body_string = http_response.body_string().await.unwrap();
        assert_eq!(body_string, "<h1>hello tide!</h1>\n");
    }
    // Templates are validate on load in handlebars -- need to work into the component
    // #[test]
    // fn bad_template() {
    //     let mut handlebars = Handlebars::new();
    //     handlebars
    //         .register_templates_directory(".broken", "./tests/templates")
    //         .unwrap();
    //     let mut data0 = BTreeMap::new();
    //     data0.insert("title".to_string(), "hello tide!".to_string());
    //     let result = handlebars.render_body("simple", &data0);
    //     assert!(result.is_err());
    // }
}
| true
|
9b24ea686d80bb0591b97e5026245b543dabdcd5
|
Rust
|
timvisee/ffsend-api
|
/src/file/info.rs
|
UTF-8
| 1,864
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
use serde_json;
use crate::config;
use crate::crypto::key_set::KeySet;
/// File information, sent to the server before uploading.
///
/// This is used for Firefox Send v3.
#[derive(Debug, Serialize, Deserialize)]
pub struct FileInfo {
    /// The expiry time in seconds.
    ///
    /// Must be in any of these bounds:
    /// - Not authenticated: `[0, 86400]`
    /// - Authenticated: `[0, 86400 * 7]`
    #[serde(rename = "timeLimit")]
    expire: usize,
    /// The download limit.
    ///
    /// Must be in any of these bounds:
    /// - Not authenticated: `[0, 20]`
    /// - Authenticated: `[0, 200]`
    #[serde(rename = "dlimit")]
    download_limit: Option<u8>,
    /// Encrypted, base64-encoded file metadata.
    #[serde(rename = "fileMetadata")]
    metadata: String,
    /// File authorization value, formatted as `send-v1 <encoded auth key>`
    /// (see `FileInfo::from`).
    #[serde(rename = "authorization")]
    auth: String,
    /// Firefox Account user authentication information.
    // NOTE(review): always set to `None` in this module; exact semantics of
    // the `bearer` field are unconfirmed here.
    #[serde(rename = "bearer")]
    _firefox_user: Option<String>,
}
impl FileInfo {
    /// Constructor.
    ///
    /// Parameters:
    /// * `expire`: optional file expiry time in seconds; falls back to
    ///   `config::SEND_DEFAULT_EXPIRE_TIME` when `None`.
    /// * `download_limit`: optional download limit.
    /// * `metadata`: encrypted and base64 encoded file metadata
    /// * `key_set`: key set used to derive the `send-v1` authorization value
    pub fn from(
        expire: Option<usize>,
        download_limit: Option<u8>,
        metadata: String,
        key_set: &KeySet,
    ) -> Self {
        Self {
            expire: expire.unwrap_or(config::SEND_DEFAULT_EXPIRE_TIME),
            download_limit,
            metadata,
            // Panics if the auth key cannot be encoded.
            auth: format!("send-v1 {}", key_set.auth_key_encoded().unwrap()),
            _firefox_user: None,
        }
    }

    /// Convert this structure to a JSON string.
    ///
    /// Panics if serialization fails, which should not happen for this type.
    pub fn to_json(&self) -> String {
        serde_json::to_string(&self).unwrap()
    }
}
| true
|
72cd8d0b2908c4d2d26da8ae9aca1ddf65c472b5
|
Rust
|
yazgoo/sliders
|
/src/lib.rs
|
UTF-8
| 8,759
| 3.03125
| 3
|
[] |
no_license
|
use std::env;
use std::error::Error;
use std::process::Command;
use crossterm::{cursor::{Show,Hide,MoveTo},event::{read, Event, KeyCode, KeyModifiers},terminal::{size, Clear, ClearType, enable_raw_mode, disable_raw_mode}, ExecutableCommand};
use std::io::stdout;
/// Backend for a slider: reads and writes its value (expected range 0..=100).
pub trait SetterGetter {
    /// Reads the current value from the backend.
    fn get(&mut self) -> Result<u8, Box<dyn Error>>;
    /// Writes `value` to the backend.
    fn set(&mut self, value: u8) -> Result<(), Box<dyn Error>>;
}
/// A single named slider backed by a [`SetterGetter`].
pub struct Slider {
    pub name: String,
    pub setter_getter: Box<dyn SetterGetter>,
    // Cached last-known value, kept in sync by `set`/`initialize`.
    pub current: u8,
}
impl Slider {
    /// Reads the current value from the backend.
    fn get(&mut self) -> Result<u8, Box<dyn Error>> {
        self.setter_getter.get()
    }

    /// Writes `value` to the backend and caches it in `current`.
    fn set(&mut self, value: u8) -> Result<(), Box<dyn Error>> {
        self.current = value;
        self.setter_getter.set(value)
    }

    /// Increments the value by `n`, refusing to go above 100.
    fn inc(&mut self, n: u8) -> Result<(), Box<dyn Error>> {
        let val = self.get()?;
        // saturating_add avoids a debug-mode overflow panic if the backend
        // ever reports a value near u8::MAX.
        if val.saturating_add(n) <= 100 {
            // `set` already updates `self.current`; the original re-assigned
            // it redundantly afterwards.
            self.set(val + n)?;
        }
        Ok(())
    }

    /// Decrements the value by `n`, refusing to go below 0.
    fn dec(&mut self, n: u8) -> Result<(), Box<dyn Error>> {
        let val = self.get()?;
        if val >= n {
            self.set(val - n)?;
        }
        Ok(())
    }

    /// Primes the cached `current` value from the backend.
    fn initialize(&mut self) -> Result<(), Box<dyn Error>> {
        self.current = self.get()?;
        Ok(())
    }
}
/// A [`SetterGetter`] that shells out: one command to read the value, one
/// (with a `{}` placeholder) to write it.
struct CommandLineSetterGetter {
    get_command: String,
    set_command: String,
}
impl SetterGetter for CommandLineSetterGetter {
    /// Runs the configured get command via `sh -c` and parses its stdout
    /// (minus one trailing newline, if any) as a `u8`.
    fn get(&mut self) -> Result<u8, Box<dyn Error>> {
        let output = Command::new("sh")
            .arg("-c")
            .arg(self.get_command.clone())
            .output()?;
        let stdout = String::from_utf8_lossy(&output.stdout).to_string();
        let trimmed = stdout.strip_suffix('\n').unwrap_or(&stdout);
        Ok(trimmed.parse()?)
    }

    /// Substitutes `{}` in the set command with `value` and runs it.
    fn set(&mut self, value: u8) -> Result<(), Box<dyn Error>> {
        let command = self.set_command.replace("{}", &value.to_string());
        Command::new("sh").arg("-c").arg(command).output()?;
        Ok(())
    }
}
/// Builds a [`Slider`] backed by shell commands.
fn command_line_slider(name: String, get_command: String, set_command: String) -> Slider {
    let setter_getter = Box::new(CommandLineSetterGetter { get_command, set_command });
    // 25 is a placeholder; `from_args` calls `initialize` to read the real value.
    Slider { name, setter_getter, current: 25 }
}
/// The whole slider board plus its placement on screen.
pub struct Sliders {
    pub sliders: Vec<Slider>,
    // Top-left origin of the drawing area, as percentages of the terminal size.
    pub coordinates_percent: (u16, u16),
    // Width/height of the drawing area, as percentages of the terminal size.
    pub size_percent: (u16, u16),
    // Index of the currently selected slider.
    pub current: usize,
}
impl Sliders {
    /// Clears the whole terminal.
    fn clear() -> Result<(), Box<dyn Error>> {
        stdout().execute(Clear(ClearType::All))?;
        Ok(())
    }
    /// Draws every slider as a vertical gauge; the selected slider's title is
    /// wrapped in `<` `>` markers.
    pub fn draw(&self) -> Result<(), Box<dyn Error>> {
        let (total_cols, total_rows) = size()?;
        // Drawing area size and origin, derived from the percentage fields.
        let (cols, rows) = (total_cols * self.size_percent.0 / 100, total_rows * self.size_percent.1 / 100);
        let (x0, y0) = (total_cols * self.coordinates_percent.0 / 100, total_rows * self.coordinates_percent.1 / 100);
        let vertical_margin = 10 * self.size_percent.1 / 100;
        // Horizontal padding around each 5-cell-wide gauge.
        // NOTE(review): underflows (panics) when cols / sliders.len() < 5,
        // e.g. many sliders on a narrow terminal — confirm minimum width.
        let spaces_count = (cols as usize / self.sliders.len() - 5) / 2;
        let spaces = format!("{:width$}", "", width=spaces_count);
        for y in 0..(rows - 1) {
            stdout().execute(MoveTo(x0, y0 + y))?;
            for (i, slider) in self.sliders.iter().enumerate() {
                let value = slider.current as u16;
                // Screen row where the filled portion of the gauge begins.
                let start_y = (rows - vertical_margin) * (100 - value) / 100;
                if y > vertical_margin && y < (rows - vertical_margin) {
                    print!("{}", spaces);
                    if y > start_y {
                        print!("│ █ │");
                    }
                    else {
                        print!("│   │");
                    }
                    print!("{}", spaces);
                }
                else if y == (rows - vertical_margin) {
                    print!("{}", spaces);
                    print!("╰───╯");
                    print!("{}", spaces);
                }
                else if y == vertical_margin {
                    print!("{}", spaces);
                    print!("╭───╮");
                    print!("{}", spaces);
                }
                else if y == (rows - vertical_margin + 1) {
                    // Title row, centered under the gauge column.
                    let title = &slider.name;
                    let spaces_count = (cols as usize / self.sliders.len() - title.len() - 2) / 2;
                    let spaces = format!("{:width$}", "", width=spaces_count);
                    print!("{}", spaces);
                    print!("{}", if i == self.current { "<" } else { " " });
                    print!("{}", title);
                    print!("{}", if i == self.current { ">" } else { " " });
                    print!("{}", spaces);
                }
            }
        }
        Ok(())
    }
    /// Blocks until a key event arrives, ignoring other terminal events.
    fn read_key() -> Result<(KeyCode, KeyModifiers), Box<dyn Error>> {
        loop {
            if let Event::Key(e) = read()?
            {
                return Ok((e.code, e.modifiers));
            }
        }
    }
    /// Shows the key-binding help box until any key is pressed; raw mode is
    /// suspended so the multi-line println renders normally.
    fn print_help() -> Result<(), Box<dyn Error>> {
        disable_raw_mode()?;
        stdout() .execute(MoveTo(4, 4))?;
        println!(r#"
    ╭─────────────────────────────────────╮
    │ h, left arrow     previous slider   │
    │ l, right arrow    next slider       │
    │ k, up arrow       increment slider  │
    │ j, down arrow     decrement slider  │
    │ g                 set slider to 0   │
    │ G                 set slider to 100 │
    │ m                 set slider to 50  │
    │ ?                 prints this help  │
    │ q                 exit              │
    │ ctrl+u            increment 10      │
    │ ctrl+d            decrement 10      │
    ╰─────────────────────────────────────╯
        "#);
        enable_raw_mode()?;
        Sliders::read_key()?;
        Sliders::clear()?;
        Ok(())
    }
    /// Handles one key press; returns `Ok(false)` when the user quits.
    pub fn prompt(&mut self) -> Result<bool, Box<dyn Error>> {
        match Sliders::read_key()? {
            (KeyCode::Char('h'), _) | (KeyCode::Left, _) => if self.current > 0 { self.current -= 1 },
            (KeyCode::Char('l'), _) | (KeyCode::Right, _) => if self.current < (self.sliders.len() - 1) { self.current += 1 },
            (KeyCode::Char('k'), _) | (KeyCode::Up, _) => self.sliders[self.current].inc(1)?,
            (KeyCode::Char('j'), _) | (KeyCode::Down , _)=> self.sliders[self.current].dec(1)?,
            (KeyCode::Char('g'), _) => self.sliders[self.current].set(0)?,
            (KeyCode::Char('G'), _) => self.sliders[self.current].set(100)?,
            (KeyCode::Char('m'), _) => self.sliders[self.current].set(50)?,
            (KeyCode::Char('?'), _) => Sliders::print_help()?,
            (KeyCode::Char('q'), _) => return Ok(false),
            (KeyCode::Char('u'), x) if x.contains(KeyModifiers::CONTROL) => self.sliders[self.current].inc(10)?,
            (KeyCode::Char('d'), x) if x.contains(KeyModifiers::CONTROL) => self.sliders[self.current].dec(10)?,
            _ => {},
        };
        Ok(true)
    }
    /// Main loop: hides the cursor, enters raw mode, then draws and prompts
    /// until the user quits; restores the terminal on exit.
    pub fn run(&mut self) -> Result<(), Box<dyn Error>> {
        stdout().execute(Hide)?;
        Sliders::clear()?;
        enable_raw_mode()?;
        loop {
            self.draw()?;
            if !self.prompt()? {
                break;
            }
        }
        disable_raw_mode()?;
        stdout().execute(Show)?;
        Sliders::clear()?;
        Ok(())
    }
    /// Builds the board from repeated `--name X --get CMD --set CMD` flags.
    // NOTE(review): assumes every flag is followed by a value (`i += 2`) and
    // that the three flag lists line up one-to-one; unbalanced arguments will
    // panic on indexing — confirm acceptable for this CLI.
    pub fn from_args() -> Result<Sliders, Box<dyn Error>> {
        let mut names = vec![];
        let mut get_commands = vec![];
        let mut set_commands = vec![];
        let mut i = 1;
        let args : Vec<String> = env::args().collect();
        while i < args.len() {
            match args[i].as_str() {
                "--name" => names.push(args[i + 1].clone()),
                "--get" => get_commands.push(args[i + 1].clone()),
                "--set" => set_commands.push(args[i + 1].clone()),
                _ => {},
            }
            i += 2;
        }
        let mut sliders = vec![];
        for i in 0..names.len() {
            let get_command = get_commands[i].clone();
            let set_command = set_commands[i].clone();
            sliders.push(command_line_slider(
                names[i].clone(),
                get_command,
                set_command));
        }
        // Read each slider's real starting value from its backend.
        for slider in &mut sliders {
            slider.initialize()?;
        }
        Ok(Sliders { sliders, coordinates_percent: (0, 0), size_percent: (100, 100), current: 0 })
    }
}
| true
|
636cb73fd860854ce2ed123c14f90fe2c6c9fb09
|
Rust
|
nfiles/pips
|
/src/parser/test_helpers.rs
|
UTF-8
| 830
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
// Signature of a nom-style parser under test: consumes an input, returns the
// unconsumed remainder plus the parsed value.
#[cfg(test)]
type ParseFunc<I, O> = fn(input: I) -> nom::IResult<I, O>;
/// Runs `test` against each `(input, expected)` case, asserting that parsing
/// succeeds and yields `expected`; failure messages include the case index,
/// the input, and the unparsed remainder.
#[cfg(test)]
pub fn test_parser<'a, I, O>(test: ParseFunc<I, O>, cases: Vec<(&'a str, O)>)
where
    I: std::fmt::Debug,
    I: std::convert::From<&'a str>,
    I: std::fmt::Display,
    O: std::fmt::Debug,
    O: std::cmp::PartialEq,
{
    for (index, (input, expected)) in cases.iter().enumerate() {
        let actual = (test)(input.clone().into());
        let (rest, actual) = actual.expect(&format!(
            "[{}]: failed to parse expression \"{}\"",
            index, input
        ));
        assert_eq!(
            actual,
            *expected,
            "\ninput [{index}]: `{input}`\nrest [{index}]: `{rest}`",
            index = index,
            input = input,
            rest = rest
        );
    }
}
| true
|
508c34869550979ef3ef776ce264ca7180927567
|
Rust
|
xy-plus/Rust
|
/slice_word/src/main.rs
|
UTF-8
| 444
| 3.359375
| 3
|
[] |
no_license
|
use std::io;
/// Returns the first space-delimited word of `sentence`, or the whole string
/// when it contains no space. An input starting with a space yields "".
fn slice_word(sentence: &str) -> &str {
    // `str::find` replaces the original manual byte scan; ' ' is a single
    // ASCII byte, so the returned index is always a valid char boundary.
    match sentence.find(' ') {
        Some(i) => &sentence[..i],
        None => sentence,
    }
}
/// Reads one line from stdin and prints its first word.
fn main() {
    let mut line = String::new();
    io::stdin().read_line(&mut line).expect("Fail to read");
    let first_word = slice_word(&line);
    println!("{}", first_word);
}
| true
|
59eb571c635ec80e66b40a9c79cd02f428070b9b
|
Rust
|
rust-lang-ja/rust-by-example-ja
|
/src-old/fn/closures/input_functions/input_functions.rs
|
UTF-8
| 410
| 3.34375
| 3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
// Define a function that takes another function as an argument and invokes
// it immediately.
fn call_function<F>(f: F)
where
    F: Fn(),
{
    f()
}
// Define a simple function to pass as an argument.
fn print() {
    println!("I'm a function!")
}
fn main() {
    // Define a closure analogous to `print()` defined above.
    let closure = || println!("I'm a closure!");
    call_function(closure);
    call_function(print);
}
| true
|
7443d033724dd4c43d7ce4fcd65566454a8a2fdf
|
Rust
|
Owez/superconf
|
/src/lib.rs
|
UTF-8
| 3,620
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#![no_std]
extern crate alloc;
use alloc::vec::Vec;
/// Errors produced while parsing superconf input.
#[derive(Debug, PartialEq, Clone)]
pub enum SuperError {
    /// Raised by [SuperItem] parsing when a line has no key/value separator
    /// (e.g. a blank line); the [Parse] implementation for [SuperConf] skips
    /// lines that produce this error.
    EmptyItem,
}
/// Types that can be parsed from a borrowed string slice, keeping references
/// into the input alive for `'a`.
pub trait Parse<'a>: Sized {
    fn parse(input: &'a str) -> Result<Self, SuperError>;
}
/// A parsed configuration value.
#[derive(Debug, PartialEq, Clone)]
pub enum SuperValue<'a> {
    /// Empty value (the input was blank after trimming).
    Nothing,
    /// A bare token that is neither a boolean nor an integer.
    Name(&'a str),
    Bool(bool),
    Integer(i64),
    /// `[...]` list — parsing is currently `todo!` in `SuperValue::parse`.
    List(Vec<SuperValue<'a>>),
    /// `{...}` group — parsing is currently `todo!` in `SuperValue::parse`.
    Group(Vec<SuperItem<'a>>),
}
impl<'a> Parse<'a> for SuperValue<'a> {
    /// Parses a trimmed value: `true`/`false` become booleans, an empty
    /// string becomes `Nothing`, bracketed/braced spans are list/group
    /// (both unimplemented — they panic via `todo!`), everything else is
    /// integer-or-name.
    fn parse(input: &'a str) -> Result<Self, SuperError> {
        match input.trim() {
            "true" => Ok(Self::Bool(true)),
            "false" => Ok(Self::Bool(false)),
            trimmed => match trimmed.len() {
                0 => Ok(Self::Nothing),
                1 => Ok(num_or_name(trimmed)),
                _ => {
                    // Inspect first and last character to detect delimiters.
                    let mut trimmed_chars = trimmed.chars();
                    match (trimmed_chars.next().unwrap(), trimmed_chars.last().unwrap()) {
                        ('[', ']') => todo!("list"),
                        ('{', '}') => todo!("group"),
                        _ => Ok(num_or_name(trimmed)),
                    }
                }
            },
        }
    }
}
/// Interprets `input` as an `Integer` when it parses as `i64`, otherwise as
/// a bare `Name`.
fn num_or_name<'a>(input: &'a str) -> SuperValue<'a> {
    match input.parse::<i64>() {
        Ok(number) => SuperValue::Integer(number),
        Err(_) => SuperValue::Name(input),
    }
}
/// A single `key value` line of the configuration.
#[derive(Debug, PartialEq, Clone)]
pub struct SuperItem<'a> {
    pub key: &'a str,
    pub value: SuperValue<'a>,
}
impl<'a> Parse<'a> for SuperItem<'a> {
    /// Splits the line at the first unescaped space into key and value;
    /// returns [SuperError::EmptyItem] when no separator exists.
    // NOTE(review): escape backslashes stay embedded in `key` (see the TODO
    // in `flipflop_once`), yet the `spaces_in_keys` test expects them
    // removed — confirm intended behavior.
    fn parse(input: &'a str) -> Result<Self, SuperError> {
        let (key, value) = flipflop_once(input, ' ').ok_or(SuperError::EmptyItem)?;
        Ok(Self {
            key,
            value: SuperValue::parse(value)?,
        })
    }
}
/// Splits `input` once on `sep`, where a backslash escapes the character
/// immediately after it: an escaped separator does not cause a split. The
/// escape state is tracked in a boolean that flips off after one character.
fn flipflop_once(input: &str, sep: char) -> Option<(&str, &str)> {
    // TODO: remove backslashes
    let mut escaped = false;
    input.split_once(|c: char| {
        if c == '\\' {
            // Arm the escape for the next character; a backslash itself
            // never splits.
            escaped = true;
            return false;
        }
        let split_here = c == sep && !escaped;
        // Any non-backslash character consumes a pending escape.
        escaped = false;
        split_here
    })
}
/// A parsed configuration: one [SuperItem] per non-empty input line.
#[derive(Debug, PartialEq, Clone)]
pub struct SuperConf<'a> {
    pub items: Vec<SuperItem<'a>>,
}
impl<'a> Parse<'a> for SuperConf<'a> {
    /// Parses one item per line, silently skipping lines that yield
    /// [SuperError::EmptyItem] and propagating any other error.
    fn parse(input: &'a str) -> Result<Self, SuperError> {
        let items = input
            .split('\n')
            .filter_map(|line| match SuperItem::parse(line) {
                Err(SuperError::EmptyItem) => None,
                other => Some(other),
            })
            .collect::<Result<Vec<_>, _>>()?;
        Ok(Self { items })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn basic_parse() {
        SuperConf::parse("loop 10\n\nhello there").unwrap();
        SuperConf::parse("loop 10\n\nhello there").unwrap();
        // NOTE(review): `{...}` values hit `todo!("group")` in
        // SuperValue::parse, so this line likely panics — confirm intent.
        SuperConf::parse("loop 10\nloop {hello: there, other 2334, final [2,4,324,2]}").unwrap();
    }
    #[test]
    fn spaces_in_keys() {
        // NOTE(review): flipflop_once keeps the escape backslash in the key
        // slice ("hello\\ there"), so this expectation of "hello there"
        // appears to fail until the TODO to strip backslashes is done.
        assert_eq!(
            SuperItem::parse("hello\\ there true").unwrap(),
            SuperItem {
                key: "hello there",
                value: SuperValue::Bool(true)
            }
        );
    }
}
| true
|
c1f3e730baafe81a8f92a94b485c7c8775de3389
|
Rust
|
lk29/braiins-open
|
/utils-rs/unvariant/unvariant-tests/tests/common/str_frame.rs
|
UTF-8
| 3,298
| 2.640625
| 3
|
[] |
no_license
|
// Copyright (C) 2020 Braiins Systems s.r.o.
//
// This file is part of Braiins Open-Source Initiative (BOSI).
//
// BOSI is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//
// Please, keep in mind that we may also license BOSI or any part thereof
// under a proprietary license. For more information on the terms and conditions
// of such proprietary license or if you have any other questions, please
// contact us at opensource@braiins.com.
use std::convert::TryFrom;
use ii_unvariant::{id, GetId, /* HandleFuture, */ Id};
/// Similar to `Frame` but contains a string reference
/// and has string reference ID type.
/// This type is for testing of both
/// a) passing a variant by reference, and
/// b) the variant having a lifetime generic argument.
// Fields: 0 = the identifier string, 1 = a success flag consumed by the
// `TryFrom` impls below.
pub struct StrFrame<'a>(pub &'a str, pub bool);
/// When passing variants by reference, we need to implement
/// `GetId` on the reference type instead:
impl<'a> GetId for StrFrame<'a> {
    type Id = &'a str;
    fn get_id(&self) -> &'a str {
        self.0.as_ref()
    }
}
// Unit variant registered under the string ID "foo".
#[id("foo")]
pub struct StrFoo;
// Conversion used when the frame is handled by reference; asserts the
// dispatcher routed the right ID here.
impl<'a, 'b> From<&'a StrFrame<'b>> for StrFoo {
    fn from(frame: &'a StrFrame<'b>) -> Self {
        assert_eq!(frame.get_id(), Self::ID);
        StrFoo
    }
}
// Conversion used when the variant itself is a plain `&str`.
impl<'a> From<&'a str> for StrFoo {
    fn from(frame: &'a str) -> Self {
        assert_eq!(frame.get_id(), Self::ID);
        StrFoo
    }
}
// Unit variant registered under the string ID "bar"; mirrors `StrFoo`.
#[id("bar")]
pub struct StrBar;
impl<'a, 'b> From<&'a StrFrame<'b>> for StrBar {
    fn from(frame: &'a StrFrame<'b>) -> Self {
        assert_eq!(frame.get_id(), Self::ID);
        StrBar
    }
}
impl<'a> From<&'a str> for StrBar {
    fn from(frame: &'a str) -> Self {
        assert_eq!(frame.get_id(), Self::ID);
        StrBar
    }
}
/// Error returned by the fallible (`TryFrom`) conversions below, produced
/// when a frame's success flag (`StrFrame.1`) is `false`.
#[derive(PartialEq, Debug)]
pub struct TryStrFrameError;
// Collapses the error into a plain `bool` (always `false`) for tests.
impl From<TryStrFrameError> for bool {
    fn from(_: TryStrFrameError) -> Self {
        false
    }
}
// Fallible counterpart of `StrFoo`: conversion succeeds only when the
// frame's success flag is set.
#[id("foo")]
pub struct TryStrFoo;
impl<'a, 'b> TryFrom<&'a StrFrame<'b>> for TryStrFoo {
    type Error = TryStrFrameError;
    fn try_from(frame: &'a StrFrame<'b>) -> Result<Self, TryStrFrameError> {
        assert_eq!(frame.get_id(), Self::ID);
        if frame.1 {
            Ok(TryStrFoo)
        } else {
            Err(TryStrFrameError)
        }
    }
}
// Fallible counterpart of `StrBar`; mirrors `TryStrFoo`.
#[id("bar")]
pub struct TryStrBar;
impl<'a, 'b> TryFrom<&'a StrFrame<'b>> for TryStrBar {
    type Error = TryStrFrameError;
    fn try_from(frame: &'a StrFrame<'b>) -> Result<Self, TryStrFrameError> {
        assert_eq!(frame.get_id(), Self::ID);
        if frame.1 {
            Ok(TryStrBar)
        } else {
            Err(TryStrFrameError)
        }
    }
}
/// Handler trait for `StrFrame` variants passed by reference.
pub trait TryStrFrameHandler {
    fn handle<'a, 'b>(&mut self, variant: &'a StrFrame<'b>) -> Result<(), bool>;
}
| true
|
d90a4b0485249ddfc167a27ea4f387e9ae383a67
|
Rust
|
kpensec/rusty_synth
|
/src/synth/mod.rs
|
UTF-8
| 2,047
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
extern crate rand;
mod key;
mod periodical_wave;
mod noise;
mod envelop;
mod note;
use utils::clamp;
use synth::key::Key;
/// Polyphonic synthesizer state: 13 keys (one octave) mixed into one stream.
pub struct Synthesizer {
    // Master volume in [0.0, 1.0], adjusted via `set_volume`.
    volume: f32,
    // Output sample rate in Hz.
    playback_freq: i32,
    // Running sample counter (initialized to 0; not updated in this module).
    sample_number: i32,
    // One key per semitone, indices 0..=12.
    keys: [Key; 13],
    // Seconds per sample: 1 / playback_freq.
    step: f32,
    // When false, `update` produces no audio.
    active: bool,
}
impl Synthesizer {
pub fn new(playback_freq: i32) -> Self{
Synthesizer {
volume: 0.2,
playback_freq: playback_freq,
sample_number: 0,
keys: [ Key::new(0), Key::new(1), Key::new(2), Key::new(3), Key::new(4), Key::new(5), Key::new(6),
Key::new(7), Key::new(8), Key::new(9), Key::new(10), Key::new(11), Key::new(12), ],
step: 1.0 / playback_freq as f32,
active: true
}
}
pub fn get_volume(self) -> f32 {
self.volume
}
pub fn set_volume(&mut self, q: f32) {
self.volume = clamp(self.volume + q, 0.0, 1.0);
}
pub fn start_note(&mut self, key_idx: usize) {
// TODO vec upsert here!
self.keys[key_idx].press()
}
pub fn release_note(&mut self, key_idx: usize) {
// TODO vec update here!
self.keys[key_idx].release()
}
fn blend_sample(lhs: f32, rhs: f32) -> f32 {
// TODO read a book about sample blending/polyphony!
lhs + rhs
}
fn get_sample(&mut self) -> f32 {
let result = 0.0;
for key in self.keys.iter_mut() {
self.blend_sample(result, key.update(self.volume, self.step));
}
result
}
pub fn update(&mut self, eps: f32) -> Vec<f32> {
// TODO this should update/return the audio queue buffer!
let mut result = Vec::with_capacity(self.buffer_size);
if ! self.active {
result
}
for x in 0..self.buffer_size {
let sample = self.get_sample();
// TODO search
result.push(sample);
result.push(sample);
}
}
pub fn toggle_audio(&mut self) -> (){
self.active = ! self.active;
}
}
| true
|
868e42bc5ebc8379571ae199d9a10b12db0b7dd3
|
Rust
|
pepyakin/spree-proto
|
/polkadot-re-mock/src/error.rs
|
UTF-8
| 434
| 2.703125
| 3
|
[] |
no_license
|
use std::io;
use thiserror::Error;
/// Top-level error type for this crate.
#[derive(Error, Debug)]
pub enum Error {
    /// A generic error coming from the interpreter.
    #[error("Interpreter error")]
    Interpreter(#[from] wasmi::Error),
    /// A generic I/O error has happened.
    #[error("I/O error")]
    Io(#[from] io::Error),
    /// A free-form message (constructed via `From<String>` below).
    #[error("{0}")]
    Msg(String),
}
impl From<String> for Error {
fn from(msg: String) -> Self {
Self::Msg(msg)
}
}
impl wasmi::HostError for Error {}
| true
|
da41dd5b46144e049620bff31241a0702a735f4e
|
Rust
|
gyk/TrivialSolutions
|
/CuckooHashing/src/lib.rs
|
UTF-8
| 8,613
| 3.375
| 3
|
[
"WTFPL"
] |
permissive
|
//! Cuckoo Hashing
use std::hash::{Hash, Hasher};
use std::mem;
use rand::{thread_rng, Rng};
use siphasher::sip::SipHasher;
const DEFAULT_CAPACITY: usize = 1024;
/// A single key/value pair occupying one table slot.
struct KeyValue<K, V> {
    key: K,
    value: V,
}

impl<K, V> KeyValue<K, V> {
    /// Creates a pair from its parts.
    fn new(key: K, value: V) -> Self {
        KeyValue { key, value }
    }

    /// Swaps in `new_value` and returns the previous value.
    fn replace(&mut self, new_value: V) -> V {
        mem::replace(&mut self.value, new_value)
    }
}
/// Hashes `key` with SipHash seeded by the `(k0, k1)` pair; each of the two
/// tables uses a different seed pair.
fn hash_by<K: Hash>(key: &K, (k0, k1): (u64, u64)) -> u64 {
    let mut hasher = SipHasher::new_with_keys(k0, k1);
    key.hash(&mut hasher);
    hasher.finish()
}
/// A hash map using cuckoo hashing: two equally-sized tables with
/// independently seeded hash functions; each key lives in exactly one of its
/// two candidate slots.
pub struct CuckooHashMap<K, V> {
    // One SipHash seed pair per table.
    seeds: [(u64, u64); 2],
    tables: [Vec<Option<KeyValue<K, V>>>; 2], // overhead of Option?
    // Maximum displacement-chain length before assuming a cycle.
    cycle_threshold: usize,
    n_elements: usize,
}
impl<K: Hash + Eq, V: Clone> CuckooHashMap<K, V> {
    /// Creates a map with the default capacity (1024 slots per table).
    pub fn new() -> Self {
        Self::with_capacity(DEFAULT_CAPACITY)
    }
    /// Capacity refers to the length of both allocated vectors.
    pub fn with_capacity(cap: usize) -> Self {
        assert!(cap > 1, "Capacity too small");
        let mut this = CuckooHashMap {
            seeds: Default::default(),
            tables: [
                vec![],
                vec![],
            ],
            // ~log2(cap) displacements before declaring a cycle.
            cycle_threshold: (cap as f64).log2().ceil() as usize,
            n_elements: 0,
        };
        for t in 0 ..= 1 {
            this.tables[t] = (0..cap).map(|_| None).collect();
        }
        this.reseed();
        this
    }
    /// Number of slots per table.
    pub fn capacity(&self) -> usize {
        self.tables[0].len()
    }
    /// Number of stored elements.
    pub fn len(&self) -> usize {
        self.n_elements
    }
    /// Slot index for `key` in table `t`.
    fn key_to_i(&self, key: &K, t: usize) -> usize {
        (hash_by(&key, self.seeds[t]) as usize) % self.capacity()
    }
    /// Returns the stored pair for `key` in table `t`, if its slot holds
    /// exactly that key.
    fn get_kv_at(&self, key: &K, t: usize) -> Option<&KeyValue<K, V>> {
        let i = self.key_to_i(&key, t);
        match &self.tables[t][i] {
            Some(kv) if &kv.key == key => Some(&kv),
            _ => None,
        }
    }
    /// Mutable variant of `get_kv_at`.
    fn get_kv_at_mut(&mut self, key: &K, t: usize) -> Option<&mut KeyValue<K, V>> {
        let i = self.key_to_i(&key, t);
        match &mut self.tables[t][i] {
            Some(kv) if &kv.key == key => Some(kv),
            _ => None,
        }
    }
    /// Inserts `key` → `value`, returning the previous value if the key was
    /// already present. Panics when both tables are completely full.
    // NOTE(review): only table 0 is probed for an existing key here; a key
    // currently resident in table 1 would be inserted a second time instead
    // of replaced — confirm whether that is reachable.
    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
        // TODO: Should resize when loading factor exceeds a particular value. The cuckoo hashmap
        // cannot function when it is almost full.
        if self.len() == self.capacity() * 2 {
            panic!("The hashmap is full");
        }
        match self.get_kv_at_mut(&key, 0) {
            Some(kv) => {
                return Some(kv.replace(value));
            }
            None => (),
        }
        let mut kv = KeyValue::new(key, value);
        self.n_elements += 1;
        // Keep displacing until the pair lands; rehash on every detected cycle.
        loop {
            kv = match self.cuckoo(kv, 0) {
                None => return None,
                Some(kv) => {
                    self.rehash();
                    kv
                }
            };
        }
    }
    // If the iteration does not end after the threshold, returns the KV passed in the arguments. If
    // the insersion succeeds, returns `None`.
    fn cuckoo(&mut self, kv: KeyValue<K, V>, mut t: usize)
        -> Option<KeyValue<K, V>>
    {
        match self.cuckoo_impl(kv, &mut t, self.cycle_threshold) {
            None => None,
            Some(kv) => {
                t = 1 - t;
                // puts it back in reversed order
                self.cuckoo_impl(kv, &mut t, self.cycle_threshold)
            }
        }
    }
    // The cuckoo insert version specifically for rehashing. The cycle threshold is set to the
    // number of elements in the table.
    fn cuckoo_rehash(&mut self, kv: KeyValue<K, V>, mut t: usize)
        -> Option<KeyValue<K, V>>
    {
        self.cuckoo_impl(kv, &mut t, self.len())
    }
    // Returns the last KV.
    fn cuckoo_impl(&mut self, mut kv: KeyValue<K, V>, t: &mut usize, cycle_threshold: usize)
        -> Option<KeyValue<K, V>>
    {
        let mut n_loops = cycle_threshold;
        while n_loops > 0 {
            let i = self.key_to_i(&kv.key, *t);
            match self.tables[*t][i].as_mut() {
                None => {
                    // Empty slot: done.
                    self.tables[*t][i] = Some(kv);
                    return None;
                }
                Some(old_kv) => {
                    // Evict the occupant and retry it in the other table.
                    kv = mem::replace(old_kv, kv);
                    *t = 1 - *t;
                }
            }
            n_loops -= 1;
        }
        Some(kv)
    }
    /// Picks two fresh, distinct seed pairs.
    fn reseed(&mut self) {
        let mut rng = thread_rng();
        self.seeds[0] = rng.gen();
        self.seeds[1] = loop {
            let s = rng.gen();
            if s != self.seeds[0] {
                break s;
            }
        };
    }
    /// Empties slot `(t, i)`, returning its previous occupant.
    fn remove_at(&mut self, t: usize, i: usize) -> Option<KeyValue<K, V>> {
        mem::replace(&mut self.tables[t][i], None)
    }
    // In-place rehash
    fn rehash(&mut self) {
        while !self.rehash_impl() {} // FIXME: resizes if repeatedly failed
    }
    /// Finds any empty slot; used to park a pair that could not be placed
    /// during rehashing.
    fn find_vacancy(&mut self) -> &mut Option<KeyValue<K, V>> {
        for t in 0 ..= 1 {
            for i in 0..self.capacity() {
                if self.tables[t][i].is_none() {
                    return &mut self.tables[t][i];
                }
            }
        }
        unreachable!("At least one vacant should exist");
    }
    /// One rehash attempt with fresh seeds; returns `false` when a pair
    /// could not be re-placed (caller retries with new seeds).
    fn rehash_impl(&mut self) -> bool {
        self.reseed();
        for t in 0 ..= 1 {
            for i in 0..self.capacity() {
                if let Some(kv) = &self.tables[t][i] {
                    let key = &kv.key;
                    if self.key_to_i(key, t) != i { // misplaced
                        let kv = self.remove_at(t, i).unwrap();
                        if let Some(kv) = self.cuckoo_rehash(kv, t) {
                            *self.find_vacancy() = Some(kv); // puts it back
                            return false;
                        }
                    }
                }
            }
        }
        true
    }
    /// Returns a reference to the value for `key`, probing both tables.
    pub fn get(&self, key: &K) -> Option<&V> {
        for t in 0 ..= 1 {
            match self.get_kv_at(key, t) {
                Some(kv) => return Some(&kv.value),
                None => (),
            }
        }
        None
    }
    /// Mutable variant of `get`.
    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
        // Cannot simply call `get_kv_at_mut` inside the loop, because of
        // https://github.com/rust-lang/rust/issues/21906.
        for t in 0 ..= 1 {
            let i = self.key_to_i(&key, t);
            if self.tables[t][i].is_some() {
                return self.tables[t][i].as_mut().map(|kv| &mut kv.value);
            }
        }
        None
    }
    /// Whether `key` is present in either table.
    pub fn contains(&self, key: &K) -> bool {
        self.get(key).is_some()
    }
    /// Removes `key` and returns its value, if present.
    pub fn remove(&mut self, key: &K) -> Option<V> {
        for t in 0 ..= 1 {
            let i = self.key_to_i(&key, t);
            match &mut self.tables[t][i] {
                Some(kv) if &kv.key == key => {
                    let kv = self.tables[t][i].take().unwrap();
                    self.n_elements -= 1;
                    return Some(kv.value);
                }
                _ => (),
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Insert, membership, and removal round-trip at 50% load.
    #[test]
    fn smoke() {
        let mut cuckoo_hashmap = CuckooHashMap::with_capacity(1000);
        for k in 0..500 {
            assert_eq!(cuckoo_hashmap.insert(k, 1), None);
        }
        for k in 0..500 {
            assert!(cuckoo_hashmap.contains(&k));
        }
        assert_eq!(cuckoo_hashmap.len(), 500);
        for k in 0..250 {
            assert_eq!(cuckoo_hashmap.remove(&k), Some(1));
        }
        assert_eq!(cuckoo_hashmap.len(), 250);
        for k in 0..250 {
            assert!(!cuckoo_hashmap.contains(&k));
        }
        for k in 250..500 {
            assert!(cuckoo_hashmap.contains(&k));
        }
    }
    // Retries until filling the map to 100% forces a reseed/rehash, then
    // verifies no element was lost.
    #[test]
    fn rehash() {
        let mut cuckoo_hashmap;
        loop {
            cuckoo_hashmap = CuckooHashMap::with_capacity(500);
            let old_seeds = cuckoo_hashmap.seeds;
            for k in 0..500 {
                assert_eq!(cuckoo_hashmap.insert(k, 1), None);
            }
            if cuckoo_hashmap.seeds != old_seeds {
                println!("Rehash occurred");
                break;
            } else {
                continue;
            }
        }
        assert_eq!(cuckoo_hashmap.len(), 500);
        for k in 0..500 {
            assert!(cuckoo_hashmap.contains(&k));
        }
    }
}
| true
|
fb32d30128a0eb424fd07a3175bd1e65a039be4e
|
Rust
|
shakram02/HamdOS
|
/src/vga_driver.rs
|
UTF-8
| 6,501
| 3.15625
| 3
|
[] |
no_license
|
use core::fmt;
use spin::Mutex;
use lazy_static::lazy_static;
const VGA_BUFFER_ADDR: usize = 0xB8000;
const DEFAULT_TEXT_ATTR: u8 = 0x07;
// VGA buffer address
const SCREEN_WIDTH: usize = 80;
const SCREEN_HEIGHT: usize = 25;
const BACKSPACE: u8 = 8;
const LINE_FEED: u8 = 10;
lazy_static! {
    /// Global writer serializing all access to the VGA text buffer.
    pub static ref VGA_WRITER: Mutex<VgaBuffer> = Mutex::new(VgaBuffer {
        row: 0,
        col: 0,
        all_screen_attr: ScreenCharAttr { val: DEFAULT_TEXT_ATTR } // Default light grey
    });
}
/// Cursor position and current attribute for writing to the VGA text buffer.
pub struct VgaBuffer {
    row: usize,
    col: usize,
    // Attribute applied when clearing the screen or a rectangle.
    all_screen_attr: ScreenCharAttr,
}
/// One VGA attribute byte: background color in the high nibble, foreground
/// color in the low nibble (see `ScreenCharAttr::new`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct ScreenCharAttr {
    val: u8,
}
impl ScreenCharAttr {
    /// Packs a VGA attribute byte: background color in bits 4-7, foreground
    /// color in bits 0-3.
    pub fn new(foreground_color: Color, background_color: Color) -> ScreenCharAttr {
        let packed = ((background_color as u8) << 4) | (foreground_color as u8);
        ScreenCharAttr { val: packed }
    }
}
/// The 16 standard VGA text-mode colors.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Color {
    Black = 0,
    Blue = 1,
    Green = 2,
    Cyan = 3,
    Red = 4,
    Magenta = 5,
    Brown = 6,
    LightGray = 7,
    DarkGray = 8,
    LightBlue = 9,
    LightGreen = 10,
    LightCyan = 11,
    LightRed = 12,
    Pink = 13,
    Yellow = 14,
    White = 15,
}
impl VgaBuffer {
    /// Clears the screen content and applies the default text attribute
    /// which is Light Grey text on Black background
    pub fn clear_screen(&mut self) {
        // SAFETY: overwrites the entire 80x25 text buffer at the fixed VGA
        // MMIO address; each u16 cell is attribute (high byte) + char (low).
        unsafe {
            let addr = &mut *(VGA_BUFFER_ADDR as *mut [[u16; SCREEN_WIDTH]; SCREEN_HEIGHT]);
            *addr = [[((self.all_screen_attr.val as u16) << 8) as u16; SCREEN_WIDTH]; SCREEN_HEIGHT]
            // This is the default value [LightGray]
        }
    }
    /// Clears the text on the screen and applies the given text attribute
    pub fn clear_text_and_apply_attr(&mut self, text_attr: ScreenCharAttr) {
        // Interpret the VGA buffer as u16 to be able to do batch copy,
        // The structure of each VGA block is as follows (little endian)
        // The addresses of x86 grow from bottom to top
        // 0-7 ASCII code point (far right)
        // 8-11 Foreground color
        // 12-14 Background color
        // 15 Blink (left)
        // That's why each ScreenCharAttr value is shifted left 8 bits
        self.all_screen_attr = text_attr;
        unsafe {
            let addr = &mut *((VGA_BUFFER_ADDR) as *mut [[u16; SCREEN_WIDTH]; SCREEN_HEIGHT]);
            *addr =
                [[((self.all_screen_attr.val as u16) << 8) as u16; SCREEN_WIDTH]; SCREEN_HEIGHT];
        }
    }
    /// Prints each byte in turn at the current cursor position.
    pub fn print(&mut self, bytes: &[u8]) {
        for &byte in bytes.iter() {
            self.print_byte(byte)
        }
    }
    /// Prints `string`, applying `text_attr` to each printable character.
    pub fn print_attributed_text(&mut self, string: &str, text_attr: ScreenCharAttr) {
        for &byte in string.as_bytes() {
            if is_printable(byte) {
                self.write_attribute_to_buffer(text_attr);
            }
            self.print_byte(byte);
        }
    }
    /// Prints one byte, handling line feed, backspace, wrapping at the right
    /// edge, and scrolling at the bottom; unprintable bytes render as 0xFE.
    pub fn print_byte(&mut self, byte: u8) {
        // Move to newline when we're about to go out of the screen
        if self.col == SCREEN_WIDTH {
            self.row += 1;
            self.col = 0;
        }
        if self.row == SCREEN_HEIGHT {
            self.row -= 1;
            self.shift_one_row_up();
        }
        if byte == LINE_FEED {
            self.row += 1;
            self.col = 0;
        } else if byte == BACKSPACE {
            // Backspace only moves the cursor; it does not erase the cell.
            if self.col > 0 {
                self.col -= 1;
            }
        } else if is_printable(byte) {
            self.write_byte_to_buffer(byte);
            self.col += 1;
        } else {
            // 0xFE is the substitute glyph for unprintable bytes.
            self.write_byte_to_buffer(0xfe);
            self.col += 1;
        }
    }
    /// Writes `byte` at the current cursor cell.
    fn write_byte_to_buffer(&self, byte: u8) {
        self.write_byte_to_buffer_at(byte, self.row, self.col);
    }
    /// Writes `byte` into the character slot of cell (row, col).
    fn write_byte_to_buffer_at(&self, byte: u8, row: usize, col: usize) {
        let addr = buffer_address_for(row, col);
        // SAFETY: `addr` is within the VGA buffer for valid (row, col).
        unsafe {
            *(addr as *mut u8) = byte;
        }
    }
    /// Writes `attr` at the current cursor cell.
    fn write_attribute_to_buffer(&self, attr: ScreenCharAttr) {
        self.write_attribute_to_buffer_at(attr, self.row, self.col);
    }
    /// Writes `attr` into the attribute slot of cell (row, col).
    fn write_attribute_to_buffer_at(&self, attr: ScreenCharAttr, row: usize, col: usize) {
        // The addresses of x86 grow from bottom to top, the text was the previous address
        let addr = buffer_address_for(row, col) + 1;
        // SAFETY: attribute byte follows the character byte of the same cell.
        unsafe {
            *(addr as *mut u8) = attr.val;
        }
    }
    /// Scrolls the whole screen up by one row, clearing the freed last row.
    fn shift_one_row_up(&mut self) {
        self.clear_rect(0, 1, 0, SCREEN_WIDTH);
        let addr = buffer_address_for(0, 0);
        unsafe {
            // Double screen width because each char has text & attribute
            let row_vals = &mut *(addr as *mut [[u8; 2 * SCREEN_WIDTH]; SCREEN_HEIGHT]);
            for i in 0..(SCREEN_HEIGHT - 1) {
                row_vals[i] = row_vals[i + 1];
            }
            // The last line should be cleared before writing anything to it
            // since we don't move the row variable when writing at the final line
            // TODO: save the passed away screen buffer to somewhere
            self.clear_rect(SCREEN_HEIGHT - 1, 1, 0, SCREEN_WIDTH);
        }
    }
    /// Blanks a rectangle of cells and applies the current default attribute.
    pub fn clear_rect(&mut self, start_row: usize, rows: usize, start_col: usize, cols: usize) {
        for i in start_row..(start_row + rows) {
            for j in start_col..(start_col + cols) {
                self.write_byte_to_buffer_at(0x00, i, j);
                self.write_attribute_to_buffer_at(self.all_screen_attr, i, j);
            }
        }
    }
}
// Enables `write!`/`writeln!` formatting directly into the VGA buffer.
impl fmt::Write for VgaBuffer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.print(s.as_bytes());
        Ok(())
    }
}
/// Byte address of cell (row, col); every cell occupies two bytes
/// (character byte followed by attribute byte).
fn buffer_address_for(row: usize, col: usize) -> usize {
    let bytes_per_row = 2 * SCREEN_WIDTH;
    VGA_BUFFER_ADDR + row * bytes_per_row + 2 * col
}
/// Whether `byte` is a printable ASCII character (0x20 space through 0x7E
/// tilde, inclusive).
fn is_printable(byte: u8) -> bool {
    // Range-contains expresses the bounds in one place (clippy:
    // manual_range_contains).
    (0x20..=0x7e).contains(&byte)
}
/// Backend for the `print!`/`println!` macros. Interrupts are disabled while
/// the writer lock is held (presumably so an interrupt handler printing
/// cannot deadlock on the same lock — confirm).
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    use core::fmt::Write;
    use x86_64::instructions::interrupts;
    interrupts::without_interrupts(|| {
        VGA_WRITER.lock().write_fmt(args).unwrap();
    });
}
/// Prints to the VGA text buffer (no trailing newline).
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => {$crate::vga_driver::_print(format_args!($($arg)*)) };
}
/// Prints to the VGA text buffer, appending a newline.
#[macro_export]
macro_rules! println {
    () => (print!("\n"));
    ($($arg:tt)*) => {$crate::print!("{}\n",format_args!($($arg)*))};
}
| true
|
ccf6b6603b7235eeaea197f7ffe533f1d76f3056
|
Rust
|
iCodeIN/tabin-plugins
|
/make-docs/src/main.rs
|
UTF-8
| 2,360
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
use std::process::Command;
/// One check binary to document: its name and a short platform note.
struct Check {
    name: &'static str,
    about: &'static str,
}
/// Generates crate-level `//!` documentation by running each check binary
/// with `--help` and embedding its output in a fenced code block.
/// Panics when a binary is missing under `target/debug/` or prints non-UTF-8.
fn main() {
    let preamble = "Documentation about the various scripts contained herein\n";
    let checks = [
        Check {
            name: "check-graphite",
            about: "Cross platform, only requires access to a graphite instance.",
        },
        Check {
            name: "check-cpu",
            about: "Linux-only.",
        },
        Check {
            name: "check-container-cpu",
            about: "Linux-only. Can only be run from inside a cgroup.",
        },
        Check {
            name: "check-load",
            about: "Linux-only.",
        },
        Check {
            name: "check-ram",
            about: "Linux-only.",
        },
        Check {
            name: "check-container-ram",
            about: "Linux-only. Can only be run from inside a cgroup.",
        },
        Check {
            name: "check-procs",
            about: "Linux-only. Reads running processes",
        },
        Check {
            name: "check-fs-writeable",
            about: "",
        },
        Check {
            name: "check-disk",
            about: "Unix only.",
        },
    ];
    // Commented preamble, then a markdown table of contents.
    let mut out: String = cp(preamble.split('\n'));
    out.push_str("\n");
    out.push_str(&cp(checks
        .iter()
        .map(|c| format!("- [{0}](#{0})", c.name))));
    out.push_str("\n");
    for check in &checks {
        // Section header; the leading `\` swallows the first newline of the
        // literal so the output starts directly with `//!`.
        out.push_str(&format!(
            "\
//!
//! # {0}
//!
//! {1}
//!
//! ```plain
//! $ {0} --help
",
            check.name, check.about
        ));
        // Capture the binary's --help output verbatim.
        let cout = String::from_utf8(
            Command::new(&format!("target/debug/{}", check.name))
                .args(&["--help"])
                .output()
                .expect(&format!("Couldn't execute command: {}", check.name))
                .stdout,
        ).expect(&format!(
            "Couldn't convert command {} help to utf8",
            check.name
        ));
        out.push_str(&cp(cout.split('\n')));
        out.push_str("\n//! ```\n");
    }
    out.push_str("\n");
    print!("{}", out);
}
/// Turn every line yielded by the iterator into a rustdoc inner comment
/// (`//! <line>`), trim surrounding whitespace, and join with newlines.
fn cp<S: AsRef<str>, I: Iterator<Item = S>>(s: I) -> String {
    let mut commented = Vec::new();
    for line in s {
        let prefixed = format!("//! {}", line.as_ref());
        commented.push(prefixed.trim().to_string());
    }
    commented.join("\n")
}
| true
|
575ec3dd2a1db8cdaabedc120634de0a050334b7
|
Rust
|
MDGSF/JustCoding
|
/rust-leetcode/leetcode_806/src/solution1.rs
|
UTF-8
| 1,114
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
impl Solution {
    /// LeetCode 806: count how many 100-px lines the string occupies and the
    /// width of the last line, given per-letter widths ('a'..='z').
    pub fn number_of_lines(widths: Vec<i32>, s: String) -> Vec<i32> {
        // An empty string occupies no lines at all.
        if s.is_empty() {
            return vec![0, 0];
        }
        let mut lines = 1;
        let mut line_width = 0;
        for &byte in s.as_bytes() {
            let glyph = widths[(byte - b'a') as usize];
            if line_width + glyph > 100 {
                // Glyph does not fit: start a new line holding just it.
                lines += 1;
                line_width = glyph;
            } else {
                line_width += glyph;
            }
        }
        vec![lines, line_width]
    }
}
pub struct Solution;
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_1() {
        // Uniform widths: 26 letters of 10 px each fill lines of 10 glyphs.
        let uniform = vec![10; 26];
        assert_eq!(
            Solution::number_of_lines(uniform, "abcdefghijklmnopqrstuvwxyz".to_string()),
            vec![3, 60]
        );
        // 'a' is narrower (4 px) than every other letter (10 px).
        let mut widths = vec![10; 26];
        widths[0] = 4;
        assert_eq!(
            Solution::number_of_lines(widths, "bbbcccdddaaa".to_string()),
            vec![2, 4]
        );
    }
}
| true
|
b56f9811c41a5266b3eb94e5b20c5f9901530e8c
|
Rust
|
horup/blueprint2-rs
|
/game/src/game.rs
|
UTF-8
| 3,278
| 2.625
| 3
|
[] |
no_license
|
use core::num;
use image::DynamicImage;
use nalgebra::Vector3;
use engine::*;
use crate::{AISystem, Animator};
// Top-level game type. Currently stateless: all world construction happens
// in the `Game::setup` implementation below.
#[derive(Default)]
pub struct BlueprintGame {
}
impl Game for BlueprintGame {
    type GameEvent = ();
    type GameComponent1 = ();

    /// One-time world setup: loads the sprite-sheet texture, configures the
    /// camera, spawns a 10x10 grid of sprite entities and registers the
    /// `Animator` and `AISystem` systems.
    fn setup(&mut self, engine:&mut Engine<Self>) {
        // `assets` is already a `&mut` borrow; the binding itself is never
        // reassigned, so no `mut` binding is needed.
        let assets = &mut engine.assets;
        let sheet01 = assets.load_texture_from_png_bytes("sheet01".into(), include_bytes!("./assets/textures/spritesheet.png"));
        // Two 16x16 frames stacked vertically in the sheet.
        let frames = [
            sheet01.frame(0, 0, 16, 16),
            sheet01.frame(0, 16, 16, 16)];
        assets.load_spritesheet("sheet01".into(),
        SpriteSheet {
            texture:"sheet01".into(),
            frames:frames.into()
        });
        engine.renderer.camera.zoom = 20.0;
        // Spawn a max x max grid of sprites at integer world coordinates.
        let max = 10;
        for i in 0..max*max {
            let x = i % max;
            let y = i / max;
            engine.states.current_mut().entities.spawn((
                Transform { position:Vector3::new(x as f32 , y as f32, 0.0)},
                Sprite {
                    frame:0.0,
                    spritesheet:"sheet01".into(),
                    ..Sprite::default()
                }
            ));
        }
        engine.systems.add::<Animator>();
        engine.systems.add::<AISystem>();
    }
}
impl BlueprintGame {
    /// Construct a game instance in its default (empty) state.
    pub fn new() -> Self {
        Self::default()
    }
}
| true
|
8987ecaf17d50f259181a783960af55fff18e179
|
Rust
|
felix-d/troll
|
/src/cache.rs
|
UTF-8
| 1,910
| 2.953125
| 3
|
[] |
no_license
|
use errors::Error;
use std::io::prelude::*;
use std::io::SeekFrom;
use std::fs::File;
use rustc_serialize::json;
use std::fs::OpenOptions;
use std::collections::BTreeMap;
// Fixed on-disk location of the JSON cache file.
const CACHE: &'static str = "/tmp/troll_cache";
// A simple persistent key -> JSON cache backed by a single file.
pub struct Cache {
    // Open read/write handle to the backing file; kept for the cache's
    // whole lifetime.
    handle: File,
    // In-memory copy of the cache; rewritten to disk on every `set`.
    content: BTreeMap<String, json::Json>,
}
impl Cache {
    /// Open (creating if necessary) the cache file and load its content.
    ///
    /// If the file does not hold a valid JSON object it is truncated and an
    /// empty cache is used instead. I/O failures while resetting the file
    /// are now propagated instead of being silently ignored.
    pub fn new() -> Result<Cache, Error> {
        let mut handle: File = try!(OpenOptions::new().read(true).write(true).create(true).open(CACHE));
        let content: BTreeMap<String, json::Json> = match Cache::file_content_as_json(&mut handle) {
            Ok(content) => content,
            Err(_) => {
                // Corrupt or empty cache file: reset it and start fresh.
                try!(handle.set_len(0));
                try!(handle.seek(SeekFrom::Start(0)));
                BTreeMap::new()
            }
        };
        Ok(Cache { handle: handle, content: content })
    }
    /// Read the whole file and parse it as a JSON object (string -> Json).
    fn file_content_as_json(file: &mut File) -> Result<BTreeMap<String, json::Json>, Error> {
        let mut cache_content = String::new();
        try!(file.read_to_string(&mut cache_content));
        let json_obj = try!(json::Json::from_str(&cache_content));
        let obj: BTreeMap<String, json::Json> = try!(json_obj.as_object().ok_or(Error::CantConvertJsonToObj)).clone();
        Ok(obj)
    }
    /// Look up a cached value by key (cloned out of the in-memory map).
    pub fn get(&mut self, key: &str) -> Option<json::Json> {
        self.content.get(key).map(|x| x.clone())
    }
    /// Store (or replace) a value and immediately persist the whole cache.
    pub fn set(&mut self, key: &str, json: &json::Json) -> Result<(), Error> {
        // `insert` already replaces any existing entry, so the previous
        // explicit `remove` was redundant.
        self.content.insert(key.to_string(), json.clone());
        try!(self.write_cache());
        Ok(())
    }
    /// Truncate the backing file and rewrite it from the in-memory map.
    fn write_cache(&mut self) -> Result<(), Error> {
        // Propagate truncate/seek failures; previously their Results were
        // silently dropped, which could corrupt the file undetected.
        try!(self.handle.set_len(0));
        try!(self.handle.seek(SeekFrom::Start(0)));
        let stringified = json::Json::Object(self.content.clone()).to_string();
        let bytes = stringified.as_bytes();
        try!(self.handle.write_all(bytes));
        Ok(())
    }
}
| true
|
fa57774d82fc7003cd6cf0af07e11820b807a745
|
Rust
|
LionelBergen/ZedScript
|
/src/api_structs/lol_account.rs
|
UTF-8
| 632
| 2.53125
| 3
|
[] |
no_license
|
use serde::{Deserialize, Serialize};
// Summoner account data — presumably from the League of Legends summoner
// API, judging by the field set (TODO confirm against the calling code).
// `serde(rename = ...)` maps the API's camelCase JSON keys to snake_case.
#[derive(Serialize, Deserialize, Debug)]
pub struct LeagueAccount {
    // Serialized as "id" in the JSON payload.
    #[serde(rename = "id")]
    pub summoner_id: String,
    #[serde(rename = "accountId")]
    pub account_id: String,
    // Already lowercase in the JSON, so no rename needed.
    pub puuid: String,
    pub name: String,
    #[serde(rename = "profileIconId")]
    pub profile_icon_id: i32,
    #[serde(rename = "revisionDate")]
    pub revision_date: i64,
    #[serde(rename = "summonerLevel")]
    pub summoner_level: i32,
}
impl PartialEq for LeagueAccount {
    // Two accounts compare equal when `account_id` and `name` match; all
    // other fields (icon, level, revision date) are ignored.
    fn eq(&self, other: &Self) -> bool {
        self.account_id == other.account_id && self.name == other.name
    }
}
| true
|
b42068ae7506aaaaddd1978d02975ae0a5397d76
|
Rust
|
HeroicKatora/oxide-auth
|
/oxide-auth/src/endpoint/query.rs
|
UTF-8
| 10,300
| 3.25
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
use std::borrow::{Borrow, Cow};
use std::collections::HashMap;
use std::fmt;
use std::iter::FromIterator;
use std::hash::{BuildHasher, Hash};
use std::rc::Rc;
use std::sync::Arc;
use serde::de;
use serde::Deserializer;
/// Allows access to the query parameters in an url or a body.
///
/// Use one of the listed implementations below. Since those may be a bit confusing due to their
/// abundant use of generics, basically use any type of `HashMap` that maps 'str-likes' to a
/// collection of other 'str-likes'. Popular instances may be:
/// * `HashMap<String, String>`
/// * `HashMap<String, Vec<String>>`
/// * `HashMap<Cow<'static, str>, Cow<'static, str>>`
///
/// You should generally not have to implement this trait yourself, and if you do there are
/// additional requirements on your implementation to guarantee standard conformance. Therefore the
/// trait is marked as `unsafe`.
pub unsafe trait QueryParameter {
    /// Get the **unique** value associated with a key.
    ///
    /// If there are multiple values, return `None`. This is very important to guarantee
    /// conformance to the RFC. Afaik it prevents potentially subverting validation middleware,
    /// order dependent processing, or simple confusion between different components who parse the
    /// query string from different ends.
    fn unique_value(&self, key: &str) -> Option<Cow<str>>;
    /// Guarantees that one can grab an owned copy.
    ///
    /// This backs the `ToOwned` impls below, enabling `Cow<dyn QueryParameter>`.
    fn normalize(&self) -> NormalizedParameter;
}
/// The query parameter normal form.
///
/// When a request wants to give access to its query or body parameters by reference, it can do so
/// by a reference of the particular trait. But when the representation of the query is not stored
/// in the memory associated with the request, it needs to be allocated to outlive the borrow on
/// the request. This allocation may as well perform the minimization/normalization into a
/// representation actually consumed by the backend. This normal form thus encapsulates the
/// associated `clone-into-normal form` by various possible constructors from references [WIP].
///
/// This gives rise to a custom `Cow<QueryParameter>` instance by requiring that normalization into
/// memory with unrelated lifetime is always possible.
///
/// Internally a hashmap but this may change due to optimizations.
#[derive(Clone, Debug, Default)]
pub struct NormalizedParameter {
    /// The value is `None` if the key appeared at least twice.
    ///
    /// Keeping the poisoned key (instead of removing it) ensures a later
    /// occurrence of the same key cannot "resurrect" a unique value.
    inner: HashMap<Cow<'static, str>, Option<Cow<'static, str>>>,
}
unsafe impl QueryParameter for NormalizedParameter {
    /// A key yields its value only if it was never poisoned, i.e. it was
    /// inserted exactly once.
    fn unique_value(&self, key: &str) -> Option<Cow<str>> {
        match self.inner.get(key) {
            Some(Some(value)) => Some(Cow::Borrowed(value.as_ref())),
            _ => None,
        }
    }
    /// Already in normal form: normalization is simply a clone.
    fn normalize(&self) -> NormalizedParameter {
        self.clone()
    }
}
impl NormalizedParameter {
/// Create an empty map.
pub fn new() -> Self {
NormalizedParameter::default()
}
/// Insert a key-value-pair or mark key as dead if already present.
///
/// Since each key must appear at most once, we do not remove it from the map but instead mark
/// the key as having a duplicate entry.
pub fn insert_or_poison(&mut self, key: Cow<'static, str>, val: Cow<'static, str>) {
let unique_val = Some(val);
self.inner
.entry(key)
.and_modify(|val| *val = None)
.or_insert(unique_val);
}
}
// Allow the owned normal form to be borrowed as a `dyn QueryParameter`
// (used together with the `ToOwned` impls for `Cow<dyn QueryParameter>`).
impl Borrow<dyn QueryParameter> for NormalizedParameter {
    fn borrow(&self) -> &(dyn QueryParameter + 'static) {
        self
    }
}
// Same as above, for contexts that additionally require `Send`.
impl Borrow<dyn QueryParameter + Send> for NormalizedParameter {
    fn borrow(&self) -> &(dyn QueryParameter + Send + 'static) {
        self
    }
}
impl<'de> de::Deserialize<'de> for NormalizedParameter {
    // Deserialize from a sequence of (key, value) string pairs, poisoning
    // duplicate keys on the fly.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor accumulating pairs directly into the normal form, so no
        // intermediate collection is needed.
        struct Visitor(NormalizedParameter);
        impl<'a> de::Visitor<'a> for Visitor {
            type Value = NormalizedParameter;
            fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
                write!(f, "a sequence of key-value-pairs")
            }
            fn visit_seq<A>(mut self, mut access: A) -> Result<Self::Value, A::Error>
            where
                A: de::SeqAccess<'a>,
            {
                while let Some((key, value)) = access.next_element::<(String, String)>()? {
                    self.0.insert_or_poison(key.into(), value.into())
                }
                Ok(self.0)
            }
        }
        let visitor = Visitor(NormalizedParameter::default());
        deserializer.deserialize_seq(visitor)
    }
}
impl<K, V> FromIterator<(K, V)> for NormalizedParameter
where
    K: Into<Cow<'static, str>>,
    V: Into<Cow<'static, str>>,
{
    /// Collect key-value pairs into the normal form, poisoning any key that
    /// occurs more than once.
    fn from_iter<T>(iter: T) -> Self
    where
        T: IntoIterator<Item = (K, V)>,
    {
        let mut target = NormalizedParameter::default();
        for (key, value) in iter {
            target.insert_or_poison(key.into(), value.into());
        }
        target
    }
}
// Together with the `Borrow` impls above, this enables
// `Cow<'_, dyn QueryParameter>`: turning a borrowed query into an owned one
// goes through `normalize`.
impl ToOwned for dyn QueryParameter {
    type Owned = NormalizedParameter;
    fn to_owned(&self) -> Self::Owned {
        self.normalize()
    }
}
// `Send` variant of the impl above.
impl ToOwned for dyn QueryParameter + Send {
    type Owned = NormalizedParameter;
    fn to_owned(&self) -> Self::Owned {
        self.normalize()
    }
}
/// Return a reference to value in a collection if it is the only one.
///
/// For example, a vector of string like types returns a reference to its first
/// element if there are no other, else it returns `None`.
///
/// If this were done with slices, that would require choosing a particular
/// value type of the underlying slice e.g. `[String]`.
///
/// NOTE(review): presumably `unsafe` for the same conformance reason as
/// `QueryParameter` — an impl claiming uniqueness for a non-unique value
/// would subvert the duplicate-key protection. Confirm with the authors.
pub unsafe trait UniqueValue {
    /// Borrow the unique value reference.
    fn get_unique(&self) -> Option<&str>;
}
unsafe impl<K, V, S: BuildHasher> QueryParameter for HashMap<K, V, S>
where
    K: Borrow<str> + Eq + Hash,
    V: UniqueValue + Eq + Hash,
{
    /// A key is unique when present and its value container holds exactly
    /// one element.
    fn unique_value(&self, key: &str) -> Option<Cow<str>> {
        let entry = self.get(key)?;
        entry.get_unique().map(Cow::Borrowed)
    }
    /// Copy every unique entry into the owned normal form; non-unique
    /// entries are simply dropped.
    fn normalize(&self) -> NormalizedParameter {
        let mut inner = HashMap::new();
        for (key, value) in self.iter() {
            if let Some(unique) = value.get_unique() {
                inner.insert(
                    Cow::Owned(key.borrow().to_string()),
                    Some(Cow::Owned(unique.to_string())),
                );
            }
        }
        NormalizedParameter { inner }
    }
}
unsafe impl<K, V> QueryParameter for Vec<(K, V)>
where
    K: Borrow<str> + Eq + Hash,
    V: Borrow<str> + Eq + Hash,
{
    /// Linear scan over the pairs: a value is returned only if the key
    /// matches exactly one entry.
    fn unique_value(&self, key: &str) -> Option<Cow<str>> {
        let mut found = None;
        for &(ref candidate, ref value) in self.iter() {
            if candidate.borrow() == key {
                if found.is_some() {
                    // Second occurrence: the key is not unique.
                    return None;
                }
                found = Some(Cow::Borrowed(value.borrow()));
            }
        }
        found
    }
    /// Feed every pair through `insert_or_poison`, which handles the
    /// duplicate-key bookkeeping.
    fn normalize(&self) -> NormalizedParameter {
        let mut params = NormalizedParameter::default();
        for &(ref key, ref value) in self.iter() {
            params.insert_or_poison(
                Cow::Owned(key.borrow().to_string()),
                Cow::Owned(value.borrow().to_string()),
            );
        }
        params
    }
}
// Forwarding impl: a shared reference to a query-parameter collection is
// itself usable as one.
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a Q {
    fn unique_value(&self, key: &str) -> Option<Cow<str>> {
        (**self).unique_value(key)
    }
    fn normalize(&self) -> NormalizedParameter {
        (**self).normalize()
    }
}
// Same forwarding for mutable references.
unsafe impl<'a, Q: QueryParameter + 'a + ?Sized> QueryParameter for &'a mut Q {
    fn unique_value(&self, key: &str) -> Option<Cow<str>> {
        (**self).unique_value(key)
    }
    fn normalize(&self) -> NormalizedParameter {
        (**self).normalize()
    }
}
// A bare `str` is trivially its own unique value.
unsafe impl UniqueValue for str {
    fn get_unique(&self) -> Option<&str> {
        Some(self)
    }
}
// An owned `String` likewise always holds exactly one value.
unsafe impl UniqueValue for String {
    fn get_unique(&self) -> Option<&str> {
        Some(&self)
    }
}
// A reference to anything str-like forwards to the referent.
unsafe impl<'a, V> UniqueValue for &'a V
where
    V: AsRef<str> + ?Sized,
{
    fn get_unique(&self) -> Option<&str> {
        Some(self.as_ref())
    }
}
// A `Cow` is one string regardless of whether it is borrowed or owned.
unsafe impl<'a> UniqueValue for Cow<'a, str> {
    fn get_unique(&self) -> Option<&str> {
        Some(self.as_ref())
    }
}
// `None` has no value; `Some` defers to the inner container.
unsafe impl<V: UniqueValue> UniqueValue for Option<V> {
    fn get_unique(&self) -> Option<&str> {
        match self {
            Some(inner) => inner.get_unique(),
            None => None,
        }
    }
}
// A slice is unique exactly when it holds a single (itself unique) element.
unsafe impl<V: UniqueValue> UniqueValue for [V] {
    fn get_unique(&self) -> Option<&str> {
        if let [single] = self {
            single.get_unique()
        } else {
            // Empty or more than one element: not unique.
            None
        }
    }
}
// Smart pointers forward to their pointee.
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Box<V> {
    fn get_unique(&self) -> Option<&str> {
        self.as_ref().get_unique()
    }
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Rc<V> {
    fn get_unique(&self) -> Option<&str> {
        self.as_ref().get_unique()
    }
}
unsafe impl<V: UniqueValue + ?Sized> UniqueValue for Arc<V> {
    fn get_unique(&self) -> Option<&str> {
        self.as_ref().get_unique()
    }
}
// A `Vec` behaves exactly like its slice.
unsafe impl<V: UniqueValue> UniqueValue for Vec<V> {
    fn get_unique(&self) -> Option<&str> {
        self.as_slice().get_unique()
    }
}
// NOTE: compiled unconditionally (no `#[cfg(test)]`), so the function below
// acts as a compile-time check of the trait impls in every build.
mod test {
    use super::*;
    /// Compilation tests for various possible QueryParameter impls.
    #[allow(unused)]
    #[allow(dead_code)]
    fn test_query_parameter_impls() {
        let _ = (&HashMap::<String, String>::new()) as &dyn QueryParameter;
        let _ = (&HashMap::<&'static str, &'static str>::new()) as &dyn QueryParameter;
        let _ = (&HashMap::<Cow<'static, str>, Cow<'static, str>>::new()) as &dyn QueryParameter;
        let _ = (&HashMap::<String, Vec<String>>::new()) as &dyn QueryParameter;
        let _ = (&HashMap::<String, Box<String>>::new()) as &dyn QueryParameter;
        let _ = (&HashMap::<String, Box<[Cow<'static, str>]>>::new()) as &dyn QueryParameter;
    }
}
| true
|
17daaec9d5be5c52971f4d04bf15f49b183ac19f
|
Rust
|
4meta5/cosmwasm-examples
|
/escrow/tests/integration.rs
|
UTF-8
| 7,228
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
use cosmwasm::serde::{from_slice, to_vec};
use cosmwasm::types::{coin, mock_params, Coin, ContractResult, CosmosMsg, Params};
use cosmwasm_vm::testing::{handle, init, mock_instance, query};
use cw_escrow::contract::{raw_query, HandleMsg, InitMsg, State, CONFIG_KEY};
/**
This integration test tries to run and call the generated wasm.
It depends on a release build being available already. You can create that with: `cargo wasm`
Then running `cargo test` will validate we can properly call into that generated data.
You can copy the code from unit tests here verbatim, then make a few changes:
Replace `let mut store = MockStorage::new();` with `let mut store = mock_instance(WASM);`.
Replace `query(&store...` with `query(&mut store..` (we need mutability to pass args into wasm).
Any switches on error results, using types will have to use raw strings from formatted errors.
You can use a pattern like this to assert specific errors:
```
match res {
ContractResult::Err(msg) => assert_eq!(msg, "Contract error: creating expired escrow"),
_=> panic!("expected error"),
}
```
**/
// Release-build contract artifact; must be produced beforehand via `cargo wasm`.
static WASM: &[u8] = include_bytes!("../target/wasm32-unknown-unknown/release/cw_escrow.wasm");
/// Serialize an `InitMsg` with the canonical test actors ("verifies" as
/// arbiter, "benefits" as recipient) and the given expiry height/time.
fn init_msg(height: i64, time: i64) -> Vec<u8> {
    let msg = InitMsg {
        arbiter: "verifies".to_string(),
        recipient: "benefits".to_string(),
        end_height: height,
        end_time: time,
    };
    to_vec(&msg).unwrap()
}
/// Like `mock_params`, but with the block height and time overridden so
/// tests can position themselves before/after escrow expiry.
fn mock_params_height(
    signer: &str,
    sent: &[Coin],
    balance: &[Coin],
    height: i64,
    time: i64,
) -> Params {
    let mut stamped = mock_params(signer, sent, balance);
    stamped.block.height = height;
    stamped.block.time = time;
    stamped
}
// Happy path: init at height 876 (before the end_height of 1000) succeeds,
// sends no messages and persists the expected state.
#[test]
fn proper_initialization() {
    let mut store = mock_instance(WASM);
    let msg = init_msg(1000, 0);
    let params = mock_params_height("creator", &coin("1000", "earth"), &[], 876, 0);
    let res = init(&mut store, params, msg).unwrap();
    assert_eq!(0, res.messages.len());
    // it worked, let's query the state
    let q_res = query(&mut store, raw_query(CONFIG_KEY).unwrap()).unwrap();
    let model = q_res.results.first().expect("no data stored");
    let state: State = from_slice(&model.val).unwrap();
    assert_eq!(
        state,
        State {
            arbiter: String::from("verifies"),
            recipient: String::from("benefits"),
            source: String::from("creator"),
            end_height: 1000,
            end_time: 0,
        }
    );
}
// Initializing at a block height past `end_height` must be rejected.
#[test]
fn cannot_initialize_expired() {
    let mut store = mock_instance(WASM);
    let msg = init_msg(1000, 0);
    // Current height 1001 > end_height 1000: escrow is already expired.
    let params = mock_params_height("creator", &coin("1000", "earth"), &[], 1001, 0);
    let res = init(&mut store, params, msg);
    match res {
        ContractResult::Err(msg) => assert_eq!(msg, "Contract error: creating expired escrow"),
        _ => panic!("expected error"),
    }
}
// Malformed init payloads surface as parse errors, not panics.
#[test]
fn fails_on_bad_init_data() {
    let mut store = mock_instance(WASM);
    // `{}` is valid JSON but lacks every required InitMsg field.
    let bad_msg = b"{}".to_vec();
    let params = mock_params_height("creator", &coin("1000", "earth"), &[], 876, 0);
    let res = init(&mut store, params, bad_msg);
    match res {
        ContractResult::Err(msg) => {
            assert_eq!(msg, "Error parsing InitMsg: missing field `arbiter`")
        }
        _ => panic!("expected error"),
    }
}
// Exercises the Approve flow: only the arbiter ("verifies") may release
// funds, only before expiry, and either in full or partially.
#[test]
fn handle_approve() {
    let mut store = mock_instance(WASM);
    // initialize the store
    let msg = init_msg(1000, 0);
    let params = mock_params_height("creator", &coin("1000", "earth"), &[], 876, 0);
    let init_res = init(&mut store, params, msg).unwrap();
    assert_eq!(0, init_res.messages.len());
    // beneficiary cannot release it
    let msg = to_vec(&HandleMsg::Approve { quantity: None }).unwrap();
    let params = mock_params_height(
        "beneficiary",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        900,
        0,
    );
    let handle_res = handle(&mut store, params, msg.clone());
    match handle_res {
        ContractResult::Err(msg) => assert_eq!(msg, "Unauthorized"),
        _ => panic!("expected error"),
    }
    // verifier cannot release it when expired (height 1100 > end_height 1000)
    let params = mock_params_height(
        "verifies",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        1100,
        0,
    );
    let handle_res = handle(&mut store, params, msg.clone());
    match handle_res {
        ContractResult::Err(msg) => assert_eq!(msg, "Contract error: escrow expired"),
        _ => panic!("expected error"),
    }
    // complete release by verifier, before expiration:
    // `quantity: None` sends the whole contract balance to the recipient
    let params = mock_params_height(
        "verifies",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        999,
        0,
    );
    let handle_res = handle(&mut store, params, msg.clone()).unwrap();
    assert_eq!(1, handle_res.messages.len());
    let msg = handle_res.messages.get(0).expect("no message");
    assert_eq!(
        msg,
        &CosmosMsg::Send {
            from_address: "cosmos2contract".to_string(),
            to_address: "benefits".to_string(),
            amount: coin("1000", "earth"),
        }
    );
    // partial release by verifier, before expiration:
    // an explicit quantity releases only that amount
    let partial_msg = to_vec(&HandleMsg::Approve {
        quantity: Some(coin("500", "earth")),
    })
    .unwrap();
    let params = mock_params_height(
        "verifies",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        999,
        0,
    );
    let handle_res = handle(&mut store, params, partial_msg).unwrap();
    assert_eq!(1, handle_res.messages.len());
    let msg = handle_res.messages.get(0).expect("no message");
    assert_eq!(
        msg,
        &CosmosMsg::Send {
            from_address: "cosmos2contract".to_string(),
            to_address: "benefits".to_string(),
            amount: coin("500", "earth"),
        }
    );
}
// Exercises the Refund flow: refunds are blocked until expiry, after which
// anyone may trigger the refund back to the escrow's source.
#[test]
fn handle_refund() {
    let mut store = mock_instance(WASM);
    // initialize the store
    let msg = init_msg(1000, 0);
    let params = mock_params_height("creator", &coin("1000", "earth"), &[], 876, 0);
    let init_res = init(&mut store, params, msg).unwrap();
    assert_eq!(0, init_res.messages.len());
    // cannot release when unexpired (height 800 < end_height 1000)
    let msg = to_vec(&HandleMsg::Refund {}).unwrap();
    let params = mock_params_height(
        "anybody",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        800,
        0,
    );
    let handle_res = handle(&mut store, params, msg.clone());
    match handle_res {
        ContractResult::Err(msg) => assert_eq!(msg, "Contract error: escrow not yet expired"),
        _ => panic!("expected error"),
    }
    // anyone can release after expiration; funds return to "creator"
    let params = mock_params_height(
        "anybody",
        &coin("0", "earth"),
        &coin("1000", "earth"),
        1001,
        0,
    );
    let handle_res = handle(&mut store, params, msg.clone()).unwrap();
    assert_eq!(1, handle_res.messages.len());
    let msg = handle_res.messages.get(0).expect("no message");
    assert_eq!(
        msg,
        &CosmosMsg::Send {
            from_address: "cosmos2contract".to_string(),
            to_address: "creator".to_string(),
            amount: coin("1000", "earth"),
        }
    );
}
| true
|
7fc840a78a00eade9817092cc79700579fad8a7c
|
Rust
|
Erikovsky/curve-tracer
|
/src/model/pwc.rs
|
UTF-8
| 3,086
| 3.265625
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
use itertools::Itertools;
use num_traits::float::Float;
/// A piecewise-constant approximation of scattered (x, y) samples over the
/// half-open interval `[min, max)`, split into equally wide buckets.
pub struct PieceWiseConstantFunction {
    /// Inclusive lower bound of the domain.
    min: f64,
    /// Exclusive upper bound of the domain.
    max: f64,
    /// Per-bucket mean value; `NAN` marks an under-populated bucket.
    buckets: Vec<f64>,
}
impl PieceWiseConstantFunction {
    /// Build the function from explicit sample points; points with x outside
    /// `[min, max)` are ignored.
    pub fn from_points(
        min: f64,
        max: f64,
        buckets: usize,
        min_bucket_population: usize,
        points: &[(f64, f64)],
    ) -> PieceWiseConstantFunction {
        PieceWiseConstantFunction::new(min, max, buckets, min_bucket_population, |s, e| {
            points.iter().cloned().filter_map(
                move |(x, y)| {
                    if x >= s && x < e {
                        Some(y)
                    } else {
                        None
                    }
                },
            )
        })
    }
    /// Build the function from a generator `f` yielding the sample values
    /// falling into a given `[start, end)` bucket range.
    ///
    /// A bucket keeps `NAN` ("no value") unless `f` yields at least
    /// `min_bucket_population` samples for it; otherwise it stores the mean.
    pub fn new<F, I>(
        min: f64,
        max: f64,
        buckets: usize,
        min_bucket_population: usize,
        f: F,
    ) -> PieceWiseConstantFunction
    where
        F: Fn(f64, f64) -> I,
        I: Iterator<Item = f64>,
    {
        // std's `f64::NAN` replaces the `num_traits::Float::nan()` call, and
        // a single `vec!` replaces `reserve_exact` + `resize`.
        let mut pwc = vec![f64::NAN; buckets];
        let span = max - min;
        for (b, slot) in pwc.iter_mut().enumerate() {
            let start = min + span * (b as f64 / buckets as f64);
            let end = min + span * ((b + 1) as f64 / buckets as f64);
            // Single pass over the samples: accumulate sum and count without
            // collecting into a temporary Vec (was itertools `collect_vec`).
            let (sum, count) = f(start, end).fold((0.0_f64, 0usize), |(s, n), v| (s + v, n + 1));
            if count >= min_bucket_population {
                *slot = sum / count as f64;
            }
        }
        PieceWiseConstantFunction {
            min,
            max,
            buckets: pwc,
        }
    }
    /// Iterate over `(bucket_midpoint, mean_value)` pairs, skipping buckets
    /// that never reached the minimum population.
    pub fn iter<'a>(&'a self) -> impl Iterator<Item = (f64, f64)> + 'a {
        let span = self.max - self.min;
        let buckets_no = self.buckets.len() as f64;
        let min = self.min;
        self.buckets.iter().enumerate().filter_map(move |(ix, v)| {
            if v.is_nan() {
                None
            } else {
                let x = min + span * ((ix as f64 + 0.5) / buckets_no);
                Some((x, *v))
            }
        })
    }
}
#[cfg(test)]
mod tests {
    use crate::model::pwc::PieceWiseConstantFunction;
    use itertools::Itertools;
    // Points outside [min, max) never contribute to any bucket.
    #[test]
    fn values_not_in_domain_are_skipped() {
        let h =
            PieceWiseConstantFunction::from_points(0.0, 1.0, 1, 1, &[(-1.0, 12.0), (2.0, 23.0)]);
        assert_eq!(h.iter().collect_vec(), vec![]);
    }
    // x == min falls into the first bucket.
    #[test]
    fn left_end_is_inclusive() {
        let h = PieceWiseConstantFunction::from_points(0.0, 1.0, 1, 1, &[(0.0, 12.0)]);
        assert_eq!(h.iter().collect_vec(), vec![(0.5, 12.0)]);
    }
    // x == max lies outside the half-open domain.
    #[test]
    fn right_end_is_exclusive() {
        let h = PieceWiseConstantFunction::from_points(0.0, 1.0, 1, 1, &[(1.0, 23.0)]);
        assert_eq!(h.iter().collect_vec(), vec![]);
    }
    // A populated bucket stores the arithmetic mean of its samples.
    #[test]
    fn bucket_value_is_average() {
        let h = PieceWiseConstantFunction::from_points(0.0, 1.0, 1, 1, &[(0.2, 12.0), (0.0, 23.0)]);
        assert_eq!(h.iter().collect_vec(), vec![(0.5, 17.5)]);
    }
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.