text stringlengths 8 4.13M |
|---|
use gl;
use imgui;
use imgui::*;
use imgui_opengl_renderer;
use imgui_sdl2;
use sdl2;
use sdl2::audio::{AudioCallback, AudioSpec, AudioSpecDesired};
use std;
/// Audio sample rate in Hz.
pub const SAMPLE_FREQUENCY: i32 = 44_100;
/// Samples per channel in one audio buffer.
pub const SAMPLE_COUNT: usize = 256;
/// Number of output channels (2 = stereo).
pub const CHANNEL_COUNT: usize = 2;
/// Total f32 slots in one buffer across all channels.
pub const SAMPLE_BUFFER_SIZE: usize = SAMPLE_COUNT * CHANNEL_COUNT;
/// A buffer of silence, usable as a default/fallback signal.
pub static ZERO_BUFFER: [f32; SAMPLE_BUFFER_SIZE] = [0f32; SAMPLE_BUFFER_SIZE];
/// Implemented by audio callbacks that need to see the actual `AudioSpec`
/// the playback device was opened with (passed in by `run` during
/// `open_playback`).
pub trait ReceivesAudioSpec {
    fn receive_spec(&mut self, spec: AudioSpec);
}
pub fn run<F, S, CB>(mut audio_cb: CB, mut gui_state: S, gui_cb: F)
where
F: Fn(&Ui, &mut S),
CB: AudioCallback + ReceivesAudioSpec,
{
let sdl_context = sdl2::init().unwrap();
// Init audio
let audio = sdl_context.audio().unwrap();
let desired_spec = AudioSpecDesired {
freq: Some(SAMPLE_FREQUENCY),
channels: Some(2),
samples: Some(256),
};
let device = audio
.open_playback(None, &desired_spec, |spec| {
audio_cb.receive_spec(spec);
audio_cb
})
.unwrap();
device.resume();
// Init video
let video = sdl_context.video().unwrap();
let mut event_pump = sdl_context.event_pump().unwrap();
let gl_attr = video.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 0);
let window = video
.window("Rust Music", 1024, 768)
.position_centered()
.resizable()
.opengl()
.build()
.unwrap();
let _gl_context = window
.gl_create_context()
.expect("Couldn't create GL context");
gl::load_with(|s| video.gl_get_proc_address(s) as _);
let mut imgui = imgui::ImGui::init();
imgui.set_ini_filename(None);
let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui);
let renderer =
imgui_opengl_renderer::Renderer::new(&mut imgui, |s| video.gl_get_proc_address(s) as _);
// Run GUI thread
loop {
use sdl2::event::Event;
for event in event_pump.poll_iter() {
imgui_sdl2.handle_event(&mut imgui, &event);
if let Event::Quit { .. } = event {
return;
}
}
let ui = imgui_sdl2.frame(&window, &mut imgui, &event_pump);
gui_cb(&ui, &mut gui_state);
unsafe {
gl::ClearColor(0.2, 0.2, 0.2, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
renderer.render(ui);
window.gl_swap_window();
std::thread::sleep(std::time::Duration::new(0, 1_000_000_000u32 / 60));
}
}
|
/// Identities of the official clients whose consumer key/secret pairs are
/// listed in the `*_CK` / `*_CS` constants below (one pair per variant).
// Variant names are intentionally lowercase and part of the public API;
// silence the naming lint rather than break existing callers.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum OfficialClient {
    android,
    iphone,
    ipad,
    windows,
    windows_phone,
    google,
    mac,
}
// Consumer key ("CK") / consumer secret ("CS") pairs, one per
// `OfficialClient` variant. `'static` is implied on `const &str` and has
// been dropped (clippy::redundant_static_lifetimes).
pub const ANDROID_CK: &str = "3nVuSoBZnx6U4vzUxf5w";
pub const ANDROID_CS: &str = "Bcs59EFbbsdF6Sl9Ng71smgStWEGwXXKSjYvPVt7qys";
pub const IPHONE_CK: &str = "IQKbtAYlXLripLGPWd0HUA";
pub const IPHONE_CS: &str = "GgDYlkSvaPxGxC4X8liwpUoqKwwr3lCADbz8A7ADU";
pub const IPAD_CK: &str = "CjulERsDeqhhjSme66ECg";
pub const IPAD_CS: &str = "IQWdVyqFxghAtURHGeGiWAsmCAGmdW3WmbEx6Hck";
pub const WINDOWS_CK: &str = "TgHNMa7WZE7Cxi1JbkAMQ";
pub const WINDOWS_CS: &str = "SHy9mBMBPNj3Y17et9BF4g5XeqS4y3vkeW24PttDcY";
pub const WINDOWS_PHONE_CK: &str = "yN3DUNVO0Me63IAQdhTfCA";
pub const WINDOWS_PHONE_CS: &str = "c768oTKdzAjIYCmpSNIdZbGaG0t6rOhSFQP0S5uC79g";
pub const GOOGLE_CK: &str = "iAtYJ4HpUVfIUoNnif1DA";
pub const GOOGLE_CS: &str = "172fOpzuZoYzNYaU3mMYvE8m8MEyLbztOdbrUolU";
pub const MAC_CK: &str = "3rJOl1ODzm9yZy63FACdg";
pub const MAC_CS: &str = "5jPoQ5kQvMJFDYRNE8bQ4rHuds4xJqhvgNJM4awaE8";
|
use std::collections::HashMap;
use petgraph::{Graph, Directed, EdgeDirection};
use petgraph::graph::NodeIndex;
/// Depth-first walk that records, for every node reachable from `u`, the
/// length of the longest path found so far (in `layers`).
///
/// Recurses into a child only when its recorded layer actually improved.
/// The original recursed unconditionally, which revisits entire subtrees
/// even when nothing can change — exponential blowup on dense DAGs. If a
/// node already has a layer >= depth + 1, its subtree was previously
/// processed from at least that depth, so revisiting cannot raise anything.
fn dfs<N, E>(
    graph: &Graph<N, E, Directed>,
    layers: &mut HashMap<NodeIndex, usize>,
    u: NodeIndex,
    depth: usize,
) {
    for v in graph.neighbors(u) {
        let next = depth + 1;
        let improved = match layers.get(&v) {
            Some(&layer) => layer < next,
            None => true,
        };
        if !improved {
            continue;
        }
        layers.insert(v, next);
        dfs(graph, layers, v, next);
    }
}
/// Computes, for every node, the length of the longest path reaching it
/// from any root (a node with no incoming edges). Roots are layer 0.
pub fn longest_path<N, E>(graph: &Graph<N, E, Directed>) -> HashMap<NodeIndex, usize> {
    let mut layers = HashMap::new();
    // Start a DFS from every source node of the graph.
    for root in graph.externals(EdgeDirection::Incoming) {
        layers.insert(root, 0);
        dfs(graph, &mut layers, root, 0);
    }
    layers
}
#[cfg(test)]
mod tests {
    use petgraph::Graph;
    use super::*;
    /// Builds a small DAG with two roots (`a` and `d`) and checks the
    /// longest-path layer computed for every node.
    #[test]
    fn test_longest_path() {
        let mut graph = Graph::<&str, &str>::new();
        let a = graph.add_node("a");
        let b = graph.add_node("b");
        let c = graph.add_node("c");
        let d = graph.add_node("d");
        let e = graph.add_node("e");
        for &(from, to) in &[(a, b), (b, c), (d, c), (d, e)] {
            graph.add_edge(from, to, "");
        }
        let layers = longest_path(&graph);
        for &(node, expected) in &[(a, 0), (b, 1), (c, 2), (d, 0), (e, 1)] {
            assert_eq!(layers[&node], expected);
        }
    }
}
|
mod subclient;
/// Returns the client identifier string supplied by the `subclient`
/// submodule.
pub fn client() -> &'static str {
    subclient::subclient()
}
|
use dlal_component_base::{component, serde_json, CmdResult};
/// ADSR envelope stage.
enum Stage {
    A, // attack: ramping up toward full volume
    D, // decay: ramping down toward the sustain level
    S, // sustain: holding steady until note-off
    R, // release: ramping down to silence (also the idle state)
}
impl Default for Stage {
    /// The envelope starts released (silent) until a note-on arrives.
    fn default() -> Self {
        Stage::R
    }
}
// Declares the dlal `Component` for this ADSR envelope:
// - consumes MIDI, produces audio ("in"/"out"),
// - generates the listed fields (a/d/s/r rates, current stage and volume),
// - wires up rw/json field helpers for a, d, s, r,
// - documents the command arguments and registers a "reset" command
//   (handled by `reset_cmd` below).
component!(
    {"in": ["midi"], "out": ["audio"]},
    [
        "uni",
        "check_audio",
        "run_size",
        {"name": "field_helpers", "fields": ["a", "d", "s", "r"], "kinds": ["rw", "json"]},
    ],
    {
        a: f32,
        d: f32,
        s: f32,
        r: f32,
        stage: Stage,
        vol: f32,
    },
    {
        "a": {
            "args": [{
                "name": "amount",
                "desc": "attack rate",
                "units": "amplitude per sample",
                "range": "(0, 1]",
            }],
        },
        "d": {
            "args": [{
                "name": "amount",
                "desc": "decay rate",
                "units": "amplitude per sample",
                "range": "(0, 1]",
            }],
        },
        "s": {
            "args": [{
                "name": "amount",
                "desc": "sustain level",
                "range": "[0, 1]",
            }],
        },
        "r": {
            "args": [{
                "name": "amount",
                "desc": "release rate",
                "units": "amplitude per sample",
                "range": "(0, 1]",
            }],
        },
        "reset": {},
    },
);
impl ComponentTrait for Component {
    /// Set default envelope rates/levels (rates are per-sample deltas).
    fn init(&mut self) {
        self.a = 0.01;
        self.d = 0.01;
        self.s = 0.5;
        self.r = 0.01;
    }

    /// Advance the ADSR envelope one step per sample and scale the output
    /// buffer by the current envelope volume.
    fn run(&mut self) {
        let audio = match &self.output {
            Some(output) => output.audio(self.run_size).unwrap(),
            None => return,
        };
        for sample in audio {
            match self.stage {
                Stage::A => {
                    // Attack: ramp up; on reaching full volume, start decaying.
                    self.vol += self.a;
                    if self.vol > 1.0 {
                        self.vol = 1.0;
                        self.stage = Stage::D;
                    }
                }
                Stage::D => {
                    // Decay: ramp down until the sustain level is reached.
                    self.vol -= self.d;
                    if self.vol < self.s {
                        self.vol = self.s;
                        self.stage = Stage::S;
                    }
                }
                // Sustain: hold steady until a note-off arrives via midi().
                Stage::S => (),
                Stage::R => {
                    // Release: ramp down, clamping at silence.
                    self.vol -= self.r;
                    if self.vol < 0.0 {
                        self.vol = 0.0;
                    }
                }
            }
            *sample *= self.vol;
        }
    }

    /// React to MIDI: note-on (0x90) starts the attack; note-off (0x80), or
    /// note-on with velocity 0 (which MIDI defines as note-off), starts the
    /// release.
    fn midi(&mut self, msg: &[u8]) {
        if msg.len() < 3 {
            return;
        }
        let type_nibble = msg[0] & 0xf0;
        // Parentheses make the intended precedence explicit
        // (`&&` binds tighter than `||`, so behavior is unchanged).
        if type_nibble == 0x80 || (type_nibble == 0x90 && msg[2] == 0) {
            self.stage = Stage::R;
        } else if type_nibble == 0x90 {
            self.stage = Stage::A;
        }
    }
}
impl Component {
    /// Handler for the "reset" command declared in `component!`: silence the
    /// envelope and return it to the release (idle) stage.
    fn reset_cmd(&mut self, _body: serde_json::Value) -> CmdResult {
        self.stage = Stage::R;
        self.vol = 0.0;
        Ok(None)
    }
}
|
use std::io;
use std::io::Read;
use std::fs::File;
use std::io::ErrorKind;
/// Demonstrates several equivalent styles of opening (and creating, if
/// missing) "hello.txt" — from fully explicit `match` to combinators to
/// `expect`. Each opened handle is dropped immediately; only the side
/// effect (file creation) matters here.
fn main() {
    let f = File::open("hello.txt");
    // Variant 1: fully explicit nested `match` on the error kind.
    let f = match f {
        Ok(file) => file,
        Err(error) => match error.kind() {
            ErrorKind::NotFound => match File::create("hello.txt") {
                Ok(fc) => fc,
                Err(e) => panic!("Problem creating the file: {:?}", e),
            },
            other_error => panic!("Problem opening the file: {:?}", other_error),
        },
    };
    drop(f);
    // Variant 2: identical logic expressed with `unwrap_or_else` closures.
    let f = File::open("hello.txt").unwrap_or_else(|error| {
        if error.kind() == ErrorKind::NotFound {
            File::create("hello.txt").unwrap_or_else(|error| {
                panic!("Problem creating the file: {:?}", error);
            })
        } else {
            panic!("Problem opening the file: {:?}", error);
        }
    });
    drop(f);
    // Avoids the need for unwrapping with a match, but panics if no file is found
    // let f = File::open("hello.txt").unwrap();
    // Same as above, but gives a better error message
    let f = File::open("hello.txt").expect("Failed to open hello.txt");
    drop(f);
}
// Propagating errors
/// Reads "hello.txt" into a String, propagating I/O errors — written in the
/// deliberately verbose `match` style (the `?`-based equivalents follow).
fn read_username_from_file() -> Result<String, io::Error> {
    let f = File::open("hello.txt");
    // Manually propagate the open error to the caller.
    let mut f = match f {
        Ok(file) => file,
        Err(e) => return Err(e),
    };
    let mut s = String::new();
    // Manually propagate the read error; on success return the contents.
    match f.read_to_string(&mut s) {
        Ok(_) => Ok(s),
        Err(e) => Err(e),
    }
}
/// Same as `read_username_from_file`, but using the `?` operator.
fn read_username_from_file2() -> Result<String, io::Error> {
    /*
    Long story short: `?` unwraps the value if the expression is `Ok(_)`;
    if it's an error, the error is converted (via `From`) to the function's
    declared error type and returned immediately.
    */
    let mut f = File::open("hello.txt")?;
    let mut s = String::new();
    f.read_to_string(&mut s)?;
    Ok(s)
}
/// Same as `read_username_from_file2`, chaining the open and the read.
fn read_username_from_file3() -> Result<String, io::Error> {
    let mut s = String::new();
    // Bug fix: the original dropped the `?` on read_to_string, silently
    // discarding read errors (and triggering an unused_must_use warning).
    File::open("hello.txt")?.read_to_string(&mut s)?;
    Ok(s)
}
/// Shortest form: `std::fs::read_to_string` opens, reads, and closes in one
/// call — essentially all of the above in one line.
fn read_username_from_file4() -> Result<String, io::Error> {
    // Fully qualified: this file's use-block never imports `std::fs`, so the
    // original bare `fs::read_to_string(...)` did not compile.
    std::fs::read_to_string("hello.txt")
}
|
use super::models::NewBlock;
use super::schema::blocks;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use massbit_chain_substrate::data_type::SubstrateBlock;
use plugin::core::{BlockHandler, InvocationError};
use std::env;
use index_store::core::IndexStore;
/// Indexer plugin entry point: persists each Substrate block it is handed.
#[derive(Debug, Clone, PartialEq)]
pub struct BlockIndexer;
impl BlockHandler for BlockIndexer {
    /// Extracts the block number and saves it as a `NewBlock` row in the
    /// `blocks` table through the index store.
    fn handle_block(&self, store: &IndexStore, substrate_block: &SubstrateBlock) -> Result<(), InvocationError> {
        println!("[.SO File] triggered!");
        let number = substrate_block.header.number as i64;
        let new_block = NewBlock { number };
        // NOTE(review): any result from `store.save` is discarded here —
        // confirm it cannot fail silently.
        store.save(blocks::table, new_block);
        Ok(())
    }
}
|
/// Returns true if `num` is an Armstrong (narcissistic) number: the sum of
/// its digits, each raised to the power of the digit count, equals itself.
pub fn is_armstrong_number(num: u32) -> bool {
    let digits = num.to_string();
    let power = digits.len() as u32;
    // Accumulate in u64: for 10-digit inputs (near u32::MAX) the digit
    // powers sum past u32::MAX (e.g. three 9^10 terms already overflow),
    // which panicked in debug builds and wrapped in release.
    let sum: u64 = digits
        .chars()
        .map(|c| u64::from(c.to_digit(10).unwrap()))
        .map(|d| d.pow(power))
        .sum();
    sum == u64::from(num)
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Generating C constants from bind programs
#![allow(dead_code)]
use crate::instruction::{Condition, Instruction};
/// Renders a C macro invocation string: `macro_name(arg1, arg2, ...)`.
///
/// Takes `&[String]` instead of the original `&Vec<String>` — slices are
/// the idiomatic borrowed view and callers' `&Vec` deref-coerces, so the
/// change is call-compatible.
fn c_macro_invocation(macro_name: &str, arguments: &[String]) -> String {
    format!("{}({})", macro_name, arguments.join(", "))
}
/// Converts a condition into the argument list the C bind macros expect:
/// an opcode mnemonic, plus the bind/value operand pair for comparisons.
fn to_macro_parts(condition: &Condition) -> Vec<String> {
    let (mnemonic, (b, v)) = match condition {
        // Always has no operands; return its single-element form directly.
        Condition::Always => return vec!["AL".to_string()],
        Condition::Equal(b, v) => ("EQ", (b, v)),
        Condition::NotEqual(b, v) => ("NE", (b, v)),
        Condition::GreaterThan(b, v) => ("GT", (b, v)),
        Condition::LessThan(b, v) => ("LT", (b, v)),
        Condition::GreaterThanEqual(b, v) => ("GE", (b, v)),
        Condition::LessThanEqual(b, v) => ("LE", (b, v)),
    };
    vec![mnemonic.to_string(), b.to_string(), v.to_string()]
}
/// Renders a single bind-program instruction as a C macro invocation.
fn to_c_constant(instruction: &Instruction) -> String {
    match instruction {
        Instruction::Abort(cond) => c_macro_invocation("BI_ABORT_IF", &to_macro_parts(cond)),
        Instruction::Match(cond) => c_macro_invocation("BI_MATCH_IF", &to_macro_parts(cond)),
        Instruction::Goto(cond, address) => {
            // Goto takes the condition's arguments followed by the target label.
            let mut arguments = to_macro_parts(cond);
            arguments.push(address.to_string());
            c_macro_invocation("BI_GOTO_IF", &arguments)
        }
        Instruction::Label(address) => {
            c_macro_invocation("BI_LABEL", &vec![address.to_string()])
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_abort_value() {
        let instruction = Instruction::Abort(Condition::Always);
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_ABORT_IF(AL)")
    }
    // Bug fix: this test was missing its #[test] attribute, so it never ran.
    #[test]
    fn test_abort_if_value() {
        let instruction = Instruction::Abort(Condition::Equal(2, 3));
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_ABORT_IF(EQ, 2, 3)")
    }
    #[test]
    fn test_match_value() {
        let instruction = Instruction::Match(Condition::Always);
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_MATCH_IF(AL)")
    }
    #[test]
    fn test_match_if_value() {
        let instruction = Instruction::Match(Condition::GreaterThanEqual(18, 19));
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_MATCH_IF(GE, 18, 19)")
    }
    #[test]
    fn test_goto_value() {
        let instruction = Instruction::Goto(Condition::Always, 42);
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_GOTO_IF(AL, 42)")
    }
    #[test]
    fn test_goto_if_value() {
        let instruction = Instruction::Goto(Condition::LessThan(5, 6), 55);
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_GOTO_IF(LT, 5, 6, 55)")
    }
    #[test]
    fn test_label_value() {
        let instruction = Instruction::Label(23);
        let c_constant = to_c_constant(&instruction);
        assert_eq!(c_constant, "BI_LABEL(23)")
    }
}
|
// Audio utility functions -- basically nice to have
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use cpal::{SupportedStreamConfigRange, StreamConfig};
use std::sync::mpsc;
// Store information about available audio devices for easy switching
pub struct AudioDevice {
    /// Position of this device in the host's `input_devices()` enumeration.
    pub index: usize,
    /// Human-readable name as reported by the host.
    pub name: String,
    // Save all supported configurations for easy access through the GUI
    pub supported_configurations: Vec<SupportedStreamConfigRange>
}
/// Fetches all audio input devices available on the default host, along with
/// every input configuration each device supports.
pub fn fetch_devices() -> Vec<AudioDevice> {
    // Get the default host, e.g. CoreAudio, Jack etc.
    let host = cpal::default_host();
    let all_devices = host
        .input_devices()
        .expect("Could not get a list of available input devices!");
    let mut devices = Vec::new();
    for (index, device) in all_devices.enumerate() {
        // Collect every supported input configuration for GUI selection
        // (replaces the original manual enumerate-and-push loop).
        let supported_configurations = device
            .supported_input_configs()
            .unwrap()
            .collect();
        devices.push(AudioDevice {
            index,
            name: device.name().unwrap(),
            supported_configurations,
        });
    }
    devices
}
/// Creates an input listening stream and returns the stream, the config it
/// was opened with, the sample receiver, and the index of the device used.
///
/// * `device_index` - input device to use; falls back to the first available
///   device (index 0) if out of range, or the host default if `None`.
/// * `device_config` - stream config to use; defaults to the first supported
///   config at its maximum sample rate.
pub fn create_stream (device_index: Option<usize>, device_config: Option<StreamConfig>) -> (impl StreamTrait, StreamConfig, mpsc::Receiver<std::vec::Vec<f32>>, usize) {
    // The input stream will live in a different thread, so we need a transmitter
    // to safely transmit data to this (main) thread
    let (tx, rx) = mpsc::channel();
    // Get the default host, e.g. CoreAudio, Jack etc.
    let host = cpal::default_host();
    // By default, use the default input device, unless the user has provided a device_index
    let mut real_idx = 0;
    let device = match device_index {
        Some(idx) => {
            let found = host
                .input_devices()
                .expect("Could not get a list of available input devices!")
                .nth(idx);
            match found {
                Some(device) => {
                    real_idx = idx;
                    device
                }
                // Bug fix: the original fell back to `nth(0)` on the SAME
                // iterator, which was already consumed past `idx`, so the
                // fallback always returned None and the unwrap panicked.
                // Re-query the device list instead.
                None => host
                    .input_devices()
                    .expect("Could not get a list of available input devices!")
                    .next()
                    .expect("No input devices available!"),
            }
        }
        None => host.default_input_device().expect("No device!"),
    };
    // Now some debug output stuff etc.
    let name = device.name();
    println!("Listening to device {} ...", name.unwrap());
    let supported_configs_range = device.supported_input_configs()
        .expect("error while querying configs");
    for (idx, config) in supported_configs_range.enumerate() {
        println!("Supported config {}: {:?}", idx, config);
    }
    let config = match device_config {
        Some(cfg) => cfg,
        // Just use the default
        None => device.supported_input_configs().unwrap().next().unwrap().with_max_sample_rate().config(),
    };
    let stream = device.build_input_stream(
        &config,
        move |data: &[f32], _: &cpal::InputCallbackInfo| {
            // A full buffer arrived. Copy the borrowed slice into an owned
            // Vec and ship it to the main thread.
            tx.send(data.to_vec()).unwrap();
        },
        move |err| {
            println!("ERROR: {}", err);
        },
    ).unwrap();
    stream.play().expect("Could not start stream!");
    (stream, config, rx, real_idx) // Return the stream and the receiver
}
|
use std::fs;
use std::time::Instant;
use std::collections::HashSet;
/// Executes the nop/acc/jmp program until it terminates (program counter
/// reaches exactly `questions.len()`), revisits an instruction, or jumps
/// out of range.
///
/// Returns the accumulator, except when `error` is true and the program did
/// not terminate cleanly, in which case it returns -1.
fn process_automaton(questions: Vec<String>, error: bool) -> isize {
    let mut visited: HashSet<usize> = HashSet::new();
    let mut pc: i64 = 0;
    let mut acc: i64 = 0;
    // Bug fix: also guard against a negative program counter — the original
    // only checked the upper bound, so a backwards jump past instruction 0
    // cast to a huge usize and panicked on the lookup.
    while (0..questions.len() as i64).contains(&pc) && !visited.contains(&(pc as usize)) {
        let idx = pc as usize;
        visited.insert(idx);
        let ins = &questions[idx];
        if ins.starts_with("nop") {
            pc += 1;
        } else {
            // Operand is the second whitespace-separated token, e.g. "+4".
            let number: i64 = ins
                .split(' ')
                .nth(1)
                .expect("missing number")
                .parse()
                .expect("no error");
            if ins.starts_with("acc") {
                acc += number;
                pc += 1;
            } else {
                pc += number;
            }
        }
    }
    if pc == questions.len() as i64 || !error {
        acc as isize
    } else {
        -1
    }
}
/// Part 1: run the program until it first loops and report the accumulator.
fn part1(questions: Vec<String>) -> isize {
    // `process_automaton` already returns isize; the old `as isize` cast was redundant.
    process_automaton(questions, false)
}
/// Part 2: flip one jmp<->nop instruction at a time until the program
/// terminates; returns its accumulator, or -1 if no single flip fixes it.
fn part2(questions: Vec<String>) -> isize {
    for i in 0..questions.len() {
        let line = &questions[i];
        // Only jmp/nop flips can fix the program; skip acc lines before
        // cloning (the original cloned the whole program for every line).
        let replaced = if line.starts_with("jmp") {
            line.replace("jmp", "nop")
        } else if line.starts_with("nop") {
            line.replace("nop", "jmp")
        } else {
            continue;
        };
        let mut candidate = questions.clone();
        candidate[i] = replaced;
        let res = process_automaton(candidate, true);
        if res != -1 {
            return res;
        }
    }
    -1
}
fn main() {
let input = fs::read_to_string("input/test.txt")
.expect("Something went wrong reading the file");
let lines = input.lines();
let mut questions: Vec<String> = vec![];
for line in lines {
questions.push(line.parse::<String>().expect("Ouf that's not a string !"))
}
println!("Running part1");
let now = Instant::now();
println!("Found {}", part1(questions.clone()));
println!("Took {}us", now.elapsed().as_micros());
println!("Running part2");
let now = Instant::now();
println!("Found {}", part2(questions.clone()));
println!("Took {}ms", now.elapsed().as_millis());
} |
use std::fmt::Debug;
use cgmath::prelude::*;
use cgmath::BaseFloat;
use collision::dbvt::{DynamicBoundingVolumeTree, TreeValue};
use collision::prelude::*;
use shrev::EventChannel;
use specs::prelude::{
BitSet, Component, ComponentEvent, Entities, Entity, Join, ReadStorage, ReaderId, World,
System, Tracked, Write,
};
use core::{
tree_collide, BroadPhase, CollisionData, CollisionShape, ContactEvent, GetId, NarrowPhase,
NextFrame, Primitive,
};
/// Collision detection [system](https://docs.rs/specs/0.9.5/specs/trait.System.html) for use with
/// [`specs`](https://docs.rs/specs/0.9.5/specs/).
///
/// Will perform spatial sorting of the collision world.
///
/// Has support for both broad phase and narrow phase collision detection. Will only do narrow phase
/// if both broad and narrow phase is activated. If no broad phase is set, it will use a DBVT based
/// broad phase that has complexity O(m log^2 n), where m is the number of shapes that have a dirty
/// pose.
///
/// Can handle any transform component type, as long as the type implements
/// [`Transform`](https://docs.rs/cgmath/0.15.0/cgmath/trait.Transform.html), and as long as the
/// storage is wrapped in
/// [`FlaggedStorage`](https://docs.rs/specs/0.9.5/specs/struct.FlaggedStorage.html).
///
/// ### Type parameters:
///
/// - `P`: Shape primitive
/// - `T`: Transform
/// - `D`: Data accepted by broad phase
/// - `Y`: Shape type, see `Collider`
///
/// ### System Function:
///
/// `fn(Entities, T, NextFrame<T>, CollisionShape, DynamicBoundingVolumeTree<D>) -> (DynamicBoundingVolumeTree<D>, EventChannel<ContactEvent>)`
pub struct SpatialCollisionSystem<P, T, D, B, Y = ()>
where
    P: Primitive,
    B: Bound,
{
    /// Narrow phase; only run when set (see module docs above).
    narrow: Option<Box<dyn NarrowPhase<P, T, B, Y>>>,
    /// Broad phase; when None, the DBVT-based broad phase is used.
    broad: Option<Box<dyn BroadPhase<D>>>,
    /// Entities whose pose (or next-frame pose) changed since last run.
    dirty: BitSet,
    /// Change-event reader for the current-frame pose storage (set in setup).
    pose_reader: Option<ReaderId<ComponentEvent>>,
    /// Change-event reader for the next-frame pose storage (set in setup).
    next_pose_reader: Option<ReaderId<ComponentEvent>>,
}
impl<P, T, D, B, Y> SpatialCollisionSystem<P, T, D, B, Y>
where
    P: Primitive + Send + Sync + 'static,
    <P::Point as EuclideanSpace>::Diff: Debug,
    <P::Point as EuclideanSpace>::Scalar: BaseFloat,
    B: Clone
        + Debug
        + Send
        + Sync
        + 'static
        + Bound<Point = P::Point>
        + Union<B, Output = B>
        + Contains<B>
        + SurfaceArea<Scalar = <P::Point as EuclideanSpace>::Scalar>,
    T: Transform<P::Point> + Component,
    D: HasBound<Bound = B>,
{
    /// Create a new collision detection system, with no broad or narrow phase activated.
    pub fn new() -> Self {
        SpatialCollisionSystem {
            narrow: None,
            broad: None,
            dirty: BitSet::default(),
            pose_reader: None,
            next_pose_reader: None,
        }
    }
    /// Specify what narrow phase algorithm to use
    /// (builder-style: consumes and returns `self`).
    pub fn with_narrow_phase<N: NarrowPhase<P, T, B, Y> + 'static>(mut self, narrow: N) -> Self {
        self.narrow = Some(Box::new(narrow));
        self
    }
    /// Specify what broad phase algorithm to use
    /// (builder-style: consumes and returns `self`).
    pub fn with_broad_phase<V: BroadPhase<D> + 'static>(mut self, broad: V) -> Self {
        self.broad = Some(Box::new(broad));
        self
    }
}
impl<'a, P, T, Y, B, D> System<'a> for SpatialCollisionSystem<P, T, (usize, D), B, Y>
where
    P: Primitive + ComputeBound<B> + Send + Sync + 'static,
    P::Point: EuclideanSpace,
    <P::Point as EuclideanSpace>::Scalar: BaseFloat + Send + Sync + 'static,
    B: Clone
        + Debug
        + Send
        + Sync
        + 'static
        + Bound<Point = P::Point>
        + Union<B, Output = B>
        + Discrete<B>
        + Contains<B>
        + SurfaceArea<Scalar = <P::Point as EuclideanSpace>::Scalar>,
    <P::Point as EuclideanSpace>::Diff: Debug + Send + Sync + 'static,
    P::Point: Debug + Send + Sync + 'static,
    T: Component + Clone + Debug + Transform<P::Point> + Send + Sync + 'static,
    T::Storage: Tracked,
    Y: Default + Send + Sync + 'static,
    D: Send + Sync + 'static + TreeValue<Bound = B> + HasBound<Bound = B> + GetId<Entity>,
{
    type SystemData = (
        Entities<'a>,
        ReadStorage<'a, T>,
        ReadStorage<'a, NextFrame<T>>,
        ReadStorage<'a, CollisionShape<P, T, B, Y>>,
        Write<'a, EventChannel<ContactEvent<Entity, P::Point>>>,
        Write<'a, DynamicBoundingVolumeTree<D>>,
    );
    fn run(&mut self, system_data: Self::SystemData) {
        let (entities, poses, next_poses, shapes, mut event_channel, mut tree) = system_data;
        // Rebuild the dirty set from the pose-change events recorded since
        // the previous run: inserts/modifications mark an entity dirty,
        // removals clear it.
        self.dirty.clear();
        for event in poses.channel().read(self.pose_reader.as_mut().unwrap()) {
            match event {
                ComponentEvent::Inserted(index) => {
                    self.dirty.add(*index);
                }
                ComponentEvent::Modified(index) => {
                    self.dirty.add(*index);
                }
                ComponentEvent::Removed(index) => {
                    self.dirty.remove(*index);
                }
            }
        }
        // Next-frame pose changes mark entities dirty in exactly the same way.
        for event in next_poses
            .channel()
            .read(self.next_pose_reader.as_mut().unwrap())
        {
            match event {
                ComponentEvent::Inserted(index) => {
                    self.dirty.add(*index);
                }
                ComponentEvent::Modified(index) => {
                    self.dirty.add(*index);
                }
                ComponentEvent::Removed(index) => {
                    self.dirty.remove(*index);
                }
            }
        }
        // Run collision detection over the dirty entities (using the DBVT
        // plus the configured broad/narrow phases) and publish the contacts.
        event_channel.iter_write(tree_collide(
            &SpatialCollisionData {
                poses,
                shapes,
                next_poses,
                entities,
                dirty: &self.dirty,
            },
            &mut *tree,
            &mut self.broad,
            &self.narrow,
        ));
    }
    // Registers change-event readers on both pose storages so `run` can
    // track which entities moved between frames.
    fn setup(&mut self, res: &mut World) {
        use specs::prelude::{SystemData, WriteStorage};
        Self::SystemData::setup(res);
        let mut poses = WriteStorage::<T>::fetch(res);
        self.pose_reader = Some(poses.register_reader());
        let mut next_poses = WriteStorage::<NextFrame<T>>::fetch(res);
        self.next_pose_reader = Some(next_poses.register_reader());
    }
}
/// Collision data used by ECS systems
///
/// Bundles the storages and the dirty-entity set that `tree_collide` needs
/// to process only the entities whose poses changed this frame.
pub struct SpatialCollisionData<'a, P, T, B, Y>
where
    P: Primitive + ComputeBound<B> + Send + Sync + 'static,
    P::Point: Debug + Send + Sync + 'static,
    <P::Point as EuclideanSpace>::Scalar: Send + Sync + 'static,
    <P::Point as EuclideanSpace>::Diff: Debug + Send + Sync + 'static,
    T: Component + Transform<P::Point> + Send + Sync + Clone + 'static,
    Y: Default + Send + Sync + 'static,
    B: Bound<Point = P::Point> + Send + Sync + 'static + Union<B, Output = B> + Clone,
{
    /// collision shapes
    pub shapes: ReadStorage<'a, CollisionShape<P, T, B, Y>>,
    /// current frame poses
    pub poses: ReadStorage<'a, T>,
    /// next frame poses
    pub next_poses: ReadStorage<'a, NextFrame<T>>,
    /// entities
    pub entities: Entities<'a>,
    /// dirty poses
    pub dirty: &'a BitSet,
}
impl<'a, P, T, B, Y, D> CollisionData<Entity, P, T, B, Y, D>
    for SpatialCollisionData<'a, P, T, B, Y>
where
    P: Primitive + ComputeBound<B> + Send + Sync + 'static,
    P::Point: Debug + Send + Sync + 'static,
    <P::Point as EuclideanSpace>::Scalar: Send + Sync + 'static,
    <P::Point as EuclideanSpace>::Diff: Debug + Send + Sync + 'static,
    T: Component + Transform<P::Point> + Send + Sync + Clone + 'static,
    Y: Default + Send + Sync + 'static,
    B: Bound<Point = P::Point> + Send + Sync + 'static + Union<B, Output = B> + Clone,
{
    /// Always empty for the spatial system — broad-phase candidates come
    /// from the DBVT passed to `tree_collide` instead.
    fn get_broad_data(&self) -> Vec<D> {
        Vec::default()
    }
    /// Collision shape for the entity, if it has one.
    fn get_shape(&self, id: Entity) -> Option<&CollisionShape<P, T, B, Y>> {
        self.shapes.get(id)
    }
    /// Current-frame pose for the entity, if it has one.
    fn get_pose(&self, id: Entity) -> Option<&T> {
        self.poses.get(id)
    }
    /// Entities flagged dirty this frame that also have a collision shape.
    fn get_dirty_poses(&self) -> Vec<Entity> {
        (&*self.entities, self.dirty, &self.shapes)
            .join()
            .map(|(entity, _, _)| entity)
            .collect()
    }
    /// Next-frame pose for the entity, if it has one.
    fn get_next_pose(&self, id: Entity) -> Option<&T> {
        // The original inserted a redundant `.as_ref()`: `get` already
        // yields `Option<&NextFrame<T>>`, so `map` can borrow directly.
        self.next_poses.get(id).map(|p| &p.value)
    }
}
|
//!
//! A DeltaLayer represents a collection of WAL records or page images in a range of
//! LSNs, for one segment. It is stored on a file on disk.
//!
//! Usually a delta layer only contains differences - in the form of WAL records against
//! a base LSN. However, if a segment is newly created, by creating a new relation or
//! extending an old one, there might be no base image. In that case, all the entries in
//! the delta layer must be page images or WAL records with the 'will_init' flag set, so
//! that they can be replayed without referring to an older page version. Also in some
//! circumstances, the predecessor layer might actually be another delta layer. That
//! can happen when you create a new branch in the middle of a delta layer, and the WAL
//! records on the new branch are put in a new delta layer.
//!
//! When a delta file needs to be accessed, we slurp the metadata and relsize chapters
//! into memory, into the DeltaLayerInner struct. See load() and unload() functions.
//! To access a page/WAL record, we search `page_version_metas` for the block # and LSN.
//! The byte ranges in the metadata can be used to find the page/WAL record in
//! PAGE_VERSIONS_CHAPTER.
//!
//! On disk, the delta files are stored in timelines/<timelineid> directory.
//! Currently, there are no subdirectories, and each delta file is named like this:
//!
//! <spcnode>_<dbnode>_<relnode>_<forknum>_<segno>_<start LSN>_<end LSN>
//!
//! For example:
//!
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349
//!
//! If a relation is dropped, we add a '_DROPPED' to the end of the filename to indicate that.
//! So the above example would become:
//!
//! 1663_13990_2609_0_5_000000000169C348_000000000169C349_DROPPED
//!
//! The end LSN indicates when it was dropped in that case, we don't store it in the
//! file contents in any way.
//!
//! A delta file is constructed using the 'bookfile' crate. Each file consists of two
//! parts: the page versions and the relation sizes. They are stored as separate chapters.
//!
use crate::layered_repository::blob::BlobWriter;
use crate::layered_repository::filename::{DeltaFileName, PathOrConf};
use crate::layered_repository::storage_layer::{
Layer, PageReconstructData, PageReconstructResult, PageVersion, SegmentTag,
};
use crate::repository::WALRecord;
use crate::waldecoder;
use crate::PageServerConf;
use crate::{ZTenantId, ZTimelineId};
use anyhow::{bail, Result};
use bytes::Bytes;
use log::*;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
// avoid binding to Write (conflicts with std::io::Write)
// while being able to use std::fmt::Write's methods
use std::fmt::Write as _;
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::ops::Bound::Included;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, MutexGuard};
use bookfile::{Book, BookWriter};
use zenith_utils::bin_ser::BeSer;
use zenith_utils::lsn::Lsn;
use super::blob::{read_blob, BlobRange};
// Magic constant to identify a Zenith delta file
pub const DELTA_FILE_MAGIC: u32 = 0x5A616E01;
// The chapter ids below are immutable compile-time values; they were
// declared `static`, but `const` is the idiomatic (and allocation-free)
// form for plain integer identifiers.
/// Mapping from (block #, lsn) -> page/WAL record
/// byte ranges in PAGE_VERSIONS_CHAPTER
const PAGE_VERSION_METAS_CHAPTER: u64 = 1;
/// Page/WAL bytes - cannot be interpreted
/// without PAGE_VERSION_METAS_CHAPTER
const PAGE_VERSIONS_CHAPTER: u64 = 2;
/// Relation sizes at different LSNs.
const REL_SIZES_CHAPTER: u64 = 3;
/// Contains the [`Summary`] struct
const SUMMARY_CHAPTER: u64 = 4;
/// Identifying metadata for a delta file (tenant, timeline, segment, and
/// the LSN range it covers); serialized into SUMMARY_CHAPTER.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct Summary {
    tenantid: ZTenantId,
    timelineid: ZTimelineId,
    seg: SegmentTag,
    // Covered LSN range; start inclusive, end exclusive (see DeltaLayer).
    start_lsn: Lsn,
    end_lsn: Lsn,
    dropped: bool,
}
impl From<&DeltaLayer> for Summary {
    /// Builds the on-disk summary by copying the layer's identifying fields.
    fn from(layer: &DeltaLayer) -> Self {
        Self {
            tenantid: layer.tenantid,
            timelineid: layer.timelineid,
            seg: layer.seg,
            start_lsn: layer.start_lsn,
            end_lsn: layer.end_lsn,
            dropped: layer.dropped,
        }
    }
}
/// Locates one page version inside PAGE_VERSIONS_CHAPTER: a byte range for
/// a full page image and/or one for a WAL record. The read path treats an
/// entry with neither range set as corrupt (see get_page_reconstruct_data).
#[derive(Serialize, Deserialize)]
struct PageVersionMeta {
    page_image_range: Option<BlobRange>,
    record_range: Option<BlobRange>,
}
///
/// DeltaLayer is the in-memory data structure associated with an
/// on-disk delta file. We keep a DeltaLayer in memory for each
/// file, in the LayerMap. If a layer is in "loaded" state, we have a
/// copy of the file in memory, in 'inner'. Otherwise the struct is
/// just a placeholder for a file that exists on disk, and it needs to
/// be loaded before using it in queries.
///
pub struct DeltaLayer {
    /// Either an explicit path or the server conf used to derive the path.
    path_or_conf: PathOrConf,
    pub tenantid: ZTenantId,
    pub timelineid: ZTimelineId,
    pub seg: SegmentTag,
    //
    // This entry contains all the changes from 'start_lsn' to 'end_lsn'. The
    // start is inclusive, and end is exclusive.
    //
    pub start_lsn: Lsn,
    pub end_lsn: Lsn,
    // True if the relation was dropped; 'end_lsn' is then the drop LSN.
    dropped: bool,
    /// Predecessor layer (consulted when a page needs an older base image).
    predecessor: Option<Arc<dyn Layer>>,
    /// Lazily-loaded file contents; see DeltaLayerInner.
    inner: Mutex<DeltaLayerInner>,
}
pub struct DeltaLayerInner {
    /// If false, the 'page_version_metas' and 'relsizes' have not been
    /// loaded into memory yet.
    loaded: bool,
    /// All versions of all pages in the file are kept here.
    /// Indexed by block number and LSN.
    page_version_metas: BTreeMap<(u32, Lsn), PageVersionMeta>,
    /// `relsizes` tracks the size of the relation at different points in time.
    relsizes: BTreeMap<Lsn, u32>,
}
impl Layer for DeltaLayer {
/// Timeline this layer belongs to.
fn get_timeline_id(&self) -> ZTimelineId {
    self.timelineid
}
/// Segment covered by this layer.
fn get_seg_tag(&self) -> SegmentTag {
    self.seg
}
/// True if the relation was dropped; `end_lsn` is then the drop LSN.
fn is_dropped(&self) -> bool {
    self.dropped
}
/// Start of the covered LSN range (inclusive).
fn get_start_lsn(&self) -> Lsn {
    self.start_lsn
}
/// End of the covered LSN range (exclusive).
fn get_end_lsn(&self) -> Lsn {
    self.end_lsn
}
/// File name (without directory) encoding seg, LSN range, and the
/// _DROPPED suffix, per the naming scheme in the module docs.
fn filename(&self) -> PathBuf {
    PathBuf::from(
        DeltaFileName {
            seg: self.seg,
            start_lsn: self.start_lsn,
            end_lsn: self.end_lsn,
            dropped: self.dropped,
        }
        .to_string(),
    )
}
/// Full path of the delta file on disk, derived from path_or_conf plus
/// the tenant/timeline ids and the delta file name.
fn path(&self) -> Option<PathBuf> {
    Some(Self::path_for(
        &self.path_or_conf,
        self.timelineid,
        self.tenantid,
        &DeltaFileName {
            seg: self.seg,
            start_lsn: self.start_lsn,
            end_lsn: self.end_lsn,
            dropped: self.dropped,
        },
    ))
}
/// Look up given page in the cache.
///
/// Collects into `reconstruct_data` the newest page image at or before
/// `lsn` and/or the WAL records needed on top of it. Returns `Complete`
/// when enough data was gathered, `Continue` when the caller must consult
/// the predecessor layer, or `Missing` when there is no predecessor.
fn get_page_reconstruct_data(
    &self,
    blknum: u32,
    lsn: Lsn,
    reconstruct_data: &mut PageReconstructData,
) -> Result<PageReconstructResult> {
    let mut need_image = true;
    assert!(self.seg.blknum_in_seg(blknum));
    {
        // Open the file and lock the metadata in memory
        // TODO: avoid opening the file for each read
        let (_path, book) = self.open_book()?;
        let page_version_reader = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
        let inner = self.load()?;
        // Scan the metadata BTreeMap backwards, starting from the given entry.
        // Keys are (block #, LSN), so this walks this block's versions from
        // newest (<= lsn) to oldest.
        let minkey = (blknum, Lsn(0));
        let maxkey = (blknum, lsn);
        let mut iter = inner
            .page_version_metas
            .range((Included(&minkey), Included(&maxkey)));
        while let Some(((_blknum, _entry_lsn), entry)) = iter.next_back() {
            if let Some(img_range) = &entry.page_image_range {
                // Found a page image, return it
                let img = Bytes::from(read_blob(&page_version_reader, img_range)?);
                reconstruct_data.page_img = Some(img);
                need_image = false;
                break;
            } else if let Some(rec_range) = &entry.record_range {
                let rec = WALRecord::des(&read_blob(&page_version_reader, rec_range)?)?;
                let will_init = rec.will_init;
                reconstruct_data.records.push(rec);
                if will_init {
                    // This WAL record initializes the page, so no need to go further back
                    need_image = false;
                    break;
                }
            } else {
                // No base image, and no WAL record. Huh?
                bail!("no page image or WAL record for requested page");
            }
        }
        // release metadata lock and close the file
    }
    // If an older page image is needed to reconstruct the page, let the
    // caller know about the predecessor layer.
    if need_image {
        if let Some(cont_layer) = &self.predecessor {
            Ok(PageReconstructResult::Continue(
                self.start_lsn,
                Arc::clone(cont_layer),
            ))
        } else {
            Ok(PageReconstructResult::Missing(self.start_lsn))
        }
    } else {
        Ok(PageReconstructResult::Complete)
    }
}
/// Get size of the relation at given LSN
fn get_seg_size(&self, lsn: Lsn) -> Result<u32> {
    assert!(lsn >= self.start_lsn);
    let inner = self.load()?;
    // Take the newest recorded size at or below `lsn`.
    let newest_entry = inner
        .relsizes
        .range((Included(&Lsn(0)), Included(&lsn)))
        .next_back();
    match newest_entry {
        Some((_entry_lsn, size)) => Ok(*size),
        // No entry in this layer: fall back to the predecessor's base
        // image, or default to zero if this is the oldest layer.
        None => match &self.predecessor {
            Some(predecessor) => predecessor.get_seg_size(lsn),
            None => Ok(0),
        },
    }
}
/// Does this segment exist at given LSN?
fn get_seg_exists(&self, lsn: Lsn) -> Result<bool> {
    // The segment is gone only when it was dropped and the requested LSN
    // falls at or after the end of this (dropped) layer; otherwise it exists.
    let dropped_by_then = self.dropped && lsn >= self.end_lsn;
    Ok(!dropped_by_then)
}
///
/// Release most of the memory used by this layer. If it's accessed again later,
/// it will need to be loaded back.
///
fn unload(&self) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
inner.page_version_metas = BTreeMap::new();
inner.relsizes = BTreeMap::new();
inner.loaded = false;
Ok(())
}
/// Delete the underlying file from disk, if this layer has one.
fn delete(&self) -> Result<()> {
    match self.path() {
        Some(path) => {
            fs::remove_file(path)?;
            Ok(())
        }
        // Nothing on disk to remove.
        None => Ok(()),
    }
}
/// A delta layer always reports itself as incremental: reads may have to
/// continue into `self.predecessor` (see `get_page_reconstruct_data`).
fn is_incremental(&self) -> bool {
    true
}
/// debugging function to print out the contents of the layer
///
/// Prints the layer identity, every recorded relation size, and a one-line
/// description of each stored page version (image size and/or WAL record).
fn dump(&self) -> Result<()> {
    println!(
        "----- delta layer for ten {} tli {} seg {} {}-{} ----",
        self.tenantid, self.timelineid, self.seg, self.start_lsn, self.end_lsn
    );
    println!("--- relsizes ---");
    let inner = self.load()?;
    for (k, v) in inner.relsizes.iter() {
        println!("  {}: {}", k, v);
    }
    println!("--- page versions ---");
    // Read the raw blobs from the file so sizes/contents can be reported.
    let (_path, book) = self.open_book()?;
    let chapter = book.chapter_reader(PAGE_VERSIONS_CHAPTER)?;
    for (k, v) in inner.page_version_metas.iter() {
        let mut desc = String::new();
        // An entry may carry a page image, a WAL record, or both.
        if let Some(page_image_range) = v.page_image_range.as_ref() {
            let image = read_blob(&chapter, page_image_range)?;
            write!(&mut desc, " img {} bytes", image.len())?;
        }
        if let Some(record_range) = v.record_range.as_ref() {
            let record_bytes = read_blob(&chapter, record_range)?;
            let rec = WALRecord::des(&record_bytes)?;
            let wal_desc = waldecoder::describe_wal_record(&rec.rec);
            write!(
                &mut desc,
                " rec {} bytes will_init: {} {}",
                rec.rec.len(),
                rec.will_init,
                wal_desc
            )?;
        }
        println!("  blk {} at {}: {}", k.0, k.1, desc);
    }
    Ok(())
}
}
impl DeltaLayer {
    /// Compute the on-disk path for a delta layer file: either the explicit
    /// path we were given, or `<timeline dir>/<delta file name>` under the conf.
    fn path_for(
        path_or_conf: &PathOrConf,
        timelineid: ZTimelineId,
        tenantid: ZTenantId,
        fname: &DeltaFileName,
    ) -> PathBuf {
        match path_or_conf {
            PathOrConf::Path(path) => path.clone(),
            PathOrConf::Conf(conf) => conf
                .timeline_path(&timelineid, &tenantid)
                .join(fname.to_string()),
        }
    }
    /// Create a new delta file, using the given page versions and relsizes.
    /// The page versions are passed by an iterator; the iterator must return
    /// page versions in blknum+lsn order.
    ///
    /// This is used to write the in-memory layer to disk. The in-memory layer uses the same
    /// data structure with two btreemaps as we do, so passing the btreemaps is currently
    /// expedient.
    #[allow(clippy::too_many_arguments)]
    pub fn create<'a>(
        conf: &'static PageServerConf,
        timelineid: ZTimelineId,
        tenantid: ZTenantId,
        seg: SegmentTag,
        start_lsn: Lsn,
        end_lsn: Lsn,
        dropped: bool,
        predecessor: Option<Arc<dyn Layer>>,
        page_versions: impl Iterator<Item = (&'a (u32, Lsn), &'a PageVersion)>,
        relsizes: BTreeMap<Lsn, u32>,
    ) -> Result<DeltaLayer> {
        // Build the struct first; it starts out "loaded" because we populate
        // its in-memory maps below while writing the file.
        let delta_layer = DeltaLayer {
            path_or_conf: PathOrConf::Conf(conf),
            timelineid,
            tenantid,
            seg,
            start_lsn,
            end_lsn,
            dropped,
            inner: Mutex::new(DeltaLayerInner {
                loaded: true,
                page_version_metas: BTreeMap::new(),
                relsizes,
            }),
            predecessor,
        };
        let mut inner = delta_layer.inner.lock().unwrap();
        // Write the in-memory btreemaps into a file
        let path = delta_layer
            .path()
            .expect("DeltaLayer is supposed to have a layer path on disk");
        // Note: This overwrites any existing file. There shouldn't be any.
        // FIXME: throw an error instead?
        let file = File::create(&path)?;
        let buf_writer = BufWriter::new(file);
        let book = BookWriter::new(buf_writer, DELTA_FILE_MAGIC)?;
        // Stream every page image / WAL record blob into the page-versions
        // chapter, remembering each blob's byte range in the metadata map.
        let mut page_version_writer = BlobWriter::new(book, PAGE_VERSIONS_CHAPTER);
        for (key, page_version) in page_versions {
            let page_image_range = page_version
                .page_image
                .as_ref()
                .map(|page_image| page_version_writer.write_blob(page_image))
                .transpose()?;
            let record_range = page_version
                .record
                .as_ref()
                .map(|record| {
                    let buf = WALRecord::ser(record)?;
                    page_version_writer.write_blob(&buf)
                })
                .transpose()?;
            let old = inner.page_version_metas.insert(
                *key,
                PageVersionMeta {
                    page_image_range,
                    record_range,
                },
            );
            // The input iterator must not yield duplicate (blknum, lsn) keys.
            assert!(old.is_none());
        }
        let book = page_version_writer.close()?;
        // Write out page versions
        let mut chapter = book.new_chapter(PAGE_VERSION_METAS_CHAPTER);
        let buf = BTreeMap::ser(&inner.page_version_metas)?;
        chapter.write_all(&buf)?;
        let book = chapter.close()?;
        // and relsizes to separate chapter
        let mut chapter = book.new_chapter(REL_SIZES_CHAPTER);
        let buf = BTreeMap::ser(&inner.relsizes)?;
        chapter.write_all(&buf)?;
        let book = chapter.close()?;
        // Finally a summary chapter identifying this layer; `load()` checks
        // it against the expected identity when the file is read back.
        let mut chapter = book.new_chapter(SUMMARY_CHAPTER);
        let summary = Summary {
            tenantid,
            timelineid,
            seg,
            start_lsn,
            end_lsn,
            dropped,
        };
        Summary::ser_into(&summary, &mut chapter)?;
        let book = chapter.close()?;
        // This flushes the underlying 'buf_writer'.
        book.close()?;
        trace!("saved {}", &path.display());
        drop(inner);
        Ok(delta_layer)
    }
    /// Open this layer's file as a `Book`, returning the path and the handle.
    fn open_book(&self) -> Result<(PathBuf, Book<File>)> {
        let path = Self::path_for(
            &self.path_or_conf,
            self.timelineid,
            self.tenantid,
            &DeltaFileName {
                seg: self.seg,
                start_lsn: self.start_lsn,
                end_lsn: self.end_lsn,
                dropped: self.dropped,
            },
        );
        let file = File::open(&path)?;
        let book = Book::new(file)?;
        Ok((path, book))
    }
    ///
    /// Load the contents of the file into memory
    ///
    /// Returns the locked inner state, re-reading the metadata chapters from
    /// disk if they were unloaded. Idempotent once loaded.
    fn load(&self) -> Result<MutexGuard<DeltaLayerInner>> {
        // quick exit if already loaded
        let mut inner = self.inner.lock().unwrap();
        if inner.loaded {
            return Ok(inner);
        }
        let (path, book) = self.open_book()?;
        // Sanity-check that the file matches what this struct claims to be:
        // against the in-file summary (conf case) or the file name (path case).
        match &self.path_or_conf {
            PathOrConf::Conf(_) => {
                let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
                let actual_summary = Summary::des(&chapter)?;
                let expected_summary = Summary::from(self);
                if actual_summary != expected_summary {
                    bail!("in-file summary does not match expected summary. actual = {:?} expected = {:?}", actual_summary, expected_summary);
                }
            }
            PathOrConf::Path(path) => {
                let actual_filename = Path::new(path.file_name().unwrap());
                let expected_filename = self.filename();
                if actual_filename != expected_filename {
                    // Only a warning: this path is used by the debugging
                    // binary, which may open arbitrarily named files.
                    println!(
                        "warning: filename does not match what is expected from in-file summary"
                    );
                    println!("actual: {:?}", actual_filename);
                    println!("expected: {:?}", expected_filename);
                }
            }
        }
        let chapter = book.read_chapter(PAGE_VERSION_METAS_CHAPTER)?;
        let page_version_metas = BTreeMap::des(&chapter)?;
        let chapter = book.read_chapter(REL_SIZES_CHAPTER)?;
        let relsizes = BTreeMap::des(&chapter)?;
        debug!("loaded from {}", &path.display());
        *inner = DeltaLayerInner {
            loaded: true,
            page_version_metas,
            relsizes,
        };
        Ok(inner)
    }
    /// Create a DeltaLayer struct representing an existing file on disk.
    pub fn new(
        conf: &'static PageServerConf,
        timelineid: ZTimelineId,
        tenantid: ZTenantId,
        filename: &DeltaFileName,
        predecessor: Option<Arc<dyn Layer>>,
    ) -> DeltaLayer {
        DeltaLayer {
            path_or_conf: PathOrConf::Conf(conf),
            timelineid,
            tenantid,
            seg: filename.seg,
            start_lsn: filename.start_lsn,
            end_lsn: filename.end_lsn,
            dropped: filename.dropped,
            // Lazily loaded: the maps stay empty until `load()` is called.
            inner: Mutex::new(DeltaLayerInner {
                loaded: false,
                page_version_metas: BTreeMap::new(),
                relsizes: BTreeMap::new(),
            }),
            predecessor,
        }
    }
    /// Create a DeltaLayer struct representing an existing file on disk.
    ///
    /// This variant is only used for debugging purposes, by the 'dump_layerfile' binary.
    /// The identity fields are taken from the in-file summary rather than the
    /// file name, and no predecessor chain is established.
    pub fn new_for_path(path: &Path, book: &Book<File>) -> Result<Self> {
        let chapter = book.read_chapter(SUMMARY_CHAPTER)?;
        let summary = Summary::des(&chapter)?;
        Ok(DeltaLayer {
            path_or_conf: PathOrConf::Path(path.to_path_buf()),
            timelineid: summary.timelineid,
            tenantid: summary.tenantid,
            seg: summary.seg,
            start_lsn: summary.start_lsn,
            end_lsn: summary.end_lsn,
            dropped: summary.dropped,
            inner: Mutex::new(DeltaLayerInner {
                loaded: false,
                page_version_metas: BTreeMap::new(),
                relsizes: BTreeMap::new(),
            }),
            predecessor: None,
        })
    }
}
|
/// Print the challenge banner; the actual implementation lives elsewhere.
fn main() {
    // Same two informational lines as before, emitted via a single loop.
    for line in &["🔓 Challenge 21", "Code in 'prng/src/mt19937.rs'"] {
        println!("{}", line);
    }
}
|
use stopwatch::Stopwatch;
use futures::Future;
use chrono::Duration;
/// Wrap a futures-0.1 future so that, on success, it also yields how long
/// the future took to resolve. Errors pass through unchanged.
pub fn stopwatch<F, I, E>(future: F) -> impl Future<Item = (I, Duration), Error = E>
    where F: Future<Item = I, Error = E>
{
    // Start timing now; the closure captures the stopwatch by move.
    let timer = Stopwatch::start_new();
    future.then(move |outcome| match outcome {
        Ok(value) => {
            let elapsed = Duration::from_std(timer.elapsed())
                .expect("Could not convert latency from std time");
            Ok((value, elapsed))
        }
        Err(err) => Err(err),
    })
}
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
// Review note: this section appears to be a machine-generated WinRT binding
// (windows-rs style): each "enum" is a #[repr(transparent)] newtype over i32
// with associated constants, and each runtime class is an opaque pointer
// alias. Code is unchanged; only comments were added.

// Selector for which display properties are auto-populated for a media item.
#[repr(transparent)]
pub struct AutoLoadedDisplayPropertyKind(pub i32);
impl AutoLoadedDisplayPropertyKind {
    pub const None: Self = Self(0i32);
    pub const MusicOrVideo: Self = Self(1i32);
    pub const Music: Self = Self(2i32);
    pub const Video: Self = Self(3i32);
}
impl ::core::marker::Copy for AutoLoadedDisplayPropertyKind {}
impl ::core::clone::Clone for AutoLoadedDisplayPropertyKind {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT runtime-class handle.
pub type CurrentMediaPlaybackItemChangedEventArgs = *mut ::core::ffi::c_void;
// Which stream of a media item failed.
#[repr(transparent)]
pub struct FailedMediaStreamKind(pub i32);
impl FailedMediaStreamKind {
    pub const Unknown: Self = Self(0i32);
    pub const Audio: Self = Self(1i32);
    pub const Video: Self = Self(2i32);
}
impl ::core::marker::Copy for FailedMediaStreamKind {}
impl ::core::clone::Clone for FailedMediaStreamKind {
    fn clone(&self) -> Self {
        *self
    }
}
// Opaque WinRT runtime-class handles.
pub type IMediaEnginePlaybackSource = *mut ::core::ffi::c_void;
pub type IMediaPlaybackSource = *mut ::core::ffi::c_void;
pub type MediaBreak = *mut ::core::ffi::c_void;
pub type MediaBreakEndedEventArgs = *mut ::core::ffi::c_void;
// How a media break is inserted into playback.
#[repr(transparent)]
pub struct MediaBreakInsertionMethod(pub i32);
impl MediaBreakInsertionMethod {
    pub const Interrupt: Self = Self(0i32);
    pub const Replace: Self = Self(1i32);
}
impl ::core::marker::Copy for MediaBreakInsertionMethod {}
impl ::core::clone::Clone for MediaBreakInsertionMethod {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaBreakManager = *mut ::core::ffi::c_void;
pub type MediaBreakSchedule = *mut ::core::ffi::c_void;
pub type MediaBreakSeekedOverEventArgs = *mut ::core::ffi::c_void;
pub type MediaBreakSkippedEventArgs = *mut ::core::ffi::c_void;
pub type MediaBreakStartedEventArgs = *mut ::core::ffi::c_void;
// When a media transport command is enabled.
#[repr(transparent)]
pub struct MediaCommandEnablingRule(pub i32);
impl MediaCommandEnablingRule {
    pub const Auto: Self = Self(0i32);
    pub const Always: Self = Self(1i32);
    pub const Never: Self = Self(2i32);
}
impl ::core::marker::Copy for MediaCommandEnablingRule {}
impl ::core::clone::Clone for MediaCommandEnablingRule {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaItemDisplayProperties = *mut ::core::ffi::c_void;
pub type MediaPlaybackAudioTrackList = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManager = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerAutoRepeatModeReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerCommandBehavior = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerFastForwardReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerNextReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerPauseReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerPlayReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerPositionReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerPreviousReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerRateReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerRewindReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackCommandManagerShuffleReceivedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackItem = *mut ::core::ffi::c_void;
// Why the current playback item changed.
#[repr(transparent)]
pub struct MediaPlaybackItemChangedReason(pub i32);
impl MediaPlaybackItemChangedReason {
    pub const InitialItem: Self = Self(0i32);
    pub const EndOfStream: Self = Self(1i32);
    pub const Error: Self = Self(2i32);
    pub const AppRequested: Self = Self(3i32);
}
impl ::core::marker::Copy for MediaPlaybackItemChangedReason {}
impl ::core::clone::Clone for MediaPlaybackItemChangedReason {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlaybackItemError = *mut ::core::ffi::c_void;
// Generated WinRT binding (continued): error/state enums and opaque handles.
// Error category reported for a failed playback item.
#[repr(transparent)]
pub struct MediaPlaybackItemErrorCode(pub i32);
impl MediaPlaybackItemErrorCode {
    pub const None: Self = Self(0i32);
    pub const Aborted: Self = Self(1i32);
    pub const NetworkError: Self = Self(2i32);
    pub const DecodeError: Self = Self(3i32);
    pub const SourceNotSupportedError: Self = Self(4i32);
    pub const EncryptionError: Self = Self(5i32);
}
impl ::core::marker::Copy for MediaPlaybackItemErrorCode {}
impl ::core::clone::Clone for MediaPlaybackItemErrorCode {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlaybackItemFailedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackItemOpenedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackList = *mut ::core::ffi::c_void;
pub type MediaPlaybackSession = *mut ::core::ffi::c_void;
pub type MediaPlaybackSessionBufferingStartedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlaybackSessionOutputDegradationPolicyState = *mut ::core::ffi::c_void;
// Why video output was constricted for a playback session.
#[repr(transparent)]
pub struct MediaPlaybackSessionVideoConstrictionReason(pub i32);
impl MediaPlaybackSessionVideoConstrictionReason {
    pub const None: Self = Self(0i32);
    pub const VirtualMachine: Self = Self(1i32);
    pub const UnsupportedDisplayAdapter: Self = Self(2i32);
    pub const UnsignedDriver: Self = Self(3i32);
    pub const FrameServerEnabled: Self = Self(4i32);
    pub const OutputProtectionFailed: Self = Self(5i32);
    pub const Unknown: Self = Self(6i32);
}
impl ::core::marker::Copy for MediaPlaybackSessionVideoConstrictionReason {}
impl ::core::clone::Clone for MediaPlaybackSessionVideoConstrictionReason {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlaybackSphericalVideoProjection = *mut ::core::ffi::c_void;
// Playback-session state machine states.
#[repr(transparent)]
pub struct MediaPlaybackState(pub i32);
impl MediaPlaybackState {
    pub const None: Self = Self(0i32);
    pub const Opening: Self = Self(1i32);
    pub const Buffering: Self = Self(2i32);
    pub const Playing: Self = Self(3i32);
    pub const Paused: Self = Self(4i32);
}
impl ::core::marker::Copy for MediaPlaybackState {}
impl ::core::clone::Clone for MediaPlaybackState {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlaybackTimedMetadataTrackList = *mut ::core::ffi::c_void;
pub type MediaPlaybackVideoTrackList = *mut ::core::ffi::c_void;
pub type MediaPlayer = *mut ::core::ffi::c_void;
// Audio stream category for a media player. Note the generated values skip
// 1 and 2 — kept exactly as generated.
#[repr(transparent)]
pub struct MediaPlayerAudioCategory(pub i32);
impl MediaPlayerAudioCategory {
    pub const Other: Self = Self(0i32);
    pub const Communications: Self = Self(3i32);
    pub const Alerts: Self = Self(4i32);
    pub const SoundEffects: Self = Self(5i32);
    pub const GameEffects: Self = Self(6i32);
    pub const GameMedia: Self = Self(7i32);
    pub const GameChat: Self = Self(8i32);
    pub const Speech: Self = Self(9i32);
    pub const Movie: Self = Self(10i32);
    pub const Media: Self = Self(11i32);
}
impl ::core::marker::Copy for MediaPlayerAudioCategory {}
impl ::core::clone::Clone for MediaPlayerAudioCategory {
    fn clone(&self) -> Self {
        *self
    }
}
// Audio endpoint type for a media player.
#[repr(transparent)]
pub struct MediaPlayerAudioDeviceType(pub i32);
impl MediaPlayerAudioDeviceType {
    pub const Console: Self = Self(0i32);
    pub const Multimedia: Self = Self(1i32);
    pub const Communications: Self = Self(2i32);
}
impl ::core::marker::Copy for MediaPlayerAudioDeviceType {}
impl ::core::clone::Clone for MediaPlayerAudioDeviceType {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlayerDataReceivedEventArgs = *mut ::core::ffi::c_void;
// Error category reported by the media player itself.
#[repr(transparent)]
pub struct MediaPlayerError(pub i32);
impl MediaPlayerError {
    pub const Unknown: Self = Self(0i32);
    pub const Aborted: Self = Self(1i32);
    pub const NetworkError: Self = Self(2i32);
    pub const DecodingError: Self = Self(3i32);
    pub const SourceNotSupported: Self = Self(4i32);
}
impl ::core::marker::Copy for MediaPlayerError {}
impl ::core::clone::Clone for MediaPlayerError {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlayerFailedEventArgs = *mut ::core::ffi::c_void;
pub type MediaPlayerRateChangedEventArgs = *mut ::core::ffi::c_void;
// Generated WinRT binding (continued): player/projection/metadata enums.
// Overall media-player state machine states.
#[repr(transparent)]
pub struct MediaPlayerState(pub i32);
impl MediaPlayerState {
    pub const Closed: Self = Self(0i32);
    pub const Opening: Self = Self(1i32);
    pub const Buffering: Self = Self(2i32);
    pub const Playing: Self = Self(3i32);
    pub const Paused: Self = Self(4i32);
    pub const Stopped: Self = Self(5i32);
}
impl ::core::marker::Copy for MediaPlayerState {}
impl ::core::clone::Clone for MediaPlayerState {
    fn clone(&self) -> Self {
        *self
    }
}
pub type MediaPlayerSurface = *mut ::core::ffi::c_void;
pub type PlaybackMediaMarker = *mut ::core::ffi::c_void;
pub type PlaybackMediaMarkerReachedEventArgs = *mut ::core::ffi::c_void;
pub type PlaybackMediaMarkerSequence = *mut ::core::ffi::c_void;
// Projection mode for spherical (360°) video.
#[repr(transparent)]
pub struct SphericalVideoProjectionMode(pub i32);
impl SphericalVideoProjectionMode {
    pub const Spherical: Self = Self(0i32);
    pub const Flat: Self = Self(1i32);
}
impl ::core::marker::Copy for SphericalVideoProjectionMode {}
impl ::core::clone::Clone for SphericalVideoProjectionMode {
    fn clone(&self) -> Self {
        *self
    }
}
// Render mode for stereoscopic (3D) video.
#[repr(transparent)]
pub struct StereoscopicVideoRenderMode(pub i32);
impl StereoscopicVideoRenderMode {
    pub const Mono: Self = Self(0i32);
    pub const Stereo: Self = Self(1i32);
}
impl ::core::marker::Copy for StereoscopicVideoRenderMode {}
impl ::core::clone::Clone for StereoscopicVideoRenderMode {
    fn clone(&self) -> Self {
        *self
    }
}
pub type TimedMetadataPresentationModeChangedEventArgs = *mut ::core::ffi::c_void;
// How a timed-metadata track is presented, if at all.
#[repr(transparent)]
pub struct TimedMetadataTrackPresentationMode(pub i32);
impl TimedMetadataTrackPresentationMode {
    pub const Disabled: Self = Self(0i32);
    pub const Hidden: Self = Self(1i32);
    pub const ApplicationPresented: Self = Self(2i32);
    pub const PlatformPresented: Self = Self(3i32);
}
impl ::core::marker::Copy for TimedMetadataTrackPresentationMode {}
impl ::core::clone::Clone for TimedMetadataTrackPresentationMode {
    fn clone(&self) -> Self {
        *self
    }
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::Error;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use base64::prelude::*;
use chrono::DateTime;
use chrono::Duration;
use chrono::Utc;
use common_base::base::tokio::sync::RwLock;
use http::HeaderValue;
// Token file reference.
/// A bearer token backed by a file on disk, cached with an expiry deadline.
#[derive(Debug)]
pub struct TokenFile {
    /// Where the token was read from; re-read when the cache expires.
    path: PathBuf,
    /// The most recently read token contents.
    token: String,
    /// Deadline after which the cached token should be refreshed.
    pub expires_at: DateTime<Utc>,
}
impl TokenFile {
    /// Read the token at `path` and remember the path so the token can be
    /// re-read later when it is about to expire.
    pub fn new(path: &Path) -> Result<Self, Error> {
        let token = std::fs::read_to_string(path)
            .map_err(|source| Error::new(ErrorKind::Other, source))?;
        // This period was picked because it is half of the duration between when the kubelet
        // refreshes a projected service account token and when the original token expires.
        let expires_at = Utc::now() + Duration::seconds(60);
        Ok(Self {
            path: path.to_path_buf(),
            token,
            expires_at,
        })
    }
    /// True when the cached token is within 10 seconds of its deadline.
    pub fn is_expiring(&self) -> bool {
        self.expires_at < Utc::now() + Duration::seconds(10)
    }
    // fast path return not null token from alive credential
    pub fn cached_token(&self) -> Option<&str> {
        if self.is_expiring() {
            None
        } else {
            Some(self.token.as_ref())
        }
    }
    // slow path return token from credential
    pub fn token(&mut self) -> &str {
        if self.is_expiring() {
            // https://github.com/kubernetes/kubernetes/issues/68164
            // Best effort: keep the stale token if the re-read fails.
            if let Ok(fresh) = std::fs::read_to_string(&self.path) {
                self.token = fresh;
            }
            self.expires_at = Utc::now() + Duration::seconds(60);
        }
        self.token.as_ref()
    }
}
/// A bearer token source: either file-backed (refreshed near expiry) or a
/// fixed string supplied directly.
#[derive(Debug, Clone)]
pub enum RefreshableToken {
    /// Shared, file-backed token; re-read from disk when close to expiry.
    File(Arc<RwLock<TokenFile>>),
    /// Static token; never refreshed.
    Direct(String),
}
fn bearer_header(token: &str) -> Result<HeaderValue, Error> {
// trim spaces and base 64
let token = BASE64_URL_SAFE.encode(token.trim());
let mut value = HeaderValue::try_from(format!("Bearer {}", token))
.map_err(|err| Error::new(ErrorKind::InvalidInput, err))?;
value.set_sensitive(true);
Ok(value)
}
impl RefreshableToken {
    /// Render the token as an `Authorization: Bearer ...` header value,
    /// reloading a file-backed token first if it is about to expire.
    pub async fn to_header(&self) -> Result<HeaderValue, Error> {
        match self {
            RefreshableToken::File(file) => {
                // Fast path: serve the cached token under a shared read lock.
                let guard = file.read().await;
                if let Some(header) = guard.cached_token().map(bearer_header) {
                    return header;
                }
                // Drop the read guard before a write lock attempt to prevent deadlock.
                drop(guard);
                // Note that `token()` only reloads if the cached token is expiring.
                // A separate method to conditionally reload minimizes the need for an exclusive access.
                bearer_header(file.write().await.token())
            }
            RefreshableToken::Direct(token) => bearer_header(token),
        }
    }
}
|
use std::io;
use std::error::Error;
use mio::EventLoop;
use mio::util::Slab;
use config::{create_slab, create_loop};
use handler::{create_handler};
use scope::{early_scope, EarlyScope, Scope};
use {Machine, Config, Handler, SpawnError};
use SpawnError::NoSlabSpace;
/// An object that is used to construct a loop
///
/// The purpose of the object is to shorten the boilerplate to create
/// an event loop.
///
/// The second purpose is to create the loop and state machines
/// before Context is initialized. This is useful when you want to put
/// `Notifier` objects of state machines into the context.
///
/// You can create a loop either right away:
///
/// ```ignore
/// use rotor::{Loop, Config};
///
/// let mut lc = Loop::new(&Config::new()).unwrap();
/// lc.add_machine_with(|scope| {
///     // The scope here is the `EarlyScope` (no context)
///     Ok(CreateMachine(x))
/// }).unwrap();
/// lc.run(context).unwrap()
/// ```
///
/// Or if you can create it in two stages:
///
/// ```ignore
/// let mut lc = Loop::new(&Config::new()).unwrap();
/// lc.add_machine_with(|scope| {
///     // The scope here is the `EarlyScope`
///     Ok(StateMachine1(scope))
/// }).unwrap();
/// let mut inst = lc.instantiate(context);
/// inst.add_machine_with(|scope| {
///     // The scope here is the real `Scope<C>`
///     Ok(StateMachine2(scope))
/// }).unwrap();
/// inst.run().unwrap()
/// ```
///
///
pub struct LoopCreator<C, M: Machine<Context=C>> {
    // State machines owned by the loop, indexed by mio token.
    slab: Slab<M>,
    // The underlying mio event loop driving the handler.
    mio: EventLoop<Handler<C, M>>,
}
/// A fully-instantiated loop: the mio event loop plus a handler that owns
/// the state machines and the user context. Produced by
/// `LoopCreator::instantiate`.
pub struct LoopInstance<C, M: Machine<Context=C>> {
    mio: EventLoop<Handler<C, M>>,
    handler: Handler<C, M>,
}
impl<C, M: Machine<Context=C>> LoopCreator<C, M> {
    /// Build the machine slab and mio event loop from the configuration.
    pub fn new(cfg: &Config) -> Result<LoopCreator<C, M>, io::Error> {
        let slab = create_slab(&cfg);
        let eloop = try!(create_loop(&cfg));
        Ok(LoopCreator {
            slab: slab,
            mio: eloop,
        })
    }
    /// Add a state machine before the context exists; the closure receives
    /// an `EarlyScope`. Returns `NoSlabSpace` when the slab is full.
    pub fn add_machine_with<F>(&mut self, fun: F) -> Result<(), SpawnError<()>>
        where F: FnOnce(&mut EarlyScope) -> Result<M, Box<Error>>
    {
        let ref mut chan = self.mio.channel();
        let ref mut mio = self.mio;
        let res = self.slab.insert_with(|token| {
            let ref mut scope = early_scope(token, chan, mio);
            match fun(scope) {
                Ok(x) => x,
                Err(_) => {
                    // TODO(tailhook) when Slab::insert_with_opt() lands, fix it
                    // NOTE(review): a creation error currently panics instead
                    // of being reported; the Slab API cannot back out here.
                    panic!("Unimplemented: Slab::insert_with_opt");
                }
            }
        });
        if res.is_some() {
            Ok(())
        } else {
            Err(NoSlabSpace(()))
        }
    }
    /// Attach the user context, converting this creator into a runnable
    /// `LoopInstance`.
    pub fn instantiate(self, context: C) -> LoopInstance<C, M> {
        let LoopCreator { slab, mio } = self;
        let handler = create_handler(slab, context, mio.channel());
        LoopInstance { mio: mio, handler: handler }
    }
    /// Convenience: instantiate with `context` and run the loop to completion.
    pub fn run(self, context: C) -> Result<(), io::Error> {
        self.instantiate(context).run()
    }
}
impl<C, M: Machine<Context=C>> LoopInstance<C, M> {
    /// Add a state machine after instantiation; the closure receives the
    /// real `Scope<C>` (with context), unlike the creator's `EarlyScope`.
    pub fn add_machine_with<F>(&mut self, fun: F) -> Result<(), SpawnError<()>>
        where F: FnOnce(&mut Scope<C>) -> Result<M, Box<Error>>
    {
        let ref mut handler = self.handler;
        let ref mut mio = self.mio;
        handler.add_machine_with(mio, fun)
    }
    /// Run the mio event loop until it stops, dispatching to the handler.
    pub fn run(mut self) -> Result<(), io::Error> {
        let ref mut handler = self.handler;
        let ref mut mio = self.mio;
        mio.run(handler)
    }
}
|
#![allow(dead_code, unused_imports)]
extern crate assembler;
extern crate hardware;
extern crate vm_translator;
use assembler::parser::Parser;
use hardware::computer::Computer;
use vm_translator::vm_translator::VmTranslator;
use std::fs::File;
use std::io::Write;
use std::process;
use std::{
net::{TcpListener, TcpStream},
sync::mpsc,
thread,
};
use tungstenite::{server::accept, Message, WebSocket};
/// File-name bundle for one VM program: the `.vm` source, the generated
/// `.asm` assembly, and the final `.txt` machine-language output.
struct VmScanner {
    vm_path: String,
    asm_path: String,
    // Public so callers can feed the machine-language file to the computer.
    pub ml_path: String,
}
impl VmScanner {
    /// Derive the `.vm` / `.asm` / `.txt` file names from a program path stem.
    pub fn new(program_path: &str) -> Self {
        let with_ext = |ext: &str| format!("{}.{}", program_path, ext);
        Self {
            vm_path: with_ext("vm"),
            asm_path: with_ext("asm"),
            ml_path: with_ext("txt"),
        }
    }
    /// Full pipeline: VM source -> assembly -> machine-language text file.
    pub fn run(&self) {
        self.vm_to_assembly();
        self.assembly_to_ml();
    }
    /// Translate the VM file into assembly.
    fn vm_to_assembly(&self) {
        VmTranslator::new()
            .run(&self.vm_path, &self.asm_path)
            .expect("Translate VM");
    }
    /// Assemble the `.asm` file and write the machine words to disk.
    fn assembly_to_ml(&self) {
        let mut parser = Parser::new(); // TODO: include saving feature ?
        let machine_code = parser.run(&self.asm_path);
        File::create(&self.ml_path)
            .unwrap()
            .write_all(machine_code.to_string().as_bytes())
            .unwrap();
    }
}
/// Translate and run the StackTest program, then print two memory windows
/// (addresses 0-8 and 256-260) for manual inspection.
fn main() {
    let scanner = VmScanner::new("integrate/src/programs/StackTest");
    scanner.run();
    println!("------ start_computer ------");
    // No I/O channels attached, tracing disabled.
    let mut computer = Computer::new(None, false);
    computer.run(&scanner.ml_path, false);
    println!("{}", computer.get_memory_info(0, 8));
    println!("{}", computer.get_memory_info(256, 260));
    println!("------ start_stop ------");
    // Disabled websocket front-end: would serve each incoming connection
    // with `start_computer` below. Kept for future use.
    // let server = TcpListener::bind("127.0.0.1:9001").unwrap();
    // for stream in server.incoming() {
    //     match stream {
    //         Err(e) => panic!(e),
    //         Ok(tcp) => {
    //             tcp.set_nonblocking(true).unwrap();
    //             let socket = accept(tcp).unwrap();
    //             start_computer(socket, &scanner.ml_path);
    //         }
    //     }
    // }
}
/// Run the computer with a websocket bridge: a background thread shuttles
/// messages between the socket and the computer via two mpsc channels, then
/// the computer runs `filename` to completion and the process exits.
fn start_computer(mut socket: WebSocket<TcpStream>, filename: &str) {
    println!("------ start_computer ------");
    // socket -> computer and computer -> socket channels.
    let (to_computer, from_external) = mpsc::channel::<String>();
    let (to_external, from_computer) = mpsc::channel::<String>();
    // issue: CPU usage hits 100%
    // (busy loop: neither read_message nor try_recv blocks here)
    thread::spawn(move || loop {
        if let Ok(msg) = socket.read_message() {
            to_computer.send(msg.to_string()).unwrap();
        }
        if let Ok(msg) = from_computer.try_recv() {
            socket.write_message(Message::from(msg)).unwrap();
        }
    });
    let mut computer = Computer::new(Some((to_external, from_external)), false);
    computer.run(filename, false);
    println!("{}", computer.get_memory_info(0, 8));
    println!("{}", computer.get_memory_info(256, 260));
    println!("------ start_stop ------");
    // Kill the whole process, including the detached bridge thread.
    process::exit(0);
}
#[cfg(test)]
mod tests {
    use super::*;
    use hardware::base::logic::Word;
    /// Translate and assemble `program`, run it on a fresh computer, and
    /// assert that the word at memory address 256 (binary
    /// "000000100000000") matches `expected`.
    ///
    /// Extracted to remove the identical 8-line body previously duplicated
    /// across all five integration tests.
    fn run_and_check_result(program: &str, expected: &str) {
        let scanner = VmScanner::new(program);
        scanner.run();
        let mut computer = Computer::new(None, false);
        computer.run(&scanner.ml_path, false);
        assert_eq!(computer.memory_out("000000100000000"), Word::from(expected));
    }
    #[test]
    fn integrate_test_add() {
        run_and_check_result("src/programs/Add", "0000000000001111");
    }
    #[test]
    fn integrate_test_sub() {
        run_and_check_result("src/programs/Sub", "1111111111111110");
    }
    #[test]
    fn integrate_test_eq() {
        run_and_check_result("src/programs/Eq", "1111111111111111");
    }
    #[test]
    fn integrate_test_lt() {
        run_and_check_result("src/programs/Lt", "1111111111111111");
    }
    #[test]
    fn integrate_test_gt() {
        run_and_check_result("src/programs/Gt", "0000000000000000");
    }
}
|
use std::fs;
/// Solve the puzzle against the checked-in input file and report the answer.
fn main() {
    let result = solve_puzzle("input");
    let message = format!("And the result is {}", result);
    println!("{}", message);
}
/// AoC 2020 day 13 part 1: parse the earliest departure time and the list of
/// in-service bus IDs, find the first minute >= `earliest` at which some bus
/// departs, and return (wait time) * (bus id).
fn solve_puzzle(file_name: &str) -> u32 {
    // Inlined file read (was a `read_data` helper) — same panic message.
    let contents = fs::read_to_string(file_name).expect("Error");
    let mut data = contents.lines();
    let earliest: u32 = data.next().unwrap().parse().unwrap();
    // "x" entries are out-of-service buses and are ignored.
    let bus_ids: Vec<u32> = data
        .next()
        .unwrap()
        .split(',')
        .filter(|id| *id != "x")
        .map(|id| id.parse().unwrap())
        .collect();
    // Scan forward from `earliest` until some bus departs exactly then.
    let (bus, next_time) = (earliest..)
        .find_map(|minute| {
            bus_ids
                .iter()
                .find(|&&id| minute % id == 0)
                .map(|&id| (id, minute))
        })
        .expect("no departing bus found");
    (next_time - earliest) * bus
}
/// Read the whole puzzle input file into a string, panicking on failure.
fn read_data(file_name: &str) -> String {
    let contents = fs::read_to_string(file_name);
    contents.expect("Error")
}
#[cfg(test)]
mod test {
    use super::*;
    // Uses the checked-in "example_data" file from the puzzle statement.
    #[test]
    fn solve_example_data() {
        assert_eq!(295, solve_puzzle("example_data"))
    }
    // Pins the answer for this user's personal "input" file.
    #[test]
    fn solve_input() {
        assert_eq!(138, solve_puzzle("input"))
    }
}
|
// xfail-stage0
use std;
import std._vec;
import std.bitv;
// Edge case: a zero-length bit vector must compare equal to an empty
// uint vector. (Archaic pre-1.0 Rust: `auto`, `check`, `.`-paths.)
fn test_0_elements() {
    auto act;
    auto exp;
    act = bitv.create(0u, false);
    exp = _vec.init_elt[uint](0u, 0u);
    // FIXME: why can't I write vec[uint]()?
    check (bitv.eq_vec(act, exp));
}
// Single-element vectors: initial fill of false maps to 0, true to 1.
fn test_1_element() {
    auto act;
    act = bitv.create(1u, false);
    check (bitv.eq_vec(act, vec(0u)));
    act = bitv.create(1u, true);
    check (bitv.eq_vec(act, vec(1u)));
}
// Ten-element vectors: all-zero, all-one, and several set-bit patterns,
// each compared against the expected 0/1 uint vector.
fn test_10_elements() {
    auto act;
    // all 0
    act = bitv.create(10u, false);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // all 1
    act = bitv.create(10u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: low half set
    act = bitv.create(10u, false);
    bitv.set(act, 0u, true);
    bitv.set(act, 1u, true);
    bitv.set(act, 2u, true);
    bitv.set(act, 3u, true);
    bitv.set(act, 4u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 0u, 0u, 0u, 0u, 0u)));
    // mixed: high half set
    act = bitv.create(10u, false);
    bitv.set(act, 5u, true);
    bitv.set(act, 6u, true);
    bitv.set(act, 7u, true);
    bitv.set(act, 8u, true);
    bitv.set(act, 9u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: every third bit set
    act = bitv.create(10u, false);
    bitv.set(act, 0u, true);
    bitv.set(act, 3u, true);
    bitv.set(act, 6u, true);
    bitv.set(act, 9u, true);
    check (bitv.eq_vec(act, vec(1u, 0u, 0u, 1u, 0u, 0u, 1u, 0u, 0u, 1u)));
}
// 31-element vectors: one element short of a 32-bit word boundary, to
// exercise the partial-word case of the bit-vector implementation.
fn test_31_elements() {
    auto act;
    // all 0
    act = bitv.create(31u, false);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // all 1
    act = bitv.create(31u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: first byte set
    act = bitv.create(31u, false);
    bitv.set(act, 0u, true);
    bitv.set(act, 1u, true);
    bitv.set(act, 2u, true);
    bitv.set(act, 3u, true);
    bitv.set(act, 4u, true);
    bitv.set(act, 5u, true);
    bitv.set(act, 6u, true);
    bitv.set(act, 7u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // mixed: third byte set
    act = bitv.create(31u, false);
    bitv.set(act, 16u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 18u, true);
    bitv.set(act, 19u, true);
    bitv.set(act, 20u, true);
    bitv.set(act, 21u, true);
    bitv.set(act, 22u, true);
    bitv.set(act, 23u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // mixed: trailing partial byte set
    act = bitv.create(31u, false);
    bitv.set(act, 24u, true);
    bitv.set(act, 25u, true);
    bitv.set(act, 26u, true);
    bitv.set(act, 27u, true);
    bitv.set(act, 28u, true);
    bitv.set(act, 29u, true);
    bitv.set(act, 30u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: scattered bits, including the last valid index (30)
    act = bitv.create(31u, false);
    bitv.set(act, 3u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 30u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 1u)));
}
// Bit vector of exactly 32 bits: fills one full 32-bit storage word, with no
// partial word at either end (the "exact boundary" case).
fn test_32_elements() {
    auto act;
    // all 0
    act = bitv.create(32u, false);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // all 1
    act = bitv.create(32u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: first byte set
    act = bitv.create(32u, false);
    bitv.set(act, 0u, true);
    bitv.set(act, 1u, true);
    bitv.set(act, 2u, true);
    bitv.set(act, 3u, true);
    bitv.set(act, 4u, true);
    bitv.set(act, 5u, true);
    bitv.set(act, 6u, true);
    bitv.set(act, 7u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // mixed: third byte set
    act = bitv.create(32u, false);
    bitv.set(act, 16u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 18u, true);
    bitv.set(act, 19u, true);
    bitv.set(act, 20u, true);
    bitv.set(act, 21u, true);
    bitv.set(act, 22u, true);
    bitv.set(act, 23u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u)));
    // mixed: last byte (bits 24-31) set
    act = bitv.create(32u, false);
    bitv.set(act, 24u, true);
    bitv.set(act, 25u, true);
    bitv.set(act, 26u, true);
    bitv.set(act, 27u, true);
    bitv.set(act, 28u, true);
    bitv.set(act, 29u, true);
    bitv.set(act, 30u, true);
    bitv.set(act, 31u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u)));
    // mixed: scattered bits, including the last bit of the word (31)
    act = bitv.create(32u, false);
    bitv.set(act, 3u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 30u, true);
    bitv.set(act, 31u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u)));
}
// Bit vector of 33 bits: one bit more than a full 32-bit word, so a second
// storage word holding a single bit is required (spill-over case).
fn test_33_elements() {
    auto act;
    // all 0
    act = bitv.create(33u, false);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u)));
    // all 1
    act = bitv.create(33u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                1u)));
    // mixed: first byte set
    act = bitv.create(33u, false);
    bitv.set(act, 0u, true);
    bitv.set(act, 1u, true);
    bitv.set(act, 2u, true);
    bitv.set(act, 3u, true);
    bitv.set(act, 4u, true);
    bitv.set(act, 5u, true);
    bitv.set(act, 6u, true);
    bitv.set(act, 7u, true);
    check (bitv.eq_vec(act, vec(1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u)));
    // mixed: third byte set
    act = bitv.create(33u, false);
    bitv.set(act, 16u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 18u, true);
    bitv.set(act, 19u, true);
    bitv.set(act, 20u, true);
    bitv.set(act, 21u, true);
    bitv.set(act, 22u, true);
    bitv.set(act, 23u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u)));
    // mixed: fourth byte set, spill-over bit (32) left clear
    act = bitv.create(33u, false);
    bitv.set(act, 24u, true);
    bitv.set(act, 25u, true);
    bitv.set(act, 26u, true);
    bitv.set(act, 27u, true);
    bitv.set(act, 28u, true);
    bitv.set(act, 29u, true);
    bitv.set(act, 30u, true);
    bitv.set(act, 31u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                1u, 1u, 1u, 1u, 1u, 1u, 1u, 1u,
                                0u)));
    // mixed: scattered bits, including the spill-over bit (32)
    act = bitv.create(33u, false);
    bitv.set(act, 3u, true);
    bitv.set(act, 17u, true);
    bitv.set(act, 30u, true);
    bitv.set(act, 31u, true);
    bitv.set(act, 32u, true);
    check (bitv.eq_vec(act, vec(0u, 0u, 0u, 1u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 1u, 0u, 0u, 0u, 0u, 0u, 0u,
                                0u, 0u, 0u, 0u, 0u, 0u, 1u, 1u,
                                1u)));
}
// Entry point: runs every bitv test case, in increasing element count, so the
// empty, single-word, word-boundary and word-spill cases are all exercised.
fn main() {
    test_0_elements();
    test_1_element();
    test_10_elements();
    test_31_elements();
    test_32_elements();
    test_33_elements();
}
|
use ::{TypeVariant, TypeData, Type, WeakTypeContainer, Result, TypeContainer, CompilerError};
use ::ir::TargetType;
use ::ir::variant::{Variant, VariantType};
use ::FieldReference;
use std::rc::Rc;
use std::cell::RefCell;
/// A tagged-union type: decodes as one of `cases`, selected at runtime by the
/// value of a sibling field referenced through `match_field_ref`.
#[derive(Debug)]
pub struct UnionVariant {
    /// Name of the union as given in the source definition.
    pub union_name: String,
    /// Unresolved reference to the field whose value selects the case.
    pub match_field_ref: FieldReference,
    /// Resolved match field; `None` until `do_resolve_references` runs.
    pub match_field: Option<WeakTypeContainer>,
    /// Result type of the match field; `None` until `do_resolve_references` runs.
    pub match_type: Option<TargetType>,
    /// The alternatives this union can decode into.
    pub cases: Vec<UnionCase>,
}
/// One alternative of a `UnionVariant`.
#[derive(Debug)]
pub struct UnionCase {
    /// Literal match value, kept as the unparsed source string.
    pub match_val_str: String,
    /// Name of this case in the generated union.
    pub case_name: String,
    /// Child type this case decodes as. Held weakly; the strong reference
    /// lives in the parent's `TypeData::children` (see `UnionVariantBuilder::case`).
    pub child: WeakTypeContainer,
}
impl TypeVariant for UnionVariant {
    default_resolve_child_name_impl!();

    /// A union exposes a single virtual property, `tag`: the discriminant
    /// read from the match field.
    fn has_property(&self, _data: &TypeData, name: &str) -> Option<TargetType> {
        // TODO: Infer type
        match name {
            "tag" => Some(TargetType::Integer),
            _ => None,
        }
    }

    fn get_type(&self, _data: &TypeData) -> VariantType {
        VariantType::Union
    }

    fn get_result_type(&self, _data: &TypeData) -> Option<TargetType> {
        Some(TargetType::Enum)
    }

    /// Resolves `match_field_ref` into a concrete field and caches that
    /// field's result type, which is what case values are matched against.
    fn do_resolve_references(&mut self, data: &mut TypeData,
                             resolver: &::ReferenceResolver) -> Result<()> {
        self.match_field = Some(resolver(self, data, &self.match_field_ref)?);

        let match_field = self.match_field.clone().unwrap().upgrade().unwrap();
        let match_field_inner = match_field.borrow();
        let match_field_type = match_field_inner.variant.to_variant()
            .get_result_type(&match_field_inner.data);

        // A field without a result type (nothing to compare against) cannot
        // be matched on.
        // BUG FIX: the original repeated this exact check a second time with
        // a plain string message; that duplicate could never fire and was
        // removed.
        ensure!(match_field_type.is_some(), CompilerError::UnmatchableType {
            variant: match_field_inner.variant.get_type(&match_field_inner.data),
        });

        self.match_type = match_field_type;
        Ok(())
    }
}
/// Builder that assembles a `Type` whose variant is a `UnionVariant`.
pub struct UnionVariantBuilder {
    // The type under construction; its variant is always `Variant::Union`.
    typ: Type,
}
impl UnionVariantBuilder {
    /// Starts a builder for a union named `union_name` that dispatches on the
    /// field identified by `match_field`. Resolution of that reference is
    /// deferred to `do_resolve_references`.
    pub fn new(union_name: String, match_field: FieldReference)
               -> UnionVariantBuilder {
        let variant = UnionVariant {
            union_name: union_name,
            match_field_ref: match_field,
            match_field: None,
            match_type: None,
            cases: Vec::new(),
        };
        UnionVariantBuilder {
            typ: Type {
                data: TypeData::default(),
                variant: Variant::Union(variant),
            },
        }
    }

    /// Adds one case: when the match field equals `match_val_str`, the union
    /// decodes as `case_name` using `child`'s type. The child is stored
    /// weakly in the case and strongly in the type's children list.
    pub fn case(&mut self, match_val_str: String, case_name: String,
                child: TypeContainer) {
        if let Variant::Union(ref mut variant) = self.typ.variant {
            variant.cases.push(UnionCase {
                match_val_str: match_val_str,
                case_name: case_name,
                child: Rc::downgrade(&child),
            });
        } else {
            // `new` always constructs a Variant::Union.
            unreachable!();
        }
        self.typ.data.children.push(child);
    }

    /// Finalizes the builder into a shared, mutable type container.
    pub fn build(self) -> ::std::result::Result<TypeContainer, String> {
        Ok(Rc::new(RefCell::new(self.typ)))
    }
}
|
#[path = "./engine/renderer.rs"]
pub mod my_renderer;
#[path = "./engine/loaders.rs"]
pub mod my_loaders;
use std::thread;
use std::sync::mpsc;
use cgmath::Matrix4;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize,Ordering};
use crate::my_game_engine::my_game_logic::my_renderer::{Renderer,Cam,ModelRst,MyVertex,StaticMesh,AnimatedMesh};
/// Top-level game state: loaded meshes, input latches, window bookkeeping,
/// camera, and a channel on which background loaders deliver mesh data.
pub struct CCGame {
    /// Static meshes paired with their model transform.
    pub models: Vec<(ModelRst, StaticMesh)>,
    /// Skinned meshes paired with their model transform.
    pub animated_models: Vec<(ModelRst, AnimatedMesh)>,
    /// Textures; index 0 is used for all draws in `render`.
    pub textures: Vec<glium::texture::SrgbTexture2d>,
    /// Momentary key latches, in order: W, S, A, D, X, Y, LShift (see `input`).
    pub key_states: [glutin::ElementState; 7],
    /// Latched toggles; [0] is the F key (ego-mode toggle, see `update`).
    pub toggle_key_states: [glutin::ElementState; 1],
    // NOTE(review): never read in the code shown here — possibly dead. Verify.
    pub prev_cursor_pos: glutin::dpi::LogicalPosition,
    /// Main-loop flag; cleared on CloseRequested or Escape.
    pub running: bool,
    // NOTE(review): never written in the code shown here — confirm a caller uses it.
    pub resized: bool,
    /// Set when F is pressed; presumably consumed by the window owner. TODO confirm.
    pub mode_changed: bool,
    /// First-person mouse-look mode; mouse motion rotates the camera only when true.
    pub ego_mode: bool,
    pub window_size: glutin::dpi::LogicalSize,
    // NOTE(review): `moved`/`window_position` are never written here — verify use.
    pub moved: bool,
    pub window_position: glutin::dpi::LogicalPosition,
    /// Free camera updated from keyboard/mouse input.
    pub cam: Cam,
    /// Receiving end for `(vertices, indices)` meshes loaded off-thread.
    pub rx: mpsc::Receiver<(Vec<MyVertex>, Vec<u16>)>,
    /// Sender handed to loader threads (paired with `rx`).
    pub tx: mpsc::Sender<(Vec<MyVertex>, Vec<u16>)>,
    /// Shared counter; presumably tracks meshes pending in the queue. TODO confirm.
    pub ready_in_que: Arc<AtomicUsize>,
    // NOTE(review): only initialized to 0.0 in the code shown — purpose unclear.
    pub mics_alive: f64
}
impl CCGame {
    /// Builds the initial game state: empty model/texture lists, a camera at
    /// the origin looking down +Z, all keys released, and an mpsc channel on
    /// which background loaders deliver `(vertices, indices)` mesh data.
    pub fn new() -> CCGame {
        let rst_v: Vec<(ModelRst, StaticMesh)> = Vec::new();
        let rst_av: Vec<(ModelRst, AnimatedMesh)> = Vec::new();
        let pos = cgmath::Point3 {
            x: 0.0,
            y: 0.0,
            z: 0.0,
        };
        let look_dir: cgmath::Vector3<f32> = cgmath::Vector3 {
            x: 0.0,
            y: 0.0,
            z: 1.0,
        };
        use cgmath::InnerSpace;
        let look_dir = look_dir.normalize();
        // 60-degree vertical field of view.
        let fv = cgmath::Rad(std::f32::consts::PI / 3.0);
        // BUG FIX: `(4 / 3) as f32` was integer division, which produced an
        // aspect ratio of 1.0 instead of the intended 4:3 and distorted the
        // first frame (until the first Resized event rebuilt the projection).
        let perspective: cgmath::Matrix4<f32> =
            cgmath::perspective(fv, 4.0_f32 / 3.0, 0.1, 1024.0);
        let vt = Vec::new();
        let (tx, rx) = mpsc::channel();
        CCGame {
            models: rst_v,
            animated_models: rst_av,
            textures: vt,
            // Momentary movement keys: W, S, A, D, X, Y, LShift.
            key_states: [glutin::ElementState::Released; 7],
            // Latched toggle: F (ego-mode on/off).
            toggle_key_states: [glutin::ElementState::Released; 1],
            prev_cursor_pos: glutin::dpi::LogicalPosition { x: 0.0, y: 0.0 },
            running: true,
            resized: false,
            mode_changed: false,
            ego_mode: true,
            // NOTE(review): 300x400 does not match the 4:3 aspect used above
            // — confirm the intended initial window size.
            window_size: glutin::dpi::LogicalSize { height: 400.0, width: 300.0 },
            moved: false,
            window_position: glutin::dpi::LogicalPosition { x: 0.0, y: 0.0 },
            cam: Cam {
                pos,
                look_dir,
                ha: std::f32::consts::PI,
                va: std::f32::consts::PI,
                perspective,
                speed: 0.001,
            },
            rx: (rx),
            tx: (tx),
            ready_in_que: Arc::new(AtomicUsize::new(0)),
            mics_alive: 0.0,
        }
    }

    /// Loads the initial scene: four copies of the animated test mesh placed
    /// one unit apart along +X, plus the shared cube texture.
    pub fn init(&mut self, display: &mut glium::Display) {
        // The four meshes were four copy-pasted stanzas differing only in
        // their X translation; load them in a loop instead.
        for i in 0..4 {
            let mut am = my_loaders::loaders::load_animated_collada_mesh(display, "./res/untitled.dae");
            let t: cgmath::Matrix4<f32> = cgmath::Matrix4::from_translation(cgmath::Vector3 {
                x: i as f32,
                y: 0.0,
                z: 1.0,
            });
            let s: cgmath::Matrix4<f32> = cgmath::Matrix4::from_scale(0.1);
            am.0.translation = t;
            am.0.scale = s;
            am.1.running = true;
            self.animated_models.push(am);
        }
        let texture = my_loaders::loaders::load_texture(display, "./res/cubeTex.png");
        self.textures.push(texture);
    }

    /// Drains the event loop: mouse motion rotates the camera (ego mode
    /// only), window events update size/close state, keyboard events latch
    /// the key arrays consumed by `update`.
    pub fn input(&mut self, events_loop: &mut glutin::EventsLoop) {
        use glutin::ElementState::{Pressed, Released};
        events_loop.poll_events(|event| {
            match event {
                glutin::Event::DeviceEvent { event, .. } => match event {
                    glutin::DeviceEvent::MouseMotion { delta, .. } => {
                        if self.ego_mode {
                            let mouse_speed: f32 = 0.001;
                            // Horizontal motion is negated: moving the mouse
                            // right turns the camera right.
                            self.cam.rotate(-delta.0 as f32 * mouse_speed, delta.1 as f32 * mouse_speed);
                        }
                    },
                    _ => ()
                },
                glutin::Event::WindowEvent { event, .. } => match event {
                    glutin::WindowEvent::Resized(size) => {
                        // Rebuild the projection with the new aspect ratio.
                        let fv = cgmath::Rad(std::f32::consts::PI / 3.0);
                        let perspective: cgmath::Matrix4<f32> =
                            cgmath::perspective(fv, (size.width / size.height) as f32, 0.1, 1024.0);
                        self.cam.perspective = perspective;
                        self.window_size = size;
                    },
                    glutin::WindowEvent::CloseRequested => self.running = false,
                    glutin::WindowEvent::KeyboardInput { input, .. } => {
                        match input.virtual_keycode {
                            Some(glutin::VirtualKeyCode::W) => if input.state == Pressed { self.key_states[0] = Pressed } else { self.key_states[0] = Released },
                            Some(glutin::VirtualKeyCode::S) => if input.state == Pressed { self.key_states[1] = Pressed } else { self.key_states[1] = Released },
                            Some(glutin::VirtualKeyCode::A) => if input.state == Pressed { self.key_states[2] = Pressed } else { self.key_states[2] = Released },
                            Some(glutin::VirtualKeyCode::D) => if input.state == Pressed { self.key_states[3] = Pressed } else { self.key_states[3] = Released },
                            Some(glutin::VirtualKeyCode::X) => if input.state == Pressed { self.key_states[4] = Pressed } else { self.key_states[4] = Released },
                            Some(glutin::VirtualKeyCode::Y) => if input.state == Pressed { self.key_states[5] = Pressed } else { self.key_states[5] = Released },
                            Some(glutin::VirtualKeyCode::LShift) => if input.state == Pressed { self.key_states[6] = Pressed } else { self.key_states[6] = Released },
                            // F flips the latched toggle on key-down only, so
                            // holding the key does not re-toggle on release.
                            Some(glutin::VirtualKeyCode::F) => if input.state == Pressed { self.mode_changed = true; if self.toggle_key_states[0] == Pressed { self.toggle_key_states[0] = Released } else { self.toggle_key_states[0] = Pressed } },
                            Some(glutin::VirtualKeyCode::Escape) => self.running = false,
                            Some(glutin::VirtualKeyCode::Space) => if input.state == Pressed {
                                /*if self.animated_models[0].1.running {self.animated_models[0].1.running = false} else { self.animated_models[0].1.running = true }*/
                            },
                            _ => (),
                        }
                    },
                    _ => (),
                },
                _ => ()
            }
        });
    }

    /// Moves at most one background-loaded mesh from the channel onto the
    /// GPU and appends it to `models`, spaced along +X by arrival order.
    pub fn load_que(&mut self, display: &mut glium::Display) {
        match self.rx.try_recv() {
            Ok(x) => {
                let vb = glium::VertexBuffer::new(display, &x.0).unwrap();
                let ib = glium::IndexBuffer::new(display, glium::index::PrimitiveType::TrianglesList, &x.1).unwrap();
                let scale = Matrix4::from_scale(0.1);
                let translation = Matrix4::from_translation(cgmath::Vector3 {
                    x: (self.models.len() as f32),
                    y: (0.0),
                    z: (2.0)
                });
                let rotation: Matrix4<f32> = cgmath::SquareMatrix::identity();
                self.models.push((ModelRst {
                    rotation: rotation,
                    scale: scale,
                    translation: translation
                }, StaticMesh {
                    vertices: (vb),
                    indices: (ib)
                }));
            },
            // BUG FIX: `try_recv` returns `Empty` on every frame with no
            // pending mesh; the original printed that "error" each frame,
            // flooding stdout. Only a disconnected channel is reported now.
            Err(mpsc::TryRecvError::Empty) => {},
            Err(e) => println!("{}", e)
        }
    }

    /// Advances animations by `dt` (milliseconds) and applies latched keys:
    /// W/S/A/D/X/Y move the camera (diagonals via `move_angle`), LShift
    /// sprints, and the F toggle switches ego (mouse-look) mode.
    pub fn update(&mut self, dt: &f32) {
        for animated_model in &mut self.animated_models {
            if animated_model.1.running {
                // dt is in milliseconds; animation time advances in seconds.
                animated_model.1.advance_time(&(dt / 1000.0));
            }
        }
        use glutin::ElementState::Pressed;
        // Opposing keys cancel each other: no movement this frame.
        if (self.key_states[0] == Pressed && self.key_states[1] == Pressed)
            || (self.key_states[2] == Pressed && self.key_states[3] == Pressed)
            || (self.key_states[4] == Pressed && self.key_states[5] == Pressed)
        {
            return;
        }
        if self.key_states[0] == Pressed && self.key_states[2] == Pressed {
            self.cam.move_angle(dt, &(std::f32::consts::PI * 0.25));
        } else if self.key_states[0] == Pressed && self.key_states[3] == Pressed {
            self.cam.move_angle(dt, &(std::f32::consts::PI * 1.75));
        } else if self.key_states[1] == Pressed && self.key_states[2] == Pressed {
            self.cam.move_angle(dt, &(std::f32::consts::PI * 0.75));
        } else if self.key_states[1] == Pressed && self.key_states[3] == Pressed {
            self.cam.move_angle(dt, &(std::f32::consts::PI * 1.25));
        } else if self.key_states[0] == Pressed {
            self.cam.forward(dt);
        } else if self.key_states[1] == Pressed {
            self.cam.backward(dt);
        } else if self.key_states[2] == Pressed {
            self.cam.right(dt);
        } else if self.key_states[3] == Pressed {
            self.cam.left(dt);
        } else if self.key_states[4] == Pressed {
            self.cam.up(dt);
        } else if self.key_states[5] == Pressed {
            self.cam.down(dt);
        }
        // LShift acts as a sprint modifier.
        if self.key_states[6] == Pressed {
            self.cam.speed = 0.005;
        } else {
            self.cam.speed = 0.001;
        }
        // The latched F key disables ego (mouse-look) mode while toggled on.
        if self.toggle_key_states[0] == Pressed {
            self.ego_mode = false;
        } else {
            self.ego_mode = true
        }
    }

    /// Recomputes animated poses, then draws all static and animated meshes
    /// with the first loaded texture (requires `init`/`load_que` to have
    /// pushed at least one texture).
    pub fn render(&mut self, renderer: &mut Renderer, target_frame: &mut glium::Frame, display: &mut glium::Display) {
        for model in &mut self.animated_models {
            model.1.calculate_current_pose();
        }
        for model in &self.models {
            renderer.draw_textured_static_mesh(target_frame, &self.cam, model, &self.textures[0]);
        }
        for model in &self.animated_models {
            renderer.draw_textured_animated_mesh(target_frame, display, &self.cam, model, &self.textures[0]);
        }
    }
}
|
use core::cmp;
use core::fmt;
use core::mem;
use core::ptr;
use num_bigint::Sign;
use crate::erts::term::prelude::*;
use liblumen_core::sys::Endianness;
/// Bit-set of options controlling how a value is pushed into a binary
/// (alignment, endianness, signedness, exact-size); `repr(transparent)` so it
/// crosses FFI as a plain machine word. Flag values mirror the comments below.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct BinaryPushFlags(usize);
impl BinaryPushFlags {
    const FLAG_ALIGNED: usize = 1; /* Field is guaranteed to be byte-aligned. */
    const FLAG_LITTLE: usize = 2; /* Field is little-endian (otherwise big-endian). */
    const FLAG_SIGNED: usize = 4; /* Field is signed (otherwise unsigned). */
    const FLAG_EXACT: usize = 8; /* Size in bs_init is exact. */
    const FLAG_NATIVE: usize = 16; /* Native endian. */

    /// Builds a flag set from signedness and endianness; all other flags
    /// start cleared.
    #[inline]
    pub fn new(signed: bool, endianness: Endianness) -> Self {
        let mut bits = match endianness {
            Endianness::Little => Self::FLAG_LITTLE,
            Endianness::Big => 0,
            Endianness::Native => Self::FLAG_NATIVE,
        };
        if signed {
            bits |= Self::FLAG_SIGNED;
        }
        Self(bits)
    }

    /// Recovers the endianness encoded in the flags. Little-endian wins over
    /// native if both bits are somehow set; neither bit means big-endian.
    pub fn as_endianness(&self) -> Endianness {
        match (self.is_little_endian(), self.is_native_endian()) {
            (true, _) => Endianness::Little,
            (false, true) => Endianness::Native,
            (false, false) => Endianness::Big,
        }
    }

    /// Returns a copy with the aligned flag forced to `aligned`.
    #[inline]
    pub fn set_aligned(self, aligned: bool) -> Self {
        let cleared = self.0 & !Self::FLAG_ALIGNED;
        Self(if aligned { cleared | Self::FLAG_ALIGNED } else { cleared })
    }

    #[inline(always)]
    pub fn is_aligned(&self) -> bool {
        self.0 & Self::FLAG_ALIGNED != 0
    }

    #[inline(always)]
    pub fn is_signed(&self) -> bool {
        self.0 & Self::FLAG_SIGNED != 0
    }

    #[inline(always)]
    pub fn is_little_endian(&self) -> bool {
        self.0 & Self::FLAG_LITTLE != 0
    }

    #[inline(always)]
    pub fn is_native_endian(&self) -> bool {
        self.0 & Self::FLAG_NATIVE != 0
    }

    #[inline(always)]
    pub fn is_exact_size(&self) -> bool {
        self.0 & Self::FLAG_EXACT != 0
    }
}
impl Default for BinaryPushFlags {
fn default() -> Self {
Self::new(false, Endianness::Big)
}
}
impl fmt::Debug for BinaryPushFlags {
    // Renders both the raw bit pattern and the decoded fields, so a log line
    // is readable without cross-referencing the FLAG_* constants.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("BinaryPushFlags")
            .field("raw", &format_args!("{:b}", self.0))
            .field("endianness", &format_args!("{:?}", self.as_endianness()))
            .field("is_aligned", &self.is_aligned())
            .field("is_exact_size", &self.is_exact_size())
            .finish()
    }
}
/// C-compatible (`repr(C)`) result pair for a push operation: the builder
/// pointer to continue with, plus a success flag.
#[repr(C)]
pub struct BinaryPushResult {
    pub builder: *mut BinaryBuilder,
    pub success: bool,
}
/*
* Here is how many bits we can copy in each reduction.
*
* At the time of writing of this comment, CONTEXT_REDS was 4000 and
* BITS_PER_REDUCTION was 1 KiB (8192 bits). The time for copying an
* unaligned 4000 KiB binary on my computer (which has a 4,2 GHz Intel
* i7 CPU) was about 5 ms. The time was approximately 4 times lower if
* the source and destinations binaries were aligned.
*/
#[allow(unused)]
const BITS_PER_REDUCTION: usize = 8 * 1024;
/// Incrementally builds a binary. `buffer` holds the bytes produced so far;
/// `offset` is the current write position in BITS — pushes need not be
/// byte-aligned.
pub struct BinaryBuilder {
    buffer: Vec<u8>,
    offset: usize,
}
impl BinaryBuilder {
#[inline]
pub fn new() -> Self {
Self {
buffer: Vec::new(),
offset: 0,
}
}
pub fn finish(self) -> Vec<u8> {
self.buffer
}
}
/// Number of whole bytes needed to hold `$n` bits (ceiling division by 8).
macro_rules! nbytes {
    ($n:expr) => {
        (($n + 7) >> 3)
    };
}
/// Index of the byte containing bit `$n`.
macro_rules! byte_offset {
    ($n:expr) => {
        ($n >> 3)
    };
}
/// Position of bit `$n` within its byte (0..=7).
macro_rules! bit_offset {
    ($n:expr) => {
        ($n & 7)
    };
}
impl BinaryBuilder {
    /// Writes `value` into the builder as a `num_bits`-wide integer field,
    /// honoring the endianness/signedness in `flags`.
    ///
    /// Port of `erts_new_bs_put_integer` from Erlang/OTP's erl_bits.c.
    pub fn push_integer(
        &mut self,
        value: Integer,
        num_bits: usize,
        flags: BinaryPushFlags,
    ) -> Result<(), ()> {
        if num_bits == 0 {
            return Ok(());
        }
        let bin_offset = self.offset;
        let bit_offset = bit_offset!(bin_offset);
        let mut b;
        let num_bytes = nbytes!(num_bits);
        self.ensure_needed(num_bytes);
        let mut iptr: *mut u8 = self.buffer.as_mut_ptr();
        match value {
            Integer::Small(small) if bit_offset + num_bits <= 8 => {
                let val: isize = small.into();
                let rbits = 8 - bit_offset;
                // All bits land in the same byte: keep the already-written
                // high bits and mask the new value in below them.
                unsafe {
                    iptr = iptr.offset(byte_offset!(bin_offset) as isize);
                    b = *iptr as usize & (0xFF << rbits);
                    b |= (val as usize & ((1 << num_bits) - 1)) << (8 - bit_offset - num_bits);
                    *iptr = b as u8;
                }
            }
            Integer::Small(_) if bit_offset == 0 => {
                // More than one bit, starting at a byte boundary: format
                // straight into the destination buffer.
                fmt_int(
                    unsafe { iptr.offset(byte_offset!(bin_offset) as isize) },
                    num_bytes,
                    value,
                    num_bits,
                    flags,
                )?;
            }
            Integer::Small(_) if flags.is_little_endian() => {
                // Small unaligned little-endian number: format into a
                // temporary buffer, then bit-copy into the binary.
                //
                // BUG FIX: the scratch buffer is zero-initialized now; the
                // original let fmt_int write into the uninitialized capacity
                // of an empty Vec (UB, and the bytes were never reachable
                // through safe code).
                let mut tmp_buffer: Vec<u8> = vec![0; num_bytes];
                let tmp_ptr = tmp_buffer.as_mut_ptr();
                fmt_int(tmp_ptr, num_bytes, value, num_bits, flags)?;
                unsafe {
                    copy_bits(
                        tmp_ptr,
                        0,
                        CopyDirection::Forward,
                        iptr,
                        bin_offset,
                        CopyDirection::Forward,
                        num_bits,
                    );
                }
            }
            Integer::Small(small) => {
                let rbits = 8 - bit_offset;
                // Big-endian, more than one byte, not aligned on a byte
                // boundary. Fill the partial first byte by hand, then let
                // fmt_int handle the rest.
                let shift_count = num_bits - rbits;
                let val: isize = small.into();
                unsafe {
                    iptr = iptr.offset(byte_offset!(bin_offset) as isize);
                    b = *iptr as usize & (0xFF << rbits);
                }
                // Shifting with a shift count >= the word size may be a no-op
                // (instead of 0 the result may be the unshifted value), so
                // only shift when the count is in range; for negative values
                // simulate the sign extension instead.
                if shift_count < mem::size_of::<usize>() * 8 {
                    b |= (val as usize >> shift_count) & ((1 << rbits) - 1);
                } else if val < 0 {
                    // BUG FIX: was `(-1isize & (1 << (rbits as isize) - 1))`,
                    // which — because `-` binds tighter than `<<` — produced
                    // the single bit `1 << (rbits - 1)` instead of an
                    // all-ones mask of `rbits` bits (cf. `(-1) & ((1 <<
                    // rbits)-1)` in erl_bits.c).
                    b |= (1usize << rbits) - 1;
                }
                unsafe {
                    // BUG FIX: write the completed byte *before* advancing
                    // the pointer (C's `*iptr++ = b`). The original advanced
                    // first, leaving the partial byte unwritten and
                    // clobbering the following byte.
                    *iptr = b as u8;
                    iptr = iptr.offset(1);
                }
                // NOTE: fmt_int is known not to fail here
                fmt_int(
                    iptr,
                    nbytes!(num_bits - rbits),
                    value,
                    num_bits - rbits,
                    flags,
                )?;
            }
            Integer::Big(_) if bit_offset == 0 => {
                // Big integer, aligned on a byte boundary: format directly
                // into the binary.
                fmt_int(
                    unsafe { iptr.offset(byte_offset!(bin_offset) as isize) },
                    num_bytes,
                    value,
                    num_bits,
                    flags,
                )?;
            }
            Integer::Big(_) => {
                // Unaligned big integer: format into a zero-initialized
                // scratch buffer (see above), then bit-copy into place.
                let mut tmp_buffer: Vec<u8> = vec![0; num_bytes];
                let tmp_ptr = tmp_buffer.as_mut_ptr();
                fmt_int(tmp_ptr, num_bytes, value, num_bits, flags)?;
                unsafe {
                    copy_bits(
                        tmp_ptr,
                        0,
                        CopyDirection::Forward,
                        iptr,
                        bin_offset,
                        CopyDirection::Forward,
                        num_bits,
                    );
                }
            }
        }
        // BUG FIX: the bit position advances by exactly `num_bits` (as in
        // erl_bits.c, which discards fmt_int's result). The original also
        // added fmt_int's return value into the offset, double-counting the
        // bits written and corrupting every subsequent push.
        self.offset = bin_offset + num_bits;
        Ok(())
    }

    // TODO: erts_new_bs_put_float(Process *c_p, Eterm arg, Uint num_bits, int flags);
    pub fn push_float(
        &mut self,
        _value: f64,
        _num_bits: usize,
        _flags: BinaryPushFlags,
    ) -> Result<(), ()> {
        unimplemented!()
    }

    /// Appends `value` as one UTF-8-encoded code point (1–4 bytes).
    /// Fails for negative values, UTF-16 surrogates and values > 0x10FFFF.
    ///
    /// Port of `erts_bs_put_utf8` from erl_bits.c.
    pub fn push_utf8(&mut self, value: isize) -> Result<(), ()> {
        let bin_offset = self.offset;
        let num_bits;
        let mut tmp_buf = [0u8; 4];

        if value < 0 {
            return Err(());
        }

        let bit_offset = bit_offset!(bin_offset);
        // An unaligned destination must be encoded into a temporary buffer
        // and bit-copied into place afterwards.
        let use_tmp = bit_offset != 0;

        // Determine the encoded width (and validate the code point) first,
        // so the buffer can be grown before any byte is written.
        if value < 0x80 {
            num_bits = 8;
        } else if value < 0x800 {
            num_bits = 16;
        } else if value < 0x10000 {
            // Reject UTF-16 surrogate code points.
            if 0xD800 <= value && value <= 0xDFFF {
                return Err(());
            }
            num_bits = 24;
        } else if value < 0x110000 {
            num_bits = 32;
        } else {
            return Err(());
        }
        self.ensure_needed(nbytes!(num_bits));

        // BUG FIX: the destination pointer is taken only after ensure_needed
        // has grown (and possibly reallocated) the buffer. The original
        // wrote the single-byte case through a pointer obtained before the
        // resize — an out-of-bounds / dangling write.
        let dst: *mut u8 = if use_tmp {
            tmp_buf.as_mut_ptr()
        } else {
            unsafe {
                self.buffer
                    .as_mut_ptr()
                    .offset(byte_offset!(bin_offset) as isize)
            }
        };

        unsafe {
            match num_bits {
                8 => *dst = value as u8,
                16 => {
                    *dst = 0xC0 | ((value >> 6) as u8);
                    *(dst.offset(1)) = 0x80 | ((value & 0x3F) as u8);
                }
                24 => {
                    *dst = 0xE0 | ((value >> 12) as u8);
                    *(dst.offset(1)) = 0x80 | ((value >> 6) as u8 & 0x3F);
                    *(dst.offset(2)) = 0x80 | ((value & 0x3F) as u8);
                }
                _ => {
                    *dst = 0xF0 | ((value >> 18) as u8);
                    *(dst.offset(1)) = 0x80 | ((value >> 12) as u8 & 0x3F);
                    *(dst.offset(2)) = 0x80 | ((value >> 6) as u8 & 0x3F);
                    *(dst.offset(3)) = 0x80 | ((value & 0x3F) as u8);
                }
            }
        }

        if use_tmp {
            // BUG FIX: the copy-back from the temporary buffer is needed
            // exactly when the destination is bit-unaligned. The original
            // guarded on `bin_offset != 0`, triggering a needless
            // self-overlapping bit-copy whenever the offset was byte-aligned
            // but non-zero.
            unsafe {
                copy_bits(
                    dst,
                    0,
                    CopyDirection::Forward,
                    self.buffer.as_mut_ptr(),
                    bin_offset,
                    CopyDirection::Forward,
                    num_bits,
                );
            }
        }
        self.offset += num_bits;
        Ok(())
    }

    /// Appends `value` as UTF-16: one code unit, or a surrogate pair for
    /// values above 0xFFFF, with byte order chosen by `flags`.
    ///
    /// Port of `erts_bs_put_utf16` from erl_bits.c.
    pub fn push_utf16(&mut self, value: isize, flags: BinaryPushFlags) -> Result<(), ()> {
        let bin_offset = self.offset;
        let num_bits;
        let mut tmp_buf = [0u8; 4];

        if value > 0x10FFFF || (0xD800 <= value && value <= 0xDFFF) {
            return Err(());
        }

        let bit_offset = bit_offset!(bin_offset);
        // Unaligned destination: encode via a temporary buffer (see push_utf8).
        let use_tmp = bit_offset != 0;

        num_bits = if value < 0x10000 { 16 } else { 32 };
        self.ensure_needed(nbytes!(num_bits));

        // Pointer taken after ensure_needed so a reallocation cannot
        // invalidate it (see push_utf8).
        let dst: *mut u8 = if use_tmp {
            tmp_buf.as_mut_ptr()
        } else {
            unsafe {
                self.buffer
                    .as_mut_ptr()
                    .offset(byte_offset!(bin_offset) as isize)
            }
        };

        if value < 0x10000 {
            // Single code unit; bytes are emitted explicitly so host
            // endianness never leaks into the output.
            if flags.is_little_endian() {
                unsafe {
                    *dst = value as u8;
                    *(dst.offset(1)) = (value >> 8) as u8;
                }
            } else {
                unsafe {
                    *dst = (value >> 8) as u8;
                    *(dst.offset(1)) = value as u8;
                }
            }
        } else {
            // Surrogate pair.
            let value = value - 0x10000;
            let w1: u16 = 0xD800 | ((value >> 10) as u16);
            let w2: u16 = 0xDC00 | ((value & 0x3FF) as u16);
            // BUG FIX: the original stored w1/w2 through an (unaligned!)
            // `*mut u16`, and its big-endian branch stored the NATIVE
            // representation — wrong byte order on little-endian hosts.
            // Bytes are emitted explicitly instead, as erl_bits.c does.
            let (b0, b1, b2, b3) = if flags.is_little_endian() {
                (w1 as u8, (w1 >> 8) as u8, w2 as u8, (w2 >> 8) as u8)
            } else {
                ((w1 >> 8) as u8, w1 as u8, (w2 >> 8) as u8, w2 as u8)
            };
            unsafe {
                *dst = b0;
                *(dst.offset(1)) = b1;
                *(dst.offset(2)) = b2;
                *(dst.offset(3)) = b3;
            }
        }

        if use_tmp {
            // Copy back only when the destination was bit-unaligned (the
            // original guarded on `bin_offset != 0`; see push_utf8).
            unsafe {
                copy_bits(
                    dst,
                    0,
                    CopyDirection::Forward,
                    self.buffer.as_mut_ptr(),
                    bin_offset,
                    CopyDirection::Forward,
                    num_bits,
                );
            }
        }
        self.offset += num_bits;
        Ok(())
    }

    /// Appends `value` interpreted as a bitstring. Only binary terms with
    /// unit == 1 are currently supported.
    pub fn push_bits_unit(&mut self, value: Term, unit: u8) -> Result<(), ()> {
        match value.decode().unwrap() {
            TypedTerm::BinaryLiteral(binary_literal) => {
                assert_eq!(unit, 1);
                self.push_string(binary_literal.as_bytes())
            }
            TypedTerm::HeapBinary(heap_binary) => {
                assert_eq!(unit, 1);
                self.push_string(heap_binary.as_bytes())
            }
            _ => unimplemented!("pushing value ({}) as bits with unit ({})", value, unit),
        }
    }

    /// Appends the low `unit` bytes (little-endian first) of a small integer.
    pub fn push_byte_unit(&mut self, value: Term, unit: u8) -> Result<(), ()> {
        match value.decode().unwrap() {
            TypedTerm::SmallInteger(small_integer) => {
                let bytes = small_integer.to_le_bytes();
                let unit_usize = unit as usize;
                assert!(1 <= unit && unit_usize <= bytes.len());
                self.push_string(&bytes[0..unit_usize])
            }
            _ => unimplemented!("pushing value ({}) as byte with unit ({})", value, unit),
        }
    }

    /// Appends a byte slice at the current (possibly bit-unaligned) offset.
    pub fn push_string(&mut self, value: &[u8]) -> Result<(), ()> {
        self.ensure_needed(value.len());
        let offset = unsafe { write_bytes(self.buffer.as_mut_ptr(), self.offset, value) };
        self.offset += offset;
        Ok(())
    }

    /// Grows the buffer by `need` zero-filled bytes.
    ///
    /// NOTE(review): this grows relative to the current length rather than
    /// to the length implied by `self.offset + need`, so pushes overlapping
    /// a partial byte over-allocate slightly. Harmless, but worth a look.
    #[inline]
    fn ensure_needed(&mut self, need: usize) {
        self.buffer.resize(need + self.buffer.len(), 0);
    }
}
/// Formats `value` into `buf` as a field of `size_bytes` bytes / `size_bits`
/// bits, using the signedness/endianness from `flags`; dispatches to
/// `fmt_int_bytes` with the value's raw byte representation.
///
/// Counterpart of `fmt_int` in Erlang/OTP's erl_bits.c.
fn fmt_int(
    buf: *mut u8,
    size_bytes: usize,
    value: Integer,
    size_bits: usize,
    flags: BinaryPushFlags,
) -> Result<usize, ()> {
    let is_signed = flags.is_signed();
    let is_little = flags.is_little_endian();
    match value {
        Integer::Small(small) => {
            // NOTE(review): the small branch asserts size_bits != 0 while the
            // big branch returns Ok(0) — confirm the asymmetry is intended.
            assert_ne!(size_bits, 0);
            let v: isize = small.into();
            // NOTE(review): this is always the full native-width (8-byte)
            // array; see the big-endian truncation note on `fmt_int_bytes`.
            let bytes = if is_little {
                v.to_le_bytes()
            } else {
                v.to_be_bytes()
            };
            // Sign byte used for padding when the field is wider than the value.
            let sign = if is_signed && v < 0 { 1 } else { 0 };
            unsafe { fmt_int_bytes(&bytes, sign, buf, size_bytes, size_bits, flags) }
        }
        Integer::Big(big) => {
            if size_bits == 0 {
                return Ok(0);
            }
            let bytes = if is_little {
                big.to_signed_bytes_le()
            } else {
                big.to_signed_bytes_be()
            };
            let sign = if is_signed && big.sign() == Sign::Minus {
                1
            } else {
                0
            };
            unsafe { fmt_int_bytes(bytes.as_slice(), sign, buf, size_bytes, size_bits, flags) }
        }
    }
}
/// Writes `bytes` (plus sign-extension padding) into `buf` as a field of
/// `size_bytes` bytes / `size_bits` bits, returning a bit count.
///
/// # Safety
/// `buf` must be valid for writes of at least `size_bytes` bytes.
///
/// NOTE(review): several things here look off versus erl_bits.c; verify:
///  * `bit_offset` is derived from `size_bits` — the field WIDTH — yet is
///    used as the write position within `buf`;
///  * `pad_bytes` returns an ABSOLUTE bit offset, so `bit_offset +=
///    pad_bytes(..)` double-counts the current offset (`bit_offset =
///    pad_bytes(..)` looks intended);
///  * for big-endian values, `bytes[0..num_bytes]` keeps the MOST-significant
///    bytes of the (possibly native-width) input array, truncating the value;
///    the LAST `num_bytes` look intended.
unsafe fn fmt_int_bytes(
    bytes: &[u8],
    sign: u8,
    buf: *mut u8,
    size_bytes: usize,
    size_bits: usize,
    flags: BinaryPushFlags,
) -> Result<usize, ()> {
    let mut bit_offset = bit_offset!(size_bits);
    let num_bytes = cmp::min(size_bytes, bytes.len());
    if num_bytes < size_bytes {
        let diff = size_bytes - num_bytes;
        // A larger field was requested than the value provides, so
        // sign-extend the value with padding on the high side.
        if flags.is_little_endian() {
            // Pad right
            let offset = write_bytes(buf, bit_offset, &bytes[0..num_bytes]);
            Ok(pad_bytes(buf, offset, diff, sign))
        } else {
            // Pad left
            bit_offset += pad_bytes(buf, bit_offset, diff, sign);
            Ok(write_bytes(buf, bit_offset, &bytes[0..num_bytes]))
        }
    } else {
        // Value fills (or overfills) the field: write the leading bytes as-is.
        Ok(write_bytes(buf, bit_offset, &bytes[0..num_bytes]))
    }
}
/// Fills `padding` bytes of `dst`, starting at bit `offset`, with the byte
/// value `sign`, and returns the absolute bit offset just past the padding.
///
/// # Safety
/// `dst` must be valid for writes over the whole padded range.
///
/// NOTE(review): callers pass `sign` as 0 or 1; for sign extension the pad
/// byte presumably should be 0x00 or 0xFF — confirm `sign` is really the
/// intended fill byte. Also, the mask width below uses the full `offset`
/// rather than `offset % 8`; confirm only sub-byte offsets reach this path.
unsafe fn pad_bytes(dst: *mut u8, offset: usize, padding: usize, sign: u8) -> usize {
    use super::primitives::{make_bitmask, mask_bits};
    if offset % 8 == 0 {
        // Byte-aligned: a plain memset of the padding bytes.
        dst.offset(byte_offset!(offset) as isize)
            .write_bytes(sign, padding);
    } else {
        // Not byte-aligned: merge the fill value into the bits of the first
        // (partial) byte, then memset whole bytes for the remainder.
        let lmask = make_bitmask(8 - offset as u8);
        // Handle bits in first (unaligned) byte
        let base = dst.offset(byte_offset!(offset) as isize);
        base.write(mask_bits(sign, *base, lmask));
        // All that's left to copy are the remaining bytes, if any
        let padding = padding - 1;
        if padding > 0 {
            base.offset(1).write_bytes(sign, padding);
        }
    }
    // The shadowed `padding` above is scoped to the else-branch; this uses
    // the caller's original count.
    offset + (padding * 8)
}
/// Copies `value` into `dst` starting at bit `offset`; returns the number of
/// bits written (always `value.len() * 8`).
///
/// # Safety
/// `dst` must be valid for writes covering the whole destination range and
/// must not overlap `value`.
unsafe fn write_bytes(dst: *mut u8, offset: usize, value: &[u8]) -> usize {
    let ptr = value.as_ptr();
    let num_bytes = value.len();
    if bit_offset!(offset) != 0 {
        // Bit-unaligned destination: fall back to a bit-granular copy.
        copy_bits(
            ptr,
            0,
            CopyDirection::Forward,
            dst,
            offset,
            CopyDirection::Forward,
            num_bytes * 8,
        );
    } else {
        // Byte-aligned destination: a straight memcpy.
        let byte_offs = byte_offset!(offset) as isize;
        ptr::copy_nonoverlapping(ptr, dst.offset(byte_offs), num_bytes);
    }
    num_bytes * 8
}
|
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeSet;
use std::collections::HashSet;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use libc::*;
use super::*;
use super::super::qlib::*;
use super::super::qlib::perf_tunning::*;
use super::super::qlib::qmsg::*;
/// One pending timer: fires at the absolute time `expire` and is delivered
/// back to the guest as a `FireTimer { timerId, seqNo }` message.
#[derive(Debug, Copy, Clone)]
pub struct Timer {
    /// Absolute expiration time in nanoseconds (same clock as `HostTime`;
    /// see `TimerKeeper::SetTimer`, which splits it into sec/nsec).
    pub expire: i64,
    /// Owning task id; also the sole identity used by `Eq`/`Hash`.
    pub timerId: u64,
    /// Sequence number echoed back in the `FireTimer` message.
    pub seqNo: u64,
}
impl Timer {
    /// Builds a timer owned by task `taskId`, firing at absolute time
    /// `expire`, carrying sequence number `seqNo`.
    pub fn New(taskId: u64, seqNo: u64, expire: i64) -> Self {
        Self {
            expire,
            timerId: taskId,
            seqNo,
        }
    }
}
impl Ord for Timer {
    /// Orders by expiration time first, then by timer id as a tiebreaker, so
    /// a BTreeSet yields timers in firing order.
    fn cmp(&self, other: &Self) -> Ordering {
        self.expire
            .cmp(&other.expire)
            .then_with(|| self.timerId.cmp(&other.timerId))
    }
}
impl PartialOrd for Timer {
    // Total order delegated to `Ord::cmp`; never returns None.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Eq for Timer {}
impl PartialEq for Timer {
    // Identity is the timer id alone (`expire`/`seqNo` ignored), so
    // `TimerSet` dedupes by id.
    // NOTE(review): this disagrees with `Ord`, which compares `expire`
    // first: two timers with equal ids but different expiry compare equal
    // here yet not `Ordering::Equal` — confirm the BTreeSet/HashSet usage
    // tolerates that inconsistency.
    fn eq(&self, other: &Self) -> bool {
        self.timerId == other.timerId
    }
}
impl Hash for Timer {
    // Hash only the timer id, matching `PartialEq` (required for HashSet).
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.timerId.hash(state);
    }
}
/// Host-side timer multiplexer: many logical timers funneled through one
/// timerfd, which is kept armed for the earliest pending deadline.
#[derive(Default)]
pub struct TimerKeeper {
    // Pending timers sorted by <expire, timerId> (Timer's Ord impl).
    pub TimerList: BTreeSet<Timer>,
    // The same timers indexed by <timerId> alone (Timer's Eq/Hash impls).
    pub TimerSet: HashSet<Timer>,
    // Host timer file descriptor programmed by SetTimer.
    pub timerfd: i32,
}
impl TimerKeeper {
    /// Create a keeper driving the given host `timerfd`.
    pub fn New(timerfd: i32) -> Self {
        return Self {
            timerfd: timerfd,
            ..Default::default()
        }
    }
    /// (Re)arm timer `timerId` to fire `expire` ns from now on `clockId`.
    ///
    /// Deadlines closer than 10µs are fired immediately via the shared
    /// space instead of going through the fd; otherwise the timer is
    /// queued, and the timerfd is reprogrammed only when this deadline
    /// becomes the new earliest one.
    pub fn ResetTimer(&mut self, clockId: i32, timerId: u64, seqNo: u64, expire: i64, ss: &'static ShareSpace) {
        let current = if clockId == CLOCK_MONOTONIC {
            HostTime::Monotime().unwrap()
        } else {
            HostTime::Realtime().unwrap()
        };
        // Too close to bother with the fd: deliver the fire message now.
        if expire < 10_000 {
            ss.AQHostInputCall(&HostInputMsg::FireTimer(FireTimer {
                TimerId: timerId,
                SeqNo: seqNo,
            }));
            return;
        }
        // Convert the relative timeout into an absolute deadline.
        let expire = expire + current;
        // Reprogram the fd only if this timer precedes the current earliest.
        let needSet = match self.TimerList.first() {
            None => true,
            Some(t) => {
                t.expire > expire
            },
        };
        self.Add(Timer {
            expire: expire,
            timerId: timerId,
            seqNo: seqNo
        });
        if needSet {
            self.SetTimer(expire);
        }
    }
    /// Program the timerfd with an absolute one-shot deadline in ns
    /// (split into tv_sec/tv_nsec). An `expire` of 0 disarms the fd.
    /// Panics if the `timerfd_settime` syscall fails.
    pub fn SetTimer(&mut self, expire: i64) {
        let E9 = 1_000_000_000;
        // Zero interval: one-shot, no periodic re-fire.
        let interval = timespec {
            tv_sec: 0,
            tv_nsec: 0,
        };
        let val = timespec {
            tv_sec: expire / E9,
            tv_nsec: expire % E9,
        };
        let newVal = itimerspec {
            it_interval: interval,
            it_value: val,
        };
        // SAFETY: plain libc call; `newVal` outlives the call and the
        // old-value out-pointer is intentionally null.
        let ret = unsafe {
            timerfd_settime(self.timerfd, TFD_TIMER_ABSTIME, &newVal, 0 as *mut itimerspec)
        };
        if ret == -1 {
            error!("panic: TimerKeeper::SetTimer fail..., timerfd is {}, error is {}", self.timerfd, errno::errno().0);
            panic!("TimerKeeper::SetTimer fail..., timerfd is {}, error is {}", self.timerfd, errno::errno().0);
        }
    }
    /// Fire every timer due at `now`, sending a FireTimer message for each,
    /// then re-arm the fd for the next deadline (or disarm when none left).
    /// Returns true when at least one message was sent.
    pub fn NextExpire(&mut self, now: i64, ss: &'static ShareSpace) -> bool {
        let mut hasMsg = false;
        self.Expire(now, |timer| {
            ss.AQHostInputCall(&HostInputMsg::FireTimer(FireTimer {
                TimerId: timer.timerId,
                SeqNo: timer.seqNo,
            }));
            // Timer id 1 doubles as the periodic perf-report trigger.
            if timer.timerId == 1 {
                PerfPrint();
            }
            hasMsg = true;
        });
        let next = match self.TimerList.first() {
            None => 0, //0 means cancel timer
            Some(t) => t.expire,
        };
        self.SetTimer(next);
        return hasMsg;
    }
    /// Track a timer in both indexes (by deadline and by id).
    pub fn Add(&mut self, t: Timer) {
        self.TimerList.insert(t);
        self.TimerSet.insert(t);
    }
    /// Pop timers in deadline order, invoking `f` for each timer whose
    /// deadline is at or before `now`, and remove them from both indexes.
    pub fn Expire(&mut self, now: i64, mut f: impl FnMut(&Timer)) {
        loop {
            let timer = match self.TimerList.first() {
                None => break,
                Some(t) => *t,
            };
            if timer.expire > now {
                break;
            }
            f(&timer);
            self.TimerList.remove(&timer);
            self.TimerSet.remove(&timer);
        }
    }
    /// Cancel a timer by id. A probe Timer with dummy expire/seqNo works
    /// because Timer equality/hashing use only `timerId`; the full entry is
    /// recovered from the HashSet before removal from the ordered list.
    pub fn StopTimer(&mut self, timerId: u64) {
        let t = self.TimerSet.get(&Timer {
            timerId: timerId,
            seqNo: 0,
            expire: 0,
        });
        let timer = match t {
            None => {
                return
            }
            Some(t) => {
                *t
            }
        };
        self.TimerList.remove(&timer);
        self.TimerSet.remove(&timer);
    }
} |
use std::{
convert::Infallible,
task::Poll
};
use tokio::sync::broadcast::Receiver;
/// One-shot long-polling HTTP body: holds a broadcast receiver until the
/// first poll hands it off to a background task (see the Body impl).
pub struct Polling {
    receiver: Option<Receiver<()>>,
}
impl Polling {
pub fn new(receiver: Receiver<()>) -> Self {
Polling {
receiver: Some(receiver),
}
}
}
impl http_body::Body for Polling {
    type Data = bytes::Bytes;
    type Error = Infallible;
    /// Long-poll behavior: the first poll moves the receiver into a spawned
    /// task that wakes this body once a message (or channel close) arrives;
    /// every subsequent poll reports end-of-stream. No data frames are ever
    /// produced.
    fn poll_data(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
        match self.receiver.take() {
            Some(mut receiver) => {
                // NOTE(review): the waker is captured once; if this body is
                // polled again before recv completes, the stream ends early
                // (receiver is already None) — confirm callers poll it from a
                // single task only.
                let waker = cx.waker().clone();
                tokio::spawn(async move {
                    receiver.recv().await.ok();
                    waker.wake();
                });
                Poll::Pending
            }
            None => Poll::Ready(None),
        }
    }
    /// This body never carries trailers.
    fn poll_trailers(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<Option<http::HeaderMap>, Self::Error>> {
        Poll::Ready(Ok(None))
    }
}
|
use bintree::Tree;
use P56::*;
/// P56 demo: print whether each of two sample trees is symmetric.
pub fn main() {
    // A simple two-leaf tree.
    let tree = Tree::node('a', Tree::leaf('b'), Tree::leaf('c'));
    let verdict = if is_symmetric(&tree) {
        "symmetric"
    } else {
        "not symmetric"
    };
    println!("{} is {}.", tree, verdict);
    // Both children carry their subtree on the same side.
    let tree = Tree::node(
        'a',
        Tree::node('b', Tree::leaf('d'), Tree::end()),
        Tree::node('c', Tree::leaf('e'), Tree::end()),
    );
    let verdict = if is_symmetric(&tree) {
        "symmetric"
    } else {
        "not symmetric"
    };
    println!("{} is {}.", tree, verdict);
}
|
// Legend:
// // pxyz -> problem xyz
// // hxyz -> helper for function xyz
use crate::solutions::Solution;
mod problems {
    use super::Solution;

    // p1: two-sum — covers distinct values and the duplicate-element case.
    #[test]
    #[ignore]
    fn p1_two_sum() {
        assert_eq!(Solution::two_sum(vec![2, 7, 11, 15], 9), vec![0, 1]);
        assert_eq!(Solution::two_sum(vec![3, 2, 4], 6), vec![1, 2]);
        assert_eq!(Solution::two_sum(vec![3, 3], 6), vec![0, 1]);
    }

    // p1920: build ans[i] = nums[nums[i]] for a permutation.
    #[test]
    #[ignore]
    fn p1920_array_from_perm() {
        assert_eq!(
            Solution::build_array(vec![0, 2, 1, 5, 3, 4]),
            vec![0, 1, 2, 4, 5, 3]
        );
        assert_eq!(
            Solution::build_array(vec![5, 0, 1, 2, 3, 4]),
            vec![4, 5, 0, 1, 2, 3]
        );
    }
}
// mod helpers {}
|
extern crate wasm_bindgen;
use wasm_bindgen::prelude::*;
// Imported from the JavaScript host: the browser's `alert` dialog.
#[wasm_bindgen]
extern {
    fn alert(s: &str);
}
/// Show a browser alert greeting `name`.
#[wasm_bindgen]
pub fn greet(name: &str) {
    let message = format!("Hello, {}!", name);
    alert(&message);
}
/// Return the sum of `a` and `b`.
#[wasm_bindgen]
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}
|
extern crate rapier2d as rapier; // For the debug UI.
use bevy::prelude::*;
use bevy::render::pass::ClearColor;
use bevy_rapier2d::physics::{RapierPhysicsPlugin, RapierPhysicsScale};
use bevy_rapier2d::render::RapierRenderPlugin;
use rapier2d::dynamics::RigidBodyBuilder;
use rapier2d::geometry::ColliderBuilder;
use rapier2d::pipeline::PhysicsPipeline;
use ui::DebugUiPlugin;
#[path = "../../src_debug_ui/mod.rs"]
mod ui;
/// Configure the Bevy app: near-white clear color, 2x MSAA, the Rapier
/// physics + debug-render plugins, and the startup systems below.
fn main() {
    App::build()
        .add_resource(ClearColor(Color::rgb(
            0xF9 as f32 / 255.0,
            0xF9 as f32 / 255.0,
            0xFF as f32 / 255.0,
        )))
        .add_resource(Msaa { samples: 2 })
        .add_default_plugins()
        .add_plugin(RapierPhysicsPlugin)
        .add_plugin(RapierRenderPlugin)
        .add_plugin(DebugUiPlugin)
        .add_startup_system(setup_graphics.system())
        .add_startup_system(setup_physics.system())
        .add_startup_system(enable_physics_profiling.system())
        .run();
}
/// Turn on the physics pipeline's internal counters so the debug UI can
/// display profiling data.
fn enable_physics_profiling(mut pipeline: ResMut<PhysicsPipeline>) {
    pipeline.counters.enable()
}
/// Set the physics-to-render scale and spawn a light plus a 2D camera.
fn setup_graphics(mut commands: Commands, mut scale: ResMut<RapierPhysicsScale>) {
    // 10 render units per physics unit.
    scale.0 = 10.0;
    commands
        .spawn(LightComponents {
            translation: Translation::new(1000.0, 100.0, 2000.0),
            ..Default::default()
        })
        .spawn(Camera2dComponents {
            translation: Translation::new(0.0, 200.0, 0.0),
            ..Camera2dComponents::default()
        });
}
/// Spawn the demo scene: a static floor with two rotated static slabs as
/// side walls, plus a 20 x 100 grid of dynamic unit cubes dropped above.
pub fn setup_physics(mut commands: Commands) {
    /*
     * Ground
     */
    let ground_size = 25.0;
    commands.spawn((
        RigidBodyBuilder::new_static(),
        ColliderBuilder::cuboid(ground_size, 1.2),
    ));
    // Right wall: rotated a quarter turn and shifted to the side.
    let wall = RigidBodyBuilder::new_static()
        .rotation(std::f32::consts::FRAC_PI_2)
        .translation(ground_size, ground_size * 2.0);
    commands.spawn((wall, ColliderBuilder::cuboid(ground_size * 2.0, 1.2)));
    // Left wall: mirror of the right one.
    let wall = RigidBodyBuilder::new_static()
        .rotation(std::f32::consts::FRAC_PI_2)
        .translation(-ground_size, ground_size * 2.0);
    commands.spawn((wall, ColliderBuilder::cuboid(ground_size * 2.0, 1.2)));
    /*
     * Create the cubes
     */
    let num: usize = 20;
    let rad = 0.5;
    let spacing = rad * 2.0;
    let center_x = spacing * (num as f32) / 2.0;
    let center_y = spacing / 2.0;
    for col in 0..num {
        for row in 0..num * 5 {
            let x = col as f32 * spacing - center_x;
            let y = row as f32 * spacing + center_y + 2.0;
            // Build the rigid body with its unit collider.
            commands.spawn((
                RigidBodyBuilder::new_dynamic().translation(x, y),
                ColliderBuilder::cuboid(rad, rad).density(1.0),
            ));
        }
    }
}
|
include!(concat!(env!("OUT_DIR"), "/audio.rs"));
impl Sound {
    /// Ask the JS audio backend to play this sound effect, using the enum
    /// discriminant as the id. (`Sound` comes from the generated `audio.rs`
    /// — presumably the JS side uses matching ids; confirm in the build
    /// script.)
    pub fn play(&self) {
        let id = self.clone() as u32;
        js! { audio.play_sound(@{id}); }
    }
}
impl Music {
    /// Ask the JS audio backend to start this music track, using the enum
    /// discriminant as the id (same id scheme as `Sound::play`).
    pub fn play(&self) {
        let id = self.clone() as u32;
        js! { audio.play_music(@{id}); }
    }
}
|
use super::hint;
/// Identifiers for the lint rules this tool can report.
#[derive(Hash, Eq, PartialEq, Debug, Clone, Copy)]
pub enum RuleCode {
    Unsafe,
    UseUnsafe,
    Unwrap,
    Expect,
    IndexExpression,
}

/// Render the code as its variant name (e.g. `"Unwrap"`).
///
/// Implemented via `Display` rather than a direct `ToString` impl (clippy:
/// `to_string_trait_impl`); callers of `.to_string()` keep working through
/// the blanket `ToString` impl, and the code is now usable in `format!`
/// with `{}` as well.
impl std::fmt::Display for RuleCode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Reuse the derived Debug output, which is exactly the variant name.
        write!(f, "{:?}", self)
    }
}
/// A lint rule: its code, a human-readable description, and an optional
/// remediation hint.
#[derive(Debug, Copy, Clone)]
pub struct Rule {
    pub code: RuleCode,
    pub desc: &'static str,
    pub hint: Option<&'static str>,
}
/// Any use of the `unsafe` keyword itself.
pub static RULE_UNSAFE_CODE: Rule = Rule {
    code: RuleCode::Unsafe,
    desc: "Unsafe keyword is forbidden.",
    hint: None,
};
/// Calling into an identifier named like an unsafe function or macro.
pub static RULE_USE_UNSAFE: Rule = Rule {
    code: RuleCode::UseUnsafe,
    desc: "using unsafe identifier like function or macro is forbidden.",
    hint: Some(hint::UNSAFE_HINT),
};
/// `.unwrap()` calls, which panic on None/Err.
pub static RULE_UNWRAP_CALL: Rule = Rule {
    code: RuleCode::Unwrap,
    desc: "Unwrap call may panic.",
    hint: Some(hint::FUNCTOR_HINT),
};
/// `.expect()` calls, which panic on None/Err.
pub static RULE_EXPECT_CALL: Rule = Rule {
    code: RuleCode::Expect,
    desc: "expect call may panic.",
    hint: Some(hint::FUNCTOR_HINT),
};
/// Index expressions (`a[i]`), which panic out of bounds.
pub static RULE_INDEX_EXPRESSION: Rule = Rule {
    code: RuleCode::IndexExpression,
    desc: "index operation may panic, use get method instead.",
    hint: Some(hint::INDEX_EXPR_HINT),
};
/// All rules, in reporting order.
pub static RULES: [Rule; 5] = [
    RULE_UNSAFE_CODE,
    RULE_USE_UNSAFE,
    RULE_UNWRAP_CALL,
    RULE_EXPECT_CALL,
    RULE_INDEX_EXPRESSION,
];
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
use std::any::TypeId;
use std::rc::Rc;
// Shared-pointer alias used for component handles.
type Fp<T> = Rc<T>;
struct Engine;
// Components must be 'static so they can be looked up by TypeId.
trait Component: 'static {}
impl Component for Engine {}
trait Env {
    // Look up a component instance by its TypeId, if registered.
    fn get_component_type_id(&self, type_id: TypeId) -> Option<Fp<Component>>;
}
// Inherent methods on the bare trait-object type (pre-`dyn Trait` syntax,
// kept verbatim: this file is a compiler run-pass test for exactly this
// form).
impl<'a> Env + 'a {
    fn get_component<T: Component>(&self) -> Option<Fp<T>> {
        // The lookup result is intentionally discarded; only the call's
        // type-checking matters for the test.
        let x = self.get_component_type_id(TypeId::of::<T>());
        None
    }
}
trait Figment {
    // Initialize against an environment passed as a bare trait object.
    fn init(&mut self, env: &Env);
}
struct MyFigment;
impl Figment for MyFigment {
    fn init(&mut self, env: &Env) {
        // Exercises calling the inherent generic method on `Env + 'a`.
        let engine = env.get_component::<Engine>();
    }
}
// run-pass test: compiling (and running to completion) is the assertion.
fn main() {}
|
use glam::Vec2;
use itertools::Itertools;
use legion::Entity;
use parking_lot::RwLock;
// Per-cell storage: each entity in the cell along with its exact position.
type Cell = Vec<(Vec2, Entity)>;
/// Width of `u32` in bits (32).
const U32_SIZE: u32 = u32::BITS;

/// Floor of log2(`x`) for `x > 0`, derived from the leading-zero count.
/// Debug builds assert the precondition (log2(0) is undefined).
fn log_2(x: u32) -> u32 {
    debug_assert!(x > 0);
    U32_SIZE - 1 - x.leading_zeros()
}
/// A uniform spatial-hash grid over 2D positions. Both the number of cells
/// per side and the cell size are powers of two, so all indexing reduces to
/// shifts and masks.
pub struct DenseGrid {
    /// log2(ncells_side)
    log2_side: u32,
    /// log2(cell_size)
    log2_cell: u32,
    // Row-major cells, each independently lockable so concurrent inserts
    // into different cells don't contend.
    cells: Vec<RwLock<Cell>>,
}
impl DenseGrid {
    /// Build a grid covering a `side_len` x `side_len` area with square
    /// cells of `cell_size`; both must be powers of two.
    pub fn new(cell_size: u32, side_len: u32) -> Self {
        assert!(side_len.is_power_of_two());
        assert!(cell_size.is_power_of_two());
        let ncells_side = side_len / cell_size;
        Self {
            log2_side: log_2(ncells_side),
            log2_cell: log_2(cell_size),
            cells: (0..(ncells_side * ncells_side))
                .map(|_| RwLock::new(Cell::default()))
                .collect(),
        }
    }
    /// Record `entity` at `pos`; positions mapping outside the cell array
    /// are silently dropped.
    pub fn insert(&self, pos: Vec2, entity: Entity) {
        if let Some(cell) = self.cells.get(self.flat_ind(pos)) {
            cell.write().push((pos, entity));
        }
    }
    /// Row-major cell index for `pos` (shift-based divide by cell size).
    #[inline]
    pub fn flat_ind(&self, pos: Vec2) -> usize {
        let x = (pos.x as u32) >> self.log2_cell;
        let y = (pos.y as u32) >> self.log2_cell;
        ((y << self.log2_side) | x) as usize
    }
    /// Empty every cell, keeping allocations for reuse.
    pub fn clear(&mut self) {
        self.cells.iter_mut().for_each(|cell| cell.write().clear());
    }
    /// Return all entities within `radius` of `pos`, excluding `ignore`.
    pub fn query(&self, pos: Vec2, radius: f32, ignore: Entity) -> Vec<Entity> {
        let radius2 = radius.powi(2);
        let mut hits = Vec::with_capacity(4);
        for ind in self.cell_range(pos, radius) {
            if let Some(cell) = self.cells.get(ind as usize) {
                // We know this is at a read only stage. Safe to disregard lock
                // NOTE(review): this bypasses the RwLock entirely — confirm no
                // writer can run concurrently with query().
                let unlocked = unsafe { cell.data_ptr().as_ref().unwrap() };
                hits.extend(unlocked.iter().filter_map(|(other, id)| {
                    // Non-short-circuiting `&`: both conditions are evaluated.
                    match (*id != ignore) & (pos.distance_squared(*other) < radius2) {
                        true => Some(*id),
                        false => None,
                    }
                }));
            }
        }
        hits
    }
    /// Indices of all cells overlapping the axis-aligned square of
    /// half-extent `radius` around `pos`. The f32->u32 casts saturate at 0,
    /// so positions near the low edges clamp rather than wrap.
    pub fn cell_range(&self, pos: Vec2, radius: f32) -> impl Iterator<Item = u32> {
        let x1 = ((pos.x - radius) as u32) >> self.log2_cell;
        let y1 = ((pos.y - radius) as u32) >> self.log2_cell;
        let x2 = ((pos.x + radius) as u32) >> self.log2_cell;
        let y2 = ((pos.y + radius) as u32) >> self.log2_cell;
        let shift = self.log2_side;
        (x1..=x2).cartesian_product(y1..=y2).map(move |(x, y)| (y << shift) | x)
    }
}
|
extern crate stdweb;
#[macro_use]
extern crate yew;
use yew::prelude::*;
/// Root Yew component; holds the greeting prefix shown in the view.
pub struct Model {
    message: String,
}
/// This component produces no messages.
pub enum Msg {}
impl Component for Model {
    type Message = Msg;
    type Properties = ();
    /// Build the model with its initial greeting text.
    fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
        Model {
            message: String::from("Hello ")
        }
    }
    /// No messages exist; any update simply requests a re-render.
    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }
}
impl Renderable<Model> for Model {
    /// Render the greeting: the stored prefix followed by "World!".
    fn view(&self) -> Html<Self> {
        html! {
            <div class="greeting">
                <h2> {&format!("{} World!", self.message)}</h2>
            </div>
        }
    }
}
|
use std::collections::HashMap;
use ring_queue::Ring;
// Puzzle input; expected to contain two integers (players, points) —
// see parse_input.
const INPUT: &str = include_str!("../../input/09");
/// Pull the first two integers out of `input`, e.g.
/// "13 players; last marble is worth 7999 points" -> (13, 7999).
fn parse_input(input: &str) -> (usize, usize) {
    let mut numbers = input.split(' ').filter_map(|token| token.parse().ok());
    (numbers.next().unwrap(), numbers.next().unwrap())
}
/// Play the marble game (AoC 2018 day 9) with `players` players up to
/// marble `points`, returning the winning score.
fn solution(players: usize, points: usize) -> usize {
    // keep the head (end) of the queue as the current
    // rotate around the head
    let mut ring: Ring<usize> = Ring::with_capacity(points);
    // player -> score
    let mut scores: HashMap<usize, usize> = HashMap::with_capacity(players);
    // assuming always at least two turns
    ring.push(0);
    ring.push(1);
    for marble in 2..points + 1 {
        if marble % 23 == 0 {
            // Scoring turn: rotate(7) presumably brings the marble seven
            // positions back to the head so it can be popped — confirm
            // against ring_queue's rotate direction.
            ring.rotate(7);
            let marble7 = ring.pop();
            ring.rotate(-1);
            // up score: current player gets the played marble plus the
            // removed one.
            if let Some(m7) = marble7 {
                *scores.entry(marble % players).or_insert(0) += marble + m7;
            } else {
                panic!("Unable to rot 7.");
            }
        } else {
            // Normal turn: step once and place the marble at the head.
            ring.rotate(-1);
            ring.push(marble);
        }
        // println!("ring: {:?}", ring);
    }
    // Manual scan for the maximum score.
    let mut maximum = 0;
    for (_k, v) in scores {
        if v > maximum {
            maximum = v;
        }
    }
    // why does *v with the max() below not work?
    // (iter().max() on a map compares (key, value) pairs — keys first —
    // so values().max() would be the right call.)
    // let (_k, v) = scores.iter().max().unwrap();
    maximum
}
/// Parse the puzzle input, echo the parameters, and print the winning score.
fn main() {
    let (players, points) = parse_input(INPUT);
    println!("{}, {}", players, points);
    let sol = solution(players, points);
    println!("solution: {}", sol);
}
#[cfg(test)]
mod tests {
    use super::*;
    // Worked example plus the sample high-score table from the puzzle text.
    #[test]
    fn test_example() {
        let (ex_players, ex_points) = (9, 25);
        let expected: usize = 32;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
    #[test]
    fn test_example_1() {
        let (ex_players, ex_points) = (10, 1618);
        let expected: usize = 8317;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
    #[test]
    fn test_example_2() {
        let (ex_players, ex_points) = (13, 7999);
        let expected: usize = 146373;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
    #[test]
    fn test_example_3() {
        let (ex_players, ex_points) = (17, 1104);
        let expected: usize = 2764;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
    #[test]
    fn test_example_4() {
        let (ex_players, ex_points) = (21, 6111);
        let expected: usize = 54718;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
    #[test]
    fn test_example_5() {
        let (ex_players, ex_points) = (30, 5807);
        let expected: usize = 37305;
        assert_eq!(expected, solution(ex_players, ex_points));
    }
}
|
// This implementation returns mutable raw pointers to the structure's fields,
// since we saw earlier that handing out multiple mutable references is not possible.
use std::iter::IntoIterator;
use std::iter::Iterator;
/// A plain five-field struct used to demonstrate field-by-field iteration
/// through raw pointers.
pub struct NewStruct<T> {
    field1: T,
    field2: T,
    field3: T,
    field4: T,
    field5: T,
}
/// Iterator state: the number of fields yielded so far, plus the exclusive
/// borrow that keeps the struct alive while raw field pointers are handed
/// out.
pub struct NewStructMutRef<'a, T>
where T: 'a {
    count: usize,
    new_struct: &'a mut NewStruct<T>,
}
impl<'a, T> IntoIterator for &'a mut NewStruct<T> {
    type Item = *mut T;
    type IntoIter = NewStructMutRef<'a, T>;

    /// Wrap the struct in a cursor positioned before the first field.
    fn into_iter(self) -> Self::IntoIter {
        NewStructMutRef {
            count: 0,
            new_struct: self,
        }
    }
}
impl<'a, T> Iterator for NewStructMutRef<'a, T> {
    type Item = *mut T;

    /// Advance the cursor and return a raw pointer to the next field, or
    /// `None` once all five fields have been visited.
    fn next(&mut self) -> Option<Self::Item> {
        self.count += 1;
        let fields = &mut *self.new_struct;
        match self.count {
            1 => Some(&mut fields.field1 as *mut T),
            2 => Some(&mut fields.field2 as *mut T),
            3 => Some(&mut fields.field3 as *mut T),
            4 => Some(&mut fields.field4 as *mut T),
            5 => Some(&mut fields.field5 as *mut T),
            _ => None,
        }
    }
}
/// Demonstrate that the pointer-yielding iterator can overwrite every field,
/// both for a Copy payload and for a heap-allocated one.
fn main() {
    let mut plain = NewStruct {
        field1: 1i32,
        field2: 2,
        field3: 3,
        field4: 4,
        field5: 5,
    };
    let mut boxed = NewStruct {
        field1: Box::new(1i32),
        field2: Box::new(2),
        field3: Box::new(3),
        field4: Box::new(4),
        field5: Box::new(5),
    };
    // SAFETY(review): each pointer targets a live field of a struct we
    // exclusively borrow; no other references exist during the writes.
    for field_ptr in &mut plain {
        unsafe {
            *field_ptr = 10;
        }
    }
    for field_ptr in &mut boxed {
        unsafe {
            *field_ptr = Box::new(10);
        }
    }
    println!("{}", plain.field1); // output: 10
    println!("{}", plain.field5); // output: 10
    println!("{}", *boxed.field1); // output: 10
    println!("{}", *boxed.field5); // output: 10
}
|
use crate::ast::Node;
use crate::lexer::{Lexer, Token, TokenKind};
/// Pratt (top-down operator precedence) parser over a token stream.
pub struct Parser<'a> {
    lexer: Lexer<'a>,
}
impl<'a> Parser<'a> {
pub fn new(lexer: Lexer<'a>) -> Parser<'a> {
Parser { lexer }
}
pub fn bp(&self, t: &Token) -> usize {
match t.kind {
TokenKind::RParen => 0,
TokenKind::Plus => 10,
TokenKind::Minus => 10,
TokenKind::Times => 20,
TokenKind::Divide => 20,
TokenKind::Exponent => 30,
TokenKind::LParen => 40,
_ => 100,
}
}
pub fn nud(&mut self, t: Box<Token>, _bp: usize) -> Result<Node, String> {
match t.kind {
TokenKind::Number => Ok(Node::Number(t.value.parse::<f64>().unwrap())),
TokenKind::Plus | TokenKind::Minus => {
let right = self.expr(0)?;
Ok(Node::Unary(t, Box::new(right)))
}
TokenKind::Identifier => match t.value.as_str() {
"e" => Ok(Node::Number(std::f64::consts::E)),
"pi" => Ok(Node::Number(std::f64::consts::PI)),
_ => {
self.lexer.expect(TokenKind::LParen)?;
let arg = self.expr(0)?;
self.lexer.expect(TokenKind::RParen)?;
Ok(Node::Application(t.value, Box::new(arg)))
}
},
TokenKind::LParen => {
let right = self.expr(0)?;
match self.lexer.next() {
Some(ref t) if t.kind == TokenKind::RParen => Ok(right),
_ => Err("Expected ')'".to_owned()),
}
}
_ => Err(t.error()),
}
}
pub fn led(&mut self, left: Node, op: Box<Token>, bp: usize) -> Result<Node, String> {
match op.kind {
TokenKind::Plus | TokenKind::Minus | TokenKind::Times | TokenKind::Divide => {
let right = self.expr(bp)?;
Ok(Node::Binary(Box::new(left), op, Box::new(right)))
}
TokenKind::Exponent => {
let right = self.expr(bp - 1)?;
Ok(Node::Binary(Box::new(left), op, Box::new(right)))
}
_ => Err(op.error()),
}
}
pub fn expr(&mut self, rbp: usize) -> Result<Node, String> {
let err = "Undexpected EOF";
let first_t = self.lexer.next().ok_or(err)?;
let first_t_bp = self.bp(&first_t);
let mut left = self.nud(first_t, first_t_bp)?;
if self.lexer.peek().is_none() {
return Ok(left);
}
let mut peeked = self.lexer.peek_owned();
while peeked.is_some() && rbp < self.bp(&peeked.unwrap()) {
let op = self.lexer.next().ok_or(err)?;
let op_bp = self.bp(&op);
left = self.led(left, op, op_bp)?;
if self.lexer.peek().is_none() {
break;
}
peeked = self.lexer.peek_owned();
}
Ok(left)
}
pub fn parse(&mut self) -> Result<Node, String> {
let result = self.expr(0);
match self.lexer.next() {
Some(tkn) => Err(tkn.error()),
None => result,
}
}
}
#[cfg(test)]
mod tests {
    use crate::ast::{eval, Node};
    use crate::lexer::{Lexer, Token, TokenKind};
    use crate::parser::Parser;
    // A bare number parses to a Number leaf.
    #[test]
    fn number() {
        let ast = Parser::new(Lexer::new("1")).expr(0).unwrap();
        assert_eq!(ast, Node::Number(1_f64));
    }
    // `*` binds tighter than `+`: 1+(2*3).
    #[test]
    fn plus_times() {
        let ast = Parser::new(Lexer::new("1+2*3")).expr(0).unwrap();
        assert_eq!(
            ast,
            Node::Binary(
                Box::new(Node::Number(1_f64)),
                Box::new(Token::new(TokenKind::Plus, "+".to_owned(), 1, 2, 1, 2)),
                Box::new(Node::Binary(
                    Box::new(Node::Number(2_f64)),
                    Box::new(Token::new(TokenKind::Times, "*".to_owned(), 3, 4, 1, 4)),
                    Box::new(Node::Number(3_f64))
                ))
            )
        );
    }
    // Same precedence check with the operators reversed: (1*2)+3.
    #[test]
    fn times_plus() {
        let ast = Parser::new(Lexer::new("1*2+3")).expr(0).unwrap();
        assert_eq!(
            ast,
            Node::Binary(
                Box::new(Node::Binary(
                    Box::new(Node::Number(1_f64)),
                    Box::new(Token::new(TokenKind::Times, "*".to_owned(), 1, 2, 1, 2)),
                    Box::new(Node::Number(2_f64))
                )),
                Box::new(Token::new(TokenKind::Plus, "+".to_owned(), 3, 4, 1, 4)),
                Box::new(Node::Number(3_f64)),
            )
        );
    }
    // Parentheses override precedence: 1*(2+3).
    #[test]
    fn parens() {
        let ast = Parser::new(Lexer::new("1*(2+3)")).expr(0).unwrap();
        assert_eq!(
            ast,
            Node::Binary(
                Box::new(Node::Number(1_f64)),
                Box::new(Token::new(TokenKind::Times, "*".to_owned(), 1, 2, 1, 2)),
                Box::new(Node::Binary(
                    Box::new(Node::Number(2_f64)),
                    Box::new(Token::new(TokenKind::Plus, "+".to_owned(), 4, 5, 1, 5)),
                    Box::new(Node::Number(3_f64)),
                )),
            )
        );
    }
    // Exponent is right-associative: 1^(2^3).
    #[test]
    fn rassoc() {
        let ast = Parser::new(Lexer::new("1^2^3")).expr(0).unwrap();
        assert_eq!(
            ast,
            Node::Binary(
                Box::new(Node::Number(1_f64)),
                Box::new(Token::new(TokenKind::Exponent, "^".to_owned(), 1, 2, 1, 2)),
                Box::new(Node::Binary(
                    Box::new(Node::Number(2_f64)),
                    Box::new(Token::new(TokenKind::Exponent, "^".to_owned(), 3, 4, 1, 4)),
                    Box::new(Node::Number(3_f64)),
                )),
            )
        );
    }
    // Named constants resolve during nud().
    #[test]
    fn pi() {
        let ast = Parser::new(Lexer::new("pi")).expr(0).unwrap();
        assert_eq!(ast, Node::Number(std::f64::consts::PI));
    }
    #[test]
    fn e() {
        let ast = Parser::new(Lexer::new("e")).expr(0).unwrap();
        assert_eq!(ast, Node::Number(std::f64::consts::E));
    }
    // Function application parses and evaluates end-to-end.
    #[test]
    fn func() {
        let ast = Parser::new(Lexer::new("sin(pi/6)")).expr(0).unwrap();
        assert_eq!(eval(ast).unwrap(), (std::f64::consts::PI / 6_f64).sin());
    }
}
|
mod env_page;
mod env_entry_page;
pub use env_page::{EnvMsg, EnvPage};
pub use env_entry_page::*; |
mod content_hasher;
mod directory_scanner;
mod extent_hasher;
mod file_deduper;
pub use self::content_hasher::*;
pub use self::directory_scanner::*;
pub use self::extent_hasher::*;
pub use self::file_deduper::*;
// ex: noet ts=4 filetype=rust
|
//! HTTP Versions enum
//!
//! Instead of relying on typo-prone Strings, use expected HTTP versions as
//! the `HttpVersion` enum.
use std::fmt;
use self::HttpVersion::{Http09, Http10, Http11, H2, H2c};
/// Represents a version of the HTTP spec.
#[derive(PartialEq, PartialOrd, Copy, Clone, Eq, Ord, Hash, Debug)]
pub enum HttpVersion {
    /// `HTTP/0.9`
    Http09,
    /// `HTTP/1.0`
    Http10,
    /// `HTTP/1.1`
    Http11,
    /// `HTTP/2.0` over TLS
    H2,
    /// `HTTP/2.0` over cleartext
    H2c,
    #[doc(hidden)]
    __DontMatchMe,
}

impl fmt::Display for HttpVersion {
    /// Write the on-the-wire protocol string (e.g. `HTTP/1.1`, `h2c`).
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            HttpVersion::Http09 => "HTTP/0.9",
            HttpVersion::Http10 => "HTTP/1.0",
            HttpVersion::Http11 => "HTTP/1.1",
            HttpVersion::H2 => "h2",
            HttpVersion::H2c => "h2c",
            HttpVersion::__DontMatchMe => unreachable!(),
        };
        fmt.write_str(name)
    }
}

impl Default for HttpVersion {
    /// HTTP/1.1 is assumed when no version is specified.
    fn default() -> HttpVersion {
        HttpVersion::Http11
    }
}
|
#[macro_use]
extern crate afl;
extern crate vedirect;
use vedirect::{Events, VEError};
// Fuzz-harness callbacks: log whatever the VE.Direct parser reports.
struct Listener;
impl Events<vedirect::Bmv700> for Listener {
    fn on_complete_block(&mut self, block: vedirect::Bmv700) {
        println!("Mapped data {:#?}", &block);
    }
    fn on_parse_error(&mut self, error: VEError, _parse_buf: &[u8]) {
        println!("Parse error {:#?}", &error);
    }
}
/// AFL entry point: feed each fuzz input to a fresh parser. Parse errors are
/// routed through the listener rather than crashing the harness.
fn main() {
    fuzz!(|data: &[u8]| {
        let mut listener = Listener {};
        let mut parser = vedirect::Parser::new(&mut listener);
        let _ = parser.feed(data);
    });
}
|
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use common_expression::types::DataType;
use common_expression::types::NumberDataType;
use common_expression::DataField;
use common_expression::DataSchema;
use common_expression::DataSchemaRef;
use common_expression::DataSchemaRefExt;
use common_meta_app::principal::AuthInfo;
use common_meta_app::principal::GrantObject;
use common_meta_app::principal::PrincipalIdentity;
use common_meta_app::principal::UserIdentity;
use common_meta_app::principal::UserOption;
use common_meta_app::principal::UserPrivilegeSet;
/// Plan for `CREATE USER`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CreateUserPlan {
    pub user: UserIdentity,
    pub auth_info: AuthInfo,
    pub user_option: UserOption,
    pub if_not_exists: bool,
}
impl CreateUserPlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `ALTER USER`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct AlterUserPlan {
    pub user: UserIdentity,
    // None means no change to make
    pub auth_info: Option<AuthInfo>,
    pub user_option: Option<UserOption>,
}
impl AlterUserPlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `DROP USER`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DropUserPlan {
    pub if_exists: bool,
    pub user: UserIdentity,
}
impl DropUserPlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `CREATE ROLE`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct CreateRolePlan {
    pub if_not_exists: bool,
    pub role_name: String,
}
impl CreateRolePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `DROP ROLE`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DropRolePlan {
    pub if_exists: bool,
    pub role_name: String,
}
impl DropRolePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `GRANT ROLE ... TO <principal>`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct GrantRolePlan {
    pub principal: PrincipalIdentity,
    pub role: String,
}
impl GrantRolePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `SHOW GRANTS [FOR <principal>]`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ShowGrantsPlan {
    // None means the current principal.
    pub principal: Option<PrincipalIdentity>,
}
impl ShowGrantsPlan {
    /// One string column ("Grants") per grant entry.
    pub fn schema(&self) -> DataSchemaRef {
        DataSchemaRefExt::create(vec![DataField::new("Grants", DataType::String)])
    }
}
/// Plan for `REVOKE ROLE ... FROM <principal>`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RevokeRolePlan {
    pub principal: PrincipalIdentity,
    pub role: String,
}
impl RevokeRolePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `SET ROLE`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SetRolePlan {
    pub is_default: bool,
    pub role_name: String,
}
impl SetRolePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `SHOW ROLES`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ShowRolesPlan {}
impl ShowRolesPlan {
    /// Columns: role name, inherited-role count, and the current/default
    /// flags.
    pub fn schema(&self) -> DataSchemaRef {
        DataSchemaRefExt::create(vec![
            DataField::new("name", DataType::String),
            DataField::new("inherited_roles", DataType::Number(NumberDataType::UInt64)),
            DataField::new("is_current", DataType::Boolean),
            DataField::new("is_default", DataType::Boolean),
        ])
    }
}
/// Plan for `GRANT <privileges> ON <object> TO <principal>`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct GrantPrivilegePlan {
    pub principal: PrincipalIdentity,
    pub priv_types: UserPrivilegeSet,
    pub on: GrantObject,
}
impl GrantPrivilegePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
/// Plan for `REVOKE <privileges> ON <object> FROM <principal>`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RevokePrivilegePlan {
    pub principal: PrincipalIdentity,
    pub priv_types: UserPrivilegeSet,
    pub on: GrantObject,
}
impl RevokePrivilegePlan {
    /// DDL: produces no result rows, so the schema is empty.
    pub fn schema(&self) -> DataSchemaRef {
        Arc::new(DataSchema::empty())
    }
}
|
use crate::{
app::{
config::{self, HelicityType, Rgba, StormMotionType},
sample::Sample,
AppContext, AppContextPointer, ZoomableDrawingAreas,
},
coords::{SDCoords, ScreenCoords, ScreenRect, XYCoords},
errors::SondeError,
gui::{
plot_context::{GenericContext, HasGenericContext, PlotContext, PlotContextExt},
utility::{check_overlap_then_add, draw_filled_polygon, plot_curve_from_points},
Drawable, DrawingArgs, MasterDrawable,
},
};
use gtk::{
gio::{SimpleAction, SimpleActionGroup},
prelude::*,
DrawingArea, EventControllerKey, EventControllerMotion, EventControllerScroll,
EventControllerScrollFlags, GestureClick, Inhibit, Window,
};
use itertools::izip;
use metfor::{Knots, Meters, Quantity, WindSpdDir, WindUV};
use sounding_analysis::DataRow;
use std::{iter::once, rc::Rc};
/// Plot context for the hodograph drawing area; wraps the shared
/// `GenericContext` used by all zoomable plots.
pub struct HodoContext {
    generic: GenericContext,
}
impl HodoContext {
    pub fn new() -> Self {
        HodoContext {
            generic: GenericContext::new(),
        }
    }
    /// Map speed/direction coordinates into the unit XY square: the wind's
    /// u/v components are scaled by 2 * MAX_SPEED and offset so calm winds
    /// land at the center (0.5, 0.5).
    pub fn convert_sd_to_xy(coords: SDCoords) -> XYCoords {
        let WindUV { u, v } = WindUV::<Knots>::from(coords.spd_dir);
        let x = u / (config::MAX_SPEED * 2.0) + 0.5;
        let y = v / (config::MAX_SPEED * 2.0) + 0.5;
        XYCoords { x, y }
    }
    /// Convenience: speed/direction -> XY -> screen coordinates.
    pub fn convert_sd_to_screen(&self, coords: SDCoords) -> ScreenCoords {
        let xy = HodoContext::convert_sd_to_xy(coords);
        self.convert_xy_to_screen(xy)
    }
}
impl HasGenericContext for HodoContext {
    fn get_generic_context(&self) -> &GenericContext {
        &self.generic
    }
}
// The default PlotContextExt methods are sufficient for the hodograph.
impl PlotContextExt for HodoContext {}
impl Drawable for HodoContext {
/***********************************************************************************************
* Initialization
**********************************************************************************************/
fn set_up_drawing_area(acp: &AppContextPointer) -> Result<(), SondeError> {
let da: DrawingArea = acp.fetch_widget("hodograph_area")?;
// Set up the drawing function.
let ac = Rc::clone(acp);
da.set_draw_func(move |_da, cr, _width, _height| {
ac.hodo.draw_callback(cr, &ac);
});
// Set up the scroll (or zoom in/out) callbacks.
let ac = Rc::clone(acp);
let scroll_control = EventControllerScroll::new(EventControllerScrollFlags::VERTICAL);
scroll_control.connect_scroll(move |_scroll_control, _dx, dy| {
ac.mark_background_dirty();
ac.hodo.scroll_event(dy, &ac);
Inhibit(true)
});
da.add_controller(scroll_control);
// Set up the button clicks.
let left_mouse_button = GestureClick::builder().build();
let ac = Rc::clone(acp);
left_mouse_button.connect_pressed(move |_mouse_button, _n_pressed, x, y| {
ac.hodo.left_button_press_event((x, y), &ac);
});
let ac = Rc::clone(acp);
left_mouse_button.connect_released(move |_mouse_button, _n_press, x, y| {
ac.hodo.left_button_release_event((x, y), &ac);
});
da.add_controller(left_mouse_button);
let right_mouse_button = GestureClick::builder().button(3).build();
let ac = Rc::clone(acp);
right_mouse_button.connect_released(move |_mouse_button, _n_press, x, y| {
ac.hodo.right_button_release_event((x, y), &ac);
});
da.add_controller(right_mouse_button);
// Set up the mouse motion events
let mouse_motion = EventControllerMotion::new();
let ac = Rc::clone(acp);
mouse_motion.connect_motion(move |mouse_motion, x, y| {
ac.hodo.mouse_motion_event(mouse_motion, (x, y), &ac);
});
let ac = Rc::clone(acp);
mouse_motion.connect_enter(move |_mouse_motion, _x, _y| {
ac.hodo.enter_event(&ac);
});
let ac = Rc::clone(acp);
mouse_motion.connect_leave(move |_mouse_motion| {
ac.hodo.leave_event(&ac);
});
da.add_controller(mouse_motion);
// Set up the key presses.
let key_press = EventControllerKey::new();
let ac = Rc::clone(acp);
key_press.connect_key_pressed(move |_key_press, key, _code, _key_modifier| {
HodoContext::key_press_event(key, &ac)
});
da.add_controller(key_press);
let ac = Rc::clone(acp);
da.connect_resize(move |da, width, height| {
// TODO merge below methods into one.
ac.hodo.size_allocate_event(da);
ac.hodo.resize_event(width, height, &ac);
});
build_hodograph_area_context_menu(acp)?;
Ok(())
}
/***********************************************************************************************
* Background Drawing.
**********************************************************************************************/
fn draw_background_fill(&self, args: DrawingArgs<'_, '_>) {
let (cr, config) = (args.cr, args.ac.config.borrow());
if config.show_background_bands {
let mut do_draw = true;
let rgba = config.background_band_rgba;
cr.set_source_rgba(rgba.0, rgba.1, rgba.2, rgba.3);
for pnts in config::ISO_SPEED_PNTS.iter() {
let mut pnts = pnts
.iter()
.map(|xy_coords| self.convert_xy_to_screen(*xy_coords));
if let Some(pnt) = pnts.by_ref().next() {
cr.move_to(pnt.x, pnt.y);
}
if do_draw {
for pnt in pnts {
cr.line_to(pnt.x, pnt.y);
}
} else {
for pnt in pnts.rev() {
cr.line_to(pnt.x, pnt.y);
}
}
cr.close_path();
if do_draw {
cr.fill().unwrap();
}
do_draw = !do_draw;
}
}
}
/// Draws the hodograph background: the constant-speed rings and radial
/// direction lines every 30 degrees.
fn draw_background_lines(&self, args: DrawingArgs<'_, '_>) {
    let (cr, config) = (args.cr, args.ac.config.borrow());

    if config.show_iso_speed {
        // Constant-speed rings.
        for pnts in config::ISO_SPEED_PNTS.iter() {
            let pnts = pnts
                .iter()
                .map(|xy_coords| self.convert_xy_to_screen(*xy_coords));
            plot_curve_from_points(
                cr,
                config.background_line_width,
                config.iso_speed_rgba,
                pnts,
            );
        }

        // Radial lines, from zero speed at the origin out to MAX_SPEED,
        // one for each 30 degrees of direction.
        let origin = self.convert_sd_to_screen(SDCoords {
            spd_dir: WindSpdDir {
                speed: Knots(0.0),
                direction: 360.0,
            },
        });
        for pnts in [
            30.0, 60.0, 90.0, 120.0, 150.0, 180.0, 210.0, 240.0, 270.0, 300.0, 330.0, 360.0,
        ]
        .iter()
        .map(|d| {
            let end_point = self.convert_sd_to_screen(SDCoords {
                spd_dir: WindSpdDir {
                    speed: config::MAX_SPEED,
                    direction: *d,
                },
            });
            [origin, end_point]
        }) {
            plot_curve_from_points(
                cr,
                config.background_line_width,
                config.iso_speed_rgba,
                pnts.iter().cloned(),
            );
        }
    }
}
/// Collects the iso-speed value labels, placed along the 240-degree radial,
/// skipping any label that would overlap one already collected or fall
/// outside the plot edges.
fn collect_labels(&self, args: DrawingArgs<'_, '_>) -> Vec<(String, ScreenRect)> {
    let (ac, cr, config) = (args.ac, args.cr, args.ac.config.borrow());

    let mut labels = vec![];

    let screen_edges = self.calculate_plot_edges(cr, ac);

    if config.show_iso_speed {
        for &s in &config::ISO_SPEED {
            for direction in &[240.0] {
                let label = format!("{:.0}", s.unpack());

                let extents = cr.text_extents(&label).unwrap();

                // Center the label on the point where the ring meets the
                // radial line.
                let ScreenCoords {
                    x: mut screen_x,
                    y: mut screen_y,
                } = self.convert_sd_to_screen(SDCoords {
                    spd_dir: WindSpdDir {
                        speed: s,
                        direction: *direction,
                    },
                });
                screen_y -= extents.height() / 2.0;
                screen_x -= extents.width() / 2.0;

                let label_lower_left = ScreenCoords {
                    x: screen_x,
                    y: screen_y,
                };
                let label_upper_right = ScreenCoords {
                    x: screen_x + extents.width(),
                    y: screen_y + extents.height(),
                };

                let pair = (
                    label,
                    ScreenRect {
                        lower_left: label_lower_left,
                        upper_right: label_upper_right,
                    },
                );

                // Drops the pair instead of adding it if it overlaps.
                check_overlap_then_add(cr, ac, &mut labels, &screen_edges, pair);
            }
        }
    }

    labels
}
/// Builds the legend text: a single "Hodograph" entry in the configured
/// label color.
fn build_legend_strings(ac: &AppContext) -> Vec<(String, Rgba)> {
    let color = ac.config.borrow().label_rgba;
    vec![(String::from("Hodograph"), color)]
}
/***********************************************************************************************
* Data Drawing.
**********************************************************************************************/
/// Draws the data layer: the wind profile trace first, then its overlays
/// (helicity fill and storm-motion / mean-wind markers) on top.
fn draw_data(&self, args: DrawingArgs<'_, '_>) {
    draw_data(args);
    draw_data_overlays(args);
}
/***********************************************************************************************
* Overlays Drawing.
**********************************************************************************************/
/// Highlights the wind value of the currently sampled sounding level, if
/// that level has both a wind value and a pressure greater than the
/// configured minimum hodograph pressure.
fn draw_active_sample(&self, args: DrawingArgs<'_, '_>) {
    if !self.has_data() {
        return;
    }

    let (ac, config) = (args.ac, args.ac.config.borrow());

    // Only sounding samples can be highlighted; fire-plume samples and the
    // absence of a sample draw nothing.
    let spd_dir = match *ac.get_sample() {
        Sample::Sounding {
            data: DataRow { pressure, wind, .. },
            ..
        } => {
            if let Some(wind) = pressure
                .into_option()
                .filter(|pr| *pr > config.min_hodo_pressure)
                .and(wind.into_option())
            {
                wind
            } else {
                return;
            }
        }
        Sample::FirePlume { .. } | Sample::None => return,
    };

    let coords = ac.hodo.convert_sd_to_screen(SDCoords { spd_dir });
    let rgba = config.active_readout_line_rgba;
    Self::draw_point(coords, rgba, args);
}
/***********************************************************************************************
* Events
**********************************************************************************************/
/// Pops up the hodograph context menu, anchored at the last recorded cursor
/// position. Does nothing if the popover widget or a cursor position is
/// unavailable.
fn right_button_release_event(&self, _position: (f64, f64), ac: &AppContextPointer) {
    if let Ok(popover) = ac.fetch_widget::<gtk::PopoverMenu>("hodo_popover") {
        if let Some(cursor) = self.get_last_cursor_position() {
            // A 1x1 rectangle at the cursor serves as the anchor point.
            let anchor = gtk::gdk::Rectangle::new(cursor.col as i32, cursor.row as i32, 1, 1);
            popover.set_pointing_to(Some(&anchor));
            popover.popup();
        }
    }
}
/// Records this view as the most recently focused zoomable drawing area.
fn enter_event(&self, ac: &AppContextPointer) {
    ac.set_last_focus(ZoomableDrawingAreas::Hodo);
}
}
impl MasterDrawable for HodoContext {}
/**************************************************************************************************
* DrawingArea set up
**************************************************************************************************/
/// Builds the "hodo" action group backing the hodograph right-click context
/// menu: helicity layer selection, helicity overlay toggle, and storm-motion
/// (right/left mover) selection.
fn build_hodograph_area_context_menu(acp: &AppContextPointer) -> Result<(), SondeError> {
    let window: Window = acp.fetch_widget("main_window")?;
    let config = acp.config.borrow();

    let hodo_group = SimpleActionGroup::new();
    window.insert_action_group("hodo", Some(&hodo_group));

    // Configure the layer to use for helicity calculations
    let current_helicity_layer = match config.helicity_layer {
        HelicityType::SurfaceTo3km => "sfc_to_3km",
        HelicityType::Effective => "effective",
    };
    let helicity_layer_action = SimpleAction::new_stateful(
        "helicity_layer_action",
        Some(gtk::glib::VariantTy::STRING),
        current_helicity_layer.into(),
    );
    let ac = Rc::clone(acp);
    helicity_layer_action.connect_activate(move |action, variant| {
        let val: &str = variant.unwrap().str().unwrap();
        action.set_state(val.into());
        let layer = match val {
            "sfc_to_3km" => HelicityType::SurfaceTo3km,
            "effective" => HelicityType::Effective,
            // Only the two strings above are ever placed in the variant.
            _ => unreachable!(),
        };
        ac.config.borrow_mut().helicity_layer = layer;
        // The analysis layer changed: recompute, redraw, refresh text views.
        ac.mark_data_dirty();
        crate::gui::draw_all(&ac);
        crate::gui::update_text_views(&ac);
    });
    hodo_group.add_action(&helicity_layer_action);

    // Show/hide the helicity overlay (fill the helicity area).
    let ac = acp.clone();
    let show_action = SimpleAction::new("show_helicity_overlay", None);
    show_action.connect_activate(move |_action, _variant| {
        let mut config = ac.config.borrow_mut();
        config.show_helicity_overlay = !config.show_helicity_overlay;
        ac.mark_data_dirty();
        crate::gui::draw_all(&ac);
        crate::gui::update_text_views(&ac);
    });
    hodo_group.add_action(&show_action);

    // Configure the helicity storm type (left move or right mover)
    let current_helicity_type = match config.helicity_storm_motion {
        StormMotionType::RightMover => "right",
        StormMotionType::LeftMover => "left",
    };
    let helicity_type_action = SimpleAction::new_stateful(
        "helicity_type",
        Some(gtk::glib::VariantTy::STRING),
        current_helicity_type.into(),
    );
    let ac = Rc::clone(acp);
    helicity_type_action.connect_activate(move |action, variant| {
        let val: &str = variant.unwrap().str().unwrap();
        action.set_state(val.into());
        let direction = match val {
            "right" => StormMotionType::RightMover,
            "left" => StormMotionType::LeftMover,
            _ => unreachable!(),
        };
        ac.config.borrow_mut().helicity_storm_motion = direction;
        ac.mark_data_dirty();
        crate::gui::draw_all(&ac);
        crate::gui::update_text_views(&ac);
    });
    hodo_group.add_action(&helicity_type_action);

    Ok(())
}
/**************************************************************************************************
* Data Layer Drawing
**************************************************************************************************/
/// Plots the wind-profile trace on the hodograph, using only levels with
/// both pressure and wind data and a pressure at or above the configured
/// minimum hodograph pressure.
fn draw_data(args: DrawingArgs<'_, '_>) {
    let (ac, cr) = (args.ac, args.cr);
    let config = ac.config.borrow();

    if let Some(anal) = ac.get_sounding_for_display() {
        let anal = anal.borrow();
        let sndg = anal.sounding();
        let pres_data = sndg.pressure_profile();
        let wind_data = sndg.wind_profile();

        // Lazily filter and project the levels to screen coordinates.
        let profile_data = izip!(pres_data, wind_data).filter_map(|(p, wind)| {
            if let (Some(p), Some(spd_dir)) = (p.into_option(), wind.into_option()) {
                if p >= config.min_hodo_pressure {
                    let sd_coords = SDCoords { spd_dir };
                    Some(ac.hodo.convert_sd_to_screen(sd_coords))
                } else {
                    None
                }
            } else {
                None
            }
        });

        plot_curve_from_points(
            cr,
            config.velocity_line_width,
            config.wind_rgba,
            profile_data,
        );
    }
}
/// Draws the overlays on top of the wind trace: the helicity-layer fill
/// first, then the storm-motion and mean-wind markers.
fn draw_data_overlays(args: DrawingArgs<'_, '_>) {
    draw_helicity_fill(args);
    draw_storm_motion_and_mean_wind(args);
}
/// Shades the polygon spanned by the selected storm-motion vector and the
/// wind values inside the configured helicity layer.
fn draw_helicity_fill(args: DrawingArgs<'_, '_>) {
    let (ac, cr) = (args.ac, args.cr);
    let config = ac.config.borrow();

    if !config.show_helicity_overlay {
        return;
    }

    if let Some(anal) = ac.get_sounding_for_display() {
        let anal = anal.borrow();

        // Get the storm motion
        let motion = {
            let motion = match config.helicity_storm_motion {
                StormMotionType::RightMover => anal.right_mover(),
                StormMotionType::LeftMover => anal.left_mover(),
            }
            .map(WindSpdDir::<Knots>::from);

            if let Some(motion) = motion {
                motion
            } else {
                return;
            }
        };

        // Polygon vertices: the storm-motion point followed by the layer's
        // wind values, all projected to screen coordinates.
        let pnts = {
            let layer = match config.helicity_layer {
                HelicityType::SurfaceTo3km => {
                    sounding_analysis::layer_agl(anal.sounding(), Meters(3000.0)).ok()
                }
                HelicityType::Effective => anal.effective_inflow_layer(),
            };

            // Both bounding pressures of the layer are required to proceed.
            if let Some((bottom_p, top_p)) = layer.and_then(|lyr| {
                lyr.bottom
                    .pressure
                    .into_option()
                    .and_then(|bp| lyr.top.pressure.into_option().map(|tp| (bp, tp)))
            }) {
                let pnts = izip!(
                    anal.sounding().pressure_profile(),
                    anal.sounding().wind_profile()
                )
                .filter_map(|(p_opt, w_opt)| {
                    p_opt.into_option().and_then(|p| w_opt.map(|w| (p, w)))
                })
                // Pressure decreases with height: skip levels below the
                // layer bottom, stop past the layer top.
                .skip_while(move |(p, _)| *p > bottom_p)
                .take_while(move |(p, _)| *p >= top_p)
                .map(|(_, w)| w);

                once(motion)
                    .chain(pnts)
                    .map(|spd_dir| SDCoords { spd_dir })
                    .map(|coord| ac.hodo.convert_sd_to_screen(coord))
            } else {
                return;
            }
        };

        let rgba = config.helicity_rgba;
        draw_filled_polygon(cr, rgba, pnts);
    }
}
/// Draws filled dot markers with "RM", "LM", and "MW" tags for the
/// right-mover, left-mover, and mean-wind vectors. Nothing is drawn unless
/// all three vectors are available.
fn draw_storm_motion_and_mean_wind(args: DrawingArgs<'_, '_>) {
    let (ac, cr) = (args.ac, args.cr);
    let config = ac.config.borrow();

    if let Some(anal) = ac.get_sounding_for_display() {
        let anal = anal.borrow();

        if let (Some(rm), Some(lm), Some(mw)) = (
            anal.right_mover().into_option(),
            anal.left_mover().into_option(),
            anal.mean_wind().into_option(),
        ) {
            let rm = WindSpdDir::<Knots>::from(rm);
            let lm = WindSpdDir::<Knots>::from(lm);
            let mw = WindSpdDir::<Knots>::from(mw);

            // Marker radius: 6 device pixels converted to user space.
            let pnt_size = cr.device_to_user_distance(6.0, 0.0).unwrap().0;

            let mut coords_rm = ac.hodo.convert_sd_to_screen(SDCoords { spd_dir: rm });
            let mut coords_lm = ac.hodo.convert_sd_to_screen(SDCoords { spd_dir: lm });
            let mut coords_mw = ac.hodo.convert_sd_to_screen(SDCoords { spd_dir: mw });

            let sm_rgba = config.storm_motion_rgba;
            // NOTE(review): the mean-wind marker reuses the storm-motion
            // color here; if the config has a dedicated mean-wind color
            // this may be a copy-paste slip -- verify against Config.
            let mw_rgba = config.storm_motion_rgba;
            cr.set_source_rgba(sm_rgba.0, sm_rgba.1, sm_rgba.2, sm_rgba.3);
            cr.arc(
                coords_rm.x,
                coords_rm.y,
                pnt_size,
                0.0,
                2.0 * ::std::f64::consts::PI,
            );
            cr.fill().unwrap();

            cr.arc(
                coords_lm.x,
                coords_lm.y,
                pnt_size,
                0.0,
                2.0 * ::std::f64::consts::PI,
            );
            cr.fill().unwrap();

            // Nudge the text tags slightly to the right of the markers.
            coords_rm.x += 0.025;
            coords_lm.x += 0.025;
            ac.hodo.draw_tag("RM", coords_rm, sm_rgba, args);
            ac.hodo.draw_tag("LM", coords_lm, sm_rgba, args);

            cr.set_source_rgba(mw_rgba.0, mw_rgba.1, mw_rgba.2, mw_rgba.3);
            cr.arc(
                coords_mw.x,
                coords_mw.y,
                pnt_size,
                0.0,
                2.0 * ::std::f64::consts::PI,
            );
            cr.fill().unwrap();
            coords_mw.x += 0.025;
            ac.hodo.draw_tag("MW", coords_mw, mw_rgba, args);
        }
    }
}
|
use futures_core::future::BoxFuture;
use std::borrow::{Borrow, BorrowMut};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::time::Instant;
use super::inner::{DecrementSizeGuard, SharedPool};
use crate::connection::{Connect, Connection};
/// A connection checked out from [`Pool`][crate::pool::Pool].
///
/// Will be returned to the pool on-drop.
pub struct PoolConnection<C>
where
    C: Connect,
{
    // `None` only transiently: after `close()` takes the connection or after
    // `Drop` returns it to the pool. All other accesses expect `Some`.
    live: Option<Live<C>>,
    pub(crate) pool: Arc<SharedPool<C>>,
}
/// A raw connection together with its creation time (used for age checks).
pub(super) struct Live<C> {
    raw: C,
    pub(super) created: Instant,
}
/// A live connection that is currently unused, plus when it went idle.
pub(super) struct Idle<C> {
    live: Live<C>,
    pub(super) since: Instant,
}
/// RAII wrapper for connections being handled by functions that may drop them
pub(super) struct Floating<'p, C> {
    inner: C,
    // Decrements the pool's size when dropped, unless cancelled first.
    guard: DecrementSizeGuard<'p>,
}
const DEREF_ERR: &str = "(bug) connection already released to pool";
impl<C> Borrow<C> for PoolConnection<C>
where
C: Connect,
{
fn borrow(&self) -> &C {
&*self
}
}
impl<C> BorrowMut<C> for PoolConnection<C>
where
C: Connect,
{
fn borrow_mut(&mut self) -> &mut C {
&mut *self
}
}
impl<C> Deref for PoolConnection<C>
where
    C: Connect,
{
    type Target = C;

    /// # Panics
    ///
    /// Panics if the connection has already been released back to the pool.
    fn deref(&self) -> &Self::Target {
        match &self.live {
            Some(live) => &live.raw,
            None => panic!("{}", DEREF_ERR),
        }
    }
}
impl<C> DerefMut for PoolConnection<C>
where
    C: Connect,
{
    /// # Panics
    ///
    /// Panics if the connection has already been released back to the pool.
    fn deref_mut(&mut self) -> &mut Self::Target {
        match &mut self.live {
            Some(live) => &mut live.raw,
            None => panic!("{}", DEREF_ERR),
        }
    }
}
impl<C> Connection for PoolConnection<C>
where
    C: Connect,
{
    /// Closes the underlying connection; it is permanently removed from the
    /// pool (the floating guard decrements the pool's size).
    fn close(mut self) -> BoxFuture<'static, crate::Result<()>> {
        Box::pin(async move {
            // Taking `live` here prevents `Drop` from returning the
            // connection to the pool afterwards.
            let live = self.live.take().expect("PoolConnection double-dropped");
            live.float(&self.pool).into_idle().close().await
        })
    }

    /// Pings by delegating to the live connection.
    ///
    /// Panics (via `Deref`) if the connection was already released.
    #[inline]
    fn ping(&mut self) -> BoxFuture<crate::Result<()>> {
        Box::pin(self.deref_mut().ping())
    }
}
/// Returns the connection to the [`Pool`][crate::pool::Pool] it was checked-out from.
impl<C> Drop for PoolConnection<C>
where
    C: Connect,
{
    fn drop(&mut self) {
        // `live` is `None` when `close()` already consumed the connection;
        // in that case there is nothing to return to the pool.
        if let Some(live) = self.live.take() {
            self.pool.release(live.float(&self.pool));
        }
    }
}
impl<C> Live<C> {
    /// Wraps this live connection in a `Floating` guard tied to `pool`;
    /// dropping the result decrements the pool's size unless the guard is
    /// cancelled.
    pub fn float(self, pool: &SharedPool<C>) -> Floating<Self> {
        Floating {
            inner: self,
            guard: DecrementSizeGuard::new(pool),
        }
    }

    /// Marks this connection idle, timestamped now.
    pub fn into_idle(self) -> Idle<C> {
        Idle {
            live: self,
            since: Instant::now(),
        }
    }
}
impl<C> Deref for Idle<C> {
    type Target = Live<C>;

    /// Borrows the wrapped live connection.
    fn deref(&self) -> &Self::Target {
        &self.live
    }
}
impl<C> DerefMut for Idle<C> {
    /// Mutably borrows the wrapped live connection.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.live
    }
}
impl<'s, C> Floating<'s, C> {
    /// Removes the connection from the pool's accounting entirely: cancels
    /// the size-decrement guard and hands the inner value to the caller.
    pub fn into_leakable(self) -> C {
        self.guard.cancel();
        self.inner
    }
}
impl<'s, C> Floating<'s, Live<C>> {
    /// Wraps a freshly opened connection, timestamped now, in an existing
    /// floating guard.
    pub fn new_live(conn: C, guard: DecrementSizeGuard<'s>) -> Self {
        Self {
            inner: Live {
                raw: conn,
                created: Instant::now(),
            },
            guard,
        }
    }

    /// Re-attaches the connection to `pool` as a checked-out
    /// `PoolConnection`; cancels the guard since the connection is counted
    /// by the pool again.
    pub fn attach(self, pool: &Arc<SharedPool<C>>) -> PoolConnection<C>
    where
        C: Connect,
    {
        let Floating { inner, guard } = self;

        debug_assert!(
            guard.same_pool(pool),
            "BUG: attaching connection to different pool"
        );

        guard.cancel();

        PoolConnection {
            live: Some(inner),
            pool: Arc::clone(pool),
        }
    }

    /// Converts to an idle floating connection, keeping the same guard.
    pub fn into_idle(self) -> Floating<'s, Idle<C>> {
        Floating {
            inner: self.inner.into_idle(),
            guard: self.guard,
        }
    }
}
impl<'s, C> Floating<'s, Idle<C>> {
    /// Floats an idle connection against `pool` with a fresh size guard.
    pub fn from_idle(idle: Idle<C>, pool: &'s SharedPool<C>) -> Self {
        Self {
            inner: idle,
            guard: DecrementSizeGuard::new(pool),
        }
    }

    /// Pings the underlying raw connection.
    pub async fn ping(&mut self) -> crate::Result<()>
    where
        C: Connection,
    {
        self.live.raw.ping().await
    }

    /// Converts back to a floating live connection, keeping the same guard.
    pub fn into_live(self) -> Floating<'s, Live<C>> {
        Floating {
            inner: self.inner.live,
            guard: self.guard,
        }
    }

    /// Closes the raw connection; the size guard drops normally, so the
    /// pool's size is decremented.
    pub async fn close(self) -> crate::Result<()>
    where
        C: Connection,
    {
        // `guard` is dropped as intended
        self.inner.live.raw.close().await
    }
}
impl<C> Deref for Floating<'_, C> {
    type Target = C;

    /// Borrows the wrapped value.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
impl<C> DerefMut for Floating<'_, C> {
    /// Mutably borrows the wrapped value.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
|
use crate::prelude::*;
/// Registers the renderable model definition for every creature and returns
/// the populated model table.
pub fn load_renderables(mut renderables: Models<Renderables>) -> Models<Renderables> {
    use CreatureRenderables::*;

    renderables.insert(
        Some("Player"),
        Some(Renderables::Creatures(Player)),
        load_scene("Player"),
        Template::ASprite(AnimSprite::default()),
    );
    renderables.insert(
        Some("Enemy"),
        Some(Renderables::Creatures(Enemy)),
        load_scene("Enemy"),
        Template::ASprite(AnimSprite::default()),
    );

    renderables
}
/// Top-level key identifying every renderable model in the game.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum Renderables {
    Creatures(CreatureRenderables),
}
/// The renderable creature kinds.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum CreatureRenderables {
    Player,
    Enemy,
}
use collections::{Array, EntityMap, EntitySet};
use ecs::*;
use scene::Scene;
use std::cell::RefCell;
use std::fmt::{Debug, Error, Formatter};
use std::intrinsics::type_name;
use std::ops::*;
const MAX_COMPONENTS: usize = 1_000;
struct MessageMap<T: Component>(EntityMap<Vec<T::Message>>);
impl<T: Component> MessageMap<T> {
    /// Creates an empty message map.
    fn new() -> MessageMap<T> {
        MessageMap(EntityMap::default())
    }
}
impl<T: Component> Clone for MessageMap<T> {
    /// "Clones" to an EMPTY map: pending messages are deliberately not
    /// carried over when the owning manager is cloned.
    fn clone(&self) -> MessageMap<T> {
        MessageMap::new()
    }
}
impl<T: Component> Deref for MessageMap<T> {
    type Target = EntityMap<Vec<T::Message>>;

    /// Borrows the inner entity-to-messages map.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl<T: Component> DerefMut for MessageMap<T> {
    /// Mutably borrows the inner entity-to-messages map.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl<T: Component> Debug for MessageMap<T> {
    /// Formats as the type name only; pending messages are not printed.
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        // `std::any::type_name` is the stable, safe replacement for the
        // unstable `std::intrinsics::type_name` intrinsic, so no `unsafe`
        // block is needed.
        write!(f, "{}", ::std::any::type_name::<Self>())
    }
}
/// A utilty on which to build other component managers.
///
/// `StructComponentManager` provides a default system for implementing a component manager for any
/// type that can be represented as a single struct. It handles the details of assigning component
/// data to an entity, retrieving that data, and destroying it. It also handles the details of
/// doing all of that through only shared references. `StructComponentManager` however does not
/// implement `ComponentManager` because it is meant to be reused within other managers that want
/// to wrap extra behavior around the default management style. `DefaultManager` is a basic wrapper
/// around `StructComponentManager` that implements `ComponentManager` and should be used as the
/// default component manager when no special handling is needed.
#[derive(Debug, Clone)]
pub struct StructComponentManager<T>
where T: Component + Clone + Debug,
      T::Message: Message<Target=T>,
{
    // Dense component storage; parallel to `entities`.
    components: Array<T>,
    // Owning entity for the component at the same index in `components`.
    entities: Array<Entity>,
    // Entity -> index into `components`/`entities`.
    indices: RefCell<EntityMap<usize>>,
    // Entities whose components have been queued for destruction.
    marked_for_destroy: RefCell<EntitySet>,
    // Messages queued by `send_message()`, applied in `process_messages()`.
    messages: RefCell<MessageMap<T>>,
}
impl<T> StructComponentManager<T>
where T: Component + Clone + Debug,
      T::Message: Message<Target=T>,
{
    /// Creates an empty manager with room for `MAX_COMPONENTS` components.
    pub fn new() -> StructComponentManager<T> {
        StructComponentManager {
            components: Array::new(MAX_COMPONENTS),
            entities: Array::new(MAX_COMPONENTS),
            indices: RefCell::new(EntityMap::default()),
            marked_for_destroy: RefCell::new(EntitySet::default()),
            messages: RefCell::new(MessageMap::new()),
        }
    }

    /// Assigns `component` to `entity`, returning a reference to the stored
    /// copy.
    ///
    /// # Panics
    ///
    /// Panics if `entity` already has a component of this type.
    pub fn assign(&self, entity: Entity, component: T) -> &T {
        assert!(
            !self.indices.borrow().contains_key(&entity),
            // Fixed typo in the panic message: "assign" -> "assigned".
            "Component already assigned to entity {:?}",
            entity);

        let index = self.components.len();
        self.components.push(component);
        self.entities.push(entity);
        self.indices.borrow_mut().insert(entity, index);

        &self.components[index]
    }

    /// Returns the component assigned to `entity`, or `None` if it has none.
    pub fn get(&self, entity: Entity) -> Option<&T> {
        self.indices
            .borrow()
            .get(&entity)
            .map(|index| &self.components[*index])
    }

    /// Per-frame update hook; currently only logs that it ran.
    pub fn update(&mut self, _scene: &Scene, _delta: f32) {
        println!("StructComponentManager::update()");
    }

    /// Queues the component for `entity` to be destroyed; the data is only
    /// marked, not removed immediately.
    pub fn destroy(&self, entity: Entity) {
        self.marked_for_destroy.borrow_mut().insert(entity);
    }

    /// Iterates over `(&component, entity)` pairs in storage order.
    pub fn iter(&self) -> Iter<T> {
        Iter {
            component_iter: self.components.iter(),
            entity_iter: self.entities.iter(),
        }
    }

    /// Iterates over `(&mut component, entity)` pairs in storage order.
    pub fn iter_mut(&mut self) -> IterMut<T> {
        IterMut {
            component_iter: self.components.iter_mut(),
            entity_iter: self.entities.iter(),
        }
    }

    /// Number of components currently stored.
    pub fn len(&self) -> usize {
        self.entities.len()
    }

    /// Passes a message to the component associated with the specified entity.
    ///
    /// Messages are queued and only applied when `process_messages()` runs.
    pub fn send_message<M: Into<T::Message>>(&self, entity: Entity, message: M) {
        let mut messages = self.messages.borrow_mut();
        messages
            .entry(entity)
            // `or_insert_with` defers constructing the Vec until the entry
            // is actually vacant (clippy: or_fun_call).
            .or_insert_with(Vec::new)
            .push(message.into());
    }

    /// Applies all pending messages to their target components.
    ///
    /// # Panics
    ///
    /// Panics if a queued message targets an entity that has no component of
    /// this type.
    pub fn process_messages(&mut self) {
        let mut messages = self.messages.borrow_mut();
        for (entity, mut messages) in messages.drain() {
            if let Some(index) = self.indices.borrow().get(&entity) {
                let component = &mut self.components[*index];
                for message in messages.drain(..) {
                    message.apply(component);
                }
            } else {
                // TODO: Panic or error? That could probably be configured at runtime.
                panic!(
                    "Attempted to pass message to {} of {:?} which does not exist",
                    unsafe { type_name::<T>() },
                    entity);
            }
        }
    }
}
/// Iterator over `(&component, entity)` pairs in storage order.
pub struct Iter<'a, T: 'a> {
    component_iter: ::std::slice::Iter<'a, T>,
    entity_iter: ::std::slice::Iter<'a, Entity>,
}
impl<'a, T: 'a + Component> Iterator for Iter<'a, T> {
    type Item = (&'a T, Entity);

    /// Advances both underlying iterators in lock-step; ends as soon as
    /// either is exhausted.
    fn next(&mut self) -> Option<(&'a T, Entity)> {
        match (self.component_iter.next(), self.entity_iter.next()) {
            (Some(component), Some(entity)) => Some((component, *entity)),
            _ => None,
        }
    }
}
/// Iterator over `(&mut component, entity)` pairs in storage order.
pub struct IterMut<'a, T: 'a + Component> {
    component_iter: ::std::slice::IterMut<'a, T>,
    entity_iter: ::std::slice::Iter<'a, Entity>,
}
impl<'a, T: 'a + Component> Iterator for IterMut<'a, T> {
    type Item = (&'a mut T, Entity);

    /// Advances both underlying iterators in lock-step; ends as soon as
    /// either is exhausted.
    fn next(&mut self) -> Option<(&'a mut T, Entity)> {
        match (self.component_iter.next(), self.entity_iter.next()) {
            (Some(component), Some(entity)) => Some((component, *entity)),
            _ => None,
        }
    }
}
|
/// Demonstrates returning an owned `String` rather than a dangling reference.
fn main() {
    let owned = dangle();
    println!("{}", owned)
}
/// Returns an owned `String`, moving ownership to the caller.
///
/// Declaring this as `fn dangle() -> &String` and returning `&s` would not
/// compile: the reference would dangle once the local `s` is dropped.
/// Returning the `String` itself moves ownership out, so the underlying
/// value is never deallocated here.
fn dangle() -> String {
    String::from("hello")
}
|
//! This is copy of [sync/mpsc/](https://github.com/rust-lang/futures-rs)
use std::{
fmt,
hash::{Hash, Hasher},
pin::Pin,
sync::{
atomic::{
AtomicBool, AtomicUsize,
Ordering::{Relaxed, SeqCst},
},
Arc, Weak,
},
task::{self, Poll},
thread,
};
use futures_core::{stream::Stream, task::__internal::AtomicWaker};
use parking_lot::Mutex;
use tokio::sync::oneshot::{channel as oneshot_channel, Receiver as OneshotReceiver};
use super::{
envelope::{Envelope, ToEnvelope},
queue::Queue,
SendError,
};
use crate::{
actor::Actor,
handler::{Handler, Message},
};
/// Object-safe interface for handles that can deliver messages of type `M`
/// into an actor's mailbox channel.
pub trait Sender<M>: Send
where
    M::Result: Send,
    M: Message + Send,
{
    /// Queues `msg` without ever parking the current task.
    fn do_send(&self, msg: M) -> Result<(), SendError<M>>;
    /// Attempts to queue `msg`; fails with `SendError::Full` when blocked.
    fn try_send(&self, msg: M) -> Result<(), SendError<M>>;
    /// Queues `msg` and returns a oneshot receiver for the handler's result.
    fn send(&self, msg: M) -> Result<OneshotReceiver<M::Result>, SendError<M>>;
    /// Clones this sender into a boxed trait object.
    fn boxed(&self) -> Box<dyn Sender<M> + Sync>;
    /// A hash value identifying the underlying channel.
    fn hash(&self) -> usize;
    /// Whether the receiving side of the channel is still open.
    fn connected(&self) -> bool;
    /// Returns a downgraded sender, where the sender is downgraded into its weak counterpart.
    fn downgrade(&self) -> Box<dyn WeakSender<M> + Sync + 'static>;
}
/// Forwarding impl so a boxed (possibly unsized) sender can be used anywhere
/// a `Sender<M>` is expected; every method delegates to the boxed value.
impl<S, M> Sender<M> for Box<S>
where
    S: Sender<M> + ?Sized,
    M::Result: Send,
    M: Message + Send,
{
    fn do_send(&self, msg: M) -> Result<(), SendError<M>> {
        (**self).do_send(msg)
    }

    fn try_send(&self, msg: M) -> Result<(), SendError<M>> {
        (**self).try_send(msg)
    }

    fn send(&self, msg: M) -> Result<OneshotReceiver<<M as Message>::Result>, SendError<M>> {
        (**self).send(msg)
    }

    fn boxed(&self) -> Box<dyn Sender<M> + Sync> {
        (**self).boxed()
    }

    fn hash(&self) -> usize {
        (**self).hash()
    }

    fn connected(&self) -> bool {
        (**self).connected()
    }

    fn downgrade(&self) -> Box<dyn WeakSender<M> + Sync> {
        (**self).downgrade()
    }
}
/// A sender handle that does not keep the channel alive.
pub trait WeakSender<M>: Send
where
    M::Result: Send,
    M: Message + Send,
{
    /// Attempts to upgrade a `WeakAddressSender<A>` to a [`Sender<M>`]
    ///
    /// Returns [`None`] if the actor has since been dropped.
    fn upgrade(&self) -> Option<Box<dyn Sender<M> + Sync>>;
    /// Clones this weak sender into a boxed trait object.
    fn boxed(&self) -> Box<dyn WeakSender<M> + Sync>;
}
/// The transmission end of a channel which is used to send values.
///
/// This is created by the `channel` method.
pub struct AddressSender<A: Actor> {
// Channel state shared between the sender and receiver.
inner: Arc<Inner<A>>,
// Handle to the task that is blocked on this sender. This handle is sent
// to the receiver half in order to be notified when the sender becomes
// unblocked.
sender_task: Arc<Mutex<SenderTask>>,
// True if the sender might be blocked. This is an optimization to avoid
// having to lock the mutex most of the time.
maybe_parked: Arc<AtomicBool>,
}
impl<A: Actor> fmt::Debug for AddressSender<A> {
    /// Debug output omits `inner` (the shared channel state).
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("AddressSender")
            .field("sender_task", &self.sender_task)
            .field("maybe_parked", &self.maybe_parked)
            .finish()
    }
}
/// A weakly referenced version of `AddressSender`.
///
/// This is created by the `AddressSender::downgrade` method.
pub struct WeakAddressSender<A: Actor> {
inner: Weak<Inner<A>>,
}
impl<A: Actor> Clone for WeakAddressSender<A> {
fn clone(&self) -> WeakAddressSender<A> {
WeakAddressSender {
inner: self.inner.clone(),
}
}
}
impl<A: Actor> fmt::Debug for WeakAddressSender<A> {
    /// Prints only the type name; the weak pointer has nothing useful to show.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("WeakAddressSender").finish()
    }
}
impl<A: Actor> PartialEq for WeakAddressSender<A> {
    /// Equality is identity of the underlying channel allocation.
    fn eq(&self, other: &Self) -> bool {
        self.inner.ptr_eq(&other.inner)
    }
}
impl<A: Actor> Eq for WeakAddressSender<A> {}
trait AssertKinds: Send + Sync + Clone {}
/// The receiving end of a channel which implements the `Stream` trait.
///
/// This is a concrete implementation of a stream which can be used to represent
/// a stream of values being computed elsewhere. This is created by the
/// `channel` method.
pub struct AddressReceiver<A: Actor> {
inner: Arc<Inner<A>>,
}
/// Generate `AddressSenders` for the channel
pub struct AddressSenderProducer<A: Actor> {
inner: Arc<Inner<A>>,
}
struct Inner<A: Actor> {
// Max buffer size of the channel. If `0` then the channel is unbounded.
buffer: AtomicUsize,
// Internal channel state. Consists of the number of messages stored in the
// channel as well as a flag signalling that the channel is closed.
state: AtomicUsize,
// Atomic, FIFO queue used to send messages to the receiver.
message_queue: Queue<Envelope<A>>,
// Atomic, FIFO queue used to send parked task handles to the receiver.
parked_queue: Queue<Arc<Mutex<SenderTask>>>,
// Number of senders in existence.
num_senders: AtomicUsize,
// Handle to the receiver's task.
recv_task: AtomicWaker,
}
// Struct representation of `Inner::state`.
#[derive(Debug, Clone, Copy)]
struct State {
// `true` when the channel is open
is_open: bool,
// Number of messages in the channel
num_messages: usize,
}
impl State {
fn is_closed(&self) -> bool {
!self.is_open && self.num_messages == 0
}
}
// The `is_open` flag is stored in the left-most bit of `Inner::state`
const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1);
// When a new channel is created, it is created in the open state with no
// pending messages.
const INIT_STATE: usize = OPEN_MASK;
// The maximum number of messages that a channel can track is `usize::MAX >> 1`
const MAX_CAPACITY: usize = !(OPEN_MASK);
// The maximum requested buffer size must be less than the maximum capacity of
// a channel. This is because each sender gets a guaranteed slot.
const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
// Sent to the consumer to wake up blocked producers
#[derive(Debug)]
struct SenderTask {
task: Option<task::Waker>,
is_parked: bool,
}
impl SenderTask {
    /// Creates an unparked task slot with no registered waker.
    fn new() -> Self {
        SenderTask {
            task: None,
            is_parked: false,
        }
    }

    /// Clears the parked flag and wakes the stored waker, if any.
    ///
    /// Returns `true` if a waker was actually woken.
    fn notify(&mut self) -> bool {
        self.is_parked = false;

        if let Some(task) = self.task.take() {
            task.wake();
            true
        } else {
            false
        }
    }
}
/// Creates an in-memory channel implementation of the `Stream` trait with
/// bounded capacity.
///
/// This method creates a concrete implementation of the `Stream` trait which
/// can be used to send values across threads in a streaming fashion. This
/// channel is unique in that it implements back pressure to ensure that the
/// sender never outpaces the receiver. The channel capacity is equal to
/// `buffer + num-senders`. In other words, each sender gets a guaranteed slot
/// in the channel capacity, and on top of that there are `buffer` "first come,
/// first serve" slots available to all senders.
///
/// The `Receiver` returned implements the `Stream` trait and has access to any
/// number of the associated combinators for transforming the result.
pub fn channel<A: Actor>(buffer: usize) -> (AddressSender<A>, AddressReceiver<A>) {
    // Check that the requested buffer size does not exceed the maximum buffer
    // size permitted by the system.
    assert!(buffer < MAX_BUFFER, "requested buffer size too large");

    let inner = Arc::new(Inner {
        buffer: AtomicUsize::new(buffer),
        // The channel starts open with zero queued messages.
        state: AtomicUsize::new(INIT_STATE),
        message_queue: Queue::new(),
        parked_queue: Queue::new(),
        // The `AddressSender` created below is the first (and only) sender.
        num_senders: AtomicUsize::new(1),
        recv_task: AtomicWaker::new(),
    });
    let tx = AddressSender {
        inner: Arc::clone(&inner),
        sender_task: Arc::new(Mutex::new(SenderTask::new())),
        maybe_parked: Arc::new(AtomicBool::new(false)),
    };
    let rx = AddressReceiver { inner };
    (tx, rx)
}
//
//
// ===== impl Sender =====
//
//
impl<A: Actor> AddressSender<A> {
    /// Is the channel still open
    pub fn connected(&self) -> bool {
        let curr = self.inner.state.load(SeqCst);
        let state = decode_state(curr);

        state.is_open
    }

    /// Attempts to send a message on this `Sender<A>` with blocking.
    ///
    /// This function must be called from inside of a task.
    ///
    /// # Errors
    ///
    /// * `SendError::Full` when this sender is currently parked.
    /// * `SendError::Closed` when the receiver has closed or been dropped.
    pub fn send<M>(&self, msg: M) -> Result<OneshotReceiver<M::Result>, SendError<M>>
    where
        A: Handler<M>,
        A::Context: ToEnvelope<A, M>,
        M::Result: Send,
        M: Message + Send,
    {
        // If the sender is currently blocked, reject the message
        if !self.poll_unparked(false, None).is_ready() {
            return Err(SendError::Full(msg));
        }

        // First, increment the number of messages contained by the channel.
        // This operation will also atomically determine if the sender task
        // should be parked.
        //
        // None is returned in the case that the channel has been closed by the
        // receiver. This happens when `Receiver::close` is called or the
        // receiver is dropped.
        let park_self = match self.inc_num_messages() {
            Some(num_messages) => {
                // receiver is full
                let buffer = self.inner.buffer.load(Relaxed);
                buffer != 0 && num_messages >= buffer
            }
            None => return Err(SendError::Closed(msg)),
        };

        // If the channel has reached capacity, then the sender task needs to
        // be parked. This will send the task handle on the parked task queue.
        if park_self {
            self.park();
        }
        // The oneshot pair carries the handler's result back to the caller.
        let (tx, rx) = oneshot_channel();
        let env = <A::Context as ToEnvelope<A, M>>::pack(msg, Some(tx));
        self.queue_push_and_signal(env);
        Ok(rx)
    }

    /// Attempts to send a message on this `Sender<A>` without blocking.
    ///
    /// When `park` is true and the channel is at capacity, this sender parks
    /// itself so later sends report `SendError::Full` until unparked.
    pub fn try_send<M>(&self, msg: M, park: bool) -> Result<(), SendError<M>>
    where
        A: Handler<M>,
        <A as Actor>::Context: ToEnvelope<A, M>,
        M::Result: Send,
        M: Message + Send + 'static,
    {
        // If the sender is currently blocked, reject the message
        if !self.poll_unparked(false, None).is_ready() {
            return Err(SendError::Full(msg));
        }

        let park_self = match self.inc_num_messages() {
            Some(num_messages) => {
                // receiver is full
                let buffer = self.inner.buffer.load(Relaxed);
                buffer != 0 && num_messages >= buffer
            }
            None => return Err(SendError::Closed(msg)),
        };
        if park_self && park {
            self.park();
        }
        // No oneshot: the handler's result for this message is discarded.
        let env = <A::Context as ToEnvelope<A, M>>::pack(msg, None);
        self.queue_push_and_signal(env);
        Ok(())
    }

    /// Send a message on this `Sender<A>` without blocking.
    ///
    /// This function does not park current task.
    pub fn do_send<M>(&self, msg: M) -> Result<(), SendError<M>>
    where
        A: Handler<M>,
        <A as Actor>::Context: ToEnvelope<A, M>,
        M::Result: Send,
        M: Message + Send,
    {
        if self.inc_num_messages().is_none() {
            Err(SendError::Closed(msg))
        } else {
            // If inc_num_messages returned Some(park_self), then the mailbox is still active.
            // We ignore the boolean (indicating to park and wait) in the Some, and queue the
            // message regardless.
            let env = <A::Context as ToEnvelope<A, M>>::pack(msg, None);
            self.queue_push_and_signal(env);
            Ok(())
        }
    }

    /// Downgrade to `WeakAddressSender` which can later be upgraded.
    ///
    /// The weak handle does not keep the channel's `Inner` alive.
    pub fn downgrade(&self) -> WeakAddressSender<A> {
        WeakAddressSender {
            inner: Arc::downgrade(&self.inner),
        }
    }

    // Push message to the queue and signal to the receiver
    fn queue_push_and_signal(&self, msg: Envelope<A>) {
        // Push the message onto the message queue
        self.inner.message_queue.push(msg);

        // Signal to the receiver that a message has been enqueued. If the
        // receiver is parked, this will unpark the task.
        self.inner.recv_task.wake();
    }

    // Increment the number of queued messages.
    //
    // Returns `Some(new_message_count)` on success, or `None` when the
    // channel has been closed. A CAS loop ensures concurrent senders each
    // observe a distinct count.
    fn inc_num_messages(&self) -> Option<usize> {
        let mut curr = self.inner.state.load(SeqCst);
        loop {
            let mut state = decode_state(curr);
            if !state.is_open {
                return None;
            }
            state.num_messages += 1;

            let next = encode_state(&state);
            match self
                .inner
                .state
                .compare_exchange(curr, next, SeqCst, SeqCst)
            {
                Ok(_) => {
                    return Some(state.num_messages);
                }
                Err(actual) => curr = actual,
            }
        }
    }

    // TODO: Not sure about this one, I modified code to match the futures one, might still be buggy
    //
    // Registers this sender as parked: clears any stale waker, marks the
    // task parked, and hands the task slot to the receiver via the parked
    // queue so the receiver can wake it when capacity frees up.
    fn park(&self) {
        {
            let mut sender = self.sender_task.lock();
            sender.task = None;
            sender.is_parked = true;
        }

        // Send handle over queue
        self.inner.parked_queue.push(Arc::clone(&self.sender_task));

        // Check to make sure we weren't closed after we sent our task on the queue
        let state = decode_state(self.inner.state.load(SeqCst));
        self.maybe_parked.store(state.is_open, Relaxed);
    }

    // Returns `Ready` when this sender is free to send and `Pending` while
    // it is parked. When `do_park` is true, the current task's waker (from
    // `cx`) is stored so the receiver can wake it later.
    fn poll_unparked(&self, do_park: bool, cx: Option<&mut task::Context<'_>>) -> Poll<()> {
        // First check the `maybe_parked` variable. This avoids acquiring the
        // lock in most cases
        if self.maybe_parked.load(Relaxed) {
            // Get a lock on the task handle
            let mut task = self.sender_task.lock();

            if !task.is_parked {
                self.maybe_parked.store(false, Relaxed);
                return Poll::Ready(());
            }

            // At this point, an unpark request is pending, so there will be an
            // unpark sometime in the future. We just need to make sure that
            // the correct task will be notified.
            //
            // Update the task in case the `Sender` has been moved to another
            // task
            task.task = if do_park {
                cx.map(|cx| cx.waker().clone())
            } else {
                None
            };

            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
/// Trait-object adapter: forwards the `Sender<M>` trait methods to the
/// inherent methods on `AddressSender`.
impl<A, M> Sender<M> for AddressSender<A>
where
    A: Handler<M>,
    A::Context: ToEnvelope<A, M>,
    M::Result: Send,
    M: Message + Send + 'static,
{
    fn do_send(&self, msg: M) -> Result<(), SendError<M>> {
        self.do_send(msg)
    }

    fn try_send(&self, msg: M) -> Result<(), SendError<M>> {
        self.try_send(msg, true)
    }

    fn send(&self, msg: M) -> Result<OneshotReceiver<M::Result>, SendError<M>> {
        self.send(msg)
    }

    fn boxed(&self) -> Box<dyn Sender<M> + Sync> {
        Box::new(self.clone())
    }

    /// Identifies the channel by the address of its shared `Inner` state.
    fn hash(&self) -> usize {
        let hash: *const _ = self.inner.as_ref();
        hash as usize
    }

    fn connected(&self) -> bool {
        self.connected()
    }

    fn downgrade(&self) -> Box<dyn WeakSender<M> + Sync + 'static> {
        Box::new(WeakAddressSender {
            inner: Arc::downgrade(&self.inner),
        })
    }
}
impl<A: Actor> Clone for AddressSender<A> {
    /// Clones the sender, registering one more sender on the shared channel.
    ///
    /// # Panics
    /// Panics if the maximum number of senders has already been reached.
    fn clone(&self) -> AddressSender<A> {
        // Since this atomic op isn't actually guarding any memory and we don't
        // care about any orderings besides the ordering on the single atomic
        // variable, a relaxed ordering is acceptable.
        let mut curr = self.inner.num_senders.load(SeqCst);
        loop {
            // If the maximum number of senders has been reached, then fail
            if curr == self.inner.max_senders() {
                panic!("cannot clone `Sender` -- too many outstanding senders");
            }
            debug_assert!(curr < self.inner.max_senders());
            let next = curr + 1;
            // `compare_exchange` replaces the deprecated `compare_and_swap`,
            // matching the style already used in `inc_num_messages`; on
            // failure it returns the observed value so we can retry.
            // The ABA problem doesn't matter here. We only care that the
            // number of senders never exceeds the maximum.
            match self
                .inner
                .num_senders
                .compare_exchange(curr, next, SeqCst, SeqCst)
            {
                Ok(_) => {
                    return AddressSender {
                        inner: Arc::clone(&self.inner),
                        sender_task: Arc::new(Mutex::new(SenderTask::new())),
                        maybe_parked: Arc::new(AtomicBool::new(false)),
                    }
                }
                Err(actual) => curr = actual,
            }
        }
    }
}
impl<A: Actor> Drop for AddressSender<A> {
    fn drop(&mut self) {
        // Ordering between variables doesn't matter here; only the sender
        // counter itself must stay consistent. `fetch_sub` returns the
        // value *before* the decrement, so `1` means this was the last
        // sender: wake the receiver so it can observe the disconnect.
        if self.inner.num_senders.fetch_sub(1, SeqCst) == 1 {
            self.inner.recv_task.wake();
        }
    }
}
// Two senders compare equal iff they share the same channel `Inner`
// (identity comparison, not structural).
impl<A: Actor> PartialEq for AddressSender<A> {
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
impl<A: Actor> Eq for AddressSender<A> {}
// Hash by the address of the shared `Inner`, consistent with `eq` above.
impl<A: Actor> Hash for AddressSender<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let hash: *const Inner<A> = self.inner.as_ref();
        hash.hash(state);
    }
}
//
//
// ===== impl WeakSender =====
//
//
impl<A: Actor> WeakAddressSender<A> {
    /// Attempts to upgrade the `WeakAddressSender<A>` pointer to an [`AddressSender<A>`]
    ///
    /// Returns [`None`] if the actor has since been dropped.
    pub fn upgrade(&self) -> Option<AddressSender<A>> {
        // Try to revive the weak reference; bail out early when the
        // channel's `Inner` has already been dropped.
        let inner = Weak::upgrade(&self.inner)?;
        // Route through the producer so sender bookkeeping stays correct.
        Some(AddressSenderProducer { inner }.sender())
    }
}
impl<A, M> WeakSender<M> for WeakAddressSender<A>
where
    A: Handler<M>,
    A::Context: ToEnvelope<A, M>,
    M::Result: Send,
    M: Message + Send + 'static,
{
    /// Upgrades to a boxed strong sender, or `None` if the channel is gone.
    fn upgrade(&self) -> Option<Box<dyn Sender<M> + Sync>> {
        // `Option::map` replaces the manual `if let Some … else { None }`
        // expansion; the `as` cast performs the unsized trait-object coercion.
        WeakAddressSender::upgrade(self)
            .map(|sender| Box::new(sender) as Box<dyn Sender<M> + Sync>)
    }
    /// Returns a boxed clone of this weak sender.
    fn boxed(&self) -> Box<dyn WeakSender<M> + Sync> {
        Box::new(self.clone())
    }
}
//
//
// ===== impl SenderProducer =====
//
//
impl<A: Actor> AddressSenderProducer<A> {
    /// Are any senders connected
    pub fn connected(&self) -> bool {
        self.inner.num_senders.load(SeqCst) != 0
    }
    /// Get channel capacity
    pub fn capacity(&self) -> usize {
        self.inner.buffer.load(Relaxed)
    }
    /// Set channel capacity
    ///
    /// This method wakes up all waiting senders if new capacity is greater
    /// than current
    pub fn set_capacity(&mut self, cap: usize) {
        let buffer = self.inner.buffer.load(Relaxed);
        self.inner.buffer.store(cap, Relaxed);
        // wake up all parked senders — they may now have room to send
        if cap > buffer {
            while let Some(task) = unsafe { self.inner.parked_queue.pop_spin() } {
                task.lock().notify();
            }
        }
    }
    /// Get sender side of the channel
    ///
    /// # Panics
    /// Panics if the maximum number of senders has been reached.
    pub fn sender(&self) -> AddressSender<A> {
        // this code same as Sender::clone
        let mut curr = self.inner.num_senders.load(SeqCst);
        loop {
            // If the maximum number of senders has been reached, then fail
            if curr == self.inner.max_senders() {
                panic!("cannot clone `Sender` -- too many outstanding senders");
            }
            let next = curr + 1;
            // `compare_exchange` replaces the deprecated `compare_and_swap`,
            // matching the style already used in `inc_num_messages`.
            // The ABA problem doesn't matter here. We only care that the
            // number of senders never exceeds the maximum.
            match self
                .inner
                .num_senders
                .compare_exchange(curr, next, SeqCst, SeqCst)
            {
                Ok(_) => {
                    return AddressSender {
                        inner: Arc::clone(&self.inner),
                        sender_task: Arc::new(Mutex::new(SenderTask::new())),
                        maybe_parked: Arc::new(AtomicBool::new(false)),
                    }
                }
                Err(actual) => curr = actual,
            }
        }
    }
}
//
//
// ===== impl Receiver =====
//
//
impl<A: Actor> AddressReceiver<A> {
    /// Returns whether any senders are still connected.
    pub fn connected(&self) -> bool {
        self.inner.num_senders.load(SeqCst) != 0
    }
    /// Returns the channel capacity.
    pub fn capacity(&self) -> usize {
        self.inner.buffer.load(Relaxed)
    }
    /// Sets the channel capacity.
    ///
    /// This method wakes up all waiting senders if the new capacity
    /// is greater than the current one.
    pub fn set_capacity(&mut self, cap: usize) {
        let buffer = self.inner.buffer.load(Relaxed);
        self.inner.buffer.store(cap, Relaxed);
        // wake up all parked senders — they may now have room to send
        if cap > buffer {
            while let Some(task) = unsafe { self.inner.parked_queue.pop_spin() } {
                task.lock().notify();
            }
        }
    }
    /// Returns the sender side of the channel.
    ///
    /// # Panics
    /// Panics if the maximum number of senders has been reached.
    pub fn sender(&self) -> AddressSender<A> {
        // this code same as Sender::clone
        let mut curr = self.inner.num_senders.load(SeqCst);
        loop {
            // If the maximum number of senders has been reached, then fail
            if curr == self.inner.max_senders() {
                panic!("cannot clone `Sender` -- too many outstanding senders");
            }
            let next = curr + 1;
            // `compare_exchange` replaces the deprecated `compare_and_swap`,
            // matching the style already used in `inc_num_messages`.
            // The ABA problem doesn't matter here. We only care that the
            // number of senders never exceeds the maximum.
            match self
                .inner
                .num_senders
                .compare_exchange(curr, next, SeqCst, SeqCst)
            {
                Ok(_) => {
                    return AddressSender {
                        inner: Arc::clone(&self.inner),
                        sender_task: Arc::new(Mutex::new(SenderTask::new())),
                        maybe_parked: Arc::new(AtomicBool::new(false)),
                    }
                }
                Err(actual) => curr = actual,
            }
        }
    }
    /// Creates the sender producer.
    pub fn sender_producer(&self) -> AddressSenderProducer<A> {
        AddressSenderProducer {
            inner: self.inner.clone(),
        }
    }
    // Pops the next queued envelope, unparking one waiting sender on success.
    fn next_message(&mut self) -> Poll<Option<Envelope<A>>> {
        // Pop off a message
        match unsafe { self.inner.message_queue.pop_spin() } {
            Some(msg) => {
                // If there are any parked task handles in the parked queue,
                // pop one and unpark it.
                self.unpark_one();
                // Decrement number of messages
                self.dec_num_messages();
                Poll::Ready(Some(msg))
            }
            None => {
                let state = decode_state(self.inner.state.load(SeqCst));
                if state.is_closed() {
                    // If closed flag is set AND there are no pending messages
                    // it means end of stream
                    Poll::Ready(None)
                } else {
                    // If queue is open, we need to return Pending
                    // to be woken up when new messages arrive.
                    // If queue is closed but num_messages is non-zero,
                    // it means that senders updated the state,
                    // but didn't put message to queue yet,
                    // so we need to park until sender unparks the task
                    // after queueing the message.
                    Poll::Pending
                }
            }
        }
    }
    // Unpark a single task handle if there is one pending in the parked queue
    fn unpark_one(&mut self) {
        if let Some(task) = unsafe { self.inner.parked_queue.pop_spin() } {
            task.lock().notify();
        }
    }
    // Decrements the queued-message count in the packed state word.
    fn dec_num_messages(&self) {
        // OPEN_MASK is highest bit, so it's unaffected by subtraction
        // unless there's underflow, and we know there's no underflow
        // because number of messages at this point is always > 0.
        self.inner.state.fetch_sub(1, SeqCst);
    }
}
impl<A: Actor> Stream for AddressReceiver<A> {
    type Item = Envelope<A>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        // Fast path: a message (or end-of-stream) is already available.
        if let Poll::Ready(item) = this.next_message() {
            return Poll::Ready(item);
        }
        // Nothing to read: register our waker, then look one more time.
        // The re-check closes the race where a sender enqueues a message
        // after the first `next_message` but before `register` runs.
        this.inner.recv_task.register(cx.waker());
        this.next_message()
    }
}
// Dropping the receiver closes the channel, releases every parked sender,
// and drains all pending envelopes so their destructors run.
impl<A: Actor> Drop for AddressReceiver<A> {
    fn drop(&mut self) {
        // close
        self.inner.set_closed();
        // Wake up any threads waiting as they'll see that we've closed the
        // channel and will continue on their merry way.
        while let Some(task) = unsafe { self.inner.parked_queue.pop_spin() } {
            task.lock().notify();
        }
        // Drain the channel of all pending messages
        loop {
            match self.next_message() {
                Poll::Ready(Some(_)) => {}
                Poll::Ready(None) => break,
                Poll::Pending => {
                    let state = decode_state(self.inner.state.load(SeqCst));
                    // If the channel is closed, then there is no need to park.
                    if state.is_closed() {
                        break;
                    }
                    // TODO: Spinning isn't ideal, it might be worth
                    // investigating using a condvar or some other strategy
                    // here. That said, if this case is hit, then another thread
                    // is about to push the value into the queue and this isn't
                    // the only spinlock in the impl right now.
                    thread::yield_now();
                }
            }
        }
    }
}
//
//
// ===== impl Inner =====
//
//
impl<A: Actor> Inner<A> {
    // The return value is such that the total number of messages that can be
    // enqueued into the channel will never exceed MAX_CAPACITY
    fn max_senders(&self) -> usize {
        MAX_CAPACITY - self.buffer.load(Relaxed)
    }
    // Clear `open` flag in the state, keep `num_messages` intact.
    fn set_closed(&self) {
        let curr = self.state.load(SeqCst);
        // Already closed: nothing to do.
        if !decode_state(curr).is_open {
            return;
        }
        // fetch_and clears only the OPEN bit; it is idempotent, so a race
        // with another closer between the load above and this RMW is benign.
        self.state.fetch_and(!OPEN_MASK, SeqCst);
    }
}
// SAFETY: `Inner` is shared between sender and receiver threads; every
// access visible in this file goes through atomics, the task locks, or the
// lock-free queues.
// NOTE(review): soundness also requires all fields of `Inner` (not fully
// visible here) to be thread-safe — confirm against the struct definition.
unsafe impl<A: Actor> Send for Inner<A> {}
unsafe impl<A: Actor> Sync for Inner<A> {}
//
//
// ===== Helpers =====
//
//
// Unpacks the channel state word: the low bits carry the message count and
// the OPEN_MASK bit carries the open/closed flag.
fn decode_state(num: usize) -> State {
    let is_open = num & OPEN_MASK == OPEN_MASK;
    let num_messages = num & MAX_CAPACITY;
    State { is_open, num_messages }
}
// Packs the message count and open flag back into a single word;
// the exact inverse of `decode_state`.
fn encode_state(state: &State) -> usize {
    let open_bit = if state.is_open { OPEN_MASK } else { 0 };
    state.num_messages | open_bit
}
#[cfg(test)]
mod tests {
    use std::{thread, time};
    use super::*;
    use crate::{address::queue::PopResult, prelude::*};
    // Minimal actor/message pair used to exercise the channel.
    struct Act;
    impl Actor for Act {
        type Context = Context<Act>;
    }
    struct Ping;
    impl Message for Ping {
        type Result = ();
    }
    impl Handler<Ping> for Act {
        type Result = ();
        fn handle(&mut self, _: Ping, _: &mut Context<Act>) {}
    }
    #[test]
    fn test_cap() {
        // NOTE(review): this test synchronises with the arbiter threads via
        // fixed 100ms sleeps, which is timing-sensitive and may flake on a
        // heavily loaded machine.
        System::new().block_on(async {
            let (s1, mut recv) = channel::<Act>(1);
            let s2 = recv.sender();
            let arb = Arbiter::new();
            arb.spawn_fn(move || {
                let _ = s1.send(Ping);
            });
            thread::sleep(time::Duration::from_millis(100));
            let arb2 = Arbiter::new();
            arb2.spawn_fn(move || {
                let _ = s2.send(Ping);
                let _ = s2.send(Ping);
            });
            thread::sleep(time::Duration::from_millis(100));
            // With capacity 1 the sends overflow: the state should record
            // 2 queued messages and one parked sender (asserted below).
            let state = decode_state(recv.inner.state.load(SeqCst));
            assert_eq!(state.num_messages, 2);
            let p = loop {
                match unsafe { recv.inner.parked_queue.pop() } {
                    PopResult::Data(task) => break Some(task),
                    PopResult::Empty => break None,
                    PopResult::Inconsistent => thread::yield_now(),
                }
            };
            assert!(p.is_some());
            // Put the parked handle back so `set_capacity` can wake it.
            recv.inner.parked_queue.push(p.unwrap());
            recv.set_capacity(10);
            thread::sleep(time::Duration::from_millis(100));
            let state = decode_state(recv.inner.state.load(SeqCst));
            assert_eq!(state.num_messages, 2);
            // Raising the capacity must have drained the parked queue.
            let p = loop {
                match unsafe { recv.inner.parked_queue.pop() } {
                    PopResult::Data(task) => break Some(task),
                    PopResult::Empty => break None,
                    PopResult::Inconsistent => thread::yield_now(),
                }
            };
            assert!(p.is_none());
            System::current().stop();
        });
    }
}
|
fn main() {
    // Count 1..=5, special-casing 3; the loop exits via `break` at 5.
    let mut count = 0;
    loop {
        count += 1;
        match count {
            3 => println!("Three!"),
            5 => {
                println!("count: {count}");
                println!("Ok enough.");
                break;
            }
            _ => println!("count: {count}"),
        }
    }
    // Labelled nested loops: the inner counter climbs to the outer one,
    // and everything stops once the outer counter reaches 10.
    let mut x_count = 0;
    let mut y_count;
    'outer: loop {
        x_count += 1;
        println!("x: {x_count}");
        y_count = 0;
        loop {
            y_count += 1;
            println!("y: {y_count}");
            if x_count == 10 {
                // Exit both loops at once with a labelled break.
                break 'outer;
            }
            if y_count == x_count {
                // Restart the outer loop early.
                continue 'outer;
            }
        }
    }
    // A `loop` is an expression: `break` can carry the loop's value.
    let mut counter = 0;
    let result = loop {
        counter += 1;
        if counter == 10 {
            break counter * 2;
        }
    };
    assert_eq!(result, 20);
}
|
extern crate rapier2d as rapier; // For the debug UI.
use bevy::prelude::*;
use bevy::render::pass::ClearColor;
use bevy_rapier2d::physics::{JointBuilderComponent, RapierPhysicsPlugin, RapierPhysicsScale};
use bevy_rapier2d::render::RapierRenderPlugin;
use nalgebra::Point2;
use rapier::dynamics::{BallJoint, BodyStatus};
use rapier2d::dynamics::RigidBodyBuilder;
use rapier2d::geometry::ColliderBuilder;
use rapier2d::pipeline::PhysicsPipeline;
use ui::DebugUiPlugin;
#[path = "../../src_debug_ui/mod.rs"]
mod ui;
// App entry point: near-white clear colour, 2x MSAA, the Rapier physics and
// debug-render plugins, plus the three startup systems defined below.
fn main() {
    App::build()
        .add_resource(ClearColor(Color::rgb(
            0xF9 as f32 / 255.0,
            0xF9 as f32 / 255.0,
            0xFF as f32 / 255.0,
        )))
        .add_resource(Msaa { samples: 2 })
        .add_default_plugins()
        .add_plugin(RapierPhysicsPlugin)
        .add_plugin(RapierRenderPlugin)
        .add_plugin(DebugUiPlugin)
        .add_startup_system(setup_graphics.system())
        .add_startup_system(setup_physics.system())
        .add_startup_system(enable_physics_profiling.system())
        .run();
}
// Turns on the physics pipeline's internal counters so timing data is
// collected (consumed by the debug UI plugin).
fn enable_physics_profiling(mut pipeline: ResMut<PhysicsPipeline>) {
    pipeline.counters.enable()
}
// Spawns a light and a 2D camera, and enlarges the physics-to-render scale.
fn setup_graphics(mut commands: Commands, mut scale: ResMut<RapierPhysicsScale>) {
    // 1 physics unit is rendered as 12 pixels.
    scale.0 = 12.0;
    commands
        .spawn(LightComponents {
            translation: Translation::new(1000.0, 100.0, 2000.0),
            ..Default::default()
        })
        .spawn(Camera2dComponents {
            translation: Translation::new(200.0, -200.0, 0.0),
            ..Camera2dComponents::default()
        });
}
// Spawns a numk x numi grid of cuboid rigid bodies stitched together with
// ball joints — a hanging "net" pinned along parts of its top row.
pub fn setup_physics(mut commands: Commands) {
    /*
     * Create the balls
     */
    // Build the rigid body.
    // NOTE: a smaller radius (e.g. 0.1) breaks Box2D so
    // in order to be able to compare rapier with Box2D,
    // we set it to 0.4.
    let rad = 0.4;
    let numi = 40; // Num vertical nodes.
    let numk = 40; // Num horizontal nodes.
    let shift = 1.0;
    let mut body_entities = Vec::new();
    for k in 0..numk {
        for i in 0..numi {
            let fk = k as f32;
            let fi = i as f32;
            // Pin the top row (i == 0) at every 4th column and at the last
            // column; everything else simulates dynamically.
            let status = if i == 0 && (k % 4 == 0 || k == numk - 1) {
                BodyStatus::Static
            } else {
                BodyStatus::Dynamic
            };
            let rigid_body = RigidBodyBuilder::new(status).translation(fk * shift, -fi * shift);
            let collider = ColliderBuilder::cuboid(rad, rad).density(1.0);
            let child_entity = commands
                .spawn((rigid_body, collider))
                .current_entity()
                .unwrap();
            // Vertical joint.
            if i > 0 {
                // The node pushed immediately before this one is the
                // vertical neighbour in the same column.
                let parent_entity = *body_entities.last().unwrap();
                let joint = BallJoint::new(Point2::origin(), Point2::new(0.0, shift));
                commands.spawn((JointBuilderComponent::new(
                    joint,
                    parent_entity,
                    child_entity,
                ),));
            }
            // Horizontal joint.
            if k > 0 {
                // Same row, previous column: one full column (numi) back.
                let parent_index = body_entities.len() - numi;
                let parent_entity = body_entities[parent_index];
                let joint = BallJoint::new(Point2::origin(), Point2::new(-shift, 0.0));
                commands.spawn((JointBuilderComponent::new(
                    joint,
                    parent_entity,
                    child_entity,
                ),));
            }
            body_entities.push(child_entity);
        }
    }
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtWidgets/qfontdialog.h
// dst-file: /src/widgets/qfontdialog.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use super::qdialog::*; // 773
use std::ops::Deref;
use super::qwidget::*; // 773
use super::super::core::qobject::*; // 771
use super::super::gui::qfont::*; // 771
use super::super::core::qstring::*; // 771
use super::super::core::qobjectdefs::*; // 771
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
// FFI surface generated from the C wrappers around Qt's QFontDialog.
// Object handles cross the boundary as `u64` (raw C++ `this` pointers
// widened); see the per-function `proto:` comments for the C++ signatures.
extern {
    fn QFontDialog_Class_Size() -> c_int;
    // proto: void QFontDialog::QFontDialog(QWidget * parent);
    fn C_ZN11QFontDialogC2EP7QWidget(arg0: *mut c_void) -> u64;
    // proto: void QFontDialog::open(QObject * receiver, const char * member);
    fn C_ZN11QFontDialog4openEP7QObjectPKc(qthis: u64 /* *mut c_void*/, arg0: *mut c_void, arg1: *mut c_char);
    // proto: void QFontDialog::QFontDialog(const QFont & initial, QWidget * parent);
    fn C_ZN11QFontDialogC2ERK5QFontP7QWidget(arg0: *mut c_void, arg1: *mut c_void) -> u64;
    // proto: QFont QFontDialog::currentFont();
    fn C_ZNK11QFontDialog11currentFontEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: void QFontDialog::setVisible(bool visible);
    fn C_ZN11QFontDialog10setVisibleEb(qthis: u64 /* *mut c_void*/, arg0: c_char);
    // proto: void QFontDialog::~QFontDialog();
    fn C_ZN11QFontDialogD2Ev(qthis: u64 /* *mut c_void*/);
    // proto: static QFont QFontDialog::getFont(bool * ok, QWidget * parent);
    fn C_ZN11QFontDialog7getFontEPbP7QWidget(arg0: *mut c_char, arg1: *mut c_void) -> *mut c_void;
    // proto: const QMetaObject * QFontDialog::metaObject();
    fn C_ZNK11QFontDialog10metaObjectEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: QFont QFontDialog::selectedFont();
    fn C_ZNK11QFontDialog12selectedFontEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: void QFontDialog::setCurrentFont(const QFont & font);
    fn C_ZN11QFontDialog14setCurrentFontERK5QFont(qthis: u64 /* *mut c_void*/, arg0: *mut c_void);
    // Slot-proxy hooks: (object, C trampoline, Rust fn/closure pointer).
    fn QFontDialog_SlotProxy_connect__ZN11QFontDialog18currentFontChangedERK5QFont(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
    fn QFontDialog_SlotProxy_connect__ZN11QFontDialog12fontSelectedERK5QFont(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
} // <= ext block end
// body block begin =>
// class sizeof(QFontDialog)=1
// Rust-side wrapper: composes the QDialog base wrapper, the raw C++
// instance pointer, and handles for the dialog's two signals.
#[derive(Default)]
pub struct QFontDialog {
    qbase: QDialog,
    pub qclsinst: u64 /* *mut c_void*/,
    pub _fontSelected: QFontDialog_fontSelected_signal,
    pub _currentFontChanged: QFontDialog_currentFontChanged_signal,
}
impl /*struct*/ QFontDialog {
    // Wraps an existing C++ instance pointer without taking ownership.
    pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QFontDialog {
        return QFontDialog{qbase: QDialog::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
    }
}
// Deref to the QDialog base emulates C++ inheritance for method lookup
// (generated pattern used throughout these bindings).
impl Deref for QFontDialog {
    type Target = QDialog;
    fn deref(&self) -> &QDialog {
        return & self.qbase;
    }
}
impl AsRef<QDialog> for QFontDialog {
    fn as_ref(& self) -> & QDialog {
        return & self.qbase;
    }
}
// proto: void QFontDialog::QFontDialog(QWidget * parent);
// Overload dispatcher: the tuple type of `value` selects which constructor
// impl of `QFontDialog_new` runs.
impl /*struct*/ QFontDialog {
    pub fn new<T: QFontDialog_new>(value: T) -> QFontDialog {
        let rsthis = value.new();
        return rsthis;
    }
}
pub trait QFontDialog_new {
    fn new(self) -> QFontDialog;
}
// proto: void QFontDialog::QFontDialog(QWidget * parent);
impl<'a> /*trait*/ QFontDialog_new for (Option<&'a QWidget>) {
    /// Constructs a QFontDialog with an optional parent widget.
    fn new(self) -> QFontDialog {
        // Null parent when `None`, otherwise the wrapped C++ pointer.
        let arg0 = (if self.is_none() {0} else {self.unwrap().qclsinst}) as *mut c_void;
        // The C shim allocates and constructs the QFontDialog itself; the
        // previous `calloc(1, QFontDialog_Class_Size())` placeholder was
        // never used and leaked on every construction, so it was removed.
        let qthis: u64 = unsafe {C_ZN11QFontDialogC2EP7QWidget(arg0)};
        let rsthis = QFontDialog{qbase: QDialog::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
        return rsthis;
    }
}
// proto: void QFontDialog::open(QObject * receiver, const char * member);
// Overload dispatcher for `open`; the tuple type of `overload_args`
// selects the concrete impl.
impl /*struct*/ QFontDialog {
    pub fn open<RetType, T: QFontDialog_open<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.open(self);
    }
}
pub trait QFontDialog_open<RetType> {
    fn open(self , rsthis: & QFontDialog) -> RetType;
}
// proto: void QFontDialog::open(QObject * receiver, const char * member);
impl<'a> /*trait*/ QFontDialog_open<()> for (&'a QObject, &'a String) {
    /// Opens the dialog, connecting its accept signal to `receiver`'s slot
    /// named by `member`.
    ///
    /// # Panics
    /// Panics if the member name contains an interior NUL byte.
    fn open(self , rsthis: & QFontDialog) -> () {
        let arg0 = self.0.qclsinst as *mut c_void;
        // The C++ side expects a NUL-terminated `const char *`; a Rust
        // `String`'s buffer is NOT NUL-terminated, so passing `as_ptr()`
        // directly read past the end of the allocation. Marshal through a
        // CString, kept alive in `member` for the duration of the call.
        let member = std::ffi::CString::new(self.1.as_bytes())
            .expect("slot member name must not contain interior NUL bytes");
        let arg1 = member.as_ptr() as *mut c_char;
        unsafe {C_ZN11QFontDialog4openEP7QObjectPKc(rsthis.qclsinst, arg0, arg1)};
    }
}
// proto: void QFontDialog::QFontDialog(const QFont & initial, QWidget * parent);
impl<'a> /*trait*/ QFontDialog_new for (&'a QFont, Option<&'a QWidget>) {
    /// Constructs a QFontDialog preselecting `initial`, with an optional
    /// parent widget.
    fn new(self) -> QFontDialog {
        let arg0 = self.0.qclsinst as *mut c_void;
        // Null parent when `None`, otherwise the wrapped C++ pointer.
        let arg1 = (if self.1.is_none() {0} else {self.1.unwrap().qclsinst}) as *mut c_void;
        // The C shim constructs the object; the old `calloc` placeholder
        // was unused and leaked once per construction, so it was removed.
        let qthis: u64 = unsafe {C_ZN11QFontDialogC2ERK5QFontP7QWidget(arg0, arg1)};
        let rsthis = QFontDialog{qbase: QDialog::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
        return rsthis;
    }
}
// proto: QFont QFontDialog::currentFont();
// Overload dispatcher (same generated pattern as `open` above).
impl /*struct*/ QFontDialog {
    pub fn currentFont<RetType, T: QFontDialog_currentFont<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.currentFont(self);
    }
}
pub trait QFontDialog_currentFont<RetType> {
    fn currentFont(self , rsthis: & QFontDialog) -> RetType;
}
// proto: QFont QFontDialog::currentFont();
impl<'a> /*trait*/ QFontDialog_currentFont<QFont> for () {
    fn currentFont(self , rsthis: & QFontDialog) -> QFont {
        // Wrap the QFont pointer returned by the C shim in the Rust wrapper.
        let mut ret = unsafe {C_ZNK11QFontDialog11currentFontEv(rsthis.qclsinst)};
        let mut ret1 = QFont::inheritFrom(ret as u64);
        return ret1;
    }
}
// proto: void QFontDialog::setVisible(bool visible);
impl /*struct*/ QFontDialog {
    pub fn setVisible<RetType, T: QFontDialog_setVisible<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.setVisible(self);
    }
}
pub trait QFontDialog_setVisible<RetType> {
    fn setVisible(self , rsthis: & QFontDialog) -> RetType;
}
// proto: void QFontDialog::setVisible(bool visible);
// The C++ `bool` is marshalled as a c_char across the FFI boundary.
impl<'a> /*trait*/ QFontDialog_setVisible<()> for (i8) {
    fn setVisible(self , rsthis: & QFontDialog) -> () {
        let arg0 = self as c_char;
        unsafe {C_ZN11QFontDialog10setVisibleEb(rsthis.qclsinst, arg0)};
    }
}
// proto: void QFontDialog::~QFontDialog();
impl /*struct*/ QFontDialog {
    pub fn free<RetType, T: QFontDialog_free<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.free(self);
    }
}
pub trait QFontDialog_free<RetType> {
    fn free(self , rsthis: & QFontDialog) -> RetType;
}
// proto: void QFontDialog::~QFontDialog();
// Explicit destructor call — the wrapper has no Drop impl, so callers must
// invoke `free` themselves and not use the wrapper afterwards.
impl<'a> /*trait*/ QFontDialog_free<()> for () {
    fn free(self , rsthis: & QFontDialog) -> () {
        unsafe {C_ZN11QFontDialogD2Ev(rsthis.qclsinst)};
    }
}
// proto: static QFont QFontDialog::getFont(bool * ok, QWidget * parent);
// Static-method dispatcher: no receiver, only the overload tuple.
impl /*struct*/ QFontDialog {
    pub fn getFont_s<RetType, T: QFontDialog_getFont_s<RetType>>( overload_args: T) -> RetType {
        return overload_args.getFont_s();
    }
}
pub trait QFontDialog_getFont_s<RetType> {
    fn getFont_s(self ) -> RetType;
}
// proto: static QFont QFontDialog::getFont(bool * ok, QWidget * parent);
impl<'a> /*trait*/ QFontDialog_getFont_s<QFont> for (&'a mut Vec<i8>, Option<&'a QWidget>) {
    fn getFont_s(self ) -> QFont {
        // `ok` out-parameter: the C++ side writes into the Vec's buffer.
        // NOTE(review): the Vec must be non-empty or this writes out of
        // bounds — confirm all callers pass a sized buffer.
        let arg0 = self.0.as_ptr() as *mut c_char;
        let arg1 = (if self.1.is_none() {0} else {self.1.unwrap().qclsinst}) as *mut c_void;
        let mut ret = unsafe {C_ZN11QFontDialog7getFontEPbP7QWidget(arg0, arg1)};
        let mut ret1 = QFont::inheritFrom(ret as u64);
        return ret1;
    }
}
// proto: const QMetaObject * QFontDialog::metaObject();
impl /*struct*/ QFontDialog {
    pub fn metaObject<RetType, T: QFontDialog_metaObject<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.metaObject(self);
    }
}
pub trait QFontDialog_metaObject<RetType> {
    fn metaObject(self , rsthis: & QFontDialog) -> RetType;
}
// proto: const QMetaObject * QFontDialog::metaObject();
impl<'a> /*trait*/ QFontDialog_metaObject<QMetaObject> for () {
    fn metaObject(self , rsthis: & QFontDialog) -> QMetaObject {
        let mut ret = unsafe {C_ZNK11QFontDialog10metaObjectEv(rsthis.qclsinst)};
        let mut ret1 = QMetaObject::inheritFrom(ret as u64);
        return ret1;
    }
}
// proto: QFont QFontDialog::selectedFont();
impl /*struct*/ QFontDialog {
    pub fn selectedFont<RetType, T: QFontDialog_selectedFont<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.selectedFont(self);
    }
}
pub trait QFontDialog_selectedFont<RetType> {
    fn selectedFont(self , rsthis: & QFontDialog) -> RetType;
}
// proto: QFont QFontDialog::selectedFont();
impl<'a> /*trait*/ QFontDialog_selectedFont<QFont> for () {
    fn selectedFont(self , rsthis: & QFontDialog) -> QFont {
        let mut ret = unsafe {C_ZNK11QFontDialog12selectedFontEv(rsthis.qclsinst)};
        let mut ret1 = QFont::inheritFrom(ret as u64);
        return ret1;
    }
}
// proto: void QFontDialog::setCurrentFont(const QFont & font);
impl /*struct*/ QFontDialog {
    pub fn setCurrentFont<RetType, T: QFontDialog_setCurrentFont<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.setCurrentFont(self);
    }
}
pub trait QFontDialog_setCurrentFont<RetType> {
    fn setCurrentFont(self , rsthis: & QFontDialog) -> RetType;
}
// proto: void QFontDialog::setCurrentFont(const QFont & font);
impl<'a> /*trait*/ QFontDialog_setCurrentFont<()> for (&'a QFont) {
    fn setCurrentFont(self , rsthis: & QFontDialog) -> () {
        let arg0 = self.qclsinst as *mut c_void;
        unsafe {C_ZN11QFontDialog14setCurrentFontERK5QFont(rsthis.qclsinst, arg0)};
    }
}
// Signal handles: each stores the raw C++ object pointer (`poi`) and is
// connected to a Rust callback through the SlotProxy trampolines below.
#[derive(Default)] // for QFontDialog_fontSelected
pub struct QFontDialog_fontSelected_signal{poi:u64}
impl /* struct */ QFontDialog {
    pub fn fontSelected(&self) -> QFontDialog_fontSelected_signal {
        return QFontDialog_fontSelected_signal{poi:self.qclsinst};
    }
}
impl /* struct */ QFontDialog_fontSelected_signal {
    pub fn connect<T: QFontDialog_fontSelected_signal_connect>(self, overload_args: T) {
        overload_args.connect(self);
    }
}
pub trait QFontDialog_fontSelected_signal_connect {
    fn connect(self, sigthis: QFontDialog_fontSelected_signal);
}
#[derive(Default)] // for QFontDialog_currentFontChanged
pub struct QFontDialog_currentFontChanged_signal{poi:u64}
impl /* struct */ QFontDialog {
    pub fn currentFontChanged(&self) -> QFontDialog_currentFontChanged_signal {
        return QFontDialog_currentFontChanged_signal{poi:self.qclsinst};
    }
}
impl /* struct */ QFontDialog_currentFontChanged_signal {
    pub fn connect<T: QFontDialog_currentFontChanged_signal_connect>(self, overload_args: T) {
        overload_args.connect(self);
    }
}
pub trait QFontDialog_currentFontChanged_signal_connect {
    fn connect(self, sigthis: QFontDialog_currentFontChanged_signal);
}
// currentFontChanged(const class QFont &)
// Trampoline for plain `fn` slots: called from C++ with the signal argument.
extern fn QFontDialog_currentFontChanged_signal_connect_cb_0(rsfptr:fn(QFont), arg0: *mut c_void) {
    println!("{}:{}", file!(), line!());
    // Wrap the raw QFont pointer handed over by the C++ proxy.
    let rsarg0 = QFont::inheritFrom(arg0 as u64);
    rsfptr(rsarg0);
}
// Trampoline for boxed-closure slots.
extern fn QFontDialog_currentFontChanged_signal_connect_cb_box_0(rsfptr_raw:*mut Box<Fn(QFont)>, arg0: *mut c_void) {
    println!("{}:{}", file!(), line!());
    // Wrap the raw QFont pointer handed over by the C++ proxy.
    let rsarg0 = QFont::inheritFrom(arg0 as u64);
    // Call the closure through the raw pointer WITHOUT reclaiming it: the
    // previous `Box::from_raw` here took ownership and dropped the box on
    // return, freeing the closure after the first emission (use-after-free
    // on every later signal).
    // SAFETY: `rsfptr_raw` came from `Box::into_raw` in `connect` and is
    // intentionally never freed, so it stays valid for all emissions.
    unsafe{(*rsfptr_raw)(rsarg0)};
}
// Connect a plain `fn` pointer: the fn itself is passed as the user data.
impl /* trait */ QFontDialog_currentFontChanged_signal_connect for fn(QFont) {
    fn connect(self, sigthis: QFontDialog_currentFontChanged_signal) {
        // NOTE(review): the next two statements are leftover no-op casts
        // from the generator; they have no effect.
        self as *mut c_void as u64;
        self as *mut c_void;
        let arg0 = sigthis.poi as *mut c_void;
        let arg1 = QFontDialog_currentFontChanged_signal_connect_cb_0 as *mut c_void;
        let arg2 = self as *mut c_void;
        unsafe {QFontDialog_SlotProxy_connect__ZN11QFontDialog18currentFontChangedERK5QFont(arg0, arg1, arg2)};
    }
}
// Connect a boxed closure: double-boxed so a thin pointer can cross FFI.
impl /* trait */ QFontDialog_currentFontChanged_signal_connect for Box<Fn(QFont)> {
    fn connect(self, sigthis: QFontDialog_currentFontChanged_signal) {
        let arg0 = sigthis.poi as *mut c_void;
        let arg1 = QFontDialog_currentFontChanged_signal_connect_cb_box_0 as *mut c_void;
        // Intentionally leaked: the closure must outlive the connection.
        let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
        unsafe {QFontDialog_SlotProxy_connect__ZN11QFontDialog18currentFontChangedERK5QFont(arg0, arg1, arg2)};
    }
}
// fontSelected(const class QFont &)
// Trampoline for plain `fn` slots: called from C++ with the signal argument.
extern fn QFontDialog_fontSelected_signal_connect_cb_1(rsfptr:fn(QFont), arg0: *mut c_void) {
    println!("{}:{}", file!(), line!());
    // Wrap the raw QFont pointer handed over by the C++ proxy.
    let rsarg0 = QFont::inheritFrom(arg0 as u64);
    rsfptr(rsarg0);
}
// Trampoline for boxed-closure slots.
extern fn QFontDialog_fontSelected_signal_connect_cb_box_1(rsfptr_raw:*mut Box<Fn(QFont)>, arg0: *mut c_void) {
    println!("{}:{}", file!(), line!());
    // Wrap the raw QFont pointer handed over by the C++ proxy.
    let rsarg0 = QFont::inheritFrom(arg0 as u64);
    // Call the closure through the raw pointer WITHOUT reclaiming it: the
    // previous `Box::from_raw` here took ownership and dropped the box on
    // return, freeing the closure after the first emission (use-after-free
    // on every later signal).
    // SAFETY: `rsfptr_raw` came from `Box::into_raw` in `connect` and is
    // intentionally never freed, so it stays valid for all emissions.
    unsafe{(*rsfptr_raw)(rsarg0)};
}
// Connect a plain `fn` pointer: the fn itself is passed as the user data.
impl /* trait */ QFontDialog_fontSelected_signal_connect for fn(QFont) {
    fn connect(self, sigthis: QFontDialog_fontSelected_signal) {
        // NOTE(review): the next two statements are leftover no-op casts
        // from the generator; they have no effect.
        self as *mut c_void as u64;
        self as *mut c_void;
        let arg0 = sigthis.poi as *mut c_void;
        let arg1 = QFontDialog_fontSelected_signal_connect_cb_1 as *mut c_void;
        let arg2 = self as *mut c_void;
        unsafe {QFontDialog_SlotProxy_connect__ZN11QFontDialog12fontSelectedERK5QFont(arg0, arg1, arg2)};
    }
}
// Connect a boxed closure: double-boxed so a thin pointer can cross FFI.
impl /* trait */ QFontDialog_fontSelected_signal_connect for Box<Fn(QFont)> {
    fn connect(self, sigthis: QFontDialog_fontSelected_signal) {
        let arg0 = sigthis.poi as *mut c_void;
        let arg1 = QFontDialog_fontSelected_signal_connect_cb_box_1 as *mut c_void;
        // Intentionally leaked: the closure must outlive the connection.
        let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
        unsafe {QFontDialog_SlotProxy_connect__ZN11QFontDialog12fontSelectedERK5QFont(arg0, arg1, arg2)};
    }
}
// <= body block end
|
/*
ifvms-decompiler - core library
===============================
Copyright (c) 2020 Dannii Willis
MIT licenced
https://github.com/curiousdannii/ifvms.js
*/
use fnv::*;
pub mod zvm;
/// Function safety refers to whether or not a function can be compiled and run without worrying about its locals and stack being part of the savestate
/// Safe functions will never be saved so they can have various optimisations
/// Unsafe functions need to be compiled such that they can be serialised and restored
/// SafetyTBD functions have not yet been determined, and may need to be recompiled if judged safe
pub enum FunctionSafety {
    /// Never part of a savestate; eligible for optimisations.
    Safe,
    /// Must be compiled so it can be serialised and restored.
    Unsafe,
    /// Not yet classified; may be recompiled if later judged safe.
    SafetyTBD,
}
/// Function state for disassembly and relooping
pub struct Function<T> {
    // Start address of the function.
    pub addr: u32,
    // NOTE(review): field meanings below are inferred from the names —
    // confirm at the use sites in the zvm module.
    pub calls: FnvHashSet<u32>,
    pub entry_points: FnvHashSet<u32>,
    // Expressions keyed by address.
    pub expressions: FnvHashMap<u32, Expression<T>>,
    pub first_fragment_addr: u32,
    // Number of locals.
    pub locals: u32,
    // See `FunctionSafety` above.
    pub safety: FunctionSafety,
}
/// Expressions: instructions or combined branches
pub enum Expression<T> {
    /// A single decoded instruction.
    Instruction(T),
    /// A multi-instruction branch (see `Branch` below).
    Branch(Branch<T>),
}
/// Multi-instruction branches
pub struct Branch<T> {
    // Address of the branch.
    addr: u32,
    // The conditions combined into this branch.
    conditions: Vec<T>,
}
pub mod score_controller;
pub mod score_service;
use crate::context::{generate_context, Ctx};
use crate::scores::score_controller::{create_score, delete_score, get_score, update_score};
use thruster::{middleware, App, MiddlewareChain, MiddlewareReturnValue, Request};
/// Builds the scores sub-app: REST-style CRUD routes bound to the
/// score controller (POST /, GET/PUT/DELETE /:id).
pub fn init() -> App<Request, Ctx> {
    let mut subroute = App::<Request, Ctx>::create(generate_context);
    subroute.post("/", middleware![Ctx => create_score]);
    subroute.get("/:id", middleware![Ctx => get_score]);
    subroute.put("/:id", middleware![Ctx => update_score]);
    subroute.delete("/:id", middleware![Ctx => delete_score]);
    subroute
}
|
#![feature(async_await)]
#![deny(
missing_debug_implementations,
missing_copy_implementations,
elided_lifetimes_in_paths,
rust_2018_idioms,
clippy::fallible_impl_from,
clippy::missing_const_for_fn,
intra_doc_link_resolution_failure
)]
use std::{
io::{self, Result, Write},
process::{Command, Output},
};
// tc handle / classid constants, in HTB "major:minor" notation.
const ROOT_HANDLE: &str = "1:0";
const ROOT_VIP_HANDLE: &str = "1:1";
// Default-traffic class; the qdisc's `default 10` sends unmarked flows here.
const CLASS_ID: &str = "1:10";
// VIP class; the fw filter with handle 5 steers marked packets here.
const CLASS_VIP_ID: &str = "1:5";
// Shaping rates as understood by tc (kilobytes per second).
const DEFAULT_RATE: &str = "120kbps";
const VIP_RATE: &str = "256kbps";
// Addresses that get the faster VIP_RATE.
const VIP_IP_LIST: [&str; 1] = ["192.168.1.111"];
// Interface whose traffic is shaped.
const INTERFACE: &str = "wlan0";
/// Entry point: installs the tc shaping classes, then the iptables marking
/// rules that feed packets into them. Requires sudo for every command.
fn main() -> Result<()> {
    // tc is the program that is in charge of setting up the shaping rules.
    setup_tc(INTERFACE)?;
    // Once that class is setup, we’ll need to setup iptables to mark the
    // specific packets we want to shape as such.
    setup_iptables(INTERFACE)?;
    println!("All DONE");
    Ok(())
}
/// Creates the HTB qdisc and two traffic classes (default + VIP) on
/// `interface`, plus the fw filter that routes mark-5 packets to the VIP
/// class. Each tc command runs via `run_as_sudo` and is asserted to succeed.
fn setup_tc(interface: &str) -> Result<()> {
    // a nice guide http://sirlagz.net/2013/01/27/how-to-turn-the-raspberry-pi-into-a-shaping-wifi-router/
    // **Step 1**
    //
    // Firstly, we will setup the default rule for the interface, which is
    // {interface} in this instance.
    //
    // These 2 commands sets the default policy on {interface} to shape
    // everyone’s download speed to {DEFAULT_RATE} kilobytes a second.
    // `default 10` matches the minor number of CLASS_ID ("1:10").
    let cmd = format!(
        "tc qdisc add dev {device} root handle {root_handle} htb default 10",
        device = interface,
        root_handle = ROOT_HANDLE
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding QDISC");
    let cmd = format!(
        "tc class add dev {device} parent {root_handle} classid {class_id} htb rate {default_rate} \
         ceil \
         {default_rate} \
         prio 0",
        device = interface,
        root_handle = ROOT_HANDLE,
        class_id = CLASS_ID,
        default_rate = DEFAULT_RATE,
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding CLASS");
    // **Step 2**
    //
    // Next, we’ll setup another class to shape certain addresses to a higher
    // speed. We also need to setup a filter so that any packets marked as
    // such go through this rule
    //
    // NOTE(review): the placeholder names below are reused from Step 1 but
    // are bound to the VIP values (`root_handle` = ROOT_VIP_HANDLE "1:1",
    // `default_rate` = VIP_RATE). Also confirm that parenting the VIP class
    // under "1:1" (rather than the root "1:0") is intended — "1:1" is never
    // created as a class above.
    let cmd = format!(
        "tc class add dev {device} parent {root_handle} classid {class_id} htb rate {default_rate} \
         ceil \
         {default_rate} \
         prio 1",
        device = interface,
        root_handle = ROOT_VIP_HANDLE,
        class_id = CLASS_VIP_ID,
        default_rate = VIP_RATE,
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding CLASS VIP");
    // fw filter: packets carrying firewall mark 5 (set by setup_iptables)
    // are directed into the VIP class.
    let cmd = format!(
        "tc filter add dev {device} parent {root_handle} prio 1 handle 5 fw flowid {class_id}",
        device = interface,
        root_handle = ROOT_VIP_HANDLE,
        class_id = CLASS_VIP_ID,
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding FILTER VIP");
    Ok(())
}
/// Creates the mangle-table chains (`shaper-in`/`shaper-out`), hooks them
/// into POSTROUTING/PREROUTING for `interface`, and adds MARK rules:
/// mark 1 for the general 192.168.1.0/24 subnet, mark 5 for VIP addresses.
fn setup_iptables(interface: &str) -> Result<()> {
    // **Step 1**
    // Firstly, we’ll create the mangle table that we need.
    // I’ve used a custom chain in the mangle table in this snippet The below
    // code creates the new chains of `shaper-in` and `shaper-out`, and then
    // sets up some rules for any packets coming in and out of {interface}
    // to go through the new chains.
    let output = run_as_sudo("iptables -t mangle -N shaper-out")?;
    assert!(output.status.success(), "Error while adding shaper-out");
    let output = run_as_sudo("iptables -t mangle -N shaper-in")?;
    assert!(output.status.success(), "Error while adding shaper-in");
    // Traffic leaving via {interface} (toward the clients) -> shaper-in.
    let cmd = format!(
        "iptables -t mangle -I POSTROUTING -o {} -j shaper-in",
        interface
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding POSTROUTING");
    // Traffic arriving on {interface} (from the clients) -> shaper-out.
    let cmd = format!(
        "iptables -t mangle -I PREROUTING -i {} -j shaper-out",
        interface
    );
    let output = run_as_sudo(&cmd)?;
    assert!(output.status.success(), "Error while adding PREROUTING");
    // **Step 2**
    //
    // Once that is done, we can then setup the packet marking so that any
    // packets from the 192.168.1.0/24 subnet gets marked with a 1, otherwise if
    // the IP address is in {VIP_IP}, they will get marked with a 5
    let output = run_as_sudo("iptables -t mangle -A shaper-out -s 192.168.1.0/24 -j MARK --set-mark 1")?;
    assert!(output.status.success(), "Error while marking out 1");
    let output = run_as_sudo("iptables -t mangle -A shaper-in -d 192.168.1.0/24 -j MARK --set-mark 1")?;
    assert!(output.status.success(), "Error while marking 1");
    // VIP rules are appended after the subnet rules; the later match
    // overrides the earlier mark for these specific addresses.
    for ip in &VIP_IP_LIST {
        let output = run_as_sudo(&format!(
            "iptables -t mangle -A shaper-out -s {} -j MARK --set-mark 5",
            ip
        ))?;
        assert!(
            output.status.success(),
            "Error while marking out 5 for ip = {}",
            ip
        );
        let output = run_as_sudo(&format!(
            "iptables -t mangle -A shaper-in -d {} -j MARK --set-mark 5",
            ip
        ))?;
        assert!(
            output.status.success(),
            "Error while marking in 5 for ip = {}",
            ip
        );
    }
    Ok(())
}
/// Runs `cmd` through `sudo sh -c`, mirrors the child's stdout/stderr to
/// ours, and returns the captured `Output` so callers can inspect `status`.
///
/// Errors if the child cannot be spawned (e.g. `sudo` missing) or if
/// echoing the captured output fails.
fn run_as_sudo(cmd: &str) -> Result<Output> {
    // Propagate spawn failures with `?` instead of panicking: the function
    // already returns `Result`, and the previous `.expect("command not
    // found")` aborted the whole program on any spawn error.
    let output = Command::new("sudo").args(&["sh", "-c", cmd]).output()?;
    // Echo the child's output so the user sees what each command printed.
    io::stdout().write_all(&output.stdout)?;
    io::stderr().write_all(&output.stderr)?;
    Ok(output)
}
|
use aoc_runner_derive::{aoc, aoc_generator};
type Map = Vec<Vec<u8>>;
#[aoc_generator(day3)]
/// Parses the puzzle grid: '.' becomes 0 (open), '#' becomes 1 (tree).
/// Panics on any other character.
fn input_parse(input: &str) -> Map {
    let mut rows = Vec::new();
    for line in input.lines() {
        let mut row = Vec::new();
        for c in line.chars() {
            row.push(match c {
                '.' => 0,
                '#' => 1,
                _ => panic!("Unexpected character: {}", c),
            });
        }
        rows.push(row);
    }
    rows
}
/// Looks up the cell at (x, y): the grid repeats horizontally (x wraps by
/// the width of the first row); past the bottom edge, returns -1.
fn get(map: &Map, x: usize, y: usize) -> i8 {
    if y < map.len() {
        let width = map[0].len();
        map[y][x % width] as i8
    } else {
        -1
    }
}
/// Slides down the grid from (0, 0) in (step_x, step_y) increments,
/// counting trees (1-cells) until the bottom edge (-1) is reached.
fn traverse(map: &Map, step_x: usize, step_y: usize) -> i64 {
    let (mut x, mut y) = (0, 0);
    let mut trees = 0;
    loop {
        match get(&map, x, y) {
            1 => trees += 1,
            -1 => return trees,
            _ => {}
        }
        x += step_x;
        y += step_y;
    }
}
#[aoc(day3, part1)]
/// Part 1: tree count on the single slope "right 3, down 1".
fn solve_part1(map: &Map) -> i64 {
    let (right, down) = (3, 1);
    traverse(map, right, down)
}
#[aoc(day3, part2)]
/// Part 2: product of the tree counts over all five candidate slopes.
fn solve_part2(map: &Map) -> i64 {
    [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
        .iter()
        .map(|&(sx, sy)| traverse(map, sx, sy))
        .product()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Generator maps '.' -> 0 and '#' -> 1, row by row.
    #[test]
    fn parse() {
        assert_eq!(
            input_parse(".#.\n#.#\n"),
            vec![vec![0, 1, 0], vec![1, 0, 1]]
        );
    }
    // `get` wraps horizontally and returns -1 below the last row.
    #[test]
    fn lookup() {
        let sample: Map = vec![vec![0, 1, 0], vec![1, 0, 1]];
        assert_eq!(get(&sample, 0, 0), 0);
        assert_eq!(get(&sample, 0, 100), -1);
        assert_eq!(get(&sample, 4, 0), 1);
    }
    // Worked example from the AoC 2020 day 3 puzzle statement.
    #[test]
    fn part1() {
        let inp = "..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#";
        assert_eq!(solve_part1(&input_parse(&inp)), 7)
    }
    #[test]
    fn part2() {
        let inp = "..##.......
#...#...#..
.#....#..#.
..#.#...#.#
.#...##..#.
..#.##.....
.#.#.#....#
.#........#
#.##...#...
#...##....#
.#..#...#.#";
        assert_eq!(solve_part2(&input_parse(&inp)), 336)
    }
}
|
#![no_main]
#[macro_use] extern crate libfuzzer_sys;
extern crate needletail;
use needletail::parser::{FastaReader, FastxReader};
use std::io::Cursor;
// Fuzzes the FASTA parser: prefix the arbitrary bytes with '>' so every
// input at least begins like a FASTA header, then drain the reader.
fuzz_target!(|data: &[u8]| {
    let cursor = Cursor::new([b">", data].concat());
    let mut reader = FastaReader::new(cursor);
    // We only care that parsing neither panics nor hangs; records are dropped.
    while let Some(rec) = reader.next() {
        let _ = rec;
    }
});
|
//! This module contains the I/O types used by the clients.
use futures::Future;
/// A [`mqtt::IoSource`] implementation used by the clients.
pub struct IoSource {
    // Hostname used for TLS SNI/verification and the websocket URL.
    iothub_hostname: std::sync::Arc<str>,
    // Pre-resolved socket address (first DNS result; see `new`).
    iothub_host: std::net::SocketAddr,
    // Optional client identity: (PKCS#12 DER bytes, password).
    certificate: std::sync::Arc<Option<(Vec<u8>, String)>>,
    // Applied to both the TCP connect and per-read timeouts.
    timeout: std::time::Duration,
    // Transport-specific state (raw TCP vs websocket URL).
    extra: IoSourceExtra,
}
// Per-transport data carried alongside the common connection fields.
#[derive(Clone, Debug)]
enum IoSourceExtra {
    // Plain MQTT over TLS over TCP.
    Raw,
    // MQTT tunnelled through a websocket; `url` drives the HTTP handshake.
    WebSocket { url: url::Url },
}
impl IoSource {
    /// Resolves the IoT Hub hostname up front and records everything needed
    /// to open connections later via [`mqtt::IoSource::connect`].
    ///
    /// Errors with `ResolveIotHubHostname` if DNS resolution fails or
    /// yields no addresses, and `WebSocketUrl` if the websocket URL cannot
    /// be built from the hostname.
    #[allow(clippy::new_ret_no_self)] // Clippy bug
    pub(crate) fn new(
        iothub_hostname: std::sync::Arc<str>,
        certificate: std::sync::Arc<Option<(Vec<u8>, String)>>,
        timeout: std::time::Duration,
        transport: crate::Transport,
    ) -> Result<Self, crate::CreateClientError> {
        // 8883 = MQTT-over-TLS; 443 = websocket (HTTPS).
        let port = match transport {
            crate::Transport::Tcp => 8883,
            crate::Transport::WebSocket => 443,
        };
        // Resolve once now; only the first returned address is kept.
        let iothub_host = std::net::ToSocketAddrs::to_socket_addrs(&(&*iothub_hostname, port))
            .map_err(|err| crate::CreateClientError::ResolveIotHubHostname(Some(err)))?
            .next()
            .ok_or(crate::CreateClientError::ResolveIotHubHostname(None))?;
        let extra = match transport {
            crate::Transport::Tcp => crate::io::IoSourceExtra::Raw,
            crate::Transport::WebSocket => {
                // "ws://" scheme even though the stream is TLS-wrapped:
                // TLS is applied separately in `connect`, and the URL only
                // feeds the handshake's Host header and path.
                let url = match format!("ws://{}/$iothub/websocket", iothub_hostname).parse() {
                    Ok(url) => url,
                    Err(err) => return Err(crate::CreateClientError::WebSocketUrl(err)),
                };
                crate::io::IoSourceExtra::WebSocket { url }
            }
        };
        Ok(IoSource {
            iothub_hostname,
            iothub_host,
            certificate,
            timeout,
            extra,
        })
    }
}
impl mqtt::IoSource for IoSource {
    type Io = Io<tokio_tls::TlsStream<tokio_io_timeout::TimeoutStream<tokio::net::TcpStream>>>;
    type Future = Box<dyn Future<Item = Self::Io, Error = std::io::Error> + Send>;
    /// Opens one connection: TCP (with connect timeout) -> TLS (optionally
    /// presenting the client certificate) -> raw stream or websocket
    /// handshake, depending on the configured transport.
    fn connect(&mut self) -> Self::Future {
        // Clone everything the futures chain needs, since the closures
        // outlive `&mut self`.
        let iothub_hostname = self.iothub_hostname.clone();
        let certificate = self.certificate.clone();
        let timeout = self.timeout;
        let extra = self.extra.clone();
        Box::new(
            tokio::timer::Timeout::new(tokio::net::TcpStream::connect(&self.iothub_host), timeout)
                // Unwrap tokio's Timeout error back into an io::Error:
                // inner = the connect failed; elapsed = deadline hit.
                .map_err(|err| {
                    if err.is_inner() {
                        err.into_inner().unwrap()
                    } else if err.is_elapsed() {
                        std::io::ErrorKind::TimedOut.into()
                    } else if err.is_timer() {
                        panic!("could not poll connect timer: {}", err);
                    } else {
                        panic!("unreachable error: {}", err);
                    }
                })
                .and_then(move |stream| {
                    stream.set_nodelay(true)?;
                    // Per-read timeout so a dead connection can't stall reads forever.
                    let mut stream = tokio_io_timeout::TimeoutStream::new(stream);
                    stream.set_read_timeout(Some(timeout));
                    let mut tls_connector_builder = native_tls::TlsConnector::builder();
                    // Attach the client certificate (PKCS#12 DER + password) if one was given.
                    if let Some((der, password)) = &*certificate {
                        let identity =
                            native_tls::Identity::from_pkcs12(der, password).map_err(|err| {
                                std::io::Error::new(
                                    std::io::ErrorKind::Other,
                                    format!("could not parse client certificate: {}", err),
                                )
                            })?;
                        tls_connector_builder.identity(identity);
                    }
                    let connector = tls_connector_builder.build().map_err(|err| {
                        std::io::Error::new(
                            std::io::ErrorKind::Other,
                            format!("could not create TLS connector: {}", err),
                        )
                    })?;
                    let connector: tokio_tls::TlsConnector = connector.into();
                    // Returns a future-of-future; `.flatten()` below awaits the TLS handshake.
                    Ok(connector
                        .connect(&iothub_hostname, stream)
                        .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)))
                })
                .flatten()
                .and_then(move |stream| match extra {
                    IoSourceExtra::Raw => {
                        futures::future::Either::A(futures::future::ok(Io::Raw(stream)))
                    }
                    // Websocket transport: run the client handshake over the
                    // TLS stream, requesting the "mqtt" subprotocol.
                    IoSourceExtra::WebSocket { url } => {
                        let request = tungstenite::handshake::client::Request {
                            url,
                            extra_headers: Some(vec![(
                                "sec-websocket-protocol".into(),
                                "mqtt".into(),
                            )]),
                        };
                        let handshake = tungstenite::ClientHandshake::start(stream, request, None);
                        futures::future::Either::B(WsConnect::Handshake(handshake).map(|stream| {
                            Io::WebSocket {
                                inner: stream,
                                pending_read: std::io::Cursor::new(vec![]),
                            }
                        }))
                    }
                }),
        )
    }
}
/// The transport to use for the connection to the Azure IoT Hub
#[derive(Clone, Copy, Debug)]
pub enum Transport {
    // MQTT over TLS over TCP (port 8883; see `IoSource::new`).
    Tcp,
    // MQTT over websockets over TLS (port 443).
    WebSocket,
}
// Future adapter around tungstenite's (blocking-style, resumable) client
// handshake. `Invalid` is the placeholder state left behind while `poll`
// temporarily owns the handshake (see the Future impl).
enum WsConnect<S>
where
    S: std::io::Read + std::io::Write,
{
    Handshake(tungstenite::handshake::MidHandshake<tungstenite::ClientHandshake<S>>),
    Invalid,
}
impl<S> std::fmt::Debug for WsConnect<S>
where
    S: std::io::Read + std::io::Write,
{
    /// Shows only the variant name; the mid-handshake state has no
    /// `Debug`-printable contents worth exposing.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            WsConnect::Handshake(_) => "Handshake",
            WsConnect::Invalid => "Invalid",
        };
        f.debug_struct(name).finish()
    }
}
impl<S> Future for WsConnect<S>
where
    S: std::io::Read + std::io::Write,
{
    type Item = tungstenite::WebSocket<S>;
    type Error = std::io::Error;
    fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
        // `handshake()` consumes the MidHandshake by value, so temporarily
        // swap `Invalid` into `self`; on WouldBlock the returned handshake
        // is put back so the future can be polled again.
        match std::mem::replace(self, WsConnect::Invalid) {
            WsConnect::Handshake(handshake) => match handshake.handshake() {
                Ok((stream, _)) => Ok(futures::Async::Ready(stream)),
                Err(tungstenite::HandshakeError::Interrupted(handshake)) => {
                    *self = WsConnect::Handshake(handshake);
                    Ok(futures::Async::NotReady)
                }
                Err(tungstenite::HandshakeError::Failure(err)) => poll_from_tungstenite_error(err),
            },
            // Only reachable if poll is called again after Ready/Failure.
            WsConnect::Invalid => panic!("future polled after completion"),
        }
    }
}
/// A wrapper around an inner I/O object
pub enum Io<S> {
    Raw(S),
    WebSocket {
        inner: tungstenite::WebSocket<S>,
        // Remainder of the last binary websocket message not yet consumed
        // by `read`; refilled in `poll_read`.
        pending_read: std::io::Cursor<Vec<u8>>,
    },
}
impl<S> std::io::Read for Io<S>
where
    S: tokio::io::AsyncRead + std::io::Write,
{
    /// Blocking-style adapter over `poll_read`: `NotReady` is surfaced as
    /// a `WouldBlock` error.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        use tokio::io::AsyncRead;
        if let futures::Async::Ready(read) = self.poll_read(buf)? {
            Ok(read)
        } else {
            Err(std::io::ErrorKind::WouldBlock.into())
        }
    }
}
impl<S> tokio::io::AsyncRead for Io<S>
where
    S: tokio::io::AsyncRead + std::io::Write,
{
    /// Raw streams delegate directly; websocket streams serve bytes from
    /// the buffered remainder of the previous binary message, refilling it
    /// by reading further messages when exhausted.
    fn poll_read(&mut self, buf: &mut [u8]) -> futures::Poll<usize, std::io::Error> {
        use std::io::Read;
        let (inner, pending_read) = match self {
            Io::Raw(stream) => return stream.poll_read(buf),
            Io::WebSocket {
                inner,
                pending_read,
            } => (inner, pending_read),
        };
        if buf.is_empty() {
            return Ok(futures::Async::Ready(0));
        }
        loop {
            // Serve leftover bytes from the current message first.
            if pending_read.position() != pending_read.get_ref().len() as u64 {
                return Ok(futures::Async::Ready(
                    pending_read.read(buf).expect("Cursor::read cannot fail"),
                ));
            }
            // Buffer empty: pull the next websocket message. Non-binary
            // messages are logged and skipped; WouldBlock surfaces as
            // NotReady via poll_from_tungstenite_error.
            let message = match inner.read_message() {
                Ok(tungstenite::Message::Binary(b)) => b,
                Ok(message) => {
                    log::warn!("ignoring unexpected message: {:?}", message);
                    continue;
                }
                Err(err) => return poll_from_tungstenite_error(err),
            };
            *pending_read = std::io::Cursor::new(message);
        }
    }
}
impl<S> std::io::Write for Io<S>
where
    S: std::io::Read + tokio::io::AsyncWrite,
{
    /// Blocking-style adapter over `poll_write`; `NotReady` becomes `WouldBlock`.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        use tokio::io::AsyncWrite;
        if let futures::Async::Ready(written) = self.poll_write(buf)? {
            Ok(written)
        } else {
            Err(std::io::ErrorKind::WouldBlock.into())
        }
    }
    /// Blocking-style adapter over `poll_flush`; `NotReady` becomes `WouldBlock`.
    fn flush(&mut self) -> std::io::Result<()> {
        use tokio::io::AsyncWrite;
        if let futures::Async::Ready(()) = self.poll_flush()? {
            Ok(())
        } else {
            Err(std::io::ErrorKind::WouldBlock.into())
        }
    }
}
impl<S> tokio::io::AsyncWrite for Io<S>
where
    S: std::io::Read + tokio::io::AsyncWrite,
{
    /// Raw: shut down the underlying stream. Websocket: send a close frame.
    fn shutdown(&mut self) -> futures::Poll<(), std::io::Error> {
        let inner = match self {
            Io::Raw(stream) => return stream.shutdown(),
            Io::WebSocket { inner, .. } => inner,
        };
        match inner.close(None) {
            Ok(()) => Ok(futures::Async::Ready(())),
            Err(err) => poll_from_tungstenite_error(err),
        }
    }
    /// Raw: delegate. Websocket: wrap the whole buffer into one binary
    /// message; claims the full `buf.len()` on success.
    fn poll_write(&mut self, buf: &[u8]) -> futures::Poll<usize, std::io::Error> {
        let inner = match self {
            Io::Raw(stream) => return stream.poll_write(buf),
            Io::WebSocket { inner, .. } => inner,
        };
        if buf.is_empty() {
            return Ok(futures::Async::Ready(0));
        }
        let message = tungstenite::Message::Binary(buf.to_owned());
        match inner.write_message(message) {
            Ok(()) => Ok(futures::Async::Ready(buf.len())),
            Err(tungstenite::Error::SendQueueFull(_)) => Ok(futures::Async::NotReady), // Hope client calls `poll_flush()` before retrying
            Err(err) => poll_from_tungstenite_error(err),
        }
    }
    /// Raw: delegate. Websocket: drain tungstenite's pending send queue.
    fn poll_flush(&mut self) -> futures::Poll<(), std::io::Error> {
        let inner = match self {
            Io::Raw(stream) => return stream.poll_flush(),
            Io::WebSocket { inner, .. } => inner,
        };
        match inner.write_pending() {
            Ok(()) => Ok(futures::Async::Ready(())),
            Err(err) => poll_from_tungstenite_error(err),
        }
    }
}
/// Maps a tungstenite error into poll terms: a WouldBlock I/O error means
/// "not ready yet"; any other I/O error passes through; everything else is
/// wrapped as `ErrorKind::Other`.
fn poll_from_tungstenite_error<T>(err: tungstenite::Error) -> futures::Poll<T, std::io::Error> {
    match err {
        tungstenite::Error::Io(ref err) if err.kind() == std::io::ErrorKind::WouldBlock => {
            Ok(futures::Async::NotReady)
        }
        tungstenite::Error::Io(err) => Err(err),
        err => Err(std::io::Error::new(std::io::ErrorKind::Other, err)),
    }
}
|
use fuzzcheck::mutators::integer::U8Mutator;
use fuzzcheck::mutators::vector::VecMutator;
// Exercises a nested Vec<Vec<u8>> mutator through fuzzcheck's generic
// mutator test harness (arbitrary + mutate + unmutate round-trips).
#[test]
fn test_vector_mutator() {
    let m = VecMutator::new(VecMutator::new(U8Mutator::default(), 0..=usize::MAX), 0..=usize::MAX);
    // Arguments are the harness's complexity budgets / iteration counts —
    // see fuzzcheck's testing_utilities docs for their exact meaning.
    fuzzcheck::mutators::testing_utilities::test_mutator(m, 500.0, 500.0, false, true, 100, 150);
}
// #[test]
// fn test_vector_explore() {
// // let m = VecMutator::new(VecMutator::new(U8Mutator::default(), 0..=5), 0..=5);
// let m = VecMutator::new(<Option<u16>>::default_mutator(), 0..=32); //VecMutator::new(VecMutator::new(U8Mutator::default(), 0..=5), 0..=10);
// let mut step = m.default_arbitrary_step();
// // let (x, cplx) = m.ordered_arbitrary(&mut step, 1000.0).unwrap();
// // println!("{:?}", x);
// // println!("cplx: {}", cplx);
// let mut sum = 0;
// let mut total = 0;
// for _ in 0..10 {
// if let Some((mut x, _cplx)) = m.ordered_arbitrary(&mut step, 1000.0) {
// assert!((0..=32).contains(&x.len()));
// // println!("{:?}", x);
// // println!("cplx: {}", cplx);
// let mut cache = m.validate_value(&x).unwrap();
// let mut step = m.default_mutation_step(&x, &cache);
// let mut all = HashSet::new();
// for i in 0..10_000 {
// total += 1;
// if let Some((token, _cplx)) = m.ordered_mutate(&mut x, &mut cache, &mut step, 1000.) {
// all.insert(x.clone());
// // println!("{:?}", x);
// // println!("\t{:?}", x);
// assert!((0..=32).contains(&x.len()), "{}", x.len());
// m.unmutate(&mut x, &mut cache, token);
// assert!((0..=32).contains(&x.len()), "{}", x.len());
// } else {
// println!("!!!!!!! STOP at {} !!!!!!", i);
// break;
// }
// // let (token, _) = m.random_mutate(&mut x, &mut cache, 1000.);
// // assert!((0..=32).contains(&x.len()));
// // all.insert(x.clone());
// // m.unmutate(&mut x, &mut cache, token);
// }
// sum += all.len();
// println!("===");
// } else {
// break;
// }
// }
// println!("{}", sum as f64 / total as f64);
// }
// #[test]
// fn test_vector_explore2() {
// let m = VecMutator::new(<()>::default_mutator(), 0..=usize::MAX); //VecMutator::new(VecMutator::new(U8Mutator::default(), 0..=5), 0..=10);
// let mut step = m.default_arbitrary_step();
// for j in 0..36 {
// if let Some((mut x, _cplx)) = m.ordered_arbitrary(&mut step, 32.0) {
// println!("{} {:?}", x.len(), x);
// let mut cache = m.validate_value(&x).unwrap();
// let mut step = m.default_mutation_step(&x, &cache);
// for i in 0..40 {
// if let Some((token, _cplx)) = m.ordered_mutate(&mut x, &mut cache, &mut step, 32.) {
// println!("{} {:?}", x.len(), x);
// m.unmutate(&mut x, &mut cache, token);
// } else {
// println!("!!!!!!! STOP at {} !!!!!!", i);
// break;
// }
// }
// println!("===");
// } else {
// println!("no more arbitraries!! {}", j);
// break;
// }
// }
// }
|
use std::collections::HashMap;
// c == 0 -> pay exactly (no carry pending)
// c == 1 -> a carry of one was produced by the less significant digit
/// Minimum number of coins/notes needed to settle digits `a[..=i]` given a
/// pending carry `c`. `i == None` means we ran past the most significant
/// digit. Results are memoised on `(i, c)`.
fn solve(
    a: &[usize],
    i: Option<usize>,
    c: usize,
    memo: &mut HashMap<(usize, usize), usize>,
) -> usize {
    let idx = match i {
        // Past the top digit: only a possible final carry note remains.
        None => return c,
        Some(idx) => idx,
    };
    if let Some(&cached) = memo.get(&(idx, c)) {
        return cached;
    }
    let digit = a[idx] + c;
    let next = idx.checked_sub(1);
    let best = if digit == 10 {
        // Paying 10 here is exactly one carry into the next digit up.
        solve(a, next, 1, memo)
    } else {
        // Either pay `digit` outright, or overpay one note up and take
        // `10 - digit` in change, carrying one upward.
        let pay_exact = digit + solve(a, next, 0, memo);
        let overpay = (10 - digit) + solve(a, next, 1, memo);
        pay_exact.min(overpay)
    };
    memo.insert((idx, c), best);
    best
}
fn main() {
    let stdin = std::io::stdin();
    let mut rd = ProconReader::new(stdin.lock());
    // Read N as a string so each decimal digit can be processed separately.
    let n: String = rd.get();
    let a: Vec<usize> = n.chars().map(|c| c as usize - '0' as usize).collect();
    let mut memo = HashMap::new();
    // Start the DP at the least significant digit with no pending carry.
    println!("{}", solve(&a, Some(a.len() - 1), 0, &mut memo));
}
// Minimal whitespace-token reader for competitive-programming input.
pub struct ProconReader<R: std::io::Read> {
    reader: R,
}
impl<R: std::io::Read> ProconReader<R> {
    /// Wraps a byte source for whitespace-delimited token reading.
    pub fn new(reader: R) -> Self {
        Self { reader }
    }
    /// Reads the next whitespace-delimited token and parses it as `T`.
    /// Panics on an I/O error or if the token fails to parse.
    pub fn get<T: std::str::FromStr>(&mut self) -> T {
        use std::io::Read;
        let mut token = Vec::new();
        for byte in self.reader.by_ref().bytes().map(|b| b.unwrap()) {
            let is_delim = byte == b' ' || byte == b'\n' || byte == b'\r';
            if is_delim {
                if token.is_empty() {
                    continue; // still skipping leading whitespace
                }
                break; // token complete (delimiter consumed, as before)
            }
            token.push(byte);
        }
        std::str::from_utf8(&token)
            .unwrap()
            .parse()
            .ok()
            .expect("Parse Error.")
    }
}
|
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    // 56 bytes (0x38) of padding so opamp1_csr lands at offset 0x38,
    // matching the doc comments below.
    _reserved0: [u8; 56usize],
    #[doc = "0x38 - OPAMP1 control register"]
    pub opamp1_csr: OPAMP1_CSR,
    #[doc = "0x3c - OPAMP2 control register"]
    pub opamp2_csr: OPAMP2_CSR,
    #[doc = "0x40 - OPAMP3 control register"]
    pub opamp3_csr: OPAMP3_CSR,
    #[doc = "0x44 - OPAMP4 control register"]
    pub opamp4_csr: OPAMP4_CSR,
}
// NOTE(review): svd2rust-style generated register declarations (the doc
// strings reference the svd2rust API) — prefer regenerating over hand-editing.
#[doc = "OPAMP2 control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [opamp2_csr](opamp2_csr) module"]
pub type OPAMP2_CSR = crate::Reg<u32, _OPAMP2_CSR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _OPAMP2_CSR;
#[doc = "`read()` method returns [opamp2_csr::R](opamp2_csr::R) reader structure"]
impl crate::Readable for OPAMP2_CSR {}
#[doc = "`write(|w| ..)` method takes [opamp2_csr::W](opamp2_csr::W) writer structure"]
impl crate::Writable for OPAMP2_CSR {}
#[doc = "OPAMP2 control register"]
pub mod opamp2_csr;
#[doc = "OPAMP3 control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [opamp3_csr](opamp3_csr) module"]
pub type OPAMP3_CSR = crate::Reg<u32, _OPAMP3_CSR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _OPAMP3_CSR;
#[doc = "`read()` method returns [opamp3_csr::R](opamp3_csr::R) reader structure"]
impl crate::Readable for OPAMP3_CSR {}
#[doc = "`write(|w| ..)` method takes [opamp3_csr::W](opamp3_csr::W) writer structure"]
impl crate::Writable for OPAMP3_CSR {}
#[doc = "OPAMP3 control register"]
pub mod opamp3_csr;
#[doc = "OPAMP4 control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [opamp4_csr](opamp4_csr) module"]
pub type OPAMP4_CSR = crate::Reg<u32, _OPAMP4_CSR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _OPAMP4_CSR;
#[doc = "`read()` method returns [opamp4_csr::R](opamp4_csr::R) reader structure"]
impl crate::Readable for OPAMP4_CSR {}
#[doc = "`write(|w| ..)` method takes [opamp4_csr::W](opamp4_csr::W) writer structure"]
impl crate::Writable for OPAMP4_CSR {}
#[doc = "OPAMP4 control register"]
pub mod opamp4_csr;
#[doc = "OPAMP1 control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [opamp1_csr](opamp1_csr) module"]
pub type OPAMP1_CSR = crate::Reg<u32, _OPAMP1_CSR>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _OPAMP1_CSR;
#[doc = "`read()` method returns [opamp1_csr::R](opamp1_csr::R) reader structure"]
impl crate::Readable for OPAMP1_CSR {}
#[doc = "`write(|w| ..)` method takes [opamp1_csr::W](opamp1_csr::W) writer structure"]
impl crate::Writable for OPAMP1_CSR {}
#[doc = "OPAMP1 control register"]
pub mod opamp1_csr;
|
/*
* @lc app=leetcode.cn id=104 lang=rust
*
* [104] 二叉树的最大深度
*
* https://leetcode-cn.com/problems/maximum-depth-of-binary-tree/description/
*
* algorithms
* Easy (71.39%)
* Likes: 433
* Dislikes: 0
* Total Accepted: 115.4K
* Total Submissions: 160.4K
* Testcase Example: '[3,9,20,null,null,15,7]'
*
* 给定一个二叉树,找出其最大深度。
*
* 二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
*
* 说明: 叶子节点是指没有子节点的节点。
*
* 示例:
* 给定二叉树 [3,9,20,null,null,15,7],
*
* 3
* / \
* 9 20
* / \
* 15 7
*
* 返回它的最大深度 3 。
*
*/
// @lc code=start
// Definition for a binary tree node.
// #[derive(Debug, PartialEq, Eq)]
// pub struct TreeNode {
// pub val: i32,
// pub left: Option<Rc<RefCell<TreeNode>>>,
// pub right: Option<Rc<RefCell<TreeNode>>>,
// }
//
// impl TreeNode {
// #[inline]
// pub fn new(val: i32) -> Self {
// TreeNode {
// val,
// left: None,
// right: None
// }
// }
// }
use std::cell::RefCell;
use std::rc::Rc;
impl Solution {
    /// Returns the maximum depth of a binary tree: the number of nodes on
    /// the longest path from the root down to the farthest leaf. An empty
    /// tree has depth 0.
    pub fn max_depth(root: Option<Rc<RefCell<TreeNode>>>) -> i32 {
        // Depth of the subtree rooted at `node`. Borrowing once per node
        // replaces the previous version's repeated borrow()+clone() of
        // each child, and handles the None root without a special case.
        fn depth(node: &Option<Rc<RefCell<TreeNode>>>) -> i32 {
            match node {
                None => 0,
                Some(n) => {
                    let n = n.borrow();
                    1 + std::cmp::max(depth(&n.left), depth(&n.right))
                }
            }
        }
        depth(&root)
    }
}
// @lc code=end
|
use itertools::Itertools;
use std::fs::File;
use std::path::Path;
use regex::{Captures, Regex};
use std::io::{BufRead, BufReader, Error, ErrorKind, Read};
use std::ops::{Add, Mul};
/// AoC 2020 day 2 (part 2): counts password rows in `day2.txt` matching the
/// policy "exactly one of positions min/max holds the letter" (1-indexed).
/// Rows are formatted like `1-3 a: abcde`.
pub fn day_two() -> Result<(), Error> {
    // Captures: 1 = first position, 2 = second position, 3 = letter, 4 = password.
    let re = Regex::new(r"^(\d+)-(\d+)\s*(\w):\s(\w+)$").unwrap();
    // use `vec` for whatever
    let br = BufReader::new(File::open("day2.txt")?);
    let rows = br
        .lines()
        .map(|l| l.unwrap())
        .filter(|row| {
            let mut m = re.captures_iter(&*row);
            match m.next() {
                Some(cap) => {
                    // Names kept from part 1; here they are the two 1-based
                    // character positions, not a count range.
                    let min = (&cap[1]).parse::<usize>().unwrap();
                    let max = (&cap[2]).parse::<usize>().unwrap();
                    let letter = &cap[3];
                    let text = &cap[4];
                    // second
                    let c1 = text.chars().nth(min - 1).unwrap();
                    let c2 = text.chars().nth(max - 1).unwrap();
                    // XOR: valid iff exactly one position matches.
                    (c1 == letter.chars().next().unwrap()) ^ (c2 == letter.chars().next().unwrap())
                    // first problem
                    // let count = text.matches(letter).count();
                    //count >= min && count <= max
                }
                _ => false,
            }
        })
        .count();
    dbg!(rows);
    // for (i, row) in rows.enumerate() {
    //
    // // println!("({})", el)
    // for cap in re.captures_iter(&*row) {
    // let min = (&cap[1]).parse::<usize>().unwrap();
    // let max = (&cap[2]).parse::<usize>().unwrap();
    // let letter =&cap[3];
    // let text =&cap[4];
    //
    // let count = text.matches(letter).count();
    // if count >= min && count <= max {
    //
    // }
    //
    // println!(": {} Day: {} Year: {} {} {}", &cap[1], &cap[2], &cap[3], &cap[4], count);
    // }
    // }
    Ok(())
}
|
use std::ops::{Add, Sub, Mul, Div, Neg};
#[derive(Clone,Copy,Debug)]
pub struct Vec3 {
    pub x: f32,
    pub y: f32,
    pub z: f32,
}
impl Vec3 {
    /// Builds a vector from its three components.
    pub fn new(x: f32, y: f32, z: f32) -> Vec3 {
        Vec3 { x, y, z }
    }
    /// Squared Euclidean length — avoids the sqrt when only comparing magnitudes.
    pub fn squared_length(&self) -> f32 {
        let Vec3 { x, y, z } = *self;
        x * x + y * y + z * z
    }
    /// Euclidean length of the vector.
    pub fn length(&self) -> f32 {
        self.squared_length().sqrt()
    }
}
impl Add for Vec3 {
type Output = Vec3;
fn add(self, rhs: Vec3) -> Vec3{
Vec3 { x: self.x + rhs.x,
y: self.y + rhs.y,
z: self.z + rhs.z,
}
}
}
impl Sub for Vec3 {
type Output = Vec3;
fn sub(self, rhs: Vec3) -> Vec3{
Vec3 { x: self.x - rhs.x,
y: self.y - rhs.y,
z: self.z - rhs.z,
}
}
}
impl Mul<f32> for Vec3 {
type Output = Vec3;
fn mul(self, rhs: f32) -> Vec3 {
Vec3 { x: self.x * rhs,
y: self.y * rhs,
z: self.z * rhs,
}
}
}
impl Mul<Vec3> for f32 {
type Output = Vec3;
fn mul(self, rhs: Vec3) -> Vec3 {
Vec3 { x: rhs.x * self,
y: rhs.y * self,
z: rhs.z * self,
}
}
}
impl Div<f32> for Vec3 {
type Output = Vec3;
fn div(self, rhs: f32) -> Vec3 {
Vec3 { x: self.x / rhs,
y: self.y / rhs,
z: self.z / rhs,
}
}
}
impl Div<Vec3> for f32 {
    type Output = Vec3;
    /// Component-wise scalar-by-vector division: `s / v` yields
    /// `(s/v.x, s/v.y, s/v.z)`.
    ///
    /// Fixed: this previously computed `v.x / s` etc. (vector divided by
    /// scalar), apparently pattern-copied from the commutative
    /// `Mul<Vec3> for f32` impl — but division is not commutative.
    fn div(self, rhs: Vec3) -> Vec3 {
        Vec3 { x: self / rhs.x,
               y: self / rhs.y,
               z: self / rhs.z,
        }
    }
}
impl Neg for Vec3 {
    type Output = Vec3;
    /// Component-wise negation.
    fn neg(self) -> Self::Output {
        let Vec3 { x, y, z } = self;
        Vec3 { x: -x, y: -y, z: -z }
    }
}
// finctions working on Vec3
pub fn unit_vector(v: Vec3) -> Vec3 {
let len = v.length();
Vec3 {x: v.x/len,
y: v.y/len,
z: v.z/len
}
}
pub fn dot(v1: Vec3, v2: Vec3) -> f32 {
v1.x*v2.x + v1.y*v2.y + v1.z*v2.z
}
pub fn cross(v1: Vec3, v2: Vec3) -> Vec3 {
Vec3 { x: v1.y*v2.z - v1.z*v2.y,
y: v1.z*v2.x - v1.x*v2.z,
z: v1.x*v2.y - v1.y*v2.x,
}
}
pub fn mul_component(v1: Vec3, v2: Vec3) -> Vec3 {
Vec3 { x: v1.x*v2.x,
y: v1.y*v2.y,
z: v1.z*v2.z,
}
}
// A ray in 3D space: an origin plus a (not necessarily unit) direction.
#[derive(Clone,Copy,Debug)]
pub struct Ray {
    pub origin: Vec3,
    pub direction: Vec3,
}
impl Ray {
    /// Builds a ray from an origin point and a direction vector.
    pub fn new(origin: Vec3, direction: Vec3) -> Ray {
        Ray { origin, direction }
    }
    /// Point reached after travelling `t` direction-lengths from the origin.
    pub fn point_at_parameter(&self, t: f32) -> Vec3 {
        let offset = self.direction * t;
        self.origin + offset
    }
}
#[cfg(test)]
mod tests {
    use super::Vec3;
    use super::Ray;
    // Construction plus the Add and Mul<f32> operators.
    #[test]
    fn vec3_creation() {
        let v1 = super::Vec3::new(1.0,2.0,3.0);
        assert_eq!(v1.x, 1.0);
        assert_eq!(v1.y, 2.0);
        assert_eq!(v1.z, 3.0);
        let v2 = v1 + super::Vec3::new(1.0, 1.0, 1.0);
        assert_eq!(v2.x, 2.0);
        assert_eq!(v2.y, 3.0);
        assert_eq!(v2.z, 4.0);
        let v3 = v1 * 2.0;
        assert_eq!(v3.x, 2.0);
        assert_eq!(v3.y, 4.0);
        assert_eq!(v3.z, 6.0);
    }
    use super::unit_vector;
    use super::cross;
    // unit_vector normalises; cross is anti-commutative (x × y = z, y × x = -z).
    #[test]
    fn vec3_functions() {
        let v1 = Vec3::new(3.0,0.0,0.0);
        let v2 = unit_vector(v1);
        assert_eq!(v2.x, 1.0);
        assert_eq!(v2.y, 0.0);
        assert_eq!(v2.z, 0.0);
        let v1 = Vec3::new(1.0,0.0,0.0);
        let v2 = Vec3::new(0.0,1.0,0.0);
        let v3 = cross(v1, v2);
        assert_eq!(v3.x, 0.0);
        assert_eq!(v3.y, 0.0);
        assert_eq!(v3.z, 1.0);
        let v3 = cross(v2, v1);
        assert_eq!(v3.x, 0.0);
        assert_eq!(v3.y, 0.0);
        assert_eq!(v3.z, -1.0);
    }
    // point_at_parameter: origin + t * direction.
    #[test]
    fn ray() {
        let r = Ray::new(Vec3::new(1.0,2.0,3.0), Vec3::new(1.0,1.0,1.0));
        let p = r.point_at_parameter(1.0);
        assert_eq!(p.x, 2.0);
        assert_eq!(p.y, 3.0);
        assert_eq!(p.z, 4.0);
        let p = r.point_at_parameter(1.5);
        assert_eq!(p.x, 2.5);
        assert_eq!(p.y, 3.5);
        assert_eq!(p.z, 4.5);
    }
}
|
//! A full-fledged tool to read data from Electres Plus-485 energy meters.
#[macro_use]
extern crate error_chain;
extern crate clap;
pub mod errors;
pub mod constants;
pub mod data;
mod num_utils;
|
#![deny(warnings)]
mod add_two_numbers;
mod combine;
mod count_bits;
mod count_substrings;
mod daily_temperatures;
mod generate_parenthesis;
mod increasing_bst;
mod inorder_traversal;
mod invert_tree;
mod is_same_tree;
mod is_symmetric;
mod longest_substring_without_repeating_characters;
mod maximum_depth_of_binary_tree;
mod merge_two_binary_trees;
mod permutations;
mod queue_reconstruction_by_height;
mod reverse_list;
mod single_number;
mod subsets;
mod top_k_frequent;
mod two_sum;
mod valid_parentheses;
|
/**
* Copyright © 2019
* Sami Shalayel <sami.shalayel@tutamail.com>,
* Carl Schwan <carl@carlschwan.eu>,
* Daniel Freiermuth <d_freiermu14@cs.uni-kl.de>
*
* This work is free. You can redistribute it and/or modify it under the
* terms of the Do What The Fuck You Want To Public License, Version 2,
* as published by Sam Hocevar. See the LICENSE file for more details.
*
* This program is free software. It comes without any warranty, to
* the extent permitted by applicable law. You can redistribute it
* and/or modify it under the terms of the Do What The Fuck You Want
* To Public License, Version 2, as published by Sam Hocevar. See the LICENSE
* file for more details. **/
use crate::camera::*;
use crate::helpers::*;
use crate::ray::Ray;
use crate::world::World;
use image::{DynamicImage, GenericImage};
use na::{Rotation3, Unit, Vector3};
use std::f64;
use indicatif::ProgressBar;
// Pinhole-style camera described by image size, Euler orientation
// (roll/pitch/yaw, radians), position, and vertical view angle (degrees —
// it is divided by 180 with a pi/2 factor in `render`).
pub struct EquilinearCamera {
    pub height: u32,
    pub width: u32,
    pub roll: f64,
    pub pitch: f64,
    pub yaw: f64,
    pub pos: Vector3<f64>,
    pub vertical_viewangle: f64,
}
impl EquilinearCamera {
    /// Orients the camera so it faces `object` from its current position:
    /// yaw is reset to zero, then roll/pitch are derived from the polar
    /// angles of the offset vector. (The explicit `-> ()` return type was
    /// removed; it is Rust's implicit unit return.)
    pub fn point_at(&mut self, object: Vector3<f64>) {
        self.yaw = 0.0;
        let (vert_angle, hort_angle) = vector2polar(&(object - self.pos));
        // ±π/2 offsets convert vector2polar's angle convention into the
        // camera's roll/pitch zero — NOTE(review): confirm against
        // `vector2polar`'s definition in helpers.
        self.roll = vert_angle - f64::consts::FRAC_PI_2;
        self.pitch = -hort_angle + f64::consts::FRAC_PI_2;
    }
}
impl Camera for EquilinearCamera {
    /// Ray-traces the scene into an RGB image: one primary ray per pixel,
    /// directions rotated by the camera's Euler angles. Shows a progress
    /// bar when `progress` is true.
    fn render(&self, world: &World, progress: bool) -> DynamicImage {
        // algorithm for direction taken from https://www.scratchapixel.com/code.php?id=3&origin=/lessons/3d-basic-rendering/introduction-to-ray-tracing
        let mut img = DynamicImage::new_rgb8(self.width, self.height);
        let inv_width = 1.0 / self.width as f64;
        let inv_height = 1.0 / self.height as f64;
        let aspectratio = self.width as f64 / self.height as f64;
        // Half-height of the image plane at unit distance, from the
        // vertical view angle (degrees).
        let vertical_half_canvas_size =
            (f64::consts::FRAC_PI_2 * self.vertical_viewangle / 180.0).tan();
        let rot_matrix = Rotation3::from_euler_angles(self.roll, self.pitch, self.yaw);
        let bar = if progress {
            Some(ProgressBar::new((self.width * self.height).into()))
        } else {
            None
        };
        for x in 0..self.width {
            for y in 0..self.height {
                // Map the pixel centre (+0.5) into [-1, 1] canvas space,
                // scaled by the canvas size (and aspect ratio horizontally).
                let xx = (2.0 * ((x as f64 + 0.5) * inv_width) - 1.0)
                    * vertical_half_canvas_size
                    * aspectratio;
                let yy = (2.0 * ((y as f64 + 0.5) * inv_height) - 1.) * vertical_half_canvas_size;
                let dir = rot_matrix * Vector3::new(xx, yy, 1.0).normalize();
                let ray = Ray {
                    dir: Unit::new_normalize(dir),
                    start: self.pos,
                };
                let rgb = world.color(ray, 10.0);
                // Flip vertically: image rows grow downward, yy grows upward.
                img.put_pixel(x, self.height - y - 1, rgb);
                if let Some(bar) = &bar {
                    bar.inc(1);
                }
            }
        }
        if let Some(bar) = bar {
            bar.finish();
        }
        img
    }
}
|
use crate::*;
use byteorder::{LittleEndian, ReadBytesExt};
// A scalar scaled into a human-readable magnitude plus its unit suffix
// ('B', 'K', 'M', 'G' or 'T') as produced by identify_unit().
struct SizeUnit {
    value: f64,
    unit: char,
}
// Progress snapshot used by print_updates(): bytes received so far,
// total bytes, and the completion percentage.
struct UpdateUnit {
    partial: SizeUnit,
    total: SizeUnit,
    percent: f64,
}
/// Overwrites the current terminal line with a transfer-progress summary of
/// the form ` => {received}{unit} of {total}{unit} ({percent}%)`.
///
/// `received` is the number of bytes transferred so far; the total comes from
/// `header.filesize`. Flushes stdout so the `\r`-rewritten line is visible.
pub fn print_updates(received: f64, header: &TeleportInit) {
    // `received` is already f64; only the u64 filesize needs converting.
    let units = update_units(received, header.filesize as f64);
    print!(
        "\r => {:>8.03}{} of {:>8.03}{} ({:02.02}%)",
        units.partial.value, units.partial.unit, units.total.value, units.total.unit, units.percent
    );
    io::stdout().flush().unwrap();
}
/// Builds an `UpdateUnit` from a partial and a total byte count, scaling each
/// into a human-readable unit and computing the completion percentage.
fn update_units(partial: f64, total: f64) -> UpdateUnit {
    // Both arguments are already f64 — no casts needed.
    let percent = (partial / total) * 100f64;
    UpdateUnit {
        partial: identify_unit(partial),
        total: identify_unit(total),
        percent,
    }
}
/// Scales `value` (bytes) down by powers of 1024 until it is at most 1024,
/// pairing it with the matching unit suffix. Stops at 'T' regardless of size.
fn identify_unit(mut value: f64) -> SizeUnit {
    const UNITS: [char; 5] = ['B', 'K', 'M', 'G', 'T'];
    let mut count = 0;
    // Divide while the value still exceeds one unit step and a larger
    // suffix exists; equivalent to the original loop/break structure.
    while value / 1024.0 > 1.0 && count < UNITS.len() - 1 {
        value /= 1024.0;
        count += 1;
    }
    SizeUnit {
        value,
        unit: UNITS[count],
    }
}
/// Picks a power-of-two chunk size (starting at 1024) such that reading the
/// file takes at most ~150 chunks. Small files always use 1024-byte chunks.
fn gen_chunk_size(file_size: u64) -> usize {
    let mut chunk: u64 = 1024;
    // Double until the file fits in 150 chunks or fewer.
    while file_size / chunk > 150 {
        chunk *= 2;
    }
    chunk as usize
}
/// Computes the BLAKE3 hash of the whole file at `filename`, reading it in
/// chunks sized by `gen_chunk_size`, and rewinds nothing (the file handle is
/// local). Returns any I/O error encountered while opening or reading.
pub fn calc_file_hash(filename: String) -> Result<Hash, Error> {
    let mut hasher = blake3::Hasher::new();
    let mut file = File::open(filename)?;
    let meta = file.metadata()?;
    let mut buf = vec![0u8; gen_chunk_size(meta.len())];
    file.seek(SeekFrom::Start(0))?;
    loop {
        // Read a chunk of the file; `read` may return fewer bytes than
        // the buffer holds, especially on the final chunk.
        let len = file.read(&mut buf)?;
        if len == 0 {
            break;
        }
        // Hash only the bytes actually read. Hashing the whole buffer (as
        // the previous version did) would mix stale bytes from earlier
        // reads into the digest whenever a read came up short.
        hasher.update(&buf[..len]);
    }
    file.seek(SeekFrom::Start(0))?;
    Ok(hasher.finalize())
}
/// Builds a `TeleportDelta` for `file`: the total size, the chunk size used,
/// the BLAKE3 hash of the whole file, and one BLAKE3 hash per chunk.
/// The file is rewound before and after reading.
pub fn calc_delta_hash(mut file: &File) -> Result<TeleportDelta, Error> {
    let meta = file.metadata()?;
    let file_size = meta.len();
    file.seek(SeekFrom::Start(0))?;
    let mut buf = vec![0u8; gen_chunk_size(meta.len())];
    let mut hasher = blake3::Hasher::new();
    let mut whole_hasher = blake3::Hasher::new();
    let mut delta_csum = Vec::<Hash>::new();
    loop {
        // Read a chunk of the file; `read` may return fewer bytes than
        // requested, especially on the final chunk.
        let len = file.read(&mut buf)?;
        if len == 0 {
            break;
        }
        // Hash only the bytes actually read; the tail of `buf` holds stale
        // data from the previous read (this was a bug before).
        let chunk = &buf[..len];
        hasher.update(chunk);
        delta_csum.push(hasher.finalize());
        hasher.reset();
        whole_hasher.update(chunk);
    }
    let out = TeleportDelta {
        size: file_size as u64,
        // delta_size reports the chunk size used for the per-chunk hashes.
        delta_size: buf.len() as u64,
        csum: whole_hasher.finalize(),
        delta_csum,
    };
    file.seek(SeekFrom::Start(0))?;
    Ok(out)
}
/// One-byte additive checksum: the sum of all bytes reduced modulo 256.
fn generate_checksum(input: &[u8]) -> u8 {
    input.iter().fold(0u8, |acc, &byte| acc.wrapping_add(byte))
}
/// Verifies that the last byte of `input` equals the additive checksum of all
/// preceding bytes (sum modulo 256).
///
/// # Errors
/// Returns `ErrorKind::InvalidData` when `input` is too short to contain both
/// a payload and a checksum byte, or when the checksum does not match.
fn validate_checksum(input: &[u8]) -> Result<(), Error> {
    if input.len() < 2 {
        return Err(Error::new(
            ErrorKind::InvalidData,
            "Vector is too short to validate checksum",
        ));
    }
    let (payload, tail) = input.split_at(input.len() - 1);
    let computed = payload.iter().map(|&b| b as u64).sum::<u64>() as u8;
    if computed == tail[0] {
        Ok(())
    } else {
        Err(Error::new(
            ErrorKind::InvalidData,
            "Teleport checksum is invalid",
        ))
    }
}
impl TeleportInit {
    /// Creates an empty transfer header carrying the compiled-in protocol and
    /// version strings; all file-related fields are zeroed.
    pub fn new() -> TeleportInit {
        TeleportInit {
            protocol: PROTOCOL.to_string(),
            version: VERSION.to_string(),
            filename: "".to_string(),
            filenum: 0,
            totalfiles: 0,
            filesize: 0,
            chmod: 0,
            overwrite: false,
        }
    }
    /// Serializes the header for the wire: a little-endian u32 total length,
    /// three NUL-terminated strings (protocol, version, filename), the
    /// numeric fields in little-endian, the overwrite flag as one byte, and
    /// a trailing one-byte additive checksum over everything before it.
    pub fn serialize(&self) -> Vec<u8> {
        let mut out = Vec::<u8>::new();
        let size: u32 = self.size() as u32 + 5; // sizeof(struct) + 1csum + 4len
        out.append(&mut size.to_le_bytes().to_vec());
        out.append(&mut self.protocol.clone().into_bytes());
        out.push(0);
        out.append(&mut self.version.clone().into_bytes());
        out.push(0);
        out.append(&mut self.filename.clone().into_bytes().to_vec());
        out.push(0);
        out.append(&mut self.filenum.to_le_bytes().to_vec());
        out.append(&mut self.totalfiles.to_le_bytes().to_vec());
        out.append(&mut self.filesize.to_le_bytes().to_vec());
        out.append(&mut self.chmod.to_le_bytes().to_vec());
        let bbyte = TeleportInit::bool_to_u8(self.overwrite);
        out.push(bbyte);
        // Checksum covers everything written so far, including the length word.
        out.push(generate_checksum(&out));
        out
    }
    /// Size in bytes of the serialized payload, excluding the leading length
    /// word and the trailing checksum byte (each NUL terminator counts as +1).
    pub fn size(&self) -> usize {
        let mut out: usize = 0;
        out += self.protocol.len() + 1;
        out += self.version.len() + 1;
        out += 8; // filenum
        out += 8; // totalfiles
        out += 8; // filesize
        out += self.filename.len() + 1;
        out += 4; // chmod
        out += 1; // overwrite
        out
    }
    // Encodes a bool as a single wire byte (1 = true, 0 = false).
    fn bool_to_u8(b: bool) -> u8 {
        if b {
            1
        } else {
            0
        }
    }
    // Reads a string from the front of `input`, stopping at the first byte
    // that is not printable ASCII or a space (the NUL terminator in practice).
    fn vec_to_string(input: &[u8]) -> String {
        let mut s: String = "".to_string();
        for i in input.iter() {
            let c: char = match (*i).try_into() {
                Ok(c) => c,
                Err(_) => break,
            };
            if c.is_ascii_graphic() || c == ' ' {
                s.push(c);
            } else {
                break;
            }
        }
        s
    }
    /// Parses a header produced by [`TeleportInit::serialize`], validating the
    /// trailing checksum and the announced length before decoding the fields.
    ///
    /// # Errors
    /// Returns `ErrorKind::InvalidData` when the checksum fails or when fewer
    /// bytes than the announced size were received.
    pub fn deserialize(&mut self, input: Vec<u8>) -> Result<(), Error> {
        validate_checksum(&input)?;
        let mut buf: &[u8] = &input;
        let size = buf.read_u32::<LittleEndian>().unwrap() as usize;
        if input.len() < size {
            return Err(Error::new(
                ErrorKind::InvalidData,
                "Not enough data received",
            ));
        }
        // Strings are laid out back to back after the 4-byte length word,
        // each followed by a NUL byte (hence the `+ 1` per string).
        let mut ofs = 4;
        self.protocol = TeleportInit::vec_to_string(&input[ofs..]);
        ofs += self.protocol.len() + 1;
        self.version = TeleportInit::vec_to_string(&input[ofs..]);
        ofs += self.version.len() + 1;
        self.filename = TeleportInit::vec_to_string(&input[ofs..]);
        ofs += self.filename.len() + 1;
        // The fixed-width numeric fields follow the strings.
        let mut buf: &[u8] = &input[ofs..];
        self.filenum = buf.read_u64::<LittleEndian>().unwrap();
        self.totalfiles = buf.read_u64::<LittleEndian>().unwrap();
        self.filesize = buf.read_u64::<LittleEndian>().unwrap();
        self.chmod = buf.read_u32::<LittleEndian>().unwrap();
        self.overwrite = buf.read_u8().unwrap() > 0;
        Ok(())
    }
}
impl Default for TeleportInit {
fn default() -> Self {
Self::new()
}
}
impl PartialEq for TeleportInit {
fn eq(&self, other: &Self) -> bool {
self.protocol == other.protocol
&& self.version == other.version
&& self.filename == other.filename
&& self.filenum == other.filenum
&& self.totalfiles == other.totalfiles
&& self.filesize == other.filesize
&& self.chmod == other.chmod
&& self.overwrite == other.overwrite
}
}
impl TryFrom<u8> for TeleportInitStatus {
    type Error = &'static str;
    /// Maps a wire byte back onto a `TeleportInitStatus` variant by comparing
    /// against each variant's discriminant; unknown values are rejected.
    fn try_from(v: u8) -> std::result::Result<Self, Self::Error> {
        match v {
            x if x == TeleportInitStatus::Proceed as u8 => Ok(TeleportInitStatus::Proceed),
            x if x == TeleportInitStatus::Overwrite as u8 => Ok(TeleportInitStatus::Overwrite),
            x if x == TeleportInitStatus::NoOverwrite as u8 => Ok(TeleportInitStatus::NoOverwrite),
            x if x == TeleportInitStatus::NoSpace as u8 => Ok(TeleportInitStatus::NoSpace),
            x if x == TeleportInitStatus::NoPermission as u8 => {
                Ok(TeleportInitStatus::NoPermission)
            }
            x if x == TeleportInitStatus::WrongVersion as u8 => {
                Ok(TeleportInitStatus::WrongVersion)
            }
            _ => Err("TeleportInitStatus is invalid"),
        }
    }
}
impl TeleportInitAck {
    /// Builds an ACK carrying `status` and the compiled-in version string,
    /// with no delta information attached.
    pub fn new(status: TeleportInitStatus) -> TeleportInitAck {
        TeleportInitAck {
            ack: status,
            version: VERSION.to_string(),
            delta: None,
        }
    }
    /// Serializes the ACK: one status byte, the NUL-terminated version
    /// string, an optional delta section (size u64, delta_size u64, 32-byte
    /// whole-file hash, then the per-chunk hashes), and a trailing one-byte
    /// additive checksum over everything before it.
    pub fn serialize(&self) -> Vec<u8> {
        let mut out = vec![self.ack as u8];
        out.append(&mut self.version.clone().into_bytes());
        out.push(0);
        match &self.delta {
            Some(d) => {
                out.append(&mut d.size.to_le_bytes().to_vec());
                out.append(&mut d.delta_size.to_le_bytes().to_vec());
                out.append(&mut d.csum.as_bytes().to_vec());
                out.append(&mut TeleportInitAck::csum_serial(&d.delta_csum));
            }
            None => {}
        };
        out.push(generate_checksum(&out));
        out
    }
    // Concatenates the raw 32-byte form of every per-chunk hash.
    fn csum_serial(input: &[Hash]) -> Vec<u8> {
        let mut out = Vec::<u8>::new();
        for i in input {
            out.append(&mut i.as_bytes().to_vec());
        }
        out
    }
    // Splits `input` into 32-byte groups and rebuilds one Hash per group.
    // Fails with `ErrorKind::InvalidData` when the length is not a multiple
    // of 32.
    fn csum_deserial(input: &[u8]) -> Result<Vec<Hash>, Error> {
        if input.len() % 32 != 0 {
            return Err(Error::new(
                ErrorKind::InvalidData,
                "Cannot deserialize Vec<Hash>",
            ));
        }
        let mut out = Vec::<Hash>::new();
        for i in input.chunks(32) {
            let a: [u8; 32] = i.try_into().unwrap();
            let h: Hash = a.try_into().unwrap();
            out.push(h);
        }
        Ok(out)
    }
    /// Parses a serialized ACK, validating the trailing checksum first.
    ///
    /// Byte 0 is the status; the version string starts at byte 1 and is NUL
    /// terminated, so the delta section (when present) starts at
    /// `version.len() + 2`: two u64s (16 bytes), the 32-byte whole-file
    /// hash, then per-chunk hashes up to (but excluding) the checksum byte.
    pub fn deserialize(&mut self, input: Vec<u8>) -> Result<(), Error> {
        validate_checksum(&input)?;
        let mut buf: &[u8] = &input;
        let size = input.len();
        self.ack = buf.read_u8().unwrap().try_into().unwrap();
        self.version = TeleportInit::vec_to_string(&input[1..]);
        // Anything beyond status + version + NUL + checksum is delta data.
        if size > self.version.len() + 3 {
            buf = &input[self.version.len() + 2..];
            let c: [u8; 32] = input[self.version.len() + 2 + 16..self.version.len() + 2 + 16 + 32]
                .try_into()
                .unwrap();
            self.delta = Some(TeleportDelta {
                size: buf.read_u64::<LittleEndian>().unwrap(),
                delta_size: buf.read_u64::<LittleEndian>().unwrap(),
                csum: c.try_into().unwrap(),
                delta_csum: TeleportInitAck::csum_deserial(
                    &input[self.version.len() + 2 + 16 + 32..size - 1],
                )?,
            });
        } else {
            self.delta = None;
        }
        Ok(())
    }
}
impl Default for TeleportData {
fn default() -> Self {
Self::new()
}
}
impl TeleportData {
    /// Creates an empty data packet (zero length, zero offset, no payload).
    pub fn new() -> TeleportData {
        TeleportData {
            length: 0,
            offset: 0,
            data: vec![],
        }
    }
    /// Serializes the packet: little-endian u32 payload length, u64 file
    /// offset, the payload bytes, and a trailing one-byte additive checksum.
    ///
    /// NOTE(review): the length word is taken from `data.len()`, not from
    /// `self.length` — confirm the two are always kept in sync by callers.
    pub fn serialize(&self) -> Vec<u8> {
        let mut out = Vec::<u8>::new();
        let len: u32 = self.data.len() as u32;
        out.append(&mut len.to_le_bytes().to_vec());
        out.append(&mut self.offset.to_le_bytes().to_vec());
        out.append(&mut self.data.clone());
        out.push(generate_checksum(&out));
        out
    }
    /// Size of the serialized packet excluding the trailing checksum byte:
    /// payload + 4 (length word) + 8 (offset word).
    pub fn size(&self) -> usize {
        self.data.len() + 4 + 8
    }
    /// Parses a serialized packet, validating the trailing checksum first.
    /// The payload is everything between the 12-byte header and the checksum.
    pub fn deserialize(&mut self, input: &[u8]) -> Result<(), Error> {
        validate_checksum(&input.to_vec())?;
        let size = input.len();
        let mut buf: &[u8] = input;
        self.length = buf.read_u32::<LittleEndian>().unwrap();
        self.offset = buf.read_u64::<LittleEndian>().unwrap();
        self.data = input[12..size - 1].to_vec();
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Serialized TeleportInit fixture: length word 62, protocol "TELEPORT",
    // version "0.2.2", filename "testfile.bin", numeric fields, checksum 145.
    const TESTINIT: &[u8] = &[
        62, 0, 0, 0, 84, 69, 76, 69, 80, 79, 82, 84, 0, 48, 46, 50, 46, 50, 0, 116, 101, 115, 116,
        102, 105, 108, 101, 46, 98, 105, 110, 0, 1, 0, 0, 0, 0, 0, 0, 0, 231, 3, 0, 0, 0, 0, 0, 0,
        41, 35, 0, 0, 0, 0, 0, 0, 243, 2, 0, 0, 1, 145,
    ];
    // Serialized TeleportInitAck fixture: WrongVersion status (5), version
    // "0.2.3", no delta, checksum 246.
    const TESTINITACK: &[u8] = &[5, 48, 46, 50, 46, 51, 0, 246];
    // Serialized TeleportData fixture: length 4, offset 8888, 4 payload
    // bytes, checksum 21.
    const TESTDATA: &[u8] = &[4, 0, 0, 0, 184, 34, 0, 0, 0, 0, 0, 0, 10, 10, 32, 3, 21];
    #[test]
    fn test_update_unit() {
        let pe = 2.0;
        let te = 1_234_567_890_123_456.0;
        let s = update_units(pe, te);
        assert_eq!(s.partial.unit, 'B');
        assert_eq!(s.total.unit, 'T');
    }
    #[test]
    fn test_teleportinit_serialize() {
        let t: TeleportInit = TeleportInit {
            protocol: PROTOCOL.to_string(),
            version: "0.2.2".to_string(),
            filename: "testfile.bin".to_string(),
            filenum: 1,
            totalfiles: 999,
            filesize: 9001,
            chmod: 00755,
            overwrite: true,
        };
        let s = t.serialize();
        assert_eq!(s, TESTINIT);
    }
    #[test]
    fn test_teleportinit_deserialize() {
        let t: TeleportInit = TeleportInit {
            protocol: PROTOCOL.to_string(),
            version: "0.2.2".to_string(),
            filename: "testfile.bin".to_string(),
            filenum: 1,
            totalfiles: 999,
            filesize: 9001,
            chmod: 00755,
            overwrite: true,
        };
        let mut te = TeleportInit::new();
        te.deserialize(TESTINIT.to_vec()).unwrap();
        assert_eq!(te, t);
    }
    #[test]
    fn test_teleportinitack_serialize() {
        let mut t = TeleportInitAck::new(TeleportInitStatus::WrongVersion);
        t.version = "0.2.3".to_string();
        let te = t.serialize();
        assert_eq!(te, TESTINITACK);
    }
    #[test]
    fn test_teleportinitack_deserialize() {
        let mut te = TeleportInitAck::new(TeleportInitStatus::Proceed);
        let test = TeleportInitAck {
            ack: TeleportInitStatus::WrongVersion,
            version: "0.2.3".to_string(),
            delta: None,
        };
        te.deserialize(TESTINITACK.to_vec()).unwrap();
        te.version = "0.2.3".to_string();
        assert_eq!(test, te);
    }
    #[test]
    fn test_teleportdata_serialize() {
        let t = TeleportData {
            length: 4,
            offset: 8888,
            data: vec![0x0a, 0x0a, 0x20, 0x03],
        }
        .serialize();
        assert_eq!(t, TESTDATA);
    }
    #[test]
    fn test_teleportdata_deserialize() {
        let mut t = TeleportData::new();
        t.deserialize(TESTDATA).unwrap();
        let test = TeleportData {
            length: 4,
            offset: 8888,
            data: vec![0x0a, 0x0a, 0x20, 0x03],
        };
        assert_eq!(t, test);
    }
    #[test]
    fn test_generate_checksum() {
        let t = TESTINITACK[..TESTINITACK.len() - 1].to_vec();
        let c = generate_checksum(&t);
        assert_eq!(c, TESTINITACK[TESTINITACK.len() - 1]);
    }
    #[test]
    fn test_validate_checksum() {
        assert_eq!((), validate_checksum(&TESTINITACK.to_vec()).unwrap());
    }
}
|
use super::VecMutator;
use crate::mutators::mutations::{Mutation, RevertMutation};
use crate::{Mutator, SubValueProvider};
/// The "insert one element" mutation for `VecMutator`.
pub struct InsertElement;
/// Marker step: pick the insertion index and element entirely at random.
#[derive(Clone)]
pub struct InsertElementRandomStep;
/// Ordered-search step: one arbitrary-step per candidate insertion index,
/// removed as each index's element space is exhausted.
#[derive(Clone)]
pub struct InsertElementStep<A> {
    arbitrary_steps: Vec<(usize, A)>,
}
/// A fully-decided insertion: the element, its complexity, and the index.
pub struct ConcreteInsertElement<T> {
    el: T,
    cplx: f64,
    idx: usize,
}
/// Undo token for an insertion: remembers the index that was inserted at.
pub struct RevertInsertElement {
    pub idx: usize,
}
impl<T, M> RevertMutation<Vec<T>, VecMutator<T, M>> for RevertInsertElement
where
    T: Clone + 'static,
    M: Mutator<T>,
{
    /// Undoes an insertion by removing the element at the recorded index.
    /// The removed value is discarded; the cache is left untouched.
    #[no_coverage]
    fn revert(
        self,
        _mutator: &VecMutator<T, M>,
        value: &mut Vec<T>,
        _cache: &mut <VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
    ) {
        let _ = value.remove(self.idx);
    }
}
impl<T, M> Mutation<Vec<T>, VecMutator<T, M>> for InsertElement
where
    T: Clone + 'static,
    M: Mutator<T>,
{
    type RandomStep = InsertElementRandomStep;
    type Step = InsertElementStep<M::ArbitraryStep>;
    type Concrete<'a> = ConcreteInsertElement<T>;
    type Revert = RevertInsertElement;
    /// The mutation is applicable unless the element mutator can produce
    /// nothing (max complexity 0) or the vector is already at its length cap.
    #[no_coverage]
    fn default_random_step(&self, mutator: &VecMutator<T, M>, value: &Vec<T>) -> Option<Self::RandomStep> {
        if mutator.m.max_complexity() == 0. {
            return None;
        }
        if value.len() >= *mutator.len_range.end() {
            None
        } else {
            Some(InsertElementRandomStep)
        }
    }
    /// Picks a random element (within the complexity budget left over after
    /// accounting for the current value) and a random insertion index.
    #[no_coverage]
    fn random<'a>(
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        _random_step: &Self::RandomStep,
        max_cplx: f64,
    ) -> Self::Concrete<'a> {
        let value_cplx = mutator.complexity(value, cache);
        let spare_cplx = max_cplx - value_cplx;
        let (el, cplx) = mutator.m.random_arbitrary(spare_cplx);
        ConcreteInsertElement {
            el,
            cplx,
            idx: mutator.rng.usize(..=value.len()),
        }
    }
    /// Builds the ordered-search step, with one arbitrary-step per possible
    /// insertion index (0..=len). Same applicability rules as the random step.
    #[no_coverage]
    fn default_step(
        &self,
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        _cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
    ) -> Option<Self::Step> {
        if mutator.m.max_complexity() == 0. {
            return None;
        }
        if value.len() >= *mutator.len_range.end() {
            None
        } else {
            Some(InsertElementStep {
                arbitrary_steps: (0..=value.len())
                    .map(
                        #[no_coverage]
                        |i| (i, mutator.m.default_arbitrary_step()),
                    )
                    .collect(),
            })
        }
    }
    /// Chooses a random remaining index and asks the element mutator for the
    /// next element at that index. When an index's element space is
    /// exhausted, it is removed and the search recurses until all indices
    /// are exhausted (then `None`).
    #[no_coverage]
    fn from_step<'a>(
        mutator: &VecMutator<T, M>,
        value: &Vec<T>,
        cache: &<VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        step: &'a mut Self::Step,
        subvalue_provider: &dyn SubValueProvider,
        max_cplx: f64,
    ) -> Option<Self::Concrete<'a>> {
        if step.arbitrary_steps.is_empty() {
            return None;
        }
        let value_cplx = mutator.complexity(value, cache);
        let spare_cplx = max_cplx - value_cplx;
        let choice = mutator.rng.usize(..step.arbitrary_steps.len());
        let (idx, arbitrary_step) = &mut step.arbitrary_steps[choice];
        if let Some((el, cplx)) = mutator.m.ordered_arbitrary(arbitrary_step, spare_cplx) {
            Some(ConcreteInsertElement { el, cplx, idx: *idx })
        } else {
            step.arbitrary_steps.remove(choice);
            Self::from_step(mutator, value, cache, step, subvalue_provider, max_cplx)
        }
    }
    /// Performs the insertion and returns the undo token plus the new total
    /// complexity (cached element-sum complexity plus the new element's).
    #[no_coverage]
    fn apply<'a>(
        mutation: Self::Concrete<'a>,
        mutator: &VecMutator<T, M>,
        value: &mut Vec<T>,
        cache: &mut <VecMutator<T, M> as Mutator<Vec<T>>>::Cache,
        _subvalue_provider: &dyn SubValueProvider,
        _max_cplx: f64,
    ) -> (Self::Revert, f64) {
        value.insert(mutation.idx, mutation.el);
        let new_cplx = mutator.complexity_from_inner(cache.sum_cplx + mutation.cplx, value.len());
        (RevertInsertElement { idx: mutation.idx }, new_cplx)
    }
}
|
use liblumen_alloc::erts::term::prelude::*;
// Native implementation of the Erlang BIF `lumen:is_small_integer/1`:
// returns the boolean term for whether `term` is a small (immediate) integer.
#[native_implemented::function(lumen:is_small_integer/1)]
pub fn result(term: Term) -> Term {
    term.is_smallint().into()
}
|
use geometry::Size;
use models::Dot;
use models::Mouse;
use std::collections::HashSet;
use models::Line;
/// Which of the two players takes the next turn.
#[derive(PartialEq)]
pub enum ActivePlayer {
    A,
    B,
}
impl ActivePlayer {
    /// Returns `true` when player A is the active player.
    pub fn is_a(&self) -> bool {
        match self {
            ActivePlayer::A => true,
            ActivePlayer::B => false,
        }
    }
}
/// A model that contains the other models and renders them
pub struct World {
    /// Grid of dots, stored row by row.
    pub dots: Vec<Vec<Dot>>,
    /// World dimensions.
    pub size: Size,
    /// Lines drawn by player A.
    pub lines_a: HashSet<Line>,
    /// Lines drawn by player B.
    pub lines_b: HashSet<Line>,
    /// Squares completed by player A.
    pub score_a: u32,
    /// Squares completed by player B.
    pub score_b: u32,
    /// Whose turn it currently is.
    pub active_player: ActivePlayer,
}
impl World {
    /// Returns a new world of the given size
    pub fn new(size: Size) -> World {
        World {
            dots: World::dots(size),
            lines_a: HashSet::new(),
            lines_b: HashSet::new(),
            score_a: 0,
            score_b: 0,
            active_player: ActivePlayer::A,
            size,
        }
    }
    /// Handles a mouse release: if it landed on a dot contiguous to
    /// `start_dot`, records the line for the active player, bumps their
    /// score when a square was completed, and otherwise passes the turn.
    pub fn on_mouse_up(&mut self, start_dot: Dot, mouse: &Mouse) {
        // First dot (row-major order) the mouse was released on, if any.
        let end_dot = match self
            .dots
            .iter()
            .flatten()
            .find(|dot| dot.collides_with(mouse))
        {
            Some(dot) => *dot,
            None => return,
        };
        if !start_dot.is_contiguous(end_dot) {
            return;
        }
        let line = Line::new(start_dot, end_dot);
        if self.active_player == ActivePlayer::A {
            // The line must not already belong to either player.
            if !self.lines_b.contains(&line) && self.lines_a.insert(line) {
                if self.scored(line) {
                    self.score_a += 1;
                } else {
                    self.active_player = ActivePlayer::B;
                }
            }
        } else if !self.lines_a.contains(&line) && self.lines_b.insert(line) {
            if self.scored(line) {
                self.score_b += 1;
            } else {
                self.active_player = ActivePlayer::A;
            }
        }
    }
    /// Returns `true` when `line` completes at least one unit square,
    /// counting lines drawn by either player.
    ///
    /// NOTE(review): the `i - 1`/`j - 1` candidates underflow at the grid
    /// border if Dot coordinates are unsigned — confirm they are signed.
    fn scored(&self, line: Line) -> bool {
        // The two candidate squares lie on either side of the new line.
        let squares = if line.a.i == line.b.i {
            let i = line.a.i;
            vec![
                World::square(line, Dot::new(i + 1, line.a.j), Dot::new(i + 1, line.b.j)),
                World::square(line, Dot::new(i - 1, line.a.j), Dot::new(i - 1, line.b.j)),
            ]
        } else {
            let j = line.a.j;
            vec![
                World::square(line, Dot::new(line.a.i, j + 1), Dot::new(line.b.i, j + 1)),
                World::square(line, Dot::new(line.a.i, j - 1), Dot::new(line.b.i, j - 1)),
            ]
        };
        squares.iter().any(|square| {
            square
                .iter()
                .all(|line| self.lines_a.contains(line) || self.lines_b.contains(line))
        })
    }
    // The three sides needed (besides `line` itself) to close the square
    // whose far corners are `dot_c` and `dot_d`.
    fn square(line: Line, dot_c: Dot, dot_d: Dot) -> Vec<Line> {
        vec![
            Line::new(line.a, dot_c),
            Line::new(line.b, dot_d),
            Line::new(dot_c, dot_d),
        ]
    }
    /// Builds the dot grid.
    ///
    /// NOTE(review): `_size` is ignored and the grid is hard-coded to 3x3 —
    /// confirm whether it should scale with the requested size.
    pub fn dots(_size: Size) -> Vec<Vec<Dot>> {
        let mut rows = vec![];
        for i in 0..3 {
            let mut row = vec![];
            for j in 0..3 {
                row.push(Dot::new(i, j));
            }
            rows.push(row);
        }
        rows
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn score_is_added() {
        let mut world = World::new(Size::new(0.0, 0.0));
        // Pre-draw three sides of the top-left unit square for player A;
        // the fourth side should then be detected as completing it.
        world.lines_a = vec![
            Line::new(Dot::new(0, 0), Dot::new(0, 1)),
            Line::new(Dot::new(0, 0), Dot::new(1, 0)),
            Line::new(Dot::new(1, 0), Dot::new(1, 1)),
        ].iter().cloned().collect();
        assert!(world.scored(Line::new(Dot::new(1, 1), Dot::new(0, 1))), "counts scored");
    }
}
|
use crate::file_util::read_non_blank_lines;
use std::str::FromStr;
#[allow(dead_code)]
pub fn run_day_ten() {
let mut input = read_non_blank_lines("assets/day_ten")
.filter_map(|x| usize::from_str(x.as_str()).ok())
.collect::<Vec<usize>>();
input.sort_unstable();
let result = find_jolt_differences(&mut input);
println!("Result Task 1: {}", result[0] * result[1]);
let result2 = get_number_of_arrangements(&input);
println!("Result Task 2: {}", result2);
}
/// Counts the valid adapter arrangements for a sorted joltage list.
///
/// Dynamic program over consecutive gaps: the fold state holds the
/// arrangement counts ending three, two and one positions back; a gap of 3
/// forces a single path, a gap of 2 skips one predecessor, and a gap of 1
/// sums all three. The outlet (joltage 0) is prepended without allocating a
/// temporary `Vec` (the original built `vec!(0)` just to chain one value).
fn get_number_of_arrangements(input: &[usize]) -> usize {
    std::iter::once(0_usize)
        .chain(input.iter().copied())
        .zip(input.iter().copied())
        .fold((0, 0, 1), |(two_back, one_back, current), (a, b)| {
            match b - a {
                3 => (0, 0, current),
                2 => (current, 0, one_back + current),
                _ => (one_back, current, two_back + one_back + current),
            }
        })
        .2
}
/// Counts the 1-jolt and 3-jolt gaps in a sorted joltage list, returned as
/// `[ones, threes]`.
///
/// Both counters start at 1: the outlet-to-first-adapter gap (assumed to be
/// 1) and the final adapter-to-device gap (always 3) are pre-counted.
/// Uses `windows(2)` so an empty slice no longer panics (the previous
/// `&input[1..]` slicing did).
fn find_jolt_differences(input: &mut [usize]) -> [usize; 2] {
    let mut result = [1, 1];
    for pair in input.windows(2) {
        match pair[1] - pair[0] {
            1 => result[0] += 1,
            3 => result[1] += 1,
            _ => {}
        }
    }
    result
}
#[cfg(test)]
mod tests {
    use crate::day_ten::*;
    // Both tests use the larger example input from the AoC day 10 puzzle.
    #[test]
    fn should_produce_number_arrangements() {
        let mut input = [
            28_usize, 33, 18, 42, 31, 14,
            46, 20, 48, 47, 24, 23,
            49, 45, 19, 38, 39, 11,
            1, 32, 25, 35, 8, 17,
            7, 9, 4, 2, 34, 10, 3
        ];
        input.sort_unstable();
        let result = get_number_of_arrangements(&input);
        assert_eq!(result, 19208)
    }
    #[test]
    fn should_produce_jolt_differences() {
        let mut input = [
            28_usize, 33, 18, 42, 31, 14,
            46, 20, 48, 47, 24, 23,
            49, 45, 19, 38, 39, 11,
            1, 32, 25, 35, 8, 17,
            7, 9, 4, 2, 34, 10, 3
        ];
        input.sort_unstable();
        let result = find_jolt_differences(&mut input);
        assert_eq!(result, [22_usize, 10])
    }
}
|
//! Error handling interface.
//!
//! This module holds the generic error and result types to interface with `ctru_sys` and the [`ctru-rs`](crate) safe wrapper.
use std::borrow::Cow;
use std::error;
use std::ffi::CStr;
use std::fmt;
use std::ops::{ControlFlow, FromResidual, Try};
use ctru_sys::result::{R_DESCRIPTION, R_LEVEL, R_MODULE, R_SUMMARY};
/// Custom type alias for generic [`ctru-rs`](crate) operations.
///
/// This type is compatible with [`ctru_sys::Result`] codes.
pub type Result<T> = ::std::result::Result<T, Error>;
/// Validity checker of raw [`ctru_sys::Result`] codes.
///
/// This struct supports the "try" syntax (`?`) to convert to an [`Error::Os`].
///
/// # Example
///
/// ```no_run
/// use ctru::error::{Result, ResultCode};
///
/// pub fn hid_init() -> Result<()> {
///     // We run an unsafe function which returns a `ctru_sys::Result`.
///     let result: ctru_sys::Result = unsafe { ctru_sys::hidInit() };
///
///     // The result code is parsed and any possible error gets returned by the function.
///     ResultCode(result)?;
///     Ok(())
/// }
/// ```
// `repr(transparent)` keeps the wrapper ABI-identical to the raw code.
#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Eq, Ord)]
#[repr(transparent)]
pub struct ResultCode(pub ctru_sys::Result);
impl Try for ResultCode {
    type Output = ();
    type Residual = Error;
    /// A freshly-constructed success: code 0.
    fn from_output(_: Self::Output) -> Self {
        Self(0)
    }
    /// Breaks with an [`Error::Os`] on failure codes, continues otherwise.
    fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
        // Wait timeouts aren't counted as "failures" in libctru, but an unfinished task means unsafety for us.
        // Luckily all summary cases are for system failures (except RS_SUCCESS).
        // I don't know if there are any cases in libctru where a Result holds a "failing" summary but a "success" code, so we'll just check for both.
        if ctru_sys::R_FAILED(self.0) || ctru_sys::R_SUMMARY(self.0) != ctru_sys::RS_SUCCESS as i32
        {
            ControlFlow::Break(self.into())
        } else {
            ControlFlow::Continue(())
        }
    }
}
impl FromResidual for ResultCode {
    /// Recovers the raw code from the residual produced by [`Try::branch`].
    /// Only `Error::Os` can be produced by that path, hence the catch-all
    /// arm is unreachable.
    fn from_residual(e: <Self as Try>::Residual) -> Self {
        match e {
            Error::Os(result) => Self(result),
            _ => unreachable!(),
        }
    }
}
// Lets `ResultCode(..)?` be used inside functions returning `Result<T>`.
impl<T> FromResidual<Error> for Result<T> {
    fn from_residual(e: Error) -> Self {
        Err(e)
    }
}
/// The generic error enum returned by [`ctru-rs`](crate) functions.
///
/// This error enum supports parsing and displaying [`ctru_sys::Result`] codes.
#[non_exhaustive]
pub enum Error {
    /// Raw [`ctru_sys::Result`] codes.
    Os(ctru_sys::Result),
    /// Generic [`libc`] errors.
    Libc(String),
    /// Requested service is already active and cannot be activated again.
    ServiceAlreadyActive,
    /// `stdout` is already being redirected.
    OutputAlreadyRedirected,
    /// The buffer provided by the user to store some data is shorter than required.
    BufferTooShort {
        /// Length of the buffer provided by the user.
        provided: usize,
        /// Size of the requested data (in bytes).
        wanted: usize,
    },
}
impl Error {
    /// Create an [`Error`] out of the last set value in `errno`.
    ///
    /// This can be used to get a human-readable error string from calls to `libc` functions.
    pub(crate) fn from_errno() -> Self {
        let error_str = unsafe {
            let errno = ctru_sys::errno();
            let str_ptr = libc::strerror(errno);
            // Safety: strerror should always return a valid string,
            // even if the error number is unknown
            CStr::from_ptr(str_ptr)
        };
        // Copy out of the error string, since it may be changed by other libc calls later
        Self::Libc(error_str.to_string_lossy().into())
    }
}
impl From<ctru_sys::Result> for Error {
fn from(err: ctru_sys::Result) -> Self {
Error::Os(err)
}
}
impl From<ResultCode> for Error {
fn from(err: ResultCode) -> Self {
Self::Os(err.0)
}
}
impl fmt::Debug for Error {
    /// OS codes are expanded into their level/module/summary/description
    /// fields; the other variants use standard struct/tuple formatting.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Self::Os(err) => f
                .debug_struct("Error")
                .field("raw", &format_args!("{err:#08X}"))
                .field("level", &result_code_level_str(err))
                .field("module", &result_code_module_str(err))
                .field("summary", &result_code_summary_str(err))
                .field("description", &result_code_description_str(err))
                .finish(),
            Self::Libc(err) => f.debug_tuple("Libc").field(err).finish(),
            Self::ServiceAlreadyActive => f.debug_tuple("ServiceAlreadyActive").finish(),
            Self::OutputAlreadyRedirected => f.debug_tuple("OutputAlreadyRedirected").finish(),
            Self::BufferTooShort { provided, wanted } => f
                .debug_struct("BufferTooShort")
                .field("provided", provided)
                .field("wanted", wanted)
                .finish(),
        }
    }
}
impl fmt::Display for Error {
    /// User-facing rendering; OS codes are decoded via the helper lookups.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Self::Os(err) => write!(
                f,
                "libctru result code 0x{err:08X}: [{} {}] {}: {}",
                result_code_level_str(err),
                result_code_module_str(err),
                result_code_summary_str(err),
                result_code_description_str(err)
            ),
            Self::Libc(err) => write!(f, "{err}"),
            Self::ServiceAlreadyActive => write!(f, "service already active"),
            Self::OutputAlreadyRedirected => {
                write!(f, "output streams are already redirected to 3dslink")
            }
            Self::BufferTooShort{provided, wanted} => write!(f, "the provided buffer's length is too short (length = {provided}) to hold the wanted data (size = {wanted})")
        }
    }
}
impl error::Error for Error {}
/// Maps the level field of a result code to a human-readable name; unknown
/// values are rendered as `(unknown level: 0x…)`.
fn result_code_level_str(result: ctru_sys::Result) -> Cow<'static, str> {
    use ctru_sys::{
        RL_FATAL, RL_INFO, RL_PERMANENT, RL_REINITIALIZE, RL_RESET, RL_STATUS, RL_SUCCESS,
        RL_TEMPORARY, RL_USAGE,
    };
    Cow::Borrowed(match R_LEVEL(result) as u32 {
        RL_SUCCESS => "success",
        RL_INFO => "info",
        RL_FATAL => "fatal",
        RL_RESET => "reset",
        RL_REINITIALIZE => "reinitialize",
        RL_USAGE => "usage",
        RL_PERMANENT => "permanent",
        RL_TEMPORARY => "temporary",
        RL_STATUS => "status",
        code => return Cow::Owned(format!("(unknown level: {code:#x})")),
    })
}
/// Maps the summary field of a result code to a human-readable name; unknown
/// values are rendered as `(unknown summary: 0x…)`.
fn result_code_summary_str(result: ctru_sys::Result) -> Cow<'static, str> {
    use ctru_sys::{
        RS_CANCELED, RS_INTERNAL, RS_INVALIDARG, RS_INVALIDRESVAL, RS_INVALIDSTATE, RS_NOP,
        RS_NOTFOUND, RS_NOTSUPPORTED, RS_OUTOFRESOURCE, RS_STATUSCHANGED, RS_SUCCESS,
        RS_WOULDBLOCK, RS_WRONGARG,
    };
    Cow::Borrowed(match R_SUMMARY(result) as u32 {
        RS_SUCCESS => "success",
        RS_NOP => "nop",
        RS_WOULDBLOCK => "would_block",
        RS_OUTOFRESOURCE => "out_of_resource",
        RS_NOTFOUND => "not_found",
        RS_INVALIDSTATE => "invalid_state",
        RS_NOTSUPPORTED => "not_supported",
        RS_INVALIDARG => "invalid_arg",
        RS_WRONGARG => "wrong_arg",
        RS_CANCELED => "canceled",
        RS_STATUSCHANGED => "status_changed",
        RS_INTERNAL => "internal",
        RS_INVALIDRESVAL => "invalid_res_val",
        code => return Cow::Owned(format!("(unknown summary: {code:#x})")),
    })
}
/// Maps the description field of a result code to a human-readable name;
/// unknown values are rendered as `(unknown description: 0x…)`.
fn result_code_description_str(result: ctru_sys::Result) -> Cow<'static, str> {
    use ctru_sys::{
        RD_ALREADY_DONE, RD_ALREADY_EXISTS, RD_ALREADY_INITIALIZED, RD_BUSY, RD_CANCEL_REQUESTED,
        RD_INVALID_ADDRESS, RD_INVALID_COMBINATION, RD_INVALID_ENUM_VALUE, RD_INVALID_HANDLE,
        RD_INVALID_POINTER, RD_INVALID_RESULT_VALUE, RD_INVALID_SELECTION, RD_INVALID_SIZE,
        RD_MISALIGNED_ADDRESS, RD_MISALIGNED_SIZE, RD_NOT_AUTHORIZED, RD_NOT_FOUND,
        RD_NOT_IMPLEMENTED, RD_NOT_INITIALIZED, RD_NO_DATA, RD_OUT_OF_MEMORY, RD_OUT_OF_RANGE,
        RD_SUCCESS, RD_TIMEOUT, RD_TOO_LARGE,
    };
    Cow::Borrowed(match R_DESCRIPTION(result) as u32 {
        RD_SUCCESS => "success",
        RD_INVALID_RESULT_VALUE => "invalid_result_value",
        RD_TIMEOUT => "timeout",
        RD_OUT_OF_RANGE => "out_of_range",
        RD_ALREADY_EXISTS => "already_exists",
        RD_CANCEL_REQUESTED => "cancel_requested",
        RD_NOT_FOUND => "not_found",
        RD_ALREADY_INITIALIZED => "already_initialized",
        RD_NOT_INITIALIZED => "not_initialized",
        RD_INVALID_HANDLE => "invalid_handle",
        RD_INVALID_POINTER => "invalid_pointer",
        RD_INVALID_ADDRESS => "invalid_address",
        RD_NOT_IMPLEMENTED => "not_implemented",
        RD_OUT_OF_MEMORY => "out_of_memory",
        RD_MISALIGNED_SIZE => "misaligned_size",
        RD_MISALIGNED_ADDRESS => "misaligned_address",
        RD_BUSY => "busy",
        RD_NO_DATA => "no_data",
        RD_INVALID_COMBINATION => "invalid_combination",
        RD_INVALID_ENUM_VALUE => "invalid_enum_value",
        RD_INVALID_SIZE => "invalid_size",
        RD_ALREADY_DONE => "already_done",
        RD_NOT_AUTHORIZED => "not_authorized",
        RD_TOO_LARGE => "too_large",
        RD_INVALID_SELECTION => "invalid_selection",
        code => return Cow::Owned(format!("(unknown description: {code:#x})")),
    })
}
/// Maps the module field of a result code to a human-readable name; unknown
/// values are rendered as `(unknown module: 0x…)`.
fn result_code_module_str(result: ctru_sys::Result) -> Cow<'static, str> {
    use ctru_sys::{
        RM_AC, RM_ACC, RM_ACT, RM_AM, RM_AM_LOW, RM_APPLET, RM_APPLICATION, RM_AVD, RM_BOSS,
        RM_CAM, RM_CARD, RM_CARDNOR, RM_CARD_SPI, RM_CEC, RM_CODEC, RM_COMMON, RM_CONFIG, RM_CSND,
        RM_CUP, RM_DBG, RM_DBM, RM_DD, RM_DI, RM_DLP, RM_DMNT, RM_DSP, RM_EC, RM_ENC, RM_FATFS,
        RM_FILE_SERVER, RM_FND, RM_FRIENDS, RM_FS, RM_FSI, RM_GD, RM_GPIO, RM_GSP, RM_GYROSCOPE,
        RM_HID, RM_HIO, RM_HIO_LOW, RM_HTTP, RM_I2C, RM_INVALIDRESVAL, RM_IR, RM_KERNEL, RM_L2B,
        RM_LDR, RM_LOADER_SERVER, RM_MC, RM_MCU, RM_MIC, RM_MIDI, RM_MP, RM_MPWL, RM_MVD, RM_NDM,
        RM_NEIA, RM_NEWS, RM_NEX, RM_NFC, RM_NFP, RM_NGC, RM_NIM, RM_NPNS, RM_NS, RM_NWM, RM_OLV,
        RM_OS, RM_PDN, RM_PI, RM_PIA, RM_PL, RM_PM, RM_PM_LOW, RM_PS, RM_PTM, RM_PXI, RM_QTM,
        RM_RDT, RM_RO, RM_ROMFS, RM_SDMC, RM_SND, RM_SOC, RM_SPI, RM_SPM, RM_SRV, RM_SSL, RM_SWC,
        RM_TCB, RM_TEST, RM_UART, RM_UDS, RM_UPDATER, RM_UTIL, RM_VCTL, RM_WEB_BROWSER,
    };
    Cow::Borrowed(match R_MODULE(result) as u32 {
        RM_COMMON => "common",
        RM_KERNEL => "kernel",
        RM_UTIL => "util",
        RM_FILE_SERVER => "file_server",
        RM_LOADER_SERVER => "loader_server",
        RM_TCB => "tcb",
        RM_OS => "os",
        RM_DBG => "dbg",
        RM_DMNT => "dmnt",
        RM_PDN => "pdn",
        RM_GSP => "gsp",
        RM_I2C => "i2c",
        RM_GPIO => "gpio",
        RM_DD => "dd",
        RM_CODEC => "codec",
        RM_SPI => "spi",
        RM_PXI => "pxi",
        RM_FS => "fs",
        RM_DI => "di",
        RM_HID => "hid",
        RM_CAM => "cam",
        RM_PI => "pi",
        RM_PM => "pm",
        RM_PM_LOW => "pm_low",
        RM_FSI => "fsi",
        RM_SRV => "srv",
        RM_NDM => "ndm",
        RM_NWM => "nwm",
        RM_SOC => "soc",
        RM_LDR => "ldr",
        RM_ACC => "acc",
        RM_ROMFS => "romfs",
        RM_AM => "am",
        RM_HIO => "hio",
        RM_UPDATER => "updater",
        RM_MIC => "mic",
        RM_FND => "fnd",
        RM_MP => "mp",
        RM_MPWL => "mpwl",
        RM_AC => "ac",
        RM_HTTP => "http",
        RM_DSP => "dsp",
        RM_SND => "snd",
        RM_DLP => "dlp",
        RM_HIO_LOW => "hio_low",
        RM_CSND => "csnd",
        RM_SSL => "ssl",
        RM_AM_LOW => "am_low",
        RM_NEX => "nex",
        RM_FRIENDS => "friends",
        RM_RDT => "rdt",
        RM_APPLET => "applet",
        RM_NIM => "nim",
        RM_PTM => "ptm",
        RM_MIDI => "midi",
        RM_MC => "mc",
        RM_SWC => "swc",
        RM_FATFS => "fatfs",
        RM_NGC => "ngc",
        RM_CARD => "card",
        RM_CARDNOR => "cardnor",
        RM_SDMC => "sdmc",
        RM_BOSS => "boss",
        RM_DBM => "dbm",
        RM_CONFIG => "config",
        RM_PS => "ps",
        RM_CEC => "cec",
        RM_IR => "ir",
        RM_UDS => "uds",
        RM_PL => "pl",
        RM_CUP => "cup",
        RM_GYROSCOPE => "gyroscope",
        RM_MCU => "mcu",
        RM_NS => "ns",
        RM_NEWS => "news",
        RM_RO => "ro",
        RM_GD => "gd",
        RM_CARD_SPI => "card_spi",
        RM_EC => "ec",
        RM_WEB_BROWSER => "web_browser",
        RM_TEST => "test",
        RM_ENC => "enc",
        RM_PIA => "pia",
        RM_ACT => "act",
        RM_VCTL => "vctl",
        RM_OLV => "olv",
        RM_NEIA => "neia",
        RM_NPNS => "npns",
        RM_AVD => "avd",
        RM_L2B => "l2b",
        RM_MVD => "mvd",
        RM_NFC => "nfc",
        RM_UART => "uart",
        RM_SPM => "spm",
        RM_QTM => "qtm",
        RM_NFP => "nfp",
        RM_APPLICATION => "application",
        RM_INVALIDRESVAL => "invalid_res_val",
        code => return Cow::Owned(format!("(unknown module: {code:#x})")),
    })
}
|
//! Core of SQLx, the rust SQL toolkit.
//! Not intended to be used directly.
#![recursion_limit = "512"]
#![warn(future_incompatible, rust_2018_idioms)]
#![allow(clippy::needless_doctest_main, clippy::type_complexity)]
// See `clippy.toml` at the workspace root
#![deny(clippy::disallowed_method)]
//
// Allows an API be documented as only available in some specific platforms.
// <https://doc.rust-lang.org/unstable-book/language-features/doc-cfg.html>
#![cfg_attr(docsrs, feature(doc_cfg))]
//
// When compiling with support for SQLite we must allow some unsafe code in order to
// interface with the inherently unsafe C module. This unsafe code is contained
// to the sqlite module.
#![cfg_attr(feature = "sqlite", deny(unsafe_code))]
#![cfg_attr(not(feature = "sqlite"), forbid(unsafe_code))]
#[cfg(feature = "bigdecimal")]
extern crate bigdecimal_ as bigdecimal;
#[macro_use]
mod ext;
#[macro_use]
pub mod error;
#[macro_use]
pub mod arguments;
#[macro_use]
pub mod pool;
pub mod connection;
#[macro_use]
pub mod transaction;
#[macro_use]
pub mod encode;
#[macro_use]
pub mod decode;
#[macro_use]
pub mod types;
#[macro_use]
pub mod query;
#[macro_use]
pub mod acquire;
#[macro_use]
pub mod column;
#[macro_use]
pub mod statement;
mod common;
pub use either::Either;
pub mod database;
pub mod describe;
pub mod executor;
pub mod from_row;
mod io;
mod logger;
mod net;
pub mod query_as;
pub mod query_builder;
pub mod query_scalar;
pub mod row;
pub mod type_info;
pub mod value;
#[cfg(feature = "migrate")]
pub mod migrate;
#[cfg(all(
any(
feature = "postgres",
feature = "mysql",
feature = "mssql",
feature = "sqlite"
),
feature = "any"
))]
pub mod any;
#[cfg(feature = "postgres")]
#[cfg_attr(docsrs, doc(cfg(feature = "postgres")))]
pub mod postgres;
#[cfg(feature = "sqlite")]
#[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))]
pub mod sqlite;
#[cfg(feature = "mysql")]
#[cfg_attr(docsrs, doc(cfg(feature = "mysql")))]
pub mod mysql;
#[cfg(feature = "mssql")]
#[cfg_attr(docsrs, doc(cfg(feature = "mssql")))]
pub mod mssql;
// Implements test support with automatic DB management.
#[cfg(feature = "migrate")]
pub mod testing;
pub use sqlx_rt::test_block_on;
/// sqlx uses ahash for increased performance, at the cost of reduced DoS resistance.
use ahash::AHashMap as HashMap;
//type HashMap<K, V> = std::collections::HashMap<K, V, ahash::RandomState>;
|
//! Support to handle animations for sprites.
use amethyst::{
core::timing::Time,
ecs::prelude::{Join, Read, System, WriteStorage},
ecs::{Component, DenseVecStorage},
renderer::SpriteRender,
};
/// Component which holds a sprite animation
///
/// This includes the sprite indices for the animation, the
/// animation speed, if the animation should pause and other
/// information required to draw a sprite animation.
#[derive(Default)]
pub struct SpriteAnimation {
    /// Position in `keys` of the currently shown frame.
    pub index: u32,
    /// Sprite-sheet indices making up the animation, in play order.
    pub keys: Vec<usize>,
    /// Seconds each frame stays on screen before advancing.
    pub speed: f32,
    /// Seconds accumulated since the last frame advance.
    pub time: f32,
    /// When `true` the animation is not advanced.
    pub pause: bool,
}
impl SpriteAnimation {
    /// Builds an animation over `keys` advancing one frame every `speed`
    /// seconds, starting at the first key, un-paused.
    pub fn new(keys: Vec<usize>, speed: f32) -> Self {
        // All remaining fields (index = 0, time = 0.0, pause = false) are
        // exactly their derived defaults.
        SpriteAnimation {
            keys,
            speed,
            ..Default::default()
        }
    }
}
// Register `SpriteAnimation` as an ECS component backed by dense storage.
impl Component for SpriteAnimation {
    type Storage = DenseVecStorage<Self>;
}
/// System to handle sprite animation.
///
/// Unit struct: the system keeps no state of its own; all animation state
/// lives in each entity's `SpriteAnimation` component.
pub struct SpriteAnimationSystem;
impl<'s> System<'s> for SpriteAnimationSystem {
    type SystemData = (
        WriteStorage<'s, SpriteRender>,
        WriteStorage<'s, SpriteAnimation>,
        Read<'s, Time>,
    );

    /// Advances every un-paused animation by the frame's delta time and
    /// updates the rendered sprite index whenever enough time has elapsed.
    fn run(&mut self, (mut sprite_renders, mut sprite_animations, time): Self::SystemData) {
        for (sprite_render, animation) in (&mut sprite_renders, &mut sprite_animations).join() {
            // Guard the degenerate cases: an empty key list would panic on
            // `% 0` below, and a non-positive speed would make the `while`
            // loop never terminate (the derived Default gives speed == 0.0).
            if animation.pause || animation.keys.is_empty() || animation.speed <= 0.0 {
                continue;
            }
            animation.time += time.delta_seconds();
            // Step one key per elapsed `speed` interval, carrying the
            // remainder so timing stays accurate across frames.
            while animation.time > animation.speed {
                animation.index = (animation.index + 1) % (animation.keys.len() as u32);
                sprite_render.sprite_number = animation.keys[animation.index as usize];
                animation.time -= animation.speed;
            }
        }
    }
}
|
// NOTE: this file intentionally fails to compile. It is a minimal
// reproduction of the "expected expression, found reserved identifier `_`"
// error shown in the comments below: `_` may appear as a binding pattern
// (line above) but not as an expression.
fn main() {
    let _ = 1;
    println!("{}", _);
}
// $rustc ./reserved_identifier.rs
// error: expected expression, found reserved identifier `_`
// --> ./reserved_identifier.rs:3:20
// |
// 3 | println!("{}", _);
// | ^ expected expression
//
// error: aborting due to previous error
|
/*-------------------------------
etc.rs

A grab bag of small helper functions that do not fit
neatly into any other category.

* impl File
  * empty_dir_remove(): checks the directory contents first so removal never errors — is this really necessary?
  * read_to_string : reads a file as a String
  * read_to_vec : reads a file as a Vec<u8>
* unused_dir_remove(): removes the folders that ggez auto-generates
* easy_path_set() : builds a path that works both under cargo and when run standalone
* random_x() : random-number helper for enemy spawn positions
-------------------------------*/
use std;
use std::path::{ Path, PathBuf };
use std::io::Result;
// for File Read
use std::io::{ BufReader, Read };
// for ggez
use ggez;
use rand::{ self, Rng };
pub struct File;

impl File {
    #[allow(dead_code)]
    /// Reads the file at `path` into a `String`.
    ///
    /// Returns any I/O error from opening or reading the file.
    pub fn read_to_string(path: &Path) -> Result<String> {
        let mut reader = BufReader::new(std::fs::File::open(path)?);
        let mut contents = String::new();
        reader.read_to_string(&mut contents)?;
        Ok(contents)
    }

    #[allow(dead_code)]
    /// Reads the file at `path` into a `Vec<u8>`.
    ///
    /// Returns any I/O error from opening or reading the file.
    pub fn read_to_vec(path: &Path) -> Result<Vec<u8>> {
        let mut reader = BufReader::new(std::fs::File::open(path)?);
        let mut contents = Vec::new();
        reader.read_to_end(&mut contents)?;
        Ok(contents)
    }

    /// Removes the directory at `path` only if it is empty, so that
    /// `remove_dir` never errors on a non-empty directory.
    pub fn empty_dir_remove(path: &Path) -> Result<()> {
        // Peek at the first entry instead of counting every one:
        // an empty iterator means the directory is empty.
        if std::fs::read_dir(path)?.next().is_none() {
            std::fs::remove_dir(path)?;
        }
        Ok(())
    }
}
#[allow(dead_code)]
/// Removes the (empty) config/data directories that ggez auto-generates.
pub fn unused_dir_remove(ctx: &mut ggez::Context) -> ggez::GameResult<()> {
    let config_dir = ctx.filesystem.get_user_config_dir();
    let data_dir = ctx.filesystem.get_user_data_dir();
    // Checking `is_dir` first keeps `empty_dir_remove` from returning an
    // error when the directory was never created in the first place.
    if config_dir.is_dir() {
        File::empty_dir_remove(config_dir)?;
    }
    if data_dir.is_dir() {
        File::empty_dir_remove(data_dir)?;
    }
    Ok(())
}
/// Builds a path with minimal fuss.
///
/// Under cargo the path is rooted at the project (manifest) directory;
/// otherwise it is left relative (resolved against the working directory
/// of the running binary).
pub fn easy_path_set(path_str: &str) -> PathBuf {
    // Example input: `path_str = "game_setting.toml"`
    let mut path = PathBuf::new();
    // CARGO_MANIFEST_DIR is set by cargo at run time for `cargo run`.
    if let Ok(manifest_dir) = std::env::var("CARGO_MANIFEST_DIR") {
        path.push(manifest_dir);
    }
    path.push(path_str);
    path
}
/// Produces a random value x in `[0, max_num)` as an `f32`.
///
/// Usage example: `let x = random_x(800);`
pub fn random_x(max_num: u32) -> f32 {
    // Guard: `gen_range` panics on an empty range (low == high).
    if max_num == 0 {
        return 0.0;
    }
    let out_n: u32 = rand::thread_rng().gen_range(0, max_num);
    out_n as f32
}
|
/// Check error returned by the Flight API.
///
/// Panics if the error is not a gRPC status, or if the status does not
/// match the expected code/message (via [`check_tonic_status`]).
pub fn check_flight_error(
    err: influxdb_iox_client::flight::Error,
    expected_error_code: tonic::Code,
    expected_message: Option<&str>,
) {
    match err.tonic_status() {
        Some(status) => check_tonic_status(status, expected_error_code, expected_message),
        None => panic!("Not a gRPC error: {err}"),
    }
}
/// Check tonic status.
///
/// Panics when the status code — or, if one is supplied, the status
/// message — does not match the expectation.
pub fn check_tonic_status(
    status: &tonic::Status,
    expected_error_code: tonic::Code,
    expected_message: Option<&str>,
) {
    let code = status.code();
    assert_eq!(
        code, expected_error_code,
        "Wrong status code: {}\n\nStatus:\n{}",
        code, status,
    );
    if let Some(expected_message) = expected_message {
        let status_message = status.message();
        assert_eq!(
            status_message, expected_message,
            "\nActual status message:\n{status_message}\nExpected message:\n{expected_message}"
        );
    }
}
|
pub mod templates;
use reqwest;
use serde_json::{json, Value};
use templates::MailTemplate;
use crate::app_env::get_env;
/// Builds the SendGrid v3 "mail send" JSON payload for a single recipient
/// using the given dynamic template.
///
/// Takes `&str` rather than `&String`; existing `&String` call sites still
/// work via deref coercion.
fn form_data(to: &str, template: MailTemplate) -> Value {
    json!({
        "personalizations": [
            {
                "to": [{ "email": to }],
                "dynamic_template_data": template.data
            }
        ],
        "from": {
            "email": get_env::sendgrid_from_email(),
            "name": get_env::sendgrid_from_name()
        },
        "template_id": template.id
    })
}
/// Sends a templated mail to `to` through the SendGrid v3 API.
///
/// Returns `Err` with a human-readable message when the request cannot be
/// sent, or when SendGrid responds with a non-success HTTP status.
pub async fn send_mail(to: &String, template: MailTemplate) -> Result<(), String> {
    let client = reqwest::Client::new();
    client
        .post("https://api.sendgrid.com/v3/mail/send")
        .bearer_auth(get_env::sendgrid_api_key())
        .json(&form_data(to, template))
        .send()
        .await
        .map_err(|err| err.to_string())?
        // A transport-level success can still be an API rejection (4xx/5xx);
        // surface that as an error instead of silently returning Ok.
        .error_for_status()
        .map_err(|err| err.to_string())?;
    Ok(())
}
|
/// Applies the repeating-pattern phase transform to `input`'s digits
/// `iterations` times and returns the first eight result digits as a string.
///
/// Panics if `input` contains non-digit characters or fewer than 8 digits.
fn puzzle1(input: String, iterations: usize) -> String {
    let mut digits: Vec<u128> = input
        .chars()
        .map(|c| c.to_digit(10).unwrap() as u128)
        .collect();
    for _ in 0..iterations {
        let mut next_digits: Vec<u128> = Vec::with_capacity(digits.len());
        for output_digit_index in 0..digits.len() {
            let pattern = calculate_pattern(output_digit_index, digits.len());
            // Dot product of the current digits with this position's pattern;
            // only the last decimal digit of its magnitude is kept.
            let value: i128 = digits
                .iter()
                .zip(pattern.iter())
                .map(|(&digit, &factor)| digit as i128 * factor)
                .sum();
            next_digits.push(value.abs() as u128 % 10);
        }
        digits = next_digits;
    }
    digits[..8]
        .iter()
        .map(u128::to_string)
        .collect::<Vec<_>>()
        .join("")
}
/// Runs the same phase transform as `puzzle1`, but returns the eight digits
/// found at the message offset encoded by the input's first seven digits.
fn puzzle2(input: String, iterations: usize) -> String {
    let mut digits: Vec<u128> = input
        .chars()
        .map(|c| c.to_string().parse::<u128>().unwrap())
        .collect();
    // The message offset is the number spelled by the first seven digits.
    let mut start_index: usize = 0;
    for &digit in digits[..7].iter() {
        start_index = start_index * 10 + digit as usize;
    }
    for _ in 0..iterations {
        let mut next_digits = vec![0u128; digits.len()];
        for output_digit_index in 0..digits.len() {
            let pattern = calculate_pattern(output_digit_index, digits.len());
            let value: i128 = digits
                .iter()
                .zip(pattern.iter())
                .map(|(&digit, &factor)| digit as i128 * factor)
                .sum();
            next_digits[output_digit_index] = value.abs() as u128 % 10;
        }
        digits = next_digits;
    }
    digits[start_index..start_index + 8]
        .iter()
        .map(u128::to_string)
        .collect::<Vec<_>>()
        .join("")
}
/// The repeating base multiplier pattern for each phase.
const BASE_PATTERN: [i128; 4] = [0, 1, 0, -1];

/// Returns the multiplier pattern for output position `digit_index`:
/// the base pattern with each element stretched `digit_index + 1` times,
/// shifted left by one (hence starting `i` at 1), truncated/cycled to
/// `count_needed` elements.
fn calculate_pattern(digit_index: usize, count_needed: usize) -> Vec<i128> {
    (1..=count_needed)
        .map(|i| BASE_PATTERN[(i / (digit_index + 1)) % 4])
        .collect()
}
#[cfg(test)]
mod tests {
    use crate::day16::{puzzle1, puzzle2};
    use crate::utils::read_lines;

    #[test]
    fn test_puzzle1() {
        // Worked example plus the three sample inputs from the puzzle text,
        // then the real puzzle input.
        let result = puzzle1(String::from("12345678"), 4);
        assert_eq!(result, String::from("01029498"));
        let result2 = puzzle1(String::from("80871224585914546619083218645595"), 100);
        assert_eq!(result2, String::from("24176176"));
        let result3 = puzzle1(String::from("19617804207202209144916044189917"), 100);
        assert_eq!(result3, String::from("73745418"));
        let result4 = puzzle1(String::from("69317163492948606335995924319873"), 100);
        assert_eq!(result4, String::from("52432133"));
        let puzzle_input = read_lines("data/Day16.txt").unwrap();
        let result5 = puzzle1(puzzle_input[0].clone(), 100);
        assert_eq!(result5, String::from("37153056"));
    }

    #[test]
    #[ignore]
    fn test_puzzle2() {
        let puzzle_input = read_lines("data/Day16.txt").unwrap();
        let read_input = puzzle_input[0].clone();
        // `str::repeat` allocates once, replacing the manual
        // with_capacity/push_str loop and its unused counter variable.
        let calculated_input = read_input.repeat(10000);
        let result5 = puzzle2(calculated_input, 100);
        assert_eq!(result5, String::from("37153056"));
    }
}
use crate::arrow2::error::Result;
use crate::arrow2::ffi::FFIArrowTable;
use arrow2::io::ipc::read::{read_file_metadata, FileReader as IPCFileReader};
use arrow2::io::parquet::write::{FileWriter as ParquetFileWriter, RowGroupIterator};
use std::io::Cursor;
/// Internal function to write a buffer of data in Arrow IPC File format to a Parquet file using
/// the arrow2 and parquet2 crates
///
/// Returns the Parquet file contents as a byte vector, or an error from IPC
/// reading, row-group construction, or Parquet writing.
pub fn write_parquet(
    arrow_file: &[u8],
    writer_properties: crate::arrow2::writer_properties::WriterProperties,
) -> Result<Vec<u8>> {
    // Create IPC reader
    let mut input_file = Cursor::new(arrow_file);
    let stream_metadata = read_file_metadata(&mut input_file)?;
    let arrow_ipc_reader = IPCFileReader::new(input_file, stream_metadata.clone(), None, None);
    // Create Parquet writer
    let mut output_file: Vec<u8> = vec![];
    let options = writer_properties.get_write_options();
    let encoding = writer_properties.get_encoding();
    let schema = stream_metadata.schema.clone();
    let mut parquet_writer = ParquetFileWriter::try_new(&mut output_file, schema, options)?;
    for maybe_chunk in arrow_ipc_reader {
        let chunk = maybe_chunk?;
        let iter = vec![Ok(chunk)];
        // One encoding per column; the inner vec is for nested Parquet
        // columns, and here we assume columns are not nested.
        let encodings: Vec<_> = stream_metadata
            .schema
            .fields
            .iter()
            .map(|_| vec![encoding])
            .collect();
        // Propagate row-group construction errors instead of silently
        // skipping the chunk (the previous `if let Ok(..)` dropped them).
        let row_groups = RowGroupIterator::try_new(
            iter.into_iter(),
            &stream_metadata.schema,
            options,
            encodings,
        )?;
        for maybe_group in row_groups {
            let group = maybe_group?;
            parquet_writer.write(group)?;
        }
    }
    let _size = parquet_writer.end(None)?;
    Ok(output_file)
}
/// Writes an FFI-imported Arrow table to a Parquet file, returning the file
/// contents as a byte vector.
pub fn write_ffi_table_to_parquet(
    table: FFIArrowTable,
    writer_properties: crate::arrow2::writer_properties::WriterProperties,
) -> Result<Vec<u8>> {
    let (schema, chunks) = table.import()?;
    // Create Parquet writer
    let mut output_file: Vec<u8> = vec![];
    let options = writer_properties.get_write_options();
    let encoding = writer_properties.get_encoding();
    let mut parquet_writer = ParquetFileWriter::try_new(&mut output_file, schema.clone(), options)?;
    for chunk in chunks {
        let iter = vec![Ok(chunk)];
        // One encoding per column; the inner vec is for nested Parquet
        // columns, and here we assume columns are not nested.
        let encodings: Vec<_> = schema.fields.iter().map(|_| vec![encoding]).collect();
        // Propagate row-group construction errors instead of silently
        // skipping the chunk (the previous `if let Ok(..)` dropped them).
        let row_groups = RowGroupIterator::try_new(iter.into_iter(), &schema, options, encodings)?;
        for maybe_group in row_groups {
            let group = maybe_group?;
            parquet_writer.write(group)?;
        }
    }
    let _size = parquet_writer.end(None)?;
    Ok(output_file)
}
|
//! A module providing a CLI command for regenerating line protocol from a WAL file.
use std::fs::{create_dir_all, File, OpenOptions};
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use data_types::{NamespaceId, TableId};
use hashbrown::HashMap;
use influxdb_iox_client::connection::Connection;
use influxdb_iox_client::schema::Client as SchemaClient;
use observability_deps::tracing::{debug, error, info};
use wal::{ClosedSegmentFileReader, WriteOpEntry, WriteOpEntryDecoder};
use wal_inspect::{LineProtoWriter, NamespaceDemultiplexer, TableBatchWriter, WriteError};
use super::{Error, RegenerateError};
/// The set of errors which may occur when trying to look up a table name
/// index for a namespace.
#[derive(Debug, Error)]
pub enum TableIndexLookupError {
    /// The gRPC request for namespace or schema information failed.
    #[error("encountered error when making index request: {0}")]
    RequestFailed(#[from] Box<influxdb_iox_client::error::Error>),
    /// The namespace ID was not present in the fetched namespace index.
    #[error("no namespace known for id: {0:?}")]
    NamespaceNotKnown(NamespaceId),
}
// This type provides a convenience wrapper around the namespace and schema APIs
// to enable the fetching of table name indexes from namespace and table IDs.
struct TableIndexFetcher {
    // Maps namespace IDs to their names; populated once at construction.
    namespace_index: HashMap<NamespaceId, String>,
    // Client used to fetch per-namespace schemas on demand.
    schema_client: SchemaClient,
}
impl TableIndexFetcher {
    // Constructs a fetcher, eagerly retrieving the full namespace list so
    // later lookups can map a namespace ID to its name without additional
    // namespace API calls.
    async fn new(connection: Connection) -> Result<Self, TableIndexLookupError> {
        let mut namespace_client = influxdb_iox_client::namespace::Client::new(connection.clone());
        Ok(Self {
            namespace_index: namespace_client
                .get_namespaces()
                .await
                .map_err(Box::new)?
                .into_iter()
                .map(|ns| (NamespaceId::new(ns.id), ns.name))
                .collect(),
            schema_client: SchemaClient::new(connection),
        })
    }

    // Builds a table ID -> table name map for the given namespace by fetching
    // its schema. Fails if the namespace ID is unknown to the pre-fetched
    // index, or if the schema request errors.
    async fn get_table_name_index(
        &self,
        namespace_id: NamespaceId,
    ) -> Result<HashMap<TableId, String>, TableIndexLookupError> {
        let namespace_name = self
            .namespace_index
            .get(&namespace_id)
            .ok_or(TableIndexLookupError::NamespaceNotKnown(namespace_id))?;
        info!(
            %namespace_id,
            %namespace_name,
            "requesting namespace schema to construct table ID to name mapping"
        );
        // The client is cloned because `get_schema` needs `&mut self` while
        // this method only holds `&self`.
        let ns_schema = self
            .schema_client
            .clone()
            .get_schema(namespace_name)
            .await
            .map_err(Box::new)?;
        Ok(ns_schema
            .tables
            .into_iter()
            .map(|(table_name, table_schema)| {
                let table_id = TableId::new(table_schema.id);
                debug!(%table_name, %table_id, %namespace_id, %namespace_name, "discovered ID to name mapping for table in namespace");
                (table_id, table_name)
            })
            .collect())
    }
}
/// A container for the possible arguments & flags of a `regenerate-lp` command.
// NOTE: the `///` doc comments on the fields below double as clap's --help
// text; edit them with the CLI output in mind.
#[derive(Debug, clap::Parser)]
pub struct Config {
    /// The path to the input WAL file
    #[clap(value_parser)]
    input: PathBuf,

    /// The directory to write regenerated line protocol to. Creates the directory
    /// if it does not exist.
    ///
    /// When unspecified the line protocol is written to stdout
    #[clap(long, short, value_parser)]
    output_directory: Option<PathBuf>,

    /// When enabled, pre-existing line protocol files will be overwritten
    #[clap(long, short)]
    force: bool,

    /// When enabled, lookup of the measurement and database names is skipped.
    /// This means that regenerated line protocol will only contain the table
    /// ID for each measurement, rather than the original name
    #[clap(long, short)]
    skip_measurement_lookup: bool,
}
/// Executes the `regenerate-lp` command with the provided configuration, reading
/// write operation entries from a WAL file and mapping them to line protocol.
pub async fn command<C, CFut>(connection: C, config: Config) -> Result<(), Error>
where
    C: Send + FnOnce() -> CFut,
    CFut: Send + Future<Output = Connection>,
{
    let decoder = WriteOpEntryDecoder::from(
        ClosedSegmentFileReader::from_path(&config.input).map_err(Error::UnableToReadWalFile)?,
    );
    // Only dial the server (via `connection()`) when measurement-name lookup
    // is wanted; `--skip-measurement-lookup` keeps the command offline.
    let table_name_indexer = if config.skip_measurement_lookup {
        Ok(None)
    } else {
        let connection = connection().await;
        TableIndexFetcher::new(connection)
            .await
            .map(Some)
            .map_err(Error::UnableToInitTableNameFetcher)
    }?
    .map(Arc::new);
    match config.output_directory {
        // One line-protocol file per namespace in the requested directory.
        Some(d) => {
            let d = Arc::new(d);
            create_dir_all(d.as_path())?;
            let namespace_demux = NamespaceDemultiplexer::new(move |namespace_id| {
                new_line_proto_file_writer(
                    namespace_id,
                    Arc::clone(&d),
                    config.force,
                    table_name_indexer.as_ref().map(Arc::clone),
                )
            });
            decode_and_write_entries(decoder, namespace_demux).await
        }
        // No output directory: stream all regenerated line protocol to stdout.
        None => {
            let namespace_demux = NamespaceDemultiplexer::new(move |namespace_id| {
                let table_name_indexer = table_name_indexer.as_ref().map(Arc::clone);
                async move {
                    let table_name_lookup = match table_name_indexer {
                        Some(indexer) => Some(indexer.get_table_name_index(namespace_id).await?),
                        None => None,
                    };
                    let result: Result<LineProtoWriter<std::io::Stdout>, RegenerateError> =
                        Ok(LineProtoWriter::new(std::io::stdout(), table_name_lookup));
                    result
                }
            });
            decode_and_write_entries(decoder, namespace_demux).await
        }
    }
}
// Creates a new [`LineProtoWriter`] backed by a [`File`] in `output_dir` using
// the format "namespace_id_`namespace_id`.lp". If `replace_existing` is set
// then any pre-existing file is replaced.
async fn new_line_proto_file_writer(
    namespace_id: NamespaceId,
    output_dir: Arc<PathBuf>,
    replace_existing: bool,
    table_name_indexer: Option<Arc<TableIndexFetcher>>,
) -> Result<LineProtoWriter<File>, RegenerateError> {
    let file_path = output_dir
        .as_path()
        .join(format!("namespace_id_{}.lp", namespace_id));
    let mut open_options = OpenOptions::new();
    open_options.write(true);
    if replace_existing {
        // Truncate as well as create: with `create(true)` alone, overwriting
        // a longer pre-existing file would leave stale trailing bytes.
        open_options.create(true).truncate(true);
    } else {
        // Error out rather than clobber an existing file.
        open_options.create_new(true);
    }
    info!(
        ?file_path,
        %namespace_id,
        "creating namespaced file as destination for regenerated line protocol",
    );
    let table_name_lookup = match table_name_indexer {
        Some(indexer) => Some(indexer.get_table_name_index(namespace_id).await?),
        None => None,
    };
    Ok(LineProtoWriter::new(
        open_options.open(&file_path).map_err(WriteError::IoError)?,
        table_name_lookup,
    ))
}
// Consumes [`wal::WriteOpEntry`]s from `decoder` until end of stream or a fatal decode error is hit,
// rewriting each table-keyed batch of writes using the provided implementation [`TableBatchWriter`]
// and initialisation function used by `namespace_demux`.
//
// Errors returned by the `namespace_demux` or any [`TableBatchWriter`] returned
async fn decode_and_write_entries<T, F, I>(
    decoder: WriteOpEntryDecoder,
    mut namespace_demux: NamespaceDemultiplexer<T, F>,
) -> Result<(), Error>
where
    T: TableBatchWriter<WriteError = wal_inspect::WriteError> + Send,
    F: (Fn(NamespaceId) -> I) + Send + Sync,
    I: Future<Output = Result<T, RegenerateError>> + Send,
{
    // Non-fatal regeneration errors are accumulated; a decode error (`?`)
    // aborts immediately.
    let mut regenerate_errors: Vec<RegenerateError> = Vec::new();
    for (wal_entry_number, entry_batch) in decoder.enumerate() {
        let entry_batch = entry_batch?;
        let errors =
            regenerate_wal_entry((wal_entry_number, entry_batch), &mut namespace_demux).await;
        regenerate_errors.extend(errors);
    }
    if regenerate_errors.is_empty() {
        Ok(())
    } else {
        Err(Error::UnableToFullyRegenerateLineProtocol {
            sources: regenerate_errors,
        })
    }
}
// Given a `wal_entry` containing the entry number and a list of write
// operations, this function will regenerate the entries using the
// provided `namespace_demux`.
async fn regenerate_wal_entry<T, F, I>(
    wal_entry: (usize, Vec<WriteOpEntry>),
    namespace_demux: &mut NamespaceDemultiplexer<T, F>,
) -> Vec<RegenerateError>
where
    T: TableBatchWriter<WriteError = wal_inspect::WriteError> + Send,
    F: (Fn(NamespaceId) -> I) + Send + Sync,
    I: Future<Output = Result<T, RegenerateError>> + Send,
{
    let (wal_entry_number, entry_batch) = wal_entry;
    let mut regenerate_errors = Vec::new();
    for (write_op_number, entry) in entry_batch.into_iter().enumerate() {
        let namespace_id = entry.namespace;
        debug!(%namespace_id, %wal_entry_number, %write_op_number, "regenerating line protocol for namespace from WAL write op entry");
        // Resolve (or lazily initialise) the writer for this namespace, then
        // attempt the batch write; either step may fail independently.
        let write_result = match namespace_demux.get(namespace_id).await {
            Ok(writer) => writer
                .write_table_batches(entry.table_batches.into_iter())
                .map_err(RegenerateError::TableBatchWriteFailure),
            Err(err) => Err(err),
        };
        if let Err(err) = write_result {
            error!(
                %namespace_id,
                %wal_entry_number,
                %write_op_number,
                %err,
                "failed to rewrite table batches for write op");
            regenerate_errors.push(err);
        }
    }
    regenerate_errors
}
#[cfg(test)]
mod tests {
    use std::collections::VecDeque;
    use std::iter::from_fn;

    use assert_matches::assert_matches;
    use mutable_batch::MutableBatch;

    use super::*;

    // Test double for a table batch writer: replays a scripted queue of
    // results and records every (table, batch) pair it was asked to write.
    #[derive(Debug)]
    struct MockTableBatchWriter {
        return_results:
            VecDeque<Result<(), <MockTableBatchWriter as TableBatchWriter>::WriteError>>,
        got_calls: Vec<(TableId, MutableBatch)>,
    }

    impl TableBatchWriter for MockTableBatchWriter {
        type WriteError = WriteError;

        fn write_table_batches<B>(&mut self, table_batches: B) -> Result<(), Self::WriteError>
        where
            B: Iterator<Item = (TableId, MutableBatch)>,
        {
            self.got_calls.extend(table_batches);
            self.return_results
                .pop_front()
                .expect("excess calls to write_table_batches were made")
        }
    }

    #[tokio::test]
    async fn regenerate_entries_continues_on_error() {
        const NAMESPACE_ONE: NamespaceId = NamespaceId::new(1);
        const NON_EXISTENT_NAMESPACE: NamespaceId = NamespaceId::new(2);
        const NAMESPACE_OTHER: NamespaceId = NamespaceId::new(3);
        // Set up a demux to simulate a happy namespace, an always error-ing
        // namespace and a namespace with a temporarily erroring writer.
        let mut demux = NamespaceDemultiplexer::new(move |namespace_id| async move {
            if namespace_id == NAMESPACE_ONE {
                // Happy path: ten scripted Ok results.
                Ok(MockTableBatchWriter {
                    return_results: from_fn(|| Some(Ok(()))).take(10).collect(),
                    got_calls: Default::default(),
                })
            } else if namespace_id == NON_EXISTENT_NAMESPACE {
                // Writer initialisation itself fails for this namespace.
                Err(RegenerateError::NamespaceSchemaDiscoveryFailed(
                    TableIndexLookupError::NamespaceNotKnown(NON_EXISTENT_NAMESPACE),
                ))
            } else {
                // First write errors, subsequent writes succeed.
                let mut iter = VecDeque::new();
                iter.push_back(Err(WriteError::RecordBatchTranslationFailure(
                    "bananas".to_string(),
                )));
                iter.extend(from_fn(|| Some(Ok(()))).take(9).collect::<VecDeque<_>>());
                Ok(MockTableBatchWriter {
                    return_results: iter,
                    got_calls: Default::default(),
                })
            }
        });
        // Write some batches happily,
        let result = regenerate_wal_entry(
            (
                1,
                vec![
                    WriteOpEntry {
                        namespace: NAMESPACE_ONE,
                        table_batches: vec![(TableId::new(1), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                    WriteOpEntry {
                        namespace: NAMESPACE_ONE,
                        table_batches: vec![(TableId::new(2), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                ],
            ),
            &mut demux,
        )
        .await;
        assert!(result.is_empty());
        // Then try to write for an unknown namespace, but continue on error
        let result = regenerate_wal_entry(
            (
                2,
                vec![
                    WriteOpEntry {
                        namespace: NON_EXISTENT_NAMESPACE,
                        table_batches: vec![(TableId::new(1), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                    WriteOpEntry {
                        namespace: NAMESPACE_ONE,
                        table_batches: vec![(TableId::new(1), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                ],
            ),
            &mut demux,
        )
        .await;
        assert_eq!(result.len(), 1);
        assert_matches!(
            result.get(0),
            Some(RegenerateError::NamespaceSchemaDiscoveryFailed(..))
        );
        // And finally continue writing after a "corrupt" entry is unable
        // to be translated.
        let result = regenerate_wal_entry(
            (
                3,
                vec![
                    WriteOpEntry {
                        namespace: NAMESPACE_OTHER,
                        table_batches: vec![(TableId::new(1), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                    WriteOpEntry {
                        namespace: NAMESPACE_OTHER,
                        table_batches: vec![(TableId::new(1), Default::default())]
                            .into_iter()
                            .collect(),
                    },
                ],
            ),
            &mut demux,
        )
        .await;
        assert_eq!(result.len(), 1);
        assert_matches!(
            result.get(0),
            Some(RegenerateError::TableBatchWriteFailure(..))
        );
        // There should be five write calls made in total: three to
        // NAMESPACE_ONE and two to NAMESPACE_OTHER.
        assert_matches!(demux.get(NAMESPACE_ONE).await, Ok(mock) => {
            assert_eq!(mock.got_calls.len(), 3);
        });
        assert_matches!(demux.get(NAMESPACE_OTHER).await, Ok(mock) => {
            assert_eq!(mock.got_calls.len(), 2);
        });
    }
}
|
use crate::riscv_csr::CsrAddr;
use crate::riscv_tracer::RiscvTracer;
use crate::riscv32_core::AddrT;
use crate::riscv32_core::InstT;
use crate::riscv32_core::UXlenT;
use crate::riscv32_core::XlenT;
use crate::riscv64_core::UXlen64T;
use crate::riscv64_core::Xlen64T;
use crate::riscv32_core::PrivMode;
use crate::riscv32_core::MemResult;
use crate::riscv64_core::Riscv64Core;
use crate::riscv64_core::Riscv64Env;
use crate::riscv_exception::ExceptCode;
use crate::riscv_exception::RiscvException;
use crate::riscv32_insts::RiscvInstId;
use crate::riscv32_insts::RiscvInsts;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SIE_LSB;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SIE_MSB;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SPIE_LSB;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SPIE_MSB;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SPP_LSB;
use crate::riscv_csr_bitdef::SYSREG_MSTATUS_SPP_MSB;
impl RiscvInsts for Riscv64Env {
/// Decodes a raw 32-bit instruction word into a [`RiscvInstId`], keying off
/// the major opcode and then funct3/funct7/imm12 as each format requires.
/// Returns `None` for encodings this decoder does not recognise within a
/// known opcode group.
fn decode_inst(&mut self, inst: InstT) -> Option<RiscvInstId> {
    // Standard RV instruction fields.
    let opcode = inst & 0x7f;
    let funct3 = (inst >> 12) & 0x07;
    let funct7 = (inst >> 25) & 0x7f;
    let imm12 = (inst >> 20) & 0xfff;
    match opcode {
        0x0f => match funct3 {
            0b000 => Some(RiscvInstId::FENCE),
            0b001 => Some(RiscvInstId::FENCEI),
            _ => None,
        },
        // OP-IMM-32 (RV64 word-width immediate ops)
        0x1b => match funct3 {
            0b000 => Some(RiscvInstId::ADDIW),
            0b001 => Some(RiscvInstId::SLLIW),
            0b101 => match funct7 {
                0b0000000 => Some(RiscvInstId::SRLIW),
                0b0100000 => Some(RiscvInstId::SRAIW),
                _ => None,
            },
            _ => None,
        },
        // OP-32 (RV64 word-width register ops)
        0x3b => match funct3 {
            0b000 => match funct7 {
                0b0000000 => Some(RiscvInstId::ADDW),
                0b0100000 => Some(RiscvInstId::SUBW),
                _ => None,
            },
            0b001 => Some(RiscvInstId::SLLW),
            0b101 => match funct7 {
                0b0000000 => Some(RiscvInstId::SRLW),
                0b0100000 => Some(RiscvInstId::SRAW),
                _ => None,
            },
            _ => None,
        },
        // OP (register-register, including the M extension under funct7=1)
        0x33 => match funct7 {
            0b0000000 => match funct3 {
                0b000 => Some(RiscvInstId::ADD),
                0b001 => Some(RiscvInstId::SLL),
                0b010 => Some(RiscvInstId::SLT),
                0b011 => Some(RiscvInstId::SLTU),
                0b100 => Some(RiscvInstId::XOR),
                0b101 => Some(RiscvInstId::SRL),
                0b110 => Some(RiscvInstId::OR),
                0b111 => Some(RiscvInstId::AND),
                _ => None,
            },
            0b0100000 => match funct3 {
                0b000 => Some(RiscvInstId::SUB),
                0b101 => Some(RiscvInstId::SRA),
                _ => None,
            },
            0b0000001 => match funct3 {
                0b000 => Some(RiscvInstId::MUL),
                0b001 => Some(RiscvInstId::MULH),
                0b010 => Some(RiscvInstId::MULHSU),
                0b011 => Some(RiscvInstId::MULHU),
                0b100 => Some(RiscvInstId::DIV),
                0b101 => Some(RiscvInstId::DIVU),
                0b110 => Some(RiscvInstId::REM),
                0b111 => Some(RiscvInstId::REMU),
                _ => None,
            },
            _ => None,
        },
        // LOAD
        0x03 => match funct3 {
            0b000 => Some(RiscvInstId::LB),
            0b001 => Some(RiscvInstId::LH),
            0b010 => Some(RiscvInstId::LW),
            0b100 => Some(RiscvInstId::LBU),
            0b101 => Some(RiscvInstId::LHU),
            0b110 => Some(RiscvInstId::LWU),
            0b011 => Some(RiscvInstId::LD),
            _ => None,
        },
        // STORE
        0x23 => match funct3 {
            0b000 => Some(RiscvInstId::SB),
            0b001 => Some(RiscvInstId::SH),
            0b010 => Some(RiscvInstId::SW),
            0b011 => Some(RiscvInstId::SD),
            _ => None,
        },
        0x37 => Some(RiscvInstId::LUI),
        0x17 => Some(RiscvInstId::AUIPC),
        // BRANCH
        0x63 => match funct3 {
            0b000 => Some(RiscvInstId::BEQ),
            0b001 => Some(RiscvInstId::BNE),
            0b100 => Some(RiscvInstId::BLT),
            0b101 => Some(RiscvInstId::BGE),
            0b110 => Some(RiscvInstId::BLTU),
            0b111 => Some(RiscvInstId::BGEU),
            _ => None,
        },
        // OP-IMM
        0x13 => match funct3 {
            0b000 => Some(RiscvInstId::ADDI),
            0b010 => Some(RiscvInstId::SLTI),
            0b011 => Some(RiscvInstId::SLTIU),
            0b100 => Some(RiscvInstId::XORI),
            0b110 => Some(RiscvInstId::ORI),
            0b111 => Some(RiscvInstId::ANDI),
            0b001 => Some(RiscvInstId::SLLI),
            0b101 => match funct7 {
                0b0000000 => Some(RiscvInstId::SRLI),
                0b0100000 => Some(RiscvInstId::SRAI),
                _ => None,
            },
            _ => None,
        },
        0x6f => Some(RiscvInstId::JAL),
        0x67 => Some(RiscvInstId::JALR),
        // SYSTEM: privileged ops under funct3 == 0, CSR ops otherwise.
        0x73 => match funct3 {
            0b000 => match imm12 {
                0x000 => Some(RiscvInstId::ECALL),
                0x001 => Some(RiscvInstId::EBREAK),
                0x002 => Some(RiscvInstId::URET),
                0x102 => Some(RiscvInstId::SRET),
                0x302 => Some(RiscvInstId::MRET),
                _ => None,
            },
            0b001 => Some(RiscvInstId::CSRRW),
            0b010 => Some(RiscvInstId::CSRRS),
            0b011 => Some(RiscvInstId::CSRRC),
            0b101 => Some(RiscvInstId::CSRRWI),
            0b110 => Some(RiscvInstId::CSRRSI),
            0b111 => Some(RiscvInstId::CSRRCI),
            _ => None,
        },
        // NOTE(review): every other opcode decodes as WFI, so unknown
        // encodings are treated as a no-op-ish instruction rather than an
        // illegal-instruction trap — confirm this is intentional.
        _ => Some(RiscvInstId::WFI),
    }
}
fn execute_inst(&mut self, dec_inst: RiscvInstId, inst: InstT, step: u32) {
self.m_trace.m_executed_pc = self.m_pc;
self.m_trace.m_inst_hex = inst;
self.m_trace.m_step = step;
self.m_trace.m_priv = self.m_priv;
self.m_trace.m_vmmode = self.get_vm_mode();
let rs1 = Self::get_rs1_addr(inst);
let rs2 = Self::get_rs2_addr(inst);
let rd = Self::get_rd_addr(inst);
let csr_addr = CsrAddr::from_u64(((inst >> 20) & 0x0fff) as u64);
self.set_update_pc(false);
match dec_inst {
RiscvInstId::CSRRW => {
let rs1_data = self.read_reg(rs1);
let reg_data: Xlen64T = self.m_csr.csrrw(csr_addr, rs1_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::CSRRS => {
let rs1_data = self.read_reg(rs1);
let reg_data: Xlen64T = self.m_csr.csrrs(csr_addr, rs1_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::CSRRC => {
let rs1_data = self.read_reg(rs1);
let reg_data: Xlen64T = self.m_csr.csrrc(csr_addr, rs1_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::CSRRWI => {
let zimm: Xlen64T = ((inst >> 15) & 0x1f) as Xlen64T;
let reg_data: Xlen64T = self.m_csr.csrrw(csr_addr, zimm);
self.write_reg(rd, reg_data);
}
RiscvInstId::CSRRSI => {
let zimm: Xlen64T = ((inst >> 15) & 0x1f) as Xlen64T;
let reg_data: Xlen64T = self.m_csr.csrrs(csr_addr, zimm);
self.write_reg(rd, reg_data);
}
RiscvInstId::CSRRCI => {
let zimm: Xlen64T = ((inst >> 15) & 0x1f) as Xlen64T;
let reg_data: Xlen64T = self.m_csr.csrrc(csr_addr, zimm);
self.write_reg(rd, reg_data);
}
RiscvInstId::LUI => {
let mut imm: Xlen64T =
Self::extend_sign(Self::extract_bit_field(inst as Xlen64T, 31, 12), 19);
imm = imm << 12;
self.write_reg(rd, imm);
}
RiscvInstId::AUIPC => {
let mut imm: Xlen64T =
Self::extend_sign(Self::extract_bit_field(inst as Xlen64T, 31, 12), 19);
imm = (imm << 12).wrapping_add(self.m_pc as Xlen64T);
self.write_reg(rd, imm);
}
RiscvInstId::LB => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_byte(addr as AddrT);
if result == MemResult::NoExcept {
let extended_reg_data = Self::extend_sign(reg_data, 7);
self.write_reg(rd, extended_reg_data);
}
}
RiscvInstId::LH => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_hword(addr as AddrT);
if result == MemResult::NoExcept {
let extended_reg_data = Self::extend_sign(reg_data, 15);
self.write_reg(rd, extended_reg_data);
}
}
RiscvInstId::LW => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_word(addr as AddrT);
if result == MemResult::NoExcept {
self.write_reg(rd, reg_data);
}
}
RiscvInstId::LD => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_dword(addr as AddrT);
if result == MemResult::NoExcept {
self.write_reg(rd, reg_data);
}
}
RiscvInstId::LBU => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_byte(addr as AddrT);
if result == MemResult::NoExcept {
self.write_reg(rd, reg_data as Xlen64T);
}
}
RiscvInstId::LHU => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_hword(addr as AddrT);
if result == MemResult::NoExcept {
self.write_reg(rd, reg_data as Xlen64T);
}
}
RiscvInstId::LWU => {
let addr = self.read_reg(rs1) + Self::extract_ifield(inst);
let (result, reg_data) = self.read_bus_word(addr as AddrT);
if result == MemResult::NoExcept {
self.write_reg(rd, reg_data & 0xffffffff);
}
}
RiscvInstId::ADDI => {
let rs1_data = self.read_reg(rs1);
let imm_data = Self::extract_ifield(inst);
let reg_data: Xlen64T = rs1_data.wrapping_add(imm_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::SLTI => {
let reg_data: bool = self.read_reg(rs1) < Self::extract_ifield(inst);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SLTIU => {
let reg_data: bool =
(self.read_reg(rs1) as UXlen64T) < (Self::extract_ifield(inst) as UXlen64T);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::XORI => {
let reg_data: Xlen64T = self.read_reg(rs1) ^ Self::extract_ifield(inst);
self.write_reg(rd, reg_data);
}
RiscvInstId::ORI => {
let reg_data: Xlen64T = self.read_reg(rs1) | Self::extract_ifield(inst);
self.write_reg(rd, reg_data);
}
RiscvInstId::ANDI => {
let reg_data: Xlen64T = self.read_reg(rs1) & Self::extract_ifield(inst);
self.write_reg(rd, reg_data);
}
RiscvInstId::SLLI => {
let shamt: u32 = (Self::extract_shamt_field(inst) & 0x3f) as u32;
let reg_data: UXlen64T = (self.read_reg(rs1) as UXlen64T).wrapping_shl(shamt);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SRLI => {
let shamt: u32 = (Self::extract_shamt_field(inst) & 0x3f) as u32;
let reg_data: UXlen64T = (self.read_reg(rs1) as UXlen64T).wrapping_shr(shamt);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SRAI => {
let shamt: u32 = (Self::extract_shamt_field(inst) & 0x3f) as u32;
let reg_data: Xlen64T = self.read_reg(rs1).wrapping_shr(shamt);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::ADD => {
let rs1_data = self.read_reg(rs1);
let rs2_data = self.read_reg(rs2);
let reg_data: Xlen64T = rs1_data.wrapping_add(rs2_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::SUB => {
let rs1_data = self.read_reg(rs1);
let rs2_data = self.read_reg(rs2);
let reg_data: Xlen64T = rs1_data.wrapping_sub(rs2_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::SLL => {
let rs1_data = self.read_reg(rs1) as UXlen64T;
let rs2_data = self.read_reg(rs2) as UXlenT;
let reg_data = rs1_data.wrapping_shl(rs2_data);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SLT => {
let reg_data: bool = self.read_reg(rs1) < self.read_reg(rs2);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SLTU => {
let reg_data: bool =
(self.read_reg(rs1) as UXlen64T) < (self.read_reg(rs2) as UXlen64T);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::XOR => {
let reg_data: Xlen64T = self.read_reg(rs1) ^ self.read_reg(rs2);
self.write_reg(rd, reg_data);
}
RiscvInstId::SRL => {
let rs1_data = self.read_reg(rs1) as UXlen64T;
let rs2_data = self.read_reg(rs2);
let reg_data = rs1_data.wrapping_shr(rs2_data as u32);
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::SRA => {
let rs1_data = self.read_reg(rs1);
let rs2_data: UXlen64T = self.read_reg(rs2) as UXlen64T;
let reg_data: Xlen64T = rs1_data.wrapping_shr(rs2_data as u32);
self.write_reg(rd, reg_data);
}
RiscvInstId::MUL => {
let rs1_data = self.read_reg(rs1);
let rs2_data = self.read_reg(rs2);
let reg_data: Xlen64T = rs1_data.wrapping_mul(rs2_data);
self.write_reg(rd, reg_data);
}
RiscvInstId::MULH => {
let rs1_data: i64 = self.read_reg(rs1) as i64;
let rs2_data: i64 = self.read_reg(rs2) as i64;
let mut reg_data: i64 = rs1_data.wrapping_mul(rs2_data);
reg_data = (reg_data >> 32) & 0x0ffffffff;
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::MULHSU => {
let rs1_data: i64 = (self.read_reg(rs1) as i32) as i64;
let rs2_data: i64 = (self.read_reg(rs2) as u32) as i64;
let mut reg_data: i64 = rs1_data.wrapping_mul(rs2_data);
reg_data = (reg_data >> 32) & 0xffffffff;
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::MULHU => {
let rs1_data: u64 = (self.read_reg(rs1) as u32) as u64;
let rs2_data: u64 = (self.read_reg(rs2) as u32) as u64;
let mut reg_data: u64 = rs1_data.wrapping_mul(rs2_data);
reg_data = (reg_data >> 32) & 0xffffffff;
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::REM => {
let rs1_data = self.read_reg(rs1);
let rs2_data = self.read_reg(rs2);
let reg_data: Xlen64T;
if rs2_data == 0 {
reg_data = rs1_data;
} else if rs2_data == -1 {
reg_data = 0;
} else {
reg_data = rs1_data.wrapping_rem(rs2_data);
}
self.write_reg(rd, reg_data);
}
RiscvInstId::REMU => {
let rs1_data: UXlen64T = self.read_reg(rs1) as UXlen64T;
let rs2_data: UXlen64T = self.read_reg(rs2) as UXlen64T;
let reg_data: UXlen64T;
if rs2_data == 0 {
reg_data = rs1_data;
} else {
reg_data = rs1_data.wrapping_rem(rs2_data);
}
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::DIV => {
let rs1_data = self.read_reg(rs1);
let rs2_data = self.read_reg(rs2);
let reg_data: Xlen64T;
if rs2_data == 0 {
reg_data = -1;
} else {
reg_data = rs1_data.wrapping_div(rs2_data);
}
self.write_reg(rd, reg_data);
}
RiscvInstId::DIVU => {
let rs1_data: UXlen64T = self.read_reg(rs1) as UXlen64T;
let rs2_data: UXlen64T = self.read_reg(rs2) as UXlen64T;
let reg_data: UXlen64T;
if rs2_data == 0 {
reg_data = 0xffffffff;
} else {
reg_data = rs1_data.wrapping_div(rs2_data);
}
self.write_reg(rd, reg_data as Xlen64T);
}
RiscvInstId::OR => {
let reg_data: Xlen64T = self.read_reg(rs1) | self.read_reg(rs2);
self.write_reg(rd, reg_data);
}
RiscvInstId::AND => {
let reg_data: Xlen64T = self.read_reg(rs1) & self.read_reg(rs2);
self.write_reg(rd, reg_data);
}
RiscvInstId::SB => {
let rs2_data = self.read_reg(rs2);
let addr: AddrT = (self.read_reg(rs1) + Self::extract_sfield(inst)) as AddrT;
self.write_bus_byte(addr, rs2_data);
}
RiscvInstId::SH => {
let rs2_data = self.read_reg(rs2);
let addr: AddrT = (self.read_reg(rs1) + Self::extract_sfield(inst)) as AddrT;
self.write_bus_hword(addr, rs2_data);
}
RiscvInstId::SW => {
let rs2_data = self.read_reg(rs2);
let addr = self.read_reg(rs1) + Self::extract_sfield(inst);
self.write_bus_word(addr as AddrT, rs2_data);
}
RiscvInstId::SD => {
let rs2_data = self.read_reg(rs2);
let addr = self.read_reg(rs1) + Self::extract_sfield(inst);
self.write_bus_dword(addr as AddrT, rs2_data);
}
RiscvInstId::JAL => {
let addr: AddrT = Self::extract_uj_field(inst) as AddrT;
self.write_reg(rd, (self.m_pc + 4) as Xlen64T);
self.m_pc = self.m_pc.wrapping_add(addr);
self.set_update_pc(true);
}
RiscvInstId::BEQ
| RiscvInstId::BNE
| RiscvInstId::BLT
| RiscvInstId::BGE
| RiscvInstId::BLTU
| RiscvInstId::BGEU => {
let rs1_data: Xlen64T = self.read_reg(rs1);
let rs2_data: Xlen64T = self.read_reg(rs2);
let addr: AddrT = Self::extract_sb_field(inst) as AddrT;
let jump_en: bool;
match dec_inst {
RiscvInstId::BEQ => jump_en = rs1_data == rs2_data,
RiscvInstId::BNE => jump_en = rs1_data != rs2_data,
RiscvInstId::BLT => jump_en = rs1_data < rs2_data,
RiscvInstId::BGE => jump_en = rs1_data >= rs2_data,
RiscvInstId::BLTU => jump_en = (rs1_data as UXlen64T) < (rs2_data as UXlen64T),
RiscvInstId::BGEU => jump_en = (rs1_data as UXlen64T) >= (rs2_data as UXlen64T),
_ => panic!("Unknown value Branch"),
}
if jump_en {
self.m_pc = self.m_pc.wrapping_add(addr);
self.set_update_pc(true);
}
}
RiscvInstId::JALR => {
let mut addr: AddrT = Self::extract_ifield(inst) as AddrT;
let rs1_data: AddrT = self.read_reg(rs1) as AddrT;
addr = rs1_data.wrapping_add(addr);
addr = addr & (!0x01);
self.write_reg(rd, (self.m_pc + 4) as Xlen64T);
self.m_pc = addr;
self.set_update_pc(true);
}
RiscvInstId::FENCE => {}
RiscvInstId::FENCEI => {}
RiscvInstId::ECALL => {
self.m_csr.csrrw(CsrAddr::Mepc, self.m_pc as Xlen64T); // MEPC
let current_priv: PrivMode = self.m_priv;
match current_priv {
PrivMode::User => self.generate_exception(ExceptCode::EcallFromUMode, 0),
PrivMode::Supervisor => self.generate_exception(ExceptCode::EcallFromSMode, 0),
PrivMode::Hypervisor => self.generate_exception(ExceptCode::EcallFromHMode, 0),
PrivMode::Machine => self.generate_exception(ExceptCode::EcallFromMMode, 0),
}
self.set_update_pc(true);
}
RiscvInstId::EBREAK => {}
RiscvInstId::URET => {}
RiscvInstId::SRET => {
let mstatus: Xlen64T = self
.m_csr
.csrrs(CsrAddr::Mstatus, PrivMode::Machine as Xlen64T);
let next_priv_uint: Xlen64T = Self::extract_bit_field(
mstatus,
SYSREG_MSTATUS_SPP_MSB,
SYSREG_MSTATUS_SPP_LSB,
);
let next_priv: PrivMode = PrivMode::from_u8(next_priv_uint as u8);
let mut next_mstatus: Xlen64T = mstatus;
next_mstatus = Self::set_bit_field(
next_mstatus,
Self::extract_bit_field(
mstatus,
SYSREG_MSTATUS_SPIE_MSB,
SYSREG_MSTATUS_SPIE_LSB,
),
SYSREG_MSTATUS_SIE_MSB,
SYSREG_MSTATUS_SIE_LSB,
);
next_mstatus = Self::set_bit_field(
next_mstatus,
1,
SYSREG_MSTATUS_SPIE_MSB,
SYSREG_MSTATUS_SPIE_LSB,
);
next_mstatus = Self::set_bit_field(
next_mstatus,
PrivMode::User as Xlen64T,
SYSREG_MSTATUS_SPP_MSB,
SYSREG_MSTATUS_SPP_LSB,
);
self.m_csr.csrrw(CsrAddr::Mstatus, next_mstatus);
let ret_pc = self.m_csr.csrrs(CsrAddr::Sepc, 0);
self.set_priv_mode(next_priv);
self.set_pc(ret_pc as AddrT);
self.set_update_pc(true);
}
RiscvInstId::MRET => {
let mepc: Xlen64T = self.m_csr.csrrs(CsrAddr::Mepc, 0); // MEPC
self.m_pc = mepc as AddrT;
self.set_update_pc(true);
}
RiscvInstId::ADDIW => {
let rs1_data = self.read_reg(rs1) as i32;
let imm_data = Self::extract_ifield(inst) as i32;
let reg_data = rs1_data.wrapping_add(imm_data) as Xlen64T;
self.write_reg(rd, reg_data);
}
RiscvInstId::SLLIW => {
let rs1_data = self.read_reg(rs1) as i32;
let imm_data = Self::extract_shamt_field(inst) & 0x01f;
let reg_data = rs1_data << imm_data;
self.write_reg(rd, reg_data as i64);
}
RiscvInstId::SRLIW => {
let rs1_data = self.read_reg(rs1) as u32;
let imm_data = Self::extract_shamt_field(inst) & 0x01f;
let reg_data = rs1_data >> imm_data;
self.write_reg(rd, Self::extend_sign(reg_data as Xlen64T, 31));
}
RiscvInstId::SRAIW => {
let rs1_data = self.read_reg(rs1) as i32;
let imm_data = Self::extract_shamt_field(inst) & 0x01f;
let reg_data = rs1_data >> imm_data;
self.write_reg(rd, Self::extend_sign(reg_data as Xlen64T, 31));
}
RiscvInstId::ADDW => {
let rs1_data = self.read_reg(rs1) as i32;
let rs2_data = self.read_reg(rs2) as i32;
let reg_data = rs1_data.wrapping_add(rs2_data);
self.write_reg(rd, reg_data.into());
}
RiscvInstId::SUBW => {
let rs1_data = self.read_reg(rs1) as i32;
let rs2_data = self.read_reg(rs2) as i32;
let reg_data = rs1_data.wrapping_sub(rs2_data);
self.write_reg(rd, reg_data.into());
}
RiscvInstId::SLLW => {
let rs1_data = self.read_reg(rs1) as UXlenT;
let rs2_data = self.read_reg(rs2) as UXlenT;
let reg_data = rs1_data.wrapping_shl(rs2_data);
self.write_reg(rd, Self::extend_sign(reg_data as Xlen64T, 31));
}
RiscvInstId::SRLW => {
let rs1_data = self.read_reg(rs1) as UXlenT;
let rs2_data = self.read_reg(rs2) as UXlenT;
let shamt: UXlenT = rs2_data & 0x1f;
let reg_data = rs1_data.wrapping_shr(shamt);
self.write_reg(rd, Self::extend_sign(reg_data as Xlen64T, 31));
}
RiscvInstId::SRAW => {
let rs1_data = self.read_reg(rs1) as XlenT;
let rs2_data = self.read_reg(rs2) as XlenT;
let shamt: UXlenT = (rs2_data & 0x1f) as UXlenT;
let reg_data = rs1_data.wrapping_shr(shamt);
self.write_reg(rd, Self::extend_sign(reg_data as Xlen64T, 31));
}
_ => {}
}
if self.is_update_pc() == false {
self.m_pc += 4;
}
self.m_trace.print_trace();
self.m_trace.clear();
}
}
|
use std::any::TypeId;
use std::collections::HashSet;
use std::marker::PhantomData;
use accessors::Accessor;
use crate::bitset::BitSet;
use crate::ecs::Components;
use crate::query::ComponentTypeId::{OptionalComponentTypeId, RequiredComponentTypeId};
use crate::EntityIndex;
/// A query over entity components.
///
/// Implemented (via `impl_query_tuples!`) for tuples of accessors so a
/// system can fetch several components of an entity at once.
pub trait Query<'a> {
    /// What a successful fetch yields for one entity.
    type ResultType: 'a;
    /// Fetch this query's components for `index`; `None` when the entity
    /// does not satisfy the query.
    fn fetch(index: EntityIndex, components: &'a Components) -> Option<Self::ResultType>;
    /// Ids of all entities (with index below `entity_count`) matching the query.
    fn matching_ids(entity_count: usize, components: &'a Components) -> HashSet<EntityIndex>;
    /// Component type ids this query touches (required and optional).
    fn type_ids() -> Vec<ComponentTypeId>;
}
/// Implements [`Query`] for a tuple of accessors.
///
/// `fetch` succeeds only when every accessor in the tuple can fetch the
/// entity; `matching_ids` is the set intersection of every accessor's
/// matching ids.
macro_rules! impl_query_tuples {
    ($th:tt, $($t:tt,)*) => {
        impl<'a, $th, $($t,)*> Query<'a> for ($th, $($t,)*)
        where
            $th: Accessor<'a>,
            $($t: Accessor<'a>,)*
        {
            // Each result pairs the entity index with the tuple of component refs.
            type ResultType = (EntityIndex, ($th::RefType, $($t::RefType,)*));
            fn fetch(index: EntityIndex, components: &'a Components) -> Option<Self::ResultType> {
                // `?` short-circuits: the entity must satisfy every accessor.
                Some((index, ($th::fetch(index, components)?, $($t::fetch(index, components)?,)*)))
            }
            #[allow(unused_mut)]
            fn matching_ids(entity_count: usize, components: &'a Components) -> HashSet<EntityIndex> {
                // Start from the first accessor's ids and intersect the rest in.
                let mut result = $th::matching_ids(entity_count, components);
                $(result = result.intersection(&$t::matching_ids(entity_count, components)).cloned().collect();)*
                result
            }
            fn type_ids() -> Vec<ComponentTypeId> {
                vec![$th::type_id(), $($t::type_id(),)*]
            }
        }
    }
}
// Queries support tuples of one to eight accessors.
impl_query_tuples!(A,);
impl_query_tuples!(A, B,);
impl_query_tuples!(A, B, C,);
impl_query_tuples!(A, B, C, D,);
impl_query_tuples!(A, B, C, D, E,);
impl_query_tuples!(A, B, C, D, E, F,);
impl_query_tuples!(A, B, C, D, E, F, G,);
impl_query_tuples!(A, B, C, D, E, F, G, H,);
/// Iterator over query results restricted to an explicit set of entity ids.
pub struct QueryIteratorByIds<'a, Q> {
    // Underlying iterator over all entities matching the query.
    inner_iterator: QueryIterator<'a, Q>,
    // Only entities whose index is in this set are yielded.
    ids: HashSet<usize>,
}
impl<'a, 'b, Q: Query<'b>> QueryIteratorByIds<'a, Q> {
    /// Build an id-filtered query iterator over `components`.
    ///
    /// `ids` is copied into an owned set; the caller keeps its borrow.
    #[must_use]
    pub fn new(entity_count: usize, components: &'a Components, ids: &HashSet<usize>) -> Self {
        let inner_iterator = QueryIterator::new(entity_count, components);
        let owned_ids: HashSet<usize> = ids.iter().copied().collect();
        Self { inner_iterator, ids: owned_ids }
    }
}
impl<'a, Q> Iterator for QueryIteratorByIds<'a, Q>
where
    Q: Query<'a>,
{
    type Item = Q::ResultType;
    /// Advance the inner iterator until it yields an entity whose index is in
    /// the id set, or until it is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let candidate = self.inner_iterator.next();
            if candidate.is_none() || self.ids.contains(&self.inner_iterator.index) {
                return candidate;
            }
        }
    }
}
/// Iterator over all entities matching a query `Q`.
pub struct QueryIterator<'a, Q> {
    // Index of the most recently yielded entity.
    index: EntityIndex,
    components: &'a Components,
    // Candidate ids, collected ascending and consumed from the back.
    matching_entities: Vec<EntityIndex>,
    // `Q` is only used through its associated functions; no value is stored.
    marker: PhantomData<&'a Q>,
}
impl<'a, 'b, Q: Query<'b>> QueryIterator<'a, Q> {
    /// Build an iterator over all entities that own every component the
    /// query requires.
    ///
    /// Entities are yielded in reverse index order (candidates are collected
    /// ascending and popped from the back by `next`).
    #[must_use]
    pub fn new(entity_count: usize, components: &'a Components) -> Self {
        // Compute the query's type ids once; the previous version built the
        // Vec a second time just to count its required entries.
        let type_ids = Q::type_ids();
        let mut bitsets = vec![];
        for type_id in &type_ids {
            match type_id {
                RequiredComponentTypeId(type_id) => {
                    if let Some(component_store) = components.get(type_id) {
                        bitsets.push(component_store.entities_bitset);
                    }
                }
                // Optional accessors never constrain the candidate set.
                OptionalComponentTypeId(_) => continue,
            }
        }
        let mut matching_entities = vec![];
        // If any required component has no store at all, nothing can match
        // and the candidate list stays empty.
        if bitsets.len() == type_ids.iter().filter(|t| t.is_required()).count() {
            'outer: for i in 0..entity_count {
                for bitset in &bitsets {
                    if !bitset.bit(i) {
                        continue 'outer;
                    }
                }
                matching_entities.push(i);
            }
        }
        Self {
            index: 0,
            components,
            matching_entities,
            marker: PhantomData,
        }
    }
}
impl<'a, Q> Iterator for QueryIterator<'a, Q>
where
    Q: Query<'a>,
{
    type Item = Q::ResultType;
    /// Pop the next candidate entity and fetch its components.
    fn next(&mut self) -> Option<Self::Item> {
        match self.matching_entities.pop() {
            Some(entity) => {
                self.index = entity;
                Q::fetch(entity, self.components)
            }
            None => None,
        }
    }
}
/// Component accessors: the building blocks that, in tuples, form a query.
pub mod accessors {
    use std::any::TypeId;
    use std::cell::{Ref, RefMut};
    use std::collections::HashSet;
    use std::marker::PhantomData;
    use crate::bitset::BitSet;
    use crate::ecs::Components;
    use crate::query::ComponentTypeId;
    use crate::query::ComponentTypeId::{OptionalComponentTypeId, RequiredComponentTypeId};
    use crate::EntityIndex;
    /// Marker accessor wrapping another accessor as optional: fetching yields
    /// `Some(None)` instead of failing when the component is absent.
    pub struct Opt<'a, T: Accessor<'a>>(PhantomData<&'a T>);
    /// A single component access (shared `&T`, mutable `&mut T`, or `Opt`).
    pub trait Accessor<'a> {
        /// The component type itself.
        type RawType: 'a;
        /// The borrow handed out by `fetch` (`Ref`, `RefMut`, or `Option` thereof).
        type RefType: 'a;
        /// Borrow this accessor's component for entity `index`, if present.
        fn fetch(index: usize, components: &'a Components) -> Option<Self::RefType>;
        /// Ids of entities this accessor matches.
        fn matching_ids(entity_count: usize, components: &'a Components) -> HashSet<EntityIndex>;
        /// Required/optional component type id for this accessor.
        fn type_id() -> ComponentTypeId;
    }
    /// Shared access: `&T` yields a `Ref<'a, T>` borrow.
    impl<'a, T: 'static> Accessor<'a> for &T {
        type RawType = T;
        type RefType = Ref<'a, T>;
        fn fetch(index: usize, components: &'a Components) -> Option<Self::RefType> {
            // Narrow the dynamically-typed borrow to `T`. The downcast is
            // expected to succeed because the store was looked up by
            // `TypeId::of::<T>()`.
            Some(Ref::map(
                components.get(&TypeId::of::<T>())?.component_data[index]
                    .as_ref()?
                    .borrow(),
                |r| r.downcast_ref().unwrap(),
            ))
        }
        fn matching_ids(entity_count: usize, components: &'a Components) -> HashSet<EntityIndex> {
            matching_ids_for_type::<T>(entity_count, components)
        }
        fn type_id() -> ComponentTypeId {
            RequiredComponentTypeId(TypeId::of::<T>())
        }
    }
    /// Mutable access: `&mut T` yields a `RefMut<'a, T>` borrow.
    impl<'a, T: 'static> Accessor<'a> for &mut T {
        type RawType = T;
        type RefType = RefMut<'a, T>;
        fn fetch(index: usize, components: &'a Components) -> Option<Self::RefType> {
            // Same as the shared case, but through `borrow_mut`; panics at
            // runtime if the component is already borrowed (RefCell rules).
            Some(RefMut::map(
                components.get(&TypeId::of::<T>())?.component_data[index]
                    .as_ref()?
                    .borrow_mut(),
                |r| r.downcast_mut().unwrap(),
            ))
        }
        fn matching_ids(entity_count: usize, components: &'a Components) -> HashSet<EntityIndex> {
            matching_ids_for_type::<T>(entity_count, components)
        }
        fn type_id() -> ComponentTypeId {
            RequiredComponentTypeId(TypeId::of::<T>())
        }
    }
    /// Collect the ids of all entities that own a component of type `T`.
    fn matching_ids_for_type<T: 'static>(
        entity_count: usize,
        components: &Components,
    ) -> HashSet<EntityIndex> {
        let mut result = HashSet::new();
        if let Some(component_store) = components.get(&TypeId::of::<T>()) {
            // NOTE(review): scans up to the larger of `entity_count` and the
            // bitset length — presumably so ids beyond `entity_count` that
            // already own the component are not missed; confirm intent.
            for i in 0..entity_count.max(component_store.entities_bitset.bit_count()) {
                if component_store.entities_bitset.bit(i) {
                    result.insert(i);
                }
            }
        }
        result
    }
    impl<'a, T: 'static + Accessor<'a>> Accessor<'a> for Opt<'a, T> {
        type RawType = T::RawType;
        type RefType = Option<T::RefType>;
        fn fetch(index: usize, components: &'a Components) -> Option<Self::RefType> {
            // Always succeeds; the inner fetch result becomes the payload.
            Some(T::fetch(index, components))
        }
        fn matching_ids(entity_count: usize, _components: &'a Components) -> HashSet<EntityIndex> {
            // An optional accessor matches every entity.
            (0..entity_count).collect()
        }
        fn type_id() -> ComponentTypeId {
            // Re-tag the wrapped accessor's id as optional; `Opt<Opt<_>>` is
            // rejected at runtime.
            if let RequiredComponentTypeId(type_id) = T::type_id() {
                OptionalComponentTypeId(type_id)
            } else {
                panic!("Can't use nested OptionalComponentTypeId")
            }
        }
    }
}
/// Tag distinguishing components a query requires from ones it merely reads
/// when present.
pub enum ComponentTypeId {
    /// The entity must own this component to match the query.
    RequiredComponentTypeId(TypeId),
    /// Fetched as `Option`; absence does not exclude the entity.
    OptionalComponentTypeId(TypeId),
}
impl ComponentTypeId {
#[must_use]
pub fn is_required(&self) -> bool {
matches!(self, RequiredComponentTypeId(_))
}
}
|
// Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Add correlation of reads and replies.
#![cfg(not(tarpaulin_include))]
use crate::sink::{prelude::*, Reply};
use crate::source::prelude::*;
use async_channel::Sender;
use async_std_resolver::{
lookup::Lookup,
proto::{
rr::{RData, RecordType},
xfer::DnsRequestOptions,
},
resolver_from_system_conf, AsyncStdResolver,
};
use halfbrown::HashMap;
use std::boxed::Box;
use tremor_value::literal;
/// Offramp that answers DNS lookup requests carried in events.
pub struct Dns {
    // sink_url: TremorUrl,
    /// Origin attached to every synthesized response event.
    event_origin_uri: EventOriginUri,
    /// Installed by `init` from the system DNS configuration; `None` until then.
    resolver: Option<AsyncStdResolver>,
    // reply: Option<Sender<Reply>>,
}
impl offramp::Impl for Dns {
    /// Construct the DNS sink. The resolver itself is created later, in
    /// `Sink::init`; this takes no configuration.
    fn from_config(_config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> {
        let sink = Dns {
            event_origin_uri: EventOriginUri {
                uid: 0,
                scheme: "tremor-dns".to_string(),
                host: "localhost".to_string(),
                port: None,
                path: Vec::new(),
            },
            resolver: None,
        };
        Ok(SinkManager::new_box(sink))
    }
}
fn str_to_record_type(s: &str) -> Result<RecordType> {
match s {
"A" => Ok(RecordType::A),
"AAAA" => Ok(RecordType::AAAA),
"ANAME" => Ok(RecordType::ANAME),
"ANY" => Ok(RecordType::ANY),
"AXFR" => Ok(RecordType::AXFR),
"CAA" => Ok(RecordType::CAA),
"CNAME" => Ok(RecordType::CNAME),
"HINFO" => Ok(RecordType::HINFO),
"HTTPS" => Ok(RecordType::HTTPS),
"IXFR" => Ok(RecordType::IXFR),
"MX" => Ok(RecordType::MX),
"NAPTR" => Ok(RecordType::NAPTR),
"NS" => Ok(RecordType::NS),
"NULL" => Ok(RecordType::NULL),
"OPENPGPKEY" => Ok(RecordType::OPENPGPKEY),
"OPT" => Ok(RecordType::OPT),
"PTR" => Ok(RecordType::PTR),
"SOA" => Ok(RecordType::SOA),
"SRV" => Ok(RecordType::SRV),
"SSHFP" => Ok(RecordType::SSHFP),
"SVCB" => Ok(RecordType::SVCB),
"TLSA" => Ok(RecordType::TLSA),
"TXT" => Ok(RecordType::TXT),
"ZERO" => Ok(RecordType::ZERO),
other => Err(format!("Invalid or unsupported record type: {}", other).into()),
}
}
/// Render a single resource-record payload as a one-key object value,
/// e.g. `{"A": "127.0.0.1"}`.
///
/// Returns `None` for record types with no useful textual payload
/// (`OPT`, unknown types, `ZERO`); callers filter those out.
fn rdata_to_value(r: &RData) -> Option<Value<'static>> {
    Some(match r {
        RData::A(v) => literal!({ "A": v.to_string() }),
        RData::AAAA(v) => literal!({ "AAAA": v.to_string() }),
        RData::ANAME(v) => literal!({ "ANAME": v.to_string() }),
        RData::CNAME(v) => literal!({ "CNAME": v.to_string() }),
        RData::TXT(v) => literal!({ "TXT": v.to_string() }),
        RData::PTR(v) => literal!({ "PTR": v.to_string() }),
        RData::CAA(v) => literal!({ "CAA": v.to_string() }),
        RData::HINFO(v) => literal!({ "HINFO": v.to_string() }),
        RData::HTTPS(v) => literal!({ "HTTPS": v.to_string() }),
        RData::MX(v) => literal!({ "MX": v.to_string() }),
        RData::NAPTR(v) => literal!({ "NAPTR": v.to_string() }),
        RData::NULL(v) => literal!({ "NULL": v.to_string() }),
        RData::NS(v) => literal!({ "NS": v.to_string() }),
        RData::OPENPGPKEY(v) => literal!({ "OPENPGPKEY": v.to_string() }),
        RData::SOA(v) => literal!({ "SOA": v.to_string() }),
        RData::SRV(v) => literal!({ "SRV": v.to_string() }),
        RData::SSHFP(v) => literal!({ "SSHFP": v.to_string() }),
        RData::SVCB(v) => literal!({ "SVCB": v.to_string() }),
        RData::TLSA(v) => literal!({ "TLSA": v.to_string() }),
        RData::OPT(_) | RData::Unknown { .. } | RData::ZERO => return None,
    })
}
/// Convert a lookup result into an array value: one object per record,
/// each annotated with its `ttl`. Records with no renderable payload are
/// skipped.
fn lookup_to_value(l: &Lookup) -> Value<'static> {
    l.record_iter()
        .filter_map(|record| {
            rdata_to_value(record.rdata()).map(|mut value| {
                value.try_insert("ttl", record.ttl());
                value
            })
        })
        .collect()
}
impl Dns {
    /// Resolve the DNS request described by event value `e`.
    ///
    /// `e` must carry a `lookup` field that is either the name itself or an
    /// object with `name` and an optional `type` (record type string, see
    /// `str_to_record_type`). A `correlation` meta value, when present, is
    /// echoed into the response event's meta.
    ///
    /// # Errors
    /// Fails when no resolver is configured, the request is malformed, the
    /// record type is unsupported, or resolution itself fails.
    async fn query<'event>(
        &self,
        e: &Value<'event>,
        correlation: Option<&Value<'event>>,
    ) -> Result<Event> {
        let resolver = self.resolver.as_ref().ok_or("No resolver set")?;
        let lookup = e.get("lookup").ok_or("Invalid DNS request")?;
        let name = lookup
            .as_str()
            .or_else(|| lookup.get_str("name"))
            // Fixed typo in the error message ("Invaliud" -> "Invalid").
            .ok_or("Invalid DNS request")?;
        let data = if let Some(record_type) =
            lookup.get_str("type").map(str_to_record_type).transpose()?
        {
            // Typed lookup (e.g. MX, TXT).
            lookup_to_value(
                &resolver
                    .lookup(name, record_type, DnsRequestOptions::default())
                    .await?,
            )
        } else {
            // Generic ip lookup.
            lookup_to_value(resolver.lookup_ip(name).await?.as_lookup())
        };
        let meta = correlation
            .map(|c| literal!({ "correlation": c.clone_static() }))
            .unwrap_or_default();
        let e = Event {
            data: (data, meta).into(),
            origin_uri: Some(self.event_origin_uri.clone()),
            ..Event::default()
        };
        Ok(e)
    }
}
#[async_trait::async_trait]
impl Sink for Dns {
    /// Resolve each value of the (possibly batched) event, pushing one
    /// response on `OUT` per successful lookup and an error event on `ERR`
    /// per failure.
    async fn on_event(
        &mut self,
        _input: &str,
        _codec: &mut dyn Codec,
        _codec_map: &HashMap<String, Box<dyn Codec>>,
        event: Event,
    ) -> ResultVec {
        let mut res = Vec::with_capacity(event.len());
        for (e, m) in event.value_meta_iter() {
            match self.query(e, m.get("correlation")).await {
                Ok(out) => res.push(Reply::Response(OUT, out)),
                Err(err) => {
                    // Wrap the failing request plus the error text so the
                    // ERR port carries enough context to debug the lookup.
                    let data = literal!({
                        "event": e.clone_static(),
                        "error": format!("{}", err),
                    });
                    // Preserve correlation metadata on the error path too.
                    let meta = if let Some(c) = m.get("correlation") {
                        literal!({ "correlation": c.clone_static() })
                    } else {
                        Value::object()
                    };
                    let error_e = Event {
                        data: (data, meta).into(),
                        origin_uri: Some(self.event_origin_uri.clone()),
                        ..Event::default()
                    };
                    res.push(Reply::Response(ERR, error_e));
                }
            }
        }
        Ok(Some(res))
    }
    /// Signals are ignored by this sink.
    async fn on_signal(&mut self, _signal: Event) -> ResultVec {
        Ok(None)
    }
    /// Install the resolver from the system DNS configuration.
    async fn init(
        &mut self,
        _sink_uid: u64,
        _sink_url: &TremorUrl,
        _codec: &dyn Codec,
        _codec_map: &HashMap<String, Box<dyn Codec>>,
        _processors: Processors<'_>,
        _is_linked: bool,
        _reply_channel: Sender<Reply>,
    ) -> Result<()> {
        // self.reply = Some(reply_channel);
        self.resolver = Some(resolver_from_system_conf().await?);
        Ok(())
    }
    /// Active once `init` has installed a resolver.
    fn is_active(&self) -> bool {
        self.resolver.is_some()
    }
    /// Events are acknowledged automatically.
    fn auto_ack(&self) -> bool {
        true
    }
    fn default_codec(&self) -> &str {
        "null"
    }
}
|
use std::collections::VecDeque;
use unicode_width::UnicodeWidthChar;
use alacritty_terminal::grid::Dimensions;
use crate::display::SizeInfo;
/// Text rendered for the close button on the first message line.
pub const CLOSE_BUTTON_TEXT: &str = "[X]";
/// Columns kept free between the message text and the close button.
const CLOSE_BUTTON_PADDING: usize = 1;
/// Line count subtracted from the terminal height to cap message height.
const MIN_FREE_LINES: usize = 3;
/// Replacement text for the last visible line of an over-long message.
const TRUNCATED_MESSAGE: &str = "[MESSAGE TRUNCATED]";
/// Message for display in the MessageBuffer.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Message {
    // Raw, unwrapped message text.
    text: String,
    // Error or warning; determines presentation.
    ty: MessageType,
    // Optional tag used to remove related messages in bulk.
    target: Option<String>,
}
/// Purpose of the message.
// `Copy` is fine: the enum is a bare discriminant.
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum MessageType {
    /// A message represents an error.
    Error,
    /// A message represents a warning.
    Warning,
}
impl Message {
    /// Create a new message.
    pub fn new(text: String, ty: MessageType) -> Message {
        Message { text, ty, target: None }
    }
    /// Formatted message text lines.
    ///
    /// Wraps the text to the terminal width (preferring word boundaries),
    /// reserves room on the first line for the close button, truncates to the
    /// available height, and right-pads every line to the full width.
    pub fn text(&self, size_info: &SizeInfo) -> Vec<String> {
        let num_cols = size_info.columns();
        let total_lines =
            (size_info.height() - 2. * size_info.padding_y()) / size_info.cell_height();
        let max_lines = (total_lines as usize).saturating_sub(MIN_FREE_LINES);
        let button_len = CLOSE_BUTTON_TEXT.chars().count();
        // Split line to fit the screen.
        let mut lines = Vec::new();
        let mut line = String::new();
        let mut line_len = 0;
        for c in self.text.trim().chars() {
            // A line ends on explicit newline, on hitting the column limit,
            // or (first line only) when it reaches the space reserved for
            // the close button.
            if c == '\n'
                || line_len == num_cols
                // Keep space in first line for button.
                || (lines.is_empty()
                    && num_cols >= button_len
                    && line_len == num_cols.saturating_sub(button_len + CLOSE_BUTTON_PADDING))
            {
                let is_whitespace = c.is_whitespace();
                // Attempt to wrap on word boundaries.
                let mut new_line = String::new();
                if let Some(index) = line.rfind(char::is_whitespace).filter(|_| !is_whitespace) {
                    // Carry the partial word over to the next line and drop
                    // the trailing separator.
                    let split = line.split_off(index + 1);
                    line.pop();
                    new_line = split;
                }
                lines.push(Self::pad_text(line, num_cols));
                line = new_line;
                line_len = line.chars().count();
                // Do not append whitespace at EOL.
                if is_whitespace {
                    continue;
                }
            }
            line.push(c);
            // Reserve extra column for fullwidth characters.
            let width = c.width().unwrap_or(0);
            if width == 2 {
                line.push(' ');
            }
            line_len += width
        }
        lines.push(Self::pad_text(line, num_cols));
        // Truncate output if it's too long.
        if lines.len() > max_lines {
            lines.truncate(max_lines);
            if TRUNCATED_MESSAGE.len() <= num_cols {
                // Replace the last visible line with the truncation marker.
                if let Some(line) = lines.iter_mut().last() {
                    *line = Self::pad_text(TRUNCATED_MESSAGE.into(), num_cols);
                }
            }
        }
        // Append close button to first line.
        if button_len <= num_cols {
            if let Some(line) = lines.get_mut(0) {
                line.truncate(num_cols - button_len);
                line.push_str(CLOSE_BUTTON_TEXT);
            }
        }
        lines
    }
    /// Message type.
    #[inline]
    pub fn ty(&self) -> MessageType {
        self.ty
    }
    /// Message target.
    #[inline]
    pub fn target(&self) -> Option<&String> {
        self.target.as_ref()
    }
    /// Update the message target.
    #[inline]
    pub fn set_target(&mut self, target: String) {
        self.target = Some(target);
    }
    /// Right-pad text to fit a specific number of columns.
    #[inline]
    fn pad_text(mut text: String, num_cols: usize) -> String {
        let padding_len = num_cols.saturating_sub(text.chars().count());
        text.extend(vec![' '; padding_len]);
        text
    }
}
/// Storage for message bar.
///
/// A FIFO queue of messages; only the front message is displayed.
#[derive(Debug, Default)]
pub struct MessageBuffer {
    messages: VecDeque<Message>,
}
impl MessageBuffer {
    /// Check if there are any messages queued.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.messages.is_empty()
    }
    /// Current message.
    #[inline]
    pub fn message(&self) -> Option<&Message> {
        self.messages.front()
    }
    /// Remove the currently visible message.
    ///
    /// Every queued duplicate of that message is removed with it.
    #[inline]
    pub fn pop(&mut self) {
        // Remove the message itself, then drop duplicates in place;
        // `retain` avoids the drain/collect round trip through a new deque.
        if let Some(msg) = self.messages.pop_front() {
            self.messages.retain(|m| m != &msg);
        }
    }
    /// Remove all messages with a specific target.
    #[inline]
    pub fn remove_target(&mut self, target: &str) {
        // In-place filter; previously this reallocated via drain + collect.
        self.messages.retain(|m| m.target().map(String::as_str) != Some(target));
    }
    /// Add a new message to the queue.
    #[inline]
    pub fn push(&mut self, message: Message) {
        self.messages.push_back(message);
    }
    /// Check whether the message is already queued in the message bar.
    #[inline]
    pub fn is_queued(&self, message: &Message) -> bool {
        self.messages.contains(message)
    }
}
// Unit tests for message wrapping, truncation, close-button placement, and
// buffer queue semantics. Expected strings encode exact padding — do not
// reformat them.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::display::SizeInfo;
    #[test]
    fn appends_close_button() {
        let input = "a";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(7., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("a    [X]")]);
    }
    #[test]
    fn multiline_close_button_first_line() {
        let input = "fo\nbar";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(6., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("fo [X]"), String::from("bar   ")]);
    }
    #[test]
    fn splits_on_newline() {
        let input = "a\nb";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(6., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines.len(), 2);
    }
    #[test]
    fn splits_on_length() {
        let input = "foobar1";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(6., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines.len(), 2);
    }
    #[test]
    fn empty_with_shortterm() {
        // Zero terminal height leaves no room for any message line.
        let input = "foobar";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(6., 0., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines.len(), 0);
    }
    #[test]
    fn truncates_long_messages() {
        let input = "hahahahahahahahahahaha truncate this because it's too long for the term";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(22., (MIN_FREE_LINES + 2) as f32, 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![
            String::from("hahahahahahahahaha [X]"),
            String::from("[MESSAGE TRUNCATED]   ")
        ]);
    }
    #[test]
    fn hide_button_when_too_narrow() {
        let input = "ha";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(2., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("ha")]);
    }
    #[test]
    fn hide_truncated_when_too_narrow() {
        let input = "hahahahahahahahaha";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(2., (MIN_FREE_LINES + 2) as f32, 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("ha"), String::from("ha")]);
    }
    #[test]
    fn add_newline_for_button() {
        let input = "test";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(5., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("t [X]"), String::from("est  ")]);
    }
    #[test]
    fn remove_target() {
        let mut message_buffer = MessageBuffer::default();
        for i in 0..10 {
            let mut msg = Message::new(i.to_string(), MessageType::Error);
            if i % 2 == 0 && i < 5 {
                msg.set_target("target".into());
            }
            message_buffer.push(msg);
        }
        message_buffer.remove_target("target");
        // Count number of messages.
        let mut num_messages = 0;
        while message_buffer.message().is_some() {
            num_messages += 1;
            message_buffer.pop();
        }
        assert_eq!(num_messages, 7);
    }
    #[test]
    fn pop() {
        let mut message_buffer = MessageBuffer::default();
        let one = Message::new(String::from("one"), MessageType::Error);
        message_buffer.push(one.clone());
        let two = Message::new(String::from("two"), MessageType::Warning);
        message_buffer.push(two.clone());
        assert_eq!(message_buffer.message(), Some(&one));
        message_buffer.pop();
        assert_eq!(message_buffer.message(), Some(&two));
    }
    #[test]
    fn wrap_on_words() {
        let input = "a\nbc defg";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(5., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![
            String::from("a [X]"),
            String::from("bc   "),
            String::from("defg ")
        ]);
    }
    #[test]
    fn wrap_with_unicode() {
        // The fullwidth emoji consumes two columns.
        let input = "ab\nc 👩d fgh";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(7., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![
            String::from("ab [X]"),
            String::from("c 👩 d "),
            String::from("fgh   ")
        ]);
    }
    #[test]
    fn strip_whitespace_at_linebreak() {
        let input = "\n0 1 2 3";
        let mut message_buffer = MessageBuffer::default();
        message_buffer.push(Message::new(input.into(), MessageType::Error));
        let size = SizeInfo::new(3., 10., 1., 1., 0., 0., false);
        let lines = message_buffer.message().unwrap().text(&size);
        assert_eq!(lines, vec![String::from("[X]"), String::from("0 1"), String::from("2 3"),]);
    }
    #[test]
    fn remove_duplicates() {
        // Popping removes the front message AND all equal queued messages;
        // the Warning-typed "test" is a different message and must survive.
        let mut message_buffer = MessageBuffer::default();
        for _ in 0..10 {
            let msg = Message::new(String::from("test"), MessageType::Error);
            message_buffer.push(msg);
        }
        message_buffer.push(Message::new(String::from("other"), MessageType::Error));
        message_buffer.push(Message::new(String::from("test"), MessageType::Warning));
        let _ = message_buffer.message();
        message_buffer.pop();
        // Count number of messages.
        let mut num_messages = 0;
        while message_buffer.message().is_some() {
            num_messages += 1;
            message_buffer.pop();
        }
        assert_eq!(num_messages, 2);
    }
}
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(generators)]
// Compile-pass regression test for rust-lang/rust#52792: `opt` must not be
// treated as live across the `yield`, even though a reference to it is taken
// in the loop body after the yield. The generator is only created, never
// resumed — successful compilation IS the test.
fn main() {
    static || {
        loop {
            // Test that `opt` is not live across the yield, even when borrowed in a loop
            // See https://github.com/rust-lang/rust/issues/52792
            let opt = {
                yield;
                true
            };
            &opt;
        }
    };
}
|
// Parses the base-10 digits of `input` into a Vec<u32>, skipping any
// non-digit characters: "123ab456" yields the digits 1,2,3,4,5,6.
// The result is truncated to at most 11 digits, then padded with a single
// leading 0 when its length is even, so the returned length is always odd
// (e.g. "123456" -> [0,1,2,3,4,5,6]).
//
// Keeping the length odd means left-aligned 3,1,3,... weighting in `sum`
// matches the right-aligned weighting UPC requires, and the leading 0
// contributes nothing to the weighted sum (0 * 3 = 0), so padding all the
// way to 11 digits is unnecessary.
fn to_upc(input: &str) -> Vec<u32> {
    let mut digits: Vec<_> = input.chars().filter_map(|c| c.to_digit(10)).collect();
    // A UPC body is at most 11 digits; drop anything beyond that.
    digits.truncate(11);
    // 11 is odd, so after truncation only a genuinely even-length result
    // needs the alignment pad.
    if digits.len() % 2 == 0 {
        digits.insert(0, 0);
    }
    digits
}
// Rather than go through two functions to get the sum, we'll
// just zip digits iterator with a repeating 3, 1, 3, 1, ... iterator,
// and then multiply each digit by the corresponding multiplier.
// Starts with 3 because the first digit is considered an odd numbered digit
// in the UPC.
//
// Returns the weighted sum of `digits` used by `calc_check_digit`.
fn sum(digits: &[u32]) -> u32 {
    // `cycle()` is infinite; `zip` stops at the shorter side, i.e. `digits`.
    let multipliers = [3, 1].iter().cycle();
    digits.iter()
        .zip(multipliers)
        .map(|(digit, factor)| digit * factor)
        .sum()
}
pub fn calc_check_digit(input: &'_ str) -> u32 {
let digits = to_upc(input);
let total = sum(&digits);
let remainder = total % 10;
if remainder == 0 {
remainder
} else {
10 - remainder
}
} |
use crate::authenticate;
use crate::security::keystore::{calculate_hash, KeyManager};
use crate::timestamp_in_sec;
use crate::types::sensor_data::SensorData;
use std::sync::{Arc, Mutex};
use crate::iota_channels_lite::channel_author::Channel;
use crate::iota_channels_lite::utils::payload::json::PayloadBuilder;
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
use hyper::{header, Body, Request, Response, StatusCode};
///
/// Handles the status request returning status code 200 if the server is online
///
pub async fn status_response() -> Result<Response<Body>> {
Ok(Response::builder().status(200).body(Body::from("OK"))?)
}
///
/// Handles the reuqest from the sensor by parsing the provieded data into the SensorData Format.
/// It authenticates the device through the "device" attribute, and if successfull published the data to the Tangle
/// through the streams channel
///
pub async fn sensor_data_response(
req: Request<Body>,
channel: Arc<Mutex<Channel>>,
keystore: Arc<Mutex<KeyManager>>,
) -> Result<Response<Body>> {
let data = hyper::body::to_bytes(req.into_body()).await?;
let response;
let json_data: serde_json::Result<SensorData> = serde_json::from_slice(&data);
match json_data {
Ok(mut sensor_data) => {
let hash = keystore
.lock()
.expect("lock keystore")
.keystore
.api_key_author
.clone();
if authenticate(&sensor_data.device, hash.clone()) {
sensor_data.device.to_string().push_str("_id");
sensor_data.device = calculate_hash(sensor_data.device);
sensor_data.timestamp = serde_json::Value::from(timestamp_in_sec());
println!(
"POST /sensor_data -- {:?} -- authorized request by device",
timestamp_in_sec()
);
let mut channel = channel.lock().unwrap();
match channel
.write_signed(PayloadBuilder::new().public(&sensor_data).unwrap().build())
{
Ok(_) => {
response = Response::builder()
.status(StatusCode::OK)
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from("Data Sucessfully Sent To Tangle"))?;
}
Err(_e) => {
println!(
"POST /sensor_data Error: Malformed json, use iot2tangle json format"
);
response = Response::builder()
.status(500)
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from("Error while sending data to Tangle"))?;
}
};
} else {
response = Response::builder()
.status(StatusCode::UNAUTHORIZED)
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from(
"Unauthorized - Device Name sent by device doesn't match the configuration",
))?;
println!(
"POST /sensor_data -- {:?} -- unauthorized request blocked",
timestamp_in_sec()
);
}
}
Err(_e) => {
response = Response::builder()
.status(StatusCode::BAD_REQUEST)
.header(header::CONTENT_TYPE, "application/json")
.body(Body::from("Malformed json - use iot2tangle json format"))?;
}
}
Ok(response)
}
|
// NOTE(review): this function appears to be illustrative pseudocode rather
// than compiling Rust — `for` loops do not produce a value to bind to
// `distances`, `stream` is inserted into without being `mut`, and `prev`
// (an `Option` from `HashMap::get`) is compared directly with `-1`.
//
// Apparent intent: per user (`uid`), count the number of clicks between a
// `start_cat` click and the following `end_cat` click, then average those
// distances across all users. A stored value of -1 seems to mark "interval
// closed" — TODO confirm before making this compile.
fn click_ana(start_cat: Category,
             end_cat: Category,
             clicks: &mut Vec<(UID, Category, Time)>) -> i32 {
    // Per-user running distance since the last `start_cat` click.
    let stream = HashMap::new();
    let distances = for (uid, cat, _) in clicks {
        let prev = stream.get(uid);
        if cat == start_cat {
            // Opening click: start counting from zero.
            stream.insert(uid, 0);
            None
        } else if prev != -1 {
            if cat == end_cat {
                // Closing click: emit the accumulated distance.
                stream.insert(uid, -1);
                Some(prev)
            } else {
                // Intermediate click: extend the open interval.
                stream.insert(uid, prev + 1);
                None
            }
        }
        None
    }
    average(drop_none(distances))
}
// NOTE(review): like `click_ana` above, this is pseudocode — the `for` inside
// `Stream::concat` is used as an expression, the loop iterates `stream`
// where `click_stream` was presumably meant, and the `let ev = ...` binding
// is missing its terminating semicolon.
//
// Apparent intent: a streaming reformulation of `click_ana` — partition the
// click stream by user, map each click to an open/close/insert action on an
// interval sequence, and emit the lengths of completed intervals.
fn partial_click_ana(start_cat: Category, end_cat: Category, clicks: Stream<(UID, Category, Time)>) -> i32 {
    use iseq::Action;
    // Split the input into one sub-stream per UID (tuple field 0).
    let click_streams = group_by::<0>(clicks);
    Stream::concat(for click_stream in click_streams {
        let sequences = iseq::Seq::new();
        for (_, cat, time) in stream {
            // Map each category to an interval action at its timestamp.
            let ev = if cat == start_cat {
                Action::Open(time)
            } else if cat == end_cat {
                Action::Close(time)
            } else {
                Action::Insert(time)
            }
            sequences.apply(ev);
        }
        sequences.complete_intervals().map(Interval::len)
    })
}
|
use crate::errors::ServiceError;
use crate::models::msg::Msg;
use crate::schema::option;
use crate::utils::validator::{re_test_name, Validate};
use actix::Message;
use actix_web::error;
use actix_web::Error;
use uuid::Uuid;
/// Insertable record for the `option` table; also an actix message whose
/// handler replies with `Result<Msg, ServiceError>`.
#[derive(Deserialize, Serialize, Debug, Message, Insertable)]
#[rtype(result = "Result<Msg, ServiceError>")]
#[table_name = "option"]
pub struct New {
    /// Option name (checked against `re_test_name` in `InpNew::validate`).
    pub name: String,
    /// Shop this option belongs to.
    pub shop_id: Uuid,
    /// Price, parsed from the raw `InpNew::price` string.
    pub price: f64,
    /// HTML type string (consumed elsewhere; presumably the form control type).
    pub html_type: String,
}
/// Raw user input for creating an option; `price` arrives as a string and is
/// parsed when converting into the insertable `New`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InpNew {
    pub name: String,
    pub price: String,
    pub html_type: String,
}

impl Validate for InpNew {
    /// Validates only the option name (via `re_test_name`); `price` and
    /// `html_type` are not checked here.
    fn validate(&self) -> Result<(), Error> {
        if re_test_name(&self.name) {
            Ok(())
        } else {
            Err(error::ErrorBadRequest("option name"))
        }
    }
}

impl InpNew {
    /// Builds the insertable `New` record scoped to `shop_id`.
    ///
    /// # Panics
    /// Panics if `price` is not a valid `f64` — `validate` does not currently
    /// check it, so callers must sanitize input first.
    pub fn new(&self, shop_id: Uuid) -> New {
        New {
            name: self.name.clone(),
            shop_id,
            price: self.price.parse().expect("price must be a valid number"),
            // The original `html_type.parse::<String>().unwrap()` was an
            // infallible no-op conversion; clone directly instead.
            html_type: self.html_type.clone(),
        }
    }
}
/// Changeset for an existing `option` row; also an actix message whose
/// handler replies with `Result<Msg, ServiceError>`.
#[derive(Deserialize, Serialize, Debug, Message, Identifiable, AsChangeset)]
#[rtype(result = "Result<Msg, ServiceError>")]
#[table_name = "option"]
pub struct Update {
    /// Primary key of the row to update.
    pub id: i32,
    /// Shop the option belongs to (used to scope the update).
    pub shop_id: Uuid,
    /// New option name (checked against `re_test_name` in `InpUpdate::validate`).
    pub name: String,
    /// New price, parsed from the raw `InpUpdate::price` string.
    pub price: f64,
    /// HTML type string (consumed elsewhere; presumably the form control type).
    pub html_type: String,
}
/// Raw user input for updating an option; `price` arrives as a string and is
/// parsed when converting into the `Update` changeset.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InpUpdate {
    pub id: i32,
    pub name: String,
    pub price: String,
    pub html_type: String,
}

impl Validate for InpUpdate {
    /// Validates only the option name (via `re_test_name`); `price` and
    /// `html_type` are not checked here.
    fn validate(&self) -> Result<(), Error> {
        if re_test_name(&self.name) {
            Ok(())
        } else {
            Err(error::ErrorBadRequest("option name"))
        }
    }
}

impl InpUpdate {
    /// Builds the `Update` changeset scoped to `shop_id`.
    ///
    /// # Panics
    /// Panics if `price` is not a valid `f64` — `validate` does not currently
    /// check it, so callers must sanitize input first.
    pub fn new(&self, shop_id: Uuid) -> Update {
        Update {
            id: self.id,
            shop_id,
            name: self.name.clone(),
            price: self.price.parse().expect("price must be a valid number"),
            // The original `html_type.parse::<String>().unwrap()` was an
            // infallible no-op conversion; clone directly instead.
            html_type: self.html_type.clone(),
        }
    }
}
/// Message to fetch a single `option` row by id, scoped to a shop.
#[derive(Deserialize, Serialize, Debug, Message, Identifiable)]
#[rtype(result = "Result<Msg, ServiceError>")]
#[table_name = "option"]
pub struct Get {
    /// Primary key of the row to fetch.
    pub id: i32,
    /// Shop the option must belong to.
    pub shop_id: Uuid,
}
/// Message to fetch every option belonging to a shop.
#[derive(Deserialize, Serialize, Debug, Message)]
#[rtype(result = "Result<Msg, ServiceError>")]
pub struct GetList {
    /// Shop whose options are listed.
    pub shop_id: Uuid,
}
/// Message to delete a single `option` row by id, scoped to a shop.
// NOTE(review): the `AsChangeset` derive looks unnecessary for a delete
// message — confirm whether any handler relies on it.
#[derive(Deserialize, Serialize, Debug, Message, Identifiable, AsChangeset)]
#[rtype(result = "Result<Msg, ServiceError>")]
#[table_name = "option"]
pub struct Delete {
    /// Primary key of the row to delete.
    pub id: i32,
    /// Shop the option must belong to.
    pub shop_id: Uuid,
}
/// Raw user input for deleting an option.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InpDelete {
    pub id: i32,
}

impl Validate for InpDelete {
    /// Nothing to validate: the only field is a typed integer id.
    fn validate(&self) -> Result<(), Error> {
        Ok(())
    }
}

impl InpDelete {
    /// Builds the `Delete` message scoped to `shop_id`.
    pub fn new(&self, shop_id: Uuid) -> Delete {
        Delete {
            id: self.id,
            // Field-init shorthand replaces the redundant `shop_id: shop_id`.
            shop_id,
        }
    }
}
|
use log::LevelFilter;
use serde::Deserialize;
use alacritty_config_derive::ConfigDeserialize;
/// Debugging options.
#[derive(ConfigDeserialize, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Debug {
    /// Minimum log severity (defaults to `Warn`, see `Default` impl below).
    pub log_level: LevelFilter,
    /// Print events — consumed elsewhere; presumably logs incoming window or
    /// terminal events. TODO confirm against the consumer.
    pub print_events: bool,
    /// Keep the log file after quitting.
    pub persistent_logging: bool,
    /// Should show render timer.
    pub render_timer: bool,
    /// Highlight damage information produced by alacritty.
    pub highlight_damage: bool,
    /// The renderer alacritty should be using.
    pub renderer: Option<RendererPreference>,
    /// Record ref test.
    // `#[config(skip)]`: not read from the config file; set internally.
    #[config(skip)]
    pub ref_test: bool,
}
impl Default for Debug {
    /// Warnings-and-up logging; every debug feature switched off.
    fn default() -> Self {
        Self {
            log_level: LevelFilter::Warn,
            print_events: false,
            persistent_logging: false,
            render_timer: false,
            highlight_damage: false,
            renderer: None,
            ref_test: false,
        }
    }
}
/// The renderer configuration options.
// The serialized names ("glsl3", "gles2", "gles2_pure") are part of the
// user-facing config format and must not change.
#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum RendererPreference {
    /// OpenGL 3.3 renderer.
    #[serde(rename = "glsl3")]
    Glsl3,
    /// GLES 2 renderer, with optional extensions like dual source blending.
    #[serde(rename = "gles2")]
    Gles2,
    /// Pure GLES 2 renderer.
    #[serde(rename = "gles2_pure")]
    Gles2Pure,
}
|
#![allow(dead_code)]

// Key codes mirroring the Linux input subsystem's KEY_* constants.
// Values are unchanged from the original; formatting has been normalized
// (`NAME: u8 = N;` with trailing `//` comments).
pub const KEY_RESERVED: u8 = 0;
pub const KEY_ESC: u8 = 1;
pub const KEY_1: u8 = 2;
pub const KEY_2: u8 = 3;
pub const KEY_3: u8 = 4;
pub const KEY_4: u8 = 5;
pub const KEY_5: u8 = 6;
pub const KEY_6: u8 = 7;
pub const KEY_7: u8 = 8;
pub const KEY_8: u8 = 9;
pub const KEY_9: u8 = 10;
pub const KEY_0: u8 = 11;
pub const KEY_MINUS: u8 = 12;
pub const KEY_EQUAL: u8 = 13;
pub const KEY_BACKSPACE: u8 = 14;
pub const KEY_TAB: u8 = 15;
pub const KEY_Q: u8 = 16;
pub const KEY_W: u8 = 17;
pub const KEY_E: u8 = 18;
pub const KEY_R: u8 = 19;
pub const KEY_T: u8 = 20;
pub const KEY_Y: u8 = 21;
pub const KEY_U: u8 = 22;
pub const KEY_I: u8 = 23;
pub const KEY_O: u8 = 24;
pub const KEY_P: u8 = 25;
pub const KEY_LEFTBRACE: u8 = 26;
pub const KEY_RIGHTBRACE: u8 = 27;
pub const KEY_ENTER: u8 = 28;
pub const KEY_LEFTCTRL: u8 = 29;
pub const KEY_A: u8 = 30;
pub const KEY_S: u8 = 31;
pub const KEY_D: u8 = 32;
pub const KEY_F: u8 = 33;
pub const KEY_G: u8 = 34;
pub const KEY_H: u8 = 35;
pub const KEY_J: u8 = 36;
pub const KEY_K: u8 = 37;
pub const KEY_L: u8 = 38;
pub const KEY_SEMICOLON: u8 = 39;
pub const KEY_APOSTROPHE: u8 = 40;
pub const KEY_GRAVE: u8 = 41;
pub const KEY_LEFTSHIFT: u8 = 42;
pub const KEY_BACKSLASH: u8 = 43;
pub const KEY_Z: u8 = 44;
pub const KEY_X: u8 = 45;
pub const KEY_C: u8 = 46;
pub const KEY_V: u8 = 47;
pub const KEY_B: u8 = 48;
pub const KEY_N: u8 = 49;
pub const KEY_M: u8 = 50;
pub const KEY_COMMA: u8 = 51;
pub const KEY_DOT: u8 = 52;
pub const KEY_SLASH: u8 = 53;
pub const KEY_RIGHTSHIFT: u8 = 54;
pub const KEY_KPASTERISK: u8 = 55;
pub const KEY_LEFTALT: u8 = 56;
pub const KEY_SPACE: u8 = 57;
pub const KEY_CAPSLOCK: u8 = 58;
pub const KEY_F1: u8 = 59;
pub const KEY_F2: u8 = 60;
pub const KEY_F3: u8 = 61;
pub const KEY_F4: u8 = 62;
pub const KEY_F5: u8 = 63;
pub const KEY_F6: u8 = 64;
pub const KEY_F7: u8 = 65;
pub const KEY_F8: u8 = 66;
pub const KEY_F9: u8 = 67;
pub const KEY_F10: u8 = 68;
pub const KEY_NUMLOCK: u8 = 69;
pub const KEY_SCROLLLOCK: u8 = 70;
pub const KEY_KP7: u8 = 71;
pub const KEY_KP8: u8 = 72;
pub const KEY_KP9: u8 = 73;
pub const KEY_KPMINUS: u8 = 74;
pub const KEY_KP4: u8 = 75;
pub const KEY_KP5: u8 = 76;
pub const KEY_KP6: u8 = 77;
pub const KEY_KPPLUS: u8 = 78;
pub const KEY_KP1: u8 = 79;
pub const KEY_KP2: u8 = 80;
pub const KEY_KP3: u8 = 81;
pub const KEY_KP0: u8 = 82;
pub const KEY_KPDOT: u8 = 83;
// 84 is intentionally unassigned (matches the Linux header).
pub const KEY_ZENKAKUHANKAKU: u8 = 85;
pub const KEY_102ND: u8 = 86;
pub const KEY_F11: u8 = 87;
pub const KEY_F12: u8 = 88;
pub const KEY_RO: u8 = 89;
pub const KEY_KATAKANA: u8 = 90;
pub const KEY_HIRAGANA: u8 = 91;
pub const KEY_HENKAN: u8 = 92;
pub const KEY_KATAKANAHIRAGANA: u8 = 93;
pub const KEY_MUHENKAN: u8 = 94;
pub const KEY_KPJPCOMMA: u8 = 95;
pub const KEY_KPENTER: u8 = 96;
pub const KEY_RIGHTCTRL: u8 = 97;
pub const KEY_KPSLASH: u8 = 98;
pub const KEY_SYSRQ: u8 = 99;
pub const KEY_RIGHTALT: u8 = 100;
pub const KEY_LINEFEED: u8 = 101;
pub const KEY_HOME: u8 = 102;
pub const KEY_UP: u8 = 103;
pub const KEY_PAGEUP: u8 = 104;
pub const KEY_LEFT: u8 = 105;
pub const KEY_RIGHT: u8 = 106;
pub const KEY_END: u8 = 107;
pub const KEY_DOWN: u8 = 108;
pub const KEY_PAGEDOWN: u8 = 109;
pub const KEY_INSERT: u8 = 110;
pub const KEY_DELETE: u8 = 111;
pub const KEY_MACRO: u8 = 112;
pub const KEY_MUTE: u8 = 113;
pub const KEY_VOLUMEDOWN: u8 = 114;
pub const KEY_VOLUMEUP: u8 = 115;
pub const KEY_POWER: u8 = 116; // SC System Power Down
pub const KEY_KPEQUAL: u8 = 117;
pub const KEY_KPPLUSMINUS: u8 = 118;
pub const KEY_PAUSE: u8 = 119;
pub const KEY_SCALE: u8 = 120; // AL Compiz Scale (Expose)
pub const KEY_KPCOMMA: u8 = 121;
pub const KEY_HANGEUL: u8 = 122;
pub const KEY_HANGUEL: u8 = KEY_HANGEUL; // historical misspelling, kept as alias
pub const KEY_HANJA: u8 = 123;
pub const KEY_YEN: u8 = 124;
pub const KEY_LEFTMETA: u8 = 125;
pub const KEY_RIGHTMETA: u8 = 126;
pub const KEY_COMPOSE: u8 = 127;
pub const KEY_STOP: u8 = 128; // AC Stop
pub const KEY_AGAIN: u8 = 129;
pub const KEY_PROPS: u8 = 130; // AC Properties
pub const KEY_UNDO: u8 = 131; // AC Undo
pub const KEY_FRONT: u8 = 132;
pub const KEY_COPY: u8 = 133; // AC Copy
pub const KEY_OPEN: u8 = 134; // AC Open
pub const KEY_PASTE: u8 = 135; // AC Paste
pub const KEY_FIND: u8 = 136; // AC Search
pub const KEY_CUT: u8 = 137; // AC Cut
pub const KEY_HELP: u8 = 138; // AL Integrated Help Center
pub const KEY_MENU: u8 = 139; // Menu (show menu)
pub const KEY_CALC: u8 = 140; // AL Calculator
pub const KEY_SETUP: u8 = 141;
pub const KEY_SLEEP: u8 = 142; // SC System Sleep
pub const KEY_WAKEUP: u8 = 143; // System Wake Up
pub const KEY_FILE: u8 = 144; // AL Local Machine Browser
pub const KEY_SENDFILE: u8 = 145;
pub const KEY_DELETEFILE: u8 = 146;
pub const KEY_XFER: u8 = 147;
pub const KEY_PROG1: u8 = 148;
pub const KEY_PROG2: u8 = 149;
pub const KEY_WWW: u8 = 150; // AL Internet Browser
pub const KEY_MSDOS: u8 = 151;
pub const KEY_COFFEE: u8 = 152; // AL Terminal Lock/Screensaver
pub const KEY_SCREENLOCK: u8 = KEY_COFFEE;
pub const KEY_ROTATE_DISPLAY: u8 = 153; // Display orientation for e.g. tablets
pub const KEY_DIRECTION: u8 = KEY_ROTATE_DISPLAY;
pub const KEY_CYCLEWINDOWS: u8 = 154;
pub const KEY_MAIL: u8 = 155;
pub const KEY_BOOKMARKS: u8 = 156; // AC Bookmarks
pub const KEY_COMPUTER: u8 = 157;
pub const KEY_BACK: u8 = 158; // AC Back
pub const KEY_FORWARD: u8 = 159; // AC Forward
pub const KEY_CLOSECD: u8 = 160;
pub const KEY_EJECTCD: u8 = 161;
pub const KEY_EJECTCLOSECD: u8 = 162;
pub const KEY_NEXTSONG: u8 = 163;
pub const KEY_PLAYPAUSE: u8 = 164;
pub const KEY_PREVIOUSSONG: u8 = 165;
pub const KEY_STOPCD: u8 = 166;
pub const KEY_RECORD: u8 = 167;
pub const KEY_REWIND: u8 = 168;
pub const KEY_PHONE: u8 = 169; // Media Select Telephone
pub const KEY_ISO: u8 = 170;
pub const KEY_CONFIG: u8 = 171; // AL Consumer Control Configuration
pub const KEY_HOMEPAGE: u8 = 172; // AC Home
pub const KEY_REFRESH: u8 = 173; // AC Refresh
pub const KEY_EXIT: u8 = 174; // AC Exit
pub const KEY_MOVE: u8 = 175;
pub const KEY_EDIT: u8 = 176;
pub const KEY_SCROLLUP: u8 = 177;
pub const KEY_SCROLLDOWN: u8 = 178;
pub const KEY_KPLEFTPAREN: u8 = 179;
pub const KEY_KPRIGHTPAREN: u8 = 180;
pub const KEY_NEW: u8 = 181; // AC New
pub const KEY_REDO: u8 = 182; // AC Redo/Repeat
pub const KEY_F13: u8 = 183;
pub const KEY_F14: u8 = 184;
pub const KEY_F15: u8 = 185;
pub const KEY_F16: u8 = 186;
pub const KEY_F17: u8 = 187;
pub const KEY_F18: u8 = 188;
pub const KEY_F19: u8 = 189;
pub const KEY_F20: u8 = 190;
pub const KEY_F21: u8 = 191;
pub const KEY_F22: u8 = 192;
pub const KEY_F23: u8 = 193;
pub const KEY_F24: u8 = 194;
pub const KEY_PLAYCD: u8 = 200;
pub const KEY_PAUSECD: u8 = 201;
pub const KEY_PROG3: u8 = 202;
pub const KEY_PROG4: u8 = 203;
pub const KEY_DASHBOARD: u8 = 204; // AL Dashboard
pub const KEY_SUSPEND: u8 = 205;
pub const KEY_CLOSE: u8 = 206; // AC Close
pub const KEY_PLAY: u8 = 207;
pub const KEY_FASTFORWARD: u8 = 208;
pub const KEY_BASSBOOST: u8 = 209;
pub const KEY_PRINT: u8 = 210; // AC Print
pub const KEY_HP: u8 = 211;
pub const KEY_CAMERA: u8 = 212;
pub const KEY_SOUND: u8 = 213;
pub const KEY_QUESTION: u8 = 214;
pub const KEY_EMAIL: u8 = 215;
pub const KEY_CHAT: u8 = 216;
pub const KEY_SEARCH: u8 = 217;
pub const KEY_CONNECT: u8 = 218;
pub const KEY_FINANCE: u8 = 219; // AL Checkbook/Finance
pub const KEY_SPORT: u8 = 220;
pub const KEY_SHOP: u8 = 221;
pub const KEY_ALTERASE: u8 = 222;
pub const KEY_CANCEL: u8 = 223; // AC Cancel
pub const KEY_BRIGHTNESSDOWN: u8 = 224;
pub const KEY_BRIGHTNESSUP: u8 = 225;
pub const KEY_MEDIA: u8 = 226;
pub const KEY_SWITCHVIDEOMODE: u8 = 227; // Cycle between available video outputs (Monitor/LCD/TV-out/etc)
pub const KEY_KBDILLUMTOGGLE: u8 = 228;
pub const KEY_KBDILLUMDOWN: u8 = 229;
pub const KEY_KBDILLUMUP: u8 = 230;
pub const KEY_SEND: u8 = 231; // AC Send
pub const KEY_REPLY: u8 = 232; // AC Reply
pub const KEY_FORWARDMAIL: u8 = 233; // AC Forward Msg
pub const KEY_SAVE: u8 = 234; // AC Save
pub const KEY_DOCUMENTS: u8 = 235;
pub const KEY_BATTERY: u8 = 236;
pub const KEY_BLUETOOTH: u8 = 237;
pub const KEY_WLAN: u8 = 238;
pub const KEY_UWB: u8 = 239;
pub const KEY_UNKNOWN: u8 = 240;
pub const KEY_VIDEO_NEXT: u8 = 241; // drive next video source
pub const KEY_VIDEO_PREV: u8 = 242; // drive previous video source
pub const KEY_BRIGHTNESS_CYCLE: u8 = 243; // brightness up, after max is min
pub const KEY_BRIGHTNESS_AUTO: u8 = 244; // Set Auto Brightness: manual brightness control is off, rely on ambient
pub const KEY_BRIGHTNESS_ZERO: u8 = KEY_BRIGHTNESS_AUTO;
pub const KEY_DISPLAY_OFF: u8 = 245; // display device to off state
pub const KEY_WWAN: u8 = 246; // Wireless WAN (LTE, UMTS, GSM, etc.)
pub const KEY_WIMAX: u8 = KEY_WWAN;
pub const KEY_RFKILL: u8 = 247; // Key that controls all radios
pub const KEY_MICMUTE: u8 = 248; // Mute / unmute the microphone
use regex_syntax::ParserBuilder;
/// A common set of configuration options that apply to the syntax of a regex.
///
/// This represents a group of configuration options that specifically apply
/// to how the concrete syntax of a regular expression is interpreted. In
/// particular, they are generally forwarded to the
/// [`ParserBuilder`](https://docs.rs/regex-syntax/*/regex_syntax/struct.ParserBuilder.html)
/// in the
/// [`regex-syntax`](https://docs.rs/regex-syntax)
/// crate when building a regex from its concrete syntax directly.
///
/// These options are defined as a group since they apply to every regex engine
/// in this crate. Instead of re-defining them on every engine's builder, they
/// are instead provided here as one cohesive unit.
#[derive(Clone, Copy, Debug)]
pub struct SyntaxConfig {
    // Each field mirrors one ParserBuilder knob; see the corresponding
    // setter method below for its semantics and default.
    case_insensitive: bool,
    multi_line: bool,
    dot_matches_new_line: bool,
    swap_greed: bool,
    ignore_whitespace: bool,
    unicode: bool,
    utf8: bool,
    nest_limit: u32,
    octal: bool,
}
impl SyntaxConfig {
    /// Return a new default syntax configuration.
    pub fn new() -> SyntaxConfig {
        // These defaults match the ones used in regex-syntax.
        SyntaxConfig {
            case_insensitive: false,
            multi_line: false,
            dot_matches_new_line: false,
            swap_greed: false,
            ignore_whitespace: false,
            unicode: true,
            utf8: true,
            nest_limit: 250,
            octal: false,
        }
    }
    /// Enable or disable the case insensitive flag by default.
    ///
    /// When Unicode mode is enabled, case insensitivity is Unicode-aware.
    /// Specifically, it will apply the "simple" case folding rules as
    /// specified by Unicode.
    ///
    /// By default this is disabled. It may alternatively be selectively
    /// enabled in the regular expression itself via the `i` flag.
    pub fn case_insensitive(mut self, yes: bool) -> SyntaxConfig {
        self.case_insensitive = yes;
        self
    }
    /// Enable or disable the multi-line matching flag by default.
    ///
    /// When this is enabled, the `^` and `$` look-around assertions will
    /// match immediately after and immediately before a new line character,
    /// respectively. Note that the `\A` and `\z` look-around assertions are
    /// unaffected by this setting and always correspond to matching at the
    /// beginning and end of the input.
    ///
    /// By default this is disabled. It may alternatively be selectively
    /// enabled in the regular expression itself via the `m` flag.
    pub fn multi_line(mut self, yes: bool) -> SyntaxConfig {
        self.multi_line = yes;
        self
    }
    /// Enable or disable the "dot matches any character" flag by default.
    ///
    /// When this is enabled, `.` will match any character. When it's disabled,
    /// then `.` will match any character except for a new line character.
    ///
    /// Note that `.` is impacted by whether the "unicode" setting is enabled
    /// or not. When Unicode is enabled (the default), `.` will match any UTF-8
    /// encoding of any Unicode scalar value (sans a new line, depending on
    /// whether this "dot matches new line" option is enabled). When Unicode
    /// mode is disabled, `.` will match any byte instead. Because of this,
    /// when Unicode mode is disabled, `.` can only be used when the "allow
    /// invalid UTF-8" option is enabled, since `.` could otherwise match
    /// invalid UTF-8.
    ///
    /// By default this is disabled. It may alternatively be selectively
    /// enabled in the regular expression itself via the `s` flag.
    pub fn dot_matches_new_line(mut self, yes: bool) -> SyntaxConfig {
        self.dot_matches_new_line = yes;
        self
    }
    /// Enable or disable the "swap greed" flag by default.
    ///
    /// When this is enabled, `.*` (for example) will become ungreedy and `.*?`
    /// will become greedy.
    ///
    /// By default this is disabled. It may alternatively be selectively
    /// enabled in the regular expression itself via the `U` flag.
    pub fn swap_greed(mut self, yes: bool) -> SyntaxConfig {
        self.swap_greed = yes;
        self
    }
    /// Enable verbose mode in the regular expression.
    ///
    /// When enabled, verbose mode permits insignificant whitespace in many
    /// places in the regular expression, as well as comments. Comments are
    /// started using `#` and continue until the end of the line.
    ///
    /// By default, this is disabled. It may be selectively enabled in the
    /// regular expression by using the `x` flag regardless of this setting.
    pub fn ignore_whitespace(mut self, yes: bool) -> SyntaxConfig {
        self.ignore_whitespace = yes;
        self
    }
    /// Enable or disable the Unicode flag (`u`) by default.
    ///
    /// By default this is **enabled**. It may alternatively be selectively
    /// disabled in the regular expression itself via the `u` flag.
    ///
    /// Note that unless "allow invalid UTF-8" is enabled (it's disabled by
    /// default), a regular expression will fail to parse if Unicode mode is
    /// disabled and a sub-expression could possibly match invalid UTF-8.
    ///
    /// **WARNING**: Unicode mode can greatly increase the size of the compiled
    /// DFA, which can noticeably impact both memory usage and compilation
    /// time. This is especially noticeable if your regex contains character
    /// classes like `\w` that are impacted by whether Unicode is enabled or
    /// not. If Unicode is not necessary, you are encouraged to disable it.
    pub fn unicode(mut self, yes: bool) -> SyntaxConfig {
        self.unicode = yes;
        self
    }
    /// When disabled, the builder will permit the construction of a regular
    /// expression that may match invalid UTF-8.
    ///
    /// For example, when [`SyntaxConfig::unicode`] is disabled, then
    /// expressions like `[^a]` may match invalid UTF-8 since they can match
    /// any single byte that is not `a`. By default, these sub-expressions
    /// are disallowed to avoid returning offsets that split a UTF-8
    /// encoded codepoint. However, in cases where matching at arbitrary
    /// locations is desired, this option can be disabled to permit all such
    /// sub-expressions.
    ///
    /// When enabled (the default), the builder is guaranteed to produce a
    /// regex that will only ever match valid UTF-8 (otherwise, the builder
    /// will return an error).
    pub fn utf8(mut self, yes: bool) -> SyntaxConfig {
        self.utf8 = yes;
        self
    }
    /// Set the nesting limit used for the regular expression parser.
    ///
    /// The nesting limit controls how deep the abstract syntax tree is allowed
    /// to be. If the AST exceeds the given limit (e.g., with too many nested
    /// groups), then an error is returned by the parser.
    ///
    /// The purpose of this limit is to act as a heuristic to prevent stack
    /// overflow when building a finite automaton from a regular expression's
    /// abstract syntax tree. In particular, construction currently uses
    /// recursion. In the future, the implementation may stop using recursion
    /// and this option will no longer be necessary.
    ///
    /// This limit is not checked until the entire AST is parsed. Therefore,
    /// if callers want to put a limit on the amount of heap space used, then
    /// they should impose a limit on the length, in bytes, of the concrete
    /// pattern string. In particular, this is viable since the parser will
    /// limit itself to heap space proportional to the length of the pattern
    /// string.
    ///
    /// Note that a nest limit of `0` will return a nest limit error for most
    /// patterns but not all. For example, a nest limit of `0` permits `a` but
    /// not `ab`, since `ab` requires a concatenation AST item, which results
    /// in a nest depth of `1`. In general, a nest limit is not something that
    /// manifests in an obvious way in the concrete syntax, therefore, it
    /// should not be used in a granular way.
    pub fn nest_limit(mut self, limit: u32) -> SyntaxConfig {
        self.nest_limit = limit;
        self
    }
    /// Whether to support octal syntax or not.
    ///
    /// Octal syntax is a little-known way of uttering Unicode codepoints in
    /// a regular expression. For example, `a`, `\x61`, `\u0061` and
    /// `\141` are all equivalent regular expressions, where the last example
    /// shows octal syntax.
    ///
    /// While supporting octal syntax isn't in and of itself a problem, it does
    /// make good error messages harder. That is, in PCRE based regex engines,
    /// syntax like `\1` invokes a backreference, which is explicitly
    /// unsupported in Rust's regex engine. However, many users expect it to
    /// be supported. Therefore, when octal support is disabled, the error
    /// message will explicitly mention that backreferences aren't supported.
    ///
    /// Octal syntax is disabled by default.
    pub fn octal(mut self, yes: bool) -> SyntaxConfig {
        self.octal = yes;
        self
    }
    /// Returns whether "unicode" mode is enabled.
    pub fn get_unicode(&self) -> bool {
        self.unicode
    }
    /// Returns whether "case insensitive" mode is enabled.
    pub fn get_case_insensitive(&self) -> bool {
        self.case_insensitive
    }
    /// Returns whether "multi line" mode is enabled.
    pub fn get_multi_line(&self) -> bool {
        self.multi_line
    }
    /// Returns whether "dot matches new line" mode is enabled.
    pub fn get_dot_matches_new_line(&self) -> bool {
        self.dot_matches_new_line
    }
    /// Returns whether "swap greed" mode is enabled.
    pub fn get_swap_greed(&self) -> bool {
        self.swap_greed
    }
    /// Returns whether "ignore whitespace" mode is enabled.
    pub fn get_ignore_whitespace(&self) -> bool {
        self.ignore_whitespace
    }
    /// Returns whether UTF-8 mode is enabled.
    pub fn get_utf8(&self) -> bool {
        self.utf8
    }
    /// Returns the "nest limit" setting.
    pub fn get_nest_limit(&self) -> u32 {
        self.nest_limit
    }
    /// Returns whether "octal" mode is enabled.
    pub fn get_octal(&self) -> bool {
        self.octal
    }
    /// Applies this configuration to the given parser.
    pub(crate) fn apply(&self, builder: &mut ParserBuilder) {
        // Note: `utf8` here maps to the parser's inverse option.
        builder
            .unicode(self.unicode)
            .case_insensitive(self.case_insensitive)
            .multi_line(self.multi_line)
            .dot_matches_new_line(self.dot_matches_new_line)
            .swap_greed(self.swap_greed)
            .ignore_whitespace(self.ignore_whitespace)
            .allow_invalid_utf8(!self.utf8)
            .nest_limit(self.nest_limit)
            .octal(self.octal);
    }
}
impl Default for SyntaxConfig {
fn default() -> SyntaxConfig {
SyntaxConfig::new()
}
}
|
//! This module contains the main event loop for the linux operating system.
//!
//! This application, on linux, uses x11 for window management. This module includes basic
//! input event gathering, keyboard and mouse events. The primary functions of
//! interest are `make_window` and `update_screen`.
//! `make_window` contains the event loop and `update_screen` updates the x11
//! buffer with the contents of our backbuffer.
//!
#![cfg(target_os = "linux")]
#![allow(warnings, unused)]
use crate::lab_sims::*;
use crate::ui_tools::ui_test;
use crate::x11::xlib;
use x11::xlib::*;/*{XOpenDisplay, XDefaultScreen, XBlackPixel, XWhitePixel, XNextEvent,
XCreateSimpleWindow, XSetStandardProperties, XSelectInput, XEvent,
XSetBackground, XSetForeground, XClearWindow, XInternAtom, XMapRaised,
XSetWMProtocols, XFreeGC, XDestroyWindow, XCloseDisplay, XDestroyImage,
XDefaultRootWindow, ExposureMask, ButtonPressMask, KeyPressMask, XPending,
XCreateGC, XCreatePixmapFromBitmapData, XCopyPlane, XFlush, XSync,
XCreateImage, XDefaultVisual, ZPixmap, XDefaultDepth, XPutImage, XImage};*/
use std::ptr::{null, null_mut};
use std::time::{Duration, Instant};
use std::thread::sleep;
use crate::rendertools::*;
use crate::{WindowCanvas, WindowInfo,
GLOBAL_BACKBUFFER, OsPackage,
GLOBAL_WINDOWINFO, inputhandler, SETICON};
use inputhandler::*;
use crate::misc::*;
#[macro_use]
use crate::{timeit, DEBUG_timeit};
use crate::debug_tools::*;
/// This function creates a new x11 image and draws that image in the given display window.
/// The resulting x11 image is then destroyed.
///
/// `buffer` must hold at least `window_width * window_height * 4` bytes of
/// pixel data (bitmap_pad is 32) — assumed, not checked here; TODO confirm at
/// the call sites.
fn update_screen(buffer: &mut [u8], dis: *mut xlib::Display, visual: *mut xlib::Visual, win: u64, depth: u32, gc: xlib::GC, window_width: u32, window_height: u32){unsafe{
    // Wrap the Rust-owned pixel buffer in an XImage without copying.
    let image = XCreateImage(dis, visual, depth, ZPixmap, 0, buffer.as_mut_ptr() as *mut _, window_width, window_height, 32, 0);
    let mut _image = (image as *mut XImage).as_mut().unwrap();
    XPutImage(dis, win, gc, image, 0, 0, 0, 0, window_width, window_height);
    // XSync(_, 0) flushes and waits without discarding queued events.
    XSync(dis, 0);
    // XDestroyImage would free `data` along with the XImage struct; detach
    // the Rust-owned buffer first so Xlib does not free memory it doesn't own.
    _image.data = null_mut();
    XDestroyImage(image);
}}
/// Sets the `_NET_WM_ICON` property for `win` from `bmp`.
///
/// NOTE(review): the original version was documented as non-working. Two
/// likely causes are fixed here:
/// * the per-pixel offset used `i * height + j` as the row stride instead of
///   `i * width + j`, which scrambles (and can read out of bounds for) any
///   non-square icon;
/// * `XChangeProperty` with `format = 32` requires the client-side data to be
///   an array of C `long`s (64-bit on common Linux targets), not 32-bit ints.
fn _set_icon( dis: *mut x11::xlib::_XDisplay, win: x11::xlib::Window, bmp: &TGBitmap ){unsafe{
    use std::ffi::CString;
    use std::os::raw::c_long;
    let net_wm_icon = x11::xlib::XInternAtom(dis, CString::new("_NET_WM_ICON").expect("net_wm_icon").into_raw(), x11::xlib::False);
    let cardinal = x11::xlib::XInternAtom(dis, CString::new("CARDINAL").expect("cardinal").into_raw(), x11::xlib::False);
    let width = bmp.width;
    let height = bmp.height;
    // _NET_WM_ICON layout: [width, height, width*height ARGB pixels].
    let mut icon_data: Vec<c_long> = Vec::with_capacity((2 + width * height) as usize);
    icon_data.push(width as c_long);
    icon_data.push(height as c_long);
    let src = bmp.rgba.as_ptr();
    // Bitmap rows are assumed bottom-up (BMP convention — TODO confirm for
    // TGBitmap); iterate rows in reverse to emit the icon top-down.
    for i in (0..height).rev(){
        for j in 0..width{
            // Row stride is `width` pixels, 4 bytes per pixel.
            let px = (4 * (i * width + j)) as isize;
            // Byte order assumed BGRA, matching the offsets the original
            // code labelled b/g/r/a — TODO confirm against TGBitmap.
            let b = *src.offset(px) as u32;
            let g = *src.offset(px + 1) as u32;
            let r = *src.offset(px + 2) as u32;
            let a = *src.offset(px + 3) as u32;
            // Pack as ARGB as _NET_WM_ICON requires (the original packed
            // ABGR, swapping the red and blue channels).
            let argb = (a << 24) | (r << 16) | (g << 8) | b;
            icon_data.push(argb as c_long);
        }
    }
    let length = 2 + width * height;
    let _cp = x11::xlib::XChangeProperty(dis, win, net_wm_icon, cardinal, 32, x11::xlib::PropModeReplace, icon_data.as_ptr() as *mut u8, length);
    let _mw = x11::xlib::XMapWindow(dis, win);
}}
/// This is the primary function for the application. The event/render loop occurs here.
///
/// Opens an X11 display and window, wires up the global back buffer, then
/// loops once per frame: present the back buffer, track window resizes,
/// sample mouse state, drain pending X events into the input structs, and
/// hand control to `circuit_sim`. The loop ends when `circuit_sim` returns
/// non-zero, the window manager requests close, or (when running from a
/// release target dir) ESC is pressed.
pub fn make_window() {unsafe{
    //NOTE
    //Standard x11 window initialization occurs here.
    let window_width = 1000;
    let window_height = 550;
    let dis = XOpenDisplay(null());
    let screen = XDefaultScreen(dis);
    let black = XBlackPixel(dis, screen);
    let white = XWhitePixel(dis, screen);
    let win = XCreateSimpleWindow(dis, XDefaultRootWindow(dis), 0, 0,
                                  window_width, window_height, 5, black,
                                  black);
    use std::ffi::CString;
    // Window title / icon name shown by the window manager.
    XSetStandardProperties(dis,win,CString::new("CircuitLab").unwrap().into_raw(),
                           CString::new("Temp v01").unwrap().into_raw(),
                           0,null_mut(),0,null_mut());
    XSelectInput(dis, win, ExposureMask|ButtonPressMask|KeyPressMask|KeyReleaseMask);
    let gc=XCreateGC(dis, win, 0, null_mut());
    XSetBackground(dis, gc, 0);
    XSetForeground(dis, gc, white);
    XClearWindow(dis, win);
    XMapRaised(dis, win);
    // Opt in to the WM close button so we get a ClientMessage instead of
    // being killed by the server.
    let mut wm_delete_window = XInternAtom(dis, CString::new("WM_DELETE_WINDOW").unwrap().into_raw(),
                                           0/*False*/);
    XSetWMProtocols(dis, win, &mut wm_delete_window as *mut _, 1);
    // Two pixel buffers: the app draws into bmp_buffer (bottom-up); rows are
    // flipped into presentation_buffer each frame before display.
    let mut presentation_buffer = vec![0u8; (4*window_width*window_height) as usize];
    let mut bmp_buffer = vec![0u8; (4*window_width*window_height) as usize];
    let visual = XDefaultVisual(dis, 0);
    let depth = XDefaultDepth(dis, screen) as u32;
    XFlush(dis);
    unsafe{
        //NOTE
        //Setting up GLOBAL_BACKBUFFER dimensions and dpi properties.
        GLOBAL_BACKBUFFER.info.width = window_width as i32;
        GLOBAL_BACKBUFFER.info.height = window_height as i32;
        GLOBAL_BACKBUFFER.info.planes = 1;
        GLOBAL_BACKBUFFER.w = window_width as i32;
        GLOBAL_BACKBUFFER.h = window_height as i32;
        GLOBAL_BACKBUFFER.buffer = bmp_buffer.as_mut_ptr() as *mut _;
        GLOBAL_WINDOWINFO.w = GLOBAL_BACKBUFFER.w;
        GLOBAL_WINDOWINFO.h = GLOBAL_BACKBUFFER.h;
        GLOBAL_BACKBUFFER.display_width = XDisplayWidth(dis, 0);
        GLOBAL_BACKBUFFER.display_width_mm = XDisplayWidthMM(dis, 0);
        GLOBAL_BACKBUFFER.display_height = XDisplayHeight(dis, 0);
        GLOBAL_BACKBUFFER.display_height_mm = XDisplayHeightMM(dis, 0);
        {
            // Dots-per-millimetre from the display's reported physical size;
            // falls back to DPMM_SCALE when the server reports nonsense.
            let x_mm = GLOBAL_BACKBUFFER.display_width_mm as f32;
            let x = GLOBAL_BACKBUFFER.display_width as f32;
            let y_mm = GLOBAL_BACKBUFFER.display_height_mm as f32;
            let y = GLOBAL_BACKBUFFER.display_height as f32;
            if x >= 1f32 && y >= 1f32 {
                GLOBAL_BACKBUFFER.dpmm = (x.powi(2) + y.powi(2)).sqrt() / (x_mm.powi(2) + y_mm.powi(2)).sqrt();
            } else {
                GLOBAL_BACKBUFFER.dpmm = DPMM_SCALE;
            }
        }
    }
    // Per-frame input state handed to the application each iteration.
    let mut mouseinfo = MouseInfo::new();
    let mut textinfo = TextInfo{character: Vec::with_capacity(10), timing:Vec::new()};
    let mut keyboardinfo = KeyboardInfo{key: Vec::new(), status:Vec::new()};
    let mut ls_app_storage = LsAppStorage::new();
    let mut stopwatch = StopWatch::new();
    let mut stopwatch_lbutton = StopWatch::new();
    let mut old_window_info = GLOBAL_WINDOWINFO;
    let mut exe_path = std::env::current_exe().expect("could not find the exe path");
    // ESC-to-exit is only enabled when running from target/release.
    let in_target_path = exe_path.to_string_lossy().contains("target/release");
    //init_debugging( Some([0, 0, 600, 500]) );
    let mut exit = false;
    loop{
        // Apply any icon the application requested via SETICON.
        match &SETICON {
            Some(bmp)=>{
                _set_icon(dis, win, bmp);
            },
            None=>{}
        }
        SETICON = None;
        {
            // Flip the back buffer vertically (row by row) into the
            // presentation buffer; X11 expects top-down rows.
            let max_len = (GLOBAL_BACKBUFFER.h * GLOBAL_BACKBUFFER.w * 4) as usize;
            let count = (GLOBAL_BACKBUFFER.w * 4) as usize;
            for i in 0..GLOBAL_BACKBUFFER.h as usize {unsafe{
                let mut ptr_bmp = bmp_buffer.as_mut_ptr().offset( (i*count) as isize );
                let mut ptr_pre = presentation_buffer.as_mut_ptr().offset( (max_len - (i+1)*count ) as isize);
                std::ptr::copy_nonoverlapping(ptr_bmp, ptr_pre, count);
            }}
        }
        update_screen(&mut presentation_buffer[..], dis, visual, win, depth, gc, GLOBAL_BACKBUFFER.w as _, GLOBAL_BACKBUFFER.h as _);
        {//TODO change window size if application asks
            if GLOBAL_WINDOWINFO.w != old_window_info.w
            || GLOBAL_WINDOWINFO.h != old_window_info.h{
                XResizeWindow(dis, win, GLOBAL_WINDOWINFO.w as _, GLOBAL_WINDOWINFO.h as _);
            }
        }
        // Query the real window geometry; the user may have resized.
        let mut window_struct = XWindowAttributes{
            x: 0,
            y: 0,
            width: 0,
            height: 0,
            border_width: 0,
            depth: 0,
            visual: null_mut(),
            root: 0,
            class: 0,
            bit_gravity: 0,
            win_gravity: 0,
            backing_store: 0,
            backing_planes: 0,
            backing_pixel: 0,
            save_under: 0,
            colormap: 0,
            map_installed: 0,
            map_state: 0,
            all_event_masks: 0,
            your_event_mask: 0,
            do_not_propagate_mask: 0,
            override_redirect: 0,
            screen: null_mut(),
        };
        XGetWindowAttributes(dis, win, &mut window_struct as *mut _);
        GLOBAL_WINDOWINFO.w = window_struct.width;
        GLOBAL_WINDOWINFO.h = window_struct.height;
        old_window_info = GLOBAL_WINDOWINFO;
        // Reallocate both pixel buffers when the window size changed.
        if window_struct.width != GLOBAL_BACKBUFFER.w
        || window_struct.height != GLOBAL_BACKBUFFER.h{
            GLOBAL_BACKBUFFER.w = window_struct.width;
            GLOBAL_BACKBUFFER.h = window_struct.height;
            GLOBAL_BACKBUFFER.info.width = GLOBAL_BACKBUFFER.w;
            GLOBAL_BACKBUFFER.info.height = GLOBAL_BACKBUFFER.h;
            let size = (4 * GLOBAL_BACKBUFFER.w * GLOBAL_BACKBUFFER.h) as usize;
            bmp_buffer.resize(size, 0);
            presentation_buffer.resize(size, 0);
            GLOBAL_BACKBUFFER.buffer = bmp_buffer.as_mut_ptr() as *mut _;
        }
        // Reset per-frame input state; previous button state is preserved in
        // the old_* fields for edge detection.
        keyboardinfo.key.clear();
        keyboardinfo.status.clear();
        textinfo.character.clear();
        textinfo.timing.clear();
        mouseinfo.old_lbutton = mouseinfo.lbutton;
        mouseinfo.old_rbutton = mouseinfo.rbutton;
        mouseinfo.lbutton = ButtonStatus::Up;
        mouseinfo.rbutton = ButtonStatus::Up;
        mouseinfo.double_lbutton = false;
        mouseinfo.wheel_delta = 0;
        mouseinfo.delta_x = mouseinfo.x;
        mouseinfo.delta_y = mouseinfo.y;
        let mut x : i32 = 0;
        let mut y : i32 = 0;
        let mut _x : i32 = 0;
        let mut _y : i32 = 0;
        {
            // Poll the pointer directly instead of relying on button events.
            let mut mask = 0u32;
            let mut _w0 : Window = 0;
            let mut _w1 : Window = 0;
            XQueryPointer(dis, win, &mut _w0 as *mut _, &mut _w1 as *mut _,
                          &mut x as *mut _, &mut y as *mut _, &mut _x as *mut _,
                          &mut _y as *mut _, &mut mask as *mut _);
            if mask&256 == 256{//Left click TODO check with mouse
                mouseinfo.lbutton = ButtonStatus::Down;
            } else if mask&1024 == 1024 {//Right click TODO check with mouse
                mouseinfo.rbutton = ButtonStatus::Down;
            }
            // Double-click detection: a press within 450ms of the previous
            // release counts as a double click.
            if mouseinfo.lbutton == ButtonStatus::Up
            && mouseinfo.old_lbutton == ButtonStatus::Down{
                stopwatch_lbutton.reset_lap_timer();
            }
            if mouseinfo.lbutton == ButtonStatus::Down
            && mouseinfo.old_lbutton == ButtonStatus::Up
            && stopwatch_lbutton.lap_time().as_millis() <= 450 {
                mouseinfo.double_lbutton = true;
            }
            // Mouse y is flipped: app space has the origin at the bottom.
            mouseinfo.x = _x;
            mouseinfo.y = GLOBAL_BACKBUFFER.h as i32 - _y;
        }
        let mut text_key : KeySym = 0;
        let mut text = [0u8; 4];
        let mut event = XEvent{type_: 0};
        // Drain every pending X event for this frame.
        while XPending(dis) !=0 {
            XNextEvent(dis, &mut event as *mut _);
            match event.type_ {
                KeyPress=>{
                    if XLookupString(&mut event.key as *mut _,
                                     text.as_mut_ptr() as *mut _, 4,
                                     &mut text_key as *mut _, null_mut()) == 1{
                        if text[0] == 27
                        && in_target_path {//ESC text code
                            exit = true;
                        }
                        let temp_str = std::str::from_utf8(&text).expect("Good string");
                        // Only the first NUL is meaningful; skip padding NULs.
                        for (i_ts, it_ts) in temp_str.chars().enumerate(){
                            if i_ts > 0 && it_ts == '\0' {}
                            else {
                                textinfo.character.push(it_ts);
                            }
                        }
                    }
                    let _key = XLookupKeysym(&mut event.key as *mut _, 0);
                    keyboardinfo.update_keyboardinfo_linux(_key, true);
                },
                KeyRelease=>{
                    // Filter X11 auto-repeat: a KeyRelease immediately
                    // followed by a KeyPress of the same keysym is synthetic
                    // and is ignored.
                    let _key = XLookupKeysym(&mut event.key as *mut _, 0);
                    let mut peek_event = XEvent{type_: 0};
                    if XEventsQueued(dis, 1) > 0 {
                        XPeekEvent(dis, &mut peek_event as *mut _);
                        if peek_event.type_ == KeyPress
                        && XLookupKeysym(&mut peek_event.key as *mut _, 0) == _key{}
                        else {
                            keyboardinfo.update_keyboardinfo_linux(_key, false);//TODO status should not be a bool it is not enough information
                        }
                    } else {
                        keyboardinfo.update_keyboardinfo_linux(_key, false);//TODO status should not be a bool it is not enough information
                    }
                },
                ClientMessage=>{
                    // Window-manager close request (WM_DELETE_WINDOW).
                    if event.client_message.data.get_long(0) == wm_delete_window as i64{
                        exit = true;
                    }
                },
                ButtonPress=>{
                    //NOTE Mousewheel things
                    // X11 maps the wheel to buttons 4 (up) and 5 (down).
                    if event.button.button == 4 {
                        mouseinfo.wheel_delta = 1;
                        mouseinfo.wheel += 1;
                    }
                    if event.button.button == 5 {
                        mouseinfo.wheel_delta = -1;
                        mouseinfo.wheel -= 1;
                    }
                },
                _=>{
                }
            }
        }
        mouseinfo.delta_x = mouseinfo.x - mouseinfo.delta_x;
        mouseinfo.delta_y = mouseinfo.y - mouseinfo.delta_y;
        // Hand the frame to the application; a non-zero return exits.
        if circuit_sim(&mut OsPackage{window_canvas: &mut GLOBAL_BACKBUFFER, window_info: &mut GLOBAL_WINDOWINFO},
                       &mut ls_app_storage, &keyboardinfo, &textinfo, &mouseinfo) != 0 { break; }
        let delta_time = stopwatch.lap_time();
        //draw_string(&mut GLOBAL_BACKBUFFER, &format!("{:#.3?}", delta_time), 0, GLOBAL_BACKBUFFER.h-30, C4_WHITE, 26.0);//TODO we should avg things so we no flicker
        stopwatch.reset_lap_timer();
        //draw_debuginfo(&mut GLOBAL_BACKBUFFER);
        //reset_frame_debugging();
        if exit {
            break;
        }
    }
    // Tear down X11 resources before returning.
    XFreeGC(dis, gc);
    XDestroyWindow(dis, win);
    XCloseDisplay(dis);
}}
|
#![allow(dead_code)]
use chrono::NaiveDate;
use regex::Regex;
use std::io;
use std::fs::File;
use std::io::BufRead;
use std::collections::BTreeMap;
use crate::gen;
use util_rust::parse;
// Input/output locations for the one-off ConnectedText migration.
// NOTE(review): these are hard-coded absolute Windows paths — the tool only
// runs on the machine that holds the E:\ exports.
const FILE_IMPORT_BOOKS_PERSONAL: &str = r"E:\ConnectedText Restructure 2020-10-17\Audible Books Personal.txt";
const FILE_IMPORT_PURCHASE_DATES_PERSONAL: &str = r"E:\ConnectedText Restructure 2020-10-17\Audible Books Purchase History Personal.txt";
const FILE_IMPORT_BOOKS_QUADRAVEN: &str = r"E:\ConnectedText Restructure 2020-10-17\Audible Books Quadraven.txt";
const FILE_IMPORT_PURCHASE_DATES_QUADRAVEN: &str = r"E:\ConnectedText Restructure 2020-10-17\Audible Books Purchase History Quadraven.txt";
const PATH_HOME_PROJECT_SOURCE: &str = r"E:\ConnectedText Restructure\Home Project";
const PATH_HOME_PROJECT_DEST: &str = r"E:\ConnectedText Restructure 2020-10-17\Home Dest";
const PATH_HOME_PROJECT_DEST_FIXED: &str = r"E:\ConnectedText Restructure 2020-10-17\Home Dest Fixed";
const PATH_TOOLS_PROJECT_DEST: &str = r"E:\ConnectedText Restructure 2020-10-17\Tools Dest";
const PATH_TOOLS_PROJECT_DEST_FIXED: &str = r"E:\ConnectedText Restructure 2020-10-17\Tools Dest Fixed";
const PATH_GEN_BOOKS: &str = r"E:\ConnectedText Restructure 2020-10-17\Gen Books";
/// Entry point for the Audible import. Currently only prints start/done
/// markers; the actual import/generation steps are disabled below and kept
/// as a record of how each stage was invoked.
pub fn main() {
    println!("\nAudible start\n");
    // import::fix_file_names(path::Path::new(FILE_FULL_EXPORT), path::Path::new(PATH_TOOLS_PROJECT_DEST), path::Path::new(PATH_TOOLS_PROJECT_DEST_FIXED));
    // import::fix_file_names(path::Path::new(FILE_FULL_EXPORT), path::Path::new(PATH_HOME_PROJECT_DEST), path::Path::new(PATH_HOME_PROJECT_DEST_FIXED));
    // gen_from_audible_books(FILE_IMPORT_BOOKS_PERSONAL, FILE_IMPORT_PURCHASE_DATES_PERSONAL, "personal", PATH_GEN_BOOKS);
    // gen_from_audible_books(FILE_IMPORT_BOOKS_QUADRAVEN, FILE_IMPORT_PURCHASE_DATES_QUADRAVEN, "Quadraven", PATH_GEN_BOOKS);
    println!("\nAudible done\n");
}
/// Import the books (with purchase dates) for one account, then write the
/// generated per-book text files under `path_gen`.
fn gen_from_audible_books(file_import_books: &str, file_import_purchase_dates: &str, account_name: &str, path_gen: &str) {
    gen::gen_book_text_files(
        path_gen,
        import_audible_books(file_import_books, file_import_purchase_dates, account_name),
    );
}
pub fn import_audible_books(file_import_books: &str, file_import_purchase_dates: &str, account_name: &str) -> Vec<BookForAudible> {
let purchase_dates = import_audible_purchase_dates(file_import_purchase_dates);
let mut v = vec![];
let mut book = make_empty_audible_book(account_name);
let mut title_line = false;
let file = File::open(file_import_books).unwrap();
for line_result in io::BufReader::new(file).lines() {
if book.title.len() > 0 && book.short_title.len() > 0 && book.acquired_date.is_none() {
// Figure out the aquired date.
let date = purchase_dates.get(&book.title);
if let Some(date) = date {
book.acquired_date = Some(*date);
} else {
let date = purchase_dates.get(&book.short_title);
if let Some(date) = date {
book.acquired_date = Some(*date);
} else {
dbg!(&book);
panic!("No match for purchase date.");
}
}
}
let line = line_result.unwrap().trim().to_string();
//rintln!("{}", line);
if line.contains("By cover art") {
if book.short_title.len() > 0 {
v.push(book.clone());
}
book = make_empty_audible_book(account_name);
book.short_title = parse::before(&line, "By cover art").trim().to_string();
title_line = true;
continue;
}
if title_line {
book.title = line.to_string();
title_line = false;
continue;
}
if line.starts_with("By:") {
let authors = parse::after(&line, "By: ").trim().to_string();
let authors = authors.split(",").map(|x| x.trim().to_string()).collect::<Vec<String>>();
//rintln!("{:?}", &authors);
book.authors = authors;
continue;
}
if line.starts_with("Narrated by:") {
let narrators = parse::after(&line, "Narrated by: ").trim().to_string();
let narrators = narrators.split(",").map(|x| x.trim().to_string()).collect::<Vec<String>>();
book.narrators = narrators;
continue;
}
if line.starts_with("Series:") {
let series = parse::after(&line, "Series: ").trim().to_string();
book.series = Some(series);
continue;
}
if line.eq("Finished") {
book.read = Some(true);
continue;
}
}
v.push(book.clone());
dbg!(&v);
v
}
/// Parse an Audible purchase-history export into a map of book title ->
/// purchase date.
///
/// The export lists each purchase as a title line, a "By: ..." line, and
/// (usually) an `MM-DD-YY` date line after it. When a "By: " line has no
/// date following it, the most recently seen date is carried forward —
/// mirroring how the export groups several titles under one date.
pub fn import_audible_purchase_dates(file_import: &str) -> BTreeMap<String, NaiveDate> {
    let mut purchase_dates = BTreeMap::new();
    let file = File::open(file_import).unwrap();
    let lines: Vec<String> = io::BufReader::new(file).lines().map(|x| x.unwrap().trim().to_string()).collect::<Vec<_>>();
    let mut date = NaiveDate::from_ymd(1900, 1, 1);
    let date_regex = Regex::new(r"^\d{2}-\d{2}-\d{2}$").unwrap();
    for line_index in 0..lines.len() {
        if lines[line_index].starts_with("By: ") {
            // A "By: " line at the very top has no preceding title line;
            // skipping avoids the index underflow the original would hit.
            if line_index == 0 {
                continue;
            }
            // See if the next line has a date. `get` guards against a
            // trailing "By: " line (the original indexed out of bounds).
            if let Some(date_line) = lines.get(line_index + 1) {
                if date_regex.is_match(date_line) {
                    // The regex guarantees the MM-DD-YY shape, so slicing and
                    // parsing cannot fail; `parse` replaces the clunky
                    // from_str_radix(_, 10) calls.
                    let m: u32 = date_line[..2].parse().unwrap();
                    assert!(m > 0);
                    assert!(m <= 12);
                    let d: u32 = date_line[3..5].parse().unwrap();
                    assert!(d > 0);
                    assert!(d <= 31);
                    // Two-digit years are all in the 2000s for this data set.
                    let y: i32 = 2000 + date_line[6..8].parse::<i32>().unwrap();
                    assert!(y >= 2014);
                    assert!(y <= 2020);
                    date = NaiveDate::from_ymd(y, m, d);
                }
            }
            // The title is one line before the "By: " line.
            let title = parse::after(&lines[line_index - 1], "By: ").trim().to_string();
            // NaiveDate is Copy; the original's .clone() was redundant.
            purchase_dates.insert(title, date);
        }
    }
    purchase_dates
}
/// Build a `BookForAudible` with empty/default fields for `account_name`.
/// The format is always "Audiobook" and the location records which Audible
/// account owns the book.
fn make_empty_audible_book(account_name: &str) -> BookForAudible {
    BookForAudible {
        audible_account: account_name.to_string(),
        short_title: String::new(),
        title: String::new(),
        format: "Audiobook".to_string(),
        authors: vec![],
        narrators: vec![],
        series: None,
        // format! already yields a String; the trailing .to_string() the
        // original carried was a redundant allocation.
        location: format!("Audible ({})", account_name),
        year: None,
        acquired_date: None,
        read: Some(false),
    }
}
/// One audiobook as imported from an Audible library export.
#[derive(Clone, Debug)]
pub struct BookForAudible {
    /// Which Audible account owns the book (e.g. "personal").
    pub audible_account: String,
    /// Title as shown on the "By cover art" line of the export.
    pub short_title: String,
    /// Full title from the line following "By cover art".
    pub title: String,
    /// Always "Audiobook" for this importer.
    pub format: String,
    pub authors: Vec<String>,
    pub narrators: Vec<String>,
    pub series: Option<String>,
    /// Human-readable location string, e.g. "Audible (personal)".
    pub location: String,
    /// NOTE(review): never populated by this importer — presumably filled
    /// elsewhere; confirm before relying on it.
    pub year: Option<u32>,
    /// Purchase date matched from the purchase-history export.
    pub acquired_date: Option<NaiveDate>,
    /// Some(true) when the export marks the book "Finished".
    pub read: Option<bool>,
}
/// Minimal record pairing a title with its purchase date.
/// NOTE(review): the field name `aquired_date` is misspelled ("acquired");
/// kept as-is because renaming would break existing callers.
#[derive(Clone)]
pub struct BookAcquired {
    pub title: String,
    pub aquired_date: NaiveDate,
}
|
use test_winrt::Windows::Foundation::GuidHelper;
use windows::core::GUID;
/// A GUID must compare equal to itself, and two freshly created GUIDs must
/// compare unequal.
#[test]
fn guid_helper() -> windows::core::Result<()> {
    let first = GuidHelper::CreateNewGuid()?;
    let second = GuidHelper::CreateNewGuid()?;
    assert!(GuidHelper::Equals(&first, &first)?);
    assert!(!GuidHelper::Equals(&first, &second)?);
    Ok(())
}
/// Parsing a GUID from its string form must match the same GUID built from
/// its component values.
#[test]
fn guid_from_string() {
    let parsed = GUID::from("CFF52E04-CCA6-4614-A17E-754910C84A99");
    let expected = GUID::from_values(
        0xCFF52E04,
        0xCCA6,
        0x4614,
        [0xA1, 0x7E, 0x75, 0x49, 0x10, 0xC8, 0x4A, 0x99],
    );
    assert!(parsed == expected);
}
|
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::any::Any;
use std::ops::Deref;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use common_base::base::uuid;
use common_catalog::plan::DataSourceInfo;
use common_catalog::plan::DataSourcePlan;
use common_catalog::plan::PartInfo;
use common_catalog::plan::PartStatistics;
use common_catalog::plan::Partitions;
use common_catalog::plan::PartitionsShuffleKind;
use common_catalog::plan::PushDownInfo;
use common_catalog::plan::StageTableInfo;
use common_catalog::table::AppendMode;
use common_catalog::table::Table;
use common_catalog::table_context::TableContext;
use common_exception::ErrorCode;
use common_exception::Result;
use common_expression::BlockThresholds;
use common_expression::DataBlock;
use common_meta_app::principal::StageInfo;
use common_meta_app::schema::TableInfo;
use common_meta_app::schema::UpsertTableCopiedFileReq;
use common_pipeline_core::Pipeline;
use common_pipeline_sources::input_formats::InputContext;
use common_pipeline_sources::input_formats::SplitInfo;
use common_storage::init_stage_operator;
use common_storage::StageFileInfo;
use opendal::Operator;
use parking_lot::Mutex;
use crate::stage_table_sink::StageTableSink;
/// TODO: we need to track the data metrics in stage table.
pub struct StageTable {
    table_info: StageTableInfo,
    // This is not actually used; it is only a placeholder because the
    // Table trait requires one:
    // fn get_table_info(&self) -> &TableInfo).
    table_info_placeholder: TableInfo,
    // Lazily set via set_block_compact_thresholds; None until then.
    block_compact_threshold: Mutex<Option<BlockThresholds>>,
}
impl StageTable {
pub fn try_create(table_info: StageTableInfo) -> Result<Arc<dyn Table>> {
let table_info_placeholder = TableInfo::default().set_schema(table_info.schema());
Ok(Arc::new(Self {
table_info,
table_info_placeholder,
block_compact_threshold: Default::default(),
}))
}
/// Get operator with correctly prefix.
pub fn get_op(stage: &StageInfo) -> Result<Operator> {
init_stage_operator(stage)
}
pub async fn list_files(stage_info: &StageTableInfo) -> Result<Vec<StageFileInfo>> {
let op = Self::get_op(&stage_info.stage_info)?;
let infos = stage_info
.files_info
.list(&op, false)
.await?
.into_iter()
.collect::<Vec<_>>();
Ok(infos)
}
fn get_block_compact_thresholds_with_default(&self) -> BlockThresholds {
let guard = self.block_compact_threshold.lock();
match guard.deref() {
None => BlockThresholds::default(),
Some(t) => *t,
}
}
}
#[async_trait::async_trait]
impl Table for StageTable {
    fn as_any(&self) -> &dyn Any {
        self
    }

    // External stage has no table info yet, so a default placeholder
    // (carrying only the schema) is returned.
    fn get_table_info(&self) -> &TableInfo {
        &self.table_info_placeholder
    }

    fn get_data_source_info(&self) -> DataSourceInfo {
        DataSourceInfo::StageSource(self.table_info.clone())
    }

    /// Enumerate the stage's files and split them into sequentially
    /// shuffled partitions for reading.
    async fn read_partitions(
        &self,
        ctx: Arc<dyn TableContext>,
        _push_downs: Option<PushDownInfo>,
    ) -> Result<(PartStatistics, Partitions)> {
        let stage_info = &self.table_info;
        // User set the files explicitly (e.g. COPY ... FILES=(...));
        // otherwise list everything the stage can see.
        let files = if let Some(files) = &stage_info.files_to_copy {
            files.clone()
        } else {
            StageTable::list_files(stage_info).await?
        };
        let format =
            InputContext::get_input_format(&stage_info.stage_info.file_format_options.format)?;
        let operator = StageTable::get_op(&stage_info.stage_info)?;
        // The input format decides how files are cut into splits.
        let splits = format
            .get_splits(
                files,
                &stage_info.stage_info,
                &operator,
                &ctx.get_settings(),
            )
            .await?;
        // One partition per split.
        let partitions = splits
            .into_iter()
            .map(|v| {
                let part_info: Box<dyn PartInfo> = Box::new((*v).clone());
                Arc::new(part_info)
            })
            .collect::<Vec<_>>();
        Ok((
            PartStatistics::default(),
            Partitions::create_nolazy(PartitionsShuffleKind::Seq, partitions),
        ))
    }

    /// Build the copy pipeline that reads the planned splits out of the
    /// stage through the input format's executor.
    fn read_data(
        &self,
        ctx: Arc<dyn TableContext>,
        plan: &DataSourcePlan,
        pipeline: &mut Pipeline,
    ) -> Result<()> {
        let stage_table_info =
            if let DataSourceInfo::StageSource(stage_table_info) = &plan.source_info {
                stage_table_info
            } else {
                return Err(ErrorCode::Internal(""));
            };
        // Recover the typed SplitInfo values from the opaque partitions.
        let mut splits = vec![];
        for part in &plan.parts.partitions {
            if let Some(split) = part.as_any().downcast_ref::<SplitInfo>() {
                splits.push(Arc::new(split.clone()));
            }
        }
        // Build copy pipeline.
        let settings = ctx.get_settings();
        let schema = stage_table_info.schema.clone();
        let stage_info = stage_table_info.stage_info.clone();
        let operator = StageTable::get_op(&stage_table_info.stage_info)?;
        let compact_threshold = self.get_block_compact_thresholds_with_default();
        let input_ctx = Arc::new(InputContext::try_create_from_copy(
            operator,
            settings,
            schema,
            stage_info,
            splits,
            ctx.get_scan_progress(),
            compact_threshold,
        )?);
        input_ctx.format.exec_copy(input_ctx.clone(), pipeline)?;
        // Surface per-file error limits to the session context.
        ctx.set_on_error_map(input_ctx.get_maximum_error_per_file());
        Ok(())
    }

    /// Append data to the stage by writing it back out through
    /// `StageTableSink`s; a parallel compact/unload stage runs first unless
    /// the stage is configured for single-file output.
    fn append_data(
        &self,
        ctx: Arc<dyn TableContext>,
        pipeline: &mut Pipeline,
        _: AppendMode,
        _: bool,
    ) -> Result<()> {
        let single = self.table_info.stage_info.copy_options.single;
        let op = StageTable::get_op(&self.table_info.stage_info)?;
        // Shared uuid + per-sink counter make output file names unique.
        let uuid = uuid::Uuid::new_v4().to_string();
        let group_id = AtomicUsize::new(0);
        // parallel compact unload, the partial block will flush into next operator
        if !single && pipeline.output_len() > 1 {
            pipeline.add_transform(|input, output| {
                let gid = group_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                StageTableSink::try_create(
                    input,
                    ctx.clone(),
                    self.table_info.clone(),
                    op.clone(),
                    Some(output),
                    uuid.clone(),
                    gid,
                )
            })?;
        }
        // final compact unload
        pipeline.resize(1)?;
        // Add sink pipe.
        pipeline.add_sink(|input| {
            let gid = group_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            StageTableSink::try_create(
                input,
                ctx.clone(),
                self.table_info.clone(),
                op.clone(),
                None,
                uuid.clone(),
                gid,
            )
        })
    }

    // TODO use tmp file_name & rename to have atomic commit
    async fn commit_insertion(
        &self,
        _ctx: Arc<dyn TableContext>,
        _operations: Vec<DataBlock>,
        _copied_files: Option<UpsertTableCopiedFileReq>,
        _overwrite: bool,
    ) -> Result<()> {
        Ok(())
    }

    // Truncate the stage file.
    async fn truncate(&self, _ctx: Arc<dyn TableContext>, _: bool) -> Result<()> {
        Err(ErrorCode::Unimplemented(
            "S3 external table truncate() unimplemented yet!",
        ))
    }

    // Panics if set_block_compact_thresholds was never called; callers that
    // cannot guarantee that should use the _with_default variant above.
    fn get_block_compact_thresholds(&self) -> BlockThresholds {
        let guard = self.block_compact_threshold.lock();
        (*guard).expect("must success")
    }

    fn set_block_compact_thresholds(&self, thresholds: BlockThresholds) {
        let mut guard = self.block_compact_threshold.lock();
        (*guard) = Some(thresholds)
    }
}
|
use math::traits::ToIterator;
use crate::plink_bed::{
geno_to_lowest_two_bits, get_num_people_last_byte, lowest_two_bits_to_geno,
usize_div_ceil,
};
/// Genotype values packed PLINK-style: four SNPs per byte, two bits each.
pub struct PlinkSnps {
    // Packed 2-bit genotype codes; only the first num_snps entries are valid.
    bytes: Vec<u8>,
    // Number of valid SNPs; trailing bits of the last byte are padding.
    num_snps: usize,
}
impl PlinkSnps {
    /// Wrap already-packed bytes. Panics when `num_snps` claims more SNPs
    /// than `bytes` can hold (4 per byte).
    pub fn new(bytes: Vec<u8>, num_snps: usize) -> PlinkSnps {
        // Fix: assert! takes format arguments directly; wrapping the message
        // in format! passes a non-&str panic message, which allocates
        // unconditionally and is rejected outright by newer compilers.
        assert!(
            num_snps <= bytes.len() * 4,
            "num_snps ({}) > bytes.len() * 4 = {}",
            num_snps,
            bytes.len() * 4
        );
        PlinkSnps {
            bytes,
            num_snps,
        }
    }

    /// Pack one-genotype-per-byte values (0/1/2) into 2-bit PLINK codes,
    /// four per byte, low bits first.
    pub fn from_geno(geno: Vec<u8>) -> PlinkSnps {
        let num_bytes = usize_div_ceil(geno.len(), 4);
        let mut bytes: Vec<u8> = Vec::with_capacity(num_bytes);
        let mut snp_index = 0;
        // get_num_people_last_byte returns None for an empty genotype list,
        // which also guards the num_bytes - 1 subtraction below.
        if let Some(num_people_last_byte) = get_num_people_last_byte(geno.len())
        {
            // All full bytes: four genotypes each.
            for _ in 0..num_bytes - 1 {
                bytes.push(
                    geno_to_lowest_two_bits(geno[snp_index])
                        | (geno_to_lowest_two_bits(geno[snp_index + 1]) << 2)
                        | (geno_to_lowest_two_bits(geno[snp_index + 2]) << 4)
                        | (geno_to_lowest_two_bits(geno[snp_index + 3]) << 6),
                );
                snp_index += 4;
            }
            // last byte: may hold fewer than four genotypes.
            let mut last_byte = 0u8;
            for j in 0..num_people_last_byte {
                last_byte |=
                    geno_to_lowest_two_bits(geno[snp_index + j]) << (j * 2);
            }
            bytes.push(last_byte);
        }
        PlinkSnps::new(bytes, geno.len())
    }

    /// Number of valid SNPs in the packed data.
    #[inline]
    pub fn get_num_snps(&self) -> usize {
        self.num_snps
    }

    /// Borrow the packed bytes.
    #[inline]
    pub fn to_bytes(&self) -> &Vec<u8> {
        &self.bytes
    }

    /// Consume `self` and take ownership of the packed bytes.
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
}
impl ToIterator<'_, PlinkSnpsIter, u8> for PlinkSnps {
    /// Build a fresh genotype iterator without consuming `self`; the packed
    /// bytes are copied so the iterator owns its data.
    fn to_iter(&self) -> PlinkSnpsIter {
        PlinkSnpsIter {
            bytes: self.bytes.to_vec(),
            num_snps: self.num_snps,
            cursor: 0,
            byte_cursor: 0,
        }
    }
}
impl IntoIterator for PlinkSnps {
type IntoIter = PlinkSnpsIter;
type Item = <PlinkSnpsIter as Iterator>::Item;
fn into_iter(self) -> Self::IntoIter {
PlinkSnpsIter {
bytes: self.bytes,
num_snps: self.num_snps,
byte_cursor: 0,
cursor: 0,
}
}
}
/// Iterator that unpacks 2-bit PLINK genotype codes back into one value per
/// SNP.
pub struct PlinkSnpsIter {
    // Packed source bytes, four SNPs each.
    bytes: Vec<u8>,
    // Total number of valid SNPs; iteration stops here.
    num_snps: usize,
    // Index of the byte currently being unpacked.
    byte_cursor: usize,
    // Index of the next SNP to yield (0..num_snps).
    cursor: usize,
}
impl Iterator for PlinkSnpsIter {
    type Item = u8;

    /// Yield the next genotype, decoding two bits at a time and stepping to
    /// the next byte after every fourth SNP.
    fn next(&mut self) -> Option<Self::Item> {
        if self.cursor >= self.num_snps {
            return None;
        }
        let shift = 2 * (self.cursor % 4) as u8;
        let geno = lowest_two_bits_to_geno(self.bytes[self.byte_cursor] >> shift);
        self.cursor += 1;
        if self.cursor % 4 == 0 {
            self.byte_cursor += 1;
        }
        Some(geno)
    }
}
#[cfg(test)]
mod tests {
    use math::traits::ToIterator;

    use super::PlinkSnps;

    /// Decoding packed bytes yields the expected genotypes in order, and
    /// stops after num_snps even though the last byte has spare slots.
    #[test]
    fn test_plink_snps() {
        let expected_num_snps = 12;
        let snps = PlinkSnps::new(
            vec![0b10_00_11_00, 0b00_00_11_11, 0b11_10_10_10, 0b11001100],
            expected_num_snps,
        );
        let expected: Vec<u8> = vec![2, 0, 2, 1, 0, 0, 2, 2, 1, 1, 1, 0];
        let mut num_snps = 0;
        for (s, e) in snps.to_iter().zip(expected.iter()) {
            assert_eq!(s, *e);
            num_snps += 1;
        }
        assert_eq!(num_snps, expected_num_snps);
    }

    /// Round-trip: packing genotypes then iterating returns the original
    /// sequence exactly, across lengths that do and don't fill whole bytes.
    #[test]
    fn test_plink_snps_from_geno() {
        fn test(geno: Vec<u8>) {
            let plink_snps = PlinkSnps::from_geno(geno.clone());
            let num_snps = geno.len();
            let mut iter = plink_snps.into_iter();
            for i in 0..num_snps {
                assert_eq!(Some(geno[i]), iter.next());
            }
            assert_eq!(None, iter.next());
        }
        test(vec![]);
        test(vec![0]);
        test(vec![1]);
        test(vec![2]);
        test(vec![0, 1, 1]);
        test(vec![2, 2, 0, 1]);
        test(vec![1, 2, 0, 1, 1, 0]);
        test(vec![1, 2, 0, 1, 1, 0, 2, 1]);
        test(vec![1, 2, 2, 0, 0, 1, 0, 0, 2, 0]);
    }
}
|
use std::cell::Cell;
use std::collections::{HashMap,BTreeSet};
use std::sync::mpsc::{Receiver,Sender};
use std::sync::{Arc,Barrier};
use std::ffi::CString;
use libc::{c_char,c_int,c_void,ssize_t,size_t};
use crate::device::Device;
use crate::quick_io::{append_to_file_at_path,slurp_file_at_path,fd_poll_read};
use crate::control::{Config,Manifest};
/// Extension trait: log a `Result`'s error to stderr without consuming or
/// unwrapping it.
trait WarnIfErr {
    fn warn_if_err(&self);
}

impl<T, E: std::fmt::Debug> WarnIfErr for Result<T, E> {
    fn warn_if_err(&self) {
        match self {
            Ok(_) => {}
            Err(e) => eprintln!("Warning: {:?}", e),
        }
    }
}
// This needs to be read directly from a file, so the layout must match the
// kernel's binary blktrace record exactly: repr(C, packed), no reordering.
#[derive(Copy,Clone,Debug)]
#[repr(C, packed)]
struct BlkEvent {
    magic: u32, /* MAGIC << 8 | version */
    sequence: u32, /* event number */
    time: u64, /* in nanoseconds */
    sector: u64, /* disk offset */
    bytes: u32, /* transfer length */
    action: u32, /* what happened (a 16 high-bit field for category and then a 16 low-bit enum for action) */
    pid: u32, /* who did it */
    device: u32, /* device identifier (major is 12 high-bits then minor is 20 low-bits */
    cpu: u32, /* on what cpu did it happen */
    error: u16, /* completion error */
    pdu_len: u16, /* length of data after this trace */
}
// The top three bytes of `magic` are the endian marker; the low byte carries
// the format version (see the magic/version split in try_read_from_file).
const MAGIC_NATIVE_ENDIAN: u32 = 0x65617400;
const MAGIC_REVERSE_ENDIAN: u32 = 0x00746165;
// Only blktrace event format version 7 is understood.
const SUPPORTED_VERSION: u8 = 0x07;
impl BlkEvent {
fn try_read_from_file(trace_pipe_fd: c_int) -> Option<BlkEvent> {
let event_size = ::std::mem::size_of::<BlkEvent>();
// Wait 1ms for something
if !fd_poll_read(trace_pipe_fd, 1) {
return None;
}
let mut event = unsafe {
let mut event: BlkEvent = ::std::mem::uninitialized();
let buffer = ::std::slice::from_raw_parts_mut(&mut event as *mut BlkEvent as *mut u8, event_size);
let bytes_read = libc::read(trace_pipe_fd, buffer.as_mut_ptr() as *mut c_void, event_size as size_t);
if bytes_read == event_size as ssize_t {
event
} else if bytes_read == 0 {
return None;
} else if bytes_read < 0 {
let errno = *libc::__errno_location();
if errno == libc::EAGAIN || errno == libc::EWOULDBLOCK {
// Not got anything to read right now.
return None;
} else {
panic!("Could not read from trace pipe");
}
} else {
panic!("Read an incorrect number of bytes for a blk event. Wanted {}, read {}", event_size, bytes_read);
}
};
let magic = event.magic;
let (native_endian, version): (bool, u8) =
if magic & 0xffffff00 == MAGIC_NATIVE_ENDIAN {
(true, (magic & 0xff) as u8)
} else if magic & 0x00ffffff == MAGIC_REVERSE_ENDIAN {
(false, (magic >> 24) as u8)
} else {
panic!("Incorrect magic number for event. Got {:x}", magic);
};
if version != SUPPORTED_VERSION {
panic!("Unsupprted blk event format - only version 0x07 is supported");
}
if !native_endian {
event.swap_endian();
}
if event.pdu_len > 0 {
// Just discard - we don't care.
let mut discard: Vec<u8> = vec![0; event.pdu_len as usize];
unsafe {
if libc::read(trace_pipe_fd, discard.as_mut_ptr() as *mut c_void, event.pdu_len as size_t) != event.pdu_len as ssize_t {
panic!("Could not read (pdu portion of) event from trace pipe");
}
}
}
Some(event)
}
fn swap_endian(&mut self) {
self.magic = self.magic.swap_bytes();
self.sequence = self.sequence.swap_bytes();
self.time = self.time.swap_bytes();
self.sector = self.sector.swap_bytes();
self.bytes = self.bytes.swap_bytes();
self.action = self.action.swap_bytes();
self.pid = self.pid.swap_bytes();
self.device = self.device.swap_bytes();
self.cpu = self.cpu.swap_bytes();
self.error = self.error.swap_bytes();
self.pdu_len = self.pdu_len.swap_bytes();
}
}
/// RAII guard: run an action now and its inverse when the guard drops.
struct DoUndo<'u> {
    // Boxed so any FnOnce closure fits; Option lets Drop take ownership.
    undoer: Option<Box<dyn FnOnce() + 'u>>,
}

impl<'u> DoUndo<'u> {
    /// Run `doer` immediately; `undoer` runs when the returned guard is
    /// dropped.
    pub fn new<D: FnOnce(), U: FnOnce() + 'u>(doer: D, undoer: U) -> Self {
        doer();
        DoUndo {
            undoer: Some(Box::new(undoer)),
        }
    }
}

impl<'u> Drop for DoUndo<'u> {
    fn drop(&mut self) {
        // take() is always Some here — Drop runs exactly once.
        if let Some(undo) = self.undoer.take() {
            undo();
        }
    }
}
pub fn run(config: &Config, manifest: &Manifest, devices: &Vec<Device>, log_channel: Sender<(usize, usize)>, sync_barrier_channel: Receiver<Arc<Barrier>>) {
let mut device_map: HashMap<u32, HashMap<&Device, usize>> = HashMap::new();
for (i, device) in devices.iter().enumerate() {
let base_device_event_dev = device.get_base_device().event_dev;
if !device_map.contains_key(&base_device_event_dev) {
device_map.insert(base_device_event_dev, HashMap::new());
}
if let Some(_) = device_map.get_mut(&base_device_event_dev).unwrap().insert(device, i) {
panic!("Duplicate device found");
}
}
let device_map = device_map; // Drop mutability
let whole_disk_devices: Vec<&Device> =
devices
.iter()
.map(|device| {device.get_base_device()})
.collect::<BTreeSet<&Device>>() // Deduplicate and sort
.into_iter()
.collect();
{
let events_enabled = slurp_file_at_path(&config.tracing_path.join("events/enable")).unwrap();
if std::str::from_utf8(&events_enabled).unwrap() != "0\n" {
panic!("Some tracing events are already enabled");
}
}
let old_current_tracer = slurp_file_at_path(&config.tracing_path.join("current_tracer")).unwrap();
let _current_tracer_setup = DoUndo::new(
|| {append_to_file_at_path(&config.tracing_path.join("current_tracer"), b"blk\n").unwrap();},
|| {append_to_file_at_path(&config.tracing_path.join("current_tracer"), &old_current_tracer).warn_if_err();},
);
let old_tracer_option_bin = slurp_file_at_path(&config.tracing_path.join("options/bin")).unwrap();
let _tracer_option_bin_setup = DoUndo::new(
|| {append_to_file_at_path(&config.tracing_path.join("options/bin"), b"1\n").unwrap();},
|| {append_to_file_at_path(&config.tracing_path.join("options/bin"), &old_tracer_option_bin).warn_if_err();},
);
let old_tracer_option_context = slurp_file_at_path(&config.tracing_path.join("options/context-info")).unwrap();
let _tracer_option_context = DoUndo::new(
|| {append_to_file_at_path(&config.tracing_path.join("options/context-info"), b"0\n").unwrap();},
|| {append_to_file_at_path(&config.tracing_path.join("options/context-info"), &old_tracer_option_context).warn_if_err();},
);
let old_buffer_size = slurp_file_at_path(&config.tracing_path.join("buffer_size_kb")).unwrap();
let _buffer_size = DoUndo::new(
|| {append_to_file_at_path(&config.tracing_path.join("buffer_size_kb"), format!("{}\n", config.trace_buffer_size).as_bytes()).unwrap();},
|| {append_to_file_at_path(&config.tracing_path.join("buffer_size_kb"), &old_buffer_size).warn_if_err();},
);
// Open the trace output pipe. O_NONBLOCK is essential: the consume_event
// closure below must return "nothing to read" instead of blocking, so the
// poll loop can also service the sync-barrier channel.
let trace_pipe_fd = unsafe {
    libc::open(CString::new(config.tracing_path.join("trace_pipe").to_str().unwrap()).unwrap().as_ptr(), libc::O_RDONLY | libc::O_NONBLOCK)
};
if trace_pipe_fd < 0 {
    panic!("Could not open trace pipe");
}
// Flush anything in the trace_pipe first so we know we're only going
// to get blk data.
{
    // Wrap the fd in a stdio FILE so fgetc can drain it byte-by-byte;
    // with O_NONBLOCK set, fgetc returns a negative value (EOF) as soon
    // as no more data is pending.
    let trace_pipe_file = unsafe{
        libc::fdopen(trace_pipe_fd, b"rb\0".as_ptr() as *const c_char)
    };
    // I'm assuming the stream can't ever half-write a data structure.
    unsafe{
        while libc::fgetc(trace_pipe_file) >= 0 {
        };
    };
    // NOTE(review): trace_pipe_file is never fclose()d. That appears
    // deliberate — per POSIX, fclose() would also close the underlying
    // trace_pipe_fd, which is still read from below — but it does leak
    // the FILE structure for the life of the thread; confirm intent.
}
// Use whole disk devices, as they're unique, and they'll give us good defaults.
// Snapshot each disk's current blktrace sysfs settings — one
// (act_mask, start_lba, end_lba, enable) tuple per device, in the same
// order as whole_disk_devices so the undo closure can zip them back up.
let old_block_trace_enables: Vec<(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>)> = whole_disk_devices.iter().map(
    |device| {
        (
            slurp_file_at_path(&device.sys_dev_path.join("trace/act_mask")).unwrap(),
            slurp_file_at_path(&device.sys_dev_path.join("trace/start_lba")).unwrap(),
            slurp_file_at_path(&device.sys_dev_path.join("trace/end_lba")).unwrap(),
            slurp_file_at_path(&device.sys_dev_path.join("trace/enable")).unwrap()
        )
    }
).collect();
// Enable tracing of "queue" actions over each disk's full sector range.
// NOTE(review): assumes the DoUndo guard runs the first closure now and the
// second on drop — confirm against DoUndo's definition.
let _block_trace_enable_setup = DoUndo::new(
    || {
        for device in &whole_disk_devices {
            append_to_file_at_path(&device.sys_dev_path.join("trace/act_mask"), b"queue\n").unwrap();
            append_to_file_at_path(&device.sys_dev_path.join("trace/start_lba"), b"0\n").unwrap();
            append_to_file_at_path(&device.sys_dev_path.join("trace/end_lba"), format!("{}\n", device.end_sector).as_bytes()).unwrap();
            append_to_file_at_path(&device.sys_dev_path.join("trace/enable"), b"1\n").unwrap();
        }
    },
    || {
        // Restore the parameters first (reverse of the order they were set),
        // then restore the enable flags for all devices in a second pass.
        for (device, old_block_trace_enable) in whole_disk_devices.iter().zip(&old_block_trace_enables) {
            let (act_mask, start_lba, end_lba, _) = old_block_trace_enable;
            append_to_file_at_path(&device.sys_dev_path.join("trace/end_lba"), &end_lba).warn_if_err();
            append_to_file_at_path(&device.sys_dev_path.join("trace/start_lba"), &start_lba).warn_if_err();
            append_to_file_at_path(&device.sys_dev_path.join("trace/act_mask"), &act_mask).warn_if_err();
        }
        for (device, old_block_trace_enable) in whole_disk_devices.iter().zip(&old_block_trace_enables) {
            let (_, _, _, enable) = old_block_trace_enable;
            // NOTE(review): this restore uses unwrap() (panics in the undo
            // path) while everything else uses warn_if_err() — presumably
            // because silently leaving tracing enabled would be worse than
            // a panic during teardown; confirm the asymmetry is intentional.
            append_to_file_at_path(&device.sys_dev_path.join("trace/enable"), &enable).unwrap();
        }
    }
);
// Shared loop flag; a Cell so the consume_event closure (captured by shared
// reference) can flip it when the log channel disconnects.
let continuing = Cell::new(true);
// Read one event from the trace pipe and, for write operations, forward the
// (job index, chunk index) of every chunk the write touches to log_channel.
// Returns bool for whether or not something was read.
let consume_event = || {
    match BlkEvent::try_read_from_file(trace_pipe_fd) {
        None => {
            // Nothing pending on the (non-blocking) pipe.
            false
        },
        Some(event) => {
            // event.action packs a category bitmask in the high 16 bits
            // and an action code in the low 16 bits.
            let category = event.action >> 16;
            let action = event.action & 0xffff;
            let absolute_sector: u64 = event.sector;
            let bytes: u64 = event.bytes as u64;
            // NOTE(review): 0x0002 is presumably BLK_TC_WRITE and action 1
            // presumably __BLK_TA_QUEUE — confirm against the kernel's
            // blktrace_api.h constants.
            if category & 0x0002 == 0 {
                // Was not a write operation, so we don't care.
                return true;
            }
            if action != 1 {
                // Was not a QUEUE action.
                return true;
            }
            if bytes == 0 {
                // There is no data location associated, so skip.
                return true;
            }
            // Map the event's device number to the traced device(s) it
            // belongs to; events from untracked devices are ignored.
            let event_dev = event.device;
            if let Some(child_devices) = device_map.get(&event_dev) {
                // child_device may contain both a whole disk AND partitions.
                for (device, device_number) in child_devices {
                    if device.start_sector <= absolute_sector && absolute_sector < device.end_sector {
                        // Translate the absolute write extent into this
                        // device's chunk indices (chunk sizes are per-job).
                        let chunk_size = manifest.jobs[*device_number].chunk_size as u64;
                        let relative_sector: u64 = absolute_sector - device.start_sector;
                        let first_byte: u64 = relative_sector * 512; // I think a sector is always 512 on Linux?
                        let last_byte: u64 = first_byte + bytes - 1; // inclusive: last byte actually written
                        let first_chunk: usize = (first_byte / chunk_size) as usize;
                        let last_chunk: usize = (last_byte / chunk_size) as usize;
                        if last_byte >= device.sector_count * 512 {
                            // This might be violated if we're tracing a partition whilst a whole disk is modified!
                            // As such, this should not panic, but a warning may be useful.
                            eprintln!("Traced operation extends beyond end of device. This may happen if a device has been extended, or if a whole disk is modified whilst a partition is being traced. Event is from {} to {}, but matched device ({}:{}) is from {} to {}. Event: {:?}", absolute_sector, absolute_sector + bytes/512, device.major, device.minor, device.start_sector, device.end_sector, event);
                        }
                        // Report every chunk the write overlaps (inclusive
                        // range). A send error means the receiver is gone,
                        // so signal shutdown and stop processing.
                        for change_index in first_chunk..(last_chunk+1) {
                            if let Err(_) = log_channel.send((*device_number, change_index)) {
                                continuing.set(false);
                                return true;
                            }
                        }
                        // We might be tracing both a whole disk AND a partition, so don't break!
                    }
                }
            };
            true
        },
    }
};
// Main poll loop: interleave draining trace events with servicing sync
// requests, until either the barrier channel or the log channel disconnects.
while continuing.get() {
    match sync_barrier_channel.try_recv() {
        Ok(barrier) => {
            // Sync request: drain every event currently pending in the
            // pipe so all writes observed so far have been forwarded,
            // then rendezvous with the requester.
            eprintln!("Syncing...");
            while consume_event() {}
            barrier.wait();
        },
        Err(std::sync::mpsc::TryRecvError::Empty) => {
            // Does not block
            // No sync requested: consume one event; if the pipe was
            // empty, yield instead of hot-spinning on the non-blocking fd.
            if !consume_event() {
                std::thread::yield_now();
            }
        },
        Err(std::sync::mpsc::TryRecvError::Disconnected) => {
            // The sync side has gone away entirely — shut down the loop.
            continuing.set(false);
        }
    };
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.