text stringlengths 8 4.13M |
|---|
use proconio::{input, marker::Usize1};
/// Fill `size[i]` with the number of vertices in the subtree rooted at `i`,
/// where `p` is the parent of `i` (pass an out-of-range sentinel such as
/// `usize::MAX` for the root). `g` is an undirected adjacency list.
///
/// Takes slices instead of `&Vec`/`&mut Vec` (clippy `ptr_arg`); existing
/// call sites passing `&g` / `&mut size` still compile via deref coercion.
fn dfs(i: usize, p: usize, g: &[Vec<usize>], size: &mut [usize]) {
    size[i] = 1;
    for &j in &g[i] {
        if j == p {
            continue;
        }
        dfs(j, i, g, size);
        // After the recursive call, size[j] is final.
        size[i] += size[j];
    }
}
/// For each vertex `i`, store in `path[i]` the number of unordered pairs
/// {u, v} (u, v != i) whose tree path passes through `i`.
///
/// Removing `i` splits the tree into components: the part seen through the
/// parent edge (size `acc`) plus one component per child subtree. A pair's
/// path goes through `i` exactly when its endpoints lie in different
/// components, so we sum products over all unordered component pairs.
///
/// Takes slices instead of `&Vec`/`&mut Vec` (clippy `ptr_arg`); call sites
/// passing `&g`, `&size`, `&mut path` still compile via deref coercion.
fn solve(i: usize, p: usize, g: &[Vec<usize>], size: &[usize], acc: usize, path: &mut [usize]) {
    // Sizes of the components that remain when `i` is removed.
    let mut children = vec![acc];
    for &j in &g[i] {
        if j == p {
            continue;
        }
        children.push(size[j]);
    }
    // Sum of products over unordered pairs via a running prefix sum:
    // dp1 = sum of sizes seen so far, dp2 = pair-product sum so far.
    let mut dp1 = 0;
    let mut dp2 = 0;
    for &c in &children {
        dp2 += dp1 * c;
        dp1 += c;
    }
    path[i] = dp2;
    for &j in &g[i] {
        if j == p {
            continue;
        }
        // The component "above" child j is everything except j's subtree.
        solve(j, i, g, size, acc + size[i] - size[j], path);
    }
}
/// Reads a tree with `n` vertices (1-indexed edges) and prints the number of
/// unordered vertex triples in which no vertex lies on the tree path between
/// the other two: C(n, 3) minus, for each vertex i, the number of pairs whose
/// path passes through i (`path[i]`, computed by `solve` from subtree sizes).
fn main() {
    input! {
        n: usize,
        edges: [(Usize1, Usize1); n - 1],
    };
    // Fewer than 3 vertices: no triple exists.
    if n <= 2 {
        println!("0");
        return;
    }
    // Build the undirected adjacency list.
    let mut g = vec![vec![]; n];
    for (a, b) in edges {
        g[a].push(b);
        g[b].push(a);
    }
    let mut size = vec![0; n];
    // usize::MAX acts as the "no parent" sentinel for the root.
    dfs(0, std::usize::MAX, &g, &mut size);
    let mut path = vec![0; n];
    solve(0, std::usize::MAX, &g, &size, 0, &mut path);
    // C(n, 3) total triples, then subtract the separated pairs per vertex.
    let mut ans = n * (n - 1) * (n - 2) / 6;
    for p in path {
        ans -= p;
    }
    println!("{}", ans);
}
|
use diesel;
use diesel::prelude::*;
use diesel::pg::PgConnection;
use super::schema::todos;
/// Insertable form of a todo row: no `id`, the database assigns it.
#[derive(Insertable)]
#[table_name="todos"]
pub struct NewTodo<'a> {
    pub title: &'a str,
    pub description: &'a str,
}
/// A persisted row from the `todos` table.
/// Field order must match the column order of the `todos` schema (Queryable
/// maps by position, not by name).
#[derive(Queryable)]
pub struct Todo {
    pub id: i32,
    pub title: String,
    pub description: String,
}
impl<'a> NewTodo<'a> {
    /// Persist `new_todo` in the `todos` table and return the stored row
    /// (including its database-assigned `id`).
    ///
    /// Panics if the insert fails.
    pub fn insert(new_todo: NewTodo, db_conn: &PgConnection) -> Todo {
        let inserted: Todo = diesel::insert_into(todos::table)
            .values(&new_todo)
            .get_result(db_conn)
            .expect("Error saving new todo");
        inserted
    }
}
impl Todo {
    /// Load every row from the `todos` table.
    ///
    /// Panics if the query fails.
    pub fn all(db_conn: &PgConnection) -> Vec<Todo> {
        todos::table
            .load(db_conn)
            .expect("Error loading todos")
    }
    // TODO(review): the original file had a dangling `pub fn toggle(db_conn:
    // &PgConnection)` signature with no body — it did not compile, and the
    // current schema (id/title/description) has no column to toggle. Re-add
    // it once the intended behavior and column exist.
}
|
pub mod lexer;
|
extern crate gl;
extern crate glfw;
use std::ffi::CStr;
use std::mem;
use std::os::raw::c_void;
use std::path::Path;
use std::ptr;
use cgmath::{Matrix4, vec3, Vector3};
use image::GenericImage;
use crate::sdl_main::{SCR_HEIGHT, SCR_WIDTH};
use crate::shader::Shader;
use self::gl::types::*;
// Layout of the bitmap-font texture atlas: a grid of glyph cells.
const CHARS_PER_LINE: f32 = 31.0; // glyph cells per atlas row
const CHAR_LINES: f32 = 3.0; // glyph rows in the atlas
/// Renders text by drawing textured quads that sample a bitmap-font atlas.
pub struct DrawText {
    shader: Shader, // shader program used for the font quads
    vao: u32,       // vertex array holding one quad (6 vertices) per glyph cell
    texture: u32,   // the font atlas texture
}
impl DrawText {
pub fn new() -> DrawText {
let (our_shader, vao, texture1) = unsafe {
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
let our_shader = Shader::new(
"resources/font_texture.vs",
"resources/font_texture.fs");
let posx: f32 = 1.0 / CHARS_PER_LINE*0.5;
let posy: f32 = 1.0 / CHAR_LINES * 0.25;
let cw: f32 = 1.0 / CHARS_PER_LINE;
let ch: f32 = 1.0 / CHAR_LINES;
let mut vertices: [f32; CHARS_PER_LINE as usize * 30 * CHAR_LINES as usize] = [0.0; CHARS_PER_LINE as usize * 30 * CHAR_LINES as usize];
for y in 0..CHAR_LINES as usize {
//let offset = (CHARS_PER_LINE * 30.0) as usize * y ; //(CHAR_LINES-1.0 - y as f32 ) as usize;
let offset = (CHARS_PER_LINE * 30.0) as usize * (2 - y); //(CHAR_LINES-1.0 - y as f32 ) as usize;
for x in 0..CHARS_PER_LINE as usize {
let ix: f32 = 0.0;
let iy: f32 = 0.0;
let imagex: f32 = x as f32 * cw;
let imagey: f32 = y as f32 * ch;
let v = [
ix, iy, 0.0, imagex, imagey,
ix + posx, iy, 0.0, imagex + cw, imagey,
ix + posx, iy + posy, 0.0, imagex + cw, imagey + ch,
ix + posx, iy + posy, 0.0, imagex + cw, imagey + ch,
ix, iy + posy, 0.0, imagex, imagey + ch,
ix, iy, 0.0, imagex, imagey,
];
for i in 0..30 {
vertices[offset + i + x * 30] = v[i];
}
}
}
let (mut vbo, mut vao) = (0, 0);
gl::GenVertexArrays(1, &mut vao);
gl::GenBuffers(1, &mut vbo);
gl::BindVertexArray(vao);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
gl::BufferData(gl::ARRAY_BUFFER,
(vertices.len() * mem::size_of::<GLfloat>()) as GLsizeiptr,
&vertices[0] as *const f32 as *const c_void,
gl::STATIC_DRAW);
let stride = 5 * mem::size_of::<GLfloat>() as GLsizei;
gl::VertexAttribPointer(0, 3, gl::FLOAT, gl::FALSE, stride, ptr::null());
gl::EnableVertexAttribArray(0);
gl::VertexAttribPointer(1, 2, gl::FLOAT, gl::FALSE, stride, (3 * mem::size_of::<GLfloat>()) as *const c_void);
gl::EnableVertexAttribArray(1);
let mut texture1 = 0;
gl::GenTextures(1, &mut texture1);
gl::BindTexture(gl::TEXTURE_2D, texture1);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::REPEAT as i32); // set texture wrapping to gl::REPEAT (default wrapping method)
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::REPEAT as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as i32);
let img = image::open(&Path::new("resources/textures/font.png")).expect("Failed to load texture");
let data = img.flipv().raw_pixels();
gl::TexImage2D(gl::TEXTURE_2D,
0,
gl::RGBA as i32,
img.width() as i32,
img.height() as i32,
0,
gl::RGBA,
gl::UNSIGNED_BYTE,
&data[0] as *const u8 as *const c_void);
gl::GenerateMipmap(gl::TEXTURE_2D);
our_shader.useProgram();
(our_shader, vao, texture1)
};
DrawText{
shader: our_shader,
vao: vao,
texture: texture1
}
}
/// Draw `message` at screen position (x, y) in the given `colour`.
///
/// Each printable character selects a 6-vertex quad in the prebuilt VAO via
/// its cell offset in the font atlas; a per-character model matrix translates
/// the quad into place. Characters <= 32 (space/control) are skipped but
/// still advance the cursor.
pub unsafe fn draw_text(&self,message: &str, x: f32, y: f32, colour:Vector3<f32>) {
    gl::ActiveTexture(gl::TEXTURE0);
    gl::BindTexture(gl::TEXTURE_2D, self.texture);
    self.shader.useProgram();
    self.shader.setVec3(c_str!("colour"), colour.x,colour.y,colour.z);
    gl::BindVertexArray(self.vao);
    let char_vec: Vec<char> = message.chars().collect();
    // Screen-space mapping; note x is doubled and y mapped toward [-1, 1).
    // NOTE(review): the exact coordinate convention (pixels vs. half-pixels)
    // is not visible here — confirm against the callers.
    let scale_x = SCR_WIDTH as f32;
    let scale_y = SCR_HEIGHT as f32 / 2.0;
    let xx: f32 = x * 2.0;
    let yy: f32 = y / scale_y - 1.0;
    let mut letter = 0;
    for c in char_vec {
        // Only glyphs above ASCII 32 have quads in the atlas.
        if c as u8 > 32 {
            // Advance 32 units per character cell before normalizing.
            let another_position: [Vector3<f32>; 1] = [vec3(((xx + letter as f32 * 32.0) as f32 / scale_x) - 1.0, yy, 0.0)];
            let model: Matrix4<f32> = Matrix4::from_translation(another_position[0]);
            self.shader.setMat4(c_str!("model"), &model);
            // First vertex index of the glyph's quad. The atlas has 31
            // glyphs per row, 6 vertices per glyph:
            //   row 0: '!'..'@'   row 1: 'A'..'_'   row 2: '`' and beyond
            let triangles = if c >= 'A' && c <= '_' {
                let abcdefg = c as u8 - 'A' as u8;
                31 * 6 + abcdefg as i32 * 6
            } else if c > '_' {
                let abcdefg = c as u8 - '`' as u8;
                31 * 6 * 2 + abcdefg as i32 * 6
            } else {
                let abcdefg = c as u8 - '!' as u8;
                abcdefg as i32 * 6
            };
            gl::DrawArrays(gl::TRIANGLES, triangles, 6);
        }
        // Advance one cell even for skipped characters (e.g. spaces).
        letter = letter + 1;
    }
}
}
|
extern crate base64;
mod api;
mod monitor;
mod static_resources;
use std::ops::Deref;
use std::time::Duration;
use crate::rocket::{config::Environment, Config};
use crate::rand::{self, RngCore};
/// Interval between monitor detection runs, newtyped so it can live in
/// Rocket's managed state.
#[derive(Debug)]
struct DetectInterval(Duration);
impl DetectInterval {
    /// Returns the wrapped interval by value.
    #[inline]
    fn get_value(&self) -> Duration {
        self.0
    }
}
impl Deref for DetectInterval {
    type Target = Duration;
    /// Lets a `&DetectInterval` be used wherever `&Duration` is expected.
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Optional auth key kept in managed state; `None` means no key was
/// configured (how that is interpreted is up to the request handlers).
#[derive(Debug)]
struct AuthKey(Option<String>);
impl AuthKey {
    /// Returns the configured key as `&str`, if any.
    #[inline]
    fn get_value(&self) -> Option<&str> {
        self.0.as_deref()
    }
}
/// Build the Rocket instance and block the current thread serving it.
///
/// * `monitor` - detection interval exposed to handlers via managed state.
/// * `address` / `listen_port` - bind address for the HTTP server.
/// * `auth_key` - optional API key stored in managed state.
/// * `only_api` - when true, skip mounting the static/monitor UI routes.
pub fn launch(
    monitor: Duration,
    address: String,
    listen_port: u16,
    auth_key: Option<String>,
    only_api: bool,
) {
    // Development profile in debug builds, Production in release builds.
    let mut config = Config::build(if cfg!(debug_assertions) {
        Environment::Development
    } else {
        Environment::Production
    });
    // Fresh random secret key on every start: private cookies do not
    // survive a restart.
    let mut secret_key = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut secret_key);
    config.secret_key = Some(base64::encode(&secret_key));
    config.address = address;
    config.port = listen_port;
    // `unwrap()` finalizes the ConfigBuilder, panicking on invalid config.
    let rocket =
        rocket::custom(config.unwrap()).manage(DetectInterval(monitor)).manage(AuthKey(auth_key));
    let rocket = api::mounts(rocket);
    let rocket = if only_api {
        rocket
    } else {
        let rocket = static_resources::rocket_handler(rocket);
        monitor::rocket_handler(rocket)
    };
    // Blocks until the server shuts down.
    rocket.launch();
}
|
//! # generator
//!
//! Rust generator library
//!
#![cfg_attr(nightly, feature(asm))]
#![cfg_attr(nightly, feature(repr_simd))]
#![cfg_attr(nightly, feature(core_intrinsics))]
#![cfg_attr(nightly, feature(naked_functions))]
#![cfg_attr(nightly, feature(thread_local))]
#![cfg_attr(test, deny(warnings))]
#![deny(missing_docs)]
#[macro_use]
extern crate log;
mod alloc;
mod detail;
mod gen_impl;
mod reg_context;
mod rt;
mod scope;
mod stack;
mod yield_;
pub use gen_impl::{Generator, GeneratorImpl, Gn};
pub use rt::{get_local_data, is_generator, Error};
pub use scope::Scope;
pub use yield_::{
co_get_yield, co_set_para, co_yield_with, done, get_yield, yield_, yield_from, yield_with,
};
|
#![feature(core_intrinsics)]
#![feature(thread_id_value)]
#![feature(stmt_expr_attributes)]
mod profiler;
mod raw_event;
mod serialization;
mod sinks;
use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use std::convert::Into;
use std::fs;
use std::path::Path;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use cfg_if::cfg_if;
use fxhash::FxHashMap;
use log::warn;
use parking_lot::RwLock;
use self::serialization::SerializationSink as Sink;
/// Run `f` inside a function the optimizer treats as cold and never inlines,
/// keeping rarely-executed work off the caller's fast path.
#[inline(never)]
#[cold]
pub fn cold_path<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    f()
}
/// Branch-prediction hint: evaluates `$e`, telling the optimizer it is
/// almost always true. Wraps the nightly `std::intrinsics::likely` (this
/// crate enables `core_intrinsics` above).
#[macro_export]
macro_rules! likely {
    ($e:expr) => {
        #[allow(unused_unsafe)]
        {
            unsafe { std::intrinsics::likely($e) }
        }
    };
}
/// Branch-prediction hint: evaluates `$e`, telling the optimizer it is
/// almost always false. Wraps the nightly `std::intrinsics::unlikely`.
#[macro_export]
macro_rules! unlikely {
    ($e:expr) => {
        #[allow(unused_unsafe)]
        {
            unsafe { std::intrinsics::unlikely($e) }
        }
    };
}
// Pick the serialization sink best suited to the target platform.
cfg_if! {
    if #[cfg(windows)] {
        /// FileSerializationSink is faster on Windows
        type SerializationSink = sinks::FileSerializationSink;
    } else if #[cfg(target_arch = "wasm32")] {
        // No file/mmap access on wasm32: buffer everything in memory.
        type SerializationSink = sinks::ByteVecSink;
    } else {
        /// MmapSerializationSink is faster on macOS and Linux
        type SerializationSink = sinks::MmapSerializationSink;
    }
}
/// Profiler specialized to the platform's serialization sink.
type Profiler = profiler::Profiler<SerializationSink>;
/// Broad compilation phases used to bucket profiling events.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ProfileCategory {
    Parsing,
    Lowering,
    Translation,
    Codegen,
    Linking,
    Other,
}
bitflags::bitflags! {
    /// Bitmask selecting which classes of events the profiler records.
    struct EventFilter: u32 {
        const GENERIC_ACTIVITIES = 1 << 0;
        const QUERY = 1 << 1;
        const MLIR = 1 << 2;
        const LLVM = 1 << 3;
        // All of the above are enabled by default.
        const DEFAULT = Self::GENERIC_ACTIVITIES.bits | Self::QUERY.bits | Self::MLIR.bits | Self::LLVM.bits;
    }
}
// keep this in sync with the `-Z self-profile-events` help message in firefly_session/options.rs
/// Maps each `-Z self-profile-events` name to the filter bits it enables.
const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
    ("none", EventFilter::empty()),
    ("all", EventFilter::all()),
    ("default", EventFilter::DEFAULT),
    ("generic", EventFilter::GENERIC_ACTIVITIES),
    ("query", EventFilter::QUERY),
    ("mlir", EventFilter::MLIR),
    ("llvm", EventFilter::LLVM),
];
/// Handle to an interned event-label string in the profile data.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct StringId(u32);
impl StringId {
    /// Sentinel id that never refers to a real interned string.
    pub const INVALID: Self = Self(u32::max_value());
}
/// A reference to the SelfProfiler. It can be cloned and sent across thread
/// boundaries at will.
#[derive(Clone)]
pub struct SelfProfilerRef {
    // This field is `None` if self-profiling is disabled for the current
    // compilation session.
    profiler: Option<Arc<SelfProfiler>>,
    // We store the filter mask directly in the reference because that doesn't
    // cost anything and allows for filtering without first checking whether
    // the profiler is actually enabled.
    event_filter_mask: EventFilter,
    // Print verbose generic activities to stdout.
    print_verbose_generic_activities: bool,
}
impl SelfProfilerRef {
    /// Wrap an optional profiler. The event-filter mask is copied out of it
    /// (or left empty when profiling is disabled) so later filter checks
    /// never need to touch the `Option`.
    pub fn new(
        profiler: Option<Arc<SelfProfiler>>,
        print_verbose_generic_activities: bool,
    ) -> SelfProfilerRef {
        // If there is no SelfProfiler then the filter mask is set to NONE,
        // ensuring that nothing ever tries to actually access it.
        let event_filter_mask = profiler
            .as_ref()
            .map(|p| p.event_filter_mask)
            .unwrap_or(EventFilter::empty());
        SelfProfilerRef {
            profiler,
            event_filter_mask,
            print_verbose_generic_activities,
        }
    }
    // This shim makes sure that calls only get executed if the filter mask
    // lets them pass. It also contains some trickery to make sure that
    // code is optimized for non-profiling compilation sessions, i.e. anything
    // past the filter check is never inlined so it doesn't clutter the fast
    // path.
    #[inline(always)]
    fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
    where
        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
    {
        // Outlined slow path: only reached when `event_filter` is enabled,
        // which (per `new`) can only happen when `profiler` is Some.
        #[inline(never)]
        fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
        where
            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
        {
            let profiler = profiler_ref.profiler.as_ref().unwrap();
            f(&**profiler)
        }
        if unlikely!(self.event_filter_mask.contains(event_filter)) {
            cold_call(self, f)
        } else {
            TimingGuard::none()
        }
    }
    /// Start profiling a verbose generic activity. Profiling continues until the
    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
    /// a measureme event, "verbose" generic activities also print a timing entry to
    /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
    pub fn verbose_generic_activity<'a>(
        &'a self,
        event_label: &'static str,
    ) -> VerboseTimingGuard<'a> {
        let message = if self.print_verbose_generic_activities {
            Some(event_label.to_owned())
        } else {
            None
        };
        VerboseTimingGuard::start(message, self.generic_activity(event_label))
    }
    /// Start profiling a generic activity. Profiling continues until the
    /// TimingGuard returned from this call is dropped.
    #[inline(always)]
    pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            TimingGuard::start(profiler, profiler.generic_id, event_label)
        })
    }
    /// Start profiling a query event; recording stops when the guard drops.
    #[inline(always)]
    pub fn query(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::QUERY, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            TimingGuard::start(profiler, profiler.query_id, event_label)
        })
    }
    /// Start profiling an MLIR event; recording stops when the guard drops.
    #[inline(always)]
    pub fn mlir(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::MLIR, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            TimingGuard::start(profiler, profiler.mlir_id, event_label)
        })
    }
    /// Start profiling an LLVM event; recording stops when the guard drops.
    #[inline(always)]
    pub fn llvm(&self, event_label: &'static str) -> TimingGuard<'_> {
        self.exec(EventFilter::LLVM, |profiler| {
            let event_label = profiler.get_or_alloc_cached_string(event_label);
            TimingGuard::start(profiler, profiler.llvm_id, event_label)
        })
    }
    /// Run `f` with the underlying profiler, if profiling is enabled.
    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
        if let Some(profiler) = &self.profiler {
            f(&profiler)
        }
    }
    /// Whether self-profiling is enabled for this session.
    #[inline]
    pub fn enabled(&self) -> bool {
        self.profiler.is_some()
    }
    /// Whether LLVM events pass the current filter mask.
    #[inline]
    pub fn llvm_recording_enabled(&self) -> bool {
        self.event_filter_mask.contains(EventFilter::LLVM)
    }
    /// Clone out the underlying profiler handle, if any.
    #[inline]
    pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
        self.profiler.clone()
    }
}
/// The profiler proper: owns the output sink plus the string-interning table.
pub struct SelfProfiler {
    profiler: Profiler,
    event_filter_mask: EventFilter,
    // Maps label text to its interned id; RwLock because reads dominate.
    string_cache: RwLock<FxHashMap<String, StringId>>,
    // Next id to hand out; ids 1-4 are pre-assigned in `new`.
    next_string_id: AtomicU32,
    // Pre-interned ids for the four built-in event kinds.
    generic_id: StringId,
    query_id: StringId,
    mlir_id: StringId,
    llvm_id: StringId,
}
impl SelfProfiler {
    /// Create a profiler writing its data under `output_dir`.
    ///
    /// Pre-interns the four built-in event-kind labels (ids 1-4, so fresh id
    /// allocation starts at 5) and parses `event_filters` names into the
    /// filter mask. Unknown names produce a warning listing the valid
    /// options; `None` selects `EventFilter::DEFAULT`.
    pub fn new(output_dir: &Path, event_filters: &Option<Vec<String>>) -> anyhow::Result<Self> {
        let sink = Arc::new(SerializationSink::from_path(output_dir)?);
        let profiler = Profiler::new(sink);
        let string_cache = RwLock::new(FxHashMap::default());
        let mut sc = string_cache.write();
        // NOTE(review): ids start at 1, leaving 0 unused — presumably
        // reserved/sentinel; confirm against the profiler's format.
        let generic_id = StringId(1);
        sc.insert("generic".to_owned(), generic_id);
        let query_id = StringId(2);
        sc.insert("query".to_owned(), query_id);
        let mlir_id = StringId(3);
        sc.insert("mlir".to_owned(), mlir_id);
        let llvm_id = StringId(4);
        sc.insert("llvm".to_owned(), llvm_id);
        let next_string_id = AtomicU32::new(5);
        // Release the write lock before doing the remaining setup.
        drop(sc);
        let mut event_filter_mask = EventFilter::empty();
        if let Some(ref event_filters) = *event_filters {
            let mut unknown_events = vec![];
            for item in event_filters {
                if let Some(&(_, mask)) =
                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
                {
                    event_filter_mask |= mask;
                } else {
                    unknown_events.push(item.clone());
                }
            }
            if !unknown_events.is_empty() {
                // Sort and dedup so each bad name is reported exactly once.
                unknown_events.sort();
                unknown_events.dedup();
                warn!(
                    "Unknown self-profiler events specified: {}. Available options are: {}.",
                    unknown_events.join(", "),
                    EVENT_FILTERS_BY_NAME
                        .iter()
                        .map(|&(name, _)| name.to_string())
                        .collect::<Vec<_>>()
                        .join(", ")
                );
            }
        } else {
            event_filter_mask = EventFilter::DEFAULT;
        }
        Ok(Self {
            profiler,
            event_filter_mask,
            string_cache,
            next_string_id,
            generic_id,
            query_id,
            mlir_id,
            llvm_id,
        })
    }
    /// Gets a `StringId` for the given string. This method makes sure that
    /// any strings going through it will only be allocated once in the
    /// profiling data.
    pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
    where
        A: Borrow<str> + Into<String>,
    {
        // Only acquire a read-lock first since we assume that the string is
        // already present in the common case.
        {
            let string_cache = self.string_cache.read();
            if let Some(&id) = string_cache.get(s.borrow()) {
                return id;
            }
        }
        let mut string_cache = self.string_cache.write();
        // Check if the string has already been added in the small time window
        // between dropping the read lock and acquiring the write lock.
        match string_cache.entry(s.into()) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let string_id = self.next_string_id.fetch_add(1, Ordering::Relaxed);
                *e.insert(StringId(string_id))
            }
        }
    }
}
/// RAII guard for one recorded interval; recording stops when it drops.
/// `None` inside means "record nothing" (profiling disabled/filtered).
#[must_use]
pub struct TimingGuard<'a>(Option<profiler::TimingGuard<'a, SerializationSink>>);
impl<'a> TimingGuard<'a> {
    /// Begin recording an interval event on the current thread.
    #[inline]
    pub fn start(
        profiler: &'a SelfProfiler,
        event_kind: StringId,
        event_id: StringId,
    ) -> TimingGuard<'a> {
        // Thread ids are u64 but the event format stores u32; this cast
        // truncates. NOTE(review): assumed safe for realistic thread counts.
        let thread_id = std::thread::current().id().as_u64().get() as u32;
        let raw_profiler = &profiler.profiler;
        let timing_guard =
            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
        TimingGuard(Some(timing_guard))
    }
    /// A guard that records nothing.
    #[inline]
    pub fn none() -> TimingGuard<'a> {
        TimingGuard(None)
    }
    /// Run `f` while this guard is alive, then stop recording.
    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}
/// Guard that, besides the profiling interval, optionally prints a
/// `-Ztime`-style timing line with the elapsed time when dropped.
#[must_use]
pub struct VerboseTimingGuard<'a> {
    // Present only when verbose printing was requested: start time + label.
    start_and_message: Option<(Instant, String)>,
    _guard: TimingGuard<'a>,
}
impl<'a> VerboseTimingGuard<'a> {
    /// Wrap `_guard`; when `message` is Some, remember it together with the
    /// current time so `drop` can print the elapsed duration.
    pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
        VerboseTimingGuard {
            _guard,
            start_and_message: message.map(|msg| (Instant::now(), msg)),
        }
    }
    /// Run `f` while this guard is alive.
    #[inline(always)]
    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
        let _timer = self;
        f()
    }
}
impl Drop for VerboseTimingGuard<'_> {
    fn drop(&mut self) {
        // Print only when verbose mode recorded a message.
        if let Some((start, ref message)) = self.start_and_message {
            print_time_passes_entry(true, &message[..], start.elapsed());
        }
    }
}
/// Print a `-Ztime-passes`-style line for `what`: elapsed time plus the
/// current resident set size (when the platform can report it). A no-op
/// unless `do_it` is true.
pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) {
    if !do_it {
        return;
    }
    // Append "; rss: <MB>MB" only when resident memory is available.
    let mem_string = get_resident()
        .map(|n| {
            let mb = n as f64 / 1_000_000.0;
            format!("; rss: {}MB", mb.round() as usize)
        })
        .unwrap_or_default();
    println!("time: {}{}\t{}", duration_to_secs_str(dur), mem_string, what);
}
// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
/// Render `dur` as fractional seconds with exactly three decimal places,
/// e.g. `"1.500"`.
pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
    // `as_secs_f64` performs the secs + subsec_nanos / 1e9 computation that
    // was previously done by hand.
    format!("{:.3}", dur.as_secs_f64())
}
// Memory reporting
cfg_if! {
    if #[cfg(windows)] {
        /// Current resident set size (working set) in bytes, if available.
        fn get_resident() -> Option<usize> {
            use std::mem::{self, MaybeUninit};
            use winapi::shared::minwindef::DWORD;
            use winapi::um::processthreadsapi::GetCurrentProcess;
            use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
            let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
            // GetProcessMemoryInfo returns 0 on failure.
            match unsafe {
                GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
            } {
                0 => None,
                _ => {
                    // Safe: the call succeeded, so the struct is initialized.
                    let pmc = unsafe { pmc.assume_init() };
                    Some(pmc.WorkingSetSize as usize)
                }
            }
        }
    } else if #[cfg(unix)] {
        /// Current resident set size in bytes, read from /proc/self/statm.
        fn get_resident() -> Option<usize> {
            // Field 1 of statm is the resident page count.
            let field = 1;
            let contents = fs::read("/proc/self/statm").ok()?;
            let contents = String::from_utf8(contents).ok()?;
            let s = contents.split_whitespace().nth(field)?;
            let npages = s.parse::<usize>().ok()?;
            // NOTE(review): assumes 4 KiB pages; wrong on systems with a
            // different page size — confirm if accuracy matters.
            Some(npages * 4096)
        }
    } else {
        /// Resident memory is not measurable on this platform.
        fn get_resident() -> Option<usize> {
            None
        }
    }
}
|
#[macro_use]
extern crate serde_derive;
use std::collections::HashMap;
pub use sp_core::{
H256 as Hash,
crypto::{Pair, Ss58Codec,AccountId32 as AccountId},
};
use sp_runtime::{
MultiSignature,
generic::Era,
};
use codec::{Encode, Decode, Compact};
use system::Phase;
use events::{EventsDecoder, RuntimeEvent, SystemEvent};
use extrinsic::xt_primitives::{GenericAddress, GenericExtra};//AdditionalSigned
pub use chain_helper::ChainHelper as SubChainHelper;
pub use keyring::{Sr25519, Ed25519, Crypto};
use sp_core::H256;
mod keyring;
pub mod extrinsic;
pub mod error;
pub mod events;
pub mod chain_helper;
pub mod node_metadata;
/// The block number type used in this runtime.
pub type BlockNumber = u64;
/// The timestamp moment type used in this runtime.
pub type Moment = u64;
/// The balance type used in this runtime (128-bit).
pub type Balance = u128;
/// Token kinds this client knows how to handle.
#[derive(Clone, Debug, Decode)]
pub enum Token{
    EEE,
    TokenX
}
/// Formats the token's canonical name.
///
/// Implemented as `Display` rather than `ToString` directly (the idiomatic
/// form — clippy flags manual `ToString` impls); `to_string()` keeps working
/// for all existing callers via the blanket `impl<T: Display> ToString`.
impl std::fmt::Display for Token {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Token::EEE => "EEE",
            Token::TokenX => "TokenX",
        };
        f.write_str(name)
    }
}
/// Unsigned transaction payload plus the chain context needed to sign it.
#[derive(Encode, Decode, Debug)]
pub struct RawTx {
    // Encoded call data (presumably SCALE-encoded — verify at the encoder).
    pub func_data: Vec<u8>,
    // Account nonce ("index") of the sender.
    pub index: u32,
    pub genesis_hash: H256,
    pub spec_version: u32,
    pub tx_version:u32,
}
/// Used to transfer the decoded result of account information, use the default unit here?
#[derive(Clone, Debug, Default, Decode)]
pub struct EeeAccountInfo {
    pub nonce: u32,
    pub refcount: u32,
    // Balances are u128 here; per the original note they are converted to
    // String at the (Java-facing) boundary, since Java lacks a u128 type.
    pub free: u128,
    pub reserved: u128,
    pub misc_frozen: u128,
    pub fee_frozen: u128,
}
/// Variant of the account-info struct whose `refcount` decodes as a single
/// byte instead of u32 (for chains using the older encoding).
#[derive(Clone, Debug, Default, Decode)]
pub struct EeeAccountInfoRefU8 {
    pub nonce: u32,
    pub refcount: u8,
    // u128 balances; converted to String at the Java-facing boundary
    // (Java has no u128), per the original note.
    pub free: u128,
    pub reserved: u128,
    pub misc_frozen: u128,
    pub fee_frozen: u128,
}
// Fields are Options because some transactions (e.g. the one that sets the
// block time) carry no signature, leaving the signer-related fields empty.
#[derive(Default, Debug)]
pub struct TransferDetail {
    // Transaction nonce ("index") of the signing account.
    pub index: Option<u32>,
    // Account the balance is transferred out of.
    pub from: Option<String>,
    // Destination account receiving the balance.
    pub to: Option<String>,
    pub signer: Option<String>,
    // Amount transferred.
    pub value: Option<u128>,
    pub hash: Option<String>,
    pub timestamp: Option<u64>,
    pub token_name: String,
    pub method_name: String,
    pub ext_data: Option<String>,
}
|
use std::env;
use std::process;
fn main() {
// Parse args for the number which is the length of tx list
let args: Vec<String> = env::args().collect();
let tx_num = merkle_tree_rust::parse_args(&args).unwrap_or_else(|err| {
eprintln!("Error parsing args: {}" ,err);
process::exit(1);
});
// Generate tx list of specified length
let txs = merkle_tree_rust::make_txs(tx_num);
// Create a merkle tree from a list of string as fake transctions
let merkle_tree = merkle_tree_rust::make_merkle_tree(&txs).unwrap_or_else(|err| {
eprintln!("Error creating merkle tree: {}" ,err);
process::exit(1);
});
// Get merkle proof of a tx from a merkle tree
let proof = merkle_tree_rust::get_merkle_proof(&txs, txs.last().unwrap().clone().as_str()).unwrap();
// Verify the tx by merkle proof
let root = merkle_tree_rust::get_root_by_proof(txs.last().unwrap().clone().as_ref(), &proof).unwrap();
assert_eq!(root, merkle_tree.last().unwrap()[0]);
} |
/// Runs every pattern-matching demo below, in the order the concepts are
/// introduced (literals, named variables, or-patterns, ranges, destructuring,
/// ignoring values, match guards, @ bindings).
fn main() {
    matching_literals();
    matching_named_variables();
    multiple_patterns();
    matching_ranges();
    destructuring_structs();
    destructuring_enums();
    nested_structs_and_enums();
    destructuring_structs_and_tuples();
    ignoring_an_entire_value();
    ignoring_parts_of_a_value();
    ignore_named_var_with_underscore();
    ignoring_remaining_parts();
    match_guards();
    bindings();
}
/// Matching directly against literal values; `_` is the catch-all arm.
fn matching_literals() {
    let x = 1;
    match x {
        1 => println!("matching_literals: one"),
        2 => println!("matching_literals: two"),
        3 => println!("matching_literals: three"),
        _ => println!("matching_literals: anything"),
    }
}
/*
Named variables are irrefutable patterns that match any value, and we’ve used
them many times in the book. However, there is a complication when you use
named variables in match expressions. Because match starts a new scope,
variables declared as part of a pattern inside the match expression will shadow
those with the same name outside the match construct, as is the case with all
variables.
*/
/// Shows how a named variable in a match pattern shadows an outer binding of
/// the same name (see the long comment above this function).
fn matching_named_variables() {
    let x = Some(5);
    let y = 10;
    match x {
        Some(50) => println!("matching_named_variables: Got 50"),
        /*
        Because we're in a new scope inside the match expression, this is a new
        y variable, not the y we declared at the beginning with the value 10.
        This new y binding will match any value inside a Some, which is what we
        have in x.
        */
        Some(y) => println!("matching_named_variables: matched, y = {:?}", y),
        _ => println!("matching_named_variables: default case, x = {:?}", x),
    }
    // Outside the match, `y` is the outer binding (10) again.
    println!("matching_named_variables: at the end: x = {:?}, y = {:?}", x, y);
}
/// The or-operator `|` matches any of several patterns in one arm.
fn multiple_patterns() {
    let x = 1;
    match x {
        1 | 2 => println!("multiple_patterns: one or two"),
        3 => println!("multiple_patterns: three"),
        _ => println!("multiple_patterns: anything"),
    }
}
/// Matching against inclusive ranges of numbers and characters.
///
/// Uses the modern `..=` inclusive-range pattern syntax; the legacy `...`
/// form used previously is deprecated and rejected in the 2021 edition.
fn matching_ranges() {
    let x = 5;
    match x {
        1..=5 => println!("matching_ranges: one through five"),
        _ => println!("matching_ranges: something else"),
    }
    let x = 'c';
    match x {
        'a'..='j' => println!("matching_ranges: early ASCII letter"),
        'k'..='z' => println!("matching_ranges: late ASCII letter"),
        _ => println!("matching_ranges: something else"),
    }
}
/// Destructuring struct fields into variables, the field-shorthand form, and
/// mixing literals with bindings inside a struct pattern.
fn destructuring_structs() {
    struct Point {
        x: i32,
        y: i32,
    }
    let p = Point { x: 0, y: 7 };
    // Long form: bind field x to `a` and field y to `b`.
    let Point { x: a, y: b } = p;
    assert_eq!(0, a);
    assert_eq!(7, b);
    println!("destructuring_structs: a={}, b={}.", a, b);
    /*
    Because having variable names match the fields is common and because writing
    let Point { x: x, y: y } = p; contains a lot of duplication, there is a
    shorthand for patterns that match struct fields: you only need to list the
    name of the struct field, and the variables created from the pattern will
    have the same names.
    */
    let Point { x, y } = p;
    assert_eq!(0, x);
    assert_eq!(7, y);
    println!("destructuring_structs: x={}, y={}.", x, y);
    /*
    We can also destructure with literal values as part of the struct pattern
    rather than creating variables for all the fields.
    */
    match p {
        Point { x, y: 0 } => println!("destructuring_structs: On the x axis at {}", x),
        Point { x: 0, y } => println!("destructuring_structs: On the y axis at {}", y),
        Point { x, y } => println!("destructuring_structs: On neither axis: ({}, {})", x, y),
    }
}
/// Destructuring each shape of enum variant: unit, struct-like, and
/// tuple-like.
fn destructuring_enums() {
    #[allow(dead_code)]
    enum Message {
        Quit,
        Move { x: i32, y: i32 },
        Write(String),
        ChangeColor(i32, i32, i32),
    }
    let msg = Message::ChangeColor(0, 160, 255);
    match msg {
        // Unit variant: nothing to bind.
        Message::Quit => {
            println!("destructuring_enums: The Quit variant has no data to destructure.")
        },
        // Struct-like variant: bind fields by name.
        Message::Move { x, y } => {
            println!(
                "destructuring_enums: Move in the x direction {} and in the y direction {}",
                x,
                y
            );
        }
        // Tuple-like variants: bind fields by position.
        Message::Write(text) => println!("destructuring_enums: Text message: {}", text),
        Message::ChangeColor(r, g, b) => {
            println!(
                "destructuring_enums: Change the color to red {}, green {}, and blue {}",
                r,
                g,
                b
            )
        }
    }
}
/// Patterns can nest: match an enum variant that itself contains another
/// enum, destructuring both levels in a single arm.
fn nested_structs_and_enums() {
    #[allow(dead_code)]
    enum Color {
        Rgb(i32, i32, i32),
        Hsv(i32, i32, i32),
    }
    #[allow(dead_code)]
    enum Message {
        Quit,
        Move { x: i32, y: i32 },
        Write(String),
        ChangeColor(Color),
    }
    let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
    match msg {
        Message::ChangeColor(Color::Rgb(r, g, b)) => {
            println!(
                "nested_structs_and_enums: Change the color to red {}, green {}, and blue {}",
                r,
                g,
                b
            )
        },
        Message::ChangeColor(Color::Hsv(h, s, v)) => {
            println!(
                "nested_structs_and_enums: Change the color to hue {}, saturation {}, and value {}",
                h,
                s,
                v
            )
        }
        _ => ()
    }
}
/// Mixing tuple and struct patterns in one destructuring `let`.
fn destructuring_structs_and_tuples() {
    struct Point {
        x: i32,
        y: i32,
    }
    let ((feet, inches), Point {x, y}) = ((3, 10), Point { x: 3, y: -10 });
    println!("destructuring_structs_and_tuples: feet={}, inches={}, x={}, y={}.", feet, inches, x, y);
}
/// `_` in a parameter position ignores the argument entirely: it is never
/// bound, so no unused-variable warning fires.
fn foo(_: i32, y: i32) {
    println!("ignoring_an_entire_value(foo): This code only uses the y parameter: {}", y);
}
/// Demonstrates calling a function whose first parameter is ignored.
fn ignoring_an_entire_value() {
    foo(3, 4);
}
/// Using `_` inside patterns to test a value's shape while ignoring the data
/// it carries, both in tuples of Options and in plain tuples.
fn ignoring_parts_of_a_value() {
    let mut setting_value = Some(5);
    let new_setting_value = Some(10);
    match (setting_value, new_setting_value) {
        // Both are Some: we only care about the shape, not the numbers.
        (Some(_), Some(_)) => {
            println!("ignoring_parts_of_a_value: Can't overwrite an existing customized value");
        }
        _ => {
            setting_value = new_setting_value;
        }
    }
    println!("ignoring_parts_of_a_value: setting is {:?}", setting_value);
    let numbers = (2, 4, 8, 16, 32);
    match numbers {
        // `_` skips individual positions in a tuple pattern.
        (first, _, third, _, fifth) => {
            println!("ignoring_parts_of_a_value: Some numbers: {}, {}, {}", first, third, fifth)
        },
    }
}
/// `_name` still binds (and can move) the value; a bare `_` never binds, so
/// the matched value is not moved and stays usable afterwards.
fn ignore_named_var_with_underscore() {
    let s = Some(String::from("Hello!"));
    /*
    An unused variable starting with an underscore still binds the value, which
    might take ownership of the value. The next line causes compiler error: "
    borrow of moved value: `s`". because the s value will still be moved
    into _s, which prevents us from using s again.
    */
    // if let Some(_s) = s {
    /*
    Using the underscore by itself doesn't ever bind to the value. The next line
    will compile without any errors because s doesn't get moved into "_".
    */
    if let Some(_) = s {
        println!("ignore_named_var_with_underscore: found a string");
    }
    println!("ignore_named_var_with_underscore: s={:?}", s);
}
/// `..` ignores all remaining parts of a value: trailing struct fields, or
/// the middle of a tuple (binding only the first and last elements).
fn ignoring_remaining_parts() {
    #[allow(dead_code)]
    struct Point {
        x: i32,
        y: i32,
        z: i32,
    }
    let origin = Point { x: 0, y: 0, z: 0 };
    match origin {
        // `..` stands in for y and z.
        Point { x, .. } => println!("ignoring_remaining_parts: x is {}", x),
    }
    let numbers = (2, 4, 8, 16, 32);
    match numbers {
        // `..` in the middle skips everything between first and last.
        (first, .., last) => {
            println!("ignoring_remaining_parts: Some numbers: {}, {}", first, last);
        },
    }
}
/*
A match guard is an additional if condition specified after the pattern in a
match arm that must also match, along with the pattern matching, for that arm to
be chosen.
*/
/// Match guards: an extra `if` condition on a match arm that must also hold
/// for the arm to be chosen (see the comment above this function).
fn match_guards() {
    let num = Some(4);
    match num {
        // Guard tests the bound value without consuming the pattern.
        Some(x) if x < 5 => println!("match_guards: num less than five: {}", x),
        Some(x) => println!("match_guards: num {}", x),
        None => (),
    }
    let x = Some(5);
    let y = 10;
    match x {
        Some(50) => println!("match_guards: Got 50"),
        // Unlike a pattern binding, a guard can compare against the OUTER y.
        Some(n) if n == y => println!("match_guards: Matched, n = {:?}", n),
        _ => println!("match_guards: Default case, x = {:?}", x),
    }
    println!("match_guards: at the end: x = {:?}, y = {:?}", x, y);
    let x = 4;
    let y = false;
    match x {
        /*
        The match condition states that the arm only matches if the value of x
        is equal to 4, 5, or 6 and if y is true.
        */
        4 | 5 | 6 if y => println!("match_guards: 4, 5, or 6? yes"),
        _ => println!("match_guards: 4, 5, or 6? no"),
    }
}
/*
The at operator (@) lets us create a variable that holds a value at the same
time we’re testing that value to see whether it matches a pattern. Using @ lets
us test a value and save it in a variable within one pattern.
*/
/// The `@` operator binds a value to a name while simultaneously testing it
/// against a pattern.
///
/// Uses the modern `..=` inclusive-range syntax; the legacy `...` form used
/// previously is deprecated and rejected in the 2021 edition.
fn bindings() {
    enum Message {
        Hello { id: i32 },
    }
    let msg = Message::Hello { id: 5 };
    match msg {
        // `id_variable @ 3..=7` tests the range AND captures the value.
        Message::Hello { id: id_variable @ 3..=7 } => {
            println!("Found an id in range: {}", id_variable)
        },
        // Range-only pattern: matches but does not bind the id.
        Message::Hello { id: 10..=12 } => {
            println!("Found an id in another range")
        },
        // Plain binding: captures any remaining id.
        Message::Hello { id } => {
            println!("Found some other id: {}", id)
        },
    }
}
use sciter::{Value};
use flate2::read::ZlibEncoder;
use flate2::Compression;
use imagequant;
use lodepng::{self, ColorType::PALETTE, CompressSettings, State, RGBA};
use std::io::Read;
use std::os::raw::c_uchar;
use std::path::{ Path, PathBuf };
use std::{str, fs};
// https://stackoverflow.com/a/55033999/13378247
use crate::misc::{ Args, Options, make_error_message, append_dir };
/// Quantize and re-encode the PNG at `file_name`, writing the minified copy
/// next to the original (optionally with a ".min" suffix and/or inside a
/// "minified" subfolder, per `options`), and return before/after sizes to
/// the UI.
pub fn compress_file(file_name: String, options: Options) -> Args {
    println!("png::compress_file");
    let path = Path::new(&file_name);
    if !path.is_file() {
        return make_error_message(format!("Not a file: {}", path.display()));
    }
    let in_file_name_path_buf = PathBuf::from(path);
    // Decode to RGBA32 so the quantizer has raw pixels to work with.
    let file = match lodepng::decode32_file(&in_file_name_path_buf) {
        Ok(file) => file,
        Err(_) => return make_error_message(format!("Could not open file: {}", in_file_name_path_buf.display()))
    };
    let add_ext = match options.addExt.to_bool().unwrap() {
        true => ".min",
        _ => ""
    };
    let add_folder = options.addFolder.to_bool().unwrap();
    if add_folder {
        // Bug fix: the Result of create_dir_all was previously discarded, so
        // a failed directory creation only surfaced later as a confusing
        // encode error. Report it here instead.
        let minified_dir = path.parent().unwrap().join("minified");
        if fs::create_dir_all(&minified_dir).is_err() {
            return make_error_message(format!("Could not create folder: {}", minified_dir.display()));
        }
    }
    // "<stem><.min?>.<ext>" in the same directory as the input.
    let out_file_name_string = format!(
        "{}{}.{}",
        path.file_stem().unwrap().to_str().unwrap(),
        add_ext,
        path.extension().unwrap().to_str().unwrap()
    );
    let mut out_file_name_path_buf = path.with_file_name(out_file_name_string);
    if add_folder {
        out_file_name_path_buf = append_dir(Path::new(&out_file_name_path_buf), "minified");
    }
    let width = file.width;
    let height = file.height;
    let in_buffer_len = file.buffer.len();
    // Palette-quantize (lossy) to shrink the image, then encode with the
    // palette attached to the lodepng state.
    let (palette, pixels) = quantize(&file.buffer, width as usize, height as usize);
    let mut state = make_state();
    add_palette_to_state(&mut state, palette);
    let out_buffer_len: usize;
    // First encode measures the output size (and feeds `filtering`); the
    // on-disk file is produced by `encode_file` below.
    match state.encode(&pixels, width, height) {
        Ok(out_buffer) => {
            out_buffer_len = out_buffer.len();
            filtering(out_buffer, width, height);
        }
        Err(_) => {
            return make_error_message("Failed to encode the image.".to_string())
        }
    }
    match state.encode_file(&out_file_name_path_buf, &pixels, width, height) {
        Err(e) => {
            // Surface lodepng's C error description to the UI.
            let err_msg = str::from_utf8(e.c_description());
            let err_msg = err_msg.ok().unwrap();
            return make_error_message(format!("{:?}", err_msg))
        }
        _ => Args {
            path: Value::from(out_file_name_path_buf.display().to_string()),
            sizeBefore: Value::from(in_buffer_len as i32),
            sizeAfter: Value::from(out_buffer_len as i32),
            error: Value::from(false)
        }
    }
}
// using imagequant quantize the PNG to reduce the file size
// returns the palette and vector of pixels
fn quantize(buffer: &[RGBA], width: usize, height: usize) -> (Vec<RGBA>, Vec<u8>) {
    // Configure the quantizer: slowest/best speed, quality window 70-99.
    let mut liq = imagequant::new();
    liq.set_speed(1);
    liq.set_quality(70, 99);
    let img = &mut liq
        .new_image(&buffer, width, height, 0.45455)
        .unwrap();
    // Quantize, then remap the image through the generated palette.
    match liq.quantize(img) {
        Ok(mut res) => res.remapped(img).unwrap(),
        Err(_) => panic!("Failed to quantize image"),
    }
}
// create the initial state with default settings for PNG with a palette
// use flate2 for the image compression rather than the compression that comes with the
// lonepng package, flate2 tends to be significantly faster as well as produces a smaller image
fn make_state() -> State {
    let mut state = lodepng::ffi::State::new();
    // Both the PNG output and the raw input are 8-bit palette-indexed.
    state.info_png_mut().color.colortype = PALETTE;
    state.info_png_mut().color.set_bitdepth(8);
    state.info_raw_mut().colortype = PALETTE;
    state.info_raw_mut().set_bitdepth(8);
    // lib uses custom deflate which is slower and creates a larger filesize than flate2
    // SAFETY: `deflate_ffi` matches lodepng's custom-zlib callback ABI; the
    // context pointer is unused by the callback, hence null.
    unsafe {
        state.set_custom_zlib(Some(deflate_ffi), std::ptr::null());
    }
    // NOTE(review): add_id = 0 presumably suppresses the encoder-id text chunk
    // and text_compression = 1 enables zlib for text chunks — confirm against
    // the lodepng encoder settings docs.
    state.encoder.add_id = 0;
    state.encoder.text_compression = 1;
    state
}
// add the palette from the quantization to the image state
fn add_palette_to_state(state: &mut State, palette: Vec<RGBA>) {
    // Every palette entry must be registered with both the PNG info (output)
    // and the raw-image info (input) for a palette-indexed encode.
    for color in palette.iter() {
        state
            .info_png_mut()
            .color
            .palette_add(color.clone())
            .unwrap();
        state.info_raw_mut().palette_add(color.clone()).unwrap();
    }
}
// to override the default compressor for lodepng, an unsafe external c function has to be passed to used
//
// # Safety
// Called by lodepng with `input` pointing to `input_size` readable bytes and
// `settings` pointing to a valid CompressSettings. Writes the output buffer
// pointer and its length through `out` / `out_size`; returns 0 for success.
unsafe extern "C" fn deflate_ffi(
    out: &mut *mut c_uchar,
    out_size: &mut usize,
    input: *const c_uchar,
    input_size: usize,
    settings: *const CompressSettings,
) -> u32 {
    let input = vec_from_raw(input, input_size);
    let settings = std::ptr::read(settings);
    let (mut buffer, size) = deflate(&input, settings);
    *out = buffer.as_mut_ptr();
    *out_size = size;
    // Bug fix: `buffer` used to be dropped when this function returned,
    // leaving `*out` dangling (use-after-free in the C caller). Leak the Vec
    // instead so the pointer stays valid after we return.
    // NOTE(review): the caller eventually frees this pointer with its own
    // allocator — confirm lodepng is configured to use Rust's allocator,
    // otherwise copy into a lodepng-allocated buffer here instead.
    std::mem::forget(buffer);
    0
}
// call flate2's zlib encoder return the buffer and length
fn deflate(input: &[u8], _settings: CompressSettings) -> (Vec<u8>, usize) {
    // ZlibEncoder wraps the input reader; reading from it yields zlib output.
    let mut encoder = ZlibEncoder::new(input, Compression::best());
    let mut compressed = Vec::new();
    let written = encoder
        .read_to_end(&mut compressed)
        .unwrap_or_else(|_| panic!("Failed to compress buffer"));
    (compressed, written)
}
// convert the raw buffer to a vector
// SAFETY (caller contract): `data` must point to `len` readable, initialized bytes.
unsafe fn vec_from_raw(data: *const c_uchar, len: usize) -> Vec<u8> {
    let bytes = std::slice::from_raw_parts(data, len);
    bytes.to_vec()
}
// Placeholder for a future filtering pass over the encoded buffer.
// Currently a deliberate no-op: it only consumes the buffer produced by the
// measuring encode in `compress_file`.
fn filtering(_buffer: Vec<u8>, _width: usize, _height: usize) {}
use std::io::Read;
/// Returns the largest pairwise Euclidean distance between `points`
/// (0.0 for fewer than two points).
fn max_distance(points: &[(i32, i32)]) -> f64 {
    let mut max_length: f64 = 0.0;
    for (i, a) in points.iter().enumerate() {
        // Distance is symmetric, so only pairs (i, j) with j > i are needed;
        // the old version compared every point against points[1..] regardless
        // of the outer index, doing roughly twice the work.
        for b in &points[i + 1..] {
            // Square in f64: the old `(delta).pow(2)` squared in i32 and
            // overflowed (panic in debug, wrap in release) for |delta| > 46340.
            let dx = (b.0 - a.0) as f64;
            let dy = (b.1 - a.1) as f64;
            let length = (dx * dx + dy * dy).sqrt();
            if length > max_length {
                max_length = length;
            }
        }
    }
    max_length
}

/// Reads a point count and that many `x y` pairs from stdin, then prints the
/// maximum pairwise distance with six decimal places.
fn main() {
    let mut buf = String::new();
    // Slurp all of stdin at once.
    std::io::stdin().read_to_string(&mut buf).unwrap();
    // Whitespace-separated token iterator over the whole input.
    let mut iter = buf.split_whitespace();
    let items: usize = iter.next().unwrap().parse().unwrap();
    let points: Vec<(i32, i32)> = (0..items)
        .map(|_| {
            (
                iter.next().unwrap().parse().unwrap(),
                iter.next().unwrap().parse().unwrap(),
            )
        })
        .collect();
    println!("{:.6}", max_distance(&points));
}
|
#[doc = r"Register block"]
// Field order plus #[repr(C)] defines the MMIO layout; the byte offsets are
// recorded in each field's doc string. (svd2rust-generated pattern.)
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - AES DMA Interrupt Mask"]
    pub aes_dmaim: AES_DMAIM,
    #[doc = "0x04 - AES DMA Raw Interrupt Status"]
    pub aes_dmaris: AES_DMARIS,
    #[doc = "0x08 - AES DMA Masked Interrupt Status"]
    pub aes_dmamis: AES_DMAMIS,
    #[doc = "0x0c - AES DMA Interrupt Clear"]
    pub aes_dmaic: AES_DMAIC,
}
// Each register below is a single volatile 32-bit cell; the companion module
// of the same name holds its field-level read/write API.
#[doc = "AES DMA Interrupt Mask"]
pub struct AES_DMAIM {
    register: vcell::VolatileCell<u32>,
}
#[doc = "AES DMA Interrupt Mask"]
pub mod aes_dmaim;
#[doc = "AES DMA Raw Interrupt Status"]
pub struct AES_DMARIS {
    register: vcell::VolatileCell<u32>,
}
#[doc = "AES DMA Raw Interrupt Status"]
pub mod aes_dmaris;
#[doc = "AES DMA Masked Interrupt Status"]
pub struct AES_DMAMIS {
    register: vcell::VolatileCell<u32>,
}
#[doc = "AES DMA Masked Interrupt Status"]
pub mod aes_dmamis;
#[doc = "AES DMA Interrupt Clear"]
pub struct AES_DMAIC {
    register: vcell::VolatileCell<u32>,
}
#[doc = "AES DMA Interrupt Clear"]
pub mod aes_dmaic;
|
//import module 'world' in this file.
mod world;
fn main() {
    // The world must be mutable: add_wall/add_player modify its item vector.
    let mut game_world = world::world::World::new();
    game_world.add_wall();
    game_world.add_player();
    // World implements Display, so it can be printed directly.
    println!("Printing the world:{}", game_world);
}
|
use id_types::*;
//test
// A movable entity: position, velocity, extent, and status flags.
#[derive(Copy, Clone, Debug)]
pub struct Mover{
    // Position coordinates. NOTE(review): origin/anchor convention (corner vs
    // center) is not visible here — confirm in the systems that use it.
    pub x: f64,
    pub y: f64,
    // Velocity components.
    pub xspeed: f64,
    pub yspeed: f64,
    // Extent in integer units.
    pub width: i64,
    pub height: i64,
    // NOTE(review): presumably `solid` gates collisions and `disabled` skips
    // updates (see DisabledMover below) — confirm against the update loop.
    pub solid: bool,
    pub disabled: bool
}
// Builder mirror of `Mover` with identical fields.
// NOTE(review): no builder methods are visible in this chunk — confirm they
// live elsewhere, otherwise this duplicates Mover for no benefit.
pub struct MoverBuilder{
    pub x: f64,
    pub y: f64,
    pub xspeed: f64,
    pub yspeed: f64,
    pub width: i64,
    pub height: i64,
    pub solid: bool,
    pub disabled: bool
}
// A mover orbiting another mover, addressed by MoverID handles (from id_types).
pub struct Orbiter{
    // The orbiting mover and the mover being orbited.
    pub mover_id: MoverID,
    pub orbiting_id: MoverID,
    // Orbit radius, angular velocity, and current angle.
    // NOTE(review): units (radians vs degrees, per-step vs per-second) are not
    // visible here — confirm in the update code.
    pub radius: f64,
    pub angular_velocity: f64,
    pub angle: f64
}
// Simple step counter.
pub struct Timer{
    pub steps_passed: u32
}
// Pairs two mover handles.
// NOTE(review): the alpha/beta relationship (e.g. which one is disabled) is
// not visible in this chunk — confirm where DisabledMover is consumed.
pub struct DisabledMover{
    pub alpha_id: MoverID,
    pub beta_id: MoverID
}
|
/// Host-to-network conversion for a single byte. A `u8` is one byte, so byte
/// swapping is an identity; kept for API symmetry with multi-byte variants.
pub fn htonc(u: u8) -> u8 {
    u8::to_be(u)
}
/// Network-to-host conversion for a single byte — an identity for `u8`,
/// mirroring `htonc`.
pub fn ntohc(u: u8) -> u8 {
    let network_byte = u;
    u8::from_be(network_byte)
}
/// Returns bit `idx` of `u`, counting from the most-significant bit
/// (idx 0 = MSB, idx 7 = LSB). `idx` must be <= 7.
pub fn get(u: u8, idx: u8) -> bool {
    let mask = 1u8 << (7 - idx);
    (u & mask) != 0
}
/// Combines two bytes into a `u16`, with `u` as the high byte and `v` as the
/// low byte (big-endian order).
pub fn combine(u: u8, v: u8) -> u16 {
    u16::from_be_bytes([u, v])
}
/// Combines four bytes into a `u32` in big-endian order
/// (`a` most significant, `d` least significant).
pub fn combine32(a: u8, b: u8, c: u8, d: u8) -> u32 {
    u32::from_be_bytes([a, b, c, d])
}
|
use crate::bytes;
use crate::compress::{max_compress_len, Encoder};
use crate::crc32::CheckSummer;
use crate::error::Error;
use crate::MAX_BLOCK_SIZE;
/// The maximum chunk of compressed bytes that can be processed at one time.
///
/// This is computed via `max_compress_len(MAX_BLOCK_SIZE)`.
///
/// TODO(ag): Replace with const fn once they support nominal branching.
pub const MAX_COMPRESS_BLOCK_SIZE: usize = 76490;
/// The special magic string that starts any stream.
///
/// This may appear more than once in a stream in order to support easy
/// concatenation of files compressed in the Snappy frame format.
///
/// Layout: chunk type 0xFF, 24-bit little-endian body length (6), then the
/// 6-byte body "sNaPpY".
pub const STREAM_IDENTIFIER: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY";
/// The body of the special stream identifier.
pub const STREAM_BODY: &'static [u8] = b"sNaPpY";
/// The length of a snappy chunk type (1 byte), packet length (3 bytes)
/// and CRC field (4 bytes). This is technically the chunk header _plus_
/// the CRC present in most chunks.
pub const CHUNK_HEADER_AND_CRC_SIZE: usize = 8;
/// An enumeration describing each of the 4 main chunk types.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ChunkType {
    Stream = 0xFF,
    Compressed = 0x00,
    Uncompressed = 0x01,
    Padding = 0xFE,
}
impl ChunkType {
    /// Converts a byte to one of the four defined chunk types represented by
    /// a single byte. If the chunk type is reserved, then it is returned as
    /// an Err.
    pub fn from_u8(b: u8) -> Result<ChunkType, u8> {
        let chunk_type = match b {
            0xFF => ChunkType::Stream,
            0x00 => ChunkType::Compressed,
            0x01 => ChunkType::Uncompressed,
            0xFE => ChunkType::Padding,
            reserved => return Err(reserved),
        };
        Ok(chunk_type)
    }
}
/// Compress a single frame (or decide to pass it through uncompressed). This
/// will output a frame header in `dst_chunk_header`, and it will return a slice
/// pointing to the data to use in the frame. The `dst_chunk_header` array must
/// always have a size of 8 bytes.
///
/// If `always_use_dst` is set to false, the return value may point into either
/// `src` (for data we couldn't compress) or into `dst` (for data we could
/// compress). If `always_use_dst` is true, the data will always be in `dst`.
/// This is a bit weird, but because of Rust's ownership rules, it's easiest
/// for a single function to always be in charge of writing to `dst`.
pub fn compress_frame<'a>(
    enc: &mut Encoder,
    checksummer: CheckSummer,
    src: &'a [u8],
    dst_chunk_header: &mut [u8],
    dst: &'a mut [u8],
    always_use_dst: bool,
) -> Result<&'a [u8], Error> {
    // This is a purely internal function, with a bunch of preconditions.
    assert!(src.len() <= MAX_BLOCK_SIZE);
    assert!(dst.len() >= max_compress_len(MAX_BLOCK_SIZE));
    assert_eq!(dst_chunk_header.len(), CHUNK_HEADER_AND_CRC_SIZE);
    // Build a checksum of our _uncompressed_ data.
    let checksum = checksummer.crc32c_masked(src);
    // Compress the buffer. If compression sucked, throw it out and
    // write uncompressed bytes instead. Since our buffer is at most
    // MAX_BLOCK_SIZE and our dst buffer has size
    // max_compress_len(MAX_BLOCK_SIZE), we have enough space.
    let compress_len = enc.compress(src, dst)?;
    // Use the compressed form only if it saves at least src.len()/8
    // (~12.5%); otherwise emit the original bytes as an Uncompressed chunk.
    let (chunk_type, chunk_len) =
        // We add 4 to the chunk_len because of the checksum.
        if compress_len >= src.len() - (src.len() / 8) {
            (ChunkType::Uncompressed, 4 + src.len())
        } else {
            (ChunkType::Compressed, 4 + compress_len)
        };
    // Header layout: type byte, 24-bit little-endian length, 32-bit checksum.
    dst_chunk_header[0] = chunk_type as u8;
    bytes::write_u24_le(chunk_len as u32, &mut dst_chunk_header[1..]);
    bytes::write_u32_le(checksum, &mut dst_chunk_header[4..]);
    // Return the data to put in our frame.
    if chunk_type == ChunkType::Compressed {
        Ok(&dst[0..compress_len])
    } else if always_use_dst {
        dst[..src.len()].copy_from_slice(src);
        Ok(&dst[..src.len()])
    } else {
        Ok(src)
    }
}
|
//! Utility methods, mostly for dealing with IO.
/// Parses `$field` with `FromStr::from_str` (one-argument form) or a
/// caller-supplied parser path, mapping any parse failure to an
/// `ErrorKind::InvalidInput` error naming the offending field.
///
/// NOTE: the expansion ends in `?`, so the surrounding function must return a
/// `Result` whose error type converts from the `Error` in scope, and
/// `Error`/`ErrorKind`/`FromStr` must be imported at the call site.
macro_rules! try_parse {
    ($field:expr) => {
        try_parse!($field, FromStr::from_str)
    };
    ($field:expr, $from_str:path) => {
        match $from_str($field) {
            Ok(result) => Ok(result),
            Err(_) => Err(Error::new(
                ErrorKind::InvalidInput,
                format!("Could not parse {:?}", $field),
            )),
        }?
    };
}
|
//! Dialect
use serde::{Deserialize, Serialize};
/// Dialect are options to change the default CSV output format;
/// <https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions>
// All fields are optional and skipped when serializing if unset; the derived
// Default therefore yields an empty dialect (every field None).
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Dialect {
    /// If true, the results will contain a header row
    #[serde(skip_serializing_if = "Option::is_none")]
    pub header: Option<bool>,
    /// Separator between cells; the default is ,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delimiter: Option<String>,
    /// <https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns>
    #[serde(skip_serializing_if = "Option::is_none")]
    pub annotations: Option<Vec<Annotations>>,
    /// Character prefixed to comment strings
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment_prefix: Option<String>,
    /// Format of timestamps
    #[serde(skip_serializing_if = "Option::is_none")]
    pub date_time_format: Option<DateTimeFormat>,
}
impl Dialect {
    /// Dialect are options to change the default CSV output format;
    /// <https://www.w3.org/TR/2015/REC-tabular-metadata-20151217/#dialect-descriptions>
    ///
    /// Equivalent to `Dialect::default()`: every option starts unset.
    pub fn new() -> Self {
        Self::default()
    }
}
/// <https://www.w3.org/TR/2015/REC-tabular-data-model-20151217/#columns>
// Serialized in camelCase ("group", "datatype", "default") per the serde attribute.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Annotations {
    /// Group Annotation
    Group,
    /// Datatype Annotation
    Datatype,
    /// Default Annotation
    Default,
}
/// Timestamp Format
// NOTE(review): unlike the other types here there is no
// #[serde(rename_all = "camelCase")], so variants serialize as
// "Rfc3339"/"Rfc3339Nano" — confirm that matches the expected wire format.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum DateTimeFormat {
    /// RFC3339
    Rfc3339,
    /// RFC3339Nano
    Rfc3339Nano,
}
|
pub mod file_state;
pub mod logic_files;
pub mod snes_state;
use crate::lttp::{
server_config::DataSourceType,
AppState,
};
use std::{
sync::Arc,
time::{
Duration,
Instant,
},
};
use tokio::time::{
self,
sleep,
};
use tracing::{
debug,
error,
};
/// Polls the configured data source (local file or QUsb2snes) in a loop,
/// sleeping `data_poll_rate` milliseconds between cycles.
#[tracing::instrument(skip(app_state))]
pub async fn game_state_poller(app_state: Arc<AppState>) {
    loop {
        let loop_start = Instant::now();
        debug!("Starting file update poll cycle");
        let server_config = match app_state.server_config.read() {
            Ok(sc) => sc.clone(),
            Err(e) => {
                error!("Unable to get server config for game state polling: {:?}", e);
                // Bug fix: jumping straight to `continue` skipped the sleep at
                // the bottom of the loop, busy-spinning this task forever once
                // the lock was poisoned. Back off before retrying.
                sleep(Duration::from_millis(1_000)).await;
                continue;
            }
        };
        match server_config.source_type {
            DataSourceType::LocalFile => {
                file_state::poll_status(app_state.clone(), &server_config.data_source);
            }
            DataSourceType::QUsb2snes => {
                // SNES polling is fallible; log and keep polling on failure.
                if let Err(e) =
                    snes_state::poll_status(app_state.clone(), &server_config.data_source).await
                {
                    error!("Problem fetching SNES state: {:?}", e);
                };
            }
        }
        debug!("File update poll cycle completed in: {:?}", loop_start.elapsed());
        sleep(Duration::from_millis(server_config.data_poll_rate.into())).await;
    }
}
/// Once a second, connects to the QUsb2snes server and refreshes the device
/// list stored in the shared server config. Missed ticks are skipped rather
/// than bursting to catch up.
#[tracing::instrument(skip(app_state))]
pub async fn device_list_poller(app_state: Arc<AppState>) {
    let mut timer = time::interval(time::Duration::from_millis(1_000));
    timer.set_missed_tick_behavior(time::MissedTickBehavior::Skip);
    loop {
        timer.tick().await;
        // A fresh client per iteration: connection failures simply retry on
        // the next tick (debug level — the server may legitimately be absent).
        let mut client = match qusb2snes_client::Client::new().await {
            Ok(c) => c,
            Err(e) => {
                debug!("Error connecting to QUsb2snes server: {:?}", e);
                continue;
            }
        };
        if let Ok(dev_list) = client.device_list().await {
            // Only take the write lock once we actually have a new list.
            let mut server_config = match app_state.server_config.write() {
                Ok(sc) => sc,
                Err(e) => {
                    error!("Unable to get server config to update device list: {:?}", e);
                    continue;
                }
            };
            server_config.qusb_devices = dev_list;
        } else {
            debug!("Unable to get device list from QUsb2snes server.");
            continue;
        };
    }
}
|
use nom::types::CompleteStr;
pub mod opcode_parsers;
pub mod register_parsers;
pub mod operand_parsers;
pub mod instruction_parsers;
pub mod program_parsers;
pub mod label_parsers;
pub mod directive_parsers;
pub mod assembler_errors;
pub mod symbols;
use crate::instruction::Opcode;
use program_parsers::{program, Program};
use instruction_parsers::{AssemblerInstruction};
use assembler_errors::AssemblerError;
use symbols::{Symbol, SymbolTable, SymbolType};
// Magic-number prefix for assembled binaries.
// NOTE(review): decimal [45, 50, 49, 45] is ASCII "-21-"; if "EPIE" was the
// intent these should be hex (0x45, 0x50, 0x49, 0x45). The tests below only
// assert on total length, so confirm before changing.
pub const PIE_HEADER_PREFIX: [u8; 4] = [45, 50, 49, 45];
// Total header size in bytes; the prefix is zero-padded up to this length.
pub const PIE_HEADER_LENGTH: usize = 64;
// Token kinds produced by the nom-based parsers.
#[derive(Debug, PartialEq)]
pub enum Token {
    // Opcode mnemonic (e.g. `load`, `inc`, `hlt`).
    Op {code: Opcode},
    // `$n` register reference.
    Register { reg_num: u8 },
    // `#n` integer literal operand.
    IntegerOperand { value: i32 },
    // `name:` — declares a label.
    LabelDeclaration { name: String },
    // `@name` — references a declared label.
    LabelUsage { name: String },
    // `.name` — assembler directive (e.g. `.data`, `.code`, `.asciiz`).
    Directive { name: String },
    // Quoted string constant (used with `.asciiz`).
    IrString { name: String }
}
// State for the two-phase assembler.
#[derive(Debug, Default)]
pub struct Assembler {
    // Which pass is currently running (First = symbols, Second = codegen).
    phase: AssemblerPhase,
    // Label -> offset table consulted when encoding instructions.
    pub symbols: SymbolTable,
    // Read-only data section bytes (null-terminated string constants).
    pub ro: Vec<u8>,
    // Emitted machine code.
    // NOTE(review): not written by any method in this file — assemble()
    // returns the body directly; confirm whether this field is still used.
    pub bytecode: Vec<u8>,
    // Next free offset within `ro`.
    ro_offset: u32,
    // Sections declared so far; exactly two (data + code) are required.
    sections: Vec<AssemblerSection>,
    // Section currently being assembled into, if any.
    current_section: Option<AssemblerSection>,
    // Index of the instruction being processed (for error reporting).
    current_instruction: u32,
    // Errors accumulated during processing.
    errors: Vec<AssemblerError>
}
// The two assembler passes: symbol/section collection, then code emission.
#[derive(Debug, PartialEq)]
pub enum AssemblerPhase {
    First,
    Second
}
// A declared program section; `Unknown` marks unrecognized section names.
#[derive(Debug, PartialEq, Clone)]
pub enum AssemblerSection {
    Data { starting_instruction: Option<u32> },
    Code { starting_instruction: Option<u32> },
    Unknown
}
impl Assembler {
    /// Creates an assembler positioned at the start of the first pass.
    pub fn new() -> Assembler {
        Assembler {
            current_instruction: 0,
            ro_offset: 0,
            ro: vec![],
            bytecode: vec![],
            sections: vec![],
            errors: vec![],
            phase: AssemblerPhase::First,
            symbols: SymbolTable::new(),
            current_section: None
        }
    }
    /// Assembles `raw` source into a binary: fixed PIE header followed by the
    /// bytecode. On failure, returns every error accumulated so far.
    pub fn assemble(&mut self, raw: &str) -> Result<Vec<u8>, Vec<AssemblerError>> {
        match program(CompleteStr(raw)) {
            Ok((_remainder, program)) => {
                // Header first; the body is appended after both passes run.
                let mut assembled_program = self.write_pie_header();
                // Pass one: collect labels and sections, accumulating errors.
                self.process_first_phase(&program);
                if !self.errors.is_empty() {
                    return Err(self.errors.clone());
                }
                // Exactly two sections (data and code) must have been declared.
                if self.sections.len() != 2 {
                    println!("Did not find at least two sections.");
                    self.errors.push(AssemblerError::InsufficientSections);
                    return Err(self.errors.clone());
                }
                // Pass two: emit bytecode now that all symbols are known.
                let mut body = self.process_second_phase(&program);
                assembled_program.append(&mut body);
                Ok(assembled_program)
            },
            Err(e) => {
                println!("There was an error assembling the code: {:?}", e);
                Err(vec![AssemblerError::ParseError{ error: e.to_string() }])
            }
        }
    }
    /// Pass one: record label declarations and process directives so the
    /// symbol table is complete before code generation.
    fn process_first_phase(&mut self, p: &Program) {
        for i in &p.instructions {
            if i.is_label() {
                // Labels are only legal once a section has been declared.
                if self.current_section.is_some() {
                    self.process_label_declaration(&i);
                } else {
                    self.errors.push(AssemblerError::NoSegmentDeclarationFound{ instruction: self.current_instruction });
                }
            }
            if i.is_directive() {
                self.process_directive(i);
            }
            self.current_instruction += 1;
        }
        self.phase = AssemblerPhase::Second;
    }
    /// Pass two: encode opcodes into bytes. Directives are dispatched again,
    /// but `handle_asciiz` is a no-op outside the first phase.
    fn process_second_phase(&mut self, p: &Program) -> Vec<u8> {
        self.current_instruction = 0;
        let mut program = vec![];
        for i in &p.instructions {
            if i.is_opcode() {
                let mut bytes = i.to_bytes(&self.symbols);
                program.append(&mut bytes);
            }
            if i.is_directive() {
                self.process_directive(i);
            }
            self.current_instruction += 1;
        }
        program
    }
    /// Records a label declaration in the symbol table; duplicates are errors.
    fn process_label_declaration(&mut self, i: &AssemblerInstruction) {
        let name = match i.get_label_name() {
            Some(name) => { name },
            None => {
                self.errors.push(AssemblerError::StringConstantDeclaredWithoutLabel{ instruction: self.current_instruction });
                return;
            }
        };
        if self.symbols.has_symbol(&name) {
            self.errors.push(AssemblerError::SymbolAlreadyDeclared);
            return;
        }
        let symbol = Symbol::new(name, SymbolType::Label);
        self.symbols.add_symbol(symbol);
    }
    /// Dispatches a directive: operand-carrying directives (`asciiz`) store
    /// data; bare directives (`data`/`code`) open a section.
    fn process_directive(&mut self, i: &AssemblerInstruction) {
        let directive_name = match i.get_directive_name() {
            Some(name) => {
                name
            },
            None => {
                println!("Directive has an invalid name: {:?}", i);
                return;
            }
        };
        if i.has_operands() {
            match directive_name.as_ref() {
                "asciiz" => {
                    self.handle_asciiz(i);
                },
                _ => {
                    self.errors.push(AssemblerError::UnknownDirectiveFound{ directive: directive_name.clone() });
                    return;
                }
            }
        } else {
            self.process_section_header(&directive_name);
        }
    }
    /// Opens a new section; unrecognized names are reported and ignored.
    fn process_section_header(&mut self, header_name: &str) {
        let new_section: AssemblerSection = header_name.into();
        if new_section == AssemblerSection::Unknown {
            println!("Found a section header is unknown: {:?}", header_name);
            return;
        }
        self.sections.push(new_section.clone());
        self.current_section = Some(new_section);
    }
    /// Stores a null-terminated string constant in the read-only section and
    /// points the preceding label's symbol at its offset. First phase only.
    fn handle_asciiz(&mut self, i: &AssemblerInstruction) {
        if self.phase != AssemblerPhase::First { return; };
        match i.get_string_constant() {
            Some(s) => {
                match i.get_label_name() {
                    Some(name) => { self.symbols.set_symbol_offset(&name, self.ro_offset); },
                    None => {
                        println!("Found a string constant with no associated label!");
                        return;
                    }
                };
                for byte in s.as_bytes() {
                    self.ro.push(*byte);
                    self.ro_offset += 1;
                }
                // Null terminator.
                self.ro.push(0);
                self.ro_offset += 1;
            },
            None => {
                println!("String constant following an .asciiz was empty");
            }
        }
    }
    /// Builds the fixed-size header: the 4-byte magic prefix, zero-padded to
    /// PIE_HEADER_LENGTH bytes.
    fn write_pie_header(&self) -> Vec<u8> {
        let mut header = vec![];
        for byte in &PIE_HEADER_PREFIX {
            header.push(byte.clone());
        }
        while header.len() < PIE_HEADER_LENGTH {
            header.push(0 as u8);
        }
        println!("Header length: {}", header.len());
        header
    }
}
impl Default for AssemblerPhase {
    // A fresh assembler always begins in the symbol-gathering pass.
    fn default() -> AssemblerPhase {
        AssemblerPhase::First
    }
}
impl Default for AssemblerSection {
    // No section is active until a header directive is seen.
    fn default() -> Self {
        AssemblerSection::Unknown
    }
}
impl<'a> From<&'a str> for AssemblerSection {
    /// Maps a section-directive name to its section kind; anything other
    /// than "data" or "code" becomes `Unknown`.
    fn from(name: &str) -> AssemblerSection {
        match name {
            "data" => AssemblerSection::Data { starting_instruction: None },
            "code" => AssemblerSection::Code { starting_instruction: None },
            _ => AssemblerSection::Unknown,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::vm::VM;
    // Symbol table add/lookup round-trip, including a miss.
    #[test]
    fn test_symbol_table() {
        let mut sym = SymbolTable::default();
        let new_symbol = Symbol::new_with_offset("test".to_string(), SymbolType::Label, 12);
        sym.add_symbol(new_symbol);
        assert_eq!(sym.symbols.len(), 1);
        let v = sym.symbol_value("test");
        assert_eq!(true, v.is_some());
        let v = v.unwrap();
        assert_eq!(v, 12);
        let v = sym.symbol_value("does_not_exist");
        assert_eq!(false, v.is_some());
    }
    // End-to-end assembly: 64-byte header plus 7 instructions * 4 bytes = 92.
    #[test]
    fn test_assemble_program() {
        let mut asm = Assembler::new();
        let test_string = ".data\n.code\nload $0 #100\nload $1 #1\nload $2 #0\ntest: inc $0\nneq $0 $2\njmpe @test\nhlt";
        // let test_string = "test: inc $0\njmp test";
        println!("Attempting to assemble: {:?}", test_string);
        let program = asm.assemble(test_string).unwrap();
        println!("Assembled Program: {:?}", program);
        let mut vm = VM::default();
        assert_eq!(program.len(), 92);
        vm.add_bytes(program);
        assert_eq!(vm.program.len(), 92)
    }
    // .asciiz inside a .data section assembles cleanly.
    #[test]
    fn test_ro_data() {
        let mut asm = Assembler::new();
        let test_string = ".data\ntest: .asciiz 'This is a test'\n.code\n";
        let program = asm.assemble(test_string);
        assert_eq!(program.is_ok(), true);
    }
    // Unknown directive (.wrong) must make assembly fail.
    #[test]
    fn test_bad_ro_data() {
        let mut asm = Assembler::new();
        let test_string = ".code\ntest: .asciiz 'This is a test'\n.wrong\n";
        let program = asm.assemble(test_string);
        assert_eq!(program.is_ok(), false);
    }
    // A label before any section declaration is a phase-one error.
    #[test]
    fn test_first_phase_no_segment() {
        let mut asm = Assembler::new();
        let test_string = "hello: .asciiz 'Fail'";
        let result = program(CompleteStr(test_string));
        assert_eq!(result.is_ok(), true);
        let (_, p) = result.unwrap();
        asm.process_first_phase(&p);
        assert_eq!(asm.errors.len(), 1);
    }
    // The same label inside a declared section produces no errors.
    #[test]
    fn test_first_phase_inside_segment() {
        let mut asm = Assembler::new();
        let test_string = ".data\ntest: .asciiz 'Hello'";
        let result = program(CompleteStr(test_string));
        assert_eq!(result.is_ok(), true);
        let (_, p) = result.unwrap();
        asm.process_first_phase(&p);
        assert_eq!(asm.errors.len(), 0);
    }
}
|
//! `ipfs-http` http API implementation.
//!
//! This crate is most useful as a binary used first and foremost for compatibility testing against
//! other ipfs implementations.
#[macro_use]
extern crate tracing;
pub mod v0;
pub mod config;
|
/// Returns true when `seq` begins with at least `N` pairwise-distinct bytes.
fn sequence_has_n_unique<const N: usize>(seq: &[u8]) -> bool {
    // Bug fix: a tail shorter than N bytes can never be a full marker window.
    // Previously `take(N.min(seq.len()))` accepted short windows, making
    // `find_marker` return a bogus index for inputs with no marker instead of
    // panicking.
    if seq.len() < N {
        return false;
    }
    for (i, &ch) in seq.iter().take(N).enumerate() {
        // Reject as soon as a byte repeats within the window prefix.
        if seq[..i].contains(&ch) {
            return false;
        }
    }
    true
}
/// Returns the 1-based position just past the first window of N unique
/// characters; panics if no marker exists.
fn find_marker<const N: usize>(input: &str) -> usize {
    let bytes = input.as_bytes();
    match (0..bytes.len()).find(|&start| sequence_has_n_unique::<N>(&bytes[start..])) {
        Some(start) => start + N,
        None => panic!("Couldn't find marker"),
    }
}
// Part 1: characters consumed up to and including the first 4-byte
// start-of-packet marker.
fn part1(input: &str) -> usize {
    find_marker::<4>(input)
}
// Part 2: same as part 1 but with the 14-byte start-of-message marker.
fn part2(input: &str) -> usize {
    find_marker::<14>(input)
}
// Regression table: each literal input maps to its expected marker index;
// `in => N` pins the answer for the real puzzle input file.
aoc::tests! {
    fn part1:
    "mjqjpqmgbljsphdztnvjfqwrcgsmlb" => 7;
    "bvwbjplbgvbhsrlpgdmjqwftvncz" => 5;
    "nppdvjthqldpwncqszvftbrmjlhg" => 6;
    "nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg" => 10;
    "zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw" => 11;
    in => 1766;
    fn part2:
    "mjqjpqmgbljsphdztnvjfqwrcgsmlb" => 19;
    "bvwbjplbgvbhsrlpgdmjqwftvncz" => 23;
    "nppdvjthqldpwncqszvftbrmjlhg" => 23;
    "nznrnfrfntjfmvfwmzdfjlvtqnbhcprsg" => 29;
    "zcfzfwzzqfrljwzlrfnpqdbhtmscgvjw" => 26;
    in => 2383;
}
aoc::main!(part1, part2);
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{err_msg, Error, Fail, ResultExt},
fuchsia_async::{DurationExt, TimeoutExt},
fuchsia_bluetooth::{
error::Error as BTError, expectation::asynchronous::ExpectableStateExt, types::Address,
},
fuchsia_zircon::{Duration, DurationNum},
futures::TryFutureExt,
};
use crate::harness::low_energy_central::CentralHarness;
// Predicates over the observed CentralState, used with when_satisfied below.
mod central_expectation {
    use crate::harness::low_energy_central::{CentralState, ScanStateChange};
    use fuchsia_bluetooth::expectation::Predicate;
    use fuchsia_bluetooth::types::le::RemoteDevice;
    // Satisfied once the most recent scan-state change enabled scanning.
    pub fn scan_enabled() -> Predicate<CentralState> {
        Predicate::new(
            |state: &CentralState| -> bool {
                !state.scan_state_changes.is_empty()
                    && state.scan_state_changes.last() == Some(&ScanStateChange::ScanEnabled)
            },
            Some("Scan was enabled"),
        )
    }
    // Satisfied once the most recent scan-state change disabled scanning.
    pub fn scan_disabled() -> Predicate<CentralState> {
        Predicate::new(
            |state: &CentralState| -> bool {
                !state.scan_state_changes.is_empty()
                    && state.scan_state_changes.last() == Some(&ScanStateChange::ScanDisabled)
            },
            Some("Scan was disabled"),
        )
    }
    // Satisfied when any discovered peer advertises `expected_name`.
    pub fn device_found(expected_name: &str) -> Predicate<CentralState> {
        let expected_name = expected_name.to_string();
        let msg = format!("Peer '{}' has been discovered", expected_name);
        // Compare against the advertised name, if the peer has advertising data.
        let has_expected_name = move |peer: &RemoteDevice| -> bool {
            peer.advertising_data
                .as_ref()
                .and_then(|ad| ad.name.as_ref())
                .iter()
                .any(|&name| name == &expected_name)
        };
        Predicate::new(
            move |state: &CentralState| -> bool {
                !state.remote_devices.is_empty()
                    && state.remote_devices.iter().any(&has_expected_name)
            },
            Some(&msg),
        )
    }
}
// How long to wait for any single scan operation or expectation before failing.
fn scan_timeout() -> Duration {
    10.seconds()
}
// Asks the central to start a general LE scan; fails if the FIDL call errors,
// the call times out, or the returned status carries an error.
async fn start_scan(central: &CentralHarness) -> Result<(), Error> {
    let status = central
        .aux()
        .proxy()
        .start_scan(None)
        .map_err(|e| e.context("FIDL error sending command").into())
        .on_timeout(scan_timeout().after_now(), move || Err(err_msg("Timed out")))
        .await
        .context("Could not initialize scan")?;
    // The FIDL call can succeed while the status payload still reports failure.
    if let Some(e) = status.error {
        return Err(BTError::from(*e).into());
    }
    Ok(())
}
// Adds a fake LE peer, starts a scan, and expects both that scanning is
// reported enabled and that the fake peer (named "Fake") is discovered.
async fn test_enable_scan(central: CentralHarness) -> Result<(), Error> {
    let address = Address::Random([1, 0, 0, 0, 0, 0]);
    let _peer = central.aux().add_le_peer_default(&address).await?;
    start_scan(&central).await?;
    let _ = central
        .when_satisfied(
            central_expectation::scan_enabled().and(central_expectation::device_found("Fake")),
            scan_timeout(),
        )
        .await?;
    Ok(())
}
// Starts a scan, waits for the enabled state, stops it, and waits for the
// disabled state to be observed.
async fn test_enable_and_disable_scan(central: CentralHarness) -> Result<(), Error> {
    start_scan(&central).await?;
    let _ = central.when_satisfied(central_expectation::scan_enabled(), scan_timeout()).await?;
    let _ = central.aux().proxy().stop_scan()?;
    let _ = central.when_satisfied(central_expectation::scan_disabled(), scan_timeout()).await?;
    Ok(())
}
/// Run all test cases.
// NOTE(review): run_suite! is defined elsewhere; presumed to run each listed
// case against its own CentralHarness — confirm at the macro definition.
pub fn run_all() -> Result<(), Error> {
    run_suite!("le.Central", [test_enable_scan, test_enable_and_disable_scan])
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtGui/qcursor.h
// dst-file: /src/gui/qcursor.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use std::ops::Deref;
use super::qscreen::*; // 773
use super::qpixmap::*; // 773
use super::qbitmap::*; // 773
use super::super::core::qpoint::*; // 771
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
// FFI bridge into the C++ shim library; each C_... symbol wraps the Qt method
// named in the preceding `proto:` comment (names follow the C++ mangling with
// a C_ prefix). Generated code — prefer regenerating over editing by hand.
extern {
    fn QCursor_Class_Size() -> c_int;
    // proto: static void QCursor::setPos(QScreen * screen, int x, int y);
    fn C_ZN7QCursor6setPosEP7QScreenii(arg0: *mut c_void, arg1: c_int, arg2: c_int);
    // proto: QPixmap QCursor::pixmap();
    fn C_ZNK7QCursor6pixmapEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: void QCursor::QCursor(const QBitmap & bitmap, const QBitmap & mask, int hotX, int hotY);
    fn C_ZN7QCursorC2ERK7QBitmapS2_ii(arg0: *mut c_void, arg1: *mut c_void, arg2: c_int, arg3: c_int) -> u64;
    // proto: void QCursor::QCursor(const QPixmap & pixmap, int hotX, int hotY);
    fn C_ZN7QCursorC2ERK7QPixmapii(arg0: *mut c_void, arg1: c_int, arg2: c_int) -> u64;
    // proto: void QCursor::~QCursor();
    fn C_ZN7QCursorD2Ev(qthis: u64 /* *mut c_void*/);
    // proto: const QBitmap * QCursor::mask();
    fn C_ZNK7QCursor4maskEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: void QCursor::QCursor(const QCursor & cursor);
    fn C_ZN7QCursorC2ERKS_(arg0: *mut c_void) -> u64;
    // proto: static void QCursor::setPos(int x, int y);
    fn C_ZN7QCursor6setPosEii(arg0: c_int, arg1: c_int);
    // proto: static void QCursor::setPos(QScreen * screen, const QPoint & p);
    fn C_ZN7QCursor6setPosEP7QScreenRK6QPoint(arg0: *mut c_void, arg1: *mut c_void);
    // proto: static void QCursor::setPos(const QPoint & p);
    fn C_ZN7QCursor6setPosERK6QPoint(arg0: *mut c_void);
    // proto: const QBitmap * QCursor::bitmap();
    fn C_ZNK7QCursor6bitmapEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
    // proto: static QPoint QCursor::pos(const QScreen * screen);
    fn C_ZN7QCursor3posEPK7QScreen(arg0: *mut c_void) -> *mut c_void;
    // proto: static QPoint QCursor::pos();
    fn C_ZN7QCursor3posEv() -> *mut c_void;
    // proto: void QCursor::QCursor();
    fn C_ZN7QCursorC2Ev() -> u64;
    // proto: QPoint QCursor::hotSpot();
    fn C_ZNK7QCursor7hotSpotEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
} // <= ext block end
// body block begin =>
// class sizeof(QCursor)=8
// Thin handle around the C++ QCursor instance pointer (stored as u64).
#[derive(Default)]
pub struct QCursor {
    // qbase: None,
    pub qclsinst: u64 /* *mut c_void*/,
}
impl /*struct*/ QCursor {
    /// Wraps an existing C++ QCursor instance pointer in a Rust handle.
    pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QCursor {
        QCursor { qclsinst: qthis, ..Default::default() }
    }
}
// proto: static void QCursor::setPos(QScreen * screen, int x, int y);
// Overload machinery: setPos_s dispatches to whichever QCursor_setPos_s impl
// matches the argument tuple's type.
impl /*struct*/ QCursor {
    pub fn setPos_s<RetType, T: QCursor_setPos_s<RetType>>( overload_args: T) -> RetType {
        return overload_args.setPos_s();
        // return 1;
    }
}
pub trait QCursor_setPos_s<RetType> {
    fn setPos_s(self ) -> RetType;
}
// proto: static void QCursor::setPos(QScreen * screen, int x, int y);
impl<'a> /*trait*/ QCursor_setPos_s<()> for (&'a QScreen, i32, i32) {
    fn setPos_s(self ) -> () {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN7QCursor6setPosEP7QScreenii()};
        let arg0 = self.0.qclsinst as *mut c_void;
        let arg1 = self.1 as c_int;
        let arg2 = self.2 as c_int;
        unsafe {C_ZN7QCursor6setPosEP7QScreenii(arg0, arg1, arg2)};
        // return 1;
    }
}
// proto: QPixmap QCursor::pixmap();
// Overload machinery for QCursor::pixmap(); the () impl is the only overload.
impl /*struct*/ QCursor {
    pub fn pixmap<RetType, T: QCursor_pixmap<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.pixmap(self);
        // return 1;
    }
}
pub trait QCursor_pixmap<RetType> {
    fn pixmap(self , rsthis: & QCursor) -> RetType;
}
// proto: QPixmap QCursor::pixmap();
impl<'a> /*trait*/ QCursor_pixmap<QPixmap> for () {
    fn pixmap(self , rsthis: & QCursor) -> QPixmap {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK7QCursor6pixmapEv()};
        // Wrap the returned C++ QPixmap pointer in a Rust handle.
        let mut ret = unsafe {C_ZNK7QCursor6pixmapEv(rsthis.qclsinst)};
        let mut ret1 = QPixmap::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: void QCursor::QCursor(const QBitmap & bitmap, const QBitmap & mask, int hotX, int hotY);
// Constructor dispatcher: the concrete constructor is selected by the tuple
// type implementing QCursor_new.
impl /*struct*/ QCursor {
    pub fn new<T: QCursor_new>(value: T) -> QCursor {
        let rsthis = value.new();
        return rsthis;
        // return 1;
    }
}
pub trait QCursor_new {
    fn new(self) -> QCursor;
}
// proto: void QCursor::QCursor(const QBitmap & bitmap, const QBitmap & mask, int hotX, int hotY);
// Optional hot-spot coordinates default to -1 when None.
impl<'a> /*trait*/ QCursor_new for (&'a QBitmap, &'a QBitmap, Option<i32>, Option<i32>) {
    fn new(self) -> QCursor {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN7QCursorC2ERK7QBitmapS2_ii()};
        // NOTE(review): qthis_ph is calloc'd but never used or freed — looks
        // like dead generated code that leaks per call; confirm with the generator.
        let ctysz: c_int = unsafe{QCursor_Class_Size()};
        let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
        let arg0 = self.0.qclsinst as *mut c_void;
        let arg1 = self.1.qclsinst as *mut c_void;
        let arg2 = (if self.2.is_none() {-1} else {self.2.unwrap()}) as c_int;
        let arg3 = (if self.3.is_none() {-1} else {self.3.unwrap()}) as c_int;
        let qthis: u64 = unsafe {C_ZN7QCursorC2ERK7QBitmapS2_ii(arg0, arg1, arg2, arg3)};
        let rsthis = QCursor{qclsinst: qthis, ..Default::default()};
        return rsthis;
        // return 1;
    }
}
// proto: void QCursor::QCursor(const QPixmap & pixmap, int hotX, int hotY);
// Pixmap constructor; optional hot-spot coordinates default to -1 when None.
impl<'a> /*trait*/ QCursor_new for (&'a QPixmap, Option<i32>, Option<i32>) {
    fn new(self) -> QCursor {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN7QCursorC2ERK7QPixmapii()};
        // NOTE(review): qthis_ph is calloc'd but never used or freed — dead
        // generated code that leaks per call; confirm with the generator.
        let ctysz: c_int = unsafe{QCursor_Class_Size()};
        let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
        let arg0 = self.0.qclsinst as *mut c_void;
        let arg1 = (if self.1.is_none() {-1} else {self.1.unwrap()}) as c_int;
        let arg2 = (if self.2.is_none() {-1} else {self.2.unwrap()}) as c_int;
        let qthis: u64 = unsafe {C_ZN7QCursorC2ERK7QPixmapii(arg0, arg1, arg2)};
        let rsthis = QCursor{qclsinst: qthis, ..Default::default()};
        return rsthis;
        // return 1;
    }
}
// proto: void QCursor::~QCursor();
impl /*struct*/ QCursor {
pub fn free<RetType, T: QCursor_free<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.free(self);
// return 1;
}
}
pub trait QCursor_free<RetType> {
fn free(self , rsthis: & QCursor) -> RetType;
}
// proto: void QCursor::~QCursor();
impl<'a> /*trait*/ QCursor_free<()> for () {
fn free(self , rsthis: & QCursor) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursorD2Ev()};
unsafe {C_ZN7QCursorD2Ev(rsthis.qclsinst)};
// return 1;
}
}
// proto: const QBitmap * QCursor::mask();
impl /*struct*/ QCursor {
pub fn mask<RetType, T: QCursor_mask<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.mask(self);
// return 1;
}
}
pub trait QCursor_mask<RetType> {
fn mask(self , rsthis: & QCursor) -> RetType;
}
// proto: const QBitmap * QCursor::mask();
impl<'a> /*trait*/ QCursor_mask<QBitmap> for () {
fn mask(self , rsthis: & QCursor) -> QBitmap {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK7QCursor4maskEv()};
let mut ret = unsafe {C_ZNK7QCursor4maskEv(rsthis.qclsinst)};
// Wrap the raw pointer returned by the C shim in a Rust-side QBitmap handle.
let mut ret1 = QBitmap::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: void QCursor::QCursor(const QCursor & cursor);
impl<'a> /*trait*/ QCursor_new for (&'a QCursor) {
fn new(self) -> QCursor {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursorC2ERKS_()};
let ctysz: c_int = unsafe{QCursor_Class_Size()};
// NOTE(review): qthis_ph is allocated but unused below — apparent dead
// generator output; confirm.
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
// `(&'a QCursor)` is a plain reference (parens are not a 1-tuple), so
// `self` accesses the field directly rather than via `.0`.
let arg0 = self.qclsinst as *mut c_void;
let qthis: u64 = unsafe {C_ZN7QCursorC2ERKS_(arg0)};
let rsthis = QCursor{qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: static void QCursor::setPos(int x, int y);
impl<'a> /*trait*/ QCursor_setPos_s<()> for (i32, i32) {
fn setPos_s(self ) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursor6setPosEii()};
let arg0 = self.0 as c_int;
let arg1 = self.1 as c_int;
unsafe {C_ZN7QCursor6setPosEii(arg0, arg1)};
// return 1;
}
}
// proto: static void QCursor::setPos(QScreen * screen, const QPoint & p);
impl<'a> /*trait*/ QCursor_setPos_s<()> for (&'a QScreen, &'a QPoint) {
fn setPos_s(self ) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursor6setPosEP7QScreenRK6QPoint()};
let arg0 = self.0.qclsinst as *mut c_void;
let arg1 = self.1.qclsinst as *mut c_void;
unsafe {C_ZN7QCursor6setPosEP7QScreenRK6QPoint(arg0, arg1)};
// return 1;
}
}
// proto: static void QCursor::setPos(const QPoint & p);
impl<'a> /*trait*/ QCursor_setPos_s<()> for (&'a QPoint) {
fn setPos_s(self ) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursor6setPosERK6QPoint()};
let arg0 = self.qclsinst as *mut c_void;
unsafe {C_ZN7QCursor6setPosERK6QPoint(arg0)};
// return 1;
}
}
// proto: const QBitmap * QCursor::bitmap();
impl /*struct*/ QCursor {
pub fn bitmap<RetType, T: QCursor_bitmap<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.bitmap(self);
// return 1;
}
}
pub trait QCursor_bitmap<RetType> {
fn bitmap(self , rsthis: & QCursor) -> RetType;
}
// proto: const QBitmap * QCursor::bitmap();
impl<'a> /*trait*/ QCursor_bitmap<QBitmap> for () {
fn bitmap(self , rsthis: & QCursor) -> QBitmap {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK7QCursor6bitmapEv()};
let mut ret = unsafe {C_ZNK7QCursor6bitmapEv(rsthis.qclsinst)};
let mut ret1 = QBitmap::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: static QPoint QCursor::pos(const QScreen * screen);
impl /*struct*/ QCursor {
// Static-method wrapper (no receiver); the overload is chosen by the type of
// `overload_args`.
pub fn pos_s<RetType, T: QCursor_pos_s<RetType>>( overload_args: T) -> RetType {
return overload_args.pos_s();
// return 1;
}
}
pub trait QCursor_pos_s<RetType> {
fn pos_s(self ) -> RetType;
}
// proto: static QPoint QCursor::pos(const QScreen * screen);
impl<'a> /*trait*/ QCursor_pos_s<QPoint> for (&'a QScreen) {
fn pos_s(self ) -> QPoint {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursor3posEPK7QScreen()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZN7QCursor3posEPK7QScreen(arg0)};
let mut ret1 = QPoint::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: static QPoint QCursor::pos();
impl<'a> /*trait*/ QCursor_pos_s<QPoint> for () {
fn pos_s(self ) -> QPoint {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursor3posEv()};
let mut ret = unsafe {C_ZN7QCursor3posEv()};
let mut ret1 = QPoint::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: void QCursor::QCursor();
impl<'a> /*trait*/ QCursor_new for () {
fn new(self) -> QCursor {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN7QCursorC2Ev()};
let ctysz: c_int = unsafe{QCursor_Class_Size()};
// NOTE(review): qthis_ph is allocated but unused — apparent dead generator
// output; the shim call below supplies the real instance pointer.
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
let qthis: u64 = unsafe {C_ZN7QCursorC2Ev()};
let rsthis = QCursor{qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: QPoint QCursor::hotSpot();
impl /*struct*/ QCursor {
pub fn hotSpot<RetType, T: QCursor_hotSpot<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.hotSpot(self);
// return 1;
}
}
pub trait QCursor_hotSpot<RetType> {
fn hotSpot(self , rsthis: & QCursor) -> RetType;
}
// proto: QPoint QCursor::hotSpot();
impl<'a> /*trait*/ QCursor_hotSpot<QPoint> for () {
fn hotSpot(self , rsthis: & QCursor) -> QPoint {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK7QCursor7hotSpotEv()};
let mut ret = unsafe {C_ZNK7QCursor7hotSpotEv(rsthis.qclsinst)};
let mut ret1 = QPoint::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// <= body block end
|
extern crate iterator_to_hash_map;
use std::collections::HashMap;
use iterator_to_hash_map::ToHashMap;
// Sample record used by the test below to exercise ToHashMap.
struct Person {
// Unique identifier; used as the HashMap key in the test.
id: i32,
first_name: &'static str,
last_name: &'static str,
}
#[test]
fn it_works() {
    // Build two sample records to convert into a map keyed by `id`.
    let brad = Person {
        id: 1,
        first_name: "Brad",
        last_name: "Urani",
    };
    let barb = Person {
        id: 2,
        first_name: "Barb",
        last_name: "Hanover",
    };
    let a = vec![brad, barb];
    let key_func = |i: &Person| -> i32 { i.id };
    // `format!` replaces the original `to_string() + &" " + ...` chain:
    // one allocation, and no reliance on `&&str -> &str` deref coercion.
    let value_func = |i: &Person| -> String {
        format!("{} {}", i.first_name, i.last_name)
    };
    let map = a.to_hash_map(key_func, value_func);
    // Expected mapping, built by hand for comparison.
    let mut hm: HashMap<i32, String> = HashMap::new();
    hm.insert(1, "Brad Urani".to_string());
    hm.insert(2, "Barb Hanover".to_string());
    assert_eq!(map.get(&1).unwrap(), hm.get(&1).unwrap());
    assert_eq!(map.get(&2).unwrap(), hm.get(&2).unwrap());
}
|
// Auto-generated (svd2rust-style) register access API for PRIVCFGR2.
#[doc = "Reader of register PRIVCFGR2"]
pub type R = crate::R<u32, super::PRIVCFGR2>;
#[doc = "Writer for register PRIVCFGR2"]
pub type W = crate::W<u32, super::PRIVCFGR2>;
#[doc = "Register PRIVCFGR2 `reset()`'s with value 0"]
// The whole register resets to all-zero bits.
impl crate::ResetValue for super::PRIVCFGR2 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
// One reader alias (`PRIVxx_R`) and one write proxy (`PRIVxx_W`) per
// single-bit field PRIV32..PRIV42, occupying bits 0..=10 of PRIVCFGR2.
// The write proxies read-modify-write `w.bits` and return `&mut W` for chaining.
#[doc = "Reader of field `PRIV32`"]
pub type PRIV32_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV32`"]
pub struct PRIV32_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV32_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
// Bit 0: mask is written without a shift, unlike the fields below.
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `PRIV33`"]
pub type PRIV33_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV33`"]
pub struct PRIV33_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV33_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `PRIV34`"]
pub type PRIV34_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV34`"]
pub struct PRIV34_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV34_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `PRIV35`"]
pub type PRIV35_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV35`"]
pub struct PRIV35_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV35_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `PRIV36`"]
pub type PRIV36_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV36`"]
pub struct PRIV36_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV36_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `PRIV37`"]
pub type PRIV37_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV37`"]
pub struct PRIV37_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV37_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `PRIV38`"]
pub type PRIV38_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV38`"]
pub struct PRIV38_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV38_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `PRIV39`"]
pub type PRIV39_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV39`"]
pub struct PRIV39_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV39_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `PRIV40`"]
pub type PRIV40_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV40`"]
pub struct PRIV40_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV40_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `PRIV41`"]
pub type PRIV41_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV41`"]
pub struct PRIV41_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV41_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `PRIV42`"]
pub type PRIV42_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PRIV42`"]
pub struct PRIV42_W<'a> {
w: &'a mut W,
}
impl<'a> PRIV42_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
// Read accessors: each extracts one bit of the captured register value and
// wraps it in the corresponding `PRIVxx_R` reader.
impl R {
#[doc = "Bit 0 - PRIV32"]
#[inline(always)]
pub fn priv32(&self) -> PRIV32_R {
PRIV32_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - PRIV33"]
#[inline(always)]
pub fn priv33(&self) -> PRIV33_R {
PRIV33_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 2 - PRIV34"]
#[inline(always)]
pub fn priv34(&self) -> PRIV34_R {
PRIV34_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 3 - PRIV35"]
#[inline(always)]
pub fn priv35(&self) -> PRIV35_R {
PRIV35_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 4 - PRIV36"]
#[inline(always)]
pub fn priv36(&self) -> PRIV36_R {
PRIV36_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 5 - PRIV37"]
#[inline(always)]
pub fn priv37(&self) -> PRIV37_R {
PRIV37_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 6 - PRIV38"]
#[inline(always)]
pub fn priv38(&self) -> PRIV38_R {
PRIV38_R::new(((self.bits >> 6) & 0x01) != 0)
}
#[doc = "Bit 7 - PRIV39"]
#[inline(always)]
pub fn priv39(&self) -> PRIV39_R {
PRIV39_R::new(((self.bits >> 7) & 0x01) != 0)
}
#[doc = "Bit 8 - PRIV40"]
#[inline(always)]
pub fn priv40(&self) -> PRIV40_R {
PRIV40_R::new(((self.bits >> 8) & 0x01) != 0)
}
#[doc = "Bit 9 - PRIV41"]
#[inline(always)]
pub fn priv41(&self) -> PRIV41_R {
PRIV41_R::new(((self.bits >> 9) & 0x01) != 0)
}
#[doc = "Bit 10 - PRIV42"]
#[inline(always)]
pub fn priv42(&self) -> PRIV42_R {
PRIV42_R::new(((self.bits >> 10) & 0x01) != 0)
}
}
// Write accessors: each returns the write proxy for one field, borrowing the
// writer mutably so proxies mutate `self.bits` in place.
impl W {
#[doc = "Bit 0 - PRIV32"]
#[inline(always)]
pub fn priv32(&mut self) -> PRIV32_W {
PRIV32_W { w: self }
}
#[doc = "Bit 1 - PRIV33"]
#[inline(always)]
pub fn priv33(&mut self) -> PRIV33_W {
PRIV33_W { w: self }
}
#[doc = "Bit 2 - PRIV34"]
#[inline(always)]
pub fn priv34(&mut self) -> PRIV34_W {
PRIV34_W { w: self }
}
#[doc = "Bit 3 - PRIV35"]
#[inline(always)]
pub fn priv35(&mut self) -> PRIV35_W {
PRIV35_W { w: self }
}
#[doc = "Bit 4 - PRIV36"]
#[inline(always)]
pub fn priv36(&mut self) -> PRIV36_W {
PRIV36_W { w: self }
}
#[doc = "Bit 5 - PRIV37"]
#[inline(always)]
pub fn priv37(&mut self) -> PRIV37_W {
PRIV37_W { w: self }
}
#[doc = "Bit 6 - PRIV38"]
#[inline(always)]
pub fn priv38(&mut self) -> PRIV38_W {
PRIV38_W { w: self }
}
#[doc = "Bit 7 - PRIV39"]
#[inline(always)]
pub fn priv39(&mut self) -> PRIV39_W {
PRIV39_W { w: self }
}
#[doc = "Bit 8 - PRIV40"]
#[inline(always)]
pub fn priv40(&mut self) -> PRIV40_W {
PRIV40_W { w: self }
}
#[doc = "Bit 9 - PRIV41"]
#[inline(always)]
pub fn priv41(&mut self) -> PRIV41_W {
PRIV41_W { w: self }
}
#[doc = "Bit 10 - PRIV42"]
#[inline(always)]
pub fn priv42(&mut self) -> PRIV42_W {
PRIV42_W { w: self }
}
}
|
//! Module containing the `/ping` application command.
use crate::ApplicationCommandHandler;
use serenity::async_trait;
use serenity::model::guild::Guild;
use serenity::model::id::GuildId;
use serenity::model::interactions::application_command::ApplicationCommandInteraction;
use serenity::prelude::*;
use std::collections::HashSet;
use std::error::Error;
/// Keeps track of guilds for which this application command handler has already
/// been registered. When this application command is registered to a new guild,
/// the corresponding [`GuildId`] is added to [`PingRegisteredInfo::Value`]
#[doc(hidden)]
struct PingRegisteredInfo;
// Zero-sized marker key: the guild set lives in serenity's TypeMap, indexed by
// this type.
impl TypeMapKey for PingRegisteredInfo {
type Value = HashSet<GuildId>;
}
/// A basic [`ApplicationCommandHandler`], intended for demonstration purposes.
///
/// This [`ApplicationCommandHandler`] simply replies to `/ping` with
/// `Pong, <user>!`.
pub struct PingApplicationCommand;
#[async_trait]
impl ApplicationCommandHandler for PingApplicationCommand {
/// Registers the `/ping` command with `guild`, erroring if this handler has
/// already been registered for that guild.
async fn register_command(
&self,
ctx: &Context,
guild: &Guild,
) -> Result<
serenity::model::interactions::application_command::ApplicationCommand,
Box<dyn Error>,
> {
let mut data = ctx.data.write().await;
// Ensure that this application command has not already been registered for the
// given guild, according to the global application context's data.
// (`HashSet::insert` returns false for duplicates; the write lock is held
// across check-and-insert, so concurrent registration cannot double-insert.)
if !data
.entry::<PingRegisteredInfo>()
.or_default()
.insert(guild.id)
{
return Err(format!(
"ping application command already registered for {}",
guild.name,
)
.into());
}
guild
.create_application_command(&ctx.http, |command| {
command
.name(self.get_name())
.description("Check if this bot is working")
})
.await
.map_err(|e| e.into())
}
/// Replies `Pong, <mention>` to the incoming interaction.
async fn handle_command(
&self,
ctx: &Context,
command: ApplicationCommandInteraction,
) -> Result<(), Box<dyn Error>> {
// This application command simply responds to any incoming request with `Pong,
// <user>!`.
command
.create_interaction_response(&ctx.http, |response| {
response.interaction_response_data(|message| {
message.content(format!("Pong, {}!", command.user.mention()))
})
})
.await
.map_err(|e| e.into())
}
/// The command name under which this handler is registered with Discord.
fn get_name(&self) -> String {
"ping".to_string()
}
}
|
fn main() {
// Calls the `dangle` example below; the commented-out snippets are the other
// borrowing exercises from this chapter, kept intentionally for reference.
let reference_to_nothing = dangle();
/*let mut s = String::from("hello");
let r1 = &s; // no problem
let r2 = &s; // no problem
let r3 = &mut s; // BIG PROBLEM
println!("{}, {}, and {}", r1, r2, r3);*/
// let mut s1 = String::from("hello");
// change1(&mut s1);
// println!("s1:{}",s1);
//change(&s1);
/*let len = calculate_length(&s1);
println!("The length of '{}' is {}.", s1, len);*/
}
/// Returns a freshly created `String` by value.
///
/// The original signature was `fn dangle() -> &String`, returning a reference
/// to the local `s` — a dangling reference the compiler rejects (missing
/// lifetime / E0515). Handing ownership of the `String` to the caller fixes
/// the error while keeping the call site `let x = dangle();` unchanged.
fn dangle() -> String {
    let s = String::from("hello");
    // Move `s` out to the caller instead of borrowing a soon-dropped local.
    s
}
/*fn change1(s: &mut String) {
s.push_str(", I'm trying!!!");
}*/
/*fn change(s:&String) {
s.push_str(", world");
}*/
/*
fn calculate_length(s: &String) -> usize {
s.len()
}*/
|
mod palindromes;
/// Print the largest-palindrome result for products of `digits`-digit factors.
pub fn demo(digits: u32) {
    let answer = palindromes::largest_palindrome(digits);
    println!("{:?}", answer);
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtCore/qtimezone.h
// dst-file: /src/core/qtimezone.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use std::ops::Deref;
// use super::qlist::*; // 775
use super::qdatetime::*; // 773
use super::qbytearray::*; // 773
use super::qstring::*; // 773
use super::qlocale::*; // 773
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
extern {
fn QTimeZone_Class_Size() -> c_int;
// proto: static QList<QByteArray> QTimeZone::availableTimeZoneIds();
fn C_ZN9QTimeZone20availableTimeZoneIdsEv() -> *mut c_void;
// proto: void QTimeZone::swap(QTimeZone & other);
fn C_ZN9QTimeZone4swapERS_(qthis: u64 /* *mut c_void*/, arg0: *mut c_void);
// proto: bool QTimeZone::isValid();
fn C_ZNK9QTimeZone7isValidEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: bool QTimeZone::hasDaylightTime();
fn C_ZNK9QTimeZone15hasDaylightTimeEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: static QTimeZone QTimeZone::utc();
fn C_ZN9QTimeZone3utcEv() -> *mut c_void;
// proto: static QList<QByteArray> QTimeZone::availableTimeZoneIds(int offsetSeconds);
fn C_ZN9QTimeZone20availableTimeZoneIdsEi(arg0: c_int) -> *mut c_void;
// proto: void QTimeZone::QTimeZone(int offsetSeconds);
fn C_ZN9QTimeZoneC2Ei(arg0: c_int) -> u64;
// proto: QString QTimeZone::abbreviation(const QDateTime & atDateTime);
fn C_ZNK9QTimeZone12abbreviationERK9QDateTime(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> *mut c_void;
// proto: void QTimeZone::QTimeZone();
fn C_ZN9QTimeZoneC2Ev() -> u64;
// proto: static QByteArray QTimeZone::ianaIdToWindowsId(const QByteArray & ianaId);
fn C_ZN9QTimeZone17ianaIdToWindowsIdERK10QByteArray(arg0: *mut c_void) -> *mut c_void;
// proto: static QByteArray QTimeZone::systemTimeZoneId();
fn C_ZN9QTimeZone16systemTimeZoneIdEv() -> *mut c_void;
// proto: bool QTimeZone::isDaylightTime(const QDateTime & atDateTime);
fn C_ZNK9QTimeZone14isDaylightTimeERK9QDateTime(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_char;
// proto: static bool QTimeZone::isTimeZoneIdAvailable(const QByteArray & ianaId);
fn C_ZN9QTimeZone21isTimeZoneIdAvailableERK10QByteArray(arg0: *mut c_void) -> c_char;
// proto: QString QTimeZone::comment();
fn C_ZNK9QTimeZone7commentEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
// proto: static QByteArray QTimeZone::windowsIdToDefaultIanaId(const QByteArray & windowsId);
fn C_ZN9QTimeZone24windowsIdToDefaultIanaIdERK10QByteArray(arg0: *mut c_void) -> *mut c_void;
// proto: bool QTimeZone::hasTransitions();
fn C_ZNK9QTimeZone14hasTransitionsEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: int QTimeZone::daylightTimeOffset(const QDateTime & atDateTime);
fn C_ZNK9QTimeZone18daylightTimeOffsetERK9QDateTime(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_int;
// proto: static QTimeZone QTimeZone::systemTimeZone();
fn C_ZN9QTimeZone14systemTimeZoneEv() -> *mut c_void;
// proto: void QTimeZone::QTimeZone(const QByteArray & ianaId);
fn C_ZN9QTimeZoneC2ERK10QByteArray(arg0: *mut c_void) -> u64;
// proto: void QTimeZone::QTimeZone(const QTimeZone & other);
fn C_ZN9QTimeZoneC2ERKS_(arg0: *mut c_void) -> u64;
// proto: void QTimeZone::~QTimeZone();
fn C_ZN9QTimeZoneD2Ev(qthis: u64 /* *mut c_void*/);
// proto: int QTimeZone::standardTimeOffset(const QDateTime & atDateTime);
fn C_ZNK9QTimeZone18standardTimeOffsetERK9QDateTime(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_int;
// proto: QByteArray QTimeZone::id();
fn C_ZNK9QTimeZone2idEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
// proto: int QTimeZone::offsetFromUtc(const QDateTime & atDateTime);
fn C_ZNK9QTimeZone13offsetFromUtcERK9QDateTime(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_int;
// proto: static QList<QByteArray> QTimeZone::windowsIdToIanaIds(const QByteArray & windowsId);
fn C_ZN9QTimeZone18windowsIdToIanaIdsERK10QByteArray(arg0: *mut c_void) -> *mut c_void;
} // <= ext block end
// body block begin =>
// class sizeof(QTimeZone)=1
// Thin Rust handle over a C++ QTimeZone instance; `qclsinst` holds the raw
// instance pointer widened to u64.
#[derive(Default)]
pub struct QTimeZone {
// qbase: None,
pub qclsinst: u64 /* *mut c_void*/,
}
impl /*struct*/ QTimeZone {
// Wrap an existing C++ instance pointer in a Rust-side handle.
// NOTE(review): no Drop/ownership handling here — destruction is via the
// generated `~QTimeZone` wrapper; confirm intended lifetime semantics.
pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QTimeZone {
return QTimeZone{qclsinst: qthis, ..Default::default()};
}
}
// proto: static QList<QByteArray> QTimeZone::availableTimeZoneIds();
impl /*struct*/ QTimeZone {
// Static overload dispatcher. The QList<QByteArray> result is surfaced as a
// raw u64 handle because QList is not wrapped on the Rust side (see the
// `// 5` marker in the impls).
pub fn availableTimeZoneIds_s<RetType, T: QTimeZone_availableTimeZoneIds_s<RetType>>( overload_args: T) -> RetType {
return overload_args.availableTimeZoneIds_s();
// return 1;
}
}
pub trait QTimeZone_availableTimeZoneIds_s<RetType> {
fn availableTimeZoneIds_s(self ) -> RetType;
}
// proto: static QList<QByteArray> QTimeZone::availableTimeZoneIds();
impl<'a> /*trait*/ QTimeZone_availableTimeZoneIds_s<u64> for () {
fn availableTimeZoneIds_s(self ) -> u64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone20availableTimeZoneIdsEv()};
let mut ret = unsafe {C_ZN9QTimeZone20availableTimeZoneIdsEv()};
return ret as u64; // 5
// return 1;
}
}
// proto: void QTimeZone::swap(QTimeZone & other);
impl /*struct*/ QTimeZone {
pub fn swap<RetType, T: QTimeZone_swap<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.swap(self);
// return 1;
}
}
pub trait QTimeZone_swap<RetType> {
fn swap(self , rsthis: & QTimeZone) -> RetType;
}
// proto: void QTimeZone::swap(QTimeZone & other);
impl<'a> /*trait*/ QTimeZone_swap<()> for (&'a QTimeZone) {
fn swap(self , rsthis: & QTimeZone) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone4swapERS_()};
let arg0 = self.qclsinst as *mut c_void;
unsafe {C_ZN9QTimeZone4swapERS_(rsthis.qclsinst, arg0)};
// return 1;
}
}
// proto: bool QTimeZone::isValid();
impl /*struct*/ QTimeZone {
pub fn isValid<RetType, T: QTimeZone_isValid<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isValid(self);
// return 1;
}
}
pub trait QTimeZone_isValid<RetType> {
fn isValid(self , rsthis: & QTimeZone) -> RetType;
}
// proto: bool QTimeZone::isValid();
impl<'a> /*trait*/ QTimeZone_isValid<i8> for () {
fn isValid(self , rsthis: & QTimeZone) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QTimeZone7isValidEv()};
// C++ bool is marshalled through the shim as c_char and surfaced as i8.
let mut ret = unsafe {C_ZNK9QTimeZone7isValidEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QTimeZone::hasDaylightTime();
impl /*struct*/ QTimeZone {
pub fn hasDaylightTime<RetType, T: QTimeZone_hasDaylightTime<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.hasDaylightTime(self);
// return 1;
}
}
pub trait QTimeZone_hasDaylightTime<RetType> {
fn hasDaylightTime(self , rsthis: & QTimeZone) -> RetType;
}
// proto: bool QTimeZone::hasDaylightTime();
impl<'a> /*trait*/ QTimeZone_hasDaylightTime<i8> for () {
fn hasDaylightTime(self , rsthis: & QTimeZone) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QTimeZone15hasDaylightTimeEv()};
let mut ret = unsafe {C_ZNK9QTimeZone15hasDaylightTimeEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: static QTimeZone QTimeZone::utc();
impl /*struct*/ QTimeZone {
pub fn utc_s<RetType, T: QTimeZone_utc_s<RetType>>( overload_args: T) -> RetType {
return overload_args.utc_s();
// return 1;
}
}
pub trait QTimeZone_utc_s<RetType> {
fn utc_s(self ) -> RetType;
}
// proto: static QTimeZone QTimeZone::utc();
impl<'a> /*trait*/ QTimeZone_utc_s<QTimeZone> for () {
fn utc_s(self ) -> QTimeZone {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone3utcEv()};
let mut ret = unsafe {C_ZN9QTimeZone3utcEv()};
// Wrap the raw instance pointer from the shim in a Rust-side handle.
let mut ret1 = QTimeZone::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: static QList<QByteArray> QTimeZone::availableTimeZoneIds(int offsetSeconds);
impl<'a> /*trait*/ QTimeZone_availableTimeZoneIds_s<u64> for (i32) {
fn availableTimeZoneIds_s(self ) -> u64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone20availableTimeZoneIdsEi()};
let arg0 = self as c_int;
let mut ret = unsafe {C_ZN9QTimeZone20availableTimeZoneIdsEi(arg0)};
return ret as u64; // 5
// return 1;
}
}
// proto: void QTimeZone::QTimeZone(int offsetSeconds);
// Overload-emulation constructor entry point, dispatched on argument type.
impl /*struct*/ QTimeZone {
pub fn new<T: QTimeZone_new>(value: T) -> QTimeZone {
let rsthis = value.new();
return rsthis;
// return 1;
}
}
pub trait QTimeZone_new {
fn new(self) -> QTimeZone;
}
// proto: void QTimeZone::QTimeZone(int offsetSeconds);
impl<'a> /*trait*/ QTimeZone_new for (i32) {
fn new(self) -> QTimeZone {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZoneC2Ei()};
let ctysz: c_int = unsafe{QTimeZone_Class_Size()};
// NOTE(review): qthis_ph is calloc'd but never used/freed — apparent dead
// generator output; the shim returns the real instance pointer below.
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
let arg0 = self as c_int;
let qthis: u64 = unsafe {C_ZN9QTimeZoneC2Ei(arg0)};
let rsthis = QTimeZone{qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: QString QTimeZone::abbreviation(const QDateTime & atDateTime);
impl /*struct*/ QTimeZone {
pub fn abbreviation<RetType, T: QTimeZone_abbreviation<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.abbreviation(self);
// return 1;
}
}
pub trait QTimeZone_abbreviation<RetType> {
fn abbreviation(self , rsthis: & QTimeZone) -> RetType;
}
// proto: QString QTimeZone::abbreviation(const QDateTime & atDateTime);
impl<'a> /*trait*/ QTimeZone_abbreviation<QString> for (&'a QDateTime) {
fn abbreviation(self , rsthis: & QTimeZone) -> QString {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QTimeZone12abbreviationERK9QDateTime()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZNK9QTimeZone12abbreviationERK9QDateTime(rsthis.qclsinst, arg0)};
let mut ret1 = QString::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: void QTimeZone::QTimeZone();
impl<'a> /*trait*/ QTimeZone_new for () {
fn new(self) -> QTimeZone {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZoneC2Ev()};
let ctysz: c_int = unsafe{QTimeZone_Class_Size()};
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
let qthis: u64 = unsafe {C_ZN9QTimeZoneC2Ev()};
let rsthis = QTimeZone{qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: static QByteArray QTimeZone::ianaIdToWindowsId(const QByteArray & ianaId);
impl /*struct*/ QTimeZone {
pub fn ianaIdToWindowsId_s<RetType, T: QTimeZone_ianaIdToWindowsId_s<RetType>>( overload_args: T) -> RetType {
return overload_args.ianaIdToWindowsId_s();
// return 1;
}
}
pub trait QTimeZone_ianaIdToWindowsId_s<RetType> {
fn ianaIdToWindowsId_s(self ) -> RetType;
}
// proto: static QByteArray QTimeZone::ianaIdToWindowsId(const QByteArray & ianaId);
impl<'a> /*trait*/ QTimeZone_ianaIdToWindowsId_s<QByteArray> for (&'a QByteArray) {
fn ianaIdToWindowsId_s(self ) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone17ianaIdToWindowsIdERK10QByteArray()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZN9QTimeZone17ianaIdToWindowsIdERK10QByteArray(arg0)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: static QByteArray QTimeZone::systemTimeZoneId();
impl /*struct*/ QTimeZone {
pub fn systemTimeZoneId_s<RetType, T: QTimeZone_systemTimeZoneId_s<RetType>>( overload_args: T) -> RetType {
return overload_args.systemTimeZoneId_s();
// return 1;
}
}
pub trait QTimeZone_systemTimeZoneId_s<RetType> {
fn systemTimeZoneId_s(self ) -> RetType;
}
// proto: static QByteArray QTimeZone::systemTimeZoneId();
impl<'a> /*trait*/ QTimeZone_systemTimeZoneId_s<QByteArray> for () {
fn systemTimeZoneId_s(self ) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone16systemTimeZoneIdEv()};
let mut ret = unsafe {C_ZN9QTimeZone16systemTimeZoneIdEv()};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: bool QTimeZone::isDaylightTime(const QDateTime & atDateTime);
impl /*struct*/ QTimeZone {
pub fn isDaylightTime<RetType, T: QTimeZone_isDaylightTime<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isDaylightTime(self);
// return 1;
}
}
pub trait QTimeZone_isDaylightTime<RetType> {
fn isDaylightTime(self , rsthis: & QTimeZone) -> RetType;
}
// proto: bool QTimeZone::isDaylightTime(const QDateTime & atDateTime);
impl<'a> /*trait*/ QTimeZone_isDaylightTime<i8> for (&'a QDateTime) {
fn isDaylightTime(self , rsthis: & QTimeZone) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QTimeZone14isDaylightTimeERK9QDateTime()};
// C++ bool comes back from the shim as c_char, surfaced here as i8.
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZNK9QTimeZone14isDaylightTimeERK9QDateTime(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: static bool QTimeZone::isTimeZoneIdAvailable(const QByteArray & ianaId);
impl /*struct*/ QTimeZone {
pub fn isTimeZoneIdAvailable_s<RetType, T: QTimeZone_isTimeZoneIdAvailable_s<RetType>>( overload_args: T) -> RetType {
return overload_args.isTimeZoneIdAvailable_s();
// return 1;
}
}
pub trait QTimeZone_isTimeZoneIdAvailable_s<RetType> {
fn isTimeZoneIdAvailable_s(self ) -> RetType;
}
// proto: static bool QTimeZone::isTimeZoneIdAvailable(const QByteArray & ianaId);
impl<'a> /*trait*/ QTimeZone_isTimeZoneIdAvailable_s<i8> for (&'a QByteArray) {
fn isTimeZoneIdAvailable_s(self ) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QTimeZone21isTimeZoneIdAvailableERK10QByteArray()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZN9QTimeZone21isTimeZoneIdAvailableERK10QByteArray(arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: QString QTimeZone::comment();
impl /*struct*/ QTimeZone {
pub fn comment<RetType, T: QTimeZone_comment<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.comment(self);
// return 1;
}
}
pub trait QTimeZone_comment<RetType> {
fn comment(self , rsthis: & QTimeZone) -> RetType;
}
// proto: QString QTimeZone::comment();
impl<'a> /*trait*/ QTimeZone_comment<QString> for () {
fn comment(self , rsthis: & QTimeZone) -> QString {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QTimeZone7commentEv()};
let mut ret = unsafe {C_ZNK9QTimeZone7commentEv(rsthis.qclsinst)};
let mut ret1 = QString::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: static QByteArray QTimeZone::windowsIdToDefaultIanaId(const QByteArray & windowsId);
// Same generated dispatcher/trait/impl triple as the methods above.
impl /*struct*/ QTimeZone {
    pub fn windowsIdToDefaultIanaId_s<RetType, T: QTimeZone_windowsIdToDefaultIanaId_s<RetType>>( overload_args: T) -> RetType {
        return overload_args.windowsIdToDefaultIanaId_s();
        // return 1;
    }
}
pub trait QTimeZone_windowsIdToDefaultIanaId_s<RetType> {
    fn windowsIdToDefaultIanaId_s(self ) -> RetType;
}
// proto: static QByteArray QTimeZone::windowsIdToDefaultIanaId(const QByteArray & windowsId);
impl<'a> /*trait*/ QTimeZone_windowsIdToDefaultIanaId_s<QByteArray> for (&'a QByteArray) {
    fn windowsIdToDefaultIanaId_s(self ) -> QByteArray {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZone24windowsIdToDefaultIanaIdERK10QByteArray()};
        let arg0 = self.qclsinst as *mut c_void;
        let mut ret = unsafe {C_ZN9QTimeZone24windowsIdToDefaultIanaIdERK10QByteArray(arg0)};
        let mut ret1 = QByteArray::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: bool QTimeZone::hasTransitions();
impl /*struct*/ QTimeZone {
    pub fn hasTransitions<RetType, T: QTimeZone_hasTransitions<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.hasTransitions(self);
        // return 1;
    }
}
pub trait QTimeZone_hasTransitions<RetType> {
    fn hasTransitions(self , rsthis: & QTimeZone) -> RetType;
}
// proto: bool QTimeZone::hasTransitions();
// Returns C++ `bool` as `i8`.
impl<'a> /*trait*/ QTimeZone_hasTransitions<i8> for () {
    fn hasTransitions(self , rsthis: & QTimeZone) -> i8 {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK9QTimeZone14hasTransitionsEv()};
        let mut ret = unsafe {C_ZNK9QTimeZone14hasTransitionsEv(rsthis.qclsinst)};
        return ret as i8; // 1
        // return 1;
    }
}
// proto: int QTimeZone::daylightTimeOffset(const QDateTime & atDateTime);
impl /*struct*/ QTimeZone {
    pub fn daylightTimeOffset<RetType, T: QTimeZone_daylightTimeOffset<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.daylightTimeOffset(self);
        // return 1;
    }
}
pub trait QTimeZone_daylightTimeOffset<RetType> {
    fn daylightTimeOffset(self , rsthis: & QTimeZone) -> RetType;
}
// proto: int QTimeZone::daylightTimeOffset(const QDateTime & atDateTime);
impl<'a> /*trait*/ QTimeZone_daylightTimeOffset<i32> for (&'a QDateTime) {
    fn daylightTimeOffset(self , rsthis: & QTimeZone) -> i32 {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK9QTimeZone18daylightTimeOffsetERK9QDateTime()};
        let arg0 = self.qclsinst as *mut c_void;
        let mut ret = unsafe {C_ZNK9QTimeZone18daylightTimeOffsetERK9QDateTime(rsthis.qclsinst, arg0)};
        return ret as i32; // 1
        // return 1;
    }
}
// proto: static QTimeZone QTimeZone::systemTimeZone();
impl /*struct*/ QTimeZone {
    pub fn systemTimeZone_s<RetType, T: QTimeZone_systemTimeZone_s<RetType>>( overload_args: T) -> RetType {
        return overload_args.systemTimeZone_s();
        // return 1;
    }
}
pub trait QTimeZone_systemTimeZone_s<RetType> {
    fn systemTimeZone_s(self ) -> RetType;
}
// proto: static QTimeZone QTimeZone::systemTimeZone();
impl<'a> /*trait*/ QTimeZone_systemTimeZone_s<QTimeZone> for () {
    fn systemTimeZone_s(self ) -> QTimeZone {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZone14systemTimeZoneEv()};
        let mut ret = unsafe {C_ZN9QTimeZone14systemTimeZoneEv()};
        let mut ret1 = QTimeZone::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: void QTimeZone::QTimeZone(const QByteArray & ianaId);
// Constructor overload taking an IANA time-zone id.
// NOTE(review): `qthis_ph` is calloc'd from the C++ class size but is never
// used and never freed — it looks like a leak left by the generator (the C
// shim returns its own allocation as `qthis`). Confirm against the shim.
impl<'a> /*trait*/ QTimeZone_new for (&'a QByteArray) {
    fn new(self) -> QTimeZone {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZoneC2ERK10QByteArray()};
        let ctysz: c_int = unsafe{QTimeZone_Class_Size()};
        let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
        let arg0 = self.qclsinst as *mut c_void;
        let qthis: u64 = unsafe {C_ZN9QTimeZoneC2ERK10QByteArray(arg0)};
        let rsthis = QTimeZone{qclsinst: qthis, ..Default::default()};
        return rsthis;
        // return 1;
    }
}
// proto: void QTimeZone::QTimeZone(const QTimeZone & other);
// Copy-constructor overload. Same unused-`qthis_ph` caveat as above.
impl<'a> /*trait*/ QTimeZone_new for (&'a QTimeZone) {
    fn new(self) -> QTimeZone {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZoneC2ERKS_()};
        let ctysz: c_int = unsafe{QTimeZone_Class_Size()};
        let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
        let arg0 = self.qclsinst as *mut c_void;
        let qthis: u64 = unsafe {C_ZN9QTimeZoneC2ERKS_(arg0)};
        let rsthis = QTimeZone{qclsinst: qthis, ..Default::default()};
        return rsthis;
        // return 1;
    }
}
// proto: void QTimeZone::~QTimeZone();
impl /*struct*/ QTimeZone {
    // Explicit destructor call; Rust never frees the C++ object automatically.
    pub fn free<RetType, T: QTimeZone_free<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.free(self);
        // return 1;
    }
}
pub trait QTimeZone_free<RetType> {
    fn free(self , rsthis: & QTimeZone) -> RetType;
}
// proto: void QTimeZone::~QTimeZone();
impl<'a> /*trait*/ QTimeZone_free<()> for () {
    fn free(self , rsthis: & QTimeZone) -> () {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZoneD2Ev()};
        unsafe {C_ZN9QTimeZoneD2Ev(rsthis.qclsinst)};
        // return 1;
    }
}
// proto: int QTimeZone::standardTimeOffset(const QDateTime & atDateTime);
impl /*struct*/ QTimeZone {
    pub fn standardTimeOffset<RetType, T: QTimeZone_standardTimeOffset<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.standardTimeOffset(self);
        // return 1;
    }
}
pub trait QTimeZone_standardTimeOffset<RetType> {
    fn standardTimeOffset(self , rsthis: & QTimeZone) -> RetType;
}
// proto: int QTimeZone::standardTimeOffset(const QDateTime & atDateTime);
impl<'a> /*trait*/ QTimeZone_standardTimeOffset<i32> for (&'a QDateTime) {
    fn standardTimeOffset(self , rsthis: & QTimeZone) -> i32 {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK9QTimeZone18standardTimeOffsetERK9QDateTime()};
        let arg0 = self.qclsinst as *mut c_void;
        let mut ret = unsafe {C_ZNK9QTimeZone18standardTimeOffsetERK9QDateTime(rsthis.qclsinst, arg0)};
        return ret as i32; // 1
        // return 1;
    }
}
// proto: QByteArray QTimeZone::id();
impl /*struct*/ QTimeZone {
    pub fn id<RetType, T: QTimeZone_id<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.id(self);
        // return 1;
    }
}
pub trait QTimeZone_id<RetType> {
    fn id(self , rsthis: & QTimeZone) -> RetType;
}
// proto: QByteArray QTimeZone::id();
impl<'a> /*trait*/ QTimeZone_id<QByteArray> for () {
    fn id(self , rsthis: & QTimeZone) -> QByteArray {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK9QTimeZone2idEv()};
        let mut ret = unsafe {C_ZNK9QTimeZone2idEv(rsthis.qclsinst)};
        let mut ret1 = QByteArray::inheritFrom(ret as u64);
        return ret1;
        // return 1;
    }
}
// proto: int QTimeZone::offsetFromUtc(const QDateTime & atDateTime);
impl /*struct*/ QTimeZone {
    pub fn offsetFromUtc<RetType, T: QTimeZone_offsetFromUtc<RetType>>(& self, overload_args: T) -> RetType {
        return overload_args.offsetFromUtc(self);
        // return 1;
    }
}
pub trait QTimeZone_offsetFromUtc<RetType> {
    fn offsetFromUtc(self , rsthis: & QTimeZone) -> RetType;
}
// proto: int QTimeZone::offsetFromUtc(const QDateTime & atDateTime);
impl<'a> /*trait*/ QTimeZone_offsetFromUtc<i32> for (&'a QDateTime) {
    fn offsetFromUtc(self , rsthis: & QTimeZone) -> i32 {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZNK9QTimeZone13offsetFromUtcERK9QDateTime()};
        let arg0 = self.qclsinst as *mut c_void;
        let mut ret = unsafe {C_ZNK9QTimeZone13offsetFromUtcERK9QDateTime(rsthis.qclsinst, arg0)};
        return ret as i32; // 1
        // return 1;
    }
}
// proto: static QList<QByteArray> QTimeZone::windowsIdToIanaIds(const QByteArray & windowsId);
impl /*struct*/ QTimeZone {
    pub fn windowsIdToIanaIds_s<RetType, T: QTimeZone_windowsIdToIanaIds_s<RetType>>( overload_args: T) -> RetType {
        return overload_args.windowsIdToIanaIds_s();
        // return 1;
    }
}
pub trait QTimeZone_windowsIdToIanaIds_s<RetType> {
    fn windowsIdToIanaIds_s(self ) -> RetType;
}
// proto: static QList<QByteArray> QTimeZone::windowsIdToIanaIds(const QByteArray & windowsId);
// QList<QByteArray> has no Rust wrapper in this generator, so the raw
// pointer is surfaced as a bare `u64` (the `// 5` marker is the generator's
// type-category tag).
impl<'a> /*trait*/ QTimeZone_windowsIdToIanaIds_s<u64> for (&'a QByteArray) {
    fn windowsIdToIanaIds_s(self ) -> u64 {
        // let qthis: *mut c_void = unsafe{calloc(1, 32)};
        // unsafe{_ZN9QTimeZone18windowsIdToIanaIdsERK10QByteArray()};
        let arg0 = self.qclsinst as *mut c_void;
        let mut ret = unsafe {C_ZN9QTimeZone18windowsIdToIanaIdsERK10QByteArray(arg0)};
        return ret as u64; // 5
        // return 1;
    }
}
// <= body block end
|
mod message_receiver;
mod place_runner;
mod plugin;
mod place;
mod core;
use std::{
fmt,
path::{Path, PathBuf},
error::Error,
};
use crate::{
core::{run_place, run_script, DEFAULT_PORT, DEFAULT_TIMEOUT},
place_runner::PlaceRunnerOptions,
message_receiver::RobloxMessage,
};
/// Error returned when a place-file path has a missing or unusable extension.
#[derive(Debug)]
struct BadPathError {
    path: PathBuf,
}
// No underlying source error to expose; the marker impl is sufficient.
impl Error for BadPathError {}
impl fmt::Display for BadPathError {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let shown = self.path.display();
        write!(formatter, "Invalid file type: {}", shown)
    }
}
/// Which place the Lua script should run inside.
pub enum RunEnvironment<'a> {
    /// A freshly generated, empty place.
    EmptyPlace,
    /// An existing place file on disk.
    PlaceFile(&'a Path),
}
/// Options for a single `run` invocation.
pub struct RunOptions<'a> {
    /// Where to run the script.
    pub env: RunEnvironment<'a>,
    /// Lua source to execute inside the place.
    pub lua_script: &'a str,
    /// Server port; `None` means `DEFAULT_PORT`.
    pub port: Option<u16>,
    /// Timeout in seconds; `None` means `DEFAULT_TIMEOUT`.
    pub timeout: Option<u16>,
}
/// Run `opts.lua_script` in the requested environment and collect every
/// message the place sends back until the message stream ends.
///
/// Returns `BadPathError` (boxed) when a `PlaceFile` path has no extension;
/// other errors are whatever the receiver propagates through `recv()`.
pub fn run(opts: RunOptions) -> Result<Vec<RobloxMessage>, Box<dyn Error>> {
    let place_runner_opts = PlaceRunnerOptions {
        lua_script: opts.lua_script,
        // Fall back to crate-level defaults when the caller leaves these unset.
        port: opts.port.unwrap_or(DEFAULT_PORT),
        timeout: opts.timeout.unwrap_or(DEFAULT_TIMEOUT),
    };
    let message_queue = match opts.env {
        RunEnvironment::EmptyPlace => run_script(place_runner_opts),
        RunEnvironment::PlaceFile(path) => {
            // The extension selects the place-file format; reject paths without one.
            let extension = match path.extension() {
                Some(e) => e.to_str().unwrap(),
                None => return Err(Box::new(BadPathError {
                    path: path.to_path_buf(),
                })),
            };
            run_place(path, extension, place_runner_opts)
        },
    };
    // Drain messages until the receiver signals end-of-stream with `None`.
    let mut messages = Vec::new();
    while let Some(message) = message_queue.recv()? {
        messages.push(message);
    }
    Ok(messages)
}
use std::env;
use std::io::{self, Write};
use std::process;
fn main() {
let bytes = match env::args().nth(1) {
None => {
writeln!(&mut io::stderr(), "Usage: compress-escaped string")
.unwrap();
process::exit(1);
}
Some(arg) => arg.into_bytes(),
};
let compressed = frame_press(&bytes);
println!("{}", escape(&compressed));
println!("{}", escape(&frame_depress(&compressed)));
}
/// Compress `bytes` with the Snappy frame format.
/// Panics on write failure (acceptable for this example binary).
fn frame_press(bytes: &[u8]) -> Vec<u8> {
    use snap::write;
    let mut wtr = write::FrameEncoder::new(vec![]);
    wtr.write_all(bytes).unwrap();
    // into_inner flushes the encoder and returns the underlying Vec.
    wtr.into_inner().unwrap()
}
/// Decompress Snappy frame-format `bytes` back to the raw payload.
/// Panics on malformed input (acceptable for this example binary).
fn frame_depress(bytes: &[u8]) -> Vec<u8> {
    use snap::read;
    use std::io::Read;
    let mut buf = vec![];
    read::FrameDecoder::new(bytes).read_to_end(&mut buf).unwrap();
    buf
}
/// Render `bytes` as printable ASCII, escaping non-printable bytes the way
/// Rust byte literals do (e.g. `\n`, `\x00`).
fn escape(bytes: &[u8]) -> String {
    use std::ascii::escape_default;
    let mut out = String::new();
    for &byte in bytes {
        for escaped in escape_default(byte) {
            out.push(escaped as char);
        }
    }
    out
}
|
//! Coordinate systems and geometry definitions. Some conversions are dependent on the application
//! state, and so those functions are a part of the `AppContext`.
use crate::app::config;
use metfor::{Celsius, CelsiusDiff, HectoPascal, Knots, Meters, PaPS, WindSpdDir};
/// Common operations on rectangles
pub trait Rect {
    /// Get the minimum x coordinate
    fn min_x(&self) -> f64;
    /// Get the maximum x coordinate
    fn max_x(&self) -> f64;
    /// Get the minimum y coordinate
    fn min_y(&self) -> f64;
    /// Get the maximum y coordinate
    fn max_y(&self) -> f64;
    /// Check if two rectangles overlap
    fn overlaps(&self, other: &Self) -> bool {
        // Two rectangles are disjoint exactly when one lies strictly to one
        // side of the other; the strict comparisons are kept so behavior with
        // degenerate (NaN) coordinates matches the original early-return form.
        !(self.min_x() > other.max_x()
            || self.max_x() < other.min_x()
            || self.min_y() > other.max_y()
            || self.max_y() < other.min_y())
    }
    /// Check if this rectangle is inside another.
    fn inside(&self, big_rect: &Self) -> bool {
        // Inside means no edge of `self` sticks out past `big_rect`.
        !(self.min_x() < big_rect.min_x()
            || self.max_x() > big_rect.max_x()
            || self.min_y() < big_rect.min_y()
            || self.max_y() > big_rect.max_y())
    }
    /// Get the width of this rectangle
    fn width(&self) -> f64 {
        self.max_x() - self.min_x()
    }
    /// Get the height of this rectangle
    fn height(&self) -> f64 {
        self.max_y() - self.min_y()
    }
}
/***************************************************************************************************
 * Temperature - Pressure Coordinates for Skew-T Log-P plot.
 * ************************************************************************************************/
/// Temperature-Pressure coordinates.
/// Origin lower left. (Temperature, Pressure)
#[derive(Clone, Copy, Debug)]
pub struct TPCoords {
    /// Temperature in Celsius
    pub temperature: Celsius,
    /// Pressure in hPa
    pub pressure: HectoPascal,
}
/***************************************************************************************************
 * Speed - Direction Coordinates for the Hodograph
 * ************************************************************************************************/
/// Speed-Direction coordinates for the hodograph.
/// Origin center. (Speed, Direction wind is from)
#[derive(Clone, Copy, Debug)]
pub struct SDCoords {
    /// Wind speed and direction.
    pub spd_dir: WindSpdDir<Knots>,
}
/***************************************************************************************************
 * Omega(W) - Pressure coords for the vertical velocity and RH plot
 * ************************************************************************************************/
/// Omega-Pressure coordinates.
/// Origin lower left. (Omega, Pressure)
#[derive(Clone, Copy, Debug)]
pub struct WPCoords {
    /// Omega in Pa/s
    pub w: PaPS,
    /// Pressure in hPa
    pub p: HectoPascal,
}
/***************************************************************************************************
 * Percent - Pressure coords for the Cloud Cover
 * ************************************************************************************************/
/// Percent-Pressure coordinates.
#[derive(Clone, Copy, Debug)]
pub struct PPCoords {
    /// Percent 0.0 - 1.0
    pub pcnt: f64,
    /// Pressure in hPa
    pub press: HectoPascal,
}
/***************************************************************************************************
 * Speed - Pressure coords for the wind speed profile
 * ************************************************************************************************/
/// Speed-Pressure coordinates.
#[derive(Clone, Copy, Debug)]
pub struct SPCoords {
    /// Speed in knots
    pub spd: Knots,
    /// Pressure in hPa
    pub press: HectoPascal,
}
/***************************************************************************************************
 * △T - Height coords for the fire plume chart
 * ************************************************************************************************/
/// DeltaT-Height coordinates.
#[derive(Clone, Copy, Debug)]
pub struct DtHCoords {
    /// DeltaT in Celsius
    pub dt: CelsiusDiff,
    /// Height in meters
    pub height: Meters,
}
/***************************************************************************************************
 * △T - Percent coords for the percent wet cape chart.
 * ************************************************************************************************/
/// DeltaT-Percent coordinates.
#[derive(Clone, Copy, Debug)]
pub struct DtPCoords {
    /// DeltaT in Celsius
    pub dt: CelsiusDiff,
    /// Percent 0.0 - 1.0 (comment previously said "Height in meters" — a
    /// copy-paste left over from `DtHCoords` above).
    pub percent: f64,
}
/***************************************************************************************************
 * X - Y Coords for a default plot area that can be zoomed and panned
 * ************************************************************************************************/
/// XY coordinates of the skew-t graph, range 0.0 to 1.0. This coordinate system is dependent on
/// settings for the maximum/minimum plottable pressure and temperatures in the config module.
/// Origin lower left, (x,y)
#[derive(Clone, Copy, Debug)]
pub struct XYCoords {
    pub x: f64,
    pub y: f64,
}
impl XYCoords {
    /// The (0.0, 0.0) point of XY space (lower-left corner).
    pub fn origin() -> Self {
        XYCoords { x: 0.0, y: 0.0 }
    }
}
/// A rectangle in `XYCoords`, stored as its lower-left and upper-right corners.
#[derive(Clone, Copy, Debug)]
pub struct XYRect {
    pub lower_left: XYCoords,
    pub upper_right: XYCoords,
}
impl Rect for XYRect {
    // The `Rect` accessors just expose the stored corners.
    fn min_x(&self) -> f64 {
        self.lower_left.x
    }
    fn max_x(&self) -> f64 {
        self.upper_right.x
    }
    fn min_y(&self) -> f64 {
        self.lower_left.y
    }
    fn max_y(&self) -> f64 {
        self.upper_right.y
    }
}
/***************************************************************************************************
* Screen Coords - the coordinate system to actually draw in.
* ************************************************************************************************/
/// On screen coordinates. Meant to scale and translate `XYCoords` to fit on the screen.
/// Origin lower left, (x,y).
/// When drawing using cairo functions, use these coordinates.
#[derive(Clone, Copy, Debug)]
pub struct ScreenCoords {
    pub x: f64,
    pub y: f64,
}
impl ScreenCoords {
    /// The (0.0, 0.0) point.
    pub fn origin() -> Self {
        ScreenCoords { x: 0.0, y: 0.0 }
    }
}
/// A rectangle in screen coordinates, stored as lower-left and upper-right corners.
#[derive(Clone, Copy, Debug)]
pub struct ScreenRect {
    pub lower_left: ScreenCoords,
    pub upper_right: ScreenCoords,
}
impl ScreenRect {
    /// Return a copy of this rectangle grown outward by `padding` on all four sides.
    pub fn add_padding(&self, padding: f64) -> ScreenRect {
        let ScreenCoords { x: llx, y: lly } = self.lower_left;
        let ScreenCoords { x: urx, y: ury } = self.upper_right;
        ScreenRect {
            lower_left: ScreenCoords {
                x: llx - padding,
                y: lly - padding,
            },
            upper_right: ScreenCoords {
                x: urx + padding,
                y: ury + padding,
            },
        }
    }
    /// Grow this rectangle, only as much as needed, so `point` lies within it.
    pub fn expand_to_fit(&mut self, point: ScreenCoords) {
        // Each corner coordinate moves only when the point falls outside it;
        // the explicit comparisons (rather than f64::min/max) preserve the
        // original behavior for NaN inputs.
        if point.x < self.lower_left.x {
            self.lower_left.x = point.x;
        }
        if point.x > self.upper_right.x {
            self.upper_right.x = point.x;
        }
        if point.y < self.lower_left.y {
            self.lower_left.y = point.y;
        }
        if point.y > self.upper_right.y {
            self.upper_right.y = point.y;
        }
    }
}
impl Rect for ScreenRect {
    // Accessors expose the stored corners; origin is lower-left, so the
    // lower-left corner carries the minimums.
    fn min_x(&self) -> f64 {
        self.lower_left.x
    }
    fn max_x(&self) -> f64 {
        self.upper_right.x
    }
    fn min_y(&self) -> f64 {
        self.lower_left.y
    }
    fn max_y(&self) -> f64 {
        self.upper_right.y
    }
}
/***************************************************************************************************
* Device Coords - the coordinate system of the device
* ************************************************************************************************/
/// Device coordinates (pixels positions).
/// Origin upper left, (Column, Row)
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct DeviceCoords {
    pub col: f64,
    pub row: f64,
}
impl From<(f64, f64)> for DeviceCoords {
    /// Convert an `(column, row)` pair into `DeviceCoords`.
    fn from(src: (f64, f64)) -> Self {
        let (col, row) = src;
        DeviceCoords { col, row }
    }
}
/// A rectangle in device coordinates, anchored at its upper-left corner with
/// an explicit width and height.
#[derive(Clone, Copy, Debug)]
pub struct DeviceRect {
    pub upper_left: DeviceCoords,
    pub width: f64,
    pub height: f64,
}
impl Rect for DeviceRect {
    // Device rows grow downward, so min_y is the top edge (upper_left.row)
    // and max_y is top + height.
    fn min_x(&self) -> f64 {
        self.upper_left.col
    }
    fn max_x(&self) -> f64 {
        self.upper_left.col + self.width
    }
    fn min_y(&self) -> f64 {
        self.upper_left.row
    }
    fn max_y(&self) -> f64 {
        self.upper_left.row + self.height
    }
}
/***************************************************************************************************
 * Converting Pressure to the y coordinate
 * ************************************************************************************************/
/// Given a pressure value, convert it to a y-value from X-Y coordinates.
///
/// Overwhelmingly the vertical coordinate system is based on pressure, so this is a very common
/// operation to do, and you want it to always be done the same way.
pub fn convert_pressure_to_y(pressure: HectoPascal) -> f64 {
    // Log-pressure scaling: y = 0 at config::MAXP and y = 1 at config::MINP.
    (config::MAXP / pressure).log10() / (config::MAXP / config::MINP).log10()
}
/// Provide an inverse function as well.
pub fn convert_y_to_pressure(y: f64) -> HectoPascal {
    // Exact algebraic inverse of `convert_pressure_to_y`.
    config::MAXP * (config::MINP / config::MAXP).powf(y)
}
#[test]
fn test_pressure_to_y_and_back() {
    use metfor::Quantity;
    // Round-trip check: pressure -> y -> pressure should agree to within
    // 1e-6 hPa across the plottable range.
    for i in 100..=1000 {
        let p = HectoPascal(i as f64);
        let y = convert_pressure_to_y(p);
        let pback = convert_y_to_pressure(y);
        println!("p = {:?} y = {:?} pback = {:?}", p, y, pback);
        assert!((p - pback).abs() < HectoPascal(1.0e-6));
    }
}
|
use std::{ptr, mem};
use utils::SIZE_MASKS;
/// Encoder takes in typed data and produces a binary buffer
/// represented as `Vec<u8>`.
pub struct Encoder {
    /// The output buffer.
    data: Vec<u8>,
    /// Position just past the byte currently collecting packed booleans;
    /// `usize::MAX` is a sentinel meaning "no bool byte is open".
    bool_index: usize,
    /// Bit position (0-7) of the most recently written bool in that byte.
    bool_shift: u8,
}
pub trait BitEncode {
fn encode(&self, &mut Encoder);
#[inline(always)]
fn size_hint() -> usize {
0
}
}
impl Encoder {
/// Create a new instance of the `Encoder`.
#[inline(always)]
pub fn new() -> Encoder {
Encoder {
data: Vec::new(),
bool_index: ::std::usize::MAX,
bool_shift: 0,
}
}
/// Create a new instance of the `Encoder` with a preallocated buffer capacity.
#[inline(always)]
pub fn with_capacity(capacity: usize) -> Encoder {
Encoder {
data: Vec::with_capacity(capacity),
bool_index: ::std::usize::MAX,
bool_shift: 0,
}
}
#[inline]
pub fn encode<E: BitEncode>(val: E) -> Vec<u8> {
let mut e = Encoder::with_capacity(E::size_hint());
val.encode(&mut e);
e.data
}
/// Store any type implementing `BitEncode` on the buffer.
#[inline]
pub fn write<E: BitEncode>(&mut self, val: E) -> &mut Self {
val.encode(self);
self
}
/// Store a `u8` on the buffer.
#[inline]
pub fn uint8(&mut self, val: u8) -> &mut Self {
self.data.push(val);
self
}
/// Store a 'u16' on the buffer.
#[inline]
pub fn uint16(&mut self, val: u16) -> &mut Self {
val.encode(self);
self
}
/// Store a 'u32' on the buffer.
#[inline]
pub fn uint32(&mut self, val: u32) -> &mut Self {
val.encode(self);
self
}
/// Store a 'u64' on the buffer.
#[inline]
pub fn uint64(&mut self, val: u64) -> &mut Self {
val.encode(self);
self
}
/// Store an `i8` on the buffer.
#[inline]
pub fn int8(&mut self, val: i8) -> &mut Self {
val.encode(self);
self
}
/// Store an `i16` on the buffer.
#[inline]
pub fn int16(&mut self, val: i16) -> &mut Self {
val.encode(self);
self
}
#[inline]
/// Store an `i32` on the buffer.
pub fn int32(&mut self, val: i32) -> &mut Self {
val.encode(self);
self
}
#[inline]
/// Store an `i32` on the buffer.
pub fn int64(&mut self, val: i64) -> &mut Self {
val.encode(self);
self
}
/// Store an `f32` on the buffer.
#[inline]
pub fn float32(&mut self, val: f32) -> &mut Self {
val.encode(self);
self
}
/// Store an `f64` on the buffer.
#[inline]
pub fn float64(&mut self, val: f64) -> &mut Self {
val.encode(self);
self
}
/// Store a `bool` on the buffer. Calling `bool` multiple times
/// in a row will attempt to store the information on a single
/// byte.
///
/// ```
/// use bitsparrow::Encoder;
///
/// let buffer = Encoder::new()
/// .bool(true)
/// .bool(false)
/// .bool(false)
/// .bool(false)
/// .bool(false)
/// .bool(true)
/// .bool(true)
/// .bool(true)
/// .end();
///
/// // booleans are stacked as bits on a single byte, right to left.
/// assert_eq!(buffer, &[0b11100001]);
/// ```
#[inline]
pub fn bool(&mut self, val: bool) -> &mut Self {
val.encode(self);
self
}
/// Store a `usize` on the buffer. This will use a variable amount of bytes
/// depending on the value of `usize`, making it a very powerful and flexible
/// type to send around. BitSparrow uses `size` internally to prefix `string`
/// and `bytes` as those can have an arbitrary length, and using a large
/// number type such as u32 could be an overkill if all you want to send is
/// `"Foo"`. Detailed explanation on how BitSparrow stores `size` can be found
/// on [the homepage](http://bitsparrow.io).
#[inline]
pub fn size(&mut self, val: usize) -> &mut Self {
self.size_with_reserve(val, 0);
self
}
/// Store an arbitary collection of bytes represented as `&[u8]`,
/// easy to use by dereferencing `Vec<u8>` with `&`.
#[inline]
pub fn bytes(&mut self, val: &[u8]) -> &mut Self {
val.encode(self);
self
}
/// Store an arbitrary UTF-8 Rust string on the buffer.
#[inline]
pub fn string(&mut self, val: &str) -> &mut Self {
val.encode(self);
self
}
/// Finish encoding, obtain the buffer and reset the encoder.
#[inline(always)]
pub fn end(&mut self) -> Vec<u8> {
self.bool_index = ::std::usize::MAX;
self.bool_shift = 0;
mem::replace(&mut self.data, Vec::new())
}
#[inline(always)]
fn size_with_reserve(&mut self, size: usize, item_size: usize) {
if size < 128 {
// Encoding size means data will follow, so it makes sense to reserve
// capacity on the buffer beforehand
self.data.reserve(1 + size * item_size);
self.data.push(size as u8);
return;
}
let mut masked = size as u64;
let lead = masked.leading_zeros() as usize;
let bytes = if lead == 0 { 9 } else { 9 - (lead - 1) / 7 };
let mut buf: [u8; 9] = unsafe { mem::uninitialized() };
for i in (1 .. bytes).rev() {
buf[i] = masked as u8;
masked >>= 8;
}
buf[0] = (masked as u8) | SIZE_MASKS[bytes - 1];
// Same as above...
self.data.reserve(bytes + size * item_size);
self.data.extend_from_slice(&buf[0 .. bytes]);
}
}
// impl BitEncode for u8 {
// #[inline(always)]
// fn encode(&self, e: &mut Encoder) {
// e.data.push(*self);
// }
// }
impl BitEncode for i8 {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Single byte: reinterpret the i8's bits as u8 and push directly.
        e.data.push(*self as u8);
    }
}
// Implements BitEncode for a fixed-width integer type by copying the bytes
// of its big-endian representation onto the buffer.
macro_rules! impl_encodable {
    ($t:ty) => {
        impl BitEncode for $t {
            #[inline(always)]
            fn encode(&self, e: &mut Encoder) {
                unsafe {
                    // SAFETY: `ptr` points at the live `to_be()` temporary for
                    // the duration of the copy, exactly `size_of::<$t>()` bytes
                    // are reserved before `set_len`, and source/destination
                    // cannot overlap.
                    let ptr: *const u8 = mem::transmute(&self.to_be());
                    let len = e.data.len();
                    e.data.reserve(mem::size_of::<$t>());
                    e.data.set_len(len + mem::size_of::<$t>());
                    ptr::copy_nonoverlapping(
                        ptr,
                        e.data.as_mut_ptr().offset(len as isize),
                        mem::size_of::<$t>()
                    );
                }
            }
            #[inline(always)]
            fn size_hint() -> usize {
                mem::size_of::<$t>()
            }
        }
    }
}
// All fixed-width integers share the big-endian byte-copy implementation.
impl_encodable!(u16);
impl_encodable!(u32);
impl_encodable!(u64);
impl_encodable!(i16);
impl_encodable!(i32);
impl_encodable!(i64);
/// An `f32` is stored as the big-endian bytes of its IEEE-754 `u32` bit pattern.
impl BitEncode for f32 {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(&unsafe { mem::transmute::<f32, u32>(*self) }, e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        // Was 8, which is f64's size — an f32 occupies 4 bytes on the wire,
        // consistent with `impl_deref!(f32, 4)` below. The hint only affects
        // capacity reservation, so correcting it cannot change encoded output.
        4
    }
}
/// An `f64` is stored as the big-endian bytes of its IEEE-754 `u64` bit pattern.
impl BitEncode for f64 {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(&unsafe { mem::transmute::<f64, u64>(*self) }, e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        8
    }
}
impl BitEncode for bool {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        let bit = *self as u8;
        let index = e.data.len();
        // If the immediately preceding write was also a bool (the open bool
        // byte is the last byte of the buffer) and it still has room, pack
        // this bit into that byte, right to left.
        if e.bool_index == index && e.bool_shift < 7 {
            e.bool_shift += 1;
            e.data[index - 1] |= bit << e.bool_shift;
        } else {
            // Otherwise start a new byte and remember its position so later
            // consecutive bools can pack into it.
            e.bool_index = index + 1;
            e.bool_shift = 0;
            e.data.push(bit);
        }
    }
}
impl BitEncode for usize {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // A bare usize is just the variable-length size prefix with no payload.
        e.size_with_reserve(*self, 0);
    }
}
impl BitEncode for [u8] {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Length prefix; this also reserves room for the payload (item_size = 1).
        e.size_with_reserve(self.len(), 1);
        unsafe {
            let len = e.data.len();
            // Encoder::size must reserve capacity beforehand
            debug_assert!(e.data.capacity() >= len + self.len());
            // SAFETY: capacity for `self.len()` extra bytes was reserved just
            // above, and the two buffers cannot overlap, so the raw copy and
            // the subsequent `set_len` stay in bounds.
            ptr::copy_nonoverlapping(
                self.as_ptr(),
                e.data.as_mut_ptr().offset(len as isize),
                self.len()
            );
            e.data.set_len(len + self.len());
        }
    }
    #[inline(always)]
    fn size_hint() -> usize {
        16
    }
}
// Forwards BitEncode through a shared reference, with a per-type size hint.
macro_rules! impl_deref {
    ($t:ty, $size:expr) => {
        impl<'a> BitEncode for &'a $t {
            #[inline(always)]
            fn encode(&self, e: &mut Encoder) {
                BitEncode::encode(*self, e);
            }
            #[inline(always)]
            fn size_hint() -> usize {
                $size
            }
        }
    }
}
impl_deref!(u16, 2);
impl_deref!(u32, 4);
impl_deref!(u64, 8);
impl_deref!(i8, 1);
impl_deref!(i16, 2);
impl_deref!(i32, 4);
impl_deref!(i64, 8);
impl_deref!(f32, 4);
impl_deref!(f64, 8);
impl_deref!(usize, 1);
impl_deref!(bool, 1);
impl_deref!([u8], 16);
// Implements BitEncode for fixed-size arrays (and references to them) by
// delegating to the slice impls; the `+ 1` in the hints accounts for the
// length prefix.
macro_rules! impl_array {
    ($( $size:expr ),*) => {
        $(
            impl BitEncode for [u8; $size] {
                #[inline(always)]
                fn encode(&self, e: &mut Encoder) {
                    BitEncode::encode(AsRef::<[u8]>::as_ref(self), e);
                }
                #[inline(always)]
                fn size_hint() -> usize {
                    $size + 1
                }
            }
            impl<'a> BitEncode for &'a [u8; $size] {
                #[inline(always)]
                fn encode(&self, e: &mut Encoder) {
                    BitEncode::encode(AsRef::<[u8]>::as_ref(self), e);
                }
                #[inline(always)]
                fn size_hint() -> usize {
                    $size + 1
                }
            }
            impl<E: BitEncode> BitEncode for [E; $size] {
                #[inline(always)]
                fn encode(&self, e: &mut Encoder) {
                    BitEncode::encode(AsRef::<[E]>::as_ref(self), e);
                }
                #[inline(always)]
                fn size_hint() -> usize {
                    $size * E::size_hint() + 1
                }
            }
            impl<'a, E: BitEncode> BitEncode for &'a [E; $size] {
                #[inline(always)]
                fn encode(&self, e: &mut Encoder) {
                    BitEncode::encode(AsRef::<[E]>::as_ref(self), e);
                }
                #[inline(always)]
                fn size_hint() -> usize {
                    $size * E::size_hint() + 1
                }
            }
        )*
    }
}
// Pre-generics-over-const, array impls must be stamped out per length.
impl_array!(
    0,
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
);
impl<'a> BitEncode for &'a Vec<u8> {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Delegate to the `[u8]` impl (length prefix + raw bytes).
        BitEncode::encode(AsRef::<[u8]>::as_ref(*self), e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        16
    }
}
impl<'a> BitEncode for &'a str {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Strings are encoded as their UTF-8 bytes with a length prefix.
        BitEncode::encode(self.as_bytes(), e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        16
    }
}
impl BitEncode for String {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(self.as_bytes(), e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        16
    }
}
impl<'a> BitEncode for &'a String {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(self.as_bytes(), e);
    }
    #[inline(always)]
    fn size_hint() -> usize {
        16
    }
}
impl<E: BitEncode> BitEncode for [E] {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Length prefix plus element-by-element encoding; the per-item
        // size hint lets the prefix write reserve the payload up front.
        e.size_with_reserve(self.len(), E::size_hint());
        for item in self {
            BitEncode::encode(item, e);
        }
    }
}
impl<'a, E: BitEncode> BitEncode for &'a [E] {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(*self, e);
    }
}
impl<E: BitEncode> BitEncode for Vec<E> {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        // Vectors encode exactly like slices of their elements.
        BitEncode::encode(AsRef::<[E]>::as_ref(self), e);
    }
}
impl<'a, E: BitEncode> BitEncode for &'a Vec<E> {
    #[inline(always)]
    fn encode(&self, e: &mut Encoder) {
        BitEncode::encode(AsRef::<[E]>::as_ref(*self), e);
    }
}
// Implements BitEncode for tuples (and references to tuples) of encodable
// elements: fields are written in order with no prefix, and the size hint is
// the sum of the element hints.
macro_rules! impl_tuple {
    ($( $l:ident: $n:tt ),*) => {
        impl<$($l),*> BitEncode for ($($l),*) where
            $(
                $l: BitEncode,
            )*
        {
            #[inline(always)]
            fn encode(&self, e: &mut Encoder) {
                // Reserve the whole tuple's worth of bytes once, up front.
                e.data.reserve(Self::size_hint());
                $(
                    self.$n.encode(e);
                )*
            }
            #[inline]
            fn size_hint() -> usize {
                $( $l::size_hint() + )* 0
            }
        }
        impl<'a, $($l),*> BitEncode for &'a ($($l),*) where
            $(
                $l: BitEncode,
            )*
        {
            #[inline(always)]
            fn encode(&self, e: &mut Encoder) {
                BitEncode::encode(*self, e);
            }
            #[inline]
            fn size_hint() -> usize {
                $( $l::size_hint() + )* 0
            }
        }
    }
}
impl_tuple!(A: 0, B: 1);
impl_tuple!(A: 0, B: 1, C: 2);
impl_tuple!(A: 0, B: 1, C: 2, D: 3);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4, F: 5);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4, F: 5, G: 6);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4, F: 5, G: 6, H: 7);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4, F: 5, G: 6, H: 7, I: 8);
impl_tuple!(A: 0, B: 1, C: 2, D: 3, E: 4, F: 5, G: 6, H: 7, I: 8, J: 9);
|
// Copyright 2018 Serde Developers
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::error::{Error, Result};
use serde::de::{self, Deserialize, DeserializeSeed, SeqAccess, Visitor};
use std::ops::{AddAssign, MulAssign};
pub struct Deserializer<'de> {
    // This byte slice starts as the full input; bytes are truncated off
    // the beginning as data is parsed.
    input: &'de [u8],
}
impl<'de> Deserializer<'de> {
    // By convention, `Deserializer` constructors are named like `from_xyz`.
    // That way basic use cases are satisfied by something like
    // `serde_json::from_bytes(...)` while advanced use cases that require a
    // deserializer can make one with `serde_json::Deserializer::from_bytes(...)`.
    pub fn from_bytes(input: &'de [u8]) -> Self {
        Deserializer { input }
    }
}
// By convention, the public API of a Serde deserializer is one or more
// `from_xyz` methods such as `from_str`, `from_bytes`, or `from_reader`
// depending on what Rust types the deserializer is able to consume as input.
//
// This basic deserializer supports only `from_bytes`.
pub fn from_bytes<'a, T>(s: &'a [u8]) -> Result<T>
where
T: Deserialize<'a>,
{
let mut deserializer = Deserializer::from_bytes(s);
let t = T::deserialize(&mut deserializer)?;
if deserializer.input.is_empty() {
Ok(t)
} else {
Err(Error::TrailingCharacters)
}
}
// SERDE IS NOT A PARSING LIBRARY. This impl block defines a few basic parsing
// functions from scratch. More complicated formats may wish to use a dedicated
// parsing library to help implement their Serde deserializer.
impl<'de> Deserializer<'de> {
// Look at the first character in the input without consuming it.
fn peek_u8(&mut self) -> Result<u8> {
self.input.iter().copied().next().ok_or(Error::Eof)
}
// Consume the first character in the input.
fn next_u8(&mut self) -> Result<u8> {
let b = self.peek_u8()?;
self.input = &self.input[1..];
Ok(b)
}
// Parse a group of decimal digits as an unsigned integer of type T.
//
// This implementation is a bit too lenient, for example `001` is not
// allowed in JSON. Also the various arithmetic operations can overflow and
// panic or return bogus data. But it is good enough for example code!
fn parse_u8<T>(&mut self) -> Result<T>
where
T: AddAssign<T> + MulAssign<T> + From<u8>,
{
let b = self.next_u8()?;
Ok(T::from(b))
}
fn parse_u16<T>(&mut self) -> Result<T>
where
T: AddAssign<T> + MulAssign<T> + From<u16>,
{
let mut count = 0;
let mut value: [u8; 2] = [0; 2];
while count < 2 {
value[count] = self.next_u8()?;
count += 1;
}
Ok(T::from(u16::from_be_bytes(value)))
}
fn parse_u32<T>(&mut self) -> Result<T>
where
T: AddAssign<T> + MulAssign<T> + From<u32>,
{
let mut count = 0;
let mut value: [u8; 4] = [0; 4];
while count < 4 {
value[count] = self.next_u8()?;
count += 1;
}
Ok(T::from(u32::from_be_bytes(value)))
}
fn parse_u64<T>(&mut self) -> Result<T>
where
T: AddAssign<T> + MulAssign<T> + From<u64>,
{
let mut count = 0;
let mut value: [u8; 8] = [0; 8];
while count < 8 {
value[count] = self.next_u8()?;
count += 1;
}
Ok(T::from(u64::from_be_bytes(value)))
}
fn parse_vec_u8<T>(&mut self) -> Result<T>
where
T: From<Vec<u8>>,
{
let bytes: Vec<u8> = self.input[..].to_vec();
self.input = &[];
Ok(T::from(bytes))
}
}
impl<'de, 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> {
    type Error = Error;
    // Look at the input data to decide what Serde data model type to
    // deserialize as. Not all data formats are able to support this operation.
    // Formats that support `deserialize_any` are known as self-describing.
    //
    // NOTE(review): this binary format is NOT self-describing — raw bytes
    // carry no type tags — so this can only guess. It assumes `u8`, which is
    // wrong for any other payload; confirm callers never rely on it.
    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        self.deserialize_u8(visitor)
    }
    // Booleans have no defined encoding in this format, so this is left
    // unimplemented rather than guessing at a representation.
    fn deserialize_bool<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Signed integers are not supported by this format; the next four methods
    // are therefore all unimplemented.
    fn deserialize_i8<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    fn deserialize_i16<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    fn deserialize_i32<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    fn deserialize_i64<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Unsigned integers are read big-endian via the parse_* helpers above.
    fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_u8(self.parse_u8()?)
    }
    fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_u16(self.parse_u16()?)
    }
    fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_u32(self.parse_u32()?)
    }
    fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_u64(self.parse_u64()?)
    }
    // Float parsing is stupidly hard.
    fn deserialize_f32<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Float parsing is stupidly hard.
    fn deserialize_f64<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // No char encoding is defined for this format.
    fn deserialize_char<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        // Parse a string, check that it is one character, call `visit_char`.
        unimplemented!()
    }
    // Refer to the "Understanding deserializer lifetimes" page for information
    // about the three deserialization flavors of strings in Serde.
    fn deserialize_str<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    fn deserialize_string<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Borrowed byte slices are unsupported; only owned byte buffers (below)
    // can be produced from this input.
    fn deserialize_bytes<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // A byte buffer greedily consumes ALL remaining input, so it only makes
    // sense as the final field of a struct.
    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        let bytes = self.parse_vec_u8()?;
        visitor.visit_byte_buf(bytes)
    }
    // Optionals have no defined encoding in this format (there is no tag byte
    // to distinguish Some from None).
    fn deserialize_option<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // In Serde, unit means an anonymous value containing no data.
    fn deserialize_unit<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Unit struct means a named value containing no data.
    fn deserialize_unit_struct<V>(
        self,
        _name: &'static str,
        _visitor: V,
    ) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // As is done here, serializers are encouraged to treat newtype structs as
    // insignificant wrappers around the data they contain. That means not
    // parsing anything other than the contained value.
    fn deserialize_newtype_struct<V>(
        self,
        _name: &'static str,
        visitor: V,
    ) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_newtype_struct(self)
    }
    // Deserialization of compound types like sequences and maps happens by
    // passing the visitor an "Access" object that gives it the ability to
    // iterate through the data contained in the sequence.
    fn deserialize_seq<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // As indicated by the length parameter, the `Deserialize` implementation
    // for a tuple in the Serde data model is required to know the length of
    // the tuple before even looking at the input data.
    fn deserialize_tuple<V>(self, _len: usize, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    fn deserialize_tuple_struct<V>(
        self,
        _name: &'static str,
        _len: usize,
        _visitor: V,
    ) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Much like `deserialize_seq` but calls the visitors `visit_map` method
    // with a `MapAccess` implementation, rather than the visitor's `visit_seq`
    // method with a `SeqAccess` implementation.
    fn deserialize_map<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Structs are read as a plain back-to-back sequence of their fields.
    //
    // Notice the `fields` parameter - a "struct" in the Serde data model means
    // that the `Deserialize` implementation is required to know what the fields
    // are before even looking at the input data. That is essential here:
    // `BytesSeparated` below never signals end-of-sequence, so the visitor's
    // own field count is the only thing that stops iteration.
    fn deserialize_struct<V>(
        mut self,
        _name: &'static str,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        visitor.visit_seq(BytesSeparated::new(&mut self))
    }
    fn deserialize_enum<V>(
        self,
        _name: &'static str,
        _variants: &'static [&'static str],
        _visitor: V,
    ) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // An identifier in Serde is the type that identifies a field of a struct or
    // the variant of an enum. In JSON, struct fields and enum variants are
    // represented as strings. In other formats they may be represented as
    // numeric indices.
    fn deserialize_identifier<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
    // Like `deserialize_any` but indicates to the `Deserializer` that it makes
    // no difference which `Visitor` method is called because the data is
    // ignored. Without type tags in the input, skipping is impossible here.
    fn deserialize_ignored_any<V>(self, _visitor: V) -> Result<V::Value>
    where
        V: Visitor<'de>,
    {
        unimplemented!()
    }
}
/// Sequence access over the remaining input bytes.
///
/// Unlike a delimited format, this format has no separators between elements:
/// fields are read back-to-back and the visitor decides how many elements to
/// request (see `deserialize_struct`).
struct BytesSeparated<'a, 'de: 'a> {
    de: &'a mut Deserializer<'de>,
}
impl<'a, 'de> BytesSeparated<'a, 'de> {
    fn new(de: &'a mut Deserializer<'de>) -> Self {
        BytesSeparated { de }
    }
}
// `SeqAccess` is provided to the `Visitor` to give it the ability to iterate
// through elements of the sequence.
//
// NOTE(review): this implementation never returns `Ok(None)`, i.e. it never
// signals end-of-sequence. It is therefore only usable where the caller knows
// the element count up front (struct fields); a plain `Vec` would loop until
// `Error::Eof`.
impl<'de, 'a> SeqAccess<'de> for BytesSeparated<'a, 'de> {
    type Error = Error;
    fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>>
    where
        T: DeserializeSeed<'de>,
    {
        // Deserialize an array element.
        seed.deserialize(&mut *self.de).map(Some)
    }
}
////////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
    use super::from_bytes;
    use crate::value_type::Word;
    use serde_derive::Deserialize;
    // Round-trip a struct whose fields exactly cover the 19 input bytes:
    // 1 (u8) + 2 (u16) + 4 (u32) + 8 (u64) + 2 (Word) + 2 (byte buf tail).
    #[test]
    fn test_struct() {
        #[derive(Deserialize, PartialEq, Debug)]
        struct Test {
            b8: u8,
            b16: u16,
            b32: u32,
            b64: u64,
            word: Word,
            // `serde_bytes` makes the Vec deserialize via `deserialize_byte_buf`,
            // which greedily consumes the rest of the input — so it must be last.
            #[serde(with = "serde_bytes")]
            v8: Vec<u8>,
        }
        let test = [
            0x00u8, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A,
            0x0B, 0x0C, 0x0D, 0x0E, 0x34, 0x12, 0x0F, 0x10,
        ]
        .to_vec();
        // Integers are big-endian, hence b16 == 0x0102 from bytes 01 02, etc.
        let expected = Test {
            b8: 0x00u8,
            b16: 0x0102u16,
            b32: 0x03040506u32,
            b64: 0x0708090A0B0C0D0Eu64,
            word: Word(0x1234u16),
            v8: vec![0x0Fu8, 0x10],
        };
        assert_eq!(from_bytes::<Test>(&test[..]).unwrap(), expected);
    }
}
|
pub mod compass;
pub mod lsm;
pub mod motor;
pub mod pca; |
use std::collections::HashMap;
use crate::blog_clusters::BlogClusters;
use crate::hlf_parser::{parse, HlfLhs, HlfRhs, Symbol};
use crate::shared::{path_title, HTMLTemplate};
use crate::tag::Tag; // for template filling
/// Template for the blog homepage, driven by parsed HLF grammar rules.
pub struct HomepageTemplate {
    // Map from rule name (LHS) to its replacement sequence (RHS).
    hlfs: HashMap<HlfLhs, HlfRhs>,
}
impl HTMLTemplate for HomepageTemplate {
fn load(template_raw: &str) -> Result<Self, String> {
let hlfs_vec = match parse(&template_raw) {
Some(x) => x,
None => return Err("template parse failed".to_string()),
};
let mut hlfs = HashMap::new();
for i in hlfs_vec.iter() {
hlfs.insert(i.lhs.clone(), i.rhs.clone());
}
Ok(Self { hlfs: hlfs })
}
fn fill(&self, cluster: &BlogClusters) -> Vec<(String, String)> {
let mut result = String::new();
let main = self.hlfs.get("main").expect("main symbol not found");
let blog_chunk_rhs = match main.get(1).unwrap() {
Symbol::N(x) => self.hlfs.get(x).expect(&format!("{} symbol not found.", x)),
_ => panic!(),
};
let tags_rhs = match blog_chunk_rhs.get(1).unwrap() {
Symbol::N(x) => self.hlfs.get(x).expect(&format!("{} symbol not found.", x)),
_ => panic!(),
};
let tag_rhs = match tags_rhs.get(1).unwrap() {
Symbol::N(x) => self.hlfs.get(x).expect(&format!("{} symbol not found.", x)),
_ => panic!(),
};
match main.get(0).unwrap() {
Symbol::T(x) => result.push_str(x),
_ => panic!(),
};
let blogs = cluster.get_blogs();
for blog in blogs {
match blog_chunk_rhs.get(0).unwrap() {
Symbol::T(x) => result.push_str(
&x.replace("_slot_of_blog_path", &(path_title(&blog.title) + ".html"))
.replace("_slot_of_blog_title", &blog.title)
.replace("_slot_of_blog_preview", &blog.preview),
),
_ => panic!(),
}
match tags_rhs.get(0).unwrap() {
Symbol::T(x) => result.push_str(x),
_ => panic!(),
}
let tags: Vec<&Tag> = blog
.tags
.iter()
.map(|x| cluster.get_tag(*x).unwrap())
.collect();
for tag in tags {
match tag_rhs.get(0).unwrap() {
Symbol::T(x) => result.push_str(&x.replace("_slot_of_tag_name", &tag.name)),
_ => panic!(),
}
}
match tags_rhs.get(2).unwrap() {
Symbol::T(x) => result.push_str(x),
_ => panic!(),
}
match blog_chunk_rhs.get(2).unwrap() {
Symbol::T(x) => result.push_str(x),
_ => panic!(),
}
}
match main.get(2).unwrap() {
Symbol::T(x) => result.push_str(x),
_ => panic!(),
}
vec![("index.html".to_string(), result)]
}
}
|
// Copied from:
// https://github.com/stm32-rs/stm32-eth/blob/master/examples/ip.rs
#![no_main]
#![no_std]
extern crate stm32f4xx_hal as hal;
#[allow(unused_imports)]
use panic_semihosting;
use crate::hal::{prelude::*, serial::config::Config, serial::Serial, stm32, stm32::interrupt};
use core::cell::RefCell;
use core::fmt::Write;
use cortex_m::asm;
use cortex_m::interrupt::Mutex;
use cortex_m_rt::{entry, exception, ExceptionFrame};
use smoltcp::iface::{EthernetInterfaceBuilder, NeighborCache};
use smoltcp::socket::{SocketSet, TcpSocket, TcpSocketBuffer};
use smoltcp::time::Instant;
use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr, Ipv4Address};
use stm32_eth::{Eth, RingEntry};
// Locally administered MAC address used for the board's ethernet interface.
const SRC_MAC: [u8; 6] = [0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF];
// Millisecond uptime counter, advanced by the SysTick handler; read by main.
static TIME: Mutex<RefCell<u64>> = Mutex::new(RefCell::new(0));
// Set by the ETH interrupt to signal pending ethernet work to the main loop.
static ETH_PENDING: Mutex<RefCell<bool>> = Mutex::new(RefCell::new(false));
#[entry]
fn main() -> ! {
    let dp = stm32::Peripherals::take().expect("Failed to take stm32::Peripherals")
;
    let mut cp =
        cortex_m::peripheral::Peripherals::take().expect("Failed to take cortex_m::Peripherals");
    setup_systick(&mut cp.SYST);
    stm32_eth::setup(&dp.RCC, &dp.SYSCFG);
    // Set up the system clock. We want to run at 48MHz for this one.
    let rcc = dp.RCC.constrain();
    let clocks = rcc.cfgr.sysclk(48.mhz()).freeze();
    // Setup USART3 on PD8 (TX) / PD9 (RX) for status output.
    let gpiod = dp.GPIOD.split();
    let pin_tx = gpiod.pd8.into_alternate_af7();
    let pin_rx = gpiod.pd9.into_alternate_af7();
    let serial = Serial::usart3(
        dp.USART3,
        (pin_tx, pin_rx),
        Config {
            baudrate: 115_200.bps(),
            ..Default::default()
        },
        clocks,
    )
    .unwrap();
    // TODO
    let (mut stdout, _rx) = serial.split();
    //unsafe {
    //    log::set_logger(&LOGGER).unwrap();
    //}
    //log::set_max_level(LevelFilter::Info);
    writeln!(stdout, "Enabling ethernet...").unwrap();
    // Route the RMII ethernet pins to the MAC.
    let gpioa = dp.GPIOA.split();
    let gpiob = dp.GPIOB.split();
    let gpioc = dp.GPIOC.split();
    let gpiog = dp.GPIOG.split();
    stm32_eth::setup_pins(
        gpioa.pa1, gpioa.pa2, gpioa.pa7, gpiob.pb13, gpioc.pc1, gpioc.pc4, gpioc.pc5, gpiog.pg11,
        gpiog.pg13,
    );
    // DMA descriptor rings for receive (16 entries) and transmit (8 entries).
    let mut rx_ring: [RingEntry<_>; 16] = Default::default();
    let mut tx_ring: [RingEntry<_>; 8] = Default::default();
    let mut eth = Eth::new(
        dp.ETHERNET_MAC,
        dp.ETHERNET_DMA,
        SRC_MAC,
        &mut rx_ring[..],
        &mut tx_ring[..],
    );
    eth.enable_interrupt(&mut cp.NVIC);
    // Static IP configuration: 192.168.1.39/24.
    let local_addr = Ipv4Address::new(192, 168, 1, 39);
    //let local_addr = Ipv4Address::new(10, 0, 0, 1);
    let ip_addr = IpCidr::new(IpAddress::from(local_addr), 24);
    let mut ip_addrs = [ip_addr];
    let mut neighbor_storage = [None; 16];
    let neighbor_cache = NeighborCache::new(&mut neighbor_storage[..]);
    let ethernet_addr = EthernetAddress(SRC_MAC);
    let mut iface = EthernetInterfaceBuilder::new(&mut eth)
        .ethernet_addr(ethernet_addr)
        .ip_addrs(&mut ip_addrs[..])
        .neighbor_cache(neighbor_cache)
        .finalize();
    // One TCP socket with statically allocated buffers.
    let mut server_rx_buffer = [0; 2048];
    let mut server_tx_buffer = [0; 2048];
    let server_socket = TcpSocket::new(
        TcpSocketBuffer::new(&mut server_rx_buffer[..]),
        TcpSocketBuffer::new(&mut server_tx_buffer[..]),
    );
    let mut sockets_storage = [None, None];
    let mut sockets = SocketSet::new(&mut sockets_storage[..]);
    let server_handle = sockets.add(server_socket);
    writeln!(stdout, "Ready, listening at {}", ip_addr).unwrap();
    // Main event loop: poll the interface, serve "hello" on port 80, and
    // sleep via WFI when nothing is pending.
    loop {
        let time: u64 = cortex_m::interrupt::free(|cs| *TIME.borrow(cs).borrow());
        cortex_m::interrupt::free(|cs| {
            let mut eth_pending = ETH_PENDING.borrow(cs).borrow_mut();
            *eth_pending = false;
        });
        match iface.poll(&mut sockets, Instant::from_millis(time as i64)) {
            Ok(true) => {
                let mut socket = sockets.get::<TcpSocket>(server_handle);
                if !socket.is_open() {
                    socket
                        .listen(80)
                        .or_else(|e| writeln!(stdout, "TCP listen error: {:?}", e))
                        .unwrap();
                }
                // Send a one-shot greeting and close the connection.
                if socket.can_send() {
                    write!(socket, "hello\n")
                        .map(|_| {
                            socket.close();
                        })
                        .or_else(|e| writeln!(stdout, "TCP send error: {:?}", e))
                        .unwrap();
                }
            }
            Ok(false) => {
                // Sleep if no ethernet work is pending
                cortex_m::interrupt::free(|cs| {
                    let eth_pending = ETH_PENDING.borrow(cs).borrow_mut();
                    if !*eth_pending {
                        asm::wfi();
                        // Awaken by interrupt
                    }
                });
            }
            Err(e) =>
            // Ignore malformed packets
            {
                writeln!(stdout, "Error: {:?}", e).unwrap()
            }
        }
    }
}
/// Configure SysTick to fire every millisecond (the calibration value is
/// ticks per 10 ms, so dividing by 10 gives a 1 ms reload).
fn setup_systick(syst: &mut stm32::SYST) {
    syst.set_reload(stm32::SYST::get_ticks_per_10ms() / 10);
    syst.clear_current();
    syst.enable_counter();
    syst.enable_interrupt();
}
// SysTick handler: advance the shared millisecond counter used as the
// smoltcp clock in `main`.
#[exception]
fn SysTick() {
    cortex_m::interrupt::free(|cs| {
        let mut time = TIME.borrow(cs).borrow_mut();
        *time += 1;
    })
}
// Ethernet interrupt: flag pending work for the main loop, then acknowledge
// the DMA interrupt so it does not refire immediately.
#[interrupt]
fn ETH() {
    cortex_m::interrupt::free(|cs| {
        let mut eth_pending = ETH_PENDING.borrow(cs).borrow_mut();
        *eth_pending = true;
    });
    // Clear interrupt flags
    let p = unsafe { stm32::Peripherals::steal() };
    stm32_eth::eth_interrupt_handler(&p.ETHERNET_DMA);
}
// Report hard faults via panic (routed to semihosting by `panic_semihosting`).
#[exception]
fn HardFault(ef: &ExceptionFrame) -> ! {
    panic!("HardFault at {:#?}", ef);
}
// Catch-all for any interrupt without a dedicated handler.
#[exception]
fn DefaultHandler(irqn: i16) {
    panic!("Unhandled exception (IRQn = {})", irqn);
}
|
//! Manage the music database and provide `Track`, `Playlist` and `Token` structs
//!
//! This crate can be used to search, get all playlists, find a certain token and do a lot of other
//! useful stuff. The underlying implementation uses a SQLite database and manages all information
//! with some tables. It is used in the `server` binary, `sync` library and other libraries which
//! have to alter the database in a stable way.
//!
//! ## Example:
//! ```rust,no_run
//! extern crate hex_database;
//! extern crate hex_gossip;
//!
//! use std::path::Path;
//! use hex_database::{Instance, View};
//! use hex_gossip::GossipConf;
//!
//! pub fn main() {
//! let instance = Instance::from_file("/opt/music/music.db", GossipConf::new());
//! let view = instance.view();
//! for playlist in view.get_playlists() {
//! println!("{:#?}", playlist);
//! }
//! }
//! ```
#[cfg(feature="rusqlite")]
extern crate rusqlite;
#[cfg(feature="serde")]
#[macro_use]
extern crate serde;
#[cfg(feature="sha2")]
extern crate sha2;
#[cfg(feature="rusqlite")]
extern crate hex_gossip;
#[cfg(feature="rusqlite")]
extern crate bincode;
#[cfg(feature="rusqlite")]
extern crate futures;
#[cfg(feature="rusqlite")]
extern crate tokio;
#[macro_use]
extern crate log;
pub mod error;
pub mod objects;
pub mod search;
pub mod events;
pub mod utils;
mod transition;
mod file;
#[cfg(feature="rusqlite")]
mod instance;
mod read;
mod write;
pub use error::{Result, Error};
pub use events::{Action, Event};
pub use transition::TransitionAction;
pub use objects::{Track, Playlist, Token, TrackKey, PlaylistKey, TokenId};
#[cfg(feature="rusqlite")]
pub use instance::Instance;
pub use read::Reader;
pub use write::Writer;
pub use file::Files;
#[cfg(feature="hex-gossip")]
pub use hex_gossip::{GossipConf, Transition};
#[cfg(not(feature = "hex-gossip"))]
mod gossip_dummy {
    //! Stand-in types used when the `hex-gossip` crate is not compiled in,
    //! mirroring the shapes of the real gossip types. (Renamed from
    //! `GossipDummy` to satisfy Rust's snake_case module convention.)
    //!
    //! NOTE(review): the serde derives below require the `serde` feature's
    //! `#[macro_use] extern crate serde` — confirm feature combinations.
    pub type PeerId = Vec<u8>;
    /// Transition key is the 256bit hash of the body
    #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct TransitionKey(pub [u8; 32]);
    /// A signed transition in a DAG
    #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct Transition {
        pub key: TransitionKey,
        pub pk: PeerId,
        pub refs: Vec<TransitionKey>,
        pub body: Option<Vec<u8>>,
        // NOTE(review): 32 bytes, while typical ed25519 signatures are 64 —
        // confirm against the real hex_gossip::Transition layout.
        pub sign: [u8; 32],
        pub state: u8
    }
}
#[cfg(not(feature = "hex-gossip"))]
pub use gossip_dummy::Transition;
|
pub mod client;
mod model; |
use std::cell::RefCell;
use std::cmp;
use std::collections::VecDeque;
use std::rc::Rc;
/// Binary tree node with shared, interior-mutable children.
#[derive(Debug, PartialEq, Eq)]
pub struct TreeNode {
    pub val: i32,
    pub left: Option<Rc<RefCell<TreeNode>>>,
    pub right: Option<Rc<RefCell<TreeNode>>>,
}
#[allow(dead_code)]
impl TreeNode {
    /// Build a leaf node holding `val`, with no children.
    #[inline]
    pub fn new(val: i32) -> Self {
        Self {
            val,
            left: None,
            right: None,
        }
    }
}
/// Return the maximum depth of the tree (number of nodes on the longest
/// root-to-leaf path); `None` has depth 0.
///
/// Fix: the original used `borrow_mut` + `Option::take`, which *removed*
/// every child while measuring — a read-only query that destroyed the tree.
/// Cloning the `Option<Rc<..>>` (a cheap refcount bump) leaves it intact.
#[allow(dead_code)]
fn max_depth(root: Option<Rc<RefCell<TreeNode>>>) -> i32 {
    match root {
        None => 0,
        Some(node) => {
            let node_ref = node.borrow();
            let left_depth = max_depth(node_ref.left.clone());
            let right_depth = max_depth(node_ref.right.clone());
            1 + cmp::max(left_depth, right_depth)
        }
    }
}
/// Iterative (BFS) maximum depth: one queue level per depth unit.
///
/// Fix: like `max_depth`, the original `take()`-ed children out of each node,
/// destroying the tree as it measured it; cloning the `Rc` handles is cheap
/// and leaves the tree unchanged.
#[allow(dead_code)]
fn max_depth_iterative(root: Option<Rc<RefCell<TreeNode>>>) -> i32 {
    let mut queue: VecDeque<Rc<RefCell<TreeNode>>> = VecDeque::new();
    if let Some(node) = root {
        queue.push_back(node);
    }
    let mut depth = 0;
    while !queue.is_empty() {
        depth += 1;
        // Drain exactly the nodes that form the current level; `queue.len()`
        // is evaluated once, before this level's children are enqueued.
        for _ in 0..queue.len() {
            let node = queue.pop_front().unwrap();
            let borrowed = node.borrow();
            if let Some(left) = borrowed.left.clone() {
                queue.push_back(left);
            }
            if let Some(right) = borrowed.right.clone() {
                queue.push_back(right);
            }
        }
    }
    depth
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_max_depth() {
        run_tests(max_depth);
    }
    #[test]
    fn test_max_depth_iterative() {
        run_tests(max_depth_iterative);
    }
    // Shared driver: both implementations must agree on a 3-node tree
    // (depth 2) and on a lone root (depth 1).
    fn run_tests<F: Fn(Option<Rc<RefCell<TreeNode>>>) -> i32>(fun: F) {
        assert_eq!(fun(make_tree(1, 2, 3)), 2);
        assert_eq!(fun(Some(Rc::new(RefCell::new(TreeNode::new(50))))), 1);
    }
    // Build a depth-2 tree: `middle` as root with `left`/`right` leaves.
    fn make_tree(middle: i32, left: i32, right: i32) -> Option<Rc<RefCell<TreeNode>>> {
        let left = Some(Rc::new(RefCell::new(TreeNode::new(left))));
        let right = Some(Rc::new(RefCell::new(TreeNode::new(right))));
        let middle = TreeNode {
            val: middle,
            left,
            right,
        };
        Some(Rc::new(RefCell::new(middle)))
    }
}
|
use std::collections::HashSet;
use std::fs;
use std::time::Instant;
// Advent of Code 2020 day 6: count "yes" answers per group.
// Validates both parts against the worked example before running on the
// real input, and reports total wall-clock time.
fn main() {
    let start = Instant::now();
    let str_test = fs::read_to_string("example.txt").expect("Error in reading file");
    let all_groups = split_by_empty_line(&str_test);
    // Known example results: 11 (anyone answered) and 6 (everyone answered).
    let sum_pt1 = sum_part1(&all_groups);
    assert_eq!(sum_pt1, 11);
    let sum_pt2 = sum_part2(&all_groups);
    assert_eq!(sum_pt2, 6);
    let str_in = fs::read_to_string("input.txt").expect("Error in reading file");
    let all_groups = split_by_empty_line(&str_in);
    let sum_pt1 = sum_part1(&all_groups);
    println!("Sum part1 = {}", sum_pt1);
    let sum_pt2 = sum_part2(&all_groups);
    println!("Sum part2 = {}", sum_pt2);
    let duration = start.elapsed();
    println!("Finished after {:?}", duration);
}
/// Part 1: total, across groups, of the questions *anyone* in the group
/// answered "yes" to. (`&[String]` generalizes the old `&Vec<String>`;
/// existing `&Vec` call sites still coerce.)
fn sum_part1(groups: &[String]) -> usize {
    groups.iter().map(|g| cnt_all_answers_in(g)).sum()
}
/// Count the distinct questions answered "yes" by anyone in one group
/// (one person per line).
///
/// Uses a `HashSet` for O(1) membership instead of the original
/// `Vec::contains` scan; takes `&str` instead of `&String`.
fn cnt_all_answers_in(group: &str) -> usize {
    group
        .lines()
        .flat_map(|person| person.chars())
        .collect::<HashSet<char>>()
        .len()
}
/// Part 2: total, across groups, of the questions *everyone* in the group
/// answered "yes" to. (`&[String]` generalizes the old `&Vec<String>`.)
fn sum_part2(groups: &[String]) -> usize {
    groups.iter().map(|g| cnt_common_answer_in(g)).sum()
}
/// Count the questions that *every* person in the group answered "yes" to:
/// start from the full alphabet and intersect with each person's answers.
///
/// Note: an empty group leaves the full alphabet and returns 26, exactly as
/// the original did. Self-contained via `retain` instead of the
/// `strip_characters` helper; takes `&str` instead of `&String`.
fn cnt_common_answer_in(group: &str) -> usize {
    let mut common: Vec<char> = ('a'..='z').collect();
    for person in group.split_whitespace() {
        common.retain(|&c| person.contains(c));
    }
    common.len()
}
/// Return a copy of `original` containing only the characters that also
/// appear in `to_keep`, preserving order and duplicates.
fn strip_characters(original : &str, to_keep : &str) -> String {
    let mut kept = String::with_capacity(original.len());
    for c in original.chars() {
        if to_keep.contains(c) {
            kept.push(c);
        }
    }
    kept
}
/// Split the input into groups separated by blank lines; each group keeps its
/// lines joined with trailing `\n`s.
///
/// Fix: the original pushed an *empty* group for consecutive blank lines and
/// always pushed the (possibly empty) final buffer. An empty group would make
/// `cnt_common_answer_in` contribute a spurious 26 to part 2. Empty groups
/// are now skipped. Takes `&str` instead of `&String`.
fn split_by_empty_line(input: &str) -> Vec<String> {
    let mut groups = Vec::new();
    let mut current = String::new();
    for line in input.lines() {
        if line.is_empty() {
            // Blank line ends the current group (if it holds anything).
            if !current.is_empty() {
                groups.push(std::mem::take(&mut current));
            }
        } else {
            current.push_str(line);
            current.push('\n');
        }
    }
    // Flush the final group when the input doesn't end with a blank line.
    if !current.is_empty() {
        groups.push(current);
    }
    groups
}
|
mod directed_graph;
mod test;
pub use directed_graph::{DirectedGraph};
|
use simple_cache::{Cache, CacheItem};
// Minimal value type stored in the cache during these tests.
struct Object {
    value: i32,
    string: String,
}
// Marker impl so `Object` can be stored in a `simple_cache::Cache`.
impl CacheItem for Object {}
// End-to-end check of insert/get semantics, including the distinction the
// cache draws between "key absent" (outer None) and "key present with no
// value" (inner None after inserting `None`).
#[tokio::test]
async fn insert_and_get() {
    let cache = Cache::new();
    let object = Object {
        value: 1,
        string: String::from("test!"),
    };
    // Missing key: Ok(None).
    let cache_get = cache.get::<Object, _>("test");
    assert!(cache_get.is_ok());
    assert!(cache_get.unwrap().is_none());
    // First insert returns no previous value.
    let cache_insert = cache.insert("test", Some(object));
    assert!(cache_insert.is_ok());
    assert!(cache_insert.unwrap().is_none());
    // Stored fields round-trip intact.
    let cache_get = cache.get::<Object, _>("test").unwrap().unwrap().unwrap();
    assert_eq!(cache_get.value, 1);
    assert_eq!(cache_get.string, "test!");
    // Overwriting with `None` returns the displaced value...
    let cache_insert = cache.insert::<Object>("test", None);
    assert!(cache_insert.is_ok());
    assert!(cache_insert.unwrap().is_some());
    // ...and the key now exists but holds no value (Some(None)).
    let cache_get = cache.get::<Object, _>("test").unwrap();
    assert!(cache_get.is_some());
    assert!(cache_get.unwrap().is_none());
}
// Removing an existing key empties it; removing a missing key is an error.
#[tokio::test]
async fn remove() {
    let cache = Cache::new();
    let object = Object {
        value: 1,
        string: String::from("test!"),
    };
    let _ = cache.insert("test", Some(object));
    let _ = cache.remove("test");
    // After removal the key reads as absent.
    let cache_get = cache.get::<Object, _>("test");
    assert!(cache_get.is_ok());
    assert!(cache_get.unwrap().is_none());
    // A second remove of the same key fails.
    let cache_remove = cache.remove("test");
    assert!(cache_remove.is_err());
}
|
// NOTE(review): this section appears machine-generated (windows-sys style
// WinRT bindings for Windows.Networking.XboxLive). Runtime-class types are
// opaque `c_void` pointers; enums are transparent newtypes with associated
// constants, and `Copy`/`Clone` are written out explicitly rather than
// derived, matching the generator's conventions. Keep edits out of this
// block and regenerate from metadata instead.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
pub type XboxLiveDeviceAddress = *mut ::core::ffi::c_void;
pub type XboxLiveEndpointPair = *mut ::core::ffi::c_void;
// Flags controlling endpoint-pair creation.
#[repr(transparent)]
pub struct XboxLiveEndpointPairCreationBehaviors(pub u32);
impl XboxLiveEndpointPairCreationBehaviors {
    pub const None: Self = Self(0u32);
    pub const ReevaluatePath: Self = Self(1u32);
}
impl ::core::marker::Copy for XboxLiveEndpointPairCreationBehaviors {}
impl ::core::clone::Clone for XboxLiveEndpointPairCreationBehaviors {
    fn clone(&self) -> Self {
        *self
    }
}
pub type XboxLiveEndpointPairCreationResult = *mut ::core::ffi::c_void;
// Outcome of an endpoint-pair creation attempt.
#[repr(transparent)]
pub struct XboxLiveEndpointPairCreationStatus(pub i32);
impl XboxLiveEndpointPairCreationStatus {
    pub const Succeeded: Self = Self(0i32);
    pub const NoLocalNetworks: Self = Self(1i32);
    pub const NoCompatibleNetworkPaths: Self = Self(2i32);
    pub const LocalSystemNotAuthorized: Self = Self(3i32);
    pub const Canceled: Self = Self(4i32);
    pub const TimedOut: Self = Self(5i32);
    pub const RemoteSystemNotAuthorized: Self = Self(6i32);
    pub const RefusedDueToConfiguration: Self = Self(7i32);
    pub const UnexpectedInternalError: Self = Self(8i32);
}
impl ::core::marker::Copy for XboxLiveEndpointPairCreationStatus {}
impl ::core::clone::Clone for XboxLiveEndpointPairCreationStatus {
    fn clone(&self) -> Self {
        *self
    }
}
// Lifecycle state of an endpoint pair.
#[repr(transparent)]
pub struct XboxLiveEndpointPairState(pub i32);
impl XboxLiveEndpointPairState {
    pub const Invalid: Self = Self(0i32);
    pub const CreatingOutbound: Self = Self(1i32);
    pub const CreatingInbound: Self = Self(2i32);
    pub const Ready: Self = Self(3i32);
    pub const DeletingLocally: Self = Self(4i32);
    pub const RemoteEndpointTerminating: Self = Self(5i32);
    pub const Deleted: Self = Self(6i32);
}
impl ::core::marker::Copy for XboxLiveEndpointPairState {}
impl ::core::clone::Clone for XboxLiveEndpointPairState {
    fn clone(&self) -> Self {
        *self
    }
}
pub type XboxLiveEndpointPairStateChangedEventArgs = *mut ::core::ffi::c_void;
pub type XboxLiveEndpointPairTemplate = *mut ::core::ffi::c_void;
pub type XboxLiveInboundEndpointPairCreatedEventArgs = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct XboxLiveNetworkAccessKind(pub i32);
impl XboxLiveNetworkAccessKind {
    pub const Open: Self = Self(0i32);
    pub const Moderate: Self = Self(1i32);
    pub const Strict: Self = Self(2i32);
}
impl ::core::marker::Copy for XboxLiveNetworkAccessKind {}
impl ::core::clone::Clone for XboxLiveNetworkAccessKind {
    fn clone(&self) -> Self {
        *self
    }
}
pub type XboxLiveQualityOfServiceMeasurement = *mut ::core::ffi::c_void;
// Progress/outcome of a QoS measurement.
#[repr(transparent)]
pub struct XboxLiveQualityOfServiceMeasurementStatus(pub i32);
impl XboxLiveQualityOfServiceMeasurementStatus {
    pub const NotStarted: Self = Self(0i32);
    pub const InProgress: Self = Self(1i32);
    pub const InProgressWithProvisionalResults: Self = Self(2i32);
    pub const Succeeded: Self = Self(3i32);
    pub const NoLocalNetworks: Self = Self(4i32);
    pub const NoCompatibleNetworkPaths: Self = Self(5i32);
    pub const LocalSystemNotAuthorized: Self = Self(6i32);
    pub const Canceled: Self = Self(7i32);
    pub const TimedOut: Self = Self(8i32);
    pub const RemoteSystemNotAuthorized: Self = Self(9i32);
    pub const RefusedDueToConfiguration: Self = Self(10i32);
    pub const UnexpectedInternalError: Self = Self(11i32);
}
impl ::core::marker::Copy for XboxLiveQualityOfServiceMeasurementStatus {}
impl ::core::clone::Clone for XboxLiveQualityOfServiceMeasurementStatus {
    fn clone(&self) -> Self {
        *self
    }
}
// Individual metric a QoS measurement can report.
#[repr(transparent)]
pub struct XboxLiveQualityOfServiceMetric(pub i32);
impl XboxLiveQualityOfServiceMetric {
    pub const AverageLatencyInMilliseconds: Self = Self(0i32);
    pub const MinLatencyInMilliseconds: Self = Self(1i32);
    pub const MaxLatencyInMilliseconds: Self = Self(2i32);
    pub const AverageOutboundBitsPerSecond: Self = Self(3i32);
    pub const MinOutboundBitsPerSecond: Self = Self(4i32);
    pub const MaxOutboundBitsPerSecond: Self = Self(5i32);
    pub const AverageInboundBitsPerSecond: Self = Self(6i32);
    pub const MinInboundBitsPerSecond: Self = Self(7i32);
    pub const MaxInboundBitsPerSecond: Self = Self(8i32);
}
impl ::core::marker::Copy for XboxLiveQualityOfServiceMetric {}
impl ::core::clone::Clone for XboxLiveQualityOfServiceMetric {
    fn clone(&self) -> Self {
        *self
    }
}
pub type XboxLiveQualityOfServiceMetricResult = *mut ::core::ffi::c_void;
pub type XboxLiveQualityOfServicePrivatePayloadResult = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct XboxLiveSocketKind(pub i32);
impl XboxLiveSocketKind {
    pub const None: Self = Self(0i32);
    pub const Datagram: Self = Self(1i32);
    pub const Stream: Self = Self(2i32);
}
impl ::core::marker::Copy for XboxLiveSocketKind {}
impl ::core::clone::Clone for XboxLiveSocketKind {
    fn clone(&self) -> Self {
        *self
    }
}
|
//! query-interface - dynamically query a type-erased object for any trait implementation
//!
//! ```rust
//! #[macro_use]
//! extern crate query_interface;
//! use query_interface::{Object, ObjectClone};
//! use std::fmt::Debug;
//!
//! #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
//! struct Foo;
//!
//! interfaces!(Foo: ObjectClone, Debug, Bar);
//!
//! trait Bar {
//! fn do_something(&self);
//! }
//! impl Bar for Foo {
//! fn do_something(&self) {
//! println!("I'm a Foo!");
//! }
//! }
//!
//! fn main() {
//! let obj = Box::new(Foo) as Box<Object>;
//! let obj2 = obj.clone();
//! println!("{:?}", obj2);
//!
//! obj2.query_ref::<Bar>().unwrap().do_something(); // Prints: "I'm a Foo!"
//! }
//! ```
#[cfg(feature = "dynamic")]
#[macro_use]
extern crate lazy_static;
use std::any::{TypeId, Any};
use std::ptr;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::collections::hash_map::DefaultHasher;
use std::fmt::{Debug, Display};
use std::path::PathBuf;
#[cfg(feature = "dynamic")]
#[macro_use]
pub mod dynamic;
/// Represents a trait object's vtable pointer. You shouldn't need to use this as a
/// consumer of the crate but it is required for macro expansion.
#[doc(hidden)]
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct VTable(*const ());
impl VTable {
    /// A null vtable, used as a sentinel meaning "no vtable needed" — e.g.
    /// when an `Object` is queried for its own concrete type.
    pub fn none() -> VTable {
        VTable(ptr::null())
    }
}
// SAFETY(review): a `VTable` is only ever read as an opaque pointer value and
// is never dereferenced for mutation in this crate, so sharing/sending it
// across threads looks sound — the pointee is expected to be static vtable
// data; confirm against how `vtable_for!` produces these.
unsafe impl Send for VTable {}
unsafe impl Sync for VTable {}
/// Represents a trait object's layout. You shouldn't need to use this as a
/// consumer of the crate but it is required for macro expansion.
#[doc(hidden)]
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct TraitObject {
    // Mirrors the (data, vtable) fat-pointer layout rustc used for trait
    // objects when this crate was written; the unsafe code below transmutes
    // between real trait objects and this struct.
    pub data: *const (),
    pub vtable: VTable
}
/// Obtain the vtable for a type/trait pair. You shouldn't need to use this as a
/// consumer of the crate but it is required for macro expansion.
#[doc(hidden)]
#[macro_export]
macro_rules! vtable_for {
    ($x:ty as $y:ty) => ({
        // Cast a null `*const $x` to the trait-object pointer type `*const $y`;
        // the resulting fat pointer carries the vtable for `$x as $y`, which is
        // extracted by reinterpreting the fat pointer as a `TraitObject`.
        let x = ::std::ptr::null::<$x>() as *const $y;
        #[allow(unused_unsafe)]
        unsafe { ::std::mem::transmute::<_, $crate::TraitObject>(x).vtable }
    })
}
/// Define a custom Object-like trait. The `query`, `query_ref` and `query_mut`
/// methods will be automatically implemented on this trait object.
///
/// You may add additional static bounds to your custom trait via the
/// `HasInterface<I>` trait. This example will statically ensure that all
/// types convertible to `MyObject` can be cloned. Your trait must extend
/// `Object`.
///
/// ```rust
/// # #[macro_use]
/// # extern crate query_interface;
/// # use query_interface::*;
/// trait MyObject: Object + ObjectClone + HasInterface<ObjectClone> { }
/// mopo!(MyObject);
/// # fn main() {}
/// ```
#[macro_export]
macro_rules! mopo {
    ($name:ty) => (
        impl $name {
            // Borrow `self` as any interface `U` the concrete type registered
            // via `interfaces!`. Returns `None` if `U` was not declared.
            pub fn query_ref<U: ::std::any::Any + ?Sized>(&self) -> Option<&U> {
                if let Some(vtable) = self.query_vtable(::std::any::TypeId::of::<U>()) {
                    unsafe {
                        // Manually assemble a (data, vtable) fat pointer and
                        // reinterpret it as `&U`.
                        let data = self as *const Self;
                        let u = $crate::TraitObject { data: data as *const (), vtable: vtable };
                        Some(*::std::mem::transmute::<_, &&U>(&u))
                    }
                } else {
                    None
                }
            }
            // Mutable counterpart of `query_ref`.
            pub fn query_mut<U: ::std::any::Any + ?Sized>(&mut self) -> Option<&mut U> {
                if let Some(vtable) = self.query_vtable(::std::any::TypeId::of::<U>()) {
                    unsafe {
                        let data = self as *mut Self;
                        let mut u = $crate::TraitObject { data: data as *const (), vtable: vtable };
                        Some(*::std::mem::transmute::<_, &mut &mut U>(&mut u))
                    }
                } else {
                    None
                }
            }
            // Consuming conversions: on failure the original container is
            // handed back unchanged in the `Err` variant, so nothing is lost.
            pub fn query<U: ::std::any::Any + ?Sized>(self: Box<Self>) -> ::std::result::Result<Box<U>, Box<Self>> {
                if let Some(vtable) = self.query_vtable(::std::any::TypeId::of::<U>()) {
                    unsafe {
                        let data = Box::into_raw(self);
                        let mut u = $crate::TraitObject { data: data as *const (), vtable: vtable };
                        Ok(Box::from_raw(*::std::mem::transmute::<_, &mut *mut U>(&mut u)))
                    }
                } else {
                    Err(self)
                }
            }
            pub fn query_arc<U: ::std::any::Any + ?Sized>(self_: ::std::sync::Arc<Self>) -> ::std::result::Result<::std::sync::Arc<U>, ::std::sync::Arc<Self>> {
                if let Some(vtable) = self_.query_vtable(::std::any::TypeId::of::<U>()) {
                    unsafe {
                        let data = ::std::sync::Arc::into_raw(self_);
                        let mut u = $crate::TraitObject { data: data as *const (), vtable: vtable };
                        Ok(::std::sync::Arc::from_raw(*::std::mem::transmute::<_, &mut *mut U>(&mut u)))
                    }
                } else {
                    Err(self_)
                }
            }
            pub fn query_rc<U: ::std::any::Any + ?Sized>(self_: ::std::rc::Rc<Self>) -> ::std::result::Result<::std::rc::Rc<U>, ::std::rc::Rc<Self>> {
                if let Some(vtable) = self_.query_vtable(::std::any::TypeId::of::<U>()) {
                    unsafe {
                        let data = ::std::rc::Rc::into_raw(self_);
                        let mut u = $crate::TraitObject { data: data as *const (), vtable: vtable };
                        Ok(::std::rc::Rc::from_raw(*::std::mem::transmute::<_, &mut *mut U>(&mut u)))
                    }
                } else {
                    Err(self_)
                }
            }
            // Equality/ordering helpers that consult the Object* support
            // traits when registered, falling back to pointer identity.
            pub fn obj_partial_eq(&self, other: &Self) -> bool {
                if let Some(x) = self.query_ref::<$crate::ObjectPartialEq>() {
                    x.obj_eq(other.query_ref().unwrap())
                } else {
                    (self as *const Self) == (other as *const Self)
                }
            }
            pub fn obj_partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
                if let Some(x) = self.query_ref::<$crate::ObjectPartialOrd>() {
                    x.obj_partial_cmp(other.query_ref().unwrap())
                } else {
                    None
                }
            }
        }
        impl ::std::clone::Clone for Box<$name> {
            fn clone(&self) -> Self {
                (**self).to_owned()
            }
        }
        impl ::std::borrow::ToOwned for $name {
            type Owned = Box<$name>;
            fn to_owned(&self) -> Box<$name> {
                // Panics if the concrete type did not register `ObjectClone`.
                self.query_ref::<$crate::ObjectClone>().expect("Object not clonable!").obj_clone().query::<$name>().unwrap()
            }
        }
        impl ::std::fmt::Debug for $name {
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                if let Some(o) = self.query_ref::<::std::fmt::Debug>() {
                    o.fmt(f)
                } else {
                    writeln!(f, "Object {{ <no `Debug` implementation> }}")
                }
            }
        }
        impl ::std::cmp::PartialEq for $name {
            fn eq(&self, other: &Self) -> bool {
                // Require `Eq` rather than `PartialEq` as this allows `Object`s to be used as
                // key in hash maps
                if let Some(x) = self.query_ref::<$crate::ObjectEq>() {
                    x.obj_eq(other.query_ref().unwrap())
                } else {
                    // This trivially meets the requirements of `Eq`
                    (self as *const Self) == (other as *const Self)
                }
            }
        }
        impl ::std::cmp::Eq for $name {}
        impl ::std::cmp::PartialOrd for $name {
            fn partial_cmp(&self, other: &Self) -> Option<::std::cmp::Ordering> {
                Some(self.cmp(other))
            }
        }
        impl ::std::cmp::Ord for $name {
            fn cmp(&self, other: &Self) -> ::std::cmp::Ordering {
                if let Some(x) = self.query_ref::<$crate::ObjectOrd>() {
                    if let Some(o) = x.obj_cmp(other.query_ref().unwrap()) {
                        return o
                    }
                }
                // Fall back to a deterministic-but-arbitrary ordering by address.
                Ord::cmp(&(self as *const Self), &(other as *const Self))
            }
        }
        impl ::std::hash::Hash for $name {
            fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
                if let Some(x) = self.query_ref::<$crate::ObjectHash>() {
                    x.obj_hash(state)
                } else {
                    state.write_usize(self as *const Self as *const () as usize)
                }
            }
        }
    )
}
/// This trait is the primary function of the library. `Object` trait objects
/// can be freely queried for any other trait, allowing conversion between
/// trait objects.
// `unsafe` because the returned vtable is trusted by `mopo!`'s transmutes:
// a wrong vtable for `id` would make `query_ref` and friends unsound.
pub unsafe trait Object: Any {
    /// This is implemented by the `interfaces!` macro, and should never be
    /// manually implemented.
    ///
    /// Returns the vtable for viewing `self` as the trait identified by `id`,
    /// or `None` if the concrete type did not register that interface.
    #[doc(hidden)]
    fn query_vtable(&self, id: TypeId) -> Option<VTable>;
}
/// You can use this trait to ensure that a type implements a trait as an
/// interface. This means the type declared the trait in its `interfaces!(...)`
/// list, and guarantees that querying an `Object` of that type for the trait
/// will always succeed.
///
/// When using `HasInterface<SomeTrait>` in a generic bound, you should also
/// specify `SomeTrait` as a bound. While `HasInterface<SomeTrait>` is a more
/// stringent requirement than, and in practice implies `SomeTrait`, the
/// compiler cannot deduce that because it is enforced through macros rather
/// than the type system.
// `unsafe` because implementing it (normally done by `interfaces!`) is a
// promise that querying an `Object` of the type for `I` will succeed.
pub unsafe trait HasInterface<I: ?Sized> {}
// Generate the query_* convenience methods and the std trait impls on the
// base `Object` trait object itself.
mopo!(Object);
/// This is an object-safe version of `Clone`, which is automatically
/// implemented for all `Clone + Object` types. This is a support trait used to
/// allow `Object` trait objects to be clonable.
pub trait ObjectClone {
    /// Clones the underlying concrete value, boxed back up as an `Object`.
    fn obj_clone(&self) -> Box<Object>;
}
impl<T: Clone + Object> ObjectClone for T {
    fn obj_clone(&self) -> Box<Object> {
        Box::new(self.clone())
    }
}
/// This is an object-safe version of `PartialEq`, which is automatically
/// implemented for all `PartialEq + Object` types. This is a support trait used to
/// allow `Object` trait objects to be comparable in this way.
pub trait ObjectPartialEq {
    /// Compares against another `Object`; unequal whenever `other` is a
    /// different concrete type.
    fn obj_eq(&self, other: &Object) -> bool;
}
impl<T: PartialEq + Object> ObjectPartialEq for T {
    fn obj_eq(&self, other: &Object) -> bool {
        // Only comparable when `other` downcasts to the same concrete type.
        other.query_ref::<Self>().map_or(false, |concrete| self == concrete)
    }
}
/// This is an object-safe version of `Eq`, which is automatically
/// implemented for all `Eq + Object` types. This is a support trait used to
/// allow `Object` trait objects to be comparable in this way.
// Marker refinement of `ObjectPartialEq`; mirrors how `Eq` refines `PartialEq`.
pub trait ObjectEq: ObjectPartialEq {}
impl<T: Eq + Object> ObjectEq for T {}
/// This is an object-safe version of `PartialOrd`, which is automatically
/// implemented for all `PartialOrd + Object` types. This is a support trait used to
/// allow `Object` trait objects to be comparable in this way.
pub trait ObjectPartialOrd {
    /// Partially orders against another `Object`; `None` whenever `other` is a
    /// different concrete type or the values are incomparable.
    fn obj_partial_cmp(&self, other: &Object) -> Option<Ordering>;
}
impl<T: PartialOrd + Object> ObjectPartialOrd for T {
    fn obj_partial_cmp(&self, other: &Object) -> Option<Ordering> {
        // Downcast first; incomparable across concrete types.
        other.query_ref::<Self>().and_then(|concrete| self.partial_cmp(concrete))
    }
}
/// This is an object-safe version of `Ord`, which is automatically
/// implemented for all `Ord + Object` types. This is a support trait used to
/// allow `Object` trait objects to be comparable in this way.
pub trait ObjectOrd {
    /// Totally orders against another `Object` of the same concrete type;
    /// `None` whenever `other` is a different concrete type.
    fn obj_cmp(&self, other: &Object) -> Option<Ordering>;
}
impl<T: Ord + Object> ObjectOrd for T {
    fn obj_cmp(&self, other: &Object) -> Option<Ordering> {
        // Total order exists only within one concrete type.
        other.query_ref::<Self>().map(|concrete| self.cmp(concrete))
    }
}
/// This is an object-safe version of `Hash`, which is automatically
/// implemented for all `Hash + Object` types. This is a support trait used to
/// allow `Object` trait objects to be comparable in this way.
///
/// Note: `Object`s are not guaranteed to hash to the same value as their
/// underlying type.
pub trait ObjectHash {
    /// Feeds a hash of the underlying value into `state`.
    fn obj_hash(&self, state: &mut Hasher);
}
impl<T: Hash + Object> ObjectHash for T {
    fn obj_hash(&self, state: &mut Hasher) {
        // `Hash::hash` is generic over its hasher, so it cannot be invoked on
        // the unsized `&mut Hasher` directly: hash into a concrete
        // DefaultHasher, then feed the finished u64 into `state`. This is why
        // an `Object` is not guaranteed to hash like its underlying type.
        let mut h = DefaultHasher::new();
        self.hash(&mut h);
        state.write_u64(h.finish());
    }
}
/// Allow a set of traits to be dynamically queried from a type when it is
/// stored as an `Object` trait object.
///
/// Example use:
///
/// ```rust
/// # #[macro_use]
/// # extern crate query_interface;
/// # use query_interface::*;
/// #[derive(Clone)]
/// struct Foo;
/// interfaces!(Foo: ObjectClone);
/// # fn main() {}
/// ```
#[macro_export]
macro_rules! interfaces {
    // Helper: splice bracketed token groups back into a flat token stream.
    (@unbracket $(($($v:tt)*))*) => ($($($v)*)*);
    // Emit the `HasInterface` markers and the `Object` impl for one type.
    (@inner $imp:tt $cond:tt $name:ty: $($iface:ty),+ {}) => (
        interfaces!(@unbracket $imp ($crate::HasInterface<$name> for $name) $cond ({}));
        interfaces!(@unbracket $imp ($crate::HasInterface<$crate::Object> for $name) $cond ({}));
        $(interfaces!(@unbracket $imp ($crate::HasInterface<$iface> for $name) $cond ({}));)*
        interfaces!(@unbracket $imp ($crate::Object for $name) $cond ({
            fn query_vtable(&self, id: ::std::any::TypeId) -> Option<$crate::VTable> {
                // Linear scan over the declared interfaces; the concrete type
                // itself needs no vtable, hence `VTable::none()`.
                if id == ::std::any::TypeId::of::<$name>() {
                    Some($crate::VTable::none())
                } else if id == ::std::any::TypeId::of::<$crate::Object>() {
                    Some(vtable_for!($name as $crate::Object))
                } else $(if id == ::std::any::TypeId::of::<$iface>() {
                    Some(vtable_for!($name as $iface))
                } else)* {
                    // If "dynamic" feature is enabled, fall back to
                    // looking in the registry
                    #[cfg(feature = "dynamic")]
                    { $crate::dynamic::find_in_registry::<$name>(id) }
                    // No dynamic lookup
                    #[cfg(not(feature = "dynamic"))]
                    { None }
                }
            }
        }));
    );
    (@imp ($($result:tt)*) $name:ty: $($iface:ty),+ $(where $($cond:tt)*)*) => (
        interfaces!(@inner (unsafe impl<$($result)*>) ($(where $($cond)*)*) $name: $($iface),+ {});
    );
    // Generic-parameter parsing: walk the `<...>` list token by token,
    // accumulating parameters and bounds into `$result`.
    (@parse < $($rest:tt)*) => (
        interfaces!(@parseArg () $($rest)*);
    );
    (@parse $($rest:tt)*) => (
        interfaces!(@imp () $($rest)*);
    );
    (@parseArg ($($result:tt)*) $name:ident , $($rest:tt)*) => (
        interfaces!(@parseArg ($($result)* $name ,) $($rest)*);
    );
    (@parseArg ($($result:tt)*) $name:ident : $($rest:tt)*) => (
        interfaces!(@parseBound ($($result)* $name : ) $($rest)*);
    );
    (@parseArg ($($result:tt)*) $name:ident > $($rest:tt)*) => (
        interfaces!(@imp ($($result)* $name) $($rest)*);
    );
    (@parseBound ($($result:tt)*) $bound:tt + $($rest:tt)*) => (
        interfaces!(@parseBound ($($result)* $bound +) $($rest)*);
    );
    (@parseBound ($($result:tt)*) $bound:tt , $($rest:tt)*) => (
        interfaces!(@parseArg ($($result)* $bound ,) $($rest)*);
    );
    (@parseBound ($($result:tt)*) $bound:tt > $($rest:tt)*) => (
        interfaces!(@imp ($($result)* $bound) $($rest)*);
    );
    // Public entry points: optionally-generic single type...
    (< $($rest:tt)*) => (
        interfaces!(@parse < $($rest)*);
    );
    ($x:ty: $($rest:tt)*) => (
        interfaces!(@parse $x: $($rest)*);
    );
    // ...or batch form `{A, B, C}: Iface, ...`, expanded once per type.
    (@expand2 ($name:ty) ($($rest:tt)*)) => (
        interfaces!($name $($rest)*);
    );
    (@expand {$($name:ty),*} $rest:tt) => (
        $( interfaces!(@expand2 ($name) $rest); )*
    );
    ({$($name:ty),*} $($rest:tt)*) => (
        interfaces!(@expand {$($name),*} ($($rest)*));
    );
}
// Register standard interfaces for common std types so they work as
// `Object`s out of the box.
// Integral types
interfaces!({
    bool, i8, u8, i16, u16, i32, u32, i64, u64, char
}: ObjectClone, Debug, Display, ObjectPartialEq, ObjectPartialOrd, ObjectEq, ObjectOrd, ObjectHash, ToString);
// Floating point types (no Eq/Ord/Hash: NaN breaks totality)
interfaces!({
    f32, f64
}: ObjectClone, Debug, Display, ObjectPartialEq, ObjectPartialOrd, ToString);
// Strings
interfaces!(String: ObjectClone, Debug, Display, ObjectPartialEq, ObjectPartialOrd, ObjectEq, ObjectOrd, ObjectHash, ToString);
// Paths
interfaces!(PathBuf: ObjectClone, Debug, ObjectPartialEq, ObjectPartialOrd, ObjectEq, ObjectOrd, ObjectHash);
// Vecs
interfaces!({
    Vec<bool>, Vec<i8>, Vec<u8>, Vec<i16>, Vec<u16>, Vec<i32>, Vec<u32>, Vec<i64>, Vec<u64>, Vec<char>
}: ObjectClone, Debug, ObjectPartialEq, ObjectPartialOrd, ObjectEq, ObjectOrd, ObjectHash);
interfaces!({
    Vec<f32>, Vec<f64>
}: ObjectClone, Debug, ObjectPartialEq, ObjectPartialOrd);
interfaces!({
    Vec<String>, Vec<PathBuf>
}: ObjectClone, Debug, ObjectPartialEq, ObjectPartialOrd, ObjectEq, ObjectOrd, ObjectHash);
#[cfg(test)]
mod tests {
    use std::fmt::Debug;
    use std::sync::Arc;
    use std::rc::Rc;
    #[derive(Debug, Clone)]
    struct Bar;
    // `Foo2` is deliberately NOT registered, so queries for it must fail.
    interfaces!(Bar: Foo, super::ObjectClone, Debug, Custom);
    trait Foo: Debug {
        // Default is false; `Bar` overrides to true, which the tests use to
        // prove a query returned the real implementation, not the default.
        fn test(&self) -> bool { false }
    }
    trait Foo2: Debug {}
    impl Foo for Bar {
        fn test(&self) -> bool { true }
    }
    impl Foo2 for Bar {}
    #[derive(Debug, Clone)]
    struct GenericBar<T>(T);
    // Exercises the generic-parameter parsing path of `interfaces!`.
    interfaces!(<T: Debug + 'static> GenericBar<T>: super::ObjectClone, Debug where T: Clone);
    #[test]
    fn test_ref() {
        let x = Box::new(Bar) as Box<super::Object>;
        let foo: Option<&Foo> = x.query_ref();
        assert!(foo.is_some());
        assert!(foo.unwrap().test());
        let foo2: Option<&Foo2> = x.query_ref();
        assert!(foo2.is_none());
        let bar: Option<&Bar> = x.query_ref();
        assert!(bar.is_some());
    }
    #[test]
    fn test_mut() {
        let mut x = Box::new(Bar) as Box<super::Object>;
        {
            let foo = x.query_mut::<Foo>();
            assert!(foo.is_some());
            assert!(foo.unwrap().test());
        }
        {
            let foo2 = x.query_mut::<Foo2>();
            assert!(foo2.is_none());
        }
        {
            let bar = x.query_mut::<Bar>();
            assert!(bar.is_some());
        }
    }
    #[test]
    fn test_owned() {
        let x = Box::new(Bar) as Box<super::Object>;
        let foo: Result<Box<Foo>, _> = x.clone().query();
        assert!(foo.is_ok());
        assert!(foo.unwrap().test());
        let foo2: Result<Box<Foo2>, _> = x.clone().query();
        assert!(foo2.is_err());
        let bar: Result<Box<Bar>, _> = x.clone().query();
        assert!(bar.is_ok());
    }
    #[test]
    fn test_rc() {
        let x = Rc::new(Bar) as Rc<super::Object>;
        let foo: Result<Rc<Foo>, _> = super::Object::query_rc(x.clone());
        assert!(foo.is_ok());
        assert!(foo.unwrap().test());
        let foo2: Result<Rc<Foo2>, _> = super::Object::query_rc(x.clone());
        assert!(foo2.is_err());
        let bar: Result<Rc<Bar>, _> = super::Object::query_rc(x.clone());
        assert!(bar.is_ok());
    }
    #[test]
    fn test_arc() {
        let x = Arc::new(Bar) as Arc<super::Object>;
        let foo: Result<Arc<Foo>, _> = super::Object::query_arc(x.clone());
        assert!(foo.is_ok());
        assert!(foo.unwrap().test());
        let foo2: Result<Arc<Foo2>, _> = super::Object::query_arc(x.clone());
        assert!(foo2.is_err());
        let bar: Result<Arc<Bar>, _> = super::Object::query_arc(x.clone());
        assert!(bar.is_ok());
    }
    // A custom Object-like trait generated with `mopo!` exposes the same
    // query API as `Object` itself.
    trait Custom : super::Object {}
    impl Custom for Bar {}
    mopo!(Custom);
    #[test]
    fn test_derived() {
        let x = Box::new(Bar) as Box<Custom>;
        let foo: Result<Box<Foo>, _> = x.clone().query();
        assert!(foo.is_ok());
        assert!(foo.unwrap().test());
        let foo2: Result<Box<Foo2>, _> = x.clone().query();
        assert!(foo2.is_err());
        let bar: Result<Box<Bar>, _> = x.clone().query();
        assert!(bar.is_ok());
    }
    trait Dynamic {
        fn test(&self) -> u32;
    }
    impl Dynamic for Bar {
        fn test(&self) -> u32 { 42 }
    }
    // NOTE(review): relies on the `dynamic_interfaces!` macro from the
    // "dynamic" feature — runtime registration makes a previously failing
    // query succeed.
    #[test]
    fn test_dynamic() {
        let x = Box::new(Bar) as Box<super::Object>;
        let dyn1: Option<&Dynamic> = x.query_ref();
        assert!(dyn1.is_none());
        dynamic_interfaces! {
            Bar: Dynamic;
        }
        let dyn2: Option<&Dynamic> = x.query_ref();
        assert!(dyn2.unwrap().test() == 42);
    }
    #[test]
    fn test_primitives() {
        // Compile-time check: the blanket registrations above make these
        // std types coercible to `Object`.
        Box::new(1) as Box<super::Object>;
        Box::new(1f32) as Box<super::Object>;
        Box::new("test".to_string()) as Box<super::Object>;
        Box::new(vec![1,2,3]) as Box<super::Object>;
    }
}
|
// Copyright 2019 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
// http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD
// https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
// modified, or distributed except according to those terms. Please review the Licences for the
// specific language governing permissions and limitations relating to use of the SAFE Network
// Software.
use std::mem;
use std::ptr;
use std::slice;
/// Provides FFI-safe pointers, as opposed to raw `as_ptr()` in `Vec` and `String` which can return
/// values such as `0x01` that can cause segmentation faults with the automatic pointer
/// dereferencing on the front-end side (e.g. in Node.js).
pub trait SafePtr {
    /// Resulting pointer type.
    type Ptr;
    /// Returns a pointer that guarantees safe dereferencing on the front-end side.
    ///
    /// Implementations return null for empty containers (see the `Vec` impl
    /// below), so callers must null-check before dereferencing.
    fn as_safe_ptr(&self) -> *const Self::Ptr;
}
impl<T> SafePtr for Vec<T> {
    type Ptr = T;

    /// Null for an empty vector (whose `as_ptr` may be a dangling sentinel
    /// such as `0x01`), otherwise the vector's own data pointer.
    fn as_safe_ptr(&self) -> *const T {
        if self.is_empty() {
            return ptr::null();
        }
        self.as_ptr()
    }
}
/// Consumes a `Vec` and transfers ownership of the data to a C caller,
/// returning (pointer, size).
///
/// The pointer which this function returns must be returned to Rust and
/// reconstituted using `vec_from_raw_parts` to be properly deallocated.
/// Specifically, one should not use the standard C `free()` function to
/// deallocate this data.
///
/// Failure to call `vec_from_raw_parts` will lead to a memory leak.
pub fn vec_into_raw_parts<T>(v: Vec<T>) -> (*mut T, usize) {
    // Shrink capacity to length so the raw parts describe the whole allocation.
    let boxed = v.into_boxed_slice();
    let len = boxed.len();
    // `Box::into_raw` relinquishes ownership without running the destructor,
    // playing the role `mem::forget` plays in the usual formulation.
    let ptr = Box::into_raw(boxed) as *mut T;
    (ptr, len)
}
/// Retakes ownership of a `Vec` that was transferred to C via `vec_into_raw_parts`.
///
/// # Safety
///
/// Unsafe. `ptr`/`len` must be exactly the pair produced by
/// `vec_into_raw_parts`; see `Vec::from_raw_parts` for the full contract.
pub unsafe fn vec_from_raw_parts<T>(ptr: *mut T, len: usize) -> Vec<T> {
    // `vec_into_raw_parts` went through `into_boxed_slice`, so length and
    // capacity are both `len` — rebuild the Vec directly from the raw parts.
    Vec::from_raw_parts(ptr, len, len)
}
/// Converts a pointer and length to `Vec` by cloning the contents.
/// Note: This does NOT free the memory pointed to by `ptr`.
///
/// # Safety
///
/// Unsafe. `ptr` must be valid for reads of `len` initialized elements; see
/// documentation for `slice::from_raw_parts`.
pub unsafe fn vec_clone_from_raw_parts<T: Clone>(ptr: *const T, len: usize) -> Vec<T> {
    let view = slice::from_raw_parts(ptr, len);
    view.iter().cloned().collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    // Round-trips a Vec through the raw-parts helpers: each inner iteration
    // first clones out of the raw parts (leaving the allocation intact) and
    // then retakes ownership with `vec_from_raw_parts`, so nothing leaks.
    #[test]
    fn vec_conversions() {
        for _ in 0..5 {
            let v = vec!["foo", "bar"];
            for _ in 0..5 {
                let (ptr, len) = vec_into_raw_parts(v.clone());
                let v2 = unsafe { vec_clone_from_raw_parts(ptr, len) };
                assert_eq!(v, v2);
                let v3 = unsafe { vec_from_raw_parts(ptr, len) };
                assert_eq!(v, v3);
            }
        }
    }
}
|
// A named individual; `'static` string slices suffice because every instance
// in this demo is built from string literals.
struct Person {
    first_name: &'static str,
    last_name: &'static str,
}
// A two-person team: one leader and one member (struct-in-struct demo).
struct Team {
    leader: Person,
    member: Person,
}
/// Demo entry point: builds two teams of nested `Person` structs and prints
/// who leads whom.
fn main() {
    println!("Dakota Hill's Testing Suite");
    println!("Current Test: Structs");
    println!("---------------------------");
    let team_tony = Team {
        leader: Person { first_name: "Tony", last_name: "Stark" },
        member: Person { first_name: "James", last_name: "Rhodes" },
    };
    let team_cap = Team {
        leader: Person { first_name: "Steve", last_name: "Rogers" },
        member: Person { first_name: "Bucky", last_name: "Barnes" },
    };
    println!("Team Iron Man is lead by {} {}", team_tony.leader.first_name, team_tony.leader.last_name);
    println!("He leads {} {}", team_tony.member.first_name, team_tony.member.last_name);
    println!("Team Captain America is lead by {} {}", team_cap.leader.first_name, team_cap.leader.last_name);
    println!("He leads {} {}", team_cap.member.first_name, team_cap.member.last_name);
}
|
mod colour;
use ansi_term::Colour;
use colour::hash_colour;
use regex::{Captures, Regex};
use std::io::{self, BufRead};
/// Reads stdin line by line and re-emits each line with hash-like tokens
/// coloured via ANSI escapes.
fn main() {
    // TODO: make the regex adjustable from the CLI
    // Matches hex-ish runs of 6+ hex chars (optionally 0x-prefixed, possibly
    // split by an ellipsis or a dot) or bare runs of 3+ digits.
    let pattern = Regex::new(r"((0x)?(\d|[a-fA-F]){6}(…|\.)?(\d|[a-fA-F]){6,}|\d{3,})").unwrap();
    let stdin = io::stdin();
    for line in stdin.lock().lines() {
        // NOTE(review): panics on invalid UTF-8 input via `unwrap`.
        let line = line.unwrap();
        let result = pattern.replace_all(&line, |cap: &Captures| {
            let item = &cap[0];
            // Deterministic token -> (r, g, b) mapping from the colour module.
            let (r, g, b) = hash_colour(item);
            format!("{}", Colour::RGB(r, g, b).paint(item))
        });
        println!("{}", result);
    }
}
|
use winapi::_core::ops::Deref;
use winapi::Interface;
use winapi::shared::ntdef::{HRESULT, ULONG};
use winapi::ctypes::{c_void, c_ulong, c_ushort, c_uchar};
use winapi::shared::guiddef::{GUID, REFIID};
/// Assembles a `GUID` from its four components, mirroring the textual form
/// `{aaaaaaaa-bbbb-cccc-d1d2-d3d4d5d6d7d8}`.
#[inline]
fn uuid(
    a: c_ulong,
    b: c_ushort,
    c: c_ushort,
    d1: c_uchar,
    d2: c_uchar,
    d3: c_uchar,
    d4: c_uchar,
    d5: c_uchar,
    d6: c_uchar,
    d7: c_uchar,
    d8: c_uchar,
) -> GUID {
    let data4 = [d1, d2, d3, d4, d5, d6, d7, d8];
    GUID { Data1: a, Data2: b, Data3: c, Data4: data4 }
}
/// Raw COM vtable for `IUnknown`: the three methods every COM interface
/// begins with, in their fixed slot order (`repr(C)` preserves it).
#[repr(C)]
pub struct IUnknownVtbl {
    pub QueryInterface: unsafe extern "system" fn(This: *mut IUnknown,
                                                  riid: REFIID,
                                                  ppvObject: *mut *mut c_void)
                                                  -> HRESULT,
    pub AddRef: unsafe extern "system" fn(This: *mut IUnknown) -> ULONG,
    pub Release: unsafe extern "system" fn(This: *mut IUnknown) -> ULONG,
}
/// A COM `IUnknown` object: per the COM ABI, just a pointer to its vtable.
#[repr(C)]
pub struct IUnknown {
    pub lpVtbl: *const IUnknownVtbl,
}
impl IUnknown {
    /// Dispatches `QueryInterface` through the vtable. The `&self` receiver is
    /// cast `*const` -> `*mut` because COM signatures take a mutable `This`.
    #[inline]
    pub unsafe fn QueryInterface(&self, riid: REFIID, ppvObject: *mut *mut c_void) -> HRESULT {
        ((*self.lpVtbl).QueryInterface)(self as *const _ as *mut _, riid, ppvObject)
    }
    /// Increments the object's COM reference count via the vtable.
    #[inline]
    pub unsafe fn AddRef(&self) -> ULONG {
        ((*self.lpVtbl).AddRef)(self as *const _ as *mut _)
    }
    /// Decrements the object's COM reference count via the vtable.
    #[inline]
    pub unsafe fn Release(&self) -> ULONG {
        ((*self.lpVtbl).Release)(self as *const _ as *mut _)
    }
}
impl Interface for IUnknown {
    /// IID_IUnknown: {00000000-0000-0000-C000-000000000046}.
    #[inline]
    fn uuidof() -> GUID {
        uuid(0x00000000,
             0x0000,
             0x0000,
             0xc0,
             0x00,
             0x00,
             0x00,
             0x00,
             0x00,
             0x00,
             0x46)
    }
}
/// Raw COM vtable for `ISequentialStream`. The embedded `IUnknownVtbl` comes
/// first so an `ISequentialStream` pointer is also a valid `IUnknown` pointer.
#[repr(C)]
pub struct ISequentialStreamVtbl {
    pub parent: IUnknownVtbl,
    pub Read: unsafe extern "system" fn(This: *mut ISequentialStream,
                                        pv: *mut c_void,
                                        cb: ULONG,
                                        pcbRead: *mut ULONG)
                                        -> HRESULT,
    pub Write: unsafe extern "system" fn(This: *mut ISequentialStream,
                                         pv: *const c_void,
                                         cb: ULONG,
                                         pcbWritten: *mut ULONG)
                                         -> HRESULT,
}
/// A COM `ISequentialStream` object: a single pointer to its vtable.
#[repr(C)]
pub struct ISequentialStream {
    pub lpVtbl: *const ISequentialStreamVtbl,
}
impl ISequentialStream {
    /// Reads up to `cb` bytes from the stream into `pv`; the count of bytes
    /// actually read is stored through `pcbRead`.
    #[inline]
    pub unsafe fn Read(&self, pv: *mut c_void, cb: ULONG, pcbRead: *mut ULONG) -> HRESULT {
        ((*self.lpVtbl).Read)(self as *const _ as *mut _, pv, cb, pcbRead)
    }
    /// Writes up to `cb` bytes from `pv` into the stream; the count of bytes
    /// actually written is stored through `pcbWritten`.
    #[inline]
    pub unsafe fn Write(&self, pv: *const c_void, cb: ULONG, pcbWritten: *mut ULONG) -> HRESULT {
        // BUG FIX: previously dispatched through the `Read` vtable slot,
        // invoking the wrong COM method (and passing a `*const` buffer where
        // `Read` expects `*mut`). Dispatch through `Write` as intended.
        ((*self.lpVtbl).Write)(self as *const _ as *mut _, pv, cb, pcbWritten)
    }
}
impl Deref for ISequentialStream {
    type Target = IUnknown;
    #[inline]
    fn deref(&self) -> &IUnknown {
        // Sound per the COM layout: both structs are #[repr(C)] and
        // `ISequentialStreamVtbl` starts with an `IUnknownVtbl`, so the same
        // pointer serves for both interfaces.
        unsafe { &*(self as *const ISequentialStream as *const IUnknown) }
    }
}
impl Interface for ISequentialStream {
    /// IID_ISequentialStream: {0C733A30-2A1C-11CE-ADE5-00AA0044773D}.
    #[inline]
    fn uuidof() -> GUID {
        uuid(0x0c733a30,
             0x2a1c,
             0x11ce,
             0xad,
             0xe5,
             0x00,
             0xaa,
             0x00,
             0x44,
             0x77,
             0x3d)
    }
}
|
use crate::rtrs::textures::Texture;
use crate::Color;
use crate::HitRecord;
use crate::Material;
use crate::Point;
use crate::Ray;
use crate::Vector;
use std::sync::Arc;
/// Diffuse material parameterised by an albedo texture; scattering behaviour
/// is defined in the `Material` impl below.
#[derive(Debug)]
pub struct Lambertian {
    pub albedo: Arc<dyn Texture>,
}
impl Lambertian {
    /// Creates a Lambertian material with the given albedo texture.
    pub fn new(albedo: Arc<dyn Texture>) -> Self {
        // Field-init shorthand replaces the redundant `albedo: albedo`
        // (clippy::redundant_field_names).
        Self { albedo }
    }
}
impl Material for Lambertian {
    /// Scatters `ray_in` at the hit described by `record`: the scattered ray
    /// starts at the hit point, heads along `normal + random_unit`, and keeps
    /// the incoming ray's time. Attenuation is the albedo sampled at the
    /// hit's (u, v) and point. Always returns true — this material never
    /// absorbs a ray outright.
    fn scatter(
        &self,
        ray_in: &Ray,
        record: &HitRecord,
        attenuation: &mut Color,
        scattered: &mut Ray,
    ) -> bool {
        scattered.origin = record.p;
        // NOTE(review): if `random_unit()` returns exactly -normal this
        // direction degenerates to near-zero — presumably tolerated by the
        // ray code; confirm.
        scattered.direction = record.normal + Vector::random_unit();
        scattered.time = ray_in.time;
        let v = self.albedo.value(record.u, record.v, &record.p);
        attenuation.r = v.r;
        attenuation.g = v.g;
        attenuation.b = v.b;
        true
    }
    /// Lambertian surfaces emit no light: always black.
    fn emitted(&self, _u: f64, _v: f64, _p: &Point) -> Color {
        Color::new(0.0, 0.0, 0.0)
    }
}
|
use crate::class_file::unvalidated::read::FromReader;
use crate::class_file::unvalidated::ClassFile;
use crate::class_file::unvalidated::ConstantIdx;
use crate::class_file::unvalidated::Error;
use std::fmt;
use std::io::{Read, Seek, SeekFrom};
/// Pairs an `Instruction` with its `ClassFile` so constant-pool operands can
/// be resolved to readable text when formatting via `Display`.
pub struct InstructionDisplay<'a, 'b> {
    instruction: &'a Instruction,
    class_file: &'b ClassFile,
}
// Renders each instruction as a lowercase mnemonic with no separators
// (e.g. "iconst0", "ificmpeq"); operands that reference the constant pool
// are resolved through `class_file` and shown alongside their index.
impl<'a, 'b> fmt::Display for InstructionDisplay<'a, 'b> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.instruction {
            // Constants
            Instruction::Nop => write!(f, "nop"),
            Instruction::AConstNull => write!(f, "aconstnull"),
            Instruction::IConstM1 => write!(f, "iconstm1"),
            Instruction::IConst0 => write!(f, "iconst0"),
            Instruction::IConst1 => write!(f, "iconst1"),
            Instruction::IConst2 => write!(f, "iconst2"),
            Instruction::IConst3 => write!(f, "iconst3"),
            Instruction::IConst4 => write!(f, "iconst4"),
            Instruction::IConst5 => write!(f, "iconst5"),
            Instruction::LConst0 => write!(f, "lconst0"),
            Instruction::LConst1 => write!(f, "lconst1"),
            Instruction::FConst0 => write!(f, "fconst0"),
            Instruction::FConst1 => write!(f, "fconst1"),
            Instruction::FConst2 => write!(f, "fconst2"),
            Instruction::DConst0 => write!(f, "dconst0"),
            Instruction::DConst1 => write!(f, "dconst1"),
            Instruction::BIPush(b) => write!(f, "bipush {}", b),
            Instruction::SIPush(s) => write!(f, "sipush {}", s),
            // ldc variants resolve the constant-pool entry for readability.
            Instruction::Ldc(idx) => write!(
                f,
                "ldc {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::LdcW(idx) => write!(
                f,
                "ldcw {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::Ldc2W(idx) => write!(
                f,
                "ldc2w {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            // Local-variable loads
            Instruction::ILoad(idx) => write!(f, "iload {}", idx),
            Instruction::LLoad(idx) => write!(f, "lload {}", idx),
            Instruction::FLoad(idx) => write!(f, "fload {}", idx),
            Instruction::DLoad(idx) => write!(f, "dload {}", idx),
            Instruction::ALoad(idx) => write!(f, "aload {}", idx),
            Instruction::ILoad0 => write!(f, "iload0"),
            Instruction::ILoad1 => write!(f, "iload1"),
            Instruction::ILoad2 => write!(f, "iload2"),
            Instruction::ILoad3 => write!(f, "iload3"),
            Instruction::LLoad0 => write!(f, "lload0"),
            Instruction::LLoad1 => write!(f, "lload1"),
            Instruction::LLoad2 => write!(f, "lload2"),
            Instruction::LLoad3 => write!(f, "lload3"),
            Instruction::FLoad0 => write!(f, "fload0"),
            Instruction::FLoad1 => write!(f, "fload1"),
            Instruction::FLoad2 => write!(f, "fload2"),
            Instruction::FLoad3 => write!(f, "fload3"),
            Instruction::DLoad0 => write!(f, "dload0"),
            Instruction::DLoad1 => write!(f, "dload1"),
            Instruction::DLoad2 => write!(f, "dload2"),
            Instruction::DLoad3 => write!(f, "dload3"),
            Instruction::ALoad0 => write!(f, "aload0"),
            Instruction::ALoad1 => write!(f, "aload1"),
            Instruction::ALoad2 => write!(f, "aload2"),
            Instruction::ALoad3 => write!(f, "aload3"),
            // Array loads/stores
            Instruction::IAStore => write!(f, "iastore"),
            Instruction::IALoad => write!(f, "iaload"),
            Instruction::LALoad => write!(f, "laload"),
            Instruction::FALoad => write!(f, "faload"),
            Instruction::DALoad => write!(f, "daload"),
            Instruction::AALoad => write!(f, "aaload"),
            Instruction::BALoad => write!(f, "baload"),
            Instruction::CALoad => write!(f, "caload"),
            Instruction::SALoad => write!(f, "saload"),
            // Local-variable stores
            Instruction::IStore(idx) => write!(f, "istore {}", idx),
            Instruction::LStore(idx) => write!(f, "lstore {}", idx),
            Instruction::FStore(idx) => write!(f, "fstore {}", idx),
            Instruction::DStore(idx) => write!(f, "dstore {}", idx),
            Instruction::AStore(idx) => write!(f, "astore {}", idx),
            Instruction::IStore0 => write!(f, "istore0"),
            Instruction::IStore1 => write!(f, "istore1"),
            Instruction::IStore2 => write!(f, "istore2"),
            Instruction::IStore3 => write!(f, "istore3"),
            Instruction::LStore0 => write!(f, "lstore0"),
            Instruction::LStore1 => write!(f, "lstore1"),
            Instruction::LStore2 => write!(f, "lstore2"),
            Instruction::LStore3 => write!(f, "lstore3"),
            Instruction::FStore0 => write!(f, "fstore0"),
            Instruction::FStore1 => write!(f, "fstore1"),
            Instruction::FStore2 => write!(f, "fstore2"),
            Instruction::FStore3 => write!(f, "fstore3"),
            Instruction::DStore0 => write!(f, "dstore0"),
            Instruction::DStore1 => write!(f, "dstore1"),
            Instruction::DStore2 => write!(f, "dstore2"),
            Instruction::DStore3 => write!(f, "dstore3"),
            Instruction::AStore0 => write!(f, "astore0"),
            Instruction::AStore1 => write!(f, "astore1"),
            Instruction::AStore2 => write!(f, "astore2"),
            Instruction::AStore3 => write!(f, "astore3"),
            Instruction::LAStore => write!(f, "lastore"),
            Instruction::FAStore => write!(f, "fastore"),
            Instruction::DAStore => write!(f, "dastore"),
            Instruction::AAStore => write!(f, "aastore"),
            Instruction::BAStore => write!(f, "bastore"),
            Instruction::CAStore => write!(f, "castore"),
            Instruction::SAStore => write!(f, "sastore"),
            // Stack manipulation
            Instruction::Pop => write!(f, "pop"),
            Instruction::Pop2 => write!(f, "pop2"),
            Instruction::Dup => write!(f, "dup"),
            Instruction::DupX1 => write!(f, "dupx1"),
            Instruction::DupX2 => write!(f, "dupx2"),
            Instruction::Dup2 => write!(f, "dup2"),
            Instruction::Dup2X1 => write!(f, "dup2x1"),
            Instruction::Dup2X2 => write!(f, "dup2x2"),
            Instruction::Swap => write!(f, "swap"),
            // Arithmetic / logic
            Instruction::IAdd => write!(f, "iadd"),
            Instruction::LAdd => write!(f, "ladd"),
            Instruction::FAdd => write!(f, "fadd"),
            Instruction::DAdd => write!(f, "dadd"),
            Instruction::ISub => write!(f, "isub"),
            Instruction::LSub => write!(f, "lsub"),
            Instruction::FSub => write!(f, "fsub"),
            Instruction::DSub => write!(f, "dsub"),
            Instruction::IMul => write!(f, "imul"),
            Instruction::LMul => write!(f, "lmul"),
            Instruction::FMul => write!(f, "fmul"),
            Instruction::DMul => write!(f, "dmul"),
            Instruction::IDiv => write!(f, "idiv"),
            Instruction::LDiv => write!(f, "ldiv"),
            Instruction::FDiv => write!(f, "fdiv"),
            Instruction::DDiv => write!(f, "ddiv"),
            Instruction::IRem => write!(f, "irem"),
            Instruction::LRem => write!(f, "lrem"),
            Instruction::FRem => write!(f, "frem"),
            Instruction::DRem => write!(f, "drem"),
            Instruction::INeg => write!(f, "ineg"),
            Instruction::LNeg => write!(f, "lneg"),
            Instruction::FNeg => write!(f, "fneg"),
            Instruction::DNeg => write!(f, "dneg"),
            Instruction::IShl => write!(f, "ishl"),
            Instruction::LShl => write!(f, "lshl"),
            Instruction::IShr => write!(f, "ishr"),
            Instruction::LShr => write!(f, "lshr"),
            Instruction::IUshr => write!(f, "iushr"),
            Instruction::LUshr => write!(f, "lushr"),
            Instruction::IAnd => write!(f, "iand"),
            Instruction::LAnd => write!(f, "land"),
            Instruction::IOr => write!(f, "ior"),
            Instruction::LOr => write!(f, "lor"),
            Instruction::IXor => write!(f, "ixor"),
            Instruction::LXor => write!(f, "lxor"),
            Instruction::IInc(idx, inc) => write!(f, "iinc {}, {}", idx, inc),
            // Numeric conversions
            Instruction::I2L => write!(f, "i2l"),
            Instruction::I2F => write!(f, "i2f"),
            Instruction::I2D => write!(f, "i2d"),
            Instruction::L2I => write!(f, "l2i"),
            Instruction::L2F => write!(f, "l2f"),
            Instruction::L2D => write!(f, "l2d"),
            Instruction::F2I => write!(f, "f2i"),
            Instruction::F2L => write!(f, "f2l"),
            Instruction::F2D => write!(f, "f2d"),
            Instruction::D2I => write!(f, "d2i"),
            Instruction::D2L => write!(f, "d2l"),
            Instruction::D2F => write!(f, "d2f"),
            Instruction::I2B => write!(f, "i2b"),
            Instruction::I2C => write!(f, "i2c"),
            Instruction::I2S => write!(f, "i2s"),
            // Comparisons and branches (operand is a signed branch offset)
            Instruction::ICmp => write!(f, "icmp"),
            Instruction::FCmpL => write!(f, "fcmpl"),
            Instruction::FCmpG => write!(f, "fcmpg"),
            Instruction::DCmpL => write!(f, "dcmpl"),
            Instruction::DCmpG => write!(f, "dcmpg"),
            Instruction::IfEq(offset) => write!(f, "ifeq {}", offset),
            Instruction::IfNe(offset) => write!(f, "ifne {}", offset),
            Instruction::IfLt(offset) => write!(f, "iflt {}", offset),
            Instruction::IfGe(offset) => write!(f, "ifge {}", offset),
            Instruction::IfGt(offset) => write!(f, "ifgt {}", offset),
            Instruction::IfLe(offset) => write!(f, "ifle {}", offset),
            Instruction::IfIcmpEq(offset) => write!(f, "ificmpeq {}", offset),
            Instruction::IfIcmpNe(offset) => write!(f, "ificmpne {}", offset),
            Instruction::IfIcmpLt(offset) => write!(f, "ificmplt {}", offset),
            Instruction::IfIcmpGe(offset) => write!(f, "ificmpge {}", offset),
            Instruction::IfIcmpGt(offset) => write!(f, "ificmpgt {}", offset),
            Instruction::IfIcmpLe(offset) => write!(f, "ificmple {}", offset),
            Instruction::IfAcmpEq(offset) => write!(f, "ifacmpeq {}", offset),
            Instruction::IfAcmpNe(offset) => write!(f, "ifacmpne {}", offset),
            Instruction::Goto(offset) => write!(f, "goto {}", offset),
            Instruction::Jsr(offset) => write!(f, "jsr {}", offset),
            Instruction::Ret(idx) => write!(f, "ret {}", idx),
            Instruction::TableSwitch(default, low, high, entries) => write!(
                f,
                "tableswitch {} {}..{}, {:?}",
                default, low, high, entries
            ),
            Instruction::LookupSwitch(default, entries) => {
                write!(f, "lookupswitch {} {:?}", default, entries)
            }
            Instruction::IReturn => write!(f, "ireturn"),
            Instruction::LReturn => write!(f, "lreturn"),
            Instruction::FReturn => write!(f, "freturn"),
            Instruction::DReturn => write!(f, "dreturn"),
            Instruction::AReturn => write!(f, "areturn"),
            Instruction::Return => write!(f, "return"),
            // Field access and invocation: resolve the constant-pool operand.
            Instruction::GetStatic(idx) => write!(
                f,
                "getstatic {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::PutStatic(idx) => write!(
                f,
                "putstatic {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::GetField(idx) => write!(
                f,
                "getfield {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::PutField(idx) => write!(
                f,
                "putfield {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::InvokeVirtual(idx) => write!(
                f,
                "invokevirtual {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::InvokeSpecial(idx) => write!(
                f,
                "invokespecial {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::InvokeStatic(idx) => write!(
                f,
                "invokestatic {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::InvokeInterface(idx, count) => write!(
                f,
                "invokeinterface {} (const {}), {}",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner(),
                count
            ),
            Instruction::InvokeDynamic(idx) => write!(
                f,
                "invokedynamic {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            // Object/array creation and type checks
            Instruction::New(idx) => write!(
                f,
                "new {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::NewArray(tpe) => write!(f, "newarray (const {})", tpe),
            Instruction::ANewArray => write!(f, "anewarray"),
            Instruction::ArrayLength => write!(f, "arraylength"),
            Instruction::AThrow => write!(f, "athrow"),
            Instruction::CheckCast(idx) => write!(
                f,
                "checkcast {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::InstanceOf(idx) => write!(
                f,
                "instanceof {} (const {})",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner()
            ),
            Instruction::MonitorEnter => write!(f, "monitorenter"),
            Instruction::MonitorExit => write!(f, "monitorexit"),
            Instruction::MultiANewArray(idx, dimensions) => write!(
                f,
                "multianewarray {} (const {}), {}",
                self.class_file
                    .get_const(*idx)
                    .unwrap()
                    .display(self.class_file),
                idx.inner(),
                dimensions
            ),
            Instruction::IfNull(offset) => write!(f, "ifnull {}", offset),
            Instruction::IfNonNull(offset) => write!(f, "ifnonnull {}", offset),
            Instruction::GotoW(offset) => write!(f, "gotow {}", offset),
            Instruction::JsrW(offset) => write!(f, "jsrw {}", offset),
        }
    }
}
impl Instruction {
    /// Pairs this instruction with `class_file` so its constant-pool
    /// operands can be resolved when formatted via `Display`.
    pub fn display<'a, 'b>(&'a self, class_file: &'b ClassFile) -> InstructionDisplay<'a, 'b> {
        let instruction = self;
        InstructionDisplay { class_file, instruction }
    }
}
/// A single JVM bytecode instruction with its operands decoded.
/// `ConstantIdx` operands index the class's constant pool; `u16` operands
/// index local variables (widened from `u8` unless the `wide` prefix was
/// present); `i16`/`i32` operands are signed branch offsets.
#[derive(Debug)]
pub enum Instruction {
    // Constants
    Nop,
    AConstNull,
    IConstM1,
    IConst0,
    IConst1,
    IConst2,
    IConst3,
    IConst4,
    IConst5,
    LConst0,
    LConst1,
    FConst0,
    FConst1,
    FConst2,
    DConst0,
    DConst1,
    BIPush(i8),
    SIPush(i16),
    Ldc(ConstantIdx),
    LdcW(ConstantIdx),
    Ldc2W(ConstantIdx),
    // Local-variable loads
    ILoad(u16),
    LLoad(u16),
    FLoad(u16),
    DLoad(u16),
    ALoad(u16),
    ILoad0,
    ILoad1,
    ILoad2,
    ILoad3,
    LLoad0,
    LLoad1,
    LLoad2,
    LLoad3,
    FLoad0,
    FLoad1,
    FLoad2,
    FLoad3,
    DLoad0,
    DLoad1,
    DLoad2,
    DLoad3,
    ALoad0,
    ALoad1,
    ALoad2,
    ALoad3,
    // Array loads/stores
    IAStore,
    IALoad,
    LALoad,
    FALoad,
    DALoad,
    AALoad,
    BALoad,
    CALoad,
    SALoad,
    // Local-variable stores
    IStore(u16),
    LStore(u16),
    FStore(u16),
    DStore(u16),
    AStore(u16),
    IStore0,
    IStore1,
    IStore2,
    IStore3,
    LStore0,
    LStore1,
    LStore2,
    LStore3,
    FStore0,
    FStore1,
    FStore2,
    FStore3,
    DStore0,
    DStore1,
    DStore2,
    DStore3,
    AStore0,
    AStore1,
    AStore2,
    AStore3,
    LAStore,
    FAStore,
    DAStore,
    AAStore,
    BAStore,
    CAStore,
    SAStore,
    // Stack manipulation
    Pop,
    Pop2,
    Dup,
    DupX1,
    DupX2,
    Dup2,
    Dup2X1,
    Dup2X2,
    Swap,
    // Arithmetic / logic
    IAdd,
    LAdd,
    FAdd,
    DAdd,
    ISub,
    LSub,
    FSub,
    DSub,
    IMul,
    LMul,
    FMul,
    DMul,
    IDiv,
    LDiv,
    FDiv,
    DDiv,
    IRem,
    LRem,
    FRem,
    DRem,
    INeg,
    LNeg,
    FNeg,
    DNeg,
    IShl,
    LShl,
    IShr,
    LShr,
    IUshr,
    LUshr,
    IAnd,
    LAnd,
    IOr,
    LOr,
    IXor,
    LXor,
    // (local index, signed increment)
    IInc(u16, i16),
    // Numeric conversions
    I2L,
    I2F,
    I2D,
    L2I,
    L2F,
    L2D,
    F2I,
    F2L,
    F2D,
    D2I,
    D2L,
    D2F,
    I2B,
    I2C,
    I2S,
    // Comparisons and branches
    ICmp,
    FCmpL,
    FCmpG,
    DCmpL,
    DCmpG,
    IfEq(i16),
    IfNe(i16),
    IfLt(i16),
    IfGe(i16),
    IfGt(i16),
    IfLe(i16),
    IfIcmpEq(i16),
    IfIcmpNe(i16),
    IfIcmpLt(i16),
    IfIcmpGe(i16),
    IfIcmpGt(i16),
    IfIcmpLe(i16),
    IfAcmpEq(i16),
    IfAcmpNe(i16),
    Goto(i16),
    Jsr(i16),
    Ret(u16),
    // (default offset, low, high, jump offsets)
    TableSwitch(i32, i32, i32, Vec<i32>),
    // (default offset, (match, offset) pairs)
    LookupSwitch(i32, Vec<(u32, i32)>),
    IReturn,
    LReturn,
    FReturn,
    DReturn,
    AReturn,
    Return,
    // Field access / invocation
    GetStatic(ConstantIdx),
    PutStatic(ConstantIdx),
    GetField(ConstantIdx),
    PutField(ConstantIdx),
    InvokeVirtual(ConstantIdx),
    InvokeSpecial(ConstantIdx),
    InvokeStatic(ConstantIdx),
    InvokeInterface(ConstantIdx, u8),
    InvokeDynamic(ConstantIdx),
    // Object/array creation and type checks
    New(ConstantIdx),
    NewArray(u8),
    ANewArray,
    ArrayLength,
    AThrow,
    CheckCast(ConstantIdx),
    InstanceOf(ConstantIdx),
    MonitorEnter,
    MonitorExit,
    MultiANewArray(ConstantIdx, u8),
    IfNull(i16),
    IfNonNull(i16),
    GotoW(i32),
    JsrW(i32),
}
impl<R: Read + Seek> FromReader<R> for Instruction {
    /// Decodes one instruction from the stream, handling the `wide` (0xc4)
    /// prefix by re-reading the real opcode with widened operands.
    fn read_from(data: &mut R) -> Result<Self, Error> {
        /// Decodes the instruction whose opcode byte `opc` has already been
        /// consumed. `wide` widens local-variable-index operands to u16.
        fn read_instruction<R: Read + Seek>(
            data: &mut R,
            opc: u8,
            wide: bool,
        ) -> Result<Instruction, Error> {
            /// Reads a local-variable index: u16 under `wide`, else u8
            /// zero-extended (indices are unsigned, so this is correct).
            fn read_idx<R: Read>(data: &mut R, wide: bool) -> Result<u16, Error> {
                if wide {
                    u16::read_from(data)
                } else {
                    u8::read_from(data).map(|v| v.into())
                }
            }
            let opc = match opc {
                0x00 => Instruction::Nop,
                0x01 => Instruction::AConstNull,
                0x02 => Instruction::IConstM1,
                0x03 => Instruction::IConst0,
                0x04 => Instruction::IConst1,
                0x05 => Instruction::IConst2,
                0x06 => Instruction::IConst3,
                0x07 => Instruction::IConst4,
                0x08 => Instruction::IConst5,
                0x09 => Instruction::LConst0,
                0x0a => Instruction::LConst1,
                0x0b => Instruction::FConst0,
                0x0c => Instruction::FConst1,
                0x0d => Instruction::FConst2,
                0x0e => Instruction::DConst0,
                0x0f => Instruction::DConst1,
                0x10 => Instruction::BIPush(i8::read_from(data)?),
                0x11 => Instruction::SIPush(i16::read_from(data)?),
                // ldc takes a one-byte constant index; ldc_w/ldc2_w take two.
                0x12 => Instruction::Ldc(ConstantIdx::new(u8::read_from(data)? as u16).unwrap()),
                0x13 => Instruction::LdcW(ConstantIdx::read_from(data)?),
                0x14 => Instruction::Ldc2W(ConstantIdx::read_from(data)?),
                0x15 => Instruction::ILoad(read_idx(data, wide)?),
                0x16 => Instruction::LLoad(read_idx(data, wide)?),
                0x17 => Instruction::FLoad(read_idx(data, wide)?),
                0x18 => Instruction::DLoad(read_idx(data, wide)?),
                0x19 => Instruction::ALoad(read_idx(data, wide)?),
                0x1a => Instruction::ILoad0,
                0x1b => Instruction::ILoad1,
                0x1c => Instruction::ILoad2,
                0x1d => Instruction::ILoad3,
                0x1e => Instruction::LLoad0,
                0x1f => Instruction::LLoad1,
                0x20 => Instruction::LLoad2,
                0x21 => Instruction::LLoad3,
                0x22 => Instruction::FLoad0,
                0x23 => Instruction::FLoad1,
                0x24 => Instruction::FLoad2,
                0x25 => Instruction::FLoad3,
                0x26 => Instruction::DLoad0,
                0x27 => Instruction::DLoad1,
                0x28 => Instruction::DLoad2,
                0x29 => Instruction::DLoad3,
                0x2a => Instruction::ALoad0,
                0x2b => Instruction::ALoad1,
                0x2c => Instruction::ALoad2,
                0x2d => Instruction::ALoad3,
                0x2e => Instruction::IALoad,
                0x2f => Instruction::LALoad,
                0x30 => Instruction::FALoad,
                0x31 => Instruction::DALoad,
                0x32 => Instruction::AALoad,
                0x33 => Instruction::BALoad,
                0x34 => Instruction::CALoad,
                0x35 => Instruction::SALoad,
                0x36 => Instruction::IStore(read_idx(data, wide)?),
                0x37 => Instruction::LStore(read_idx(data, wide)?),
                0x38 => Instruction::FStore(read_idx(data, wide)?),
                0x39 => Instruction::DStore(read_idx(data, wide)?),
                0x3a => Instruction::AStore(read_idx(data, wide)?),
                0x3b => Instruction::IStore0,
                0x3c => Instruction::IStore1,
                0x3d => Instruction::IStore2,
                0x3e => Instruction::IStore3,
                0x3f => Instruction::LStore0,
                0x40 => Instruction::LStore1,
                0x41 => Instruction::LStore2,
                0x42 => Instruction::LStore3,
                0x43 => Instruction::FStore0,
                0x44 => Instruction::FStore1,
                0x45 => Instruction::FStore2,
                0x46 => Instruction::FStore3,
                0x47 => Instruction::DStore0,
                0x48 => Instruction::DStore1,
                0x49 => Instruction::DStore2,
                0x4a => Instruction::DStore3,
                0x4b => Instruction::AStore0,
                0x4c => Instruction::AStore1,
                0x4d => Instruction::AStore2,
                0x4e => Instruction::AStore3,
                0x4f => Instruction::IAStore,
                0x50 => Instruction::LAStore,
                0x51 => Instruction::FAStore,
                0x52 => Instruction::DAStore,
                0x53 => Instruction::AAStore,
                0x54 => Instruction::BAStore,
                0x55 => Instruction::CAStore,
                0x56 => Instruction::SAStore,
                0x57 => Instruction::Pop,
                0x58 => Instruction::Pop2,
                0x59 => Instruction::Dup,
                0x5a => Instruction::DupX1,
                0x5b => Instruction::DupX2,
                0x5c => Instruction::Dup2,
                0x5d => Instruction::Dup2X1,
                0x5e => Instruction::Dup2X2,
                0x5f => Instruction::Swap,
                0x60 => Instruction::IAdd,
                0x61 => Instruction::LAdd,
                0x62 => Instruction::FAdd,
                0x63 => Instruction::DAdd,
                0x64 => Instruction::ISub,
                0x65 => Instruction::LSub,
                0x66 => Instruction::FSub,
                0x67 => Instruction::DSub,
                0x68 => Instruction::IMul,
                0x69 => Instruction::LMul,
                0x6a => Instruction::FMul,
                0x6b => Instruction::DMul,
                0x6c => Instruction::IDiv,
                0x6d => Instruction::LDiv,
                0x6e => Instruction::FDiv,
                0x6f => Instruction::DDiv,
                0x70 => Instruction::IRem,
                0x71 => Instruction::LRem,
                0x72 => Instruction::FRem,
                0x73 => Instruction::DRem,
                0x74 => Instruction::INeg,
                0x75 => Instruction::LNeg,
                0x76 => Instruction::FNeg,
                0x77 => Instruction::DNeg,
                0x78 => Instruction::IShl,
                0x79 => Instruction::LShl,
                0x7a => Instruction::IShr,
                0x7b => Instruction::LShr,
                0x7c => Instruction::IUshr,
                0x7d => Instruction::LUshr,
                0x7e => Instruction::IAnd,
                0x7f => Instruction::LAnd,
                0x80 => Instruction::IOr,
                0x81 => Instruction::LOr,
                0x82 => Instruction::IXor,
                0x83 => Instruction::LXor,
                0x84 => {
                    // iinc: the index widens with `wide` like other locals,
                    // but the increment is *signed* (i8 normally, i16 under
                    // `wide`). BUG FIX: the narrow form was previously read
                    // through `read_idx` (u8) and zero-extended, so a -1
                    // increment decoded as 255.
                    let index = read_idx(data, wide)?;
                    let amount = if wide {
                        i16::read_from(data)?
                    } else {
                        i8::read_from(data)? as i16
                    };
                    Instruction::IInc(index, amount)
                }
                0x85 => Instruction::I2L,
                0x86 => Instruction::I2F,
                0x87 => Instruction::I2D,
                0x88 => Instruction::L2I,
                0x89 => Instruction::L2F,
                0x8a => Instruction::L2D,
                0x8b => Instruction::F2I,
                0x8c => Instruction::F2L,
                0x8d => Instruction::F2D,
                0x8e => Instruction::D2I,
                0x8f => Instruction::D2L,
                0x90 => Instruction::D2F,
                0x91 => Instruction::I2B,
                0x92 => Instruction::I2C,
                0x93 => Instruction::I2S,
                0x94 => Instruction::ICmp,
                0x95 => Instruction::FCmpL,
                0x96 => Instruction::FCmpG,
                0x97 => Instruction::DCmpL,
                0x98 => Instruction::DCmpG,
                0x99 => Instruction::IfEq(i16::read_from(data)?),
                0x9a => Instruction::IfNe(i16::read_from(data)?),
                0x9b => Instruction::IfLt(i16::read_from(data)?),
                0x9c => Instruction::IfGe(i16::read_from(data)?),
                0x9d => Instruction::IfGt(i16::read_from(data)?),
                0x9e => Instruction::IfLe(i16::read_from(data)?),
                0x9f => Instruction::IfIcmpEq(i16::read_from(data)?),
                0xa0 => Instruction::IfIcmpNe(i16::read_from(data)?),
                0xa1 => Instruction::IfIcmpLt(i16::read_from(data)?),
                0xa2 => Instruction::IfIcmpGe(i16::read_from(data)?),
                0xa3 => Instruction::IfIcmpGt(i16::read_from(data)?),
                0xa4 => Instruction::IfIcmpLe(i16::read_from(data)?),
                0xa5 => Instruction::IfAcmpEq(i16::read_from(data)?),
                0xa6 => Instruction::IfAcmpNe(i16::read_from(data)?),
                0xa7 => Instruction::Goto(i16::read_from(data)?),
                0xa8 => Instruction::Jsr(i16::read_from(data)?),
                0xa9 => Instruction::Ret(read_idx(data, wide)?),
                0xaa => {
                    // tableswitch operands are padded to a 4-byte boundary
                    // relative to the start of the code array. NOTE(review):
                    // this uses the absolute stream position, which assumes
                    // the reader starts at the code array — confirm.
                    while data.seek(SeekFrom::Current(0))? % 4 != 0 {
                        let _ = u8::read_from(data)?;
                    }
                    let default = i32::read_from(data)?;
                    let low = i32::read_from(data)?;
                    let high = i32::read_from(data)?;
                    let mut entries = Vec::new();
                    for _ in low..=high {
                        entries.push(i32::read_from(data)?);
                    }
                    Instruction::TableSwitch(default, low, high, entries)
                }
                0xab => {
                    // lookupswitch: same 4-byte alignment as tableswitch.
                    while data.seek(SeekFrom::Current(0))? % 4 != 0 {
                        let _ = u8::read_from(data)?;
                    }
                    let default = i32::read_from(data)?;
                    // Not a bug!
                    // "Immediately after the padding follow a series of signed 32-bit
                    // values: default, npairs, and then npairs pairs of signed 32-bit values."
                    let count = i32::read_from(data)?;
                    let mut entries = Vec::new();
                    for _ in 0..count {
                        entries.push((u32::read_from(data)?, i32::read_from(data)?));
                    }
                    Instruction::LookupSwitch(default, entries)
                }
                0xac => Instruction::IReturn,
                0xad => Instruction::LReturn,
                0xae => Instruction::FReturn,
                0xaf => Instruction::DReturn,
                0xb0 => Instruction::AReturn,
                0xb1 => Instruction::Return,
                0xb2 => Instruction::GetStatic(ConstantIdx::read_from(data)?),
                0xb3 => Instruction::PutStatic(ConstantIdx::read_from(data)?),
                0xb4 => Instruction::GetField(ConstantIdx::read_from(data)?),
                0xb5 => Instruction::PutField(ConstantIdx::read_from(data)?),
                0xb6 => Instruction::InvokeVirtual(ConstantIdx::read_from(data)?),
                0xb7 => Instruction::InvokeSpecial(ConstantIdx::read_from(data)?),
                0xb8 => Instruction::InvokeStatic(ConstantIdx::read_from(data)?),
                0xb9 => Instruction::InvokeInterface(
                    ConstantIdx::read_from(data)?,
                    u8::read_from(data)?,
                ),
                0xba => Instruction::InvokeDynamic(ConstantIdx::read_from(data)?),
                0xbb => Instruction::New(ConstantIdx::read_from(data)?),
                0xbc => Instruction::NewArray(u8::read_from(data)?),
                0xbd => Instruction::ANewArray,
                0xbe => Instruction::ArrayLength,
                0xbf => Instruction::AThrow,
                0xc0 => Instruction::CheckCast(ConstantIdx::read_from(data)?),
                0xc1 => Instruction::InstanceOf(ConstantIdx::read_from(data)?),
                0xc2 => Instruction::MonitorEnter,
                0xc3 => Instruction::MonitorExit,
                0xc4 => {
                    // A `wide` prefix inside a `wide` prefix is malformed.
                    return Err(Error::BadInstruction(0xc4, wide));
                }
                0xc5 => {
                    Instruction::MultiANewArray(ConstantIdx::read_from(data)?, u8::read_from(data)?)
                }
                0xc6 => Instruction::IfNull(i16::read_from(data)?),
                0xc7 => Instruction::IfNonNull(i16::read_from(data)?),
                0xc8 => Instruction::GotoW(i32::read_from(data)?),
                0xc9 => Instruction::JsrW(i32::read_from(data)?),
                other => {
                    return Err(Error::BadInstruction(other, wide));
                }
            };
            Ok(opc)
        }
        let first = u8::read_from(data)?;
        if first == 0xc4 {
            // `wide` prefix: the real opcode follows with widened operands.
            let next = u8::read_from(data)?;
            read_instruction(data, next, true)
        } else {
            read_instruction(data, first, false)
        }
    }
}
|
extern crate spacesuit;
use spacesuit::{prove, verify, SpacesuitError, Value};
extern crate curve25519_dalek;
extern crate bulletproofs;
use bulletproofs::{BulletproofGens, PedersenGens};
/// Round-trips a transaction through the spacesuit prover and verifier:
/// builds a proof that `inputs` can be rearranged/merged/split into
/// `outputs` while conserving value per flavor, then verifies it.
fn spacesuit_helper(
    bp_gens: &BulletproofGens,
    inputs: Vec<Value>,
    outputs: Vec<Value>,
) -> Result<(), SpacesuitError> {
    let m = inputs.len();
    let n = outputs.len();
    let pc_gens = PedersenGens::default();
    let (proof, commitments) = prove(&bp_gens, &pc_gens, &inputs, &outputs)?;
    verify(&bp_gens, &pc_gens, &proof, commitments, m, n)
}
// Helper functions to make the tests easier to read
/// A test `Value` of `q` units of the "yuan" flavor (asset 888, tag 999).
fn yuan(q: u64) -> Value {
    let a = 888u64.into();
    let t = 999u64.into();
    Value { q, a, t }
}
/// A test `Value` of `q` units of the "peso" flavor (asset 666, tag 777).
fn peso(q: u64) -> Value {
    let a = 666u64.into();
    let t = 777u64.into();
    Value { q, a, t }
}
/// A test `Value` of `q` units of the "euro" flavor (asset 444, tag 555).
fn euro(q: u64) -> Value {
    let a = 444u64.into();
    let t = 555u64.into();
    Value { q, a, t }
}
/// The all-zero `Value`, used as padding in uneven transactions.
fn zero() -> Value {
    Value::zero()
}
// m=1, n=1
/// One input, one output: quantities and flavors must match exactly.
#[test]
fn spacesuit_1_1() {
    let bp_gens = BulletproofGens::new(1000, 1);
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1)], vec![yuan(1)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![peso(4)], vec![peso(4)]).is_ok());
    // Flavor mismatch between input and output must fail to verify.
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1)], vec![peso(4)]).is_err());
}
// max(m, n) = 2
/// Uneven sides: one value split into two, and two merged into one.
#[test]
fn spacesuit_uneven_2() {
    let bp_gens = BulletproofGens::new(1000, 1);
    assert!(spacesuit_helper(&bp_gens, vec![yuan(3)], vec![yuan(1), yuan(2)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1), yuan(2)], vec![yuan(3)]).is_ok());
}
// m=2, n=2
/// Two inputs, two outputs: pure shuffles and same-flavor redistribution.
#[test]
fn spacesuit_2_2() {
    let bp_gens = BulletproofGens::new(1000, 1);
    // Only shuffle (all different flavors)
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1), peso(4)], vec![yuan(1), peso(4)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1), peso(4)], vec![peso(4), yuan(1)]).is_ok());
    // Middle shuffle & merge & split (has multiple inputs or outputs of same flavor)
    assert!(spacesuit_helper(&bp_gens, vec![peso(4), peso(4)], vec![peso(4), peso(4)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![peso(5), peso(3)], vec![peso(5), peso(3)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![peso(5), peso(3)], vec![peso(1), peso(7)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![peso(1), peso(8)], vec![peso(0), peso(9)]).is_ok());
    // Conservation violation across flavors must fail.
    assert!(spacesuit_helper(&bp_gens, vec![yuan(1), yuan(1)], vec![peso(4), yuan(1)]).is_err());
}
// m=3, n=3
/// Three inputs, three outputs: all permutations of a pure shuffle, then
/// merge/split cases, then combinations requiring end shuffles too.
#[test]
fn spacesuit_3_3() {
    let bp_gens = BulletproofGens::new(1000, 1);
    // Only shuffle
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![yuan(1), peso(4), euro(8)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![yuan(1), euro(8), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![peso(4), yuan(1), euro(8)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![peso(4), euro(8), yuan(1)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![euro(8), yuan(1), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![euro(8), peso(4), yuan(1)]
        )
        .is_ok()
    );
    // Any change to a quantity or flavor must fail.
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![yuan(2), peso(4), euro(8)]
        )
        .is_err()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![yuan(1), euro(4), euro(8)]
        )
        .is_err()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(8)],
            vec![yuan(1), peso(4), euro(9)]
        )
        .is_err()
    );
    // Middle shuffle & merge & split
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(1), peso(4)],
            vec![yuan(1), yuan(1), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(3), peso(4)],
            vec![yuan(2), yuan(5), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(3), peso(4)],
            vec![peso(4), yuan(2), yuan(5)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(2), yuan(5)],
            vec![yuan(4), yuan(3), yuan(1)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(2), yuan(5)],
            vec![yuan(4), yuan(3), yuan(10)]
        )
        .is_err()
    );
    // End shuffles & merge & split & middle shuffle
    // (multiple asset types that need to be grouped and merged or split)
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), yuan(1)],
            vec![yuan(1), yuan(1), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), peso(4), yuan(3)],
            vec![peso(3), yuan(7), peso(1)]
        )
        .is_ok()
    );
}
// max(m, n) = 3
/// Uneven sides up to three values: merges and splits across sides.
#[test]
fn spacesuit_uneven_3() {
    let bp_gens = BulletproofGens::new(1000, 1);
    assert!(spacesuit_helper(&bp_gens, vec![yuan(4), yuan(4), yuan(3)], vec![yuan(11)]).is_ok());
    assert!(spacesuit_helper(&bp_gens, vec![yuan(11)], vec![yuan(4), yuan(4), yuan(3)],).is_ok());
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(11), peso(4)],
            vec![yuan(4), yuan(7), peso(4)],
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(7), peso(4)],
            vec![yuan(11), peso(4)],
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(5), yuan(6)],
            vec![yuan(4), yuan(4), yuan(3)],
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(4), yuan(3)],
            vec![yuan(5), yuan(6)],
        )
        .is_ok()
    );
}
// m=4, n=4
/// Four inputs, four outputs: shuffles, merges/splits (including padding
/// with a zero value), and conservation-violation failures.
#[test]
fn spacesuit_4_4() {
    let bp_gens = BulletproofGens::new(1000, 1);
    // Only shuffle
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(7), euro(10)],
            vec![yuan(1), peso(4), euro(7), euro(10)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), euro(7), euro(10)],
            vec![euro(7), yuan(1), euro(10), peso(4),]
        )
        .is_ok()
    );
    // Middle shuffle & merge & split
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(1), peso(4), peso(4)],
            vec![yuan(1), yuan(1), peso(4), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(3), peso(4), peso(4)],
            vec![yuan(2), yuan(5), peso(1), peso(7)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), yuan(3), peso(4), peso(4)],
            vec![peso(1), peso(7), yuan(2), yuan(5)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(1), yuan(5), yuan(2)],
            vec![yuan(1), yuan(1), yuan(5), yuan(2)]
        )
        .is_ok()
    );
    // A zero value may pad the output side.
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(2), yuan(5), yuan(2)],
            vec![yuan(4), yuan(3), yuan(3), zero()]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), yuan(2), yuan(5), yuan(2)],
            vec![yuan(4), yuan(3), yuan(3), yuan(20)]
        )
        .is_err()
    );
    // End shuffles & merge & split & middle shuffle
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(1), peso(4), yuan(1), peso(4)],
            vec![peso(4), yuan(1), yuan(1), peso(4)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(4), peso(4), peso(4), yuan(3)],
            vec![peso(1), yuan(2), yuan(5), peso(7)]
        )
        .is_ok()
    );
    assert!(
        spacesuit_helper(
            &bp_gens,
            vec![yuan(10), peso(1), peso(2), peso(3)],
            vec![yuan(5), yuan(4), yuan(1), peso(6)]
        )
        .is_ok()
    );
}
|
pub mod foo;
pub mod display;
/// Prints the integers `0..x`, one per line, to stdout.
pub fn lol(x: i32) {
    (0..x).for_each(|y| println!("{}", y));
}
|
/// A 9x9 sudoku board. Each cell is `(value, editable)`: `value` is 0 for
/// an empty cell or 1-9 for a digit; `editable` is true for cells the
/// player is allowed to change.
pub struct Gameboard {
    pub cells: Vec<Vec<(u8, bool)>>
}

impl Gameboard {
    /// Creates a board from a prepared grid of `(value, editable)` cells,
    /// indexed as `cells[row][col]`.
    pub fn new(cells: Vec<Vec<(u8, bool)>>) -> Gameboard {
        Gameboard { cells }
    }

    /// True when `vals` holds no empty cell (0) and contains every digit
    /// 1..=9. Shared by the row/column/section checks below.
    fn group_complete(vals: &[u8]) -> bool {
        !vals.contains(&0) && (1..=9u8).all(|d| vals.contains(&d))
    }

    // get value
    /// Returns the display character for the cell at `[col, row]`, or
    /// `None` when the cell is empty or out of the 1-9 range.
    pub fn char(&self, ind: [usize; 2]) -> Option<char> {
        match self.cells[ind[1]][ind[0]].0 {
            v @ 1..=9 => Some((b'0' + v) as char),
            _ => None,
        }
    }

    // set value
    /// Writes `val` into the cell at `[col, row]` if that cell is editable;
    /// fixed (puzzle-given) cells are left untouched.
    pub fn set(&mut self, ind: [usize; 2], val: u8) {
        let cell = &mut self.cells[ind[1]][ind[0]];
        if cell.1 {
            cell.0 = val;
        }
    }

    /// True when row `ind` is completely and validly filled.
    /// (Takes `&self` now: the check never mutated the board.)
    pub fn check_horizontally(&self, ind: usize) -> bool {
        let row: Vec<u8> = self.cells[ind].iter().map(|c| c.0).collect();
        Self::group_complete(&row)
    }

    /// True when column `ind` is completely and validly filled.
    pub fn check_vertically(&self, ind: usize) -> bool {
        let col: Vec<u8> = self.cells.iter().map(|row| row[ind].0).collect();
        Self::group_complete(&col)
    }

    /// True when the 3x3 section containing the cell at `[col, row]` is
    /// completely and validly filled.
    pub fn check_section(&self, ind: [usize; 2]) -> bool {
        let base_x = ind[0] / 3 * 3;
        let base_y = ind[1] / 3 * 3;
        let mut vals = Vec::with_capacity(9);
        for x in base_x..base_x + 3 {
            for y in base_y..base_y + 3 {
                vals.push(self.cells[y][x].0);
            }
        }
        Self::group_complete(&vals)
    }

    /// True when every row, every column, and every 3x3 section is
    /// completely and validly filled, i.e. the puzzle is solved.
    pub fn check_game(&self) -> bool {
        (0..9).all(|i| self.check_horizontally(i) && self.check_vertically(i))
            && (0..9).all(|s| self.check_section([(s % 3) * 3, (s / 3) * 3]))
    }
}
use core::{mem, ptr};
use orbclient::{Color, Renderer};
use std::fs::find;
use std::proto::Protocol;
use uefi::guid::Guid;
use uefi::status::{Error, Result};
use crate::display::{Display, ScaledDisplay, Output};
use crate::image::{self, Image};
use crate::key::{key, Key};
use crate::redoxfs;
use crate::text::TextDisplay;
use self::memory_map::memory_map;
use self::paging::paging;
mod memory_map;
mod paging;
mod partitions;
// Path to the kernel image on the boot filesystem; BASEDIR is injected at
// build time via the environment.
static KERNEL: &'static str = concat!("\\", env!("BASEDIR"), "\\kernel");
// Splash image baked into the loader binary.
static SPLASHBMP: &'static [u8] = include_bytes!("../../../res/splash.bmp");
// NOTE(review): KERNEL_OFFSET appears to be the kernel's linked virtual base
// and KERNEL_PHYSICAL the physical load address — confirm against the linker
// script / kernel entry code.
static KERNEL_OFFSET: u64 = 0xFFFF_FF00_0000_0000;
static KERNEL_PHYSICAL: u64 = 0x4000_0000;
// Filled in while loading; read later (e.g. `enter` uses KERNEL_ENTRY and
// DTB_PHYSICAL). Mutable statics: single-threaded pre-boot environment.
static mut KERNEL_SIZE: u64 = 0;
static mut KERNEL_ENTRY: u64 = 0;
static mut DTB_PHYSICAL: u64 = 0;
/// Stub for the stack-probe helper the toolchain emits calls to on this
/// (PE/UEFI) target; intentionally a no-op for now.
#[no_mangle]
pub extern "C" fn __chkstk() {
    //TODO
}
/// Calls UEFI `ExitBootServices` with the given memory-map key; after this
/// the firmware's boot services are gone. The status is ignored (`let _`) —
/// there is no recovery path either way at this point.
unsafe fn exit_boot_services(key: usize) {
    let handle = std::handle();
    let uefi = std::system_table();
    let _ = (uefi.BootServices.ExitBootServices)(handle, key);
}
/// Jumps into the loaded kernel, passing the device-tree address; never
/// returns.
unsafe fn enter() -> ! {
    // KERNEL_ENTRY is a KERNEL_OFFSET-relative virtual address; translate it
    // to where the image physically sits, since we jump before the kernel's
    // own higher-half mapping is active — NOTE(review): confirm paging state
    // at this point against `paging()`.
    let entry_fn: extern "C" fn(dtb: u64) -> ! = mem::transmute((
        KERNEL_PHYSICAL + KERNEL_ENTRY - KERNEL_OFFSET
    ));
    entry_fn(DTB_PHYSICAL);
}
/// Scans all BlockIo handles and returns the first logical partition that
/// looks bootable: a GPT partition with the RedoxFS or Linux filesystem
/// type GUID, or an MBR partition of type 0x83 (Linux).
fn get_correct_block_io() -> Result<redoxfs::Disk> {
    // Get all BlockIo handles.
    let mut handles = vec! [uefi::Handle(0); 128];
    let mut size = handles.len() * mem::size_of::<uefi::Handle>();
    (std::system_table().BootServices.LocateHandle)(uefi::boot::LocateSearchType::ByProtocol, &uefi::guid::BLOCK_IO_GUID, 0, &mut size, handles.as_mut_ptr())?;
    // LocateHandle reports the byte size it used/needed; clamp so we never
    // read past our fixed 128-entry buffer.
    let max_size = size / mem::size_of::<uefi::Handle>();
    let actual_size = std::cmp::min(handles.len(), max_size);
    // Return the handle that seems bootable.
    for handle in handles.into_iter().take(actual_size) {
        let block_io = redoxfs::Disk::handle_protocol(handle)?;
        // Skip whole-disk handles; only partitions are considered.
        if !block_io.0.Media.LogicalPartition {
            continue;
        }
        let part = partitions::PartitionProto::handle_protocol(handle)?.0;
        // NOTE(review): `sys == 1` appears to mark the system (ESP-like)
        // partition — confirm against the partition-info protocol definition.
        if part.sys == 1 {
            continue;
        }
        // Braces copy the field out of the (packed) struct before comparing.
        assert_eq!({part.rev}, partitions::PARTITION_INFO_PROTOCOL_REVISION);
        if part.ty == partitions::PartitionProtoDataTy::Gpt as u32 {
            // Union access: `info.gpt` is only valid because `ty` says Gpt.
            let gpt = unsafe { part.info.gpt };
            assert_ne!(gpt.part_ty_guid, partitions::ESP_GUID, "detected esp partition again");
            if gpt.part_ty_guid == partitions::REDOX_FS_GUID || gpt.part_ty_guid == partitions::LINUX_FS_GUID {
                return Ok(block_io);
            }
        } else if part.ty == partitions::PartitionProtoDataTy::Mbr as u32 {
            // Union access: `info.mbr` is only valid because `ty` says Mbr.
            let mbr = unsafe { part.info.mbr };
            if mbr.ty == 0x83 {
                return Ok(block_io);
            }
        } else {
            continue;
        }
    }
    panic!("Couldn't find handle for partition");
}
/// EFI configuration-table GUID under which firmware publishes the flattened
/// device tree (DTB).
static DTB_GUID: Guid = Guid(0xb1b621d5, 0xf19c, 0x41a5, [0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0]);
/// Scan the UEFI configuration tables for the device-tree blob and record its
/// physical address in `DTB_PHYSICAL`; `Err(NotFound)` when absent.
fn find_dtb() -> Result<()> {
    let cfg_tables = std::system_table().config_tables();
    for cfg_table in cfg_tables.iter() {
        if cfg_table.VendorGuid == DTB_GUID {
            unsafe {
                DTB_PHYSICAL = cfg_table.VendorTable as u64;
                println!("DTB: {:X}", DTB_PHYSICAL);
            }
            return Ok(());
        }
    }
    println!("Failed to find DTB");
    Err(Error::NotFound)
}
/// Open the filesystem on the first bootable-looking partition.
fn redoxfs() -> Result<redoxfs::FileSystem> {
    // TODO: Scan multiple partitions for a kernel.
    redoxfs::FileSystem::open(get_correct_block_io()?)
}
const MB: usize = 1024 * 1024;
/// Bootloader main path: find the DTB, stream the kernel from disk, copy it
/// to its physical load address, leave UEFI boot services, enable paging, and
/// jump into the kernel (diverges on success).
fn inner() -> Result<()> {
    find_dtb()?;
    {
        println!("Loading Kernel...");
        // `env` is built but currently always empty — NOTE(review): looks
        // like a placeholder for a boot environment string.
        let (kernel, mut env): (Vec<u8>, String) = {
            let (_i, mut kernel_file) = find(KERNEL)?;
            let info = kernel_file.info()?;
            let len = info.FileSize;
            let mut kernel = Vec::with_capacity(len as usize);
            // Read in 4 MB chunks, printing a progress line per chunk.
            let mut buf = vec![0; 4 * MB];
            loop {
                let percent = kernel.len() as u64 * 100 / len;
                print!("\r{}% - {} MB", percent, kernel.len() / MB);
                let count = kernel_file.read(&mut buf)?;
                if count == 0 {
                    break;
                }
                kernel.extend(&buf[.. count]);
            }
            println!("");
            (kernel, String::new())
        };
        println!("Copying Kernel...");
        unsafe {
            KERNEL_SIZE = kernel.len() as u64;
            println!("Size: {}", KERNEL_SIZE);
            // Entry point read from image offset 0x18 — presumably the ELF64
            // header's e_entry field; TODO confirm the kernel image format.
            KERNEL_ENTRY = *(kernel.as_ptr().offset(0x18) as *const u64);
            println!("Entry: {:X}", KERNEL_ENTRY);
            ptr::copy(kernel.as_ptr(), KERNEL_PHYSICAL as *mut u8, kernel.len());
        }
        println!("Done!");
    }
    unsafe {
        // After ExitBootServices, firmware services (including console
        // output) are no longer available.
        let key = memory_map();
        exit_boot_services(key);
    }
    unsafe {
        // Mask interrupts before switching translation tables (AArch64
        // DAIF set, bit 2 — IRQ mask), then install the kernel's paging.
        asm!("msr daifset, #2");
        paging();
    }
    unsafe {
        enter();
    }
}
/// Interactively pick a graphics mode.
///
/// Cycles through every mode the output reports, printing its resolution and
/// returning the index of the first one the user accepts with 'y'. Loops
/// forever until a mode is chosen.
fn select_mode(output: &mut Output) -> Result<u32> {
    loop {
        for i in 0..output.0.Mode.MaxMode {
            let mut mode_ptr = ::core::ptr::null_mut();
            let mut mode_size = 0;
            (output.0.QueryMode)(output.0, i, &mut mode_size, &mut mode_ptr)?;
            let mode = unsafe { &mut *mode_ptr };
            let w = mode.HorizontalResolution;
            let h = mode.VerticalResolution;
            print!("\r{}x{}: Is this OK? (y)es/(n)o", w, h);
            // `key(true)` — presumably blocks waiting for a key press.
            if key(true)? == Key::Character('y') {
                println!("");
                return Ok(i);
            }
        }
    }
}
/// Draw the boot splash UI (background, splash image, version banner) and run
/// `f` with its console output piped into a centered text region on screen.
fn pretty_pipe<T, F: FnMut() -> Result<T>>(splash: &Image, f: F) -> Result<T> {
    let mut display = Display::new(Output::one()?);
    let mut display = ScaledDisplay::new(&mut display);
    {
        // Light-blue background with the splash image centered near the top.
        let bg = Color::rgb(0x4a, 0xa3, 0xfd);
        display.set(bg);
        {
            let x = (display.width() as i32 - splash.width() as i32)/2;
            let y = 16;
            splash.draw(&mut display, x, y);
        }
        {
            // Version/target banner, centered near the bottom; glyphs are
            // drawn 8 px apart.
            let prompt = format!(
                "Redox Bootloader {} {}",
                env!("CARGO_PKG_VERSION"),
                env!("TARGET").split('-').next().unwrap_or("")
            );
            let mut x = (display.width() as i32 - prompt.len() as i32 * 8)/2;
            let y = display.height() as i32 - 32;
            for c in prompt.chars() {
                display.char(x, y, c, Color::rgb(0xff, 0xff, 0xff));
                x += 8;
            }
        }
        display.sync();
    }
    {
        // Centered 80-column text area between the splash and the banner;
        // the text cell size is 8x16 px.
        let cols = 80;
        let off_x = (display.width() as i32 - cols as i32 * 8)/2;
        let off_y = 16 + splash.height() as i32 + 16;
        let rows = (display.height() as i32 - 64 - off_y - 1) as usize/16;
        display.rect(off_x, off_y, cols as u32 * 8, rows as u32 * 16, Color::rgb(0, 0, 0));
        display.sync();
        let mut text = TextDisplay::new(display);
        text.off_x = off_x;
        text.off_y = off_y;
        text.cols = cols;
        text.rows = rows;
        text.pipe(f)
    }
}
/// Bootloader entry point called by the UEFI runtime wrapper.
///
/// The graphical splash/mode-selection path below is disabled (TODO);
/// currently `inner` runs directly against the plain console.
pub fn main() -> Result<()> {
    inner()?;
    /* TODO
    if let Ok(mut output) = Output::one() {
        let mut splash = Image::new(0, 0);
        {
            println!("Loading Splash...");
            if let Ok(image) = image::bmp::parse(&SPLASHBMP) {
                splash = image;
            }
            println!(" Done");
        }
        /* TODO
        let mode = pretty_pipe(&splash, || {
            select_mode(&mut output)
        })?;
        (output.0.SetMode)(output.0, mode)?;
        */
        pretty_pipe(&splash, inner)?;
    } else {
        inner()?;
    }
    */
    Ok(())
}
|
use proc_macro2::Span;
/// Print the call-site span; with the semver-exempt cfg enabled, also print
/// the span's source file (a proc-macro2 API gated behind
/// `--cfg procmacro2_semver_exempt`).
fn main() {
    let span = Span::call_site();
    println!("span: {:?}", span);
    // Only compiled when built with the semver-exempt cfg flag.
    #[cfg(procmacro2_semver_exempt)]
    println!("source file: {:?}", span.source_file());
}
|
use std::{cmp::Eq, collections::HashSet, hash::Hash};
/// Specification of a cellular automaton: the cell coordinate type, its
/// neighborhood, and the survival/birth rule.
pub trait CellAutoSpec {
    type T;
    /// All neighbors of a cell.
    fn neighbors(x: &Self::T) -> Vec<Self::T>;
    /// Whether a cell is alive in the next generation, given its current
    /// state and its number of live neighbors.
    fn rule(alive: bool, alive_neighbors: usize) -> bool;
}
/// A cellular automaton state: the set of currently-live cells.
pub struct CellAuto<Spec: CellAutoSpec> {
    pub cells: HashSet<Spec::T>,
}
impl<Spec: CellAutoSpec> CellAuto<Spec> {
    /// Compute the next generation.
    ///
    /// Only cells adjacent to a live cell are considered, so a live cell
    /// with no live cell in its own neighborhood can only survive if it is
    /// some live cell's neighbor (matching the original semantics).
    #[must_use]
    pub fn next(&self) -> Self
    where
        Spec::T: Eq + Hash + Clone,
    {
        // Candidate cells: the union of all neighborhoods of live cells.
        let mut candidates: HashSet<Spec::T> = HashSet::new();
        for cell in &self.cells {
            candidates.extend(Spec::neighbors(cell));
        }
        // Keep each candidate the rule declares alive next generation.
        let mut cells = HashSet::new();
        for candidate in &candidates {
            let currently_alive = self.cells.contains(candidate);
            let live_neighbors = Spec::neighbors(candidate)
                .into_iter()
                .filter(|other| self.cells.contains(other))
                .count();
            if Spec::rule(currently_alive, live_neighbors) {
                cells.insert(candidate.clone());
            }
        }
        Self { cells }
    }
}
|
use std::net::TcpStream;
use std::io::prelude::*;
use std::io;
use color_strip::ColorStrip;
/// Client for an Open Pixel Control (OPC) server driving an LED strip.
pub struct OpcStrip {
    // TCP connection to the OPC server.
    stream: TcpStream,
    // Reusable OPC message buffer: 4-byte header followed by RGB triplets.
    data: Vec<u8>,
    pub led_count: usize,
    // When true, pixel order is written back-to-front.
    reversed: bool
}
impl OpcStrip {
    /// Connect to a local OPC server and prepare a reusable message buffer
    /// for `led_count` RGB pixels.
    ///
    /// OPC message layout: `[channel, command, len_hi, len_lo, r,g,b, ...]`.
    pub fn connect(led_count: usize, reversed: bool) -> io::Result<OpcStrip> {
        let mut data: Vec<u8> = vec![0; 4 + (led_count * 3) as usize];
        // The payload length is a 16-bit big-endian field. The previous code
        // set only the low byte via `led_count as u8 * 3`, which overflowed
        // (panicking in debug builds) for strips longer than 85 LEDs.
        let payload_len = led_count * 3;
        data[2] = (payload_len >> 8) as u8;
        data[3] = (payload_len & 0xff) as u8;
        TcpStream::connect("127.0.0.1:7890").map(|stream| OpcStrip { stream, data, led_count, reversed })
    }
    /// Serialize the first `led_count` pixels into the message buffer and
    /// send it. Write errors are deliberately ignored (best-effort output).
    pub fn send(&mut self, color_strip: &ColorStrip) {
        for (i, color) in color_strip.pixel[0..self.led_count].iter().enumerate() {
            let final_color = *color;
            // Forward fills from index 4 upward; reversed fills from the end
            // of the buffer downward, so the strip renders back-to-front.
            let mut j = if self.reversed { self.data.len() - i * 3 - 3 } else { i * 3 + 4 };
            self.data[j] = final_color.r;
            j += 1;
            self.data[j] = final_color.g;
            j += 1;
            self.data[j] = final_color.b;
        }
        self.stream.write(&self.data[..]).ok();
    }
}
|
use crate::config::CONFIG;
use crate::init::AppConnections;
use crate::{model::User, schema::user::dsl::*};
use async_session::{async_trait, Session, SessionStore};
use axum::http::HeaderMap;
use axum::{
extract::{Extension, FromRequest},
http::{self, StatusCode},
BoxError, Json,
};
use cookie::Cookie;
use diesel::prelude::*;
use diesel::{QueryDsl, RunQueryDsl};
use serde::{Deserialize, Serialize};
use tracing::debug;
/// The payload stored inside a user's session: which user it belongs to.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct UserSessionBody {
    pub user_id: i32,
    pub username: String,
}
/// Result of session extraction: either a brand-new session (the caller must
/// send the cookie back to the client) or an existing one.
#[derive(Debug)]
pub enum UserSession {
    CreateNewSession {
        cookie: Cookie<'static>,
        body: UserSessionBody,
    },
    GetSession(UserSessionBody),
}
/// Credentials submitted in the request body when no session cookie exists.
#[derive(Deserialize)]
struct LoginPayload {
    username: String,
    password: String,
}
/// Extract and parse the `Cookie` header, if present.
///
/// Returns `None` when the header is absent, not valid UTF-8, or not a
/// parsable cookie pair — the previous `.unwrap()` panicked on malformed
/// client input.
pub fn get_cookie_from_header(header: &HeaderMap) -> Option<Cookie<'_>> {
    header
        .get(http::header::COOKIE)
        .and_then(|x| x.to_str().ok())
        .and_then(|x| Cookie::parse(x).ok())
}
/// Authenticate `login_payload` against the user table and build a fresh
/// session for the user.
///
/// # Errors
/// Returns `Err` when the username is unknown or the password is wrong.
async fn create_session(
    app_connections: &AppConnections,
    login_payload: Json<LoginPayload>,
) -> Result<(Session, UserSessionBody), &'static str> {
    // Look up the account for the submitted username; unknown users are a
    // normal failure, not a panic.
    let auth_user = match user
        .filter(username.eq(&login_payload.username))
        .limit(1)
        .first::<User>(&app_connections.db_connections.get().unwrap())
    {
        Ok(u) => u,
        Err(_) => return Err("invalid username or password"),
    };
    // SECURITY FIX: the original hashed the *stored* password and then
    // verified the stored password against its own hash, which accepted ANY
    // submitted password. Verify the password from the login payload instead.
    let hashed_password = argon2::hash_encoded(
        auth_user.password.as_bytes(),
        CONFIG.auth.salt.as_bytes(),
        &argon2::Config::default(),
    )
    .expect("failed to hash stored credential");
    let is_valid = argon2::verify_encoded(&hashed_password, login_payload.password.as_bytes())
        .expect("malformed password hash");
    if !is_valid {
        // Wrong password is a recoverable error for the caller, not a panic.
        return Err("invalid username or password");
    }
    let session_body = UserSessionBody {
        user_id: auth_user.id,
        username: auth_user.username,
    };
    debug!("create new session for user: {:?}", session_body);
    let mut session = Session::new();
    session
        .insert(CONFIG.session.key.as_str(), session_body.clone())
        .unwrap();
    Ok((session, session_body))
}
#[async_trait]
impl<T> FromRequest<T> for UserSession
where
    T: http_body::Body + Send,
    T::Data: Send,
    T::Error: Into<BoxError>,
{
    type Rejection = (StatusCode, &'static str);
    /// Extract the user's session from the request.
    ///
    /// With a session cookie: load the session from the store and return
    /// `GetSession`. Without one: authenticate the JSON body's credentials,
    /// create a session, and return `CreateNewSession` (the handler must set
    /// the returned cookie on the response).
    async fn from_request(
        req: &mut axum::extract::RequestParts<T>,
    ) -> Result<Self, Self::Rejection> {
        let Extension(app_connections) = Extension::<AppConnections>::from_request(req)
            .await
            .expect("AppConnections not found");
        let header = req.headers().expect("headers not found");
        let cookie = if let Some(cookie) = get_cookie_from_header(header) {
            cookie.to_owned()
        } else {
            debug!("cookie not found, create new session base on username and password");
            // No cookie: fall back to username/password login from the body.
            let login_payload = Json::<LoginPayload>::from_request(req).await;
            if login_payload.is_err() {
                return Err((StatusCode::UNAUTHORIZED, "invalid input"));
            }
            let (session, body) = create_session(&app_connections, login_payload.unwrap())
                .await
                .unwrap();
            // Persist the session; the store returns the cookie value.
            let cookie_value = app_connections
                .session_store
                .store_session(session)
                .await
                .unwrap()
                .unwrap();
            let cookie = Cookie::build(CONFIG.session.key.as_str(), cookie_value)
                .path("/")
                // .secure(true)
                .http_only(true)
                .max_age(time::Duration::days(1))
                .finish();
            // Early return: a brand-new session carries its cookie.
            return Ok(Self::CreateNewSession { cookie, body });
        };
        let store = app_connections.session_store;
        // Cookie present: resolve it to a stored session or reject.
        let session = if let Some(session) = store
            .load_session(cookie.value().to_string())
            .await
            .unwrap()
        {
            session
        } else {
            return Err((StatusCode::UNAUTHORIZED, "invalid user"));
        };
        let body = session
            .get::<UserSessionBody>(CONFIG.session.key.as_str())
            .unwrap();
        Ok(Self::GetSession(body))
    }
}
|
use graphics::math::*;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashMap;
use interpolation::Lerp;
use super::AABB::AABB;
use super::config;
use super::map::{Map, AreaIndex};
/// A physics body moving over a tile `Map`, with AABB collision against
/// obstacle tiles and one-way platforms.
pub struct MovingObject {
    pub object_id: String,
    // State from the previous physics step (position/speed/acceleration).
    pub old_position: Vec2d,
    pub position: Vec2d,
    pub old_speed: Vec2d,
    pub speed: Vec2d,
    pub old_accelaration: Vec2d, // (sic) field name kept for compatibility
    pub acceleration: Vec2d,
    pub scale: Vec2d,
    // Collision box; its center is kept at `position + aabb_offset`.
    pub aabb: AABB,
    pub aabb_offset: Vec2d,
    // Contact flags: `pushed_*`/`was_*` are last step, the others current.
    pub pushed_right_wall: bool,
    pub pushes_right_wall: bool,
    pub pushed_left_wall: bool,
    pub pushes_left_wall: bool,
    pub was_on_ground: bool,
    pub on_ground: bool,
    pub was_at_ceiling: bool,
    pub at_ceiling: bool,
    pub on_one_way_platform: bool,
    pub areas: Vec<AreaIndex>,
    pub all_colliding_objects: HashMap<String, CollisionData>,
    bounds: Vec2d,
    accelerate: f64,
    max_speed: f64,
    jump_speed: f64,
    // Vertical tolerance (in map units) for landing on / dropping through
    // one-way platforms.
    one_way_platform_tsh: f64
}
impl MovingObject {
    /// Create a body at `position` with a collision box of `size` (stored as
    /// center + half-size), affected by gravity and starting on the ground.
    pub fn new(position: Vec2d, size: Vec2d, bounds: Vec2d, accelerate: f64, max_speed: f64, jump_speed: f64, object_id: String) -> MovingObject {
        MovingObject {
            object_id: object_id,
            position: position,
            old_position: [0.0, 0.0],
            acceleration: [0.0, config::GRAVITY],
            old_accelaration: [0.0, 0.0],
            speed: [0.0, 0.0],
            old_speed: [0.0, 0.0],
            scale: [1.0, 1.0],
            pushed_right_wall: false,
            pushes_right_wall: false,
            pushed_left_wall: false,
            pushes_left_wall: false,
            was_on_ground: true,
            on_ground: true,
            was_at_ceiling: false,
            at_ceiling: false,
            on_one_way_platform: false,
            aabb: AABB::new(position, mul_scalar(size, 0.5)),
            aabb_offset: mul_scalar(size, 0.5),
            bounds: bounds,
            accelerate: accelerate,
            max_speed: max_speed,
            jump_speed: jump_speed,
            one_way_platform_tsh: 15.0,
            areas: Vec::new(),
            all_colliding_objects: HashMap::new()
        }
    }
    // Snap to a wall on the right and stop horizontal motion when moving
    // rightwards into it; otherwise clear the contact flag.
    fn handle_right_side_collision(&mut self, map: &Map) {
        let (collides, wall_x) = self.collides_right_side(&map);
        if collides && self.speed[0] > 0.0 {
            self.position[0] = wall_x - self.aabb.half_size[0] * 2.0;
            self.speed[0] = 0.0;
            self.pushes_right_wall = true;
            return;
        }
        self.pushes_right_wall = false;
    }
    // Mirror image of `handle_right_side_collision` for leftward motion.
    fn handle_left_side_collision(&mut self, map: &Map) {
        let (collides, wall_x) = self.collides_left_side(&map);
        if collides && self.speed[0] < 0.0 {
            self.position[0] = wall_x;
            self.speed[0] = 0.0;
            self.pushes_left_wall = true;
            return;
        }
        self.pushes_left_wall = false;
    }
    // Land on tiles (or on the map's bottom edge) when falling; updates
    // `on_ground` accordingly.
    fn check_ground_collision(&mut self, map: &Map) {
        // Clamp to the bottom of the map so the body can never fall out.
        let height = map.tiles[0].len() as f64 * map.tile_size;
        if self.position[1] >= height {
            self.position[1] = height;
            self.speed[1] = 0.0;
            self.on_ground = true;
            return
        }
        let (has_ground, calculated_ground) = self.has_ground(&map);
        // Positive Y speed means moving downwards.
        if self.speed[1] > 0.0 && has_ground {
            self.position[1] = calculated_ground - self.aabb.half_size[1] * 2.0;
            self.speed[1] = 0.0;
            self.on_ground = true;
        } else {
            self.on_ground = false;
        }
    }
    // Stop upward motion when the head hits a tile.
    fn check_ceiling_collision(&mut self, map: &Map) {
        let (has_ceiling, calculated_ceiling) = self.has_ceiling(&map);
        if self.speed[1] < 0.0 && has_ceiling {
            self.position[1] = calculated_ceiling;// - self.aabb_offset[1];
            self.speed[1] = 0.0;
        }
    }
    // Clamp horizontal speed to [-max_speed, max_speed]; vertical speed is
    // passed through unchanged.
    fn limit_walk_speed(&mut self, calculated_speed: Vec2d) {
        if calculated_speed[0] > self.max_speed {
            self.speed = [self.max_speed, calculated_speed[1]];
        } else if calculated_speed[0] < -self.max_speed {
            self.speed = [-self.max_speed, calculated_speed[1]];
        } else {
            self.speed = calculated_speed;
        }
    }
    /// Halt all motion immediately.
    pub fn stop(&mut self) {
        self.speed = [0.0, 0.0];
    }
    /// Apply an instantaneous upward impulse (negative Y is up).
    pub fn jump(&mut self) {
        self.speed = add(self.speed, [0.0, -self.jump_speed]);
    }
    /// Accelerate left; `factor` scales the configured acceleration.
    pub fn move_left(&mut self, factor: f64) {
        self.acceleration = [-self.accelerate * factor, self.acceleration[1]];
    }
    /// Accelerate right; `factor` scales the configured acceleration.
    pub fn move_right(&mut self, factor: f64) {
        self.acceleration = [self.accelerate * factor, self.acceleration[1]];
    }
    /// Drop through the one-way platform currently stood on, by nudging the
    /// body just past the platform tolerance. (Note: unrelated to `Drop`.)
    pub fn drop(&mut self) {
        if self.on_one_way_platform{
            self.position = [self.position[0], self.position[1] + self.one_way_platform_tsh];
            self.on_one_way_platform = false;
        }
    }
    /// Fast-fall: boost gravity while already moving downwards.
    pub fn falling(&mut self) {
        if self.speed[1] >= 0.0 {
            self.acceleration[1] = config::GRAVITY * 7.5;
        }
    }
    /// Restore normal gravity after a fast-fall.
    pub fn stop_falling(&mut self) {
        if self.speed[1] >= 0.0 {
            self.acceleration[1] = config::GRAVITY;
        }
    }
    /// Translate the body directly (used for parallax-scrolled objects).
    pub fn move_object(&mut self, x: f64, y: f64){
        self.position[0] += x * config::MAP_TILES_PARRALAX_FACTOR;
        self.position[1] += y * config::MAP_TILES_PARRALAX_FACTOR;
    }
    /// One physics step: integrate speed/position, apply friction, roll the
    /// current contact flags into the `was_*`/`pushed_*` ones, then resolve
    /// wall, ground, and ceiling collisions (in that order).
    pub fn update_physics(&mut self, delta: f64, map: &Map) {
        self.old_position = self.position;
        self.old_speed = self.speed;
        let calculated_speed = add(self.speed, mul_scalar(self.acceleration, delta));
        self.limit_walk_speed(calculated_speed);
        // Horizontal friction opposes the current speed.
        self.acceleration[0] = self.speed[0] * -config::FRICTION;
        self.was_on_ground = self.on_ground;
        self.pushed_right_wall = self.pushes_right_wall;
        self.pushed_left_wall = self.pushes_left_wall;
        self.was_at_ceiling = self.at_ceiling;
        self.aabb.center = add(self.position, self.aabb_offset);
        self.position = add(self.position, mul_scalar(self.speed, delta));
        self.handle_left_side_collision(&map);
        self.handle_right_side_collision(&map);
        self.check_ground_collision(&map);
        self.check_ceiling_collision(&map);
    }
    /// Is there ground under the body's bottom edge, sweeping from the old
    /// position to the new one so fast falls cannot tunnel through tiles?
    /// Returns `(found, ground_y)`; also updates `on_one_way_platform`.
    pub fn has_ground(&mut self, map: &Map) -> (bool, f64) {
        let (new_bottom_right, new_bottom_left, _, _) = self.get_sensors(self.position);
        let (old_bottom_right, old_bottom_left, _, _) = self.get_sensors(self.old_position);
        let end_y = map.get_map_tile_y_at_point(new_bottom_left[1]);
        let beg_y = (map.get_map_tile_y_at_point(old_bottom_left[1]) + 1).min(end_y);
        let dist = (beg_y - end_y).abs().max(1);
        // Walk each tile row crossed this step, interpolating the sensor
        // position along the movement path.
        for tileIndexY in beg_y..end_y + 1 {
            let bottom_right = self.round_vector(old_bottom_right.lerp(&new_bottom_right, &((end_y - tileIndexY).abs() as f64 / dist as f64)));
            let bottom_left = self.round_vector([bottom_right[0] - self.aabb.half_size[0] * 2.0 - 2.0, bottom_right[1]]);
            // Scan tiles across the bottom edge, left to right.
            let mut checked_tile = add(bottom_left, [1.0, 0.0]);
            while checked_tile[0] < bottom_right[0] {
                checked_tile[0] = checked_tile[0].min(new_bottom_right[0]);
                let tile_index_x = map.get_map_tile_x_at_point(checked_tile[0]);
                checked_tile[0] = checked_tile[0] + map.tile_size;
                let ground_y = tileIndexY as f64 * map.tile_size + map.position[1];
                if map.is_obstacle(tile_index_x, tileIndexY){
                    self.on_one_way_platform = false;
                    return (true, ground_y);
                }
                // One-way platforms only count when the sensor is within the
                // platform tolerance of the tile's top.
                if map.is_one_way_platform(tile_index_x, tileIndexY)
                    && (checked_tile[1] - ground_y).abs() < self.one_way_platform_tsh
                {
                    self.on_one_way_platform = true;
                    return (true, ground_y);
                }
            }
        }
        (false, 0.0)
    }
    /// Same sweep as `has_ground`, but upwards along the top edge; returns
    /// `(found, ceiling_y)` where `ceiling_y` is the tile's bottom.
    pub fn has_ceiling(&mut self, map: &Map) -> (bool, f64) {
        let (_, _, new_top_right, _new_top_left) = self.get_sensors(self.position);
        let (_, _, old_top_right, _old_top_left) = self.get_sensors(self.old_position);
        let end_y = map.get_map_tile_y_at_point(new_top_right[1]);
        let beg_y = (map.get_map_tile_y_at_point(old_top_right[1]) + 1).min(end_y);
        let dist = (beg_y - end_y).abs().max(1);
        for tileIndexY in beg_y..end_y + 1 {
            let top_right = self.round_vector(old_top_right.lerp(&new_top_right, &((end_y - tileIndexY).abs() as f64 / dist as f64)));
            let top_left = self.round_vector([top_right[0] - self.aabb.half_size[0] * 2.0 - 1.0, top_right[1]]);
            let mut checked_tile = add(top_left, [1.0, 0.0]);
            while checked_tile[0] < top_right[0] {
                checked_tile[0] = checked_tile[0].min(new_top_right[0]);
                let tile_index_x = map.get_map_tile_x_at_point(checked_tile[0]);
                checked_tile[0] = checked_tile[0] + map.tile_size;
                if map.is_obstacle(tile_index_x, tileIndexY){
                    let ceiling_y = tileIndexY as f64 * map.tile_size + map.tile_size + map.position[1];
                    return (true, ceiling_y);
                }
            }
        }
        (false, 0.0)
    }
    /// Swept collision test along the left edge; returns `(hit, wall_x)`
    /// where `wall_x` is the right face of the blocking tile.
    pub fn collides_left_side(&mut self, map: &Map) -> (bool, f64) {
        let (_, new_bottom_left, _, _new_top_left) = self.get_sensors(self.position);
        let (_, old_bottom_left, _, _old_top_left) = self.get_sensors(self.old_position);
        let end_x = map.get_map_tile_x_at_point(new_bottom_left[0]);
        let beg_x = (map.get_map_tile_x_at_point(old_bottom_left[0])).min(end_x);
        let dist = (end_x - beg_x).abs().max(1);
        // Moving left: walk tile columns from destination back to origin.
        for tileIndexX in (end_x..beg_x + 1).rev() {
            let bottom_left =
                self.round_vector(old_bottom_left.lerp(&new_bottom_left, &((end_x - tileIndexX).abs() as f64 / dist as f64)));
            let top_left = self.round_vector([bottom_left[0], bottom_left[1] - self.aabb.half_size[1] * 2.0 - 2.0]);
            // Scan tiles down the left edge, top to bottom.
            let mut checked_tile = top_left;
            while checked_tile[1] < bottom_left[1] {
                let mut y = checked_tile[1];
                if checked_tile[1] > bottom_left[1] {
                    y = bottom_left[1]
                }
                let tile_index_y = map.get_map_tile_y_at_point(y) - 1;
                checked_tile[1] = checked_tile[1] + map.tile_size;
                if map.is_obstacle(tileIndexX, tile_index_y){
                    let wall_x = tileIndexX as f64 * map.tile_size + map.tile_size + map.position[0];
                    return (true, wall_x);
                }
            }
        }
        (false, 0.0)
    }
    /// Swept collision test along the right edge; returns `(hit, wall_x)`
    /// where `wall_x` is the left face of the blocking tile.
    pub fn collides_right_side(&mut self, map: &Map) -> (bool, f64) {
        let (new_bottom_right, _, _new_top_right, _) = self.get_sensors(self.position);
        let (old_bottom_right, _, _old_top_right, _) = self.get_sensors(self.old_position);
        let end_x = map.get_map_tile_x_at_point(new_bottom_right[0]);
        let beg_x = (map.get_map_tile_x_at_point(old_bottom_right[0])).min(end_x);
        let dist = (end_x - beg_x).abs().max(1);
        for tileIndexX in beg_x..end_x + 1 {
            let bottom_right =
                self.round_vector(old_bottom_right.lerp(&new_bottom_right, &((end_x - tileIndexX).abs() as f64 / dist as f64)));
            let top_right =
                self.round_vector([bottom_right[0], bottom_right[1] - self.aabb.half_size[1] * 2.0 - 2.0]);
            let mut checked_tile = top_right;
            while checked_tile[1] < bottom_right[1] {
                let mut y = checked_tile[1];
                if checked_tile[1] > bottom_right[1] {
                    y = bottom_right[1]
                }
                let tile_index_y = map.get_map_tile_y_at_point(y) - 1;
                checked_tile[1] = checked_tile[1] + map.tile_size;
                if map.is_obstacle(tileIndexX, tile_index_y){
                    let wall_x = tileIndexX as f64 * map.tile_size + map.position[0];
                    return (true, wall_x);
                }
            }
        }
        (false, 0.0)
    }
    // Corner "sensor" points of the AABB at `position`, pushed 1px outside
    // the box so adjacent tiles register. Order: BR, BL, TR, TL.
    fn get_sensors(&self, position: Vec2d) -> (Vec2d, Vec2d, Vec2d, Vec2d) {
        let center = add(position, self.aabb.half_size);
        let bottom_right = add(add(center, self.aabb.half_size), [1.0, 1.0]);
        let top_right = [center[0] + self.aabb.half_size[0] + 1.0, center[1] - self.aabb.half_size[1] - 1.0];
        let bottom_left = [bottom_right[0] - self.aabb.half_size[0] * 2.0 - 2.0, bottom_right[1]];
        let top_left = [top_right[0] - self.aabb.half_size[0] * 2.0 - 2.0, top_right[1]];
        (self.round_vector(bottom_right),
         self.round_vector(bottom_left),
         self.round_vector(top_right),
         self.round_vector(top_left))
    }
    // Round both components to the nearest integer coordinate.
    fn round_vector(&self, vector: Vec2d) -> Vec2d {
        [vector[0].round(), vector[1].round()]
    }
}
/// Snapshot of a collision between two objects: the overlap vector plus both
/// bodies' speeds and positions (old and current) at the time of contact.
pub struct CollisionData {
    pub other_key: String,
    pub overlap: Vec2d,
    pub speed1: Vec2d,
    pub speed2: Vec2d,
    pub old_pos1: Vec2d,
    pub old_pos2: Vec2d,
    pub pos1: Vec2d,
    pub pos2: Vec2d
}
//! An `Edge` is a direct edge between `Block` in `ControlFlowGraph`
//!
//! A Falcon IL `Edge` has an optional condition. When the condition is present, the `Edge` is,
//! "Guarded," by the `Expression` in the condition. `Edge` conditions are `Expressions` that must
//! evaluate to a 1-bit `Constant`. When the condition evaluates to 1, the `Edge` may be taken.
//! Otherwise the `Edge` is not taken. When the condition is not present, the `Edge` is
//! unconditional and will always be taken.
//!
//! To create a new edge, call `ControlFlowGraph::unconditional_edge` or
//! `ControlFlowGraph::conditional_edge`.
use crate::il::*;
use serde::{Deserialize, Serialize};
use std::fmt;
/// Edge between IL blocks
#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, Default)]
pub struct Edge {
    /// Index of the vertex this edge leaves.
    head: usize,
    /// Index of the vertex this edge enters.
    tail: usize,
    /// Optional guard; when present the edge is taken only if it evaluates
    /// to a 1-bit constant 1 (see the module docs).
    condition: Option<Expression>,
    /// Optional human-readable annotation, shown by `Display`.
    comment: Option<String>,
}
impl Edge {
    /// Create a new `Edge` from `head` to `tail`, optionally guarded by
    /// `condition`. Crate-internal: callers should use
    /// `ControlFlowGraph::unconditional_edge`/`conditional_edge`.
    pub(crate) fn new(head: usize, tail: usize, condition: Option<Expression>) -> Edge {
        Edge {
            head,
            tail,
            condition,
            comment: None,
        }
    }
    /// Retrieve the condition for this `Edge`.
    pub fn condition(&self) -> Option<&Expression> {
        self.condition.as_ref()
    }
    /// Retrieve a mutable reference to the condition for this `Edge`
    pub fn condition_mut(&mut self) -> Option<&mut Expression> {
        self.condition.as_mut()
    }
    /// Retrieve the index of the head `Vertex` for this `Edge`.
    pub fn head(&self) -> usize {
        self.head
    }
    /// Retrieve the index of the tail `Vertex` for this `Edge`.
    pub fn tail(&self) -> usize {
        self.tail
    }
    /// Set the comment for this `Edge`.
    pub fn set_comment(&mut self, comment: Option<String>) {
        self.comment = comment;
    }
    /// Get the comment for this `Edge`.
    pub fn comment(&self) -> &Option<String> {
        &self.comment
    }
}
impl fmt::Display for Edge {
    // Renders `(0xHEAD->0xTAIL)`, preceded by a `// comment` line when a
    // comment is set and suffixed with `? (condition)` when guarded.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(ref comment) = self.comment {
            writeln!(f, "// {}", comment)?;
        }
        match self.condition {
            Some(ref condition) => write!(
                f,
                "(0x{:X}->0x{:X}) ? ({})",
                self.head, self.tail, condition
            ),
            None => write!(f, "(0x{:X}->0x{:X})", self.head, self.tail),
        }
    }
}
impl graph::Edge for Edge {
    fn head(&self) -> usize {
        self.head
    }
    fn tail(&self) -> usize {
        self.tail
    }
    // Conditional edges are labeled with their guard expression in dot
    // output; unconditional edges get an empty label.
    fn dot_label(&self) -> String {
        self.condition
            .as_ref()
            .map(|condition| condition.to_string())
            .unwrap_or_default()
    }
}
|
use std::fmt::{Debug, Display};
use std::iter::Sum;
/// Types that can produce a short human-readable summary.
pub trait Summary {
    /// Full summary of the value.
    fn summarize(&self) -> String;
    // Default implementation: a generic teaser that implementors may
    // override with something richer.
    fn summarize2(&self) -> String {
        "Read more...".to_string()
    }
}
/// A minimal tweet: who wrote it and what they said.
pub struct Tweet {
    pub username: String,
    pub content: String,
}
impl Summary for Tweet {
    // Produces "username: content", built without an intermediate format
    // machinery pass.
    fn summarize(&self) -> String {
        let mut summary = String::with_capacity(self.username.len() + self.content.len() + 2);
        summary.push_str(&self.username);
        summary.push_str(": ");
        summary.push_str(&self.content);
        summary
    }
}
// traits as parameters (`impl Trait` in argument position)
pub fn notify(item: &impl Summary) {}
// trait bound syntax, same as notify
pub fn notify2<T: Summary>(item: &T) {}
// multiple traits combined with `+`
pub fn notify3(item: &(impl Summary + Display)) {}
// trait bound with + syntax, same as notify3
pub fn notify4<T: Summary + Display>(item: &T) {}
// where syntax: equivalent bounds, moved after the signature for readability
fn f1<T, U>(t: &T, u: &U) -> ()
where T: Display + Clone,
      U: Clone + Debug {}
// return traits: the concrete type (Tweet) stays hidden behind `impl Summary`
fn f2() -> impl Summary {
    Tweet {
        username: "a".to_string(),
        content: "b".to_string(),
    }
}
#[cfg(test)]
mod tests {
    use crate::traits::{Summary, Tweet};
    #[test]
    fn it_works() {
        // Fixed: the content literal was `String::from("happy"1)`, a syntax
        // error (stray `1` after the string literal).
        let tweet = Tweet {
            username: String::from("horse_ebooks"),
            content: String::from("happy"),
        };
        // Assert on the summary instead of leaving the result unused.
        let s = tweet.summarize();
        assert_eq!(s, "horse_ebooks: happy");
    }
}
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[cfg(feature = "Globalization_Collation")]
pub mod Collation;
#[cfg(feature = "Globalization_DateTimeFormatting")]
pub mod DateTimeFormatting;
#[cfg(feature = "Globalization_Fonts")]
pub mod Fonts;
#[cfg(feature = "Globalization_NumberFormatting")]
pub mod NumberFormatting;
#[cfg(feature = "Globalization_PhoneNumberFormatting")]
pub mod PhoneNumberFormatting;
#[link(name = "windows")]
extern "system" {}
pub type Calendar = *mut ::core::ffi::c_void;
pub type CurrencyAmount = *mut ::core::ffi::c_void;
// Windows Runtime enum projected as a transparent i32 newtype.
#[repr(transparent)]
pub struct DayOfWeek(pub i32);
impl DayOfWeek {
    // Days are numbered Sunday = 0 through Saturday = 6.
    pub const Sunday: Self = Self(0);
    pub const Monday: Self = Self(1);
    pub const Tuesday: Self = Self(2);
    pub const Wednesday: Self = Self(3);
    pub const Thursday: Self = Self(4);
    pub const Friday: Self = Self(5);
    pub const Saturday: Self = Self(6);
}
impl ::core::marker::Copy for DayOfWeek {}
impl ::core::clone::Clone for DayOfWeek {
    fn clone(&self) -> Self {
        // Plain i32 wrapper: cloning is just a field copy.
        Self(self.0)
    }
}
pub type GeographicRegion = *mut ::core::ffi::c_void;
pub type JapanesePhoneme = *mut ::core::ffi::c_void;
pub type Language = *mut ::core::ffi::c_void;
// Text layout directions, projected as a transparent i32 newtype.
#[repr(transparent)]
pub struct LanguageLayoutDirection(pub i32);
impl LanguageLayoutDirection {
    pub const Ltr: Self = Self(0);
    pub const Rtl: Self = Self(1);
    pub const TtbLtr: Self = Self(2);
    pub const TtbRtl: Self = Self(3);
}
impl ::core::marker::Copy for LanguageLayoutDirection {}
impl ::core::clone::Clone for LanguageLayoutDirection {
    fn clone(&self) -> Self {
        // Plain i32 wrapper: cloning is just a field copy.
        Self(self.0)
    }
}
|
/* Copyright (C) 2016 Yutaka Kamei */
extern crate iron;
extern crate router;
extern crate rustc_serialize;
extern crate time;
extern crate scim;
use std::process;
use std::io::{self, Write};
use std::path::PathBuf;
use std::sync::Mutex;
use iron::{Chain, Iron, IronResult};
use iron::error::IronError;
use iron::request::Request;
use iron::response::Response;
use iron::mime::Mime;
use iron::middleware::Handler;
use iron::status;
use router::Router;
use rustc_serialize::json::as_pretty_json;
use scim::schema::{ListResponse, Resource, ScimError};
use scim::schema::resource::{User, Group, EnterpriseUser};
/*
struct Server {
value: Mutex<Vec<String>>,
}
impl Server {
fn new() -> Server {
Server {
value: Mutex::new(Vec::new()),
}
}
}
impl Handler for Server {
fn handle(&self, req: &mut Request) -> IronResult<Response> {
{
let mut value = match self.value.lock() {
Ok(guard) => guard,
Err(poison) => poison.into_inner(),
};
value.push(format!("{:?}", req.remote_addr));
}
println!("{:?}", self.value);
Ok(Response::with((status::Ok, "Hello world!")))
}
}
*/
// MIME type SCIM responses must carry (see RFC 7644's media type).
macro_rules! scim_json_type {
    () => { "application/scim+json; charset=utf-8".parse::<Mime>().unwrap() };
}
// Build a 404 response as an `Err(IronError)` carrying a SCIM error body.
// An empty detail string maps to `None`; invoking with no argument is the
// same as `not_found!("")`.
macro_rules! not_found {
    ( $e:expr ) => {
        {
            let detail = if $e.is_empty() { None } else { Some($e.to_string()) };
            let scim_error = ScimError::not_found(detail);
            let modifier = (scim_json_type!(),
                            status::NotFound,
                            format!("{}", as_pretty_json(&scim_error)));
            Err(IronError::new(scim_error, modifier))
        }
    };
    () => { not_found!("") };
}
/// GET /ResourceTypes and /ResourceTypes/:id.
///
/// Without an id, returns a ListResponse of all resource types; with
/// "User"/"Group", the single resource type; anything else is a SCIM 404.
fn resource_type_endpoint(req: &mut Request) -> IronResult<Response> {
    // "/" stands in for "no :id captured" (the list route).
    let id = req.extensions.get::<Router>().unwrap().find("id").unwrap_or("/");
    let body = match id {
        "User" => format!("{}", as_pretty_json(&User::show_resource_type())),
        "Group" => format!("{}", as_pretty_json(&Group::show_resource_type())),
        "/" => {
            let list_response = ListResponse::new(vec![
                User::show_resource_type(),
                Group::show_resource_type(),
            ]);
            format!("{}", as_pretty_json(&list_response))
        },
        _ => return not_found!(),
    };
    Ok(Response::with((scim_json_type!(), status::Ok, body)))
}
/// GET /Schemas and /Schemas/:id.
///
/// Mirrors `resource_type_endpoint`, additionally exposing the
/// EnterpriseUser schema extension.
fn schema_endpoint(req: &mut Request) -> IronResult<Response> {
    let id = req.extensions.get::<Router>().unwrap().find("id").unwrap_or("/");
    let body = match id {
        "User" => format!("{}", as_pretty_json(&User::show_schema_info())),
        "Group" => format!("{}", as_pretty_json(&Group::show_schema_info())),
        "EnterpriseUser" => format!("{}", as_pretty_json(&EnterpriseUser::show_schema_info())),
        "/" => {
            let list_response = ListResponse::new(vec![
                User::show_schema_info(),
                Group::show_schema_info(),
                EnterpriseUser::show_schema_info(),
            ]);
            format!("{}", as_pretty_json(&list_response))
        },
        _ => return not_found!(),
    };
    Ok(Response::with((scim_json_type!(), status::Ok, body)))
}
/// Start the SCIM discovery server.
///
/// BIND_ADDR / TLS_CERT_PATH / TLS_KEY_PATH are read via `option_env!`, i.e.
/// at *compile* time — rebuild to change them. HTTPS is used only when both
/// TLS paths were provided.
fn main() {
    let bind_addr = option_env!("BIND_ADDR").unwrap_or("127.0.0.1:3000");
    let cert = option_env!("TLS_CERT_PATH");
    let key = option_env!("TLS_KEY_PATH");
    let mut router = Router::new();
    router.get("/ResourceTypes", resource_type_endpoint);
    router.get("/ResourceTypes/:id", resource_type_endpoint);
    router.get("/Schemas", schema_endpoint);
    router.get("/Schemas/:id", schema_endpoint);
    // `chain` is only moved into `Iron::new`, so it needs no `mut` binding
    // (the original triggered an unused-mut warning).
    let chain = Chain::new(router);
    if cert.is_some() && key.is_some() {
        let cert = PathBuf::from(cert.unwrap());
        let key = PathBuf::from(key.unwrap());
        Iron::new(chain).https(bind_addr, cert, key).unwrap();
    } else {
        Iron::new(chain).http(bind_addr).unwrap();
    }
}
|
/// Print each value of a small vector, one per line.
fn main() {
    // Fixed a misleading name (the vector is not sorted), a needless
    // `iter_mut` (printing needs only a shared borrow), and a redundant
    // `to_string()` (println! formats integers directly).
    let values = vec![3, 2, 54, 1, 9, 2, 6];
    for v in &values {
        println!("{}", v);
    }
}
|
use crate::message_render::MessageRender;
use crate::name::Name;
use crate::rr_type::RRType;
use std::net::{Ipv4Addr, Ipv6Addr};
use anyhow::Result;
/// Write a domain name using the render's compressing variant
/// (see `MessageRender::write_name`; the flag selects compression).
pub fn name_to_wire(render: &mut MessageRender, name: &Name) -> Result<()> {
    render.write_name(name, true)
}
/// Write a domain name without compression.
pub fn name_uncompressed_to_wire(render: &mut MessageRender, name: &Name) -> Result<()> {
    render.write_name(name, false)
}
/// Write an IPv4 address as its four raw octets.
pub fn ipv4_to_wire(render: &mut MessageRender, addr: Ipv4Addr) -> Result<()> {
    addr.octets()
        .iter()
        .try_for_each(|octet| render.write_u8(*octet))
}
/// Write an IPv6 address as its sixteen raw octets.
pub fn ipv6_to_wire(render: &mut MessageRender, addr: Ipv6Addr) -> Result<()> {
    addr.octets()
        .iter()
        .try_for_each(|octet| render.write_u8(*octet))
}
/// Write a single byte.
pub fn u8_to_wire(render: &mut MessageRender, num: u8) -> Result<()> {
    render.write_u8(num)
}
/// Write a 16-bit integer.
pub fn u16_to_wire(render: &mut MessageRender, num: u16) -> Result<()> {
    render.write_u16(num)
}
/// Write an RR type as its 16-bit code.
pub fn rrtype_to_wire(render: &mut MessageRender, typ: RRType) -> Result<()> {
    render.write_u16(typ.as_u16())
}
/// Write a 32-bit integer.
pub fn u32_to_wire(render: &mut MessageRender, num: u32) -> Result<()> {
    render.write_u32(num)
}
/// Write TXT-style character strings: each chunk is length-prefixed.
pub fn text_to_wire(render: &mut MessageRender, data: &[Vec<u8>]) -> Result<()> {
    data.iter()
        .try_for_each(|chunk| byte_binary_to_wire(render, chunk))
}
/// Write `data` prefixed with its one-byte length.
pub fn byte_binary_to_wire(render: &mut MessageRender, data: &[u8]) -> Result<()> {
    render.write_u8(data.len() as u8)?;
    render.write_bytes(data)
}
/// Write raw bytes with no length prefix.
pub fn binary_to_wire(render: &mut MessageRender, data: &[u8]) -> Result<()> {
    render.write_bytes(data)
}
|
use ggez;
use ggez::event;
use ggez::graphics::{self, Color};
use ggez::{Context, GameResult};
use glam::*;
use rand;
use rand::Rng;
use rand::SeedableRng;
use std::time::SystemTime;
// FIX: the values of BLACK and WHITE were swapped — BLACK previously held
// (1,1,1) and WHITE held (0,0,0). The constants are only referenced from
// commented-out code, so correcting them is safe.
/// Opaque black.
const BLACK: Color = Color {
    r: 0.0,
    g: 0.0,
    b: 0.0,
    a: 1.0,
};
/// Opaque white.
const WHITE: Color = Color {
    r: 1.0,
    g: 1.0,
    b: 1.0,
    a: 1.0,
};
/// Opaque red.
const RED: Color = Color {
    r: 1.0,
    g: 0.0,
    b: 0.0,
    a: 1.0,
};
/// Game state: a grid of `x` by `y` cells, a seeded PRNG, and a flag noting
/// whether the scene has already been rendered.
struct State {
    x: usize,
    y: usize,
    // Per-cell random values generated at construction time.
    map: Vec<Vec<f32>>,
    prng: rand::rngs::StdRng,
    // Seed used for `prng`, kept for reproducibility.
    seed: u64,
    // Side length of each rendered square, in pixels.
    box_size: f32,
    drawn: bool,
}
impl State {
    /// Build a state with an `x` by `y` grid of random values in [0, 1).
    ///
    /// If `seed` is `None`, the current UNIX time (in seconds) seeds the
    /// PRNG; the seed is printed so runs can be reproduced.
    fn new(x: usize, y: usize, box_size: f32, seed: Option<u64>) -> GameResult<Self> {
        let use_seed: u64 = match seed {
            Some(s) => s,
            None => SystemTime::now()
                .duration_since(SystemTime::UNIX_EPOCH)
                .map(|n| n.as_secs())
                .unwrap_or(0),
        };
        println!("seed is: {}", use_seed);
        let mut prng = rand::rngs::StdRng::seed_from_u64(use_seed);
        // FIX: `vec![vec![prng.gen::<f32>(); x]; y]` evaluated the RNG once
        // and cloned that single value across the whole grid; sample a fresh
        // value per cell instead.
        let map: Vec<Vec<f32>> = (0..y)
            .map(|_| (0..x).map(|_| prng.gen::<f32>()).collect())
            .collect();
        Ok(Self {
            x,
            y,
            map,
            prng,
            seed: use_seed,
            box_size,
            drawn: false,
        })
    }
}
impl event::EventHandler for State {
    /// Nothing animates; the scene is static.
    fn update(&mut self, _ctx: &mut Context) -> GameResult {
        Ok(())
    }
    /// Render the grid exactly once; later calls return immediately (note:
    /// they also skip `present`, relying on the already-presented frame).
    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        graphics::clear(ctx, [0.0, 0.0, 0.0, 0.0].into());
        if self.drawn {
            return Ok(());
        }
        for x in 0..self.x {
            for y in 0..self.y {
                //let mut color = BLACK;
                //if self.prng.gen::<u8>() > 128 {
                //    color = WHITE;
                //}
                // One filled square per cell, colored by three fresh PRNG
                // samples (the precomputed `map` field is not consulted).
                let square = graphics::Mesh::new_rectangle(
                    ctx,
                    graphics::DrawMode::fill(),
                    graphics::Rect::new(
                        x as f32 * self.box_size,
                        y as f32 * self.box_size,
                        self.box_size,
                        self.box_size,
                    ),
                    //color,
                    Color::new(
                        self.prng.gen::<f32>(),
                        self.prng.gen::<f32>(),
                        self.prng.gen::<f32>(),
                        1.0,
                    ),
                )?;
                graphics::draw(ctx, &square, (Vec2::new(0.0, 0.0),))?;
            }
        }
        self.drawn = true;
        graphics::present(ctx)?;
        Ok(())
    }
}
/// Builds the ggez context and runs the terrain demo on a 100x100 grid of
/// 10-pixel cells, seeded from the clock.
fn main() -> GameResult {
    let builder = ggez::ContextBuilder::new("terrain generation", "xoreo");
    let (mut ctx, mut event_loop) = builder.build()?;
    let mut state = State::new(100, 100, 10.0, None)?;
    event::run(&mut ctx, &mut event_loop, &mut state)
}
|
use openexr_sys as sys;
pub use crate::core::{
error::Error,
refptr::{OpaquePtr, Ref, RefMut},
PixelType,
};
use std::marker::PhantomData;
use std::ffi::{CStr, CString};
use imath_traits::{Bound2, Vec2, Zero};
type Result<T, E = Error> = std::result::Result<T, E>;
/// Safe wrapper around the C++ `Imf::FrameBuffer`, which maps channel
/// names to [`Slice`] memory descriptions for reading/writing pixels.
pub struct FrameBuffer {
    // Owned pointer to the underlying C++ object; destroyed in `Drop`.
    pub(crate) ptr: *mut sys::Imf_FrameBuffer_t,
    // Frames registered via `insert_frame`. Initialized to `Some` in
    // `new`; presumably taken by reader code outside this chunk.
    pub(crate) frames: Option<Vec<Frame>>,
}
unsafe impl OpaquePtr for FrameBuffer {
    type SysPointee = sys::Imf_FrameBuffer_t;
    type Pointee = FrameBuffer;
}
/// Borrowed reference to a `FrameBuffer` owned elsewhere.
pub type FrameBufferRef<'a> = Ref<'a, FrameBuffer>;
impl FrameBuffer {
    /// Create a new `FrameBuffer`.
    ///
    /// Allocates the underlying C++ object; it is freed when this wrapper
    /// is dropped.
    pub fn new() -> FrameBuffer {
        let mut ptr = std::ptr::null_mut();
        unsafe {
            sys::Imf_FrameBuffer_ctor(&mut ptr);
        }
        FrameBuffer {
            ptr,
            // Starts as Some so `insert_frame` can push into it.
            frames: Some(Vec::new()),
        }
    }
    /// Insert a [`Slice`] into the `FrameBuffer`.
    ///
    /// # Errors
    /// * [`Error::InvalidArgument`] - if name is the empty string
    ///
    pub fn insert(&mut self, name: &str, slice: &Slice) -> Result<()> {
        // Panics on interior NUL bytes rather than erroring.
        let c_name =
            CString::new(name).expect("Internal null bytes in filename");
        unsafe {
            sys::Imf_FrameBuffer_insert(self.ptr, c_name.as_ptr(), &slice.0)
                .into_result()?;
        }
        Ok(())
    }
    /// Find the [`Slice`] with the given `name` in the `FrameBuffer`
    ///
    /// # Returns
    /// * `Some([`SliceRef`])` - if the [`Slice`] is found
    /// * `None` - otherwise
    ///
    pub fn get_slice<'a>(&'a self, name: &str) -> Option<SliceRef<'a>> {
        let c_name =
            CString::new(name).expect("Internal null bytes in filename");
        let mut ptr = std::ptr::null();
        unsafe {
            sys::Imf_FrameBuffer_findSlice_const(
                self.ptr,
                &mut ptr,
                c_name.as_ptr(),
            );
        }
        // The FFI lookup leaves `ptr` null when the name is absent.
        if ptr.is_null() {
            None
        } else {
            Some(SliceRef::new(ptr))
        }
    }
    /// Find the [`Slice`] with the given `name` in the `FrameBuffer` and get a
    /// mutable Ref to it
    ///
    /// # Returns
    /// * `Some([`SliceRefMut`])` - if the [`Slice`] is found
    /// * `None` - otherwise
    ///
    pub fn get_slice_mut<'a>(
        &'a mut self,
        name: &str,
    ) -> Option<SliceRefMut<'a>> {
        let c_name =
            CString::new(name).expect("Internal null bytes in filename");
        let mut ptr = std::ptr::null_mut();
        unsafe {
            sys::Imf_FrameBuffer_findSlice(self.ptr, &mut ptr, c_name.as_ptr());
        }
        if ptr.is_null() {
            None
        } else {
            Some(SliceRefMut::new(ptr))
        }
    }
    /// Get an iterator over the [`Slice`]s in this `FrameBuffer`
    ///
    pub fn iter(&self) -> FrameBufferIter {
        unsafe {
            // Fetch begin and one-past-the-end C++ iterators; both FFI
            // calls are treated as infallible here (unwrap).
            let mut ptr = sys::Imf_FrameBuffer_ConstIterator_t::default();
            sys::Imf_FrameBuffer_begin_const(self.ptr, &mut ptr)
                .into_result()
                .unwrap();
            let ptr = FrameBufferConstIterator(ptr);
            let mut end = sys::Imf_FrameBuffer_ConstIterator_t::default();
            sys::Imf_FrameBuffer_end_const(self.ptr, &mut end)
                .into_result()
                .unwrap();
            let end = FrameBufferConstIterator(end);
            FrameBufferIter {
                ptr,
                end,
                _p: PhantomData,
            }
        }
    }
    /// Packs `frame` into this buffer: one slice is inserted per channel
    /// name, the frame is retained so its pixel storage stays alive, and a
    /// handle indexing the stored frame is returned.
    pub fn insert_frame(&mut self, frame: Frame) -> Result<FrameHandle> {
        let mut ptr = frame.ptr;
        // Data-window width (inclusive bounds), used for the row stride.
        let w = frame.data_window[2] - frame.data_window[0] + 1;
        let ystride = w as usize * frame.stride;
        for chan in &frame.channel_names {
            self.insert(
                &chan,
                &Slice::with_data_window(
                    frame.channel_type,
                    ptr,
                    frame.data_window,
                )
                .x_stride(frame.stride)
                .y_stride(ystride)
                .build()?,
            )?;
            // Step to the next channel within the interleaved pixel.
            ptr = unsafe { ptr.add(frame.channel_stride) };
        }
        let handle = match &mut self.frames {
            Some(v) => {
                let handle = v.len();
                v.push(frame);
                FrameHandle(handle)
            }
            // `frames` is Some from construction onward in this file.
            _ => unreachable!(),
        };
        Ok(handle)
    }
}
impl Drop for FrameBuffer {
    /// Destroys the owned C++ `Imf::FrameBuffer`.
    fn drop(&mut self) {
        unsafe {
            sys::Imf_FrameBuffer_dtor(self.ptr);
        }
    }
}
impl Default for FrameBuffer {
fn default() -> Self {
FrameBuffer::new()
}
}
// By-value wrapper over the C++ const_iterator; cloned freely while
// stepping through the frame buffer.
#[repr(transparent)]
#[derive(Clone)]
pub(crate) struct FrameBufferConstIterator(
    pub(crate) sys::Imf_FrameBuffer_ConstIterator_t,
);
// #[repr(transparent)]
// pub(crate) struct FrameBufferIterator(
//     pub(crate) sys::Imf_FrameBuffer_Iterator_t,
// );
/// Iterator over `(channel name, slice)` pairs of a [`FrameBuffer`].
pub struct FrameBufferIter<'a> {
    ptr: FrameBufferConstIterator,    // current position
    end: FrameBufferConstIterator,    // one-past-the-end sentinel
    _p: PhantomData<&'a FrameBuffer>, // ties the iterator to the buffer's lifetime
}
impl<'a> Iterator for FrameBufferIter<'a> {
    type Item = (&'a str, SliceRef<'a>);
    /// Yields the channel name and slice at the current position, then
    /// advances the underlying C++ const_iterator.
    fn next(&mut self) -> Option<(&'a str, SliceRef<'a>)> {
        let ptr_curr = self.ptr.clone();
        let mut ptr_next = self.ptr.clone();
        unsafe {
            // NOTE(review): the increment runs on a clone *before* the
            // end-of-range check, so a past-the-end iterator is also
            // incremented once — confirm the C wrapper tolerates this.
            let mut dummy = std::ptr::null_mut();
            sys::Imf_FrameBuffer_ConstIterator__op_inc(
                &mut ptr_next.0,
                &mut dummy,
            )
            .into_result()
            .unwrap();
        }
        if ptr_curr == self.end {
            None
        } else {
            self.ptr = ptr_next;
            unsafe {
                // Read the channel name for the current entry.
                let mut nameptr = std::ptr::null();
                sys::Imf_FrameBuffer_ConstIterator_name(
                    &ptr_curr.0,
                    &mut nameptr,
                )
                .into_result()
                .unwrap();
                if nameptr.is_null() {
                    panic!("FrameBuffer::ConstIterator::name() returned NULL");
                }
                // Read the slice pointer for the current entry.
                let mut sliceptr = std::ptr::null();
                sys::Imf_FrameBuffer_ConstIterator_slice(
                    &ptr_curr.0,
                    &mut sliceptr,
                )
                .into_result()
                .unwrap();
                Some((
                    CStr::from_ptr(nameptr)
                        .to_str()
                        .expect("NUL bytes in channel name"),
                    SliceRef::new(sliceptr),
                ))
            }
        }
    }
}
impl PartialEq for FrameBufferConstIterator {
    /// Compares two iterators via the C wrapper; used to detect the end of
    /// iteration. The FFI comparison is treated as infallible.
    fn eq(&self, rhs: &FrameBufferConstIterator) -> bool {
        unsafe {
            let mut result = false;
            sys::Imf_frame_buffer_const_iter_eq(&mut result, &self.0, &rhs.0)
                .into_result()
                .unwrap();
            result
        }
    }
}
/// Thin wrapper over the C++ `Imf::Slice`, describing the memory layout of
/// one channel: pixel type, base pointer, strides, sampling and fill value.
#[repr(transparent)]
pub struct Slice(pub(crate) sys::Imf_Slice_t);
pub type SliceRef<'a, P = Slice> = Ref<'a, P>;
pub type SliceRefMut<'a, P = Slice> = RefMut<'a, P>;
unsafe impl OpaquePtr for Slice {
    type SysPointee = sys::Imf_Slice_t;
    type Pointee = Slice;
}
/// Builder collecting all [`Slice`] parameters ahead of the FFI
/// construction in [`SliceBuilder::build`].
pub struct SliceBuilder {
    pixel_type: PixelType,
    data: *const u8,   // base pointer of the channel data
    origin: [i32; 2],  // data-window origin (x, y)
    w: i64,
    h: i64,
    x_stride: usize,   // bytes between successive pixels
    y_stride: usize,   // bytes between successive rows
    x_sampling: i32,
    y_sampling: i32,
    fill_value: f64,   // value substituted for missing data
    x_tile_coords: bool,
    y_tile_coords: bool,
}
impl SliceBuilder {
    /// Constructs the C++ `Imf::Slice` from the accumulated parameters.
    ///
    /// # Errors
    /// Propagates any error reported by the sys-level constructor.
    pub fn build(self) -> Result<Slice> {
        let mut slice = sys::Imf_Slice_t::default();
        unsafe {
            sys::Imf_Slice_with_origin(
                &mut slice,
                self.pixel_type.into(),
                self.data as *const std::os::raw::c_void,
                // [i32; 2] reinterpreted as an Imath V2i (two i32s).
                self.origin.as_ptr() as *const sys::Imath_V2i_t,
                self.w,
                self.h,
                self.x_stride as u64,
                self.y_stride as u64,
                self.x_sampling,
                self.y_sampling,
                self.fill_value,
                self.x_tile_coords,
                self.y_tile_coords,
            )
            .into_result()?;
        }
        Ok(Slice(slice))
    }
    /// Set the data window origin.
    ///
    pub fn origin<V>(mut self, o: V) -> Self
    where
        V: Vec2<i32>,
    {
        let o = o.as_slice();
        self.origin = *o;
        self
    }
    /// Set the value substituted for missing data.
    pub fn fill_value(mut self, v: f64) -> Self {
        self.fill_value = v;
        self
    }
    /// Set the byte distance between successive pixels.
    pub fn x_stride(mut self, x: usize) -> Self {
        self.x_stride = x;
        self
    }
    /// Set the byte distance between successive rows.
    pub fn y_stride(mut self, y: usize) -> Self {
        self.y_stride = y;
        self
    }
    /// Set the horizontal subsampling factor.
    pub fn x_sampling(mut self, x: i32) -> Self {
        self.x_sampling = x;
        self
    }
    /// Set the vertical subsampling factor.
    pub fn y_sampling(mut self, y: i32) -> Self {
        self.y_sampling = y;
        self
    }
    /// Set whether x coordinates are tile-relative.
    pub fn x_tile_coords(mut self, x: bool) -> Self {
        self.x_tile_coords = x;
        self
    }
    /// Set whether y coordinates are tile-relative.
    pub fn y_tile_coords(mut self, y: bool) -> Self {
        self.y_tile_coords = y;
        self
    }
}
impl Slice {
    /// Starts a builder with origin (0, 0) and the given size; strides
    /// default to 0, sampling to 1, fill value to 0.0.
    pub fn builder(
        pixel_type: PixelType,
        data: *const u8,
        w: i64,
        h: i64,
    ) -> SliceBuilder {
        SliceBuilder {
            pixel_type,
            data,
            origin: [0i32; 2],
            w,
            h,
            x_stride: 0,
            y_stride: 0,
            x_sampling: 1,
            y_sampling: 1,
            fill_value: 0.0,
            x_tile_coords: false,
            y_tile_coords: false,
        }
    }
    /// Starts a builder with an explicit origin and size.
    pub fn with_origin(
        pixel_type: PixelType,
        data: *const u8,
        origin: [i32; 2],
        w: i64,
        h: i64,
    ) -> SliceBuilder {
        SliceBuilder {
            pixel_type,
            data,
            origin,
            w,
            h,
            x_stride: 0,
            y_stride: 0,
            x_sampling: 1,
            y_sampling: 1,
            fill_value: 0.0,
            x_tile_coords: false,
            y_tile_coords: false,
        }
    }
    /// Starts a builder from an inclusive data window `[min_x, min_y,
    /// max_x, max_y]`.
    pub fn with_data_window<B>(
        pixel_type: PixelType,
        data: *const u8,
        data_window: B,
    ) -> SliceBuilder
    where
        B: Bound2<i32>,
    {
        let b = data_window.as_slice();
        SliceBuilder {
            pixel_type,
            data,
            origin: [b[0], b[1]],
            // NOTE(review): w/h are `max + 1`, not `max - min + 1` —
            // presumably the sys constructor measures extent from (0, 0)
            // together with `origin`; confirm against the openexr-sys
            // binding before changing.
            w: b[2] as i64 + 1,
            h: b[3] as i64 + 1,
            x_stride: 0,
            y_stride: 0,
            x_sampling: 1,
            y_sampling: 1,
            fill_value: 0.0,
            x_tile_coords: false,
            y_tile_coords: false,
        }
    }
}
impl Drop for Slice {
    /// Runs the C++ destructor on the inline `Imf_Slice_t` value.
    fn drop(&mut self) {
        unsafe {
            sys::Imf_Slice_dtor(&mut self.0);
        }
    }
}
/// Types that can serve as a (possibly multi-channel) pixel in a [`Frame`].
pub trait Pixel: Zero + Clone {
    /// OpenEXR storage type of each channel.
    const CHANNEL_TYPE: PixelType;
    /// Number of channels packed into one value of this type.
    const NUM_CHANNELS: usize;
    /// Byte distance between successive pixels; defaults to the type size.
    const STRIDE: usize = std::mem::size_of::<Self>();
    /// Byte distance between channels within a pixel; defaults to the type size.
    const CHANNEL_STRIDE: usize = std::mem::size_of::<Self>();
}
impl Pixel for half::f16 {
    // type Type = Self;
    const CHANNEL_TYPE: PixelType = PixelType::Half;
    const NUM_CHANNELS: usize = 1;
}
impl Pixel for f32 {
    // type Type = Self;
    const CHANNEL_TYPE: PixelType = PixelType::Float;
    const NUM_CHANNELS: usize = 1;
}
impl Pixel for u32 {
    // type Type = Self;
    const CHANNEL_TYPE: PixelType = PixelType::Uint;
    const NUM_CHANNELS: usize = 1;
}
impl Pixel for crate::rgba::rgba::Rgba {
    // type Type = Self;
    // Four interleaved half-float channels; channel stride is one f16.
    const CHANNEL_TYPE: PixelType = PixelType::Half;
    const NUM_CHANNELS: usize = 4;
    const CHANNEL_STRIDE: usize = std::mem::size_of::<half::f16>();
}
// Pixel impls for cgmath vector types, enabled by the `imath_cgmath`
// feature. Each vector inherits its scalar's channel type and channel
// stride; NUM_CHANNELS matches the vector arity.
#[cfg(feature = "imath_cgmath")]
mod impl_cgmath {
    use super::{Pixel, PixelType};
    impl Pixel for cgmath::Vector2<half::f16> {
        const CHANNEL_TYPE: PixelType = half::f16::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 2;
        const CHANNEL_STRIDE: usize = half::f16::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector2<f32> {
        const CHANNEL_TYPE: PixelType = f32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 2;
        const CHANNEL_STRIDE: usize = f32::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector2<u32> {
        const CHANNEL_TYPE: PixelType = u32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 2;
        const CHANNEL_STRIDE: usize = u32::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector3<half::f16> {
        const CHANNEL_TYPE: PixelType = half::f16::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 3;
        const CHANNEL_STRIDE: usize = half::f16::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector3<f32> {
        const CHANNEL_TYPE: PixelType = f32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 3;
        const CHANNEL_STRIDE: usize = f32::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector3<u32> {
        const CHANNEL_TYPE: PixelType = u32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 3;
        const CHANNEL_STRIDE: usize = u32::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector4<half::f16> {
        const CHANNEL_TYPE: PixelType = half::f16::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 4;
        const CHANNEL_STRIDE: usize = half::f16::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector4<f32> {
        const CHANNEL_TYPE: PixelType = f32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 4;
        const CHANNEL_STRIDE: usize = f32::CHANNEL_STRIDE;
    }
    impl Pixel for cgmath::Vector4<u32> {
        const CHANNEL_TYPE: PixelType = u32::CHANNEL_TYPE;
        const NUM_CHANNELS: usize = 4;
        const CHANNEL_STRIDE: usize = u32::CHANNEL_STRIDE;
    }
}
/// `Frame` attempts to provide a safer API on top of OpenEXR's
/// [`Slice`](crate::core::frame_buffer::Slice) type.
///
/// Instead of providing a pointer and calculating offsets based on the data
/// window offset, as [`Slice`](crate::core::frame_buffer::Slice) does, `Frame` wraps up the data window offset
/// and handles memory allocation internally so that you can't get it wrong.
///
/// # Examples
/// ```no_run
/// # fn foo() -> Result<(), openexr::Error> {
/// use openexr::prelude::*;
///
/// let file = InputFile::new("test.exr", 4)?;
/// let data_window: [i32; 4] = *file.header().data_window();
///
/// let frame_rgba =
/// Frame::new::<Rgba, _, _>(
/// &["R", "G", "B", "A"],
/// data_window
/// )?;
///
/// let (file, frames) = file
/// .into_reader(vec![frame_rgba])?
/// .read_pixels(data_window[1], data_window[3])?;
///
/// # Ok(())
/// # }
/// ```
pub struct Frame {
    pub(crate) channel_type: PixelType,  // EXR storage type of each channel
    pub(crate) data_window: [i32; 4],    // inclusive [min_x, min_y, max_x, max_y]
    pub(crate) channel_names: Vec<String>, // one name per EXR channel
    pub(crate) stride: usize,            // bytes between successive pixels
    pub(crate) channel_stride: usize,    // bytes between channels in a pixel
    pub(crate) ptr: *mut u8,             // owned pixel storage; freed in Drop
    pub(crate) byte_len: usize,          // total size of the storage in bytes
    pub(crate) align: usize,             // alignment used for the allocation
}
/// Index of a [`Frame`] stored inside a [`FrameBuffer`] via `insert_frame`.
#[repr(transparent)]
#[derive(Copy, Clone, PartialEq, Hash)]
pub struct FrameHandle(usize);
use std::alloc::{GlobalAlloc, Layout, System};
impl Frame {
    /// Constructs a new frame for the given `channel_names` and `data_window`
    /// and allocates storage to hold the pixel data.
    ///
    /// # Errors
    /// [`Error::InvalidArgument`] - if the length of the `channel_names` slice
    /// is not a multiple of `Pixel::NUM_CHANNELS`
    ///
    pub fn new<T: Pixel, B: Bound2<i32>, S: AsRef<str>>(
        channel_names: &[S],
        data_window: B,
    ) -> Result<Frame> {
        let channel_names = channel_names
            .iter()
            .map(|s| s.as_ref().to_string())
            .collect::<Vec<String>>();
        if channel_names.len() % T::NUM_CHANNELS != 0 {
            return Err(Error::InvalidArgument(format!("channel_names ({:?}) has {} channels but pixel type has {} channels. Channel names length must be a multiple of pixel type num channels in order to pack.", channel_names, channel_names.len(), T::NUM_CHANNELS)));
        }
        let data_window = *data_window.as_slice();
        // Inclusive bounds: extent is max - min + 1.
        let w = data_window[2] - data_window[0] + 1;
        let h = data_window[3] - data_window[1] + 1;
        // We're packing the data into an array where elements may have multiple
        // channels themselves
        let num_packed = channel_names.len() / T::NUM_CHANNELS;
        let len = (w * h) as usize * num_packed;
        let byte_len = len * std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();
        // NOTE(review): the allocation is not checked for null and the
        // buffer stays uninitialized until pixels are decoded into it —
        // confirm callers never read it before a read fills it.
        let ptr = unsafe { System.alloc(Layout::array::<T>(len).unwrap()) };
        Ok(Frame {
            channel_type: T::CHANNEL_TYPE,
            data_window,
            channel_names,
            stride: T::STRIDE * num_packed,
            channel_stride: T::CHANNEL_STRIDE,
            ptr,
            byte_len,
            align,
        })
    }
    /// Construct a new `Frame` using the storage provided in `vec`.
    ///
    /// This can either be used to provide pixel data for writing, or to re-use
    /// storage between reads.
    ///
    /// # Errors
    /// [`Error::InvalidArgument`] - if the length of the `channel_names` slice
    /// is not a multiple of `Pixel::NUM_CHANNELS`
    ///
    pub fn with_vec<T: Pixel, B: Bound2<i32>, S: AsRef<str>>(
        channel_names: &[S],
        mut vec: Vec<T>,
        data_window: B,
    ) -> Result<Frame> {
        let channel_names = channel_names
            .iter()
            .map(|s| s.as_ref().to_string())
            .collect::<Vec<String>>();
        // Stricter than `new`: the name count must equal NUM_CHANNELS.
        if channel_names.len() != T::NUM_CHANNELS {
            return Err(Error::InvalidArgument(format!("channel_names ({:?}) has {} channels but pixel type has {} channels.", channel_names, channel_names.len(), T::NUM_CHANNELS)));
        }
        let data_window = *data_window.as_slice();
        let w = data_window[2] - data_window[0] + 1;
        let h = data_window[3] - data_window[1] + 1;
        let len = (w * h) as usize;
        // Size the vec to exactly one element per pixel, then take over its
        // allocation by leaking the boxed slice; Drop frees it later.
        vec.resize(len, T::zero());
        let mut vec = vec.into_boxed_slice();
        let ptr = vec.as_mut_ptr() as *mut u8;
        Box::leak(vec);
        // NOTE(review): this memory came from Vec (the global allocator)
        // but Drop frees it with `System` — sound only while the default
        // global allocator is System; a custom #[global_allocator] would
        // make this a mismatched dealloc.
        let byte_len = len * std::mem::size_of::<T>();
        let align = std::mem::align_of::<T>();
        Ok(Frame {
            channel_type: T::CHANNEL_TYPE,
            data_window,
            channel_names,
            stride: T::STRIDE,
            channel_stride: T::CHANNEL_STRIDE,
            ptr,
            byte_len,
            align,
        })
    }
    /// Get a reference to the pixel data slice.
    ///
    pub fn as_slice<T: Pixel>(&self) -> &[T] {
        // Guard against reinterpreting the buffer as a differently-sized T.
        let stride = std::mem::size_of::<T>();
        if T::STRIDE != stride {
            panic!("Attempt to get a slice with a different type");
        }
        unsafe {
            std::slice::from_raw_parts(
                self.ptr as *const T,
                self.byte_len / stride,
            )
        }
    }
    /// Get a mutable reference to the pixel data slice.
    ///
    pub fn as_mut_slice<T: Pixel>(&mut self) -> &mut [T] {
        let stride = std::mem::size_of::<T>();
        if T::STRIDE != stride {
            panic!("Attempt to get a slice with a different type");
        }
        unsafe {
            std::slice::from_raw_parts_mut(
                self.ptr as *mut T,
                self.byte_len / stride,
            )
        }
    }
    /// Consume this `Frame` and return the pixel data as a `Vec`.
    ///
    pub fn into_vec<T: Pixel>(self) -> Vec<T> {
        // Copies the data out; the frame's own storage is freed on drop.
        self.as_slice().to_vec()
    }
}
impl Drop for Frame {
    /// Frees the pixel storage with the same size/alignment recorded at
    /// construction. Assumes `ptr` is a live System allocation.
    fn drop(&mut self) {
        unsafe {
            System.dealloc(
                self.ptr,
                Layout::from_size_align(self.byte_len, self.align).unwrap(),
            )
        }
    }
}
|
use num_bigint::BigUint;
use stark_curve::FieldElement;
use std::fmt::Write;
use std::fs::File;
use std::io::{self, BufRead};
use std::str::FromStr;
use std::{env, fs, path::Path};
const FULL_ROUNDS: usize = 8;
const PARTIAL_ROUNDS: usize = 83;
/// Generates poseidon_consts.rs
/// Generates poseidon_consts.rs
pub fn main() {
    let manifest_dir = env::var_os("CARGO_MANIFEST_DIR").unwrap();
    let out_dir = env::var_os("OUT_DIR").unwrap();
    // Flip to `true` to emit into the source tree while debugging.
    let output_to_src = false;
    let dest_path = match output_to_src {
        true => Path::new(&manifest_dir).join("src/poseidon_consts.rs"),
        false => Path::new(&out_dir).join("poseidon_consts.rs"),
    };
    fs::write(dest_path, generate_consts()).expect("could not write $OUT_DIR/poseidon_consts.rs");
}
/// Generates Rust code to a string
/// Renders the generated Rust source: the flat round-key table followed by
/// the compressed one, separated by a blank line.
fn generate_consts() -> String {
    // Raw round keys from the reference file, as big integers.
    let constants = extract_roundkeys();
    // The same keys as field elements.
    let roundkeys = convert_roundkeys(&constants);
    // Flat table: every key of every round, in order.
    let code_flat = generate_code(
        "POSEIDON_CONSTS",
        &serialize_roundkeys(&flatten_roundkeys(&roundkeys)),
    );
    // Compressed table: partial-round keys folded together.
    let code_comp = generate_code(
        "POSEIDON_COMP_CONSTS",
        &serialize_roundkeys(&compress_roundkeys(&roundkeys)),
    );
    format!("{code_flat}\n\n{code_comp}")
}
/// Flattens the roundkeys
/// Flattens the per-round key triples into one contiguous vector.
pub fn flatten_roundkeys(rcs: &[[FieldElement; 3]]) -> Vec<FieldElement> {
    rcs.iter().flatten().copied().collect()
}
/// Compress roundkeys
/// Builds the compressed round-key table: the leading full rounds pass
/// through unchanged, the partial rounds (plus the first trailing full
/// round) are folded by [`compress_roundkeys_partial`], and the remaining
/// full rounds pass through unchanged.
pub fn compress_roundkeys(rcs: &[[FieldElement; 3]]) -> Vec<FieldElement> {
    let half_full = FULL_ROUNDS / 2;
    // First half of the full rounds, verbatim.
    let mut result: Vec<FieldElement> =
        rcs[..half_full].iter().flatten().copied().collect();
    // Folded partial rounds + first of the trailing full rounds.
    result.extend(compress_roundkeys_partial(rcs));
    // Remaining full rounds, verbatim.
    result.extend(
        rcs[(half_full + PARTIAL_ROUNDS + 1)..]
            .iter()
            .flatten()
            .copied(),
    );
    result
}
/// Folds the partial-round keys into an equivalent compressed sequence.
///
/// Symbolically runs the linear part of the permutation (AddRoundKey on a
/// zero-initialized state plus the MixLayer), emitting one folded key per
/// partial round and a final triple for the first trailing full round.
pub fn compress_roundkeys_partial(rcs: &[[FieldElement; 3]]) -> Vec<FieldElement> {
    let mut result = Vec::new();
    let mut idx = FULL_ROUNDS / 2;
    let mut state: [FieldElement; 3] = [FieldElement::ZERO; 3];
    // Add keys for partial rounds
    for _ in 0..PARTIAL_ROUNDS {
        // AddRoundKey
        state[0] += rcs[idx][0];
        state[1] += rcs[idx][1];
        state[2] += rcs[idx][2];
        // Add last state
        result.push(state[2]);
        // Reset last state
        state[2] = FieldElement::ZERO;
        // MixLayer. Note state[2] was just zeroed, so the third lane
        // reduces to `t` here.
        let t = state[0] + state[1] + state[2];
        state[0] = t + FieldElement::TWO * state[0];
        state[1] = t - FieldElement::TWO * state[1];
        state[2] = t - FieldElement::THREE * state[2];
        idx += 1;
    }
    // Add keys for first of the last full rounds
    state[0] += rcs[idx][0];
    state[1] += rcs[idx][1];
    state[2] += rcs[idx][2];
    result.push(state[0]);
    result.push(state[1]);
    result.push(state[2]);
    result
}
/// Serializes roundkeys to u64
/// Serializes each field element to its raw `[u64; 4]` limb representation.
pub fn serialize_roundkeys(rcs: &[FieldElement]) -> Vec<[u64; 4]> {
    let mut out = Vec::with_capacity(rcs.len());
    for rc in rcs {
        out.push(rc.inner());
    }
    out
}
/// Generates the Rust code
/// Renders one `pub const NAME: [FieldElement; N] = [...]` table as Rust
/// source. Each row becomes a `FieldElement::new([...])` entry whose four
/// limbs are right-aligned to 20 columns.
pub fn generate_code(name: &str, rcs: &[[u64; 4]]) -> String {
    let mut out = String::with_capacity(1024 * 1024);
    write!(out, "pub const {name}: [FieldElement; {}] = [", rcs.len()).unwrap();
    for rc in rcs {
        out.push_str("\n    FieldElement::new([");
        for limb in rc {
            write!(out, "{limb:>20}u64,").unwrap();
        }
        out.push_str("]),");
    }
    out.push_str("\n];");
    out
}
/// Parses a number into a field element
/// Parses a big integer into a field element.
///
/// The big-endian bytes are left-padded with zeros to the 32 bytes that
/// `Felt::from_be_slice` expects; inputs already 32 bytes or longer are
/// passed through unpadded.
pub fn convert_number(n: &BigUint) -> stark_curve::FieldElement {
    let raw = n.to_bytes_be();
    let mut bytes = vec![0u8; 32usize.saturating_sub(raw.len())];
    bytes.extend_from_slice(&raw);
    let felt = stark_hash::Felt::from_be_slice(&bytes).unwrap();
    stark_curve::FieldElement::from(felt)
}
/// Converts roundkeys as big integers to u64 in Montgomery representation
/// Converts each big-integer key triple to field elements via
/// [`convert_number`], preserving order.
pub fn convert_roundkeys(rcs: &[[BigUint; 3]]) -> Vec<[FieldElement; 3]> {
    rcs.iter()
        .map(|rc| {
            let mut triple = [FieldElement::ZERO; 3];
            for (slot, num) in triple.iter_mut().zip(rc) {
                *slot = convert_number(num);
            }
            triple
        })
        .collect()
}
/// Extracts roundkeys from https://github.com/starkware-industries/poseidon/blob/main/poseidon3.txt
/// Extracts roundkeys from https://github.com/starkware-industries/poseidon/blob/main/poseidon3.txt
///
/// Scans `spec/poseidon3.txt` line by line: everything before the
/// "RoundKeys" marker is skipped; after it, each `[`-opening line resets a
/// counter and the next three lines are parsed as one key triple.
pub fn extract_roundkeys() -> Vec<[BigUint; 3]> {
    let manifest_dir = env::var_os("CARGO_MANIFEST_DIR").unwrap();
    let read_path = Path::new(&manifest_dir).join("spec/poseidon3.txt");
    let mut roundkeys = Vec::new();
    // Parse by reading one line at a time
    let file = File::open(read_path).expect("can read poseidon reference file");
    let lines = io::BufReader::new(file).lines();
    let mut at_keys = false;
    let mut line_ctr = 0;
    // Triple under construction; re-pushed (cloned) once filled.
    let mut buffer: [BigUint; 3] = [BigUint::default(), BigUint::default(), BigUint::default()];
    for line in lines.flatten() {
        // Skip until reaching RoundKeys
        if line.contains("RoundKeys") {
            at_keys = true;
        }
        // For each set of three RoundKeys
        if at_keys && line.contains('[') {
            line_ctr = 0;
        }
        // Read one element, append to buffer for ctr = 1,2,3
        if at_keys && line_ctr > 0 && line_ctr < 4 {
            let mut trimmed = line.trim().to_owned();
            // NOTE(review): unconditionally drops the last character,
            // assuming every key line ends with a comma — lines without one
            // lose a digit and then fail the parse below, silently keeping
            // the previous buffer entry. Verify against the spec file.
            trimmed.truncate(trimmed.len() - 1); // remove comma
            if let Ok(bn) = BigUint::from_str(&trimmed) {
                buffer[line_ctr - 1] = bn;
            }
        }
        // If buffer is full, push it to result
        if at_keys && line_ctr == 3 {
            roundkeys.push(buffer.clone());
        }
        line_ctr += 1;
    }
    roundkeys
}
|
//! A simple example of how to use PickleDB. It includes:
//! * Creating a new DB
//! * Loading an existing DB from a file
//! * Setting and getting key-value pairs of different types
use pickledb::{PickleDb, PickleDbDumpPolicy, SerializationMethod};
use serde::{Deserialize, Serialize};
use std::fmt::{self, Display, Formatter};
/// Define an example struct which represents a rectangle.
/// Next we'll show how to write and read it into the DB.
#[derive(Serialize, Deserialize)]
struct Rectangle {
    width: i32,  // horizontal extent
    length: i32, // vertical extent
}
impl Display for Rectangle {
    /// Formats as `Rectangle: length=<L>, width=<W>`.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        let Self { width, length } = self;
        write!(f, "Rectangle: length={}, width={}", length, width)
    }
}
/// Walks through the PickleDB API: create, set/get several value types,
/// overwrite, remove, reload from disk, and iterate.
fn main() {
    // New DB with AutoDump (every change is flushed to the file
    // immediately) and JSON serialization.
    let mut db = PickleDb::new(
        "example.db",
        PickleDbDumpPolicy::AutoDump,
        SerializationMethod::Json,
    );
    // Store values of several different types.
    db.set("key1", &100).unwrap();
    db.set("key2", &1.1).unwrap();
    db.set("key3", &String::from("hello world")).unwrap();
    db.set("key4", &vec![1, 2, 3]).unwrap();
    db.set(
        "key5",
        &Rectangle {
            width: 4,
            length: 10,
        },
    )
    .unwrap();
    // Read each value back with its concrete type.
    println!("The value of key1 is: {}", db.get::<i32>("key1").unwrap());
    println!("The value of key2 is: {}", db.get::<f32>("key2").unwrap());
    println!(
        "The value of key3 is: {}",
        db.get::<String>("key3").unwrap()
    );
    println!(
        "The value of key4 is: {:?}",
        db.get::<Vec<i32>>("key4").unwrap()
    );
    println!(
        "The value of key5 is: {}",
        db.get::<Rectangle>("key5").unwrap()
    );
    // Overwrite key1 with a value of a different type.
    db.set("key1", &String::from("override")).unwrap();
    println!(
        "The value of key1 is: {}",
        db.get::<String>("key1").unwrap()
    );
    // Remove key2, then confirm it is gone.
    db.rem("key2").unwrap();
    println!(
        "key2 was removed. Is it still in the db? {}",
        db.get::<f32>("key2").is_some()
    );
    // Re-open the same file as an independent handle.
    let reloaded = PickleDb::load(
        "example.db",
        PickleDbDumpPolicy::DumpUponRequest,
        SerializationMethod::Json,
    )
    .unwrap();
    println!(
        "Value of key1 as loaded from file is: {}",
        reloaded.get::<String>("key1").unwrap()
    );
    // Walk every key/value pair. key1 and key3 both hold strings, so
    // their arms are merged.
    for kv in db.iter() {
        match kv.get_key() {
            "key1" | "key3" => println!(
                "Value of {} is: {}",
                kv.get_key(),
                kv.get_value::<String>().unwrap()
            ),
            "key4" => println!(
                "Value of {} is: {:?}",
                kv.get_key(),
                kv.get_value::<Vec<i32>>().unwrap()
            ),
            "key5" => println!(
                "Value of {} is: {}",
                kv.get_key(),
                kv.get_value::<Rectangle>().unwrap()
            ),
            _ => (),
        }
    }
}
|
use super::{ClassMetrics, Registry, RequestMetrics, RetrySkipped, StatusMetrics};
use http;
use linkerd2_metrics::{latency, Counter, FmtLabels, FmtMetric, FmtMetrics, Histogram, Metric};
use std::fmt;
use std::hash::Hash;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio_timer::clock;
use tracing::trace;
/// Reports HTTP metrics for prometheus.
#[derive(Clone, Debug)]
pub struct Report<T, C>
where
    T: FmtLabels + Hash + Eq,
    C: FmtLabels + Hash + Eq,
{
    prefix: &'static str,                 // optional metric-name prefix; "" when unset
    scope: Scope,                         // pre-built metric keys for this prefix
    registry: Arc<Mutex<Registry<T, C>>>, // shared store of recorded metrics
    retain_idle: Duration,                // how long idle series are kept before pruning
}
/// Newtype giving an HTTP status code a `status_code="…"` label rendering.
struct Status(http::StatusCode);
/// Fully-qualified metric names, computed once per prefix.
#[derive(Clone, Debug)]
struct Scope {
    request_total_key: String,
    response_total_key: String,
    response_latency_ms_key: String,
    retry_skipped_total_key: String,
}
// ===== impl Report =====
impl<T, C> Report<T, C>
where
T: FmtLabels + Hash + Eq,
C: FmtLabels + Hash + Eq,
{
pub(super) fn new(retain_idle: Duration, registry: Arc<Mutex<Registry<T, C>>>) -> Self {
Self {
prefix: "",
registry,
retain_idle,
scope: Scope::default(),
}
}
pub fn with_prefix(self, prefix: &'static str) -> Self {
if prefix.is_empty() {
return self;
}
Self {
prefix,
scope: Scope::prefixed(prefix),
..self
}
}
}
impl<T, C> FmtMetrics for Report<T, C>
where
    T: FmtLabels + Hash + Eq,
    C: FmtLabels + Hash + Eq,
{
    /// Renders all retained metrics in prometheus exposition format.
    fn fmt_metrics(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        trace!("fmt_metrics({})", self.prefix);
        // A poisoned registry mutex emits nothing rather than failing.
        let mut registry = match self.registry.lock() {
            Err(_) => return Ok(()),
            Ok(r) => r,
        };
        let now = clock::now();
        let since = now - self.retain_idle;
        trace!(
            "fmt_metrics({}): retain_since: now={:?} since={:?}",
            self.prefix,
            now,
            since
        );
        // Prune series that have been idle longer than `retain_idle`.
        registry.retain_since(since);
        // Rebind immutably; no further mutation below.
        let registry = registry;
        trace!(
            "fmt_metrics({}): by_target={}",
            self.prefix,
            registry.by_target.len()
        );
        if registry.by_target.is_empty() {
            return Ok(());
        }
        // Each metric family: HELP header first, then its samples.
        self.scope.request_total().fmt_help(f)?;
        registry.fmt_by_target(f, self.scope.request_total(), |s| &s.total)?;
        self.scope.response_latency_ms().fmt_help(f)?;
        registry.fmt_by_status(f, self.scope.response_latency_ms(), |s| &s.latency)?;
        self.scope.response_total().fmt_help(f)?;
        registry.fmt_by_class(f, self.scope.response_total(), |s| &s.total)?;
        self.scope.retry_skipped_total().fmt_help(f)?;
        registry.fmt_by_retry(f, self.scope.retry_skipped_total())?;
        Ok(())
    }
}
impl<T, C> Registry<T, C>
where
    T: FmtLabels + Hash + Eq,
    C: FmtLabels + Hash + Eq,
{
    /// Writes one sample per target, labeled by the target only.
    fn fmt_by_target<M, F>(
        &self,
        f: &mut fmt::Formatter<'_>,
        metric: Metric<'_, M>,
        get_metric: F,
    ) -> fmt::Result
    where
        M: FmtMetric,
        F: Fn(&RequestMetrics<C>) -> &M,
    {
        for (tgt, tm) in &self.by_target {
            // A poisoned per-target mutex silently skips that target.
            if let Ok(m) = tm.lock() {
                get_metric(&*m).fmt_metric_labeled(f, metric.name, tgt)?;
            }
        }
        Ok(())
    }
    /// Writes one sample per (target, retry-skip reason) pair.
    fn fmt_by_retry<M>(&self, f: &mut fmt::Formatter<'_>, metric: Metric<'_, M>) -> fmt::Result
    where
        M: FmtMetric,
    {
        for (tgt, tm) in &self.by_target {
            if let Ok(tm) = tm.lock() {
                for (retry, m) in &tm.by_retry_skipped {
                    let labels = (tgt, retry);
                    m.fmt_metric_labeled(f, metric.name, labels)?;
                }
            }
        }
        Ok(())
    }
    /// Writes one sample per (target, response status) pair.
    fn fmt_by_status<M, F>(
        &self,
        f: &mut fmt::Formatter<'_>,
        metric: Metric<'_, M>,
        get_metric: F,
    ) -> fmt::Result
    where
        M: FmtMetric,
        F: Fn(&StatusMetrics<C>) -> &M,
    {
        for (tgt, tm) in &self.by_target {
            if let Ok(tm) = tm.lock() {
                for (status, m) in &tm.by_status {
                    // Wrap the code so it renders as status_code="…".
                    let status = status.as_ref().map(|s| Status(*s));
                    let labels = (tgt, status);
                    get_metric(&*m).fmt_metric_labeled(f, metric.name, labels)?;
                }
            }
        }
        Ok(())
    }
    /// Writes one sample per (target, status, response class) triple.
    fn fmt_by_class<M, F>(
        &self,
        f: &mut fmt::Formatter<'_>,
        metric: Metric<'_, M>,
        get_metric: F,
    ) -> fmt::Result
    where
        M: FmtMetric,
        F: Fn(&ClassMetrics) -> &M,
    {
        for (tgt, tm) in &self.by_target {
            if let Ok(tm) = tm.lock() {
                for (status, sm) in &tm.by_status {
                    for (cls, m) in &sm.by_class {
                        let status = status.as_ref().map(|s| Status(*s));
                        let labels = (tgt, (status, cls));
                        get_metric(&*m).fmt_metric_labeled(f, metric.name, labels)?;
                    }
                }
            }
        }
        Ok(())
    }
}
// === impl Scope ===
// === impl Scope ===
impl Default for Scope {
    /// Unprefixed metric names.
    fn default() -> Self {
        Self {
            request_total_key: String::from("request_total"),
            response_total_key: String::from("response_total"),
            response_latency_ms_key: String::from("response_latency_ms"),
            retry_skipped_total_key: String::from("retry_skipped_total"),
        }
    }
}
impl Scope {
    /// Builds metric keys of the form `{prefix}_{name}`; an empty prefix
    /// falls back to the unprefixed defaults.
    fn prefixed(prefix: &'static str) -> Self {
        if prefix.is_empty() {
            return Self::default();
        }
        Self {
            request_total_key: format!("{}_request_total", prefix),
            response_total_key: format!("{}_response_total", prefix),
            response_latency_ms_key: format!("{}_response_latency_ms", prefix),
            retry_skipped_total_key: format!("{}_retry_skipped_total", prefix),
        }
    }
    // Accessors pairing each key with its prometheus HELP text.
    fn request_total(&self) -> Metric<'_, Counter> {
        Metric::new(&self.request_total_key, &Self::REQUEST_TOTAL_HELP)
    }
    fn response_total(&self) -> Metric<'_, Counter> {
        Metric::new(&self.response_total_key, &Self::RESPONSE_TOTAL_HELP)
    }
    fn response_latency_ms(&self) -> Metric<'_, Histogram<latency::Ms>> {
        Metric::new(
            &self.response_latency_ms_key,
            &Self::RESPONSE_LATENCY_MS_HELP,
        )
    }
    fn retry_skipped_total(&self) -> Metric<'_, Counter> {
        Metric::new(
            &self.retry_skipped_total_key,
            &Self::RETRY_SKIPPED_TOTAL_HELP,
        )
    }
    const REQUEST_TOTAL_HELP: &'static str = "Total count of HTTP requests.";
    const RESPONSE_TOTAL_HELP: &'static str = "Total count of HTTP responses.";
    const RESPONSE_LATENCY_MS_HELP: &'static str =
        "Elapsed times between a request's headers being received \
         and its response stream completing";
    const RETRY_SKIPPED_TOTAL_HELP: &'static str =
        "Total count of retryable HTTP responses that were not retried.";
}
impl FmtLabels for Status {
    /// Renders the numeric HTTP status as `status_code="NNN"`.
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "status_code=\"{}\"", self.0.as_u16())
    }
}
impl FmtLabels for RetrySkipped {
    /// Renders the skip reason as a `skipped="…"` label.
    fn fmt_labels(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let reason = match self {
            RetrySkipped::Budget => "budget",
        };
        write!(f, "skipped=\"{}\"", reason)
    }
}
|
use crate::io::Buf;
use crate::postgres::database::Postgres;
use byteorder::NetworkEndian;
use std::str;
// Variants of the backend's AuthenticationRequest ('R') message; the wire
// codes noted per variant match the decode table in `Authentication::read`.
#[derive(Debug)]
pub(crate) enum Authentication {
    /// The authentication exchange is successfully completed.
    Ok, // code 0
    /// The frontend must now take part in a Kerberos V5 authentication dialog (not described
    /// here, part of the Kerberos specification) with the server. If this is successful,
    /// the server responds with an `AuthenticationOk`, otherwise it responds
    /// with an `ErrorResponse`. This is no longer supported.
    KerberosV5, // code 2
    /// The frontend must now send a `PasswordMessage` containing the password in clear-text form.
    /// If this is the correct password, the server responds with an `AuthenticationOk`, otherwise it
    /// responds with an `ErrorResponse`.
    CleartextPassword, // code 3
    /// The frontend must now send a `PasswordMessage` containing the password (with user name)
    /// encrypted via MD5, then encrypted again using the 4-byte random salt specified in the
    /// `AuthenticationMD5Password` message. If this is the correct password, the server responds
    /// with an `AuthenticationOk`, otherwise it responds with an `ErrorResponse`.
    Md5Password, // code 5
    /// This response is only possible for local Unix-domain connections on platforms that support
    /// SCM credential messages. The frontend must issue an SCM credential message and then
    /// send a single data byte.
    ScmCredential, // code 6
    /// The frontend must now initiate a GSSAPI negotiation. The frontend will send a
    /// `GSSResponse` message with the first part of the GSSAPI data stream in response to this.
    Gss, // code 7
    /// The frontend must now initiate a SSPI negotiation.
    /// The frontend will send a GSSResponse with the first part of the SSPI data stream in
    /// response to this.
    Sspi, // code 9
    /// This message contains the response data from the previous step of GSSAPI
    /// or SSPI negotiation.
    GssContinue, // code 8
    /// The frontend must now initiate a SASL negotiation, using one of the SASL mechanisms
    /// listed in the message.
    Sasl, // code 10
    /// This message contains challenge data from the previous step of SASL negotiation.
    SaslContinue, // code 11
    /// SASL authentication has completed with additional mechanism-specific data for the client.
    SaslFinal, // code 12
}
impl Authentication {
    /// Decodes the 4-byte big-endian authentication code at the head of the
    /// message body into its corresponding variant; any unrecognized code
    /// yields a protocol error.
    pub(crate) fn read(mut buf: &[u8]) -> crate::Result<Self> {
        let code = buf.get_u32::<NetworkEndian>()?;
        let message = match code {
            0 => Authentication::Ok,
            2 => Authentication::KerberosV5,
            3 => Authentication::CleartextPassword,
            5 => Authentication::Md5Password,
            6 => Authentication::ScmCredential,
            7 => Authentication::Gss,
            8 => Authentication::GssContinue,
            9 => Authentication::Sspi,
            10 => Authentication::Sasl,
            11 => Authentication::SaslContinue,
            12 => Authentication::SaslFinal,
            code => {
                return Err(protocol_err!("unknown authentication message type: {}", code).into());
            }
        };
        Ok(message)
    }
}
#[derive(Debug)]
pub(crate) struct AuthenticationMd5 {
pub(crate) salt: [u8; 4],
}
impl AuthenticationMd5 {
pub(crate) fn read(buf: &[u8]) -> crate::Result<Self> {
let mut salt = [0_u8; 4];
salt.copy_from_slice(buf);
Ok(Self { salt })
}
}
/// Body of an `AuthenticationSASL` message: the SASL mechanism names the
/// server is willing to accept, in the server's order of preference.
#[derive(Debug)]
pub(crate) struct AuthenticationSasl {
    pub(crate) mechanisms: Box<[Box<str>]>,
}

impl AuthenticationSasl {
    /// Reads the NUL-terminated mechanism names from the payload.
    ///
    /// The list ends with an empty string (a lone NUL byte). Also stops on an
    /// empty buffer so a truncated message cannot panic on `buf[0]`.
    pub(crate) fn read(mut buf: &[u8]) -> crate::Result<Self> {
        let mut mechanisms = Vec::new();
        while !buf.is_empty() && buf[0] != 0 {
            // get_str_nul advances `buf` past the string and its terminator.
            mechanisms.push(buf.get_str_nul()?.into());
        }
        Ok(Self {
            mechanisms: mechanisms.into_boxed_slice(),
        })
    }
}
/// Body of an `AuthenticationSASLContinue` message: the parsed fields of the
/// server's SCRAM challenge, plus the raw challenge text (`data`) which is
/// needed verbatim for the SCRAM signature computation.
#[derive(Debug)]
pub(crate) struct AuthenticationSaslContinue {
    // decoded from the base64 `s=` attribute
    pub(crate) salt: Vec<u8>,
    // `i=` attribute
    pub(crate) iter_count: u32,
    // `r=` attribute (client nonce + server nonce)
    pub(crate) nonce: Vec<u8>,
    // the full challenge as UTF-8 text
    pub(crate) data: String,
}
impl AuthenticationSaslContinue {
    /// Parses the server's SCRAM challenge (`server-first-message`).
    ///
    /// The payload is a comma-separated list of `key=value` attributes;
    /// `s` carries the base64-encoded salt, `r` the combined nonce, and
    /// `i` the iteration count. Unknown keys are ignored.
    pub(crate) fn read(buf: &[u8]) -> crate::Result<Self> {
        let mut salt: Vec<u8> = Vec::new();
        let mut nonce: Vec<u8> = Vec::new();
        let mut iter_count: u32 = 0;
        // Split each "k=v" item: first byte is the key, the second
        // split_at drops the '=' separator.
        let key_value: Vec<(char, &[u8])> = buf
            .split(|byte| *byte == b',')
            .map(|s| {
                let (key, value) = s.split_at(1);
                let value = value.split_at(1).1;
                (key[0] as char, value)
            })
            .collect();
        for (key, value) in key_value.iter() {
            match key {
                's' => salt = value.to_vec(),
                'r' => nonce = value.to_vec(),
                'i' => {
                    let s = str::from_utf8(&value).map_err(|_| {
                        protocol_err!(
                            "iteration count in sasl response was not a valid utf8 string"
                        )
                    })?;
                    // NOTE(review): a malformed count silently becomes 0 here
                    // rather than an error — confirm that is intended.
                    iter_count = u32::from_str_radix(&s, 10).unwrap_or(0);
                }
                _ => {}
            }
        }
        Ok(Self {
            salt: base64::decode(&salt).map_err(|_| {
                protocol_err!("salt value response from postgres was not base64 encoded")
            })?,
            nonce,
            iter_count,
            data: str::from_utf8(buf)
                .map_err(|_| protocol_err!("SaslContinue response was not a valid utf8 string"))?
                .to_string(),
        })
    }
}
#[cfg(test)]
mod tests {
    use super::Authentication;
    use crate::postgres::protocol::authentication::AuthenticationMd5;
    // Raw message bodies (after the tag/length header has been consumed).
    const AUTH_OK: &[u8] = b"\0\0\0\0";
    const AUTH_MD5: &[u8] = b"\0\0\0\x05\x93\x189\x98";
    // Code 0 decodes to the Ok variant.
    #[test]
    fn it_reads_auth_ok() {
        let m = Authentication::read(AUTH_OK).unwrap();
        assert!(matches!(m, Authentication::Ok));
    }
    // Code 5 decodes to Md5Password and the remaining 4 bytes are the salt.
    #[test]
    fn it_reads_auth_md5_password() {
        let m = Authentication::read(AUTH_MD5).unwrap();
        let data = AuthenticationMd5::read(&AUTH_MD5[4..]).unwrap();
        assert!(matches!(m, Authentication::Md5Password));
        assert_eq!(data.salt, [147, 24, 57, 152]);
    }
}
|
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use std::{
marker::PhantomData,
ptr::null_mut,
};
use winapi::{
shared::{
minwindef::FALSE,
winerror::WAIT_TIMEOUT,
},
um::{
errhandlingapi::GetLastError,
minwinbase::SECURITY_ATTRIBUTES,
processthreadsapi::GetCurrentProcess,
synchapi::{CreateMutexW, OpenMutexW, ReleaseMutex, WaitForSingleObject},
winbase::{INFINITE, WAIT_ABANDONED, WAIT_OBJECT_0},
winnt::{DUPLICATE_SAME_ACCESS, HANDLE, SYNCHRONIZE},
},
};
use error::Error;
use handle::Handle;
use wide::ToWide;
/// RAII wrapper around a Win32 (optionally named) mutex kernel object.
pub struct Mutex(Handle);
impl Mutex {
    /// Creates a named mutex (or opens it if it already exists).
    /// The second argument to `CreateMutexW` is 0 (FALSE): the calling
    /// thread does not take initial ownership.
    pub fn create(security_attributes: Option<&SECURITY_ATTRIBUTES>, name: &str) -> Result<Mutex, Error> {
        unsafe {
            let handle = CreateMutexW(
                security_attributes.map(|x| x as *const _ as *mut _).unwrap_or(null_mut()),
                0,
                name.to_wide_null().as_ptr(),
            );
            if handle.is_null() {
                return Error::last_result();
            }
            Ok(Mutex(Handle::new(handle)))
        }
    }
    /// Opens an existing named mutex with SYNCHRONIZE access only
    /// (enough to wait on and release it, but not to change its ACL).
    pub fn open(name: &str) -> Result<Mutex, Error> {
        unsafe {
            let handle = OpenMutexW(
                SYNCHRONIZE,
                FALSE,
                name.to_wide_null().as_ptr(),
            );
            if handle.is_null() {
                return Error::last_result();
            }
            Ok(Mutex(Handle::new(handle)))
        }
    }
    /// The timeout is specified in milliseconds
    /// Specifying None for the timeout means to wait forever
    pub fn wait<'a>(&'a self, timeout: Option<u32>) -> Result<MutexGuard<'a>, WaitError<'a>> {
        unsafe {
            match WaitForSingleObject(*self.0, timeout.unwrap_or(INFINITE)) {
                // Previous owner terminated without releasing: we now own the
                // mutex, but protected state may be inconsistent — the guard
                // is handed back inside the error so the caller can decide.
                WAIT_ABANDONED => Err(WaitError::Abandoned(MutexGuard::new(self))),
                WAIT_OBJECT_0 => Ok(MutexGuard::new(self)),
                WAIT_TIMEOUT => Err(WaitError::Timeout),
                _ => Err(WaitError::Other(Error::last())),
            }
        }
    }
    /// Duplicates the underlying handle; both `Mutex` values then refer to
    /// the same kernel object.
    pub fn try_clone(&self) -> Result<Mutex, Error> {
        unsafe {
            let handle = Handle::duplicate_from(*self.0)?;
            Ok(Mutex(handle))
        }
    }
}
// SAFETY-NOTE(review): these impls assert the raw handle may be used from any
// thread; kernel mutex handles are process-wide, but ReleaseMutex must be
// called by the owning thread — confirm guards are not sent across threads.
unsafe impl Send for Mutex {}
unsafe impl Sync for Mutex {}
/// Ownership token returned by `Mutex::wait`; releases the mutex on drop.
/// The `PhantomData` ties the guard's lifetime to the `Mutex` it came from.
pub struct MutexGuard<'a>(HANDLE, PhantomData<&'a Mutex>);
impl<'a> MutexGuard<'a> {
    // Only constructed after a successful (or abandoned) wait, i.e. while
    // the calling thread owns the mutex.
    unsafe fn new(mutex: &'a Mutex) -> MutexGuard<'a> {
        MutexGuard(*mutex.0, PhantomData)
    }
}
impl<'a> Drop for MutexGuard<'a> {
    // Releasing can only fail if the thread does not own the mutex, which
    // would be a usage bug — hence the panic rather than a silent ignore.
    fn drop(&mut self) {
        unsafe {
            if ReleaseMutex(self.0) == 0 {
                let err = GetLastError();
                panic!("failed to call ReleaseMutex: {}", err);
            }
        }
    }
}
/// Outcome of a failed or abnormal `Mutex::wait`.
pub enum WaitError<'a> {
    /// The timeout elapsed before the mutex became available.
    Timeout,
    /// The previous owner died; the caller now owns the mutex via the guard.
    Abandoned(MutexGuard<'a>),
    /// Any other Win32 error.
    Other(Error),
}
|
use std::borrow::Cow;
use std::iter::FromIterator;
/// A country and its list of cities.
///
/// Names are `Cow<str>` so callers can pass either borrowed `&'a str`s
/// (no allocation) or owned `String`s.
#[derive(Debug, Clone, PartialEq)]
struct Country<'a> {
    name: Cow<'a, str>,
    cities: Vec<Cow<'a, str>>,
}
#[allow(dead_code)]
impl<'a> Country<'a> {
    /// Builds a country with the given name and no cities yet.
    fn new<S>(name: S) -> Country<'a>
    where
        S: Into<Cow<'a, str>>,
    {
        let name = name.into();
        Country {
            name,
            cities: Vec::new(),
        }
    }
    /// Appends a city; accepts anything convertible into `Cow<str>`,
    /// so `&str` and `String` both work without a forced allocation.
    fn add_city<S>(&mut self, city_name: S)
    where
        S: Into<Cow<'a, str>>,
    {
        let city = city_name.into();
        self.cities.push(city);
    }
}
impl<'a> IntoIterator for Country<'a> {
    type Item = Cow<'a, str>;
    type IntoIter = ::std::vec::IntoIter<Self::Item>;
    /// Iterates over the country's cities, consuming the `Country`.
    fn into_iter(self) -> Self::IntoIter {
        // `self` is owned here, so the vector can be consumed directly;
        // the previous `to_vec()` cloned every city first for nothing.
        self.cities.into_iter()
    }
}
impl<'a> FromIterator<Cow<'a, str>> for Country<'a> {
    /// Collects city names into a fresh country named "Restored".
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Cow<'a, str>>,
    {
        let mut country = Self::new("Restored");
        iter.into_iter().for_each(|city| country.add_city(city));
        country
    }
}
// Round-trips cities through IntoIterator and FromIterator and checks
// the rebuilt Country. PartialEq on Cow compares by value, so Borrowed
// expectations match even if the round trip produced Owned variants.
#[test]
fn from_and_into_iterator_test() {
    let mut country = Country::new("Spain");
    country.add_city("Madrid");
    country.add_city("Barcelona".to_string());
    country.add_city("Cadiz".to_string());
    let mut results = vec![];
    for city in country.clone().into_iter() {
        results.push(city);
    }
    assert_eq!(results, vec!["Madrid", "Barcelona", "Cadiz"]);
    let it = country.into_iter();
    let c = Country::from_iter(it);
    assert_eq!(
        c,
        Country {
            name: Cow::Borrowed("Restored"),
            cities: vec![
                Cow::Borrowed("Madrid"),
                Cow::Borrowed("Barcelona"),
                Cow::Borrowed("Cadiz")
            ]
        }
    );
}
|
use serde::{Deserialize, Serialize};
use std::collections::hash_map::RandomState;
use std::collections::HashMap;
use std::default::Default;
use bollard::models::ContainerInspectResponse;
/// Label set attached to a Prometheus scrape target, populated from a
/// container's Docker labels.
///
/// `Default` is derived: `String::default()` is `""`, which is exactly what
/// the previous hand-written impl produced field by field.
#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct PromConfigLabel {
    pub job: String,
    pub name: String,
    pub id: String,
    pub scheme: String,
    pub metrics_path: String,
    pub com_docker_compose_service: String,
}

impl PromConfigLabel {
    /// Creates an empty label set (every field `""`).
    pub fn new() -> Self {
        Default::default()
    }
}
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct PromConfig {
// labels for container information
pub labels: PromConfigLabel,
pub targets: Vec<String>,
}
impl PromConfig {
pub fn new() -> Self {
Default::default()
}
}
impl Default for PromConfig {
fn default() -> Self {
PromConfig {
labels: PromConfigLabel::new(),
targets: Vec::new(),
}
}
}
/// Reads the `prometheus-scrape.enabled` container label.
///
/// Returns `None` when the label is absent; otherwise `Some(true)` exactly
/// when the label value is the string `"true"` (anything else is `false`).
pub fn get_scrape_enabled(hash: &HashMap<String, String, RandomState>) -> Option<bool> {
    // `&str` lookup via Borrow avoids allocating a key String per call,
    // and `==` against a literal avoids allocating for the comparison.
    hash.get("prometheus-scrape.enabled").map(|e| e == "true")
}
/// Reads the `prometheus-scrape.job_name` container label, defaulting to
/// the empty string when it is not set.
pub fn get_config_job(hash: HashMap<String, String, RandomState>) -> String {
    hash.get("prometheus-scrape.job_name")
        .cloned()
        .unwrap_or_default()
}
/// Reads the `prometheus-scrape.port` container label, defaulting to "9090".
pub fn get_config_port(hash: HashMap<String, String, RandomState>) -> String {
    if let Some(new_port) = hash.get("prometheus-scrape.port") {
        debug!("Port is set to {}.", new_port);
        return new_port.clone();
    }
    // Fixed copy-pasted log text: this function reads the port, not the job name.
    debug!("Port is not set, using default value.");
    String::from("9090")
}
/// Reads the `prometheus-scrape.scheme` container label, defaulting to "http".
pub fn get_config_scheme(hash: HashMap<String, String, RandomState>) -> String {
    if let Some(new_scheme) = hash.get("prometheus-scrape.scheme") {
        // Fixed copy-pasted log text: this is the scheme, not the port.
        debug!("Scheme is set to {}.", new_scheme);
        return new_scheme.clone();
    }
    debug!("Scheme is not set, using default value.");
    String::from("http")
}
/// Reads the `prometheus-scrape.metrics_path` container label, defaulting
/// to "/metrics".
pub fn get_config_metrics_path(hash: HashMap<String, String, RandomState>) -> String {
    if let Some(new_path) = hash.get("prometheus-scrape.metrics_path") {
        // Fixed copy-pasted log text: this is the metrics path, not the port.
        debug!("Metrics path is set to {}.", new_path);
        return new_path.clone();
    }
    debug!("Metrics path is not set, using default value.");
    String::from("/metrics")
}
/// Reads the `com.docker.compose.service` label, defaulting to "".
pub fn get_config_docker_compose_service(hash: HashMap<String, String, RandomState>) -> String {
    // TODO: Find out what is compose_service's default, given that it is not in documentation
    if let Some(new_compose_service) = hash.get("com.docker.compose.service") {
        debug!("Compose service name is set to {}.", new_compose_service);
        return new_compose_service.clone();
    }
    // Fixed copy-pasted log text: this is the compose service, not the job name.
    debug!("Compose service name is not set, using default value.");
    String::new()
}
// Get hostname or ip address set in the docker's label.
// There are two option's labels:
// 1. `prometheus-scrape.hostname`
// 2. `prometheus-scrape.ip_as_hostname`
// `prometheus-scrape.hostname` is preferred first, and then `prometheus-scrape.ip_as_hostname`;
// in case neither is detected, the default container's name (`cname`) is used.
pub fn get_config_hostname(hash: HashMap<String, String, RandomState>, cname: String) -> String {
    let config_hostname = &String::from("prometheus-scrape.hostname");
    let config_ip_hostname = &String::from("prometheus-scrape.ip_as_hostname");
    if let Some(new_hostname) = hash.get(config_hostname) {
        debug!("Hostname is set to {}.", new_hostname);
        return String::from(new_hostname);
    }
    if let Some(new_ip_hostname) = hash.get(config_ip_hostname) {
        match new_ip_hostname.eq(&String::from("true")) {
            true => {
                // NOTE(review): this returns the literal label value "true"
                // as the hostname, not the container's IP address — looks
                // like a bug, but the IP is not available in this function's
                // inputs; confirm intended behavior with the callers.
                debug!("IP address for hostname is set to {}.", new_ip_hostname);
                return String::from(new_ip_hostname);
            }
            false => {
                debug!("hostname is not set, using default value.");
                return cname;
            }
        }
    }
    debug!("hostname is not set, using default value.");
    return cname;
}
/// Returns the container's name with the leading "/" that Docker prepends
/// stripped. Yields "" when the name is absent or lacks the "/" prefix
/// (same behavior as before, in combinator form).
pub fn get_container_name(ctr: ContainerInspectResponse) -> String {
    ctr.name
        .and_then(|name| name.strip_prefix('/').map(str::to_owned))
        .unwrap_or_default()
}
/// Returns the hostname from the container's config, or "" when the config
/// or hostname is missing.
pub fn get_container_hostname(ctr: ContainerInspectResponse) -> String {
    // Combinator form replaces the mixed `return x` / expression match.
    ctr.config.and_then(|e| e.hostname).unwrap_or_default()
}
|
// copyright 2017 Kaz Wesley
//! Classic Blake in a Rustic setting
#![no_std]
extern crate block_buffer;
pub extern crate digest;
mod consts;
use block_buffer::BlockBuffer;
use core::mem;
use digest::generic_array::GenericArray;
pub use digest::Digest;
/// Chaining state shared by all BLAKE variants, generic over the word size
/// (u32 for BLAKE-224/256, u64 for BLAKE-384/512).
#[derive(Debug, Clone, Copy)]
#[repr(C)]
struct State<T> {
    h: [T; 8],    // chain value
    s: [T; 4],    // salt (all zero when unsalted)
    t: [T; 2],    // message bit counter, low/high words
    nullt: bool,  // true while compressing a block that is pure padding
}
/// Generates a BLAKE compression-function type for one word size.
/// Parameters: type name, word type, block-size typenum, word deserializer,
/// the constant table `U`, round count, and the four rotation amounts.
macro_rules! define_compressor {
    ($compressor:ident, $word:ident, $Bufsz:ty, $deserializer:path, $uval:expr,
     $rounds:expr, $shift0:expr, $shift1:expr, $shift2: expr, $shift3: expr) => {
        #[derive(Clone, Copy, Debug)]
        struct $compressor {
            state: State<$word>,
        }
        impl $compressor {
            /// Adds `count` bytes to the message bit counter, propagating the
            /// carry into the high word.
            fn increase_count(&mut self, count: $word) {
                let (new_t0, carry) = self.state.t[0].overflowing_add(count * 8);
                self.state.t[0] = new_t0;
                if carry { self.state.t[1] += 1; }
            }
            /// Runs the compression function over one message block.
            fn put_block(&mut self, block: &GenericArray<u8, $Bufsz>) {
                const U: [$word; 16] = $uval;
                // The BLAKE G mixing function on one column/diagonal.
                #[inline(always)]
                fn g(v: &mut [$word; 16], m: &[$word; 16], sigma: &[u8; 16],
                     a: usize, b: usize, c: usize, d: usize, e: usize) {
                    v[a] = v[a].wrapping_add(m[sigma[e] as usize] ^ U[sigma[e+1] as usize])
                        .wrapping_add(v[b]);
                    v[d] = (v[d] ^ v[a]).rotate_right($shift0);
                    v[c] = v[c].wrapping_add(v[d]);
                    v[b] = (v[b] ^ v[c]).rotate_right($shift1);
                    v[a] = v[a].wrapping_add(m[sigma[e+1] as usize] ^ U[sigma[e] as usize])
                        .wrapping_add(v[b]);
                    v[d] = (v[d] ^ v[a]).rotate_right($shift2);
                    v[c] = v[c].wrapping_add(v[d]);
                    v[b] = (v[b] ^ v[c]).rotate_right($shift3);
                }
                // Deserialize the block into 16 message words.
                let mut m = [0; 16];
                for (mx, b) in m.iter_mut().zip(block.chunks(mem::size_of::<$word>())) {
                    *mx = $deserializer(b);
                }
                // Initialize the 16-word working state. (The stray leading
                // `&` on these two statements — a discarded reference to the
                // unit return of copy_from_slice — has been removed.)
                let mut v = [0; 16];
                v[..8].copy_from_slice(&self.state.h);
                v[8..].copy_from_slice(&U[..8]);
                for (vx, sx) in v[8..11].iter_mut().zip(&self.state.s) {
                    *vx ^= *sx;
                }
                // don't xor t when the block is only padding
                if !self.state.nullt {
                    v[12] ^= self.state.t[0];
                    v[13] ^= self.state.t[0];
                    v[14] ^= self.state.t[1];
                    v[15] ^= self.state.t[1];
                }
                for sigma in &SIGMA[..$rounds] {
                    // column step
                    g(&mut v, &m, sigma, 0, 4, 8, 12, 0 );
                    g(&mut v, &m, sigma, 1, 5, 9, 13, 2 );
                    g(&mut v, &m, sigma, 2, 6, 10, 14, 4 );
                    g(&mut v, &m, sigma, 3, 7, 11, 15, 6 );
                    // diagonal step
                    g(&mut v, &m, sigma, 0, 5, 10, 15, 8 );
                    g(&mut v, &m, sigma, 1, 6, 11, 12, 10 );
                    g(&mut v, &m, sigma, 2, 7, 8, 13, 12 );
                    g(&mut v, &m, sigma, 3, 4, 9, 14, 14 );
                }
                // Finalization: fold the working state and salt back into h.
                for (i, vx) in v.iter().enumerate() {
                    self.state.h[i % 8] ^= *vx;
                }
                for (i, hx) in self.state.h.iter_mut().enumerate() {
                    *hx ^= self.state.s[i % 4];
                }
            }
        }
    }
}
/// Generates a `digest`-compatible hasher type over one of the compressors.
/// Parameters: type name, word type, block size in bytes (value and typenum),
/// digest bit length, output-size typenum, word serializer, compressor type,
/// and the initial chain value.
macro_rules! define_hasher {
    ($name:ident, $word:ident, $buf:expr, $Bufsz:ty, $bits:expr, $Bytes:ident,
     $serializer:path, $compressor:ident, $iv:expr) => {
        #[derive(Clone)]
        pub struct $name {
            compressor: $compressor,
            buffer: BlockBuffer<$Bufsz>,
        }
        impl core::fmt::Debug for $name {
            fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> {
                f.debug_struct("Blake")
                    .field("compressor", &self.compressor)
                    .field("buffer.position()", &self.buffer.position())
                    .finish()
            }
        }
        impl Default for $name {
            fn default() -> Self {
                Self {
                    compressor: $compressor {
                        state: State::<$word> {
                            h: $iv,
                            s: [0; 4],
                            t: [0; 2],
                            nullt: false,
                        }
                    },
                    buffer: BlockBuffer::default(),
                }
            }
        }
        impl digest::BlockInput for $name {
            type BlockSize = $Bytes;
        }
        impl digest::Input for $name {
            // Buffers input; every full block bumps the bit counter and is
            // compressed immediately.
            fn input<T: AsRef<[u8]>>(&mut self, data: T) {
                let compressor = &mut self.compressor;
                self.buffer.input(data.as_ref(), |block| {
                    compressor.increase_count((mem::size_of::<$word>() * 16) as $word);
                    compressor.put_block(block);
                });
            }
        }
        impl digest::FixedOutput for $name {
            type OutputSize = $Bytes;
            // BLAKE finalization: append padding, a "magic" marker byte and
            // the 2-word big-endian message length, then serialize h.
            fn fixed_result(self) -> GenericArray<u8, $Bytes> {
                let mut compressor = self.compressor;
                let mut buffer = self.buffer;
                compressor.increase_count(buffer.position() as $word);
                let mut msglen = [0u8; $buf/8];
                $serializer(&mut msglen[..$buf/16], compressor.state.t[1]);
                $serializer(&mut msglen[$buf/16..], compressor.state.t[0]);
                let footerlen = 1 + 2 * mem::size_of::<$word>();
                // low bit indicates full-length variant
                let isfull = ($bits == 8 * mem::size_of::<[$word; 8]>()) as u8;
                // high bit indicates fit with no padding
                let exactfit = if buffer.position() + footerlen != $buf { 0x00 } else { 0x80 };
                let magic = isfull | exactfit;
                // if header won't fit in last data block, pad to the end and start a new one
                let extra_block = buffer.position() + footerlen > $buf;
                if extra_block {
                    let pad = $buf - buffer.position();
                    buffer.input(&PADDING[..pad], |block| compressor.put_block(block));
                    debug_assert_eq!(buffer.position(), 0);
                }
                // pad last block up to footer start point
                compressor.state.nullt = buffer.position() == 0;
                // skip begin-padding byte if continuing padding
                let x = extra_block as usize;
                let (start, end) = (x, x + ($buf - footerlen - buffer.position()));
                buffer.input(&PADDING[start..end], |_| unreachable!());
                buffer.input(&[magic], |_| unreachable!());
                buffer.input(&msglen, |block| compressor.put_block(block));
                debug_assert_eq!(buffer.position(), 0);
                // Serialize the chain value into the output, truncated to
                // the digest's byte length by the output array size.
                let mut out = GenericArray::default();
                for (h, out) in compressor.state.h.iter()
                        .zip(out.chunks_mut(mem::size_of::<$word>())) {
                    $serializer(out, *h);
                }
                out
            }
        }
        impl digest::Reset for $name {
            fn reset(&mut self) {
                *self = Self::default()
            }
        }
    };
}
use block_buffer::byteorder::{ByteOrder, BE};
use consts::{
BLAKE224_IV, BLAKE256_IV, BLAKE256_U, BLAKE384_IV, BLAKE512_IV, BLAKE512_U, PADDING, SIGMA,
};
use digest::generic_array::typenum::{U128, U28, U32, U48, U64};
// BLAKE-224/256: 32-bit words, 64-byte blocks, 14 rounds,
// rotations (16, 12, 8, 7), big-endian (de)serialization.
#[cfg_attr(rustfmt, rustfmt_skip)]
define_compressor!(Compressor256, u32, U64, BE::read_u32, BLAKE256_U, 14, 16, 12, 8, 7);
#[cfg_attr(rustfmt, rustfmt_skip)]
define_hasher!(Blake224, u32, 64, U64, 224, U28, BE::write_u32, Compressor256, BLAKE224_IV);
#[cfg_attr(rustfmt, rustfmt_skip)]
define_hasher!(Blake256, u32, 64, U64, 256, U32, BE::write_u32, Compressor256, BLAKE256_IV);
// BLAKE-384/512: 64-bit words, 128-byte blocks, 16 rounds,
// rotations (32, 25, 16, 11).
#[cfg_attr(rustfmt, rustfmt_skip)]
define_compressor!(Compressor512, u64, U128, BE::read_u64, BLAKE512_U, 16, 32, 25, 16, 11);
#[cfg_attr(rustfmt, rustfmt_skip)]
define_hasher!(Blake384, u64, 128, U128, 384, U48, BE::write_u64, Compressor512, BLAKE384_IV);
#[cfg_attr(rustfmt, rustfmt_skip)]
define_hasher!(Blake512, u64, 128, U128, 512, U64, BE::write_u64, Compressor512, BLAKE512_IV);
|
/// Concrete width/height pair implementing `Dimension`.
#[derive(Copy, Clone, Eq, PartialEq, PartialOrd)]
pub struct Dim {
    width: usize,
    height: usize,
}
impl Dim {
    /// Creates a dimension of `width` x `height` cells.
    pub fn new(width: usize, height: usize) -> Dim {
        Dim { width, height }
    }
}
// Trivial accessor implementation of the `Dimension` trait.
impl Dimension for Dim {
    fn width(&self) -> usize {
        self.width
    }
    fn height(&self) -> usize {
        self.height
    }
}
/// A 2-D extent with conversions between `(x, y)` coordinates and a
/// row-major linear offset.
pub trait Dimension {
    /// Width in cells.
    fn width(&self) -> usize;
    /// Height in cells.
    fn height(&self) -> usize;
    /// Converts an `(x, y)` coordinate to its row-major linear offset.
    fn at(&self, x: usize, y: usize) -> usize {
        // Operands are already usize; the old `as usize` cast was a no-op.
        x + y * self.width()
    }
    /// Converts a linear offset back to its `(x, y)` coordinate
    /// (inverse of `at` for offsets within the grid).
    fn xy(&self, offset: usize) -> (usize, usize) {
        let x = offset % self.width();
        let y = offset / self.width();
        (x, y)
    }
}
#[cfg(test)]
mod tests {
    use screen::dimension::Dimension;
    use screen::drawer::Drawer;
    use screen::screen::Screen;
    use screen::smart_index::SmartIndex;
    // at() and xy() must be inverses over the whole grid.
    #[test]
    fn test_dimension() {
        let mut scr = Screen::new(17, 13);
        for x in 0..scr.width() {
            for y in 0..scr.height() {
                let offset = scr.at(x, y);
                let xy = scr.xy(offset);
                let offset2 = scr.at(xy.0, xy.1);
                assert_eq!(x, xy.0);
                assert_eq!(y, xy.1);
                assert_eq!(offset, offset2);
            }
        }
    }
    // index() must agree with at()/xy() for both usize coordinates and
    // negative (wrap-around) i32 coordinates.
    #[test]
    fn test_index() {
        let w = 17;
        let h = 13;
        let scr = Screen::new(w, h);
        for expected in 0..(w * h) {
            let (x, y) = scr.xy(expected);
            let actuals = [
                scr.index(x as usize, y as usize),
                scr.index(x as i32, y as i32),
                scr.index(x as i32 - w as i32, y as i32),
                scr.index(x as i32, y as i32 - h as i32),
                scr.index(x as i32 - w as i32, y as i32 - h as i32),
            ];
            print!("{},{} :", x, y);
            for &actual in actuals.iter() {
                print!(" {}", actual);
                assert_eq!(actual, expected);
            }
            println!();
        }
    }
    // NOTE(review): placeholder — constructs a Screen but asserts nothing
    // yet; either finish the cursor test or remove it.
    #[test]
    fn test_cursor() {
        let w = 17;
        let h = 13;
        let scr = Screen::new(w, h);
    }
}
|
use ::ir::variant::VariantType;
use ::FieldReference;
// error-chain boilerplate: links the JSON-frontend error chain into this
// crate's Error/ErrorKind, and wraps CompilerError as an ErrorKind variant
// rendered via its own display() method.
error_chain! {
    links {
        JsonParseError(
            ::frontend::protocol_json::Error,
            ::frontend::protocol_json::ErrorKind);
    }
    errors {
        CompilerError(t: CompilerError) {
            description("error under compilation")
            display("{}", t.display())
        }
    }
}
/// Errors raised during compilation; rendered for users by `display()`.
#[derive(Debug, Clone)]
pub enum CompilerError {
    /// The given variant does not have this property.
    NoProperty { variant: VariantType, property: String, },
    /// Attempted to resolve a nonexistent field on a variant.
    ChildResolveError { parent_variant: String, name: String, },
    /// Attempted to match on a type which does not support it.
    UnmatchableType { variant: VariantType, },
    /// Error while resolving a reference.
    ReferenceError { reference: FieldReference, },
    /// Error occurred while inside a variant.
    InsideVariant { variant: VariantType, },
    /// Error occurred while inside a named field.
    InsideNamed { name: String, },
    /// An error occurred in a nom parser.
    NomParseError(::nom::verbose_errors::Err<usize>),
}
impl CompilerError {
    /// Renders a single-line, human-readable description of the error,
    /// used by the `error_chain!` display for the `CompilerError` kind.
    pub fn display(&self) -> String {
        match *self {
            CompilerError::NoProperty { ref variant, ref property } =>
                format!("'{:?}' variant has no property '{}'",
                        variant, property),
            CompilerError::ChildResolveError { ref parent_variant, ref name } =>
                format!("'{}' variant has no child with name '{}'",
                        parent_variant, name),
            CompilerError::UnmatchableType { ref variant } =>
                format!("'{:?}' does not support matching",
                        variant),
            CompilerError::ReferenceError { ref reference } =>
                format!("unable to resolve reference '{:?}'",
                        reference),
            CompilerError::InsideVariant { ref variant } =>
                format!("inside variant '{:?}'",
                        variant),
            CompilerError::InsideNamed { ref name } =>
                format!("inside named '{:?}'",
                        name),
            // Typo fixed ("errror" -> "error"); a plain literal needs no format!.
            CompilerError::NomParseError(_) =>
                "nom parse error".to_string(),
        }
    }
}
/// Lets `?` lift a `CompilerError` straight into the chained `Error`.
impl From<CompilerError> for Error {
    fn from(typ: CompilerError) -> Error {
        ErrorKind::CompilerError(typ).into()
    }
}
impl From<CompilerError> for ErrorKind {
    fn from(typ: CompilerError) -> ErrorKind {
        // Already the target type; the previous `.into()` was an identity
        // conversion and has been dropped.
        ErrorKind::CompilerError(typ)
    }
}
|
//! Parsing and processing for this form:
//! ```ignore
//! py_compile!(
//! // either:
//! source = "python_source_code",
//! // or
//! file = "file/path/relative/to/$CARGO_MANIFEST_DIR",
//!
//! // the mode to compile the code in
//! mode = "exec", // or "eval" or "single"
//! // the path put into the CodeObject, defaults to "frozen"
//! module_name = "frozen",
//! )
//! ```
use crate::{extract_spans, Diagnostic};
use once_cell::sync::Lazy;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use rustpython_compiler_core::{bytecode::CodeObject, frozen, Mode};
use std::{
collections::HashMap,
env, fs,
path::{Path, PathBuf},
};
use syn::{
self,
parse::{Parse, ParseStream, Result as ParseResult},
parse2,
spanned::Spanned,
Lit, LitByteStr, LitStr, Macro, Meta, MetaNameValue, Token,
};
// Crate root of the *invoking* crate, used to resolve `file = ...` and
// `dir = ...` arguments; read once and cached for the macro expansion.
static CARGO_MANIFEST_DIR: Lazy<PathBuf> = Lazy::new(|| {
    PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is not present"))
});
/// Where the Python source to compile comes from.
enum CompilationSourceKind {
    /// Source is a File (Path)
    File(PathBuf),
    /// Direct Raw source code
    SourceCode(String),
    /// Source is a directory
    Dir(PathBuf),
}
/// One compiled module plus whether it is a package (`__init__.py`).
struct CompiledModule {
    code: CodeObject,
    package: bool,
}
/// A compilation source paired with the macro-argument spans used to
/// point compile-error diagnostics at the right tokens.
struct CompilationSource {
    kind: CompilationSourceKind,
    span: (Span, Span),
}
/// Abstraction over the Python compiler so this proc-macro crate does not
/// depend on one concrete compiler implementation.
pub trait Compiler {
    /// Compiles `source` in the given mode into a `CodeObject` whose
    /// embedded path/name is `module_name`.
    fn compile(
        &self,
        source: &str,
        mode: Mode,
        module_name: String,
    ) -> Result<CodeObject, Box<dyn std::error::Error>>;
}
impl CompilationSource {
    /// Compiles one source string, converting any compiler error into a
    /// spanned Diagnostic that names the origin (file path or "string
    /// literal") of the failing code.
    fn compile_string<D: std::fmt::Display, F: FnOnce() -> D>(
        &self,
        source: &str,
        mode: Mode,
        module_name: String,
        compiler: &dyn Compiler,
        origin: F,
    ) -> Result<CodeObject, Diagnostic> {
        compiler.compile(source, mode, module_name).map_err(|err| {
            Diagnostic::spans_error(
                self.span,
                format!("Python compile error from {}: {}", origin(), err),
            )
        })
    }
    /// Compiles this source into a module-name -> compiled-module map.
    /// Directory sources recurse over every `.py` file; file/string
    /// sources produce a single non-package entry under `module_name`.
    fn compile(
        &self,
        mode: Mode,
        module_name: String,
        compiler: &dyn Compiler,
    ) -> Result<HashMap<String, CompiledModule>, Diagnostic> {
        match &self.kind {
            CompilationSourceKind::Dir(rel_path) => self.compile_dir(
                &CARGO_MANIFEST_DIR.join(rel_path),
                String::new(),
                mode,
                compiler,
            ),
            _ => Ok(hashmap! {
                module_name.clone() => CompiledModule {
                    code: self.compile_single(mode, module_name, compiler)?,
                    package: false,
                },
            }),
        }
    }
    /// Compiles a file or string source; directory sources are handled by
    /// `compile` and must not reach this function.
    fn compile_single(
        &self,
        mode: Mode,
        module_name: String,
        compiler: &dyn Compiler,
    ) -> Result<CodeObject, Diagnostic> {
        match &self.kind {
            CompilationSourceKind::File(rel_path) => {
                let path = CARGO_MANIFEST_DIR.join(rel_path);
                let source = fs::read_to_string(&path).map_err(|err| {
                    Diagnostic::spans_error(
                        self.span,
                        format!("Error reading file {path:?}: {err}"),
                    )
                })?;
                self.compile_string(&source, mode, module_name, compiler, || rel_path.display())
            }
            CompilationSourceKind::SourceCode(code) => {
                // Dedent so sources indented inside the macro body compile.
                self.compile_string(&textwrap::dedent(code), mode, module_name, compiler, || {
                    "string literal"
                })
            }
            CompilationSourceKind::Dir(_) => {
                unreachable!("Can't use compile_single with directory source")
            }
        }
    }
    /// Recursively compiles every `.py` file under `path`. `parent` is the
    /// dotted module path accumulated so far ("" at the root).
    fn compile_dir(
        &self,
        path: &Path,
        parent: String,
        mode: Mode,
        compiler: &dyn Compiler,
    ) -> Result<HashMap<String, CompiledModule>, Diagnostic> {
        let mut code_map = HashMap::new();
        let paths = fs::read_dir(path)
            .or_else(|e| {
                // On Windows a checked-out symlink may materialize as a text
                // file containing the target path; follow it manually.
                if cfg!(windows) {
                    if let Ok(real_path) = fs::read_to_string(path.canonicalize().unwrap()) {
                        return fs::read_dir(real_path.trim());
                    }
                }
                Err(e)
            })
            .map_err(|err| {
                Diagnostic::spans_error(self.span, format!("Error listing dir {path:?}: {err}"))
            })?;
        for path in paths {
            let path = path.map_err(|err| {
                Diagnostic::spans_error(self.span, format!("Failed to list file: {err}"))
            })?;
            let path = path.path();
            let file_name = path.file_name().unwrap().to_str().ok_or_else(|| {
                Diagnostic::spans_error(self.span, format!("Invalid UTF-8 in file name {path:?}"))
            })?;
            if path.is_dir() {
                // Subdirectory: recurse with its name appended to the module path.
                code_map.extend(self.compile_dir(
                    &path,
                    if parent.is_empty() {
                        file_name.to_string()
                    } else {
                        format!("{parent}.{file_name}")
                    },
                    mode,
                    compiler,
                )?);
            } else if file_name.ends_with(".py") {
                let stem = path.file_stem().unwrap().to_str().unwrap();
                // __init__.py is recorded under the package's own name.
                let is_init = stem == "__init__";
                let module_name = if is_init {
                    parent.clone()
                } else if parent.is_empty() {
                    stem.to_owned()
                } else {
                    format!("{parent}.{stem}")
                };
                let compile_path = |src_path: &Path| {
                    let source = fs::read_to_string(src_path).map_err(|err| {
                        Diagnostic::spans_error(
                            self.span,
                            format!("Error reading file {path:?}: {err}"),
                        )
                    })?;
                    self.compile_string(&source, mode, module_name.clone(), compiler, || {
                        path.strip_prefix(&*CARGO_MANIFEST_DIR)
                            .ok()
                            .unwrap_or(&path)
                            .display()
                    })
                };
                // Same Windows text-file-symlink fallback as for read_dir above.
                let code = compile_path(&path).or_else(|e| {
                    if cfg!(windows) {
                        if let Ok(real_path) = fs::read_to_string(path.canonicalize().unwrap()) {
                            let joined = path.parent().unwrap().join(real_path.trim());
                            if joined.exists() {
                                return compile_path(&joined);
                            } else {
                                return Err(e);
                            }
                        }
                    }
                    Err(e)
                });
                let code = match code {
                    Ok(code) => code,
                    // Test fixtures that are intentionally invalid Python are
                    // skipped instead of failing the whole freeze.
                    Err(_)
                        if stem.starts_with("badsyntax_")
                            | parent.ends_with(".encoded_modules") =>
                    {
                        // TODO: handle with macro arg rather than hard-coded path
                        continue;
                    }
                    Err(e) => return Err(e),
                };
                code_map.insert(
                    module_name,
                    CompiledModule {
                        code,
                        package: is_init,
                    },
                );
            }
        }
        Ok(code_map)
    }
}
/// This is essentially just a comma-separated list of Meta nodes, aka the inside of a MetaList.
/// This is essentially just a comma-separated list of Meta nodes, aka the inside of a MetaList.
struct PyCompileInput {
    // span of the whole macro input, for "must have a source" errors
    span: Span,
    metas: Vec<Meta>,
}
impl PyCompileInput {
    /// Interprets the parsed `name = "value"` metas into `PyCompileArgs`.
    /// `allow_dir` is true for `py_freeze!` (which accepts `dir = ...`)
    /// and false for `py_compile!`.
    fn parse(&self, allow_dir: bool) -> Result<PyCompileArgs, Diagnostic> {
        let mut module_name = None;
        let mut mode = None;
        let mut source: Option<CompilationSource> = None;
        let mut crate_name = None;
        // `source`, `file` and `dir` are mutually exclusive.
        fn assert_source_empty(source: &Option<CompilationSource>) -> Result<(), Diagnostic> {
            if let Some(source) = source {
                Err(Diagnostic::spans_error(
                    source.span,
                    "Cannot have more than one source",
                ))
            } else {
                Ok(())
            }
        }
        for meta in &self.metas {
            if let Meta::NameValue(name_value) = meta {
                let ident = match name_value.path.get_ident() {
                    Some(ident) => ident,
                    // Non-ident paths are silently ignored.
                    None => continue,
                };
                let check_str = || match &name_value.lit {
                    Lit::Str(s) => Ok(s),
                    _ => Err(err_span!(name_value.lit, "{ident} must be a string")),
                };
                if ident == "mode" {
                    let s = check_str()?;
                    match s.value().parse() {
                        Ok(mode_val) => mode = Some(mode_val),
                        Err(e) => bail_span!(s, "{}", e),
                    }
                } else if ident == "module_name" {
                    module_name = Some(check_str()?.value())
                } else if ident == "source" {
                    assert_source_empty(&source)?;
                    let code = check_str()?.value();
                    source = Some(CompilationSource {
                        kind: CompilationSourceKind::SourceCode(code),
                        span: extract_spans(&name_value).unwrap(),
                    });
                } else if ident == "file" {
                    assert_source_empty(&source)?;
                    let path = check_str()?.value().into();
                    source = Some(CompilationSource {
                        kind: CompilationSourceKind::File(path),
                        span: extract_spans(&name_value).unwrap(),
                    });
                } else if ident == "dir" {
                    if !allow_dir {
                        bail_span!(ident, "py_compile doesn't accept dir")
                    }
                    assert_source_empty(&source)?;
                    let path = check_str()?.value().into();
                    source = Some(CompilationSource {
                        kind: CompilationSourceKind::Dir(path),
                        span: extract_spans(&name_value).unwrap(),
                    });
                } else if ident == "crate_name" {
                    let name = check_str()?.parse()?;
                    crate_name = Some(name);
                }
            }
        }
        let source = source.ok_or_else(|| {
            syn::Error::new(
                self.span,
                "Must have either file or source in py_compile!()/py_freeze!()",
            )
        })?;
        Ok(PyCompileArgs {
            source,
            mode: mode.unwrap_or(Mode::Exec),
            module_name: module_name.unwrap_or_else(|| "frozen".to_owned()),
            crate_name: crate_name.unwrap_or_else(|| syn::parse_quote!(::rustpython_vm)),
        })
    }
}
/// Parses one `path = <value>` argument, where `<value>` is either a string
/// literal or a macro call (e.g. `stringify!(...)`) whose token stream is
/// treated as the string value.
fn parse_meta(input: ParseStream) -> ParseResult<Meta> {
    let path = input.call(syn::Path::parse_mod_style)?;
    let eq_token: Token![=] = input.parse()?;
    let span = input.span();
    if input.peek(LitStr) {
        Ok(Meta::NameValue(MetaNameValue {
            path,
            eq_token,
            lit: Lit::Str(input.parse()?),
        }))
    } else if let Ok(mac) = input.parse::<Macro>() {
        // Stringify the macro's tokens so downstream code only deals with
        // string literals.
        Ok(Meta::NameValue(MetaNameValue {
            path,
            eq_token,
            lit: Lit::Str(LitStr::new(&mac.tokens.to_string(), mac.span())),
        }))
    } else {
        Err(syn::Error::new(span, "Expected string or stringify macro"))
    }
}
impl Parse for PyCompileInput {
    // Parses the macro body as a comma-separated list of metas.
    fn parse(input: ParseStream) -> ParseResult<Self> {
        let span = input.cursor().span();
        let metas = input
            .parse_terminated::<Meta, Token![,]>(parse_meta)?
            .into_iter()
            .collect();
        Ok(PyCompileInput { span, metas })
    }
}
/// Fully-validated arguments of a `py_compile!`/`py_freeze!` invocation.
struct PyCompileArgs {
    source: CompilationSource,
    mode: Mode,
    module_name: String,
    // crate path to emit in the expansion (defaults to ::rustpython_vm)
    crate_name: syn::Path,
}
/// Implements `py_compile!`: compiles a single string/file source and
/// expands to a `FrozenCodeObject` literal embedding the encoded bytecode.
pub fn impl_py_compile(
    input: TokenStream,
    compiler: &dyn Compiler,
) -> Result<TokenStream, Diagnostic> {
    let input: PyCompileInput = parse2(input)?;
    // allow_dir = false: `dir = ...` is only valid in py_freeze!.
    let args = input.parse(false)?;
    let crate_name = args.crate_name;
    let code = args
        .source
        .compile_single(args.mode, args.module_name, compiler)?;
    let frozen = frozen::FrozenCodeObject::encode(&code);
    let bytes = LitByteStr::new(&frozen.bytes, Span::call_site());
    let output = quote! {
        #crate_name::frozen::FrozenCodeObject { bytes: &#bytes[..] }
    };
    Ok(output)
}
/// Implements `py_freeze!`: compiles a whole source (possibly a directory
/// tree of modules) and expands to a `FrozenLib` byte-string literal.
pub fn impl_py_freeze(
    input: TokenStream,
    compiler: &dyn Compiler,
) -> Result<TokenStream, Diagnostic> {
    let input: PyCompileInput = parse2(input)?;
    // allow_dir = true: py_freeze! accepts `dir = ...`.
    let args = input.parse(true)?;
    let crate_name = args.crate_name;
    let code_map = args.source.compile(args.mode, args.module_name, compiler)?;
    let data = frozen::FrozenLib::encode(code_map.iter().map(|(k, v)| {
        let v = frozen::FrozenModule {
            code: frozen::FrozenCodeObject::encode(&v.code),
            package: v.package,
        };
        (&**k, v)
    }));
    let bytes = LitByteStr::new(&data.bytes, Span::call_site());
    let output = quote! {
        #crate_name::frozen::FrozenLib::from_ref(#bytes)
    };
    Ok(output)
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// Regression test for #21726: an issue arose around the rules for
// subtyping of projection types that resulted in an unconstrained
// region, yielding region inference failures.
// pretty-expanded FIXME #23616
// Compile-pass regression test: main is intentionally empty.
fn main() { }
// Instantiates B with the unit impl of IntoRef; this combination used to
// produce an unconstrained region during projection subtyping (#21726).
fn foo<'a>(s: &'a str) {
    let b: B<()> = B::new(s, ());
    b.get_short();
}
/// Converts a value into some clonable type derived from a `&'a str`.
trait IntoRef<'a> {
    type T: Clone;
    fn into_ref(self, _: &'a str) -> Self::T;
}
// The unit impl projects T to the borrowed &'a str itself — the projection
// whose subtyping this test exercises.
impl<'a> IntoRef<'a> for () {
    type T = &'a str;
    fn into_ref(self, s: &'a str) -> &'a str {
        s
    }
}
/// Wrapper storing the projected type `P::T` of its `IntoRef` parameter.
struct B<'a, P: IntoRef<'a>>(P::T);
impl<'a, P: IntoRef<'a>> B<'a, P> {
    fn new(s: &'a str, i: P) -> B<'a, P> {
        B(i.into_ref(s))
    }
    // Returns a clone of the projected value (a cheap copy for &str).
    fn get_short(&self) -> P::T {
        self.0.clone()
    }
}
|
#![no_std]
#![no_main]
extern crate libc;
use libc::{c_int,c_char,printf};
// Mirror of the C-side parser state; `#[repr(C)]` keeps field order and
// layout compatible with the `natsparser` static library.
#[repr(C)]
struct NATSParser<'a> {
    // Callback invoked by the C parser; receives the user data below.
    cb: extern fn(&'a mut NATSd) -> c_int,
    // Ragel-style current-state integer owned by the C code.
    cs: c_int,
    // Borrowed user state handed back to `cb`.
    user_data: &'a mut NATSd
}
// FFI surface of the statically linked `natsparser` C library.
#[link(name = "natsparser", kind="static")]
extern "C" {
    // Initializes the parser state in place; returns a C status code.
    fn natsparser_init (
        parser: *mut NATSParser
    ) -> c_int;
    // Feeds `string_len` bytes of `string` to the parser; returns a status
    // code interpreted by `NATSParser::parse`.
    fn natsparser_parse (
        parser: *mut NATSParser,
        string: *const c_char,
        string_len: c_int
    ) -> c_int;
}
// Rust-side interpretation of `natsparser_parse` return codes.
enum ParseResult {
    ParseOK,
    ParseError
}
impl<'a> NATSParser<'a> {
    // Builds a parser and runs the C-side initializer on it.
    //
    // NOTE(review): `natsparser_init` receives a pointer to the local
    // `parser`, which is then moved out by value on return. If the C library
    // stores that pointer (rather than only writing through it), the stored
    // pointer dangles after the move — confirm against the C implementation.
    fn new(cb: extern "C" fn(&mut NATSd) -> c_int, natsd: &'a mut NATSd) -> Self {
        let mut parser = NATSParser {
            cb: cb,
            cs: 0,
            user_data: natsd
        };
        unsafe {
            natsparser_init(&mut parser);
        }
        parser
    }
    // Feeds `string` to the C parser and maps its status code to
    // `ParseResult`. Both 0 and 1 are treated as success.
    fn parse(&mut self, string: &'static str) -> ParseResult {
        match unsafe {
            natsparser_parse(self, string.as_ptr() as *const _, string.len() as c_int)
        } {
            -1 => ParseResult::ParseError,
            0 => ParseResult::ParseOK,
            1 => ParseResult::ParseOK,
            _ => ParseResult::ParseError
        }
    }
}
// User state passed through the parser to the callback; `#[repr(C)]` because
// it crosses the FFI boundary inside `NATSParser`.
#[repr(C)]
struct NATSd {
    a: i32
}
// Callback handed to the C parser: announces itself on stdout and resets `a`.
extern "C" fn mycallback(natsd: &mut NATSd) -> c_int {
    // C string: the trailing NUL is required by printf.
    let hello = "Callback called!\n\0";
    (*natsd).a = 0;
    unsafe {
        printf(hello.as_ptr() as *const _);
    }
    0
}
// `no_std`/`no_main` entry point with the C ABI signature expected by the
// platform startup code.
#[no_mangle]
pub extern "C" fn main(_argc: isize, _argv: *const *const u8) -> isize {
    // Since we are passing a C string the final null character is mandatory.
    const HELLO: &'static str = "Hello, world!\n\0";
    unsafe {
        printf(HELLO.as_ptr() as *const _);
    }
    let mut natsd = NATSd {
        a: 3
    };
    // Drive the C parser over the greeting and report the outcome.
    let mut parser = NATSParser::new(mycallback, &mut natsd);
    match parser.parse(HELLO) {
        ParseResult::ParseOK => unsafe { printf("Parse ok :)\n\0".as_ptr() as *const _); },
        ParseResult::ParseError => unsafe { printf("Parser error!\n\0".as_ptr() as *const _); },
    }
    0
}
// Required in `no_std` binaries: abort by spinning forever on panic.
#[panic_handler]
fn my_panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}
|
use std::env;
use std::fs::{metadata, read_to_string};
use std::io::{stdout, BufWriter, Write};
use std::process;
/// Entry point of a minimal cat(1) clone: prints every file named on the
/// command line to stdout, or exits with status 1 if no file was given.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        // args[0] is the program name; at least one file argument is needed.
        // (Fixed typo: "flie" -> "file".)
        eprintln!("{}: file name not given", args[0]);
        process::exit(1);
    }
    // Concatenate every file argument in order.
    for arg in args.iter().skip(1) {
        do_cat(arg);
    }
}
/// Prints the contents of the file at `arg` to stdout.
///
/// Directories are reported on stderr and skipped; an unreadable path is
/// reported and the process exits with status 1 (previously a bare
/// `unwrap()` made a nonexistent path panic instead of printing an error).
fn do_cat(arg: &String) {
    let out = stdout();
    let mut out = BufWriter::new(out.lock());
    // Stat the path first so directories can be detected before reading.
    let metadata = match metadata(arg) {
        Ok(m) => m,
        Err(_) => {
            eprintln!("can not open {}", arg);
            process::exit(1);
        }
    };
    if metadata.is_dir() {
        eprintln!(" {} is directory", arg);
    } else {
        let n = read_to_string(arg);
        if n.is_err() {
            eprintln!("can not open {}", arg);
            process::exit(1);
        } else {
            let n = n.unwrap();
            // Writing can still fail (e.g. closed pipe); report, don't panic.
            if writeln!(out, "{}", n).is_err() {
                eprintln!("can not write to stdout");
            };
        }
    }
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
// aux-build:go_trait.rs
#![feature(specialization)]
extern crate go_trait;
use go_trait::{Go,GoMut};
use std::fmt::Debug;
use std::default::Default;
// Minimal type used only to exercise the `Go`/`GoMut` specialization traits
// from the auxiliary `go_trait` crate; the bodies are intentionally empty.
struct MyThingy;
impl Go for MyThingy {
    fn go(&self, arg: isize) { }
}
impl GoMut for MyThingy {
    fn go_mut(&mut self, arg: isize) { }
}
// Compile-only test: nothing to run.
fn main() { }
|
pub struct Solution;
impl Solution {
    /// LeetCode 49: groups words that are anagrams of one another.
    ///
    /// Each word is keyed by its sorted byte sequence, so two words share a
    /// bucket iff they contain the same bytes with the same multiplicities.
    /// Group order in the result is unspecified (HashMap iteration order).
    pub fn group_anagrams(strs: Vec<String>) -> Vec<Vec<String>> {
        use std::collections::HashMap;
        let mut map: HashMap<Vec<u8>, Vec<String>> = HashMap::new();
        for word in strs {
            // Sorted bytes form a canonical key for the anagram class.
            // `sort_unstable` avoids the allocation of a stable sort;
            // stability is irrelevant for bytes.
            let mut key = word.as_bytes().to_vec();
            key.sort_unstable();
            map.entry(key).or_insert_with(Vec::new).push(word);
        }
        map.into_iter().map(|(_key, value)| value).collect()
    }
}
// Verifies `group_anagrams` against the canonical LeetCode example. Both the
// inner groups and the outer list are sorted before comparison because the
// implementation's ordering is unspecified.
#[test]
fn test0049() {
    let strs = vec![
        "eat".to_string(),
        "tea".to_string(),
        "tan".to_string(),
        "ate".to_string(),
        "nat".to_string(),
        "bat".to_string(),
    ];
    let mut res = Solution::group_anagrams(strs);
    for group in &mut res {
        group.sort();
    }
    res.sort();
    assert_eq!(
        res,
        vec![
            vec!["ate".to_string(), "eat".to_string(), "tea".to_string()],
            vec!["bat".to_string()],
            vec!["nat".to_string(), "tan".to_string()],
        ],
    );
}
|
extern crate gcc;
use std::process::Command;
use std::path::Path;
// Build script: ensures the `easy-ecc` git submodule is present, then
// compiles it into a static library via the `gcc` crate (the pre-`cc`
// name of that crate).
fn main() {
    // check if `easy-ecc` has been downloaded.
    if !Path::new("dep/easy-ecc/.git").exists() {
        // if not, tell git to initialize submodules.
        let cmd = Command::new("git")
            .args(&["submodule", "update", "--init"])
            .status()
            .unwrap();
        // if we got a nonzero exit code, don't continue.
        if !cmd.success() {
            panic!("failed to initialize git submodule: `easy-ecc`")
        }
    }
    // compile `easy-ecc` into a static lib.
    gcc::compile_library("libp256.a",&["dep/easy-ecc/ecc.c"]);
}
|
use crate::ast;
use crate::{
Parse, ParseError, Parser, Peek, Peeker, Resolve, ResolveError, ResolveOwned, Spanned, Storage,
ToTokens,
};
use runestick::Source;
use std::borrow::Cow;
/// Parse an object expression.
///
/// Covers both named objects (`Foo { .. }`) and anonymous objects
/// (`#{ .. }`); see [`ObjectIdent`].
///
/// # Examples
///
/// ```rust
/// use rune::{testing, ast};
///
/// testing::roundtrip::<ast::ExprObject>("Foo {\"foo\": 42}");
/// testing::roundtrip::<ast::ExprObject>("#{\"foo\": 42}");
/// testing::roundtrip::<ast::ExprObject>("#{\"foo\": 42,}");
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Parse, ToTokens, Spanned)]
pub struct ExprObject {
    /// Attributes associated with object.
    #[rune(iter, meta)]
    pub attributes: Vec<ast::Attribute>,
    /// An object identifier.
    #[rune(meta)]
    pub ident: ObjectIdent,
    /// Assignments in the object, between braces and comma-separated.
    pub assignments: ast::Braced<FieldAssign, T![,]>,
}
impl Peek for ExprObject {
    /// An object expression starts with either a path-like identifier or a
    /// `#`, in both cases immediately followed by an opening brace.
    fn peek(p: &mut Peeker<'_>) -> bool {
        matches!(
            (p.nth(0), p.nth(1)),
            (K![ident], K!['{']) | (K![#], K!['{'])
        )
    }
}
/// A literal object identifier.
#[derive(Debug, Clone, PartialEq, Eq, ToTokens, Spanned)]
pub enum ObjectIdent {
    /// An anonymous object, introduced by `#`.
    Anonymous(T![#]),
    /// A named object, introduced by a path.
    Named(ast::Path),
}
impl Parse for ObjectIdent {
    // `#` selects the anonymous form; anything else is parsed as a path.
    fn parse(p: &mut Parser) -> Result<Self, ParseError> {
        Ok(match p.nth(0)? {
            K![#] => Self::Anonymous(p.parse()?),
            _ => Self::Named(p.parse()?),
        })
    }
}
/// A literal object field.
#[derive(Debug, Clone, PartialEq, Eq, ToTokens, Spanned)]
pub struct FieldAssign {
    /// The key of the field.
    pub key: ObjectKey,
    /// The assigned expression of the field; `None` for shorthand fields
    /// without `: expr`.
    #[rune(iter)]
    pub assign: Option<(T![:], ast::Expr)>,
}
/// Parse an object literal.
///
/// # Examples
///
/// ```rust
/// use rune::{testing, ast};
///
/// testing::roundtrip::<ast::FieldAssign>("\"foo\": 42");
/// testing::roundtrip::<ast::FieldAssign>("\"foo\": 42");
/// testing::roundtrip::<ast::FieldAssign>("\"foo\": 42");
/// ```
impl Parse for FieldAssign {
    fn parse(p: &mut Parser) -> Result<Self, ParseError> {
        let key = p.parse()?;
        // The value part is optional: only parse it if a `:` follows the key.
        let assign = if p.peek::<T![:]>()? {
            let colon = p.parse()?;
            let expr = p.parse::<ast::Expr>()?;
            Some((colon, expr))
        } else {
            None
        };
        Ok(Self { key, assign })
    }
}
/// Possible literal object keys.
#[derive(Debug, Clone, PartialEq, Eq, ToTokens, Spanned)]
pub enum ObjectKey {
    /// A literal string (with escapes).
    LitStr(ast::LitStr),
    /// A path, usually an identifier.
    Path(ast::Path),
}
/// Parse an object literal.
///
/// # Examples
///
/// ```rust
/// use rune::{testing, ast};
///
/// testing::roundtrip::<ast::ObjectKey>("foo");
/// testing::roundtrip::<ast::ObjectKey>("\"foo \\n bar\"");
/// ```
impl Parse for ObjectKey {
    // Only string literals and identifiers are valid keys; anything else is
    // a parse error at the offending token.
    fn parse(p: &mut Parser) -> Result<Self, ParseError> {
        Ok(match p.nth(0)? {
            K![str] => Self::LitStr(p.parse()?),
            K![ident] => Self::Path(p.parse()?),
            _ => {
                return Err(ParseError::expected(&p.tok_at(0)?, "literal object key"));
            }
        })
    }
}
/// A tag object to help peeking for anonymous object case to help
/// differentiate anonymous objects and attributes when parsing block
/// expressions.
pub struct AnonExprObject;
impl Peek for AnonExprObject {
    // Matches exactly `#` followed by `{` (the anonymous-object prefix).
    fn peek(p: &mut Peeker<'_>) -> bool {
        matches!((p.nth(0), p.nth(1)), (K![#], K!['{']))
    }
}
impl<'a> Resolve<'a> for ObjectKey {
    type Output = Cow<'a, str>;
    /// Resolves the key to its textual form: the unescaped string contents
    /// for string keys, or the identifier text for path keys. Path keys
    /// consisting of more than a single identifier are a resolve error.
    fn resolve(&self, storage: &Storage, source: &'a Source) -> Result<Self::Output, ResolveError> {
        Ok(match self {
            Self::LitStr(lit_str) => lit_str.resolve(storage, source)?,
            Self::Path(path) => {
                let ident = match path.try_as_ident() {
                    Some(ident) => ident,
                    None => {
                        return Err(ResolveError::expected(path, "object key"));
                    }
                };
                ident.resolve(storage, source)?
            }
        })
    }
}
impl ResolveOwned for ObjectKey {
    type Owned = String;
    // Owned variant: delegates to `resolve` and detaches from the source.
    fn resolve_owned(
        &self,
        storage: &Storage,
        source: &Source,
    ) -> Result<Self::Owned, ResolveError> {
        Ok(self.resolve(storage, source)?.into_owned())
    }
}
|
pub mod coingecko;
pub mod cryptocompare;
|
use std::{time::Duration, collections::HashMap};
use ggez::audio as gaudio;
use ggez::audio::SoundSource;
// Re-exports of ggez audio types so the rest of the crate doesn't name ggez
// paths directly.
pub type SoundData = gaudio::SoundData;
pub type PlayableSound = gaudio::Source;
// Opaque handle used to look up a playing sound in `SoundManager`.
pub type SoundHandler = usize;
/// Playback options applied to a sound when it is started.
#[derive(Clone)]
pub struct SoundPlayFlags {
    /// Fade-in duration in milliseconds.
    fadein_mills: u64,
    /// Playback rate multiplier (1.0 plays at the original pitch).
    pitch: f32,
    /// Whether the sound loops.
    repeat: bool,
    /// Volume multiplier; defaults to 1.0.
    volume: f32,
}
impl SoundPlayFlags {
    /// Creates a fully specified set of playback flags.
    pub fn new(
        fadein_mills: u64,
        pitch: f32,
        repeat: bool,
        volume: f32,
    ) -> SoundPlayFlags {
        // Field-init shorthand replaces the redundant `field: field` form.
        SoundPlayFlags {
            fadein_mills,
            pitch,
            repeat,
            volume,
        }
    }
}
impl Default for SoundPlayFlags {
    /// 50 ms fade-in, original pitch, no looping, full volume.
    fn default() -> Self {
        SoundPlayFlags {
            fadein_mills: 50,
            pitch: 1.0,
            repeat: false,
            volume: 1.0,
        }
    }
}
// Tracks currently playing sounds keyed by monotonically increasing handles.
pub struct SoundManager {
    // Sounds that have been started; entries are never removed here, not
    // even by `stop` (NOTE(review): this grows unboundedly — confirm whether
    // finished sounds should be pruned).
    playing_map: HashMap<SoundHandler, PlayableSound>,
    // Next handle to hand out from `issue_sound_handler`.
    next_sound_handler: SoundHandler,
}
impl SoundManager {
    // Creates an empty manager with handles starting at 0.
    pub fn new() -> Self {
        SoundManager {
            playing_map: HashMap::new(),
            next_sound_handler: 0,
        }
    }
    // Starts playing `sound_data`, applying `flags` if given, and returns a
    // handle for later control. Panics if ggez fails to create or start the
    // source.
    pub fn play(
        &mut self,
        ctx: &mut ggez::Context,
        sound_data: SoundData,
        flags: Option<SoundPlayFlags>,
    ) -> SoundHandler {
        let mut sound = PlayableSound::from_data(ctx, sound_data).unwrap();
        if let Some(flags) = flags {
            sound.set_fade_in(Duration::from_millis(flags.fadein_mills));
            sound.set_pitch(flags.pitch);
            sound.set_repeat(flags.repeat);
            sound.set_volume(flags.volume);
        }
        let handler = self.issue_sound_handler();
        sound.play_later().unwrap();
        self.playing_map.insert(handler, sound);
        handler
    }
    // Returns the next handle and advances the counter.
    fn issue_sound_handler(&mut self) -> SoundHandler {
        let ret = self.next_sound_handler;
        self.next_sound_handler += 1;
        ret
    }
    // Borrow the sound for `handler`; panics if the handle is unknown.
    pub fn ref_sound(&self, handler: SoundHandler) -> &PlayableSound {
        self.playing_map.get(&handler).unwrap()
    }
    // Mutable variant of `ref_sound`; panics if the handle is unknown.
    pub fn ref_sound_mut(&mut self, handler: SoundHandler) -> &mut PlayableSound {
        self.playing_map.get_mut(&handler).unwrap()
    }
    // Sets the volume of every tracked sound to `volume`.
    pub fn change_global_volume(&mut self, volume: f32) {
        for (_, bgm) in self.playing_map.iter_mut() {
            bgm.set_volume(volume);
        }
    }
    // Stops the sound for `handler` if it exists; the entry stays in the map.
    pub fn stop(&mut self, handler: SoundHandler) {
        if let Some(sound) = self.playing_map.get_mut(&handler) {
            sound.stop();
        }
    }
}
|
use yarapi::rest::activity::DefaultActivity;
use yarapi::rest::Session;
/// Placeholder for attaching a debugger to a running activity.
///
/// Currently unimplemented: the arguments are dropped immediately and the
/// function panics via `unimplemented!`.
pub async fn attach_debugger(session: Session, activity: DefaultActivity) -> anyhow::Result<()> {
    drop(session);
    drop(activity);
    unimplemented!()
}
|
extern crate cmake;
use std::env;
// Build script: compiles the vendored `wabt` tree with CMake (tests off) and
// emits the link directives for the resulting static library.
fn main() {
    let dst = cmake::Config::new("wabt")
        .define("BUILD_TESTS", "OFF")
        .build();
    println!("cargo:rustc-link-search=native={}/build/", dst.display());
    println!("cargo:rustc-link-lib=static=wabt");
    // We need to link against C++ std lib
    if let Some(cpp_stdlib) = get_cpp_stdlib() {
        println!("cargo:rustc-link-lib={}", cpp_stdlib);
    }
}
// See https://github.com/alexcrichton/gcc-rs/blob/88ac58e25/src/lib.rs#L1197
//
// Picks the C++ standard library to link for the current $TARGET:
// none for MSVC (linked implicitly), libc++ on Darwin/FreeBSD, a static
// libstdc++ on musl, and dynamic libstdc++ everywhere else. Returns `None`
// when $TARGET is unset.
fn get_cpp_stdlib() -> Option<String> {
    let target = env::var("TARGET").ok()?;
    if target.contains("msvc") {
        return None;
    }
    let lib = if target.contains("darwin") || target.contains("freebsd") {
        "c++"
    } else if target.contains("musl") {
        "static=stdc++"
    } else {
        "stdc++"
    };
    Some(lib.to_string())
}
|
//! GraphQL expression plan.
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::rc::Rc;
use timely::dataflow::channels::pact::Pipeline;
use timely::dataflow::operators::generic::Operator;
use timely::dataflow::operators::{Delay, Exchange};
use timely::dataflow::scopes::child::Iterative;
use timely::dataflow::{Scope, Stream};
use timely::progress::Timestamp;
use differential_dataflow::hashable::Hashable;
use differential_dataflow::lattice::Lattice;
use graphql_parser::parse_query;
use graphql_parser::query::{Definition, Document, OperationDefinition, Selection, SelectionSet};
use graphql_parser::query::{Name, Value as GqValue};
use serde_json::Map;
use serde_json::Value as JValue;
use crate::binding::Binding;
use crate::domain::Domain;
use crate::plan::pull_v2::{PathId, Pull, PullAll, PullLevel};
use crate::plan::{gensym, Dependencies, Implementable};
use crate::plan::{Hector, Plan};
use crate::timestamp::{Rewind, Time};
use crate::ShutdownHandle;
use crate::{Aid, Output, Value, Var, VariableMap};
/// A plan for GraphQL queries, e.g. `{ Heroes { name age weight } }`.
#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Serialize, Deserialize)]
pub struct GraphQl {
    /// String representation of GraphQL query
    pub query: String,
    /// Cached paths, derived once from the parsed query AST.
    paths: Vec<Pull>,
    /// Required attributes to filter entities by
    required_aids: Vec<Aid>,
}
impl GraphQl {
    /// Creates a new GraphQl instance by parsing the AST obtained
    /// from the provided query.
    ///
    /// Panics if the query does not parse.
    pub fn new(query: String) -> Self {
        let ast = parse_query(&query).expect("graphQL ast parsing failed");
        // Start from an empty Hector plan with a single root variable.
        let empty_plan = Hector {
            variables: vec![0],
            bindings: vec![],
        };
        GraphQl {
            query,
            paths: ast.into_paths(empty_plan),
            required_aids: vec![],
        }
    }
    /// Creates a new GraphQl starting from the specified root plan.
    ///
    /// Panics if the query does not parse.
    pub fn with_plan(root_plan: Plan, query: String) -> Self {
        let ast = parse_query(&query).expect("graphQL ast parsing failed");
        // Seed path derivation with the root plan's variables and bindings.
        let paths = ast.into_paths(Hector {
            variables: root_plan.variables(),
            bindings: root_plan.into_bindings(),
        });
        GraphQl {
            query,
            paths,
            required_aids: vec![],
        }
    }
    /// Creates a new GraphQl that filters top-level entities down to
    /// only those with all of the required Aids present.
    pub fn with_required_aids(query: String, required_aids: Vec<Aid>) -> Self {
        let mut query = GraphQl::new(query);
        query.required_aids = required_aids;
        query
    }
}
// Conversion from a piece of the GraphQL AST into pull paths, threading the
// accumulated Hector plan down through the tree.
trait IntoPaths {
    fn into_paths(&self, root_plan: Hector) -> Vec<Pull>;
}
impl IntoPaths for Document {
    /// Transforms the provided GraphQL query AST into corresponding pull
    /// paths. The structure of a typical parsed ast looks like this:
    ///
    /// ```
    /// Document {
    ///     definitions: [
    ///         Operation(SelectionSet(SelectionSet {
    ///             items: [
    ///                 Field(Field {
    ///                     name: ...,
    ///                     selection_set: SelectionSet(...}
    ///                 }),
    ///                 ...
    ///             ]
    ///         }))
    ///     ]
    /// }
    /// ```
    fn into_paths(&self, root_plan: Hector) -> Vec<Pull> {
        self.definitions
            .iter()
            .flat_map(|definition| definition.into_paths(root_plan.clone()))
            .collect()
    }
}
impl IntoPaths for Definition {
    // Only operations are supported; fragments are not implemented yet.
    fn into_paths(&self, root_plan: Hector) -> Vec<Pull> {
        match self {
            Definition::Operation(operation) => operation.into_paths(root_plan),
            Definition::Fragment(_) => unimplemented!(),
        }
    }
}
impl IntoPaths for OperationDefinition {
    // Only bare selection sets are supported; named queries, mutations and
    // subscriptions are not implemented yet.
    fn into_paths(&self, root_plan: Hector) -> Vec<Pull> {
        use OperationDefinition::{Query, SelectionSet};
        match self {
            Query(_) => unimplemented!(),
            SelectionSet(selection_set) => {
                selection_set_to_paths(&selection_set, root_plan, &[], &[])
            }
            _ => unimplemented!(),
        }
    }
}
/// Gathers the fields that we want to pull at a specific level. These
/// only include fields that do not refer to nested entities.
fn pull_attributes(selection_set: &SelectionSet) -> Vec<Aid> {
    let mut attributes: Vec<Aid> = Vec::new();
    for item in selection_set.items.iter() {
        match item {
            Selection::Field(field) => {
                // A field without its own selection set is a plain attribute;
                // fields with children become nested levels elsewhere.
                if field.selection_set.items.is_empty() {
                    attributes.push(field.name.to_string());
                }
            }
            // Fragment spreads / inline fragments are not supported yet.
            _ => unimplemented!(),
        }
    }
    attributes
}
/// Takes a GraphQL `SelectionSet` and recursively transforms it into
/// `PullLevel`s.
///
/// A `SelectionSet` consists of multiple items. We're interested in
/// items of type `Field`, which might contain a nested `SelectionSet`
/// themselves. We iterate through each field and construct (1) a
/// parent path, which describes how to traverse to the current
/// nesting level ("vertical"), and (2) pull attributes, which
/// describe the attributes pulled at the current nesting level
/// ("horizontal"); only attributes at the lowest nesting level can be
/// part of a `PullLevel`'s `pull_attributes`.
fn selection_set_to_paths(
    selection_set: &SelectionSet,
    mut plan: Hector,
    arguments: &[(Name, GqValue)],
    parent_path: &[String],
) -> Vec<Pull> {
    // We must first construct the correct plan for this level,
    // starting from that for the parent level. We do this even if no
    // attributes are actually pulled at this level. In that case we
    // will not synthesize this plan, but it still is required in
    // order to pass all necessary bindings to nested levels.
    // For any level after the first, we must introduce a binding
    // linking the parent level to the current one.
    if !parent_path.is_empty() {
        let parent = *plan.variables.last().expect("plan has no variables");
        let this = plan.variables.len() as Var;
        // The attribute that leads from the parent entity to this one.
        let aid = parent_path.last().unwrap();
        plan.variables.push(this);
        plan.bindings.push(Binding::attribute(parent, aid, this));
    }
    let this = *plan.variables.last().expect("plan has no variables");
    // Then we must introduce additional bindings for any arguments.
    for (aid, v) in arguments.iter() {
        // This variable is only relevant for tying the two clauses
        // together, we do not want to include it into the output
        // projection.
        let vsym = gensym();
        plan.bindings.push(Binding::attribute(this, aid, vsym));
        plan.bindings
            .push(Binding::constant(vsym, v.clone().into()));
    }
    // We will first gather the attributes that need to be retrieved
    // at this level. These are the fields that do not refer to a
    // nested entity. This is the easy part.
    let pull_attributes = pull_attributes(selection_set);
    // Now we process nested levels.
    let nested_levels = selection_set
        .items
        .iter()
        .flat_map(|item| match item {
            Selection::Field(field) => {
                if !field.selection_set.items.is_empty() {
                    // Extend the traversal path with this field and recurse,
                    // handing the nested level a copy of the current plan.
                    let mut parent_path = parent_path.to_vec();
                    parent_path.push(field.name.to_string());
                    selection_set_to_paths(
                        &field.selection_set,
                        plan.clone(),
                        &field.arguments,
                        &parent_path,
                    )
                } else {
                    vec![]
                }
            }
            _ => unimplemented!(),
        })
        .collect::<Vec<Pull>>();
    let mut levels = nested_levels;
    // Here we don't actually want to include the current plan, if
    // we're not interested in any attributes at this level.
    if !pull_attributes.is_empty() {
        if plan.bindings.is_empty() {
            // No constraints at all: pull the attributes for every entity.
            levels.push(Pull::All(PullAll { pull_attributes }));
        } else {
            levels.push(Pull::Level(PullLevel {
                pull_attributes,
                path_attributes: parent_path.to_vec(),
                pull_variable: this,
                plan: Box::new(Plan::Hector(plan)),
                cardinality_many: false,
            }));
        }
    }
    levels
}
// @TODO read this from schema
// Whether an attribute holds a single value or a set of values; used below
// to decide between overwriting and appending into a JSON array.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
enum Cardinality {
    One,
    Many,
}
impl GraphQl {
    /// See Implementable::dependencies, as GraphQl v2 can't implement
    /// Implementable directly.
    pub fn dependencies(&self) -> Dependencies {
        self.paths.iter().map(|path| path.dependencies()).sum()
    }
    /// See Implementable::implement, as GraphQl v2 can't implement
    /// Implementable directly.
    ///
    /// Implements each pull path as a dataflow, folds the per-path change
    /// streams into per-time JSON states, and finally merges those states
    /// into full entity snapshots emitted as `Output::Json`.
    fn implement<'b, S>(
        &self,
        nested: &mut Iterative<'b, S, u64>,
        domain: &mut Domain<Aid, S::Timestamp>,
        _local_arrangements: &VariableMap<Self::A, Iterative<'b, S, u64>>,
    ) -> (Stream<S, Output>, ShutdownHandle)
    where
        S: Scope,
        S::Timestamp: Timestamp + Lattice + Rewind + std::convert::Into<Time>,
    {
        use timely::dataflow::operators::Concatenate;
        // Per-timestamp JSON accumulator shared between the two operator
        // stages below.
        let states = Rc::new(RefCell::new(HashMap::new()));
        let dummy = HashMap::new();
        let mut paths = {
            let mut paths_map = self
                .paths
                .iter()
                .flat_map(|path| {
                    let (streams, shutdown) = path.implement(nested, domain, &dummy);
                    // NOTE(review): shutdown handles are deliberately leaked
                    // here; the combined handle returned below is empty, so
                    // these dataflows can never be shut down individually.
                    std::mem::forget(shutdown);
                    streams
                })
                .collect::<HashMap<PathId, _>>();
            let mut paths = paths_map.drain().collect::<Vec<(PathId, _)>>();
            // Important for cross-worker determinism.
            paths.sort_by_key(|(path_id, _)| path_id.clone());
            paths
        };
        // Stage 1: for each path, route all tuples of one root entity to the
        // same worker, order them by time, and fold them into the shared
        // per-time JSON state. Emits the root keys that changed.
        let streams = paths.drain(..).map(|(path_id, stream)| {
            let states = states.clone();
            let mut buffer = HashMap::new();
            let mut vector = Vec::new();
            stream
                .exchange(|(path, _t, _diff)| path[0].clone().hashed())
                .delay(|(_path, t, _diff), _cap| t.clone())
                .unary_frontier(Pipeline, "Changes", move |_cap, _info| {
                    move |input, output| {
                        input.for_each(|cap, data| {
                            data.swap(&mut vector);
                            buffer
                                .entry(cap.retain())
                                .or_insert_with(Vec::new)
                                .extend(vector.drain(..));
                        });
                        let mut states = states.borrow_mut();
                        // Only process times that are complete w.r.t. the
                        // input frontier, in ascending order.
                        let mut sorted_times: Vec<_> = buffer
                            .keys()
                            .filter(|cap| !input.frontier().less_equal(cap.time()))
                            .cloned()
                            .collect();
                        sorted_times.sort_by_key(|cap| cap.time().clone());
                        for cap in sorted_times.drain(..) {
                            if let Some(mut paths_at_time) = buffer.remove(&cap) {
                                let mut changes = Vec::<String>::new();
                                for (mut path, t, diff) in paths_at_time.drain(..) {
                                    let aid = path_id
                                        .last()
                                        .expect("malformed path_id; no aid found")
                                        .clone();
                                    // @TODO read from schema
                                    //
                                    // For cardinality many fields, we need to wrap values in
                                    // an array, rather than overwriting.
                                    let cardinality = &Cardinality::One;
                                    let value = JValue::from(
                                        path.pop().expect("malformed path; no value found"),
                                    );
                                    // We construct the pointer somewhat awkwardly here,
                                    // ignoring all but the last attribute. This has the effect
                                    // of flattening the resulting json maps.
                                    let pointer = if path_id.len() == 1 {
                                        interleave(path, &path_id[..])
                                    } else {
                                        let root_eid = path
                                            .first()
                                            .expect("malformed path; no root eid found")
                                            .clone();
                                        // NOTE(review): both branches below are
                                        // identical except for the shadowed
                                        // `aid`; the `db__ident` special case
                                        // looks unfinished — confirm intent.
                                        if &aid == "db__ident" {
                                            let aid = path_id[path_id.len() - 2].clone();
                                            interleave(vec![root_eid], &[aid])
                                        } else {
                                            interleave(vec![root_eid], &[aid])
                                        }
                                    };
                                    let mut map = states
                                        .entry(cap.time().clone())
                                        .or_insert_with(|| JValue::Object(Map::new()));
                                    match cardinality {
                                        &Cardinality::One => {
                                            *pointer_mut(&mut map, &pointer, Cardinality::One) =
                                                value;
                                        }
                                        &Cardinality::Many => {
                                            pointer_mut(&mut map, &pointer, Cardinality::Many)
                                                .as_array_mut()
                                                .expect("not an array")
                                                .push(value);
                                        }
                                    }
                                    // Record the root entity id as changed.
                                    changes.push(pointer[0].clone());
                                }
                                output.session(&cap).give_iterator(changes.drain(..));
                            }
                        }
                    }
                })
        });
        let mut change_keys = HashMap::new();
        let mut excise_keys = Vec::new();
        let mut vector = Vec::new();
        let required_aids = self.required_aids.clone();
        let mut merged = Map::new();
        // Stage 2: merge completed per-time diffs into `merged`, drop
        // entities missing any required aid, and emit one snapshot per
        // changed key.
        let snapshots = nested.parent.concatenate(streams).unary_notify(
            Pipeline,
            "GraphQl",
            vec![],
            move |input, output, notificator| {
                input.for_each(|cap, data| {
                    data.swap(&mut vector);
                    change_keys
                        .entry(cap.time().clone())
                        .or_insert_with(HashSet::new)
                        .extend(vector.drain(..));
                    notificator.notify_at(cap.retain());
                });
                let mut states = states.borrow_mut();
                notificator.for_each(|cap, _, _| {
                    // Fold every completed state up to this time into the
                    // merged view, oldest first.
                    let mut sorted_times: Vec<_> = states
                        .keys()
                        .filter(|t| *t <= cap.time())
                        .cloned()
                        .collect();
                    sorted_times.sort();
                    if !sorted_times.is_empty() {
                        for cap in sorted_times {
                            let available_states = states.remove(&cap).expect("key not found");
                            match available_states {
                                JValue::Object(map) => {
                                    for (eid, diffs) in map {
                                        let entry = merged.entry(eid).or_insert_with(|| {
                                            JValue::Object(Map::<String, JValue>::new())
                                        });
                                        match diffs {
                                            JValue::Object(diff_map) => {
                                                entry
                                                    .as_object_mut()
                                                    .expect("couldn't unwrap entry")
                                                    .extend(diff_map.into_iter());
                                            }
                                            _ => panic!("wrong diff type"),
                                        }
                                    }
                                }
                                _ => panic!("not an object"),
                            }
                        }
                        if let Some(mut keys) = change_keys.remove(cap.time()) {
                            // Excise entities missing any required attribute.
                            for key in keys.iter() {
                                if let Some(ref snapshot) = merged.get(key) {
                                    for required_aid in required_aids.iter() {
                                        if !snapshot.as_object().unwrap().contains_key(required_aid)
                                        {
                                            excise_keys.push(key.clone());
                                        }
                                    }
                                }
                            }
                            for key in excise_keys.drain(..) {
                                merged.remove(&key);
                            }
                            let t = cap.time().clone();
                            // NOTE(review): the output name is hard-coded to
                            // "test" — presumably a placeholder; confirm.
                            let snapshots = keys.drain().flat_map(|key| {
                                if let Some(snapshot) = merged.get(&key) {
                                    Some(Output::Json(
                                        "test".to_string(),
                                        snapshot.clone(),
                                        t.clone().into(),
                                        1,
                                    ))
                                } else {
                                    None
                                }
                            });
                            output.session(&cap).give_iterator(snapshots);
                        }
                    }
                });
            },
        );
        (snapshots, ShutdownHandle::empty())
    }
}
// Alternates `values` and `constants` into one string path, starting with a
// value: v0, a0, v1, a1, ... Used to build JSON pointers of the form
// [eid, attribute, ...].
//
// NOTE(review): the first branch drains an empty `values` vector and thus
// always returns an empty Vec — it looks like it may have been meant to
// handle `constants.is_empty()` only; confirm intended behavior before
// changing it.
fn interleave(mut values: Vec<Value>, constants: &[Aid]) -> Vec<String> {
    if values.is_empty() {
        values
            .drain(..)
            .map(|v| JValue::from(v).as_str().unwrap().to_string())
            .collect()
    } else if constants.is_empty() {
        // Nothing to interleave: just stringify the values.
        values
            .drain(..)
            .map(|v| JValue::from(v).as_str().unwrap().to_string())
            .collect()
    } else {
        let size: usize = values.len() + constants.len();
        // + 2, because we know there'll be a and v coming...
        let mut result: Vec<String> = Vec::with_capacity(size + 2);
        let mut next_const = 0;
        // Reverse so `pop` yields the values in original order.
        let mut values = values.drain(..).rev().collect::<Vec<Value>>();
        for i in 0..size {
            if i % 2 == 0 {
                // on even indices we take from the result tuple
                let v: Value = values.pop().unwrap();
                result.push(JValue::from(v).as_str().unwrap().to_string());
            } else {
                // on odd indices we interleave an attribute
                let a = constants[next_const].to_string();
                result.push(a);
                next_const += 1;
            }
        }
        result
    }
}
// Walks (and lazily creates) the JSON structure addressed by `tokens`,
// returning a mutable reference to the value at the end of the path.
// Missing intermediate nodes become objects; a missing final node becomes an
// object for `Cardinality::One` and an empty array for `Cardinality::Many`.
// Panics when the path runs into a non-object value.
fn pointer_mut<'a>(
    v: &'a mut JValue,
    tokens: &[String],
    cardinality: Cardinality,
) -> &'a mut JValue {
    if tokens.is_empty() {
        v
    } else {
        let mut target = v;
        for (idx, token) in tokens.iter().enumerate() {
            // borrow checker gets confused about `target` being
            // mutably borrowed too many times because of the loop
            // this once-per-loop binding makes the scope clearer and
            // circumvents the error
            let target_once = target;
            target = match *target_once {
                JValue::Object(ref mut map) => {
                    if !map.contains_key(token) {
                        if cardinality == Cardinality::One || idx < tokens.len() - 1 {
                            map.insert(token.to_string(), JValue::Object(Map::new()));
                        } else {
                            map.insert(token.to_string(), JValue::Array(Vec::new()));
                        }
                    }
                    map.get_mut(token).unwrap()
                }
                // JValue::Array(ref mut list) => {
                //     dbg!(&token);
                //     dbg!(&tokens);
                //     parse_index(&token)
                //         .and_then(move |x| list.get_mut(x))
                //         .unwrap()
                // }
                _ => panic!("failed to acquire pointer to {:?} at {:?}", tokens, token),
            };
        }
        target
    }
}
// fn parse_index(s: &str) -> Option<usize> {
// if s.starts_with('+') || (s.starts_with('0') && s.len() != 1) {
// return None;
// }
// s.parse().ok()
// }
|
use std::rc::Rc;
use crate::{Material, Point3, Ray, Vec3};
// Records a single ray-surface intersection.
pub struct HitRecord {
    // Point of intersection.
    pub p: Point3,
    // Surface normal at `p`, oriented against the incoming ray
    // (see `face_normal`).
    pub normal: Vec3,
    // Material of the surface that was hit.
    pub mat: Rc<dyn Material>,
    // Ray parameter at the intersection.
    pub t: f64,
    // True when the ray hit the surface from outside.
    pub front_face: bool,
}
impl HitRecord {
    /// Determines the hit side and the ray-facing normal.
    ///
    /// Returns `(front_face, normal)`: `front_face` is true when the ray
    /// direction opposes `outward_normal`, in which case the outward normal
    /// is kept; otherwise it is flipped so the normal always points against
    /// the incident ray.
    pub fn face_normal(r: &Ray, outward_normal: Vec3) -> (bool, Vec3) {
        if r.direction().dot(outward_normal) < 0.0 {
            (true, outward_normal)
        } else {
            (false, -outward_normal)
        }
    }
}
// Anything a ray can intersect. Returns the closest hit with ray parameter
// in `(t_min, t_max)`, or `None` if the ray misses.
pub trait Hittable {
    fn hit(&self, r: &Ray, t_min: f64, t_max: f64) -> Option<HitRecord>;
}
|
use crate::{CameraState, ScaledCamera2dBundle, ScaledOrthographicProjection};
use game_lib::{
bevy::{
prelude::*,
render::camera::{Camera, CameraProjection},
},
tracing::{self, instrument},
};
// Startup system: spawns the UI camera and a scaled 2D main camera, then
// stores both entity ids in the `CameraState` resource.
#[instrument(skip(commands))]
pub fn setup(mut commands: Commands) {
    let ui_camera = commands.spawn_bundle(UiCameraBundle::default()).id();
    let main_camera = commands
        .spawn_bundle(ScaledCamera2dBundle {
            orthographic_projection: ScaledOrthographicProjection {
                // World units per screen; all other projection fields default.
                zoom: 32.0,
                ..Default::default()
            },
            ..Default::default()
        })
        .id();
    commands.insert_resource(CameraState {
        main_camera,
        ui_camera,
    });
}
// Reacts to `ScaledOrthographicProjection` changes: recomputes the
// projection for the camera's window size and refreshes the cached matrix
// and depth calculation on the `Camera` component.
#[instrument(skip(windows, query))]
pub fn camera_projection_changed(
    windows: Res<Windows>,
    mut query: Query<
        (&mut Camera, &mut ScaledOrthographicProjection),
        Changed<ScaledOrthographicProjection>,
    >,
) {
    for (mut camera, mut camera_projection) in query.iter_mut() {
        // Skip cameras whose window no longer exists.
        if let Some(window) = windows.get(camera.window) {
            camera_projection.update(window.width(), window.height());
            camera.projection_matrix = camera_projection.get_projection_matrix();
            camera.depth_calculation = camera_projection.depth_calculation();
        }
    }
}
|
#[macro_use]
extern crate criterion;
extern crate telamon_gen;
use criterion::Criterion;
use telamon_gen::lexer;
use std::ffi::OsStr;
use std::fs;
/// Registers one lexer benchmark per `.exh` file under `cc_tests/src/`.
///
/// Panics if the directory or a matching file cannot be opened.
fn criterion_benchmark(c: &mut Criterion) {
    let entries = fs::read_dir("cc_tests/src/").unwrap();
    // `flatten` skips unreadable directory entries (was a nested `if let`).
    for entry in entries.flatten() {
        let path = entry.path();
        // Idiomatic comparison instead of `.eq(&Some(...))`.
        if path.extension() == Some(OsStr::new("exh")) {
            let mut input = fs::File::open(&path).unwrap();
            let mut name = String::from("lexer ");
            name.push_str(path.file_stem().unwrap().to_str().unwrap());
            // NOTE(review): `input`'s cursor is shared across iterations, so
            // iterations after the first presumably see an exhausted file —
            // consider reopening (or seeking to 0) inside `b.iter`.
            c.bench_function(&name, move |b| {
                b.iter(|| lexer::Lexer::from_input(&mut input))
            });
        }
    }
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
|
use futures::FutureExt;
use grpc_binary_logger::{sink, BinaryLoggerLayer, Sink};
use grpc_binary_logger_proto::GrpcLogEntry;
use grpc_binary_logger_test_proto::{
test_client::TestClient,
test_server::{self, TestServer},
};
use std::{
net::SocketAddr,
sync::{Arc, Mutex},
};
use tokio_stream::wrappers::TcpListenerStream;
use tonic::transport::{Channel, Server};
// Test harness pairing an in-process gRPC server (wrapped in the binary
// logger layer) with a connected client.
#[derive(Debug)]
pub struct Fixture {
    // `http://host:port` address the server is listening on.
    pub local_addr: String,
    pub client: TestClient<Channel>,
    // Signals the server task to shut down; fired from `Drop`.
    shutdown_tx: tokio::sync::oneshot::Sender<()>,
}
impl Fixture {
    /// Start up a grpc server listening on `port`, returning
    /// a fixture with the server and client.
    ///
    /// Binds to an OS-assigned port on 127.0.0.1, serves `svc` with
    /// `BinaryLoggerLayer(sink)` on a background task, and connects a client
    /// to it.
    pub async fn new<T, K>(svc: T, sink: K) -> Result<Self, Box<dyn std::error::Error>>
    where
        T: test_server::Test,
        K: Sink + 'static,
    {
        let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
        // Port 0: let the OS pick a free port, then read it back.
        let addr: SocketAddr = "127.0.0.1:0".parse()?;
        let listener = tokio::net::TcpListener::bind(addr).await?;
        let local_addr = listener.local_addr()?;
        let local_addr = format!("http://{local_addr}");
        tokio::spawn(async move {
            Server::builder()
                .layer(BinaryLoggerLayer::new(sink))
                .add_service(TestServer::new(svc))
                .serve_with_incoming_shutdown(
                    TcpListenerStream::new(listener),
                    shutdown_rx.map(drop),
                )
                .await
                .unwrap();
        });
        // Give the test server a few ms to become available
        // (NOTE(review): fixed sleep is a potential race on slow machines).
        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        // Construct client and send request, extract response
        let client = TestClient::connect(local_addr.clone())
            .await
            .expect("connect");
        Ok(Self {
            local_addr,
            client,
            shutdown_tx,
        })
    }
}
impl Drop for Fixture {
    /// Signals the background server task to shut down when the fixture is
    /// dropped.
    fn drop(&mut self) {
        // `oneshot::Sender::send` consumes the sender, but `drop` only has
        // `&mut self`, so swap in a fresh dummy sender to take ownership.
        let (tmp_tx, _) = tokio::sync::oneshot::channel();
        let shutdown_tx = std::mem::replace(&mut self.shutdown_tx, tmp_tx);
        if let Err(e) = shutdown_tx.send(()) {
            // Fixed typo in message: "text fixture" -> "test fixture".
            eprintln!("error shutting down test fixture: {e:?}");
        }
    }
}
// Sink implementation that records every log entry in memory; clones share
// the same underlying log.
#[derive(Clone, Debug)]
pub struct RecordingSink {
    log: Arc<Mutex<Vec<GrpcLogEntry>>>,
}
impl RecordingSink {
    /// Creates a sink with an empty, shared log.
    pub fn new() -> Self {
        Self {
            log: Default::default(),
        }
    }
    /// Return a copy of the recorded log entries.
    ///
    /// # Panics
    /// Panics if the log mutex has been poisoned.
    pub fn entries(&self) -> Vec<GrpcLogEntry> {
        self.log.lock().unwrap().clone()
    }
}
/// `Default` delegates to `new` (clippy: `new_without_default`).
impl Default for RecordingSink {
    fn default() -> Self {
        Self::new()
    }
}
impl Sink for RecordingSink {
    type Error = ();
    // Appends the entry to the shared in-memory log; the error logger is
    // unused because recording cannot fail (other than mutex poisoning,
    // which panics).
    fn write(&self, data: GrpcLogEntry, _error_logger: impl sink::ErrorLogger<Self::Error>) {
        let mut log = self.log.lock().expect("poisoned");
        log.push(data);
    }
}
|
use agent_client::*;
pub mod agent_client;
// Checks the remote agent's ChaCha20 encryption against a known
// plaintext/ciphertext pair. The key (0..=31), nonce, counter 1 and the
// "sunscreen" plaintext appear to be the ChaCha20 test vector from
// RFC 8439 §2.4.2 — TODO confirm the expected ciphertext against the RFC.
#[tokio::main]
async fn main() {
    // "Ladies and Gentlemen of the class of '99: ..." as ASCII bytes.
    let plaintext = vec![
        0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74,
        0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c,
        0x61, 0x73, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, 0x3a, 0x20, 0x49, 0x66, 0x20,
        0x49, 0x20, 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x20, 0x79,
        0x6f, 0x75, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x74, 0x69, 0x70,
        0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65,
        0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73, 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f, 0x75,
        0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, 0x74, 0x2e,
    ];
    // Expected ciphertext for the key/nonce/counter below.
    let ciphertext: Vec<u8> = vec![
        0x6e, 0x2e, 0x35, 0x9a, 0x25, 0x68, 0xf9, 0x80, 0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69,
        0x81, 0xe9, 0x7e, 0x7a, 0xec, 0x1d, 0x43, 0x60, 0xc2, 0x0a, 0x27, 0xaf, 0xcc, 0xfd, 0x9f,
        0xae, 0x0b, 0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f, 0x59, 0x3d, 0xab, 0xcd,
        0x62, 0xb3, 0x57, 0x16, 0x39, 0xd6, 0x24, 0xe6, 0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35,
        0x9f, 0x08, 0x61, 0xd8, 0x07, 0xca, 0x0d, 0xbf, 0x50, 0x0d, 0x6a, 0x61, 0x56, 0xa3, 0x8e,
        0x08, 0x8a, 0x22, 0xb6, 0x5e, 0x52, 0xbc, 0x51, 0x4d, 0x16, 0xcc, 0xf8, 0x06, 0x81, 0x8c,
        0xe9, 0x1a, 0xb7, 0x79, 0x37, 0x36, 0x5a, 0xf9, 0x0b, 0xbf, 0x74, 0xa3, 0x5b, 0xe6, 0xb4,
        0x0b, 0x8e, 0xed, 0xf2, 0x78, 0x5e, 0x42, 0x87, 0x4d,
    ];
    // 32-byte key 0x00..0x1f, classified on the agent side.
    let key = agent_classify_u8s(&vec![
        0u8, 1u8, 2u8, 3u8, 4u8, 5u8, 6u8, 7u8, 8u8, 9u8, 10u8, 11u8, 12u8, 13u8, 14u8, 15u8, 16u8,
        17u8, 18u8, 19u8, 20u8, 21u8, 22u8, 23u8, 24u8, 25u8, 26u8, 27u8, 28u8, 29u8, 30u8, 31u8,
    ]).await;
    // 96-bit nonce with 0x4a in the eighth byte.
    let nonce = vec![
        0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4a, 0x0, 0x0, 0x0, 0x0,
    ];
    // Encrypt with initial block counter 1 and compare byte by byte.
    let computed_ciphertext = agent_chacha20_encrypt(&key, 1u32, &nonce, &plaintext).await;
    for (i, (x1, x2)) in ciphertext.iter().zip(computed_ciphertext).enumerate() {
        assert_eq!(*x1, x2, "at index {:?}", i);
    }
}
// Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{CbAction, EventId, OpMeta, SignalKind};
use std::mem::swap;
use tremor_script::prelude::*;
use tremor_script::{EventOriginUri, EventPayload, Value};
/// A tremor event
#[derive(
    Debug, Clone, PartialEq, Default, simd_json_derive::Serialize, simd_json_derive::Deserialize,
)]
pub struct Event {
    /// The event ID
    pub id: EventId,
    /// The event Data
    pub data: EventPayload,
    /// Nanosecond timestamp of when the event was ingested
    pub ingest_ns: u64,
    /// URI to identify the origin of the event
    pub origin_uri: Option<EventOriginUri>,
    /// The kind of the event (only set for signal events)
    pub kind: Option<SignalKind>,
    /// If this event is batched (containing multiple events itself)
    pub is_batch: bool,
    /// Circuit breaker action
    pub cb: CbAction,
    /// Metadata for operators
    pub op_meta: OpMeta,
    /// this needs transactional data
    pub transactional: bool,
}
impl Event {
    /// Turns the event into an insight given its success
    /// (consumes `self`; only identifying/meta fields are carried over)
    #[must_use]
    pub fn insight(self, success: bool) -> Event {
        Event {
            cb: success.into(),
            ingest_ns: self.ingest_ns,
            id: self.id,
            op_meta: self.op_meta,
            origin_uri: self.origin_uri,
            ..Event::default()
        }
    }

    /// Creates either a restore (CB open) or trigger (CB close) event
    #[must_use]
    pub fn restore_or_break(restore: bool, ingest_ns: u64) -> Self {
        if restore {
            Event::cb_restore(ingest_ns)
        } else {
            Event::cb_trigger(ingest_ns)
        }
    }

    /// Creates either an ack or fail event
    #[must_use]
    pub fn ack_or_fail(ack: bool, ingest_ns: u64, ids: EventId) -> Self {
        if ack {
            Event::cb_ack(ingest_ns, ids)
        } else {
            Event::cb_fail(ingest_ns, ids)
        }
    }

    /// Creates a new ack insight from the event, consumes the `op_meta` and
    /// `origin_uri` of the event
    #[must_use]
    pub fn insight_ack(&mut self) -> Event {
        let mut e = Event::cb_ack(self.ingest_ns, self.id.clone());
        // swap instead of clone: the source event gives up its meta/origin
        swap(&mut e.op_meta, &mut self.op_meta);
        swap(&mut e.origin_uri, &mut self.origin_uri);
        e
    }

    /// produce a `CBAction::Ack` insight event with the given time (in ms) in the metadata
    #[must_use]
    pub fn insight_ack_with_timing(&mut self, processing_time: u64) -> Event {
        let mut e = self.insight_ack();
        // payload is (value: null, meta: {"time": processing_time})
        let mut meta = Object::with_capacity(1);
        meta.insert("time".into(), Value::from(processing_time));
        e.data = (Value::null(), Value::from(meta)).into();
        e
    }

    /// Creates a new fail insight from the event, consumes the `op_meta` and `origin_uri` of the
    /// event
    #[must_use]
    pub fn insight_fail(&mut self) -> Event {
        let mut e = Event::cb_fail(self.ingest_ns, self.id.clone());
        swap(&mut e.op_meta, &mut self.op_meta);
        swap(&mut e.origin_uri, &mut self.origin_uri);
        e
    }

    /// Creates a restore insight from the event, consumes the `op_meta` and `origin_uri` of the
    /// event
    #[must_use]
    pub fn insight_restore(&mut self) -> Event {
        let mut e = Event::cb_restore(self.ingest_ns);
        swap(&mut e.op_meta, &mut self.op_meta);
        swap(&mut e.origin_uri, &mut self.origin_uri);
        e
    }

    /// Creates a trigger insight from the event, consumes the `op_meta` and `origin_uri` of the
    /// event
    #[must_use]
    pub fn insight_trigger(&mut self) -> Event {
        let mut e = Event::cb_trigger(self.ingest_ns);
        swap(&mut e.op_meta, &mut self.op_meta);
        swap(&mut e.origin_uri, &mut self.origin_uri);
        e
    }

    /// allows to iterate over the values and metadatas
    /// in an event, if it is batched this can be multiple
    /// otherwise it's a singular event
    #[must_use]
    pub fn value_meta_iter(&self) -> ValueMetaIter {
        ValueMetaIter {
            event: self,
            idx: 0,
        }
    }

    /// Creates a new event to restore a CB (`CbAction::Open`)
    #[must_use]
    pub fn cb_restore(ingest_ns: u64) -> Self {
        Event {
            ingest_ns,
            cb: CbAction::Open,
            ..Event::default()
        }
    }

    /// Creates a new event to trigger a CB (`CbAction::Close`)
    #[must_use]
    pub fn cb_trigger(ingest_ns: u64) -> Self {
        Event {
            ingest_ns,
            cb: CbAction::Close,
            ..Event::default()
        }
    }

    /// Creates a new CB ack event for the given event id
    #[must_use]
    pub fn cb_ack(ingest_ns: u64, id: EventId) -> Self {
        Event {
            ingest_ns,
            id,
            cb: CbAction::Ack,
            ..Event::default()
        }
    }

    /// Creates a new CB fail event for the given event id
    #[must_use]
    pub fn cb_fail(ingest_ns: u64, id: EventId) -> Self {
        Event {
            ingest_ns,
            id,
            cb: CbAction::Fail,
            ..Event::default()
        }
    }

    /// Creates a CB fail insight from the given `event` (the cause of this fail)
    #[must_use]
    pub fn to_fail(&self) -> Self {
        Event {
            id: self.id.clone(),
            ingest_ns: self.ingest_ns,
            op_meta: self.op_meta.clone(),
            origin_uri: self.origin_uri.clone(),
            cb: CbAction::Fail,
            ..Event::default()
        }
    }

    /// Create a CB ack insight from the given `event` (the cause of this ack)
    #[must_use]
    pub fn to_ack(&self) -> Self {
        Event {
            id: self.id.clone(),
            ingest_ns: self.ingest_ns,
            op_meta: self.op_meta.clone(),
            origin_uri: self.origin_uri.clone(),
            cb: CbAction::Ack,
            ..Event::default()
        }
    }

    #[must_use]
    /// return the number of events contained within this event
    /// normally 1, but for batched events possibly > 1
    /// (0 if a batched event's payload is not an array)
    pub fn len(&self) -> usize {
        if self.is_batch {
            self.data.suffix().value().as_array().map_or(0, Vec::len)
        } else {
            1
        }
    }

    /// returns true if this event is batched but has no wrapped events
    /// (non-array payloads on a batch count as empty too)
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.is_batch
            && self
                .data
                .suffix()
                .value()
                .as_array()
                .map_or(true, Vec::is_empty)
    }

    /// Extracts the `$correlation` metadata into a `Vec` of `Option<Value<'static>>`.
    /// We use a `Vec` to account for possibly batched events and `Option`s because single events might not have a value there.
    /// We use `Value<'static>`, which requires a clone, as we need to pass the values on to another event anyways.
    #[must_use]
    pub fn correlation_metas(&self) -> Vec<Option<Value<'static>>> {
        let mut res = Vec::with_capacity(self.len());
        for (_, meta) in self.value_meta_iter() {
            res.push(meta.get("correlation").map(Value::clone_static));
        }
        res
    }

    /// get the correlation metadata as a single value, if present
    /// creates an array value for batched events
    #[must_use]
    pub fn correlation_meta(&self) -> Option<Value<'static>> {
        if self.is_batch {
            let cms = self.correlation_metas();
            if cms.is_empty() {
                None
            } else {
                Some(Value::from(cms))
            }
        } else {
            self.data
                .suffix()
                .meta()
                .get("correlation")
                .map(Value::clone_static)
        }
    }
}
/// Iterator over the event value and metadata
/// if the event is a batch this will allow iterating
/// over all the batched events
pub struct ValueMetaIter<'value> {
    // the event being iterated
    event: &'value Event,
    // position within the batch (or 0/1 for a singular event)
    idx: usize,
}
// TODO: descend recursively into batched events in batched events ...
impl<'value> Iterator for ValueMetaIter<'value> {
    type Item = (&'value Value<'value>, &'value Value<'value>);

    /// For a batch, yields each wrapped event's `(value, meta)` pair taken
    /// from its `"data"` envelope; for a singular event, yields the event's
    /// own `(value, meta)` exactly once.
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.idx;
        self.idx += 1;
        if self.event.is_batch {
            self.event
                .data
                .suffix()
                .value()
                .get_idx(current)
                .and_then(|wrapped| {
                    let data = wrapped.get("data")?;
                    Some((data.get("value")?, data.get("meta")?))
                })
        } else if current == 0 {
            let suffix = self.event.data.suffix();
            Some((suffix.value(), suffix.meta()))
        } else {
            None
        }
    }
}
impl Event {
    /// Iterate over the values in an event
    /// this will result in multiple entries
    /// if the event was batched otherwise
    /// have only a single element
    #[must_use]
    pub fn value_iter(&self) -> ValueIter {
        ValueIter { idx: 0, event: self }
    }
}
/// Iterator over the values of an event
pub struct ValueIter<'value> {
    // the event being iterated
    event: &'value Event,
    // position within the batch (or 0/1 for a singular event)
    idx: usize,
}
impl<'value> Iterator for ValueIter<'value> {
    type Item = &'value Value<'value>;

    /// For a batch, yields each wrapped event's `"data"."value"`; for a
    /// singular event, yields the event's own value exactly once.
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.idx;
        self.idx += 1;
        if self.event.is_batch {
            self.event
                .data
                .suffix()
                .value()
                .get_idx(current)
                .and_then(|wrapped| wrapped.get("data")?.get("value"))
        } else if current == 0 {
            Some(self.event.data.suffix().value())
        } else {
            None
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::Result;
    use simd_json::OwnedValue;
    use tremor_script::{Object, ValueAndMeta};

    /// Test helper emulating batching: wraps `other` in the
    /// `{"data": {"value": .., "meta": ..}, "ingest_ns": .., ...}` envelope
    /// that batched events carry and appends it to `this`'s array value.
    fn merge<'iref, 'head>(
        this: &'iref mut ValueAndMeta<'head>,
        other: ValueAndMeta<'head>,
    ) -> Result<()> {
        if let Some(ref mut a) = this.value_mut().as_array_mut() {
            let mut e = Object::with_capacity(7);
            // {"id":1,
            // e.insert_nocheck("id".into(), id.into());
            // "data": {
            //     "value": "snot", "meta":{}
            // },
            let mut data = Object::with_capacity(2);
            let (value, meta) = other.into_parts();
            data.insert_nocheck("value".into(), value);
            data.insert_nocheck("meta".into(), meta);
            e.insert_nocheck("data".into(), Value::from(data));
            // "ingest_ns":1,
            e.insert_nocheck("ingest_ns".into(), 1.into());
            // "kind":null,
            // kind is always null on events
            e.insert_nocheck("kind".into(), Value::null());
            // "is_batch":false
            e.insert_nocheck("is_batch".into(), false.into());
            // }
            a.push(Value::from(e))
        };
        Ok(())
    }

    // batched event: both iterators walk the wrapped events in order
    #[test]
    fn value_iters() {
        let mut b = Event {
            data: (Value::array(), 2).into(),
            is_batch: true,
            ..Event::default()
        };
        let e1 = Event {
            data: (1, 2).into(),
            ..Event::default()
        };
        let e2 = Event {
            data: (3, 4).into(),
            ..Event::default()
        };
        assert!(b.data.consume(e1.data, merge).is_ok());
        assert!(b.data.consume(e2.data, merge).is_ok());
        let mut vi = b.value_iter();
        assert_eq!(vi.next().unwrap(), &1);
        assert_eq!(vi.next().unwrap(), &3);
        assert!(vi.next().is_none());
        let mut vmi = b.value_meta_iter();
        assert_eq!(vmi.next().unwrap(), (&1.into(), &2.into()));
        assert_eq!(vmi.next().unwrap(), (&3.into(), &4.into()));
        assert!(vmi.next().is_none());
    }

    // ack/fail constructors and insights carry the right CbAction and op_meta
    #[test]
    fn cb() {
        let mut e = Event::default();
        assert_eq!(e.clone().insight(true).cb, CbAction::Ack);
        assert_eq!(e.clone().insight(false).cb, CbAction::Fail);
        assert_eq!(
            Event::ack_or_fail(true, 0, EventId::default()).cb,
            CbAction::Ack
        );
        assert_eq!(Event::cb_ack(0, EventId::default()).cb, CbAction::Ack);
        assert_eq!(e.insight_ack().cb, CbAction::Ack);
        assert_eq!(
            Event::ack_or_fail(false, 0, EventId::default()).cb,
            CbAction::Fail
        );
        assert_eq!(Event::cb_fail(0, EventId::default()).cb, CbAction::Fail);
        assert_eq!(e.insight_fail().cb, CbAction::Fail);
        let mut clone = e.clone();
        clone.op_meta.insert(1, OwnedValue::null());
        let ack_with_timing = clone.insight_ack_with_timing(100);
        assert_eq!(ack_with_timing.cb, CbAction::Ack);
        assert!(ack_with_timing.op_meta.contains_key(1));
        let (_, m) = ack_with_timing.data.parts();
        assert_eq!(Some(100), m.get_u64("time"));
        let mut clone2 = e.clone();
        clone2.op_meta.insert(42, OwnedValue::null());
        let clone_fail = clone2.to_fail();
        assert_eq!(clone_fail.cb, CbAction::Fail);
        assert!(clone_fail.op_meta.contains_key(42));
    }

    // restore/trigger constructors map to Open/Close
    #[test]
    fn gd() {
        let mut e = Event::default();
        assert_eq!(Event::restore_or_break(true, 0).cb, CbAction::Open);
        assert_eq!(Event::cb_restore(0).cb, CbAction::Open);
        assert_eq!(e.insight_restore().cb, CbAction::Open);
        assert_eq!(Event::restore_or_break(false, 0).cb, CbAction::Close);
        assert_eq!(Event::cb_trigger(0).cb, CbAction::Close);
        assert_eq!(e.insight_trigger().cb, CbAction::Close);
    }

    #[test]
    fn len() -> Result<()> {
        // default non-batched event
        let mut e = Event::default();
        assert_eq!(1, e.len());
        // batched event with 2 elements
        e.is_batch = true;
        let mut value = Value::array_with_capacity(2);
        value.push(Value::from(true))?; // dummy events
        value.push(Value::from(false))?;
        e.data = (value, Value::object_with_capacity(0)).into();
        assert_eq!(2, e.len());
        // batched event with non-array value
        e.data = (Value::null(), Value::object_with_capacity(0)).into();
        assert_eq!(0, e.len());
        // batched array with empty array value
        e.data = (
            Value::array_with_capacity(0),
            Value::object_with_capacity(0),
        )
            .into();
        assert_eq!(0, e.len());
        Ok(())
    }

    #[test]
    fn is_empty() -> Result<()> {
        let mut e = Event::default();
        // non-batched events are never considered empty
        assert_eq!(false, e.is_empty());
        e.is_batch = true;
        // non-array payload on a batch counts as empty
        e.data = (Value::null(), Value::object()).into();
        assert_eq!(true, e.is_empty());
        e.data = (Value::array(), Value::object()).into();
        assert_eq!(true, e.is_empty());
        let mut value = Value::array_with_capacity(2);
        value.push(Value::from(true))?; // dummy events
        value.push(Value::from(false))?;
        e.data = (value, Value::object()).into();
        assert_eq!(false, e.is_empty());
        Ok(())
    }

    #[test]
    fn correlation_meta() -> Result<()> {
        let mut e = Event::default();
        assert!(e.correlation_meta().is_none());
        let mut m = literal!({
            "correlation": 1
        });
        e.data = (Value::null(), m.clone()).into();
        assert_eq!(e.correlation_meta().unwrap(), 1);
        // a batch of two events collects both correlation values into an array
        let mut e2 = Event::default();
        e2.is_batch = true;
        e2.data = (Value::array(), m.clone()).into();
        e2.data.consume(e.data.clone(), merge).unwrap();
        m.try_insert("correlation", 2);
        e.data = (Value::null(), m.clone()).into();
        e2.data.consume(e.data, merge).unwrap();
        assert_eq!(e2.correlation_meta().unwrap(), Value::from(vec![1, 2]));
        Ok(())
    }
}
|
use std::{
collections::{BTreeMap, BTreeSet},
sync::Arc,
time::Duration,
};
use crate::{AbstractTaskRegistry, TaskId, TaskRegistration, TaskTracker};
/// Function that extracts metric attributes from job metadata.
///
/// Note that some attributes like `"status"` will automatically be set/overwritten to ensure a certain consistency.
// Boxed so the registry can own an arbitrary caller-supplied closure.
pub type FAttributes<T> = Box<dyn FnMut(&T) -> metric::Attributes + Send>;
/// Wraps a task registry and adds metrics.
#[derive(Debug)]
pub struct TaskRegistryWithMetrics<T, R>
where
    T: std::fmt::Debug + Send + Sync,
    R: AbstractTaskRegistry<T>,
{
    /// The wrapped registry; all registry operations are delegated to it.
    registry: R,
    /// Metric state updated on every `reclaim`.
    metrics: RegistryMetrics<T>,
}
impl<T, R> TaskRegistryWithMetrics<T, R>
where
    T: std::fmt::Debug + Send + Sync,
    R: AbstractTaskRegistry<T>,
{
    /// Creates a metrics-wrapping registry around `inner`, registering its
    /// metrics with `metric_registry` and using `f_attributes` to derive
    /// per-job metric attributes.
    pub fn new(
        inner: R,
        metric_registry: Arc<metric::Registry>,
        f_attributes: FAttributes<T>,
    ) -> Self {
        Self {
            registry: inner,
            metrics: RegistryMetrics::new(metric_registry, f_attributes),
        }
    }
}
impl<T, R> AbstractTaskRegistry<T> for TaskRegistryWithMetrics<T, R>
where
    T: std::fmt::Debug + Send + Sync,
    R: AbstractTaskRegistry<T>,
{
    // All methods delegate to the wrapped registry; only `reclaim` touches
    // the metrics, so metrics are only refreshed when reclamation runs.
    fn register(&mut self, metadata: T) -> (TaskTracker<T>, TaskRegistration) {
        self.registry.register(metadata)
    }

    fn get(&self, id: TaskId) -> Option<TaskTracker<T>> {
        self.registry.get(id)
    }

    fn tracked_len(&self) -> usize {
        self.registry.tracked_len()
    }

    fn tracked(&self) -> Vec<TaskTracker<T>> {
        self.registry.tracked()
    }

    fn running(&self) -> Vec<TaskTracker<T>> {
        self.registry.running()
    }

    fn reclaim(&mut self) -> Vec<TaskTracker<T>> {
        // update metrics from both the pruned jobs and the remaining state
        let pruned = self.registry.reclaim();
        self.metrics.update(&self.registry, &pruned);
        pruned
    }
}
/// Metric state maintained for a wrapped task registry.
struct RegistryMetrics<T>
where
    T: std::fmt::Debug + Send + Sync,
{
    /// Gauge of known jobs, labeled by attributes (incl. status).
    active_gauge: metric::Metric<metric::U64Gauge>,
    // Accumulates jobs that were pruned from the limited job history. This is required to not saturate the completed
    // count after a while.
    completed_accu: BTreeMap<metric::Attributes, u64>,
    /// CPU time distribution of completed jobs.
    cpu_time_histogram: metric::Metric<metric::DurationHistogram>,
    /// Wall-clock time distribution of completed jobs.
    wall_time_histogram: metric::Metric<metric::DurationHistogram>,
    // Set of jobs for which we already accounted data but that are still tracked. We must not account these
    // jobs a second time.
    completed_but_still_tracked: BTreeSet<TaskId>,
    /// Caller-supplied extractor of metric attributes from job metadata.
    f_attributes: FAttributes<T>,
}
// Manual Debug impl: `f_attributes` is a boxed closure and cannot derive
// Debug, so it is omitted via `finish_non_exhaustive`.
impl<T> std::fmt::Debug for RegistryMetrics<T>
where
    T: std::fmt::Debug + Send + Sync,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("RegistryMetrics")
            .field("active_gauge", &self.active_gauge)
            .field("completed_accu", &self.completed_accu)
            .field("cpu_time_histogram", &self.cpu_time_histogram)
            .field("wall_time_histogram", &self.wall_time_histogram)
            .field(
                "completed_but_still_tracked",
                &self.completed_but_still_tracked,
            )
            .finish_non_exhaustive()
    }
}
impl<T> RegistryMetrics<T>
where
    T: std::fmt::Debug + Send + Sync,
{
    /// Registers the job-count gauge and the completed-job duration
    /// histograms with `metric_registry`.
    fn new(metric_registry: Arc<metric::Registry>, f_attributes: FAttributes<T>) -> Self {
        Self {
            active_gauge: metric_registry
                .register_metric("influxdb_iox_job_count", "Number of known jobs"),
            completed_accu: Default::default(),
            cpu_time_histogram: metric_registry.register_metric_with_options(
                // fixed doubled word "of of" in the user-facing descriptions
                "influxdb_iox_job_completed_cpu",
                "CPU time of completed jobs",
                Self::duration_histogram_options,
            ),
            wall_time_histogram: metric_registry.register_metric_with_options(
                "influxdb_iox_job_completed_wall",
                "Wall time of completed jobs",
                Self::duration_histogram_options,
            ),
            completed_but_still_tracked: Default::default(),
            f_attributes,
        }
    }

    /// Histogram buckets: 10ms .. 100s in decades, plus a catch-all.
    fn duration_histogram_options() -> metric::DurationHistogramOptions {
        metric::DurationHistogramOptions::new(vec![
            Duration::from_millis(10),
            Duration::from_millis(100),
            Duration::from_secs(1),
            Duration::from_secs(10),
            Duration::from_secs(100),
            metric::DURATION_MAX,
        ])
    }

    /// Refreshes all metrics from the registry state and the jobs that were
    /// just pruned (`pruned`). Each completed job is accounted exactly once,
    /// even if it is seen both while still tracked and later when pruned.
    fn update<R>(&mut self, registry: &R, pruned: &[TaskTracker<T>])
    where
        R: AbstractTaskRegistry<T>,
    {
        // scan pruned jobs
        for job in pruned {
            assert!(job.is_complete());
            if self.completed_but_still_tracked.remove(&job.id()) {
                // already accounted
                continue;
            }
            self.process_completed_job(job);
        }

        // scan current completed jobs
        let (tracked_completed, tracked_other): (Vec<_>, Vec<_>) = registry
            .tracked()
            .into_iter()
            .partition(|job| job.is_complete());
        for job in tracked_completed {
            if !self.completed_but_still_tracked.insert(job.id()) {
                // already accounted
                continue;
            }
            self.process_completed_job(&job);
        }

        // scan current not-completed jobs, starting from the completed
        // accumulator so the gauge also reflects historic completions
        let mut accumulator: BTreeMap<metric::Attributes, u64> = self.completed_accu.clone();
        for job in tracked_other {
            let attr = self.job_to_gauge_attributes(&job);
            // `attr` is moved into the map; no clone needed here
            accumulator
                .entry(attr)
                .and_modify(|x| *x += 1)
                .or_insert(1);
        }

        // emit metric
        for (attr, count) in accumulator {
            self.active_gauge.recorder(attr).set(count);
        }
    }

    /// Derives the gauge attributes for `job`, overriding `"status"` with the
    /// job's result name (or status name while it is still running).
    // NOTE: the previous `where T: Send + Sync` clause was removed — it was
    // redundant with the bounds already present on this impl block.
    fn job_to_gauge_attributes(&mut self, job: &TaskTracker<T>) -> metric::Attributes {
        let metadata = job.metadata();
        let status = job.get_status();
        let mut attributes = (self.f_attributes)(metadata);
        attributes.insert(
            "status",
            status
                .result()
                .map(|result| result.name())
                .unwrap_or_else(|| status.name()),
        );
        attributes
    }

    /// Accounts a single completed job: bumps the completed accumulator and
    /// records its CPU/wall durations if available.
    fn process_completed_job(&mut self, job: &TaskTracker<T>) {
        let attr = self.job_to_gauge_attributes(job);
        self.completed_accu
            .entry(attr.clone())
            .and_modify(|x| *x += 1)
            .or_insert(1);

        let status = job.get_status();
        if let Some(nanos) = status.cpu_nanos() {
            self.cpu_time_histogram
                .recorder(attr.clone())
                // `as u64` assumes nanos is non-negative — TODO confirm upstream
                .record(std::time::Duration::from_nanos(nanos as u64));
        }
        if let Some(nanos) = status.wall_nanos() {
            self.wall_time_histogram
                .recorder(attr)
                .record(std::time::Duration::from_nanos(nanos as u64));
        }
    }
}
#[cfg(test)]
mod tests {
    use metric::Observation;

    use crate::{TaskRegistry, TrackedFutureExt};

    use super::*;

    #[test]
    fn test_metrics() {
        let time_provider = Arc::new(iox_time::SystemProvider::new());
        let registry = TaskRegistry::new(time_provider);
        let metric_registry = Arc::new(metric::Registry::new());
        let mut reg = TaskRegistryWithMetrics::new(
            registry,
            Arc::clone(&metric_registry),
            Box::new(extract_attributes),
        );
        // register one tracked (never-polled) job with metadata 0,
        // then drive completion counters — presumably `complete` comes from
        // a trait/inherent API defined elsewhere; TODO confirm semantics
        fut().track(reg.register(0).1);
        for i in 1..=3 {
            reg.complete(i);
        }
        // reclaim triggers the metric update
        reg.reclaim();
        let mut reporter = metric::RawReporter::default();
        metric_registry.report(&mut reporter);
        // the dropped (never-polled) even-metadata job
        let gauge = reporter
            .metric("influxdb_iox_job_count")
            .unwrap()
            .observation(&[("status", "Dropped"), ("is_even", "true")])
            .unwrap();
        assert_eq!(gauge, &Observation::U64Gauge(1));
        // the successfully completed odd-metadata jobs
        let gauge = reporter
            .metric("influxdb_iox_job_count")
            .unwrap()
            .observation(&[("status", "Success"), ("is_even", "false")])
            .unwrap();
        assert_eq!(gauge, &Observation::U64Gauge(2));
    }

    // trivial future used only to obtain something trackable
    async fn fut() -> Result<(), ()> {
        Ok(())
    }

    // attribute extractor: exposes parity; "status" is always overwritten
    fn extract_attributes(job: &i32) -> metric::Attributes {
        metric::Attributes::from(&[
            ("is_even", if job % 2 == 0 { "true" } else { "false" }),
            ("status", "will be overwritten"),
        ])
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.