text stringlengths 8 4.13M |
|---|
use crate::util;
/// Print the current user's name to stdout.
///
/// Falls back to `"(not logged in)"` when `util::get_username` returns no
/// name. Errors from `util::get_username` are propagated to the caller.
pub fn whoami() -> anyhow::Result<()> {
    // `unwrap_or_else` avoids allocating the fallback String on the happy
    // path (the eager `unwrap_or` form built it even when a name was present).
    let username = util::get_username()?.unwrap_or_else(|| "(not logged in)".to_string());
    println!("{}", username);
    Ok(())
}
|
use hex;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
/// Entry point: read hex-encoded candidate lines from `file.txt` and report
/// the best single-byte-XOR decryption.
fn main() {
    let file = File::open("file.txt").unwrap();
    let reader = BufReader::new(file);
    find_the_xor(reader)
}
/// Score a single byte by approximate English letter frequency.
///
/// Uppercase ASCII letters are folded to lowercase first. Common letters and
/// the space character score positively, the rarest letters score zero, and
/// every other byte scores -1.
fn score_ascii_byte(c: u8) -> i64 {
    // `to_ascii_lowercase` only remaps b'A'..=b'Z', exactly like the manual
    // `c - b'A' + b'a'` adjustment it replaces.
    match c.to_ascii_lowercase() as char {
        'e' => 12,
        't' | 'a' | 'o' => 8,
        'i' | 'n' => 7,
        's' | 'h' | 'r' => 6,
        'd' | 'l' => 4,
        'c' | 'u' => 3,
        'm' | 'w' | 'f' | 'g' | 'y' | 'p' => 2,
        'b' | 'v' | 'k' | ' ' => 1,
        'j' | 'x' | 'q' | 'z' => 0,
        _ => -1,
    }
}
/// Total English-frequency score of a byte string: the sum of the per-byte
/// scores from `score_ascii_byte`.
fn score_ascii(string: Vec<u8>) -> i64 {
    let mut total: i64 = 0;
    for byte in string {
        total += score_ascii_byte(byte);
    }
    total
}
/// Brute-force a single-byte XOR cipher over every hex-encoded line of input
/// and print the overall highest-scoring `(score, plaintext, key)` triple.
///
/// Each line is hex-decoded and XORed against every possible key byte; the
/// candidate whose bytes look most like English (per `score_ascii`) wins.
/// Candidates that are not valid ASCII are given a neutral placeholder tuple.
fn find_the_xor(strings: impl BufRead) {
    let answer = strings
        .lines()
        .map(|line| hex::decode(line.unwrap()).unwrap())
        .map(|string| {
            // BUG FIX: the exclusive range `0u8..std::u8::MAX` never tried
            // key 255; the inclusive range covers all 256 key bytes.
            (0u8..=std::u8::MAX)
                .map(|i| {
                    let test: Vec<u8> = string.iter().map(|a| a ^ i).collect();
                    if let Ok(decoded) = String::from_utf8(test.clone()) {
                        if decoded.is_ascii() {
                            // `i` is Copy; the old `i.clone()` was noise.
                            return (score_ascii(test), decoded, i);
                        }
                    }
                    // Non-UTF-8 / non-ASCII candidates score zero.
                    (0, "Not Found".into(), 0u8)
                })
                .max()
                .unwrap()
        })
        .max();
    println!("{:?}", answer);
}
|
mod collection;
mod contract;
mod publication;
mod user;
pub use collection::*;
pub use contract::*;
pub use publication::*;
pub use user::*;
|
/* Copyright 2020 Yuchen Wong */
use opencv::core::{Mat, Scalar_, Size_, Vec3f};
use opencv::prelude::{MatTrait, Vector};
use opencv::types::{VectorOfMat};
use std::error::Error;
#[path = "../base/math_utils.rs"] mod math_utils;
/// Photographic local tone mapping.
///
/// Converts the HDR image `src` into an 8-bit LDR image written to `out_ldr`.
/// `alpha`, `phi`, `epsilon` and `max_kernel_size` are forwarded to
/// `compute_radiance`, which produces the world luminance `l_w` and the
/// display luminance `l_d` used to rescale each colour channel.
pub fn map(src: &Mat,
           alpha: f32,
           phi: f32,
           epsilon: f32,
           max_kernel_size: i32,
           out_ldr: &mut Mat) -> Result<(), Box<dyn Error>> {
    log::trace!("Starting tone mapping: PhotoGraphics Local.");
    let mut l_d: Mat = Mat::default()?;
    let mut l_w: Mat = Mat::default()?;
    // The function returns Result, so propagate failures with `?` instead of
    // panicking through `.unwrap()`.
    compute_radiance(src, alpha, phi, epsilon, max_kernel_size, &mut l_w, &mut l_d)?;
    let mut tmp_ldr: Mat = src.clone()?;
    let mut tmp_ldr_array: VectorOfMat = VectorOfMat::new();
    let mut out_ldr_array: VectorOfMat = VectorOfMat::new();
    opencv::core::split(&tmp_ldr, &mut tmp_ldr_array)?;
    // Rescale each colour channel by l_d / l_w.
    for i in 0..3 {
        let cur_mat: Mat = tmp_ldr_array.get(i)?;
        let mut tmp_mat: Mat = Mat::default()?;
        let mut out_mat: Mat = Mat::default()?;
        opencv::core::divide2(&cur_mat, &l_w, &mut tmp_mat, 1.0, opencv::core::CV_32FC1)?;
        opencv::core::multiply(&tmp_mat, &l_d, &mut out_mat, 1.0, opencv::core::CV_32FC1)?;
        out_ldr_array.push(out_mat);
    }
    opencv::core::merge(&out_ldr_array, &mut tmp_ldr)?;
    let mut ldr_uncropped: Mat = Mat::default()?;
    // Scale [0, 1] float values into [0, 255] and convert to 8-bit output.
    opencv::core::multiply(&tmp_ldr, &Scalar_::all(255.0), &mut ldr_uncropped, 1.0, -1)?;
    ldr_uncropped.convert_to(out_ldr, opencv::core::CV_8UC3, 1.0, 0.0)?;
    // BUG FIX: the completion message previously said "Global" although this
    // is the Local operator announced by the start message above.
    log::trace!("Tone mapping finished: PhotoGraphics Local.");
    Ok(())
}
/// Computes the world luminance map (`out_l_w`) and the local display
/// luminance map (`out_radiance_map`) used by the tone mapping operator.
///
/// `alpha` scales the luminance key, `phi` and `epsilon` drive the local
/// centre-surround selection, and `max_kernel_size` bounds the Gaussian
/// kernel sizes tried (odd sizes 1, 3, 5, ...).
fn compute_radiance(src: &Mat,
alpha: f32,
phi: f32,
epsilon: f32,
max_kernel_size: i32,
out_l_w: &mut Mat,
out_radiance_map: &mut Mat) -> Result<(), Box<dyn Error>> {
// Per-pixel world luminance from the source channels.
compute_l_w(src, out_l_w).unwrap();
let mut l_w_log = Mat::default()?;
let mut l_w_tmp: Mat = Mat::default()?;
// Small offset avoids log(0) on black pixels.
opencv::core::add(out_l_w, &Scalar_::all(0.0001),
&mut l_w_tmp, &opencv::core::no_array()?, opencv::core::CV_32FC1).unwrap();
opencv::core::log(&l_w_tmp, &mut l_w_log).unwrap();
l_w_tmp.release()?;
// Log-average luminance of the image (exp of the mean of the logs).
let l_w_hat: f32 = opencv::core::mean(&l_w_log, &opencv::core::no_array()?).unwrap()[0].exp() as f32;
log::trace!("{}", l_w_hat);
let rows: i32 = src.rows();
let cols: i32 = src.cols();
unsafe {
// create_rows_cols allocates without initializing the pixel values.
out_radiance_map.create_rows_cols(rows, cols, opencv::core::CV_32FC1).unwrap();
}
let mut l_m: Mat = Mat::default()?;
let mut l_m_tmp: Mat = Mat::default()?;
// l_m = alpha * l_w / l_w_hat (scaled luminance).
opencv::core::divide2(out_l_w, &Scalar_::all(l_w_hat as f64), &mut l_m_tmp, 1.0, opencv::core::CV_32FC1)?;
opencv::core::multiply(&l_m_tmp, &Scalar_::all(alpha as f64), &mut l_m, 1.0, opencv::core::CV_32FC1)?;
// Gaussian-blurred versions of l_m at odd kernel sizes 1..=max_kernel_size.
let mut gaussian_filters: VectorOfMat = VectorOfMat::new();
let mut cur_size: i32 = 1;
while cur_size <= max_kernel_size {
let mut cur_gaussian: Mat = Mat::default()?;
opencv::imgproc::gaussian_blur(&l_m, &mut cur_gaussian,
Size_::new(cur_size, cur_size), 0.0, 0.0, opencv::core::BORDER_DEFAULT)?;
gaussian_filters.push(cur_gaussian);
cur_size += 2;
}
let mut l_d_down = Mat::default()?;
unsafe {
// NOTE(review): pixels never touched by the epsilon test below keep
// whatever uninitialized data this allocation contains — confirm every
// pixel is expected to pass the test for at least one scale.
l_d_down.create_rows_cols(rows, cols, opencv::core::CV_32FC1).unwrap();
}
log::info!("Starting doing local ops.");
let gaussian_num = gaussian_filters.len();
for i in 0..(gaussian_num-1) {
// s is the kernel size at scale i (1, 3, 5, ...).
let s: f32 = 2.0 * (i as f32) + 1.0;
let cur_gaussian: Mat = gaussian_filters.get(i).unwrap();
let next_gaussian: Mat = gaussian_filters.get(i+1).unwrap();
let mut down: Mat = Mat::default()?;
// Centre-surround denominator: V1 + 2^phi * alpha / s^2.
opencv::core::add(&cur_gaussian, &Scalar_::all((phi.exp2()*alpha/s.powi(2)) as f64),
&mut down, &opencv::core::no_array()?, opencv::core::CV_32FC1)?;
let mut up: Mat = Mat::default()?;
opencv::core::subtract(&cur_gaussian, &next_gaussian, &mut up, &opencv::core::no_array()?, opencv::core::CV_32FC1)?;
let mut v: Mat = Mat::default()?;
opencv::core::divide2(&up, &down, &mut v, 1.0, opencv::core::CV_32FC1)?;
for row in 0..rows {
for col in 0..cols {
let cur_v: f32 = *v.at_2d::<f32>(row, col).unwrap();
// NOTE(review): later scales overwrite earlier ones whenever the
// test passes again; the usual operator keeps the *smallest*
// passing scale — confirm this overwrite order is intended.
if cur_v.abs() < epsilon {
*l_d_down.at_2d_mut::<f32>(row, col).unwrap() = 1.0 +
*cur_gaussian.at_2d::<f32>(row, col).unwrap();
}
}
}
}
// Final display luminance: l_m / (1 + V1_chosen).
opencv::core::divide2(&l_m, &l_d_down, out_radiance_map, 1.0, opencv::core::CV_32FC1)?;
Ok(())
}
/// Computes the per-pixel world luminance of a 3-channel float image.
///
/// Writes a single-channel CV_32F matrix to `dst` where each pixel is the
/// weighted sum `0.06*ch0 + 0.67*ch1 + 0.27*ch2` of the source channels
/// (assumes BGR channel order — TODO confirm against callers).
fn compute_l_w(src: &Mat,
               dst: &mut Mat) -> Result<(), Box<dyn Error>> {
    let rows = src.rows();
    let cols = src.cols();
    unsafe {
        // Allocates the output without initializing it; every pixel is
        // written by the loop below.
        dst.create_rows_cols(rows, cols, opencv::core::CV_32FC1)?;
    }
    for i in 0..rows {
        for j in 0..cols {
            // Propagate access errors with `?` (the function already returns
            // Result) instead of panicking via `.unwrap()`.
            let pixel_value: Vec3f = *src.at_2d::<Vec3f>(i, j)?;
            // Vec3f elements are already f32, so the old `as f32` casts were
            // redundant.
            let pixel_b = pixel_value[0];
            let pixel_g = pixel_value[1];
            let pixel_r = pixel_value[2];
            *dst.at_2d_mut::<f32>(i, j)? = 0.06 * pixel_b + 0.67 * pixel_g + 0.27 * pixel_r;
        }
    }
    Ok(())
}
|
/// Reader of the `MAXWLR` register.
pub type R = crate::R<MAXWLR_SPEC>;
/// Writer of the `MAXWLR` register.
pub type W = crate::W<MAXWLR_SPEC>;
/// Reader of the `MWL` field: maximum data write length when I3C acts as a
/// target. Initially written by software while I3C_CFGR.EN = 0, updated by
/// hardware on reception of a SETMWL command (software is notified through
/// I3C_EVR.MWLUPF), and returned on the bus when the target receives GETMWL.
pub type MWL_R = crate::FieldReader<u16>;
/// Writer of the `MWL` field (see [`MWL_R`] for the field semantics).
pub type MWL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
impl R {
    /// Bits 0:15 - maximum data write length in target mode; see [`MWL_R`].
    #[inline(always)]
    pub fn mwl(&self) -> MWL_R {
        // The field occupies the low 16 bits, so the truncating cast is
        // equivalent to masking with 0xffff first.
        MWL_R::new(self.bits as u16)
    }
}
impl W {
    /// Bits 0:15 - maximum data write length in target mode; see [`MWL_R`].
    #[inline(always)]
    #[must_use]
    pub fn mwl(&mut self) -> MWL_W<MAXWLR_SPEC, 0> {
        MWL_W::new(self)
    }
    /// Writes raw bits to the register.
    ///
    /// # Safety
    /// The caller must ensure the value is valid for this register.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
/// I3C maximum write length register.
///
/// Access it through the generic register API:
/// [`read`](crate::generic::Reg::read), [`write`](crate::generic::Reg::write),
/// [`write_with_zero`](crate::generic::Reg::write_with_zero),
/// [`reset`](crate::generic::Reg::reset) and
/// [`modify`](crate::generic::Reg::modify). See the
/// [API](https://docs.rs/svd2rust/#read--modify--write-api) documentation.
pub struct MAXWLR_SPEC;
impl crate::RegisterSpec for MAXWLR_SPEC {
    /// The register is 32 bits wide.
    type Ux = u32;
}
/// `read()` returns the [`R`] reader structure.
impl crate::Readable for MAXWLR_SPEC {}
/// `write(|w| ..)` takes the [`W`] writer structure.
impl crate::Writable for MAXWLR_SPEC {
    // No fields require writing fixed 0s or 1s to modify.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
/// `reset()` sets MAXWLR to value 0.
impl crate::Resettable for MAXWLR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
mod http;
pub use self::http::index; |
//! A library for manipulating SMPTE timecodes.
use std::fmt;
use std::marker;
use std::ops;
use std::str;
mod frame_rate;
pub use frame_rate::{FrameRate, FrameRate2398, FrameRate24, FrameRate25,
FrameRate2997, FrameRate30, FrameRate50, FrameRate5994,
FrameRate60};
use frame_rate::NormalizeFrameNumber;
/// Error value used by every [Result](https://doc.rust-lang.org/std/result/enum.Result.html)
/// returned from this library.
#[derive(Debug)]
pub struct TimecodeError {
    /// What went wrong; see [`TimecodeErrorKind`].
    pub kind: TimecodeErrorKind,
}
/// The ways a timecode operation can fail.
#[derive(Debug)]
pub enum TimecodeErrorKind {
    /// Parsing failed because the input had an invalid format.
    InvalidFormat,
    /// The timecode had an invalid value — for instance a frame field larger
    /// than the frame rate allows.
    InvalidTimecode,
}
/// Representation of a timecode as a struct, generic over types implementing
/// the trait [FrameRate](trait.FrameRate.html).
///
/// **Note**: Currently the user-facing values are open properties. These may
/// be replaced by getters to facilitate lazy evaluation.
///
/// ```
/// use video_timecode::*;
/// use std::str::FromStr;
///
/// let tc1 = Timecode::<FrameRate24>::new(0, 0, 0, 10).unwrap();
/// assert_eq!(tc1.frame_number, 10);
///
/// let tc2 = Timecode::<FrameRate24>::from_str("00:00:10:00").unwrap();
/// assert_eq!(tc2.frame_number, 240);
///
/// let mut tc3 = Timecode::<FrameRate24>::from(240);
/// assert_eq!(tc3.hour, 0);
/// assert_eq!(tc3.minute, 0);
/// assert_eq!(tc3.second, 10);
/// assert_eq!(tc3.frame, 0);
/// assert_eq!(tc3.frame_number, 240);
///
/// tc3 += tc1;
/// assert_eq!(tc3.hour, 0);
/// assert_eq!(tc3.minute, 0);
/// assert_eq!(tc3.second, 10);
/// assert_eq!(tc3.frame, 10);
/// assert_eq!(tc3.frame_number, 250);
/// ```
#[derive(Debug, PartialEq)]
pub struct Timecode<FrameRate> {
    /// Frame number: the count of frames after `00:00:00:00`.
    pub frame_number: u32,
    /// Hour field of the timecode.
    pub hour: u8,
    /// Minute field of the timecode.
    pub minute: u8,
    /// Second field of the timecode.
    pub second: u8,
    /// Frame field within the current second.
    pub frame: u8,
    // Zero-sized marker binding this timecode to its frame rate type.
    frame_rate: marker::PhantomData<FrameRate>,
}
impl<T> Timecode<T> {
/// Returns a timecode with the given hour/minute/second/frame fields.
///
/// ```
/// use video_timecode::*;
///
/// let timecode = Timecode::<FrameRate24>::new(10, 0, 0, 0).unwrap();
/// assert_eq!(timecode.frame_number, 864000);
/// ```
pub fn new(
hour: u8,
minute: u8,
second: u8,
frame: u8,
) -> Result<Timecode<T>, TimecodeError>
where
T: FrameRate,
{
use self::TimecodeErrorKind::*;
let result = T::calculate_frame_number(
hour as u32,
minute as u32,
second as u32,
frame as u32,
);
match result {
Some(frame_number) => Ok(Timecode {
frame_number,
hour,
minute,
second,
frame,
frame_rate: marker::PhantomData,
}),
None => Err(TimecodeError {
kind: InvalidTimecode,
}),
}
}
}
/// Parse a string into a timecode.
///
/// Colon separator is alright for all types.
///
/// ```
/// use video_timecode::*;
/// use std::str::FromStr;
///
/// let tc1 = Timecode::<FrameRate24>::from_str("00:00:10:00").unwrap();
/// assert_eq!(tc1.frame_number, 240);
///
/// let tc2 = Timecode::<FrameRate2997>::from_str("00:00:10:00").unwrap();
/// assert_eq!(tc2.frame_number, 300);
/// ```
///
/// For frame rates with drop frame, the following formats are also allowed:
///
/// * `00:00:00;00`
/// * `00;00;00;00`
/// * `00.00.00.00`
/// * `00:00:00.00`
///
/// ```
/// use video_timecode::*;
/// use std::str::FromStr;
///
/// let tc1 = Timecode::<FrameRate2997>::from_str("00:00:10;00").unwrap();
/// assert_eq!(tc1.frame_number, 300);
///
/// let tc2 = Timecode::<FrameRate2997>::from_str("00;00;10;00").unwrap();
/// assert_eq!(tc2.frame_number, 300);
///
/// let tc3 = Timecode::<FrameRate2997>::from_str("00:00:10.00").unwrap();
/// assert_eq!(tc3.frame_number, 300);
///
/// let tc4 = Timecode::<FrameRate2997>::from_str("00.00.10.00").unwrap();
/// assert_eq!(tc4.frame_number, 300);
/// ```
impl<T> str::FromStr for Timecode<T>
where
    T: FrameRate,
{
    type Err = TimecodeError;

    /// If parsing fails, a timecode error is returned.
    ///
    /// If the input format is invalid in some way, the `TimecodeErrorKind`
    /// field of the [TimecodeError](struct.TimecodeError.html) will be
    /// [InvalidFormat](enum.TimecodeErrorKind.html#variant.InvalidFormat).
    ///
    /// ```
    /// use video_timecode::*;
    /// use video_timecode::TimecodeErrorKind::*;
    /// use std::str::FromStr;
    ///
    /// // Semicolon notation only allowed for drop frame frame rates.
    /// match Timecode::<FrameRate24>::from_str("00:00:10;00") {
    ///     Err(TimecodeError { kind: InvalidFormat }) => {}
    ///     _ => panic!()
    /// }
    /// ```
    ///
    /// If the timecode is not valid for the given frame rate, it will be
    /// [InvalidTimecode](enum.TimecodeErrorKind.html#variant.Timecode).
    ///
    /// ```
    /// use video_timecode::*;
    /// use video_timecode::TimecodeErrorKind::*;
    /// use std::str::FromStr;
    ///
    /// // This is a dropped frame.
    /// match Timecode::<FrameRate2997>::from_str("00:01:00;00") {
    ///     Err(TimecodeError { kind: InvalidTimecode }) => {}
    ///     _ => panic!()
    /// }
    /// ```
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use self::TimecodeErrorKind::*;

        // Parses a two-character numeric field from the iterator. When a
        // `limit` is given, parsed values >= limit are rejected. This helper
        // replaces four copies of identical take(2)/parse/match boilerplate.
        fn field(
            it: &mut str::Chars,
            limit: Option<u8>,
        ) -> Result<u8, TimecodeError> {
            let text: String = it.by_ref().take(2).collect();
            match text.parse::<u8>() {
                Ok(n) if limit.map_or(true, |max| n < max) => Ok(n),
                _ => Err(TimecodeError {
                    kind: TimecodeErrorKind::InvalidFormat,
                }),
            }
        }

        let mut it = s.chars();

        let hour = field(&mut it, Some(60))?;

        // The first separator fixes the notation for the rest of the string.
        let sep = match it.next() {
            Some(c) if c == ':' || c == ';' || c == '.' => c,
            _ => {
                return Err(TimecodeError {
                    kind: InvalidFormat,
                });
            }
        };

        let minute = field(&mut it, Some(60))?;

        // The second separator must repeat the first.
        if it.next() != Some(sep) {
            return Err(TimecodeError {
                kind: InvalidFormat,
            });
        }

        let second = field(&mut it, Some(60))?;

        // The final separator decides drop-frame. With colon notation a ';'
        // or '.' is still allowed here (e.g. "00:00:00;00") and marks drop
        // frame; ':' marks non-drop.
        let drop_frame = match (it.next(), sep) {
            (Some(':'), ':') => false,
            (Some(';'), ';') | (Some(';'), ':') => true,
            (Some('.'), '.') | (Some('.'), ':') => true,
            _ => {
                return Err(TimecodeError {
                    kind: InvalidFormat,
                });
            }
        };

        // The frame field has no parse-time upper bound; `new` validates it
        // against the frame rate.
        let frame = field(&mut it, None)?;

        // Trailing characters are not allowed.
        if it.next() != None {
            return Err(TimecodeError {
                kind: InvalidFormat,
            });
        }

        // Drop-frame notation is only valid for drop-frame rates.
        if drop_frame && !T::DROP_FRAME {
            return Err(TimecodeError {
                kind: InvalidFormat,
            });
        }

        // Any failure in `new` means the fields were well-formed but not a
        // valid timecode for this frame rate.
        match Timecode::<T>::new(hour, minute, second, frame) {
            Ok(timecode) => Ok(timecode),
            Err(_) => Err(TimecodeError {
                kind: InvalidTimecode,
            }),
        }
    }
}
impl<T> fmt::Display for Timecode<T>
where
    T: FrameRate,
{
    /// Formats as `HH:MM:SS:FF`, using `;` before the frame field for
    /// drop-frame rates.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only the final separator reflects drop-frame notation.
        let frame_sep = if T::DROP_FRAME { ';' } else { ':' };
        write!(
            f,
            "{:02}:{:02}:{:02}{}{:02}",
            self.hour, self.minute, self.second, frame_sep, self.frame
        )
    }
}
// Generates `From<int>`, `Add<int>`, `AddAssign<int>`, `Sub<int>` and
// `SubAssign<int>` implementations for `Timecode<T>` for each listed integer
// type. All arithmetic goes through `normalize`, which wraps the frame
// number into `0..T::MAX_FRAMES`.
//
// NOTE(review): `self.frame_number as $t` truncates for narrow types such as
// u8/i8, and `self.frame_number as $t - other` can underflow for unsigned
// types — confirm callers only use the narrow instantiations with in-range
// values.
macro_rules! impl_int_all {
($($t:ty)*) => ($(
/// Create a timecode with the given frame number.
impl<T> From<$t> for Timecode<T>
where
T: FrameRate,
{
fn from(frame_number: $t) -> Self {
// Wrap the frame number into the valid range before deriving fields.
let new_frame_number = frame_number.normalize(T::MAX_FRAMES as $t);
let (hour, minute, second, frame) =
T::calculate_time_code(new_frame_number);
Timecode {
frame_number: new_frame_number,
hour,
minute,
second,
frame,
frame_rate: marker::PhantomData,
}
}
}
/// Make a new timecode by adding a number of frames to a timecode.
impl<T> ops::Add<$t> for Timecode<T>
where
T: FrameRate,
{
type Output = Self;
fn add(self, other: $t) -> Self {
// `From` performs the normalization and field recalculation.
Timecode::<T>::from(self.frame_number as $t + other)
}
}
/// Add a number of frames to a timecode.
impl<T> ops::AddAssign<$t> for Timecode<T>
where
T: FrameRate,
{
fn add_assign(&mut self, other: $t) {
let new_frame_number = (self.frame_number as $t + other)
.normalize(T::MAX_FRAMES as $t);
// Recompute every display field from the normalized frame number.
let (hour, minute, second, frame) =
T::calculate_time_code(new_frame_number);
self.hour = hour;
self.minute = minute;
self.second = second;
self.frame = frame;
self.frame_number = new_frame_number;
}
}
/// Make a new timecode by removing a number of frames to a timecode.
impl<T> ops::Sub<$t> for Timecode<T>
where
T: FrameRate,
{
type Output = Self;
fn sub(self, other: $t) -> Self {
Timecode::<T>::from(self.frame_number as $t - other)
}
}
/// Remove a number of frames from a timecode.
impl<T> ops::SubAssign<$t> for Timecode<T>
where
T: FrameRate,
{
fn sub_assign(&mut self, other: $t) {
let new_frame_number = (self.frame_number as $t - other)
.normalize(T::MAX_FRAMES as $t);
let (hour, minute, second, frame) =
T::calculate_time_code(new_frame_number);
self.hour = hour;
self.minute = minute;
self.second = second;
self.frame = frame;
self.frame_number = new_frame_number;
}
}
)*)
}
impl_int_all! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
/// Make a new timecode by adding two timecodes together. The result is a
/// timecode whose `frame_number` is the sum of the frame numbers of the two
/// added timecodes.
///
/// ```
/// use video_timecode::*;
///
/// let tc1 = Timecode::<FrameRate24>::new(0, 0, 20, 0).unwrap();
/// let tc2 = Timecode::<FrameRate24>::new(0, 0, 10, 0).unwrap();
/// let tc3 = tc1 + tc2;
/// assert_eq!(tc3, Timecode::<FrameRate24>::new(0, 0, 30, 0).unwrap());
/// ```
///
/// # Adding Timecodes of different frame rates
///
/// Adding timecodes of different framerates together is not supported: it
/// normally does not make any sense, so the programmer must mark the intent
/// explicitly by adding a number of frames instead.
///
/// ```compile_fail
/// use video_timecode::*;
///
/// let tc1 = Timecode::<FrameRate2997>::new(0, 0, 0, 0).unwrap();
/// let tc2 = Timecode::<FrameRate24>::new(0, 0, 10, 0).unwrap();
/// let tc3 = tc1 + tc2;
/// ```
///
/// # Timecode roll-over
///
/// The timecode (including the `frame_number` field) rolls over at 24 hours.
///
/// ```
/// use video_timecode::*;
///
/// let tc1 = Timecode::<FrameRate24>::new(23, 59, 30, 0).unwrap();
/// let tc2 = Timecode::<FrameRate24>::new(0, 1, 0, 0).unwrap();
/// let tc3 = tc1 + tc2;
/// assert_eq!(tc3, Timecode::<FrameRate24>::new(0, 0, 30, 0).unwrap());
/// ```
impl<T> ops::Add for Timecode<T>
where
    T: FrameRate,
{
    type Output = Timecode<T>;
    fn add(self, other: Self) -> Self {
        // Delegate to `Add<u32>`, which normalizes and handles roll-over.
        let frames = other.frame_number;
        self + frames
    }
}
/// Add one timecode to another of the same frame rate in place. Afterwards
/// the first timecode's `frame_number` is the sum of the frame numbers of
/// the two timecodes.
///
/// ```
/// use video_timecode::*;
///
/// let mut tc1 = Timecode::<FrameRate24>::new(0, 0, 10, 0).unwrap();
/// let tc2 = Timecode::<FrameRate24>::new(0, 0, 10, 0).unwrap();
/// tc1 += tc2;
///
/// assert_eq!(tc1, Timecode::<FrameRate24>::new(0, 0, 20, 0).unwrap());
/// ```
impl<T> ops::AddAssign for Timecode<T>
where
    T: FrameRate,
{
    fn add_assign(&mut self, other: Self) {
        // Delegate to `AddAssign<u32>`, which normalizes and updates fields.
        let frames = other.frame_number;
        *self += frames;
    }
}
|
use async_std::os::unix::net::UnixStream;
use async_std::task;
use hyperspace_common::*;
mod freemap;
mod session;
mod stream;
pub use hyperspace_common::codegen;
pub use session::*;
pub use stream::*;
/// Open a remote corestore
///
/// Example:
/// ```no_run
/// # #[async_std::main]
/// # async fn main () -> anyhow::Result<()> {
/// use hyperspace_client::open_corestore;
/// let mut corestore = open_corestore(None).await?;
/// let mut feed = corestore.open_by_name("somename").await?;
/// let block = "hello, world".as_bytes().to_vec();
/// feed.append(vec![block]).await?;
/// let _block = feed.get(0).await?;
/// Ok(())
/// # }
/// ```
pub async fn open_corestore(host: Option<String>) -> std::io::Result<RemoteCorestore> {
    // Resolve the daemon socket path for the (optional) host and connect.
    let stream = UnixStream::connect(socket_path(host)).await?;
    let mut rpc = hrpc::Rpc::new();
    let corestore = RemoteCorestore::new(&mut rpc);
    // Drive the RPC connection on a background task; the returned corestore
    // communicates through it.
    task::spawn(async move {
        // NOTE(review): a connection failure panics this background task —
        // consider surfacing the error to the caller instead.
        rpc.connect(stream).await.unwrap();
    });
    Ok(corestore)
}
|
/// Reader of register USBPHY_DIRECT_OVERRIDE.
pub type R = crate::R<u32, super::USBPHY_DIRECT_OVERRIDE>;
/// Writer for register USBPHY_DIRECT_OVERRIDE.
pub type W = crate::W<u32, super::USBPHY_DIRECT_OVERRIDE>;
/// Register USBPHY_DIRECT_OVERRIDE `reset()`s with value 0.
impl crate::ResetValue for super::USBPHY_DIRECT_OVERRIDE {
    type Type = u32;
    /// All override bits are disabled after reset.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `TX_DIFFMODE_OVERRIDE_EN`"]
pub type TX_DIFFMODE_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_DIFFMODE_OVERRIDE_EN`"]
pub struct TX_DIFFMODE_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_DIFFMODE_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Reader of field `DM_PULLUP_OVERRIDE_EN`"]
pub type DM_PULLUP_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DM_PULLUP_OVERRIDE_EN`"]
pub struct DM_PULLUP_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DM_PULLUP_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Reader of field `TX_FSSLEW_OVERRIDE_EN`"]
pub type TX_FSSLEW_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_FSSLEW_OVERRIDE_EN`"]
pub struct TX_FSSLEW_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_FSSLEW_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Reader of field `TX_PD_OVERRIDE_EN`"]
pub type TX_PD_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_PD_OVERRIDE_EN`"]
pub struct TX_PD_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_PD_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Reader of field `RX_PD_OVERRIDE_EN`"]
pub type RX_PD_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RX_PD_OVERRIDE_EN`"]
pub struct RX_PD_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> RX_PD_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Reader of field `TX_DM_OVERRIDE_EN`"]
pub type TX_DM_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_DM_OVERRIDE_EN`"]
pub struct TX_DM_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_DM_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Reader of field `TX_DP_OVERRIDE_EN`"]
pub type TX_DP_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_DP_OVERRIDE_EN`"]
pub struct TX_DP_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_DP_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
self.w
}
}
#[doc = "Reader of field `TX_DM_OE_OVERRIDE_EN`"]
pub type TX_DM_OE_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_DM_OE_OVERRIDE_EN`"]
pub struct TX_DM_OE_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_DM_OE_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
self.w
}
}
#[doc = "Reader of field `TX_DP_OE_OVERRIDE_EN`"]
pub type TX_DP_OE_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `TX_DP_OE_OVERRIDE_EN`"]
pub struct TX_DP_OE_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> TX_DP_OE_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Reader of field `DM_PULLDN_EN_OVERRIDE_EN`"]
pub type DM_PULLDN_EN_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DM_PULLDN_EN_OVERRIDE_EN`"]
pub struct DM_PULLDN_EN_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DM_PULLDN_EN_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "Reader of field `DP_PULLDN_EN_OVERRIDE_EN`"]
pub type DP_PULLDN_EN_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DP_PULLDN_EN_OVERRIDE_EN`"]
pub struct DP_PULLDN_EN_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DP_PULLDN_EN_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "Reader of field `DP_PULLUP_EN_OVERRIDE_EN`"]
pub type DP_PULLUP_EN_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DP_PULLUP_EN_OVERRIDE_EN`"]
pub struct DP_PULLUP_EN_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DP_PULLUP_EN_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Reader of field `DM_PULLUP_HISEL_OVERRIDE_EN`"]
pub type DM_PULLUP_HISEL_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DM_PULLUP_HISEL_OVERRIDE_EN`"]
pub struct DM_PULLUP_HISEL_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DM_PULLUP_HISEL_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Reader of field `DP_PULLUP_HISEL_OVERRIDE_EN`"]
pub type DP_PULLUP_HISEL_OVERRIDE_EN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `DP_PULLUP_HISEL_OVERRIDE_EN`"]
pub struct DP_PULLUP_HISEL_OVERRIDE_EN_W<'a> {
w: &'a mut W,
}
impl<'a> DP_PULLUP_HISEL_OVERRIDE_EN_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
// Read accessors for the register's override-enable fields. Each method
// extracts a single bit (named in the method's `#[doc]`) by shifting and
// masking the raw register value. Bits 13–14 have no accessor here — they
// are not mapped to any field of this register.
impl R {
    #[doc = "Bit 15"]
    #[inline(always)]
    pub fn tx_diffmode_override_en(&self) -> TX_DIFFMODE_OVERRIDE_EN_R {
        TX_DIFFMODE_OVERRIDE_EN_R::new(((self.bits >> 15) & 0x01) != 0)
    }
    #[doc = "Bit 12"]
    #[inline(always)]
    pub fn dm_pullup_override_en(&self) -> DM_PULLUP_OVERRIDE_EN_R {
        DM_PULLUP_OVERRIDE_EN_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 11"]
    #[inline(always)]
    pub fn tx_fsslew_override_en(&self) -> TX_FSSLEW_OVERRIDE_EN_R {
        TX_FSSLEW_OVERRIDE_EN_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 10"]
    #[inline(always)]
    pub fn tx_pd_override_en(&self) -> TX_PD_OVERRIDE_EN_R {
        TX_PD_OVERRIDE_EN_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 9"]
    #[inline(always)]
    pub fn rx_pd_override_en(&self) -> RX_PD_OVERRIDE_EN_R {
        RX_PD_OVERRIDE_EN_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 8"]
    #[inline(always)]
    pub fn tx_dm_override_en(&self) -> TX_DM_OVERRIDE_EN_R {
        TX_DM_OVERRIDE_EN_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 7"]
    #[inline(always)]
    pub fn tx_dp_override_en(&self) -> TX_DP_OVERRIDE_EN_R {
        TX_DP_OVERRIDE_EN_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 6"]
    #[inline(always)]
    pub fn tx_dm_oe_override_en(&self) -> TX_DM_OE_OVERRIDE_EN_R {
        TX_DM_OE_OVERRIDE_EN_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 5"]
    #[inline(always)]
    pub fn tx_dp_oe_override_en(&self) -> TX_DP_OE_OVERRIDE_EN_R {
        TX_DP_OE_OVERRIDE_EN_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 4"]
    #[inline(always)]
    pub fn dm_pulldn_en_override_en(&self) -> DM_PULLDN_EN_OVERRIDE_EN_R {
        DM_PULLDN_EN_OVERRIDE_EN_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 3"]
    #[inline(always)]
    pub fn dp_pulldn_en_override_en(&self) -> DP_PULLDN_EN_OVERRIDE_EN_R {
        DP_PULLDN_EN_OVERRIDE_EN_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 2"]
    #[inline(always)]
    pub fn dp_pullup_en_override_en(&self) -> DP_PULLUP_EN_OVERRIDE_EN_R {
        DP_PULLUP_EN_OVERRIDE_EN_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 1"]
    #[inline(always)]
    pub fn dm_pullup_hisel_override_en(&self) -> DM_PULLUP_HISEL_OVERRIDE_EN_R {
        DM_PULLUP_HISEL_OVERRIDE_EN_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 0"]
    #[inline(always)]
    pub fn dp_pullup_hisel_override_en(&self) -> DP_PULLUP_HISEL_OVERRIDE_EN_R {
        DP_PULLUP_HISEL_OVERRIDE_EN_R::new((self.bits & 0x01) != 0)
    }
}
// Write accessors for the register's override-enable fields. Each method
// returns a single-bit write proxy borrowing `self`; the proxy's
// `set_bit`/`clear_bit`/`bit` perform the actual read-modify-write on `bits`.
impl W {
    #[doc = "Bit 15"]
    #[inline(always)]
    pub fn tx_diffmode_override_en(&mut self) -> TX_DIFFMODE_OVERRIDE_EN_W {
        TX_DIFFMODE_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 12"]
    #[inline(always)]
    pub fn dm_pullup_override_en(&mut self) -> DM_PULLUP_OVERRIDE_EN_W {
        DM_PULLUP_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 11"]
    #[inline(always)]
    pub fn tx_fsslew_override_en(&mut self) -> TX_FSSLEW_OVERRIDE_EN_W {
        TX_FSSLEW_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 10"]
    #[inline(always)]
    pub fn tx_pd_override_en(&mut self) -> TX_PD_OVERRIDE_EN_W {
        TX_PD_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 9"]
    #[inline(always)]
    pub fn rx_pd_override_en(&mut self) -> RX_PD_OVERRIDE_EN_W {
        RX_PD_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 8"]
    #[inline(always)]
    pub fn tx_dm_override_en(&mut self) -> TX_DM_OVERRIDE_EN_W {
        TX_DM_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 7"]
    #[inline(always)]
    pub fn tx_dp_override_en(&mut self) -> TX_DP_OVERRIDE_EN_W {
        TX_DP_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 6"]
    #[inline(always)]
    pub fn tx_dm_oe_override_en(&mut self) -> TX_DM_OE_OVERRIDE_EN_W {
        TX_DM_OE_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 5"]
    #[inline(always)]
    pub fn tx_dp_oe_override_en(&mut self) -> TX_DP_OE_OVERRIDE_EN_W {
        TX_DP_OE_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 4"]
    #[inline(always)]
    pub fn dm_pulldn_en_override_en(&mut self) -> DM_PULLDN_EN_OVERRIDE_EN_W {
        DM_PULLDN_EN_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 3"]
    #[inline(always)]
    pub fn dp_pulldn_en_override_en(&mut self) -> DP_PULLDN_EN_OVERRIDE_EN_W {
        DP_PULLDN_EN_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 2"]
    #[inline(always)]
    pub fn dp_pullup_en_override_en(&mut self) -> DP_PULLUP_EN_OVERRIDE_EN_W {
        DP_PULLUP_EN_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 1"]
    #[inline(always)]
    pub fn dm_pullup_hisel_override_en(&mut self) -> DM_PULLUP_HISEL_OVERRIDE_EN_W {
        DM_PULLUP_HISEL_OVERRIDE_EN_W { w: self }
    }
    #[doc = "Bit 0"]
    #[inline(always)]
    pub fn dp_pullup_hisel_override_en(&mut self) -> DP_PULLUP_HISEL_OVERRIDE_EN_W {
        DP_PULLUP_HISEL_OVERRIDE_EN_W { w: self }
    }
}
|
/// AddTimeOption options for adding time to an issue
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct AddTimeOption {
    /// Creation time of the tracked-time entry (optional).
    pub created: Option<String>,
    /// time in seconds
    pub time: i64,
    /// User who spent the time (optional)
    pub user_name: Option<String>,
}
impl AddTimeOption {
    /// Create a builder for this object.
    ///
    /// Starts in the `MissingTime` type-state: the required `time` field must
    /// be set before the builder can convert into an `AddTimeOption`.
    #[inline]
    pub fn builder() -> AddTimeOptionBuilder<crate::generics::MissingTime> {
        AddTimeOptionBuilder {
            body: Default::default(),
            _time: core::marker::PhantomData,
        }
    }
    /// Create a POST builder for the "add time to an issue" endpoint.
    ///
    /// The path parameters (owner, repo, index) and the required `time` field
    /// all begin in their `Missing*` type-states and must be supplied before
    /// the request becomes sendable.
    #[inline]
    pub fn issue_add_time() -> AddTimeOptionPostBuilder<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingIndex, crate::generics::MissingTime> {
        AddTimeOptionPostBuilder {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_index: core::marker::PhantomData,
            _time: core::marker::PhantomData,
        }
    }
}
// Conversion out of a completed builder. Implemented as `From` (the idiomatic
// direction — clippy's `from_over_into`) rather than a manual `Into`; the
// standard library's blanket impl still provides `Into<AddTimeOption>`, so
// existing `.into()` call sites compile unchanged.
impl From<AddTimeOptionBuilder<crate::generics::TimeExists>> for AddTimeOption {
    fn from(builder: AddTimeOptionBuilder<crate::generics::TimeExists>) -> Self {
        builder.body
    }
}
// Conversion out of a fully-populated POST builder. Implemented as `From`
// (the idiomatic direction — clippy's `from_over_into`) rather than a manual
// `Into`; the blanket impl keeps `.into()` call sites working unchanged.
impl From<AddTimeOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists, crate::generics::TimeExists>> for AddTimeOption {
    fn from(builder: AddTimeOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists, crate::generics::TimeExists>) -> Self {
        builder.inner.body
    }
}
/// Builder for [`AddTimeOption`](./struct.AddTimeOption.html) object.
#[derive(Debug, Clone)]
pub struct AddTimeOptionBuilder<Time> {
    // The object under construction.
    body: self::AddTimeOption,
    // Zero-sized type-state marker recording whether `time` has been set.
    _time: core::marker::PhantomData<Time>,
}
impl<Time> AddTimeOptionBuilder<Time> {
    /// Sets the (optional) creation timestamp.
    #[inline]
    pub fn created(mut self, value: impl Into<String>) -> Self {
        self.body.created = Some(value.into());
        self
    }
    /// time in seconds
    ///
    /// Setting this required field advances the type-state to `TimeExists`,
    /// which unlocks the conversion into `AddTimeOption`.
    #[inline]
    pub fn time(mut self, value: impl Into<i64>) -> AddTimeOptionBuilder<crate::generics::TimeExists> {
        self.body.time = value.into();
        // Marker-only transition: source and target differ solely in the
        // zero-sized PhantomData parameter.
        // NOTE(review): unlike the POST builder, this struct is not
        // #[repr(transparent)], so identical layout across the two
        // instantiations is assumed rather than guaranteed — confirm against
        // the code generator's assumptions.
        unsafe { std::mem::transmute(self) }
    }
    /// User who spent the time (optional)
    #[inline]
    pub fn user_name(mut self, value: impl Into<String>) -> Self {
        self.body.user_name = Some(value.into());
        self
    }
}
/// Builder created by [`AddTimeOption::issue_add_time`](./struct.AddTimeOption.html#method.issue_add_time) method for a `POST` operation associated with `AddTimeOption`.
// `repr(transparent)` pins this wrapper's layout to that of the container
// (the PhantomData fields are zero-sized), which is what makes the
// marker-only transmutes between type-state instantiations sound.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct AddTimeOptionPostBuilder<Owner, Repo, Index, Time> {
    inner: AddTimeOptionPostBuilderContainer,
    // Type-state markers: each records whether the corresponding required
    // input (path parameter or body `time`) has been provided.
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
    _param_index: core::marker::PhantomData<Index>,
    _time: core::marker::PhantomData<Time>,
}
// Untyped storage shared by every type-state instantiation of the POST
// builder: the request body plus the three path parameters.
#[derive(Debug, Default, Clone)]
struct AddTimeOptionPostBuilderContainer {
    body: self::AddTimeOption,
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_index: Option<i64>,
}
impl<Owner, Repo, Index, Time> AddTimeOptionPostBuilder<Owner, Repo, Index, Time> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> AddTimeOptionPostBuilder<crate::generics::OwnerExists, Repo, Index, Time> {
        self.inner.param_owner = Some(value.into());
        // Marker-only type-state transition; sound because the builder is
        // #[repr(transparent)] over its container, so every instantiation
        // shares one layout.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> AddTimeOptionPostBuilder<Owner, crate::generics::RepoExists, Index, Time> {
        self.inner.param_repo = Some(value.into());
        unsafe { std::mem::transmute(self) }
    }
    /// index of the issue
    #[inline]
    pub fn index(mut self, value: impl Into<i64>) -> AddTimeOptionPostBuilder<Owner, Repo, crate::generics::IndexExists, Time> {
        self.inner.param_index = Some(value.into());
        unsafe { std::mem::transmute(self) }
    }
    /// Sets the (optional) creation timestamp on the request body.
    #[inline]
    pub fn created(mut self, value: impl Into<String>) -> Self {
        self.inner.body.created = Some(value.into());
        self
    }
    /// time in seconds
    ///
    /// Required body field: advances the `Time` type-state to `TimeExists`.
    #[inline]
    pub fn time(mut self, value: impl Into<i64>) -> AddTimeOptionPostBuilder<Owner, Repo, Index, crate::generics::TimeExists> {
        self.inner.body.time = value.into();
        unsafe { std::mem::transmute(self) }
    }
    /// User who spent the time (optional)
    #[inline]
    pub fn user_name(mut self, value: impl Into<String>) -> Self {
        self.inner.body.user_name = Some(value.into());
        self
    }
}
// The request becomes sendable only in the fully-populated (`*Exists`)
// type-state, which is why the `expect`s in `rel_path` are unreachable in
// practice: the setters are the only path to this instantiation.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for AddTimeOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists, crate::generics::TimeExists> {
    type Output = crate::tracked_time::TrackedTime;
    const METHOD: http::Method = http::Method::POST;
    // Builds the endpoint path from the three captured path parameters.
    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        format!("/repos/{owner}/{repo}/issues/{index}/times", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), index=self.inner.param_index.as_ref().expect("missing parameter index?")).into()
    }
    // Attaches the accumulated body as the JSON payload of the request.
    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
            .json(&self.inner.body))
    }
}
// Typed accessors for response headers of the add-time call. Each does a
// lossy UTF-8 decode of the raw header bytes; `parse::<String>()` is
// infallible, so `None` only means the header was absent.
impl crate::client::ResponseWrapper<crate::tracked_time::TrackedTime, AddTimeOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists, crate::generics::TimeExists>> {
    /// Value of the `message` response header, if present.
    #[inline]
    pub fn message(&self) -> Option<String> {
        self.headers.get("message").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
    /// Value of the `url` response header, if present.
    #[inline]
    pub fn url(&self) -> Option<String> {
        self.headers.get("url").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
}
|
use std::io;
use bincode::{serialize, deserialize, Infinite};
use bytes::{BytesMut, BufMut};
/// Minimal codec abstraction for translating between a byte buffer and typed
/// messages on a sequential stream.
trait SequentialStreamCodec {
    /// Message type produced by `decode`.
    type In;
    /// Message type consumed by `encode`.
    type Out;
    /// Error type surfaced by `decode`.
    type Error;
    /// Attempts to decode one incoming message from `buf`.
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Self::In, Self::Error>;
    /// Encodes `msg`, appending the serialized bytes to `buf`.
    fn encode(&mut self, msg: Self::Out, buf: &mut BytesMut);
}
/// Codec translating between raw socket bytes and `Request`/`Response`
/// values via `bincode`.
struct SocketCodec;
impl SequentialStreamCodec for SocketCodec {
    type In = Request;
    type Out = Response;
    type Error = io::Error;
    /// Deserializes a `Request` from the buffered bytes.
    ///
    /// Fix: malformed input from the peer previously hit `unwrap()` and
    /// panicked the whole process; it is now reported as the declared
    /// `Self::Error` (`io::ErrorKind::InvalidData`) so the caller can drop
    /// the bad frame or close the connection.
    ///
    /// NOTE(review): `decode` reads the buffer but never splits/consumes the
    /// decoded bytes from it — confirm the caller clears `buf` per message.
    fn decode(&mut self, buf: &mut BytesMut) -> Result<Self::In, Self::Error> {
        deserialize(&buf[..])
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e.to_string()))
    }
    /// Serializes `msg` and appends the bytes to `buf`.
    ///
    /// `serialize` of an in-memory value only fails on a size-limit breach,
    /// which the `Infinite` limit rules out, so the `unwrap()` cannot fire.
    fn encode(&mut self, msg: Self::Out, buf: &mut BytesMut) {
        let encoded: Vec<u8> = serialize(&msg, Infinite).unwrap();
        buf.reserve(encoded.len());
        buf.put(encoded);
    }
}
|
// Generic reader/writer aliases for the BGPFCCR register and its fields.
// `FieldWriter<'a, REG, N, O>` writes an N-bit field at bit offset O;
// `BitWriter` is the single-bit specialization.
#[doc = "Register `BGPFCCR` reader"]
pub type R = crate::R<BGPFCCR_SPEC>;
#[doc = "Register `BGPFCCR` writer"]
pub type W = crate::W<BGPFCCR_SPEC>;
#[doc = "Field `CM` reader - Color mode"]
pub type CM_R = crate::FieldReader;
#[doc = "Field `CM` writer - Color mode"]
pub type CM_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `CCM` reader - CLUT Color mode"]
pub type CCM_R = crate::BitReader;
#[doc = "Field `CCM` writer - CLUT Color mode"]
pub type CCM_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `START` reader - Start"]
pub type START_R = crate::BitReader;
#[doc = "Field `START` writer - Start"]
pub type START_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `CS` reader - CLUT size"]
pub type CS_R = crate::FieldReader;
#[doc = "Field `CS` writer - CLUT size"]
pub type CS_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
#[doc = "Field `AM` reader - Alpha mode"]
pub type AM_R = crate::FieldReader;
#[doc = "Field `AM` writer - Alpha mode"]
pub type AM_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `AI` reader - Alpha Inverted"]
pub type AI_R = crate::BitReader;
#[doc = "Field `AI` writer - Alpha Inverted"]
pub type AI_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RBS` reader - Red Blue Swap"]
pub type RBS_R = crate::BitReader;
#[doc = "Field `RBS` writer - Red Blue Swap"]
pub type RBS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ALPHA` reader - Alpha value"]
pub type ALPHA_R = crate::FieldReader;
#[doc = "Field `ALPHA` writer - Alpha value"]
pub type ALPHA_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
// Read accessors for BGPFCCR fields: each shifts and masks the raw 32-bit
// value down to the field's bit range (stated in the method's `#[doc]`).
impl R {
    #[doc = "Bits 0:3 - Color mode"]
    #[inline(always)]
    pub fn cm(&self) -> CM_R {
        CM_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bit 4 - CLUT Color mode"]
    #[inline(always)]
    pub fn ccm(&self) -> CCM_R {
        CCM_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Start"]
    #[inline(always)]
    pub fn start(&self) -> START_R {
        START_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bits 8:15 - CLUT size"]
    #[inline(always)]
    pub fn cs(&self) -> CS_R {
        CS_R::new(((self.bits >> 8) & 0xff) as u8)
    }
    #[doc = "Bits 16:17 - Alpha mode"]
    #[inline(always)]
    pub fn am(&self) -> AM_R {
        AM_R::new(((self.bits >> 16) & 3) as u8)
    }
    #[doc = "Bit 20 - Alpha Inverted"]
    #[inline(always)]
    pub fn ai(&self) -> AI_R {
        AI_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - Red Blue Swap"]
    #[inline(always)]
    pub fn rbs(&self) -> RBS_R {
        RBS_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bits 24:31 - Alpha value"]
    #[inline(always)]
    pub fn alpha(&self) -> ALPHA_R {
        ALPHA_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
// Write accessors for BGPFCCR fields: each returns a typed field-writer
// proxy parameterized with the field's bit offset (the const generic).
impl W {
    #[doc = "Bits 0:3 - Color mode"]
    #[inline(always)]
    #[must_use]
    pub fn cm(&mut self) -> CM_W<BGPFCCR_SPEC, 0> {
        CM_W::new(self)
    }
    #[doc = "Bit 4 - CLUT Color mode"]
    #[inline(always)]
    #[must_use]
    pub fn ccm(&mut self) -> CCM_W<BGPFCCR_SPEC, 4> {
        CCM_W::new(self)
    }
    #[doc = "Bit 5 - Start"]
    #[inline(always)]
    #[must_use]
    pub fn start(&mut self) -> START_W<BGPFCCR_SPEC, 5> {
        START_W::new(self)
    }
    #[doc = "Bits 8:15 - CLUT size"]
    #[inline(always)]
    #[must_use]
    pub fn cs(&mut self) -> CS_W<BGPFCCR_SPEC, 8> {
        CS_W::new(self)
    }
    #[doc = "Bits 16:17 - Alpha mode"]
    #[inline(always)]
    #[must_use]
    pub fn am(&mut self) -> AM_W<BGPFCCR_SPEC, 16> {
        AM_W::new(self)
    }
    #[doc = "Bit 20 - Alpha Inverted"]
    #[inline(always)]
    #[must_use]
    pub fn ai(&mut self) -> AI_W<BGPFCCR_SPEC, 20> {
        AI_W::new(self)
    }
    #[doc = "Bit 21 - Red Blue Swap"]
    #[inline(always)]
    #[must_use]
    pub fn rbs(&mut self) -> RBS_W<BGPFCCR_SPEC, 21> {
        RBS_W::new(self)
    }
    #[doc = "Bits 24:31 - Alpha value"]
    #[inline(always)]
    #[must_use]
    pub fn alpha(&mut self) -> ALPHA_W<BGPFCCR_SPEC, 24> {
        ALPHA_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe`: raw writes bypass the typed field writers and can set
    // reserved bit patterns.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "background PFC control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`bgpfccr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`bgpfccr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct BGPFCCR_SPEC;
// BGPFCCR is a 32-bit register.
impl crate::RegisterSpec for BGPFCCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`bgpfccr::R`](R) reader structure"]
impl crate::Readable for BGPFCCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`bgpfccr::W`](W) writer structure"]
// Both modify bitmaps are zero: no bits of this register have
// write-one/write-zero-to-clear semantics.
impl crate::Writable for BGPFCCR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets BGPFCCR to value 0"]
impl crate::Resettable for BGPFCCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use common::catalog::Catalog;
use common::logical_plan::*;
use common::table::*;
use common::{get_name, CrustyError, DataType, Field, PredicateOp};
use sqlparser::ast::{
BinaryOperator, Expr, Function, JoinConstraint, JoinOperator, SelectItem, SetExpr, TableFactor,
Value,
};
use std::collections::HashSet;
/// Translates input to a LogicalPlan
/// Validates the columns and tables referenced using the catalog
/// Shares lifetime 'a with catalog
pub struct TranslateAndValidate<'a, T: Catalog> {
    /// Logical plan of operators encountered so far.
    plan: LogicalPlan,
    /// Catalog to validate the translations.
    catalog: &'a T,
    /// List of tables encountered. Used for field validation.
    /// (Populated in `process_table_factor`; consulted when disambiguating
    /// unqualified column names.)
    tables: Vec<String>,
}
impl<'a, T: 'a + Catalog> TranslateAndValidate<'a, T> {
    /// Creates a new TranslateAndValidate object with an empty plan and no
    /// tables seen yet.
    fn new(catalog: &'a T) -> Self {
        Self {
            plan: LogicalPlan::new(),
            catalog,
            tables: Vec::new(),
        }
    }
    /// Given a column name, try to figure out what table it belongs to by looking through all of the tables.
    ///
    /// # Arguments
    ///
    /// * `identifiers` - a list of elements in a multi-part identifier e.g. table.column would be vec!["table", "column"]
    ///
    /// # Returns
    ///
    /// FieldIdent's of the form { table: table, column: table.column, alias: column }
    /// or { table: table, column: table.column} if the full identifier is passed.
    fn disambiguate_name(&self, identifiers: Vec<&str>) -> Result<FieldIdentifier, CrustyError> {
        let orig = identifiers.join(".");
        if identifiers.len() > 2 {
            return Err(CrustyError::ValidationError(format!(
                "No . table names supported in field {}",
                orig
            )));
        }
        // Fully-qualified name (table.column): validate directly.
        if identifiers.len() == 2 {
            let table_id = Table::get_table_id(&identifiers[0]);
            if self.catalog.is_valid_column(table_id, &identifiers[1]) {
                return Ok(FieldIdentifier::new(&identifiers[0], &orig));
            }
            return Err(CrustyError::ValidationError(format!(
                "The field {} is not present in tables listed in the query",
                orig
            )));
        }
        // Bare column name: search every table in the query, rejecting the
        // name as ambiguous if more than one table defines it.
        let mut field = None;
        for table in &self.tables {
            let table_id = Table::get_table_id(table);
            if self.catalog.is_valid_column(table_id, &orig) {
                if field.is_some() {
                    return Err(CrustyError::ValidationError(format!(
                        "The field {} could refer to more than one table listed in the query",
                        orig
                    )));
                }
                let new_name = format!("{}.{}", table, orig);
                field = Some(FieldIdentifier::new_column_alias(table, &new_name, &orig));
            }
        }
        field.ok_or_else(|| {
            CrustyError::ValidationError(format!(
                "The field {} is not present in tables listed in the query",
                orig
            ))
        })
    }
    /// Translates a sqlparser::ast to a LogicalPlan.
    ///
    /// Validates the columns and tables referenced using the catalog.
    /// All table names referenced in from and join clauses are added to self.tables.
    ///
    /// # Arguments
    ///
    /// * `sql` - AST to translate.
    /// * `catalog` - Catalog for validation.
    pub fn from_sql(sql: &sqlparser::ast::Query, catalog: &T) -> Result<LogicalPlan, CrustyError> {
        let mut translator = TranslateAndValidate::new(catalog);
        translator.process_query(sql)?;
        Ok(translator.plan)
    }
    /// Helper function to recursively process sqlparser::ast::Query
    ///
    /// # Arguments
    ///
    /// * `query` - AST to process.
    fn process_query(&mut self, query: &sqlparser::ast::Query) -> Result<(), CrustyError> {
        match &query.body {
            SetExpr::Select(b) => {
                let select = &*b;
                self.process_select(select)
            }
            SetExpr::Query(_) => {
                //TODO NOT HANDLED
                Err(CrustyError::ValidationError(String::from(
                    "Query ops not supported ",
                )))
            }
            SetExpr::SetOperation {
                op: _,
                all: _,
                left: _,
                right: _,
            } => {
                //TODO NOT HANDLED
                Err(CrustyError::ValidationError(String::from(
                    "Set operations not supported ",
                )))
            }
            SetExpr::Values(_) => {
                //TODO NOT HANDLED
                Err(CrustyError::ValidationError(String::from(
                    "Value operation not supported ",
                )))
            }
        }
    }
    /// Helper function to recursively process sqlparser::ast::Select
    ///
    /// Builds the plan bottom-up: scan/join nodes first, then filter,
    /// aggregate/group-by, and finally the projection on top.
    ///
    /// # Arguments
    ///
    /// * `query` - AST of a select query to process.
    fn process_select(&mut self, select: &sqlparser::ast::Select) -> Result<(), CrustyError> {
        // Pointer to the current node.
        let mut node = None;
        // Distinct
        if select.distinct {
            //TODO NOT HANDLED
            return Err(CrustyError::ValidationError(String::from(
                "Distinct not supported ",
            )));
        }
        // The loop below is redundant while only one FROM table is allowed,
        // but is kept for the future when cross products are supported.
        // From
        if select.from.len() > 1 {
            //TODO NOT HANDLED
            return Err(CrustyError::ValidationError(String::from(
                "Cross product not supported ",
            )));
        }
        for sel in &select.from {
            node = Some(self.process_table_factor(&sel.relation)?);
            // Join
            for join in &sel.joins {
                let join_node = self.process_join(&join, node.unwrap())?;
                node = Some(join_node);
            }
        }
        // Where
        if let Some(expr) = &select.selection {
            let predicate = self.process_binary_op(expr)?;
            // table references in filter
            let table = match (&predicate.left, &predicate.right) {
                (PredExpr::Literal(_), PredExpr::Ident(id)) => id.table().to_string(),
                (PredExpr::Ident(id), PredExpr::Literal(_)) => id.table().to_string(),
                _ => {
                    return Err(CrustyError::ValidationError(String::from("Only where predicates with at least one identifier and at least one literal are supported")));
                }
            };
            let op = FilterNode { table, predicate };
            let idx = self.plan.add_node(LogicalOp::Filter(op));
            self.plan.add_edge(idx, node.unwrap());
            node = Some(idx);
        }
        if select.having.is_some() {
            //TODO NOT HANDLED
            return Err(CrustyError::ValidationError(String::from(
                "Having not supported",
            )));
        }
        // Select
        let mut fields = Vec::new();
        let mut has_agg = false;
        let mut wildcard = false;
        for item in &select.projection {
            let field = match item {
                SelectItem::Wildcard => {
                    if select.projection.len() > 1 {
                        return Err(CrustyError::ValidationError(String::from(
                            "Cannot select wildcard and exp in same select",
                        )));
                    }
                    wildcard = true;
                    break;
                }
                SelectItem::UnnamedExpr(expr) => self.expr_to_ident(expr)?,
                SelectItem::ExprWithAlias { expr, alias } => {
                    let mut field = self.expr_to_ident(expr)?;
                    field.set_alias(alias.to_string());
                    field
                }
                _ => {
                    //TODO NOT HANDLED
                    return Err(CrustyError::ValidationError(String::from(
                        "Select unsupported expression",
                    )));
                }
            };
            if field.agg_op().is_some() {
                has_agg = true;
            }
            fields.push(field);
        }
        // Aggregates and group by
        if has_agg {
            let mut group_by = Vec::new();
            {
                let mut group_set = HashSet::new();
                for expr in &select.group_by {
                    let col = match expr {
                        Expr::Identifier(name) => name,
                        _ => {
                            return Err(CrustyError::ValidationError(String::from(
                                "Group by unsupported expression",
                            )));
                        }
                    };
                    let field = self.disambiguate_name(vec![col])?;
                    group_set.insert(field.column().to_string());
                    group_by.push(field);
                }
                // Checks that only aggregates and group by fields are projected out
                for f in &fields {
                    if f.agg_op().is_none() && !group_set.contains(f.column()) {
                        return Err(CrustyError::ValidationError(format!(
                            "The expression '{}' must be part of an aggregate function or group by",
                            f.column()
                        )));
                    }
                }
            }
            let op = AggregateNode {
                fields: fields.clone(),
                group_by,
            };
            let idx = self.plan.add_node(LogicalOp::Aggregate(op));
            self.plan.add_edge(idx, node.unwrap());
            node = Some(idx);
            // Replace field column names with aliases to project
            fields = fields
                .iter()
                .map(|f| {
                    let name = f.alias().unwrap_or_else(|| f.column());
                    FieldIdentifier::new(f.table(), name)
                })
                .collect();
        }
        let identifiers = if wildcard {
            ProjectIdentifiers::Wildcard
        } else {
            ProjectIdentifiers::List(fields)
        };
        let op = ProjectNode { identifiers };
        let idx = self.plan.add_node(LogicalOp::Project(op));
        self.plan.add_edge(idx, node.unwrap());
        Ok(())
    }
    /// Creates a corresponding LogicalOp, adds it to self.plan, and returns the OpIndex.
    ///
    /// Helper function to process sqlparser::ast::TableFactor.
    ///
    /// # Arguments
    ///
    /// * `tf` - Table to process.
    fn process_table_factor(
        &mut self,
        tf: &sqlparser::ast::TableFactor,
    ) -> Result<OpIndex, CrustyError> {
        match tf {
            TableFactor::Table { name, .. } => {
                let name = get_name(&name)?;
                let table_id = Table::get_table_id(&name);
                if !self.catalog.is_valid_table(table_id) {
                    return Err(CrustyError::ValidationError(String::from(
                        "Invalid table name",
                    )));
                }
                self.tables.push(name.clone());
                let op = ScanNode { alias: name };
                Ok(self.plan.add_node(LogicalOp::Scan(op)))
            }
            _ => Err(CrustyError::ValidationError(String::from(
                "Nested joins and derived tables not supported",
            ))),
        }
    }
    /// Returns the name of the table from the node, if the node is a table level operator, like scan. Otherwise, return none.
    ///
    /// # Arguments
    ///
    /// * `node` - Node to get the table name from.
    fn get_table_alias_from_op(&self, node: OpIndex) -> Option<String> {
        match &self.plan.get_operator(node)? {
            LogicalOp::Scan(ScanNode { alias }) => Some(alias.clone()),
            _ => None,
        }
    }
    /// Parses sqlparser::ast::Join into a Join LogicalOp, adds the Op to
    /// logical plan, and returns OpIndex of the join node.
    ///
    /// # Arguments
    ///
    /// * `join` - The join node to parse.
    /// * `left_table_node` - Node containing the left table to join.
    fn process_join(
        &mut self,
        join: &sqlparser::ast::Join,
        left_table_node: OpIndex,
    ) -> Result<OpIndex, CrustyError> {
        let right_table_node = self.process_table_factor(&join.relation)?;
        // Only INNER JOIN ... ON <left op right> is supported.
        let jc = match &join.join_operator {
            JoinOperator::Inner(jc) => jc,
            _ => {
                return Err(CrustyError::ValidationError(String::from(
                    "Unsupported join type",
                )));
            }
        };
        if let JoinConstraint::On(expr) = jc {
            let pred = self.process_binary_op(expr)?;
            // Both sides of the ON predicate must be column identifiers.
            let left = pred
                .left
                .ident()
                .ok_or_else(|| {
                    CrustyError::ValidationError(String::from("Invalid join predicate"))
                })?
                .clone();
            let right = pred
                .right
                .ident()
                .ok_or_else(|| {
                    CrustyError::ValidationError(String::from("Invalid join predicate"))
                })?
                .clone();
            let op = JoinNode {
                left,
                right,
                op: pred.op,
                left_table: self.get_table_alias_from_op(left_table_node),
                right_table: self.get_table_alias_from_op(right_table_node),
            };
            let idx = self.plan.add_node(LogicalOp::Join(op));
            self.plan.add_edge(idx, right_table_node);
            self.plan.add_edge(idx, left_table_node);
            return Ok(idx);
        }
        Err(CrustyError::ValidationError(String::from(
            "Unsupported join type",
        )))
    }
    /// Parses an expression to a predicate node.
    ///
    /// # Arguments
    ///
    /// * `expr` - Expression to parse.
    fn process_binary_op(&self, expr: &Expr) -> Result<PredicateNode, CrustyError> {
        match expr {
            Expr::BinaryOp { left, op, right } => Ok(PredicateNode {
                left: self.expr_to_pred_expr(left)?,
                right: self.expr_to_pred_expr(right)?,
                op: Self::binary_operator_to_predicate(op)?,
            }),
            _ => Err(CrustyError::ValidationError(String::from(
                "Unsupported binary operation",
            ))),
        }
    }
    /// Parses the non-operator parts of the expression to predicate expressions.
    ///
    /// # Arguments
    ///
    /// * `expr` - Non-operator part of the expression to parse.
    fn expr_to_pred_expr(&self, expr: &Expr) -> Result<PredExpr, CrustyError> {
        match expr {
            Expr::Value(val) => match val {
                Value::Number(s) => {
                    // Numbers arrive as strings; only i32 literals are accepted.
                    let i = s.parse::<i32>().map_err(|_| {
                        CrustyError::ValidationError(format!("Unsupported literal {}", s))
                    })?;
                    let f = Field::IntField(i);
                    Ok(PredExpr::Literal(f))
                }
                Value::SingleQuotedString(s) => {
                    let f = Field::StringField(s.to_string());
                    Ok(PredExpr::Literal(f))
                }
                _ => Err(CrustyError::ValidationError(String::from(
                    "Unsupported literal in predicate",
                ))),
            },
            _ => Ok(PredExpr::Ident(self.expr_to_ident(expr)?)),
        }
    }
    /// Parses a binary operator into a predicate operator.
    ///
    /// # Arguments
    ///
    /// * `op` - Binary operator to parse.
    fn binary_operator_to_predicate(op: &BinaryOperator) -> Result<PredicateOp, CrustyError> {
        match op {
            BinaryOperator::Gt => Ok(PredicateOp::GreaterThan),
            BinaryOperator::Lt => Ok(PredicateOp::LessThan),
            BinaryOperator::GtEq => Ok(PredicateOp::GreaterThanOrEq),
            BinaryOperator::LtEq => Ok(PredicateOp::LessThanOrEq),
            BinaryOperator::Eq => Ok(PredicateOp::Equals),
            BinaryOperator::NotEq => Ok(PredicateOp::NotEq),
            _ => Err(CrustyError::ValidationError(String::from(
                "Unsupported binary operation",
            ))),
        }
    }
    /// Validates that an aggregate operation is valid for the type of field.
    ///
    /// Field must
    /// * be disambiguated so that field.column() returns a str of the form table.column
    /// * have an associated op
    ///
    /// (If either precondition is unmet, the field is silently accepted.)
    ///
    /// # Arguments
    ///
    /// * `field` - Field to be aggregated.
    fn validate_aggregate(&self, field: &FieldIdentifier) -> Result<(), CrustyError> {
        let split_field: Vec<&str> = field.column().split('.').collect();
        if field.agg_op().is_none() || split_field.len() != 2 {
            return Ok(());
        }
        let table_name = field.table();
        let col_name = split_field[1];
        let alias = field.alias().unwrap_or_else(|| field.column());
        let op = field.agg_op().unwrap();
        let table_id = Table::get_table_id(table_name);
        let schema = self.catalog.get_table_schema(table_id)?;
        let attr = schema
            .get_attribute(*schema.get_field_index(col_name).unwrap())
            .unwrap();
        // Ints support every aggregate; strings only the order/count-based ones.
        match attr.dtype() {
            DataType::Int => Ok(()),
            DataType::String => match op {
                AggOp::Count | AggOp::Max | AggOp::Min => Ok(()),
                _ => Err(CrustyError::ValidationError(format!(
                    "Cannot perform operation {} on field {}",
                    op, alias,
                ))),
            },
        }
    }
    /// Converts a sqlparser::ast::Expr to a LogicalOp::FieldIdent.
    ///
    /// # Arguments
    ///
    /// * `expr` - Expression to be converted.
    fn expr_to_ident(&self, expr: &Expr) -> Result<FieldIdentifier, CrustyError> {
        match expr {
            Expr::Identifier(name) => self.disambiguate_name(vec![name]),
            Expr::CompoundIdentifier(names) => {
                self.disambiguate_name(names.iter().map(|s| s.as_ref()).collect())
            }
            Expr::Function(Function { name, args, .. }) => {
                let op = match &get_name(name)?.to_uppercase()[..] {
                    "AVG" => AggOp::Avg,
                    "COUNT" => AggOp::Count,
                    "MAX" => AggOp::Max,
                    "MIN" => AggOp::Min,
                    "SUM" => AggOp::Sum,
                    _ => {
                        return Err(CrustyError::ValidationError(String::from(
                            "Unsupported SQL function",
                        )));
                    }
                };
                // Aggregate functions take exactly one argument.
                if args.len() != 1 {
                    return Err(CrustyError::ValidationError(format!(
                        "Wrong number of args in {} operation",
                        name
                    )));
                }
                let mut field = match &args[0] {
                    Expr::Identifier(_) | Expr::CompoundIdentifier(_) => {
                        self.expr_to_ident(&args[0])?
                    }
                    _ => {
                        return Err(CrustyError::ValidationError(String::from(
                            "Aggregate over unsupported expression",
                        )));
                    }
                };
                field.set_op(op);
                field.default_alias();
                self.validate_aggregate(&field)?;
                Ok(field)
            }
            _ => Err(CrustyError::ValidationError(String::from(
                "Unsupported expression",
            ))),
        }
    }
}
|
// vim: tw=80
//! Attributes are applied to the mock object, too.
use mockall::*;
pub struct A{}
// Fixture checking that `#[automock]` forwards method-level attributes (like
// `cfg`) to the generated mock, so conditionally-compiled methods stay
// conditional in `MockA` as well.
#[automock]
impl A {
    // Neither A::foo nor MockA::foo should be defined
    // (`DoesNotExist` would be a compile error if the cfg were ignored).
    #[cfg(target_os = "multics")] pub fn foo(&self, x: DoesNotExist) {}
    // Both A::bar and MockA::bar should be defined
    #[cfg(not(target_os = "multics"))] pub fn bar(&self, _x: i32) -> i32 {0}
}
#[test]
fn returning() {
    // The cfg-enabled method must exist on the mock and honor an identity
    // expectation: `bar` echoes its argument back.
    let mut mocked = MockA::new();
    mocked.expect_bar().returning(|value| value);
    assert_eq!(4, mocked.bar(4));
}
|
use youchoose;
fn main() {
    // Build an interactive menu over the numbers 0..100; each builder call
    // configures one aspect of the UI before the menu is shown.
    let mut menu = youchoose::Menu::new(0..100)
        .preview(multiples) // Sets the preview function
        .preview_pos(youchoose::ScreenSide::Bottom, 0.3) // Sets the position of the preview pane
        .preview_label(" multiples ".to_string()) // Sets the text at the top of the preview pane
        .multiselect() // Allows multiple items to be selected
        .icon(":(") // Sets the default (not selected) icon for an item
        .selected_icon(":)") // The icon for selected items
        .add_multiselect_key('s' as i32) // Bind the 's' key to multiselect
        .add_up_key('u' as i32) // Bind the 'u' key to up
        .add_down_key('d' as i32) // Bind the 'd' key to down
        .add_select_key('.' as i32); // Bind the '.' key to select
    // Runs the menu; the user's selection is deliberately discarded here.
    let _choice = menu.show();
}
/// Preview-pane renderer: builds the text displayed for the highlighted item.
fn multiples(num: i32) -> String {
    // --- Snip ---
    let mut preview = String::from("very custom: ");
    preview.push_str(&num.to_string());
    preview
}
|
use crate::demo::data::DemoTick;
use crate::demo::message::packetentities::EntityId;
use crate::demo::message::packetentities::PacketEntity;
use crate::demo::message::{Message, MessageType};
use crate::demo::packet::datatable::ClassId;
use crate::demo::packet::stringtable::StringTableEntry;
use crate::demo::parser::analyser::UserInfo;
use crate::demo::parser::gamestateanalyser::UserId;
use crate::demo::parser::handler::{BorrowMessageHandler, MessageHandler};
use crate::demo::sendprop::SendProp;
use crate::{ParserState, ReadResult, Stream};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
/**
 * An analyzer that extracts player scoreboard information to get the stats for every player by the
 * end of the demo. Essentially, this will capture all the information that would appear on the
 * scoreboard for every player if they took a snapshot at the time the demo finishes (such as the end
 * of a match or round).
 */
#[derive(Default, Debug, Serialize, Deserialize, PartialEq)]
pub struct PlayerSummaryAnalyzer {
    // Accumulated output: per-player summaries plus user info.
    state: PlayerSummaryState,
    // Maps packet entity ids to stable user ids.
    user_id_map: HashMap<EntityId, UserId>,
}
/// Scoreboard counters for a single player, mirroring what the in-game
/// scoreboard would show at the moment the demo ends.
#[derive(Debug, Serialize, Deserialize, PartialEq, Default)]
pub struct PlayerSummary {
    pub points: u32,
    pub kills: u32,
    pub assists: u32,
    pub deaths: u32,
    pub buildings_destroyed: u32,
    pub captures: u32,
    pub defenses: u32,
    pub dominations: u32,
    pub revenges: u32,
    pub ubercharges: u32,
    pub headshots: u32,
    pub teleports: u32,
    pub healing: u32,
    pub backstabs: u32,
    pub bonus_points: u32,
    pub support: u32,
    pub damage_dealt: u32,
}
/// Aggregate analyzer output: per-player stats plus the user table
/// (a BTreeMap, so users iterate in user-id order).
#[derive(Default, Debug, Serialize, Deserialize, PartialEq)]
pub struct PlayerSummaryState {
    pub player_summaries: HashMap<UserId, PlayerSummary>,
    pub users: BTreeMap<UserId, UserInfo>,
}
impl MessageHandler for PlayerSummaryAnalyzer {
    type Output = PlayerSummaryState;

    /// Only entity-update packets carry scoreboard data.
    fn does_handle(message_type: MessageType) -> bool {
        matches!(message_type, MessageType::PacketEntities)
    }

    /// Feeds every entity in a PacketEntities message through the per-entity handler.
    fn handle_message(&mut self, message: &Message, _tick: DemoTick, parser_state: &ParserState) {
        if let Message::PacketEntities(packet_entities) = message {
            packet_entities
                .entities
                .iter()
                .for_each(|entity| self.handle_packet_entity(entity, parser_state));
        }
    }

    fn into_output(self, _parser_state: &ParserState) -> <Self as MessageHandler>::Output {
        self.state
    }

    fn handle_string_entry(
        &mut self,
        table: &str,
        index: usize,
        entry: &StringTableEntry,
        _parser_state: &ParserState,
    ) {
        // Player identity data lives in the "userinfo" string table only.
        if table != "userinfo" {
            return;
        }
        // A malformed entry is deliberately ignored rather than aborting the scan.
        let _ = self.parse_user_info(
            index,
            entry.text.as_ref().map(|s| s.as_ref()),
            entry.extra_data.as_ref().map(|extra| extra.data.clone()),
        );
    }
}
impl BorrowMessageHandler for PlayerSummaryAnalyzer {
    /// Exposes the in-progress state without consuming the analyzer.
    fn borrow_output(&self, _state: &ParserState) -> &Self::Output {
        &self.state
    }
}
/**
 * Invokes `handler` with the value of an integer network property, if the
 * packet carries one; does nothing when the prop is absent or non-integer.
 *
 * parse_integer_prop(packet, "DT_TFPlayerScoringDataExclusive", "m_iPoints", |points| { println!("Scored {} points", points) });
 */
fn parse_integer_prop<F>(
    packet: &PacketEntity,
    table: &str,
    name: &str,
    parser_state: &ParserState,
    handler: F,
) where
    F: FnOnce(u32),
{
    use crate::demo::sendprop::SendPropValue;
    match packet.get_prop_by_name(table, name, parser_state) {
        Some(SendProp {
            value: SendPropValue::Integer(raw),
            ..
        }) => handler(raw as u32),
        // Missing property, or a value of another kind: nothing to report.
        _ => {}
    }
}
impl PlayerSummaryAnalyzer {
    /// Creates a new analyzer with empty state.
    pub fn new() -> Self {
        Self::default()
    }

    /// Inspects a single entity update and folds any scoreboard data it
    /// carries into the per-player summaries.
    fn handle_packet_entity(&mut self, packet: &PacketEntity, parser_state: &ParserState) {
        use crate::demo::sendprop::SendPropValue;
        if let Some(class) = parser_state
            .server_classes
            .get(<ClassId as Into<usize>>::into(packet.server_class))
        {
            match class.name.as_str() {
                "CTFPlayer" => {
                    if let Some(user_id) = self.user_id_map.get(&packet.entity_index) {
                        let player_summary =
                            self.state.player_summaries.entry(*user_id).or_default();
                        // Extract scoreboard information, if present, and update the player's
                        // summary accordingly.
                        // NOTE: Multiple DT_TFPlayerScoringDataExclusive structures may be
                        // present - one for the entire match, and one for just the current
                        // round. Since we're only interested in the overall match scores, we
                        // need to ignore the round-specific values. Fortunately, this is easy -
                        // just keep the greater value (if multiple values are present), since
                        // none of these scores are able to decrement.
                        //
                        // DT_TFPlayerScoringDataExclusive members (all unsigned integers):
                        //   m_iCaptures, m_iDefenses, m_iKills, m_iDeaths, m_iSuicides,
                        //   m_iDominations, m_iRevenge, m_iBuildingsBuilt,
                        //   m_iBuildingsDestroyed, m_iHeadshots, m_iBackstabs, m_iHealPoints,
                        //   m_iInvulns, m_iTeleports, m_iDamageDone, m_iCrits,
                        //   m_iResupplyPoints, m_iKillAssists, m_iBonusPoints, m_iPoints
                        //
                        // m_iSuicides, m_iBuildingsBuilt, m_iCrits and m_iResupplyPoints are
                        // deliberately not tracked.
                        //
                        // NOTE: support points aren't included here, but is equal to the sum
                        // of m_iHealingAssist and m_iDamageAssist.
                        // TODO: pull data for support points
                        //
                        // TODO: m_iKills might not be accurate. Tested with a demo file with
                        // 89 kills (88 on the scoreboard), but only 83 were reported in the
                        // scoring data.
                        let tracked: [(&str, &mut u32); 16] = [
                            ("m_iCaptures", &mut player_summary.captures),
                            ("m_iDefenses", &mut player_summary.defenses),
                            ("m_iKills", &mut player_summary.kills),
                            ("m_iDeaths", &mut player_summary.deaths),
                            ("m_iDominations", &mut player_summary.dominations),
                            ("m_iRevenge", &mut player_summary.revenges),
                            ("m_iBuildingsDestroyed", &mut player_summary.buildings_destroyed),
                            ("m_iHeadshots", &mut player_summary.headshots),
                            ("m_iBackstabs", &mut player_summary.backstabs),
                            ("m_iHealPoints", &mut player_summary.healing),
                            ("m_iInvulns", &mut player_summary.ubercharges),
                            ("m_iTeleports", &mut player_summary.teleports),
                            ("m_iDamageDone", &mut player_summary.damage_dealt),
                            ("m_iKillAssists", &mut player_summary.assists),
                            ("m_iBonusPoints", &mut player_summary.bonus_points),
                            ("m_iPoints", &mut player_summary.points),
                        ];
                        for (prop, stat) in tracked {
                            parse_integer_prop(
                                packet,
                                "DT_TFPlayerScoringDataExclusive",
                                prop,
                                parser_state,
                                |value| {
                                    // Keep the maximum seen (match total >= round value).
                                    if value > *stat {
                                        *stat = value;
                                    }
                                },
                            );
                        }
                    }
                }
                "CTFPlayerResource" => {
                    // Player summaries - including entity IDs!
                    // The resource entity maps entities to users via props like
                    // `m_iUserID.024 = 2523` (entity 24 is user 2523).
                    // 0 to 32, inclusive (1..33 might also work, not sure if there's a user 0
                    // or not). Not exhaustive and doesn't work for servers with > 32 players.
                    for i in 0..33 {
                        if let Some(SendProp {
                            value: SendPropValue::Integer(x),
                            ..
                        }) = packet.get_prop_by_name(
                            "m_iUserID",
                            format!("{:0>3}", i).as_str(),
                            parser_state,
                        ) {
                            let entity_id = EntityId::from(i as u32);
                            let user_id = UserId::from(x as u32);
                            self.user_id_map.insert(entity_id, user_id);
                        }
                    }
                }
                _other => {
                    // Entity classes without scoreboard data are ignored.
                }
            }
        }
    }

    /// Parses a "userinfo" string-table entry and records (or refreshes) the
    /// corresponding user's info in the analyzer state.
    fn parse_user_info(
        &mut self,
        index: usize,
        text: Option<&str>,
        data: Option<Stream>,
    ) -> ReadResult<()> {
        if let Some(user_info) =
            crate::demo::data::UserInfo::parse_from_string_table(index as u16, text, data)?
        {
            self.state
                .users
                .entry(user_info.player_info.user_id)
                .and_modify(|info| {
                    // Already known: refresh only the entity binding.
                    info.entity_id = user_info.entity_id;
                })
                .or_insert_with(|| user_info.into());
        }
        Ok(())
    }
}
|
use crate::models::{ComicId, ComicIdInvalidity, Token};
use crate::util::{ensure_is_authorized, ensure_is_valid, AddMonths};
use actix_web::{error, web, HttpResponse, Result};
use actix_web_grants::permissions::AuthDetails;
use chrono::{DateTime, TimeZone, Utc};
use database::models::{Comic as DatabaseComic, LogEntry};
use database::DbPool;
use parse_display::Display;
use semval::{context::Context as ValidationContext, Validate};
use serde::Deserialize;
use shared::token_permissions;
/// HTTP handler that updates a comic's publish date (and accuracy flag),
/// recording the change in the audit log.
///
/// Requires the `CAN_CHANGE_COMIC_DATA` permission; the body is validated
/// before any database work happens. All writes share one transaction so the
/// comic row and the log entry cannot get out of sync.
pub(crate) async fn set_publish_date(
    pool: web::Data<DbPool>,
    request: web::Json<SetPublishDateBody>,
    auth: AuthDetails,
) -> Result<HttpResponse> {
    ensure_is_authorized(&auth, token_permissions::CAN_CHANGE_COMIC_DATA)
        .map_err(error::ErrorForbidden)?;
    ensure_is_valid(&*request).map_err(error::ErrorBadRequest)?;
    let mut transaction = pool
        .begin()
        .await
        .map_err(error::ErrorInternalServerError)?;
    DatabaseComic::ensure_exists_by_id(&mut *transaction, request.comic_id.into_inner())
        .await
        .map_err(error::ErrorInternalServerError)?;
    // Fetch the previous date first so the log message can show old -> new.
    let old_publish_date =
        DatabaseComic::publish_date_by_id(&mut *transaction, request.comic_id.into_inner())
            .await
            .map_err(error::ErrorInternalServerError)?;
    DatabaseComic::update_publish_date_by_id(
        &mut *transaction,
        request.comic_id.into_inner(),
        request.publish_date,
        request.is_accurate_publish_date,
    )
    .await
    .map_err(error::ErrorInternalServerError)?;
    if let Some(old_publish_date) = old_publish_date {
        // The comic already had a publish date: log it as a change.
        LogEntry::log_action(
            &mut *transaction,
            request.token.to_string(),
            format!(
                "Changed publish date on comic #{} from \"{}\" to \"{}\"",
                request.comic_id,
                // Stored dates are naive; they are interpreted as UTC here.
                Utc.from_utc_datetime(&old_publish_date)
                    .to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
                request
                    .publish_date
                    .to_rfc3339_opts(chrono::SecondsFormat::Secs, true)
            ),
        )
        .await
        .map_err(error::ErrorInternalServerError)?;
    } else {
        // First time a publish date is set for this comic.
        LogEntry::log_action(
            &mut *transaction,
            request.token.to_string(),
            format!(
                "Set publish date on comic #{} to \"{}\"",
                request.comic_id,
                request
                    .publish_date
                    .to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
            ),
        )
        .await
        .map_err(error::ErrorInternalServerError)?;
    }
    transaction
        .commit()
        .await
        .map_err(error::ErrorInternalServerError)?;
    Ok(HttpResponse::Ok().finish())
}
/// JSON request body for `set_publish_date`.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct SetPublishDateBody {
    // Acting user's token; its string form is recorded in the audit log.
    token: Token,
    comic_id: ComicId,
    publish_date: DateTime<Utc>,
    // Stored alongside the date by update_publish_date_by_id.
    is_accurate_publish_date: bool,
}
impl Validate for SetPublishDateBody {
    type Invalidity = SetPublishDateBodyInvalidity;
    /// Rejects dates earlier than 2003-08-01 (when the comic started) or
    /// more than one month in the future.
    fn validate(&self) -> semval::ValidationResult<Self::Invalidity> {
        ValidationContext::new()
            .validate_with(&self.comic_id, SetPublishDateBodyInvalidity::ComicId)
            .invalidate_if(
                self.publish_date < Utc.ymd(2003, 8, 1).and_hms(0, 0, 0)
                    || self.publish_date > Utc::now().add_months(1),
                SetPublishDateBodyInvalidity::PublishDate,
            )
            .into()
    }
}
/// Validation failures reported for a `SetPublishDateBody`.
#[derive(Copy, Clone, Debug, Display, Eq, PartialEq)]
pub(crate) enum SetPublishDateBodyInvalidity {
    #[display("{0}")]
    ComicId(ComicIdInvalidity),
    #[display("Provided publish date must be after the comic was started and no later than one month from today's date")]
    PublishDate,
}
|
use nu_engine::{eval_block, CallExt};
use nu_protocol::ast::{Call, CellPath, PathMember};
use nu_protocol::engine::{CaptureBlock, Command, EngineState, Stack};
use nu_protocol::{
Category, Example, FromValue, IntoInterruptiblePipelineData, IntoPipelineData, PipelineData,
ShellError, Signature, Span, SyntaxShape, Value,
};
/// The `insert` filter command: adds a new column to the pipeline input.
#[derive(Clone)]
pub struct Insert;
impl Command for Insert {
    fn name(&self) -> &str {
        "insert"
    }
    fn signature(&self) -> Signature {
        // Two required positionals: the target cell path and the value (or a
        // block producing the value) to place there.
        Signature::build("insert")
            .required(
                "field",
                SyntaxShape::CellPath,
                "the name of the column to insert",
            )
            .required(
                "new value",
                SyntaxShape::Any,
                "the new value to give the cell(s)",
            )
            .category(Category::Filters)
    }
    fn usage(&self) -> &str {
        "Insert a new column."
    }
    fn search_terms(&self) -> Vec<&str> {
        vec!["add"]
    }
    fn run(
        &self,
        engine_state: &EngineState,
        stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
        // Delegates to the free function below.
        insert(engine_state, stack, call, input)
    }
    fn examples(&self) -> Vec<Example> {
        vec![Example {
            description: "Insert a new value",
            example: "echo {'name': 'nu', 'stars': 5} | insert alias 'Nushell'",
            result: Some(Value::Record {
                cols: vec!["name".into(), "stars".into(), "alias".into()],
                vals: vec![
                    Value::test_string("nu"),
                    Value::test_int(5),
                    Value::test_string("Nushell"),
                ],
                span: Span::test_data(),
            }),
        }]
    }
}
/// Shared implementation backing the `insert` command.
///
/// Three cases are handled:
/// 1. the replacement value is a block — evaluate it once per input item;
/// 2. the cell path starts with an integer index — splice the value into the
///    stream at that position;
/// 3. otherwise — insert the value at the cell path of every input item.
fn insert(
    engine_state: &EngineState,
    stack: &mut Stack,
    call: &Call,
    input: PipelineData,
) -> Result<PipelineData, ShellError> {
    let span = call.head;
    let cell_path: CellPath = call.req(engine_state, stack, 0)?;
    let replacement: Value = call.req(engine_state, stack, 1)?;
    let redirect_stdout = call.redirect_stdout;
    let redirect_stderr = call.redirect_stderr;
    // Cloned so the `move` closures below can own what they capture.
    let engine_state = engine_state.clone();
    let ctrlc = engine_state.ctrlc.clone();
    // Replace is a block, so set it up and run it instead of using it as the replacement
    if replacement.as_block().is_ok() {
        let capture_block: CaptureBlock = FromValue::from_value(&replacement)?;
        let block = engine_state.get_block(capture_block.block_id).clone();
        let mut stack = stack.captures_to_stack(&capture_block.captures);
        // Saved so each item is evaluated against the same starting environment.
        let orig_env_vars = stack.env_vars.clone();
        let orig_env_hidden = stack.env_hidden.clone();
        input.map(
            move |mut input| {
                stack.with_env(&orig_env_vars, &orig_env_hidden);
                // Bind the block's positional parameter (if any) to the item.
                if let Some(var) = block.signature.get_positional(0) {
                    if let Some(var_id) = &var.var_id {
                        stack.add_var(*var_id, input.clone())
                    }
                }
                let output = eval_block(
                    &engine_state,
                    &mut stack,
                    &block,
                    input.clone().into_pipeline_data(),
                    redirect_stdout,
                    redirect_stderr,
                );
                match output {
                    Ok(pd) => {
                        // Failures become in-band Value::Error items, so the
                        // rest of the stream keeps flowing.
                        if let Err(e) =
                            input.insert_data_at_cell_path(&cell_path.members, pd.into_value(span))
                        {
                            return Value::Error { error: e };
                        }
                        input
                    }
                    Err(e) => Value::Error { error: e },
                }
            },
            ctrlc,
        )
    } else {
        if let Some(PathMember::Int { val, .. }) = cell_path.members.get(0) {
            // Integer path head: insert into the stream itself at index `val`,
            // padding with Nothing when the stream is shorter than that.
            let mut input = input.into_iter();
            let mut pre_elems = vec![];
            for _ in 0..*val {
                if let Some(v) = input.next() {
                    pre_elems.push(v);
                } else {
                    pre_elems.push(Value::Nothing { span })
                }
            }
            return Ok(pre_elems
                .into_iter()
                .chain(vec![replacement])
                .chain(input)
                .into_pipeline_data(ctrlc));
        }
        input.map(
            move |mut input| {
                let replacement = replacement.clone();
                if let Err(e) = input.insert_data_at_cell_path(&cell_path.members, replacement) {
                    return Value::Error { error: e };
                }
                input
            },
            ctrlc,
        )
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Runs the examples declared in `Insert::examples` through the shared
    // example-testing harness so the documented output stays accurate.
    #[test]
    fn test_examples() {
        use crate::test_examples;
        test_examples(Insert {})
    }
}
|
use std::io;
fn main() {
let numbers = (1, 2, 3, 4.5);
println!("{:?}", numbers);
println!("{:?}", numbers.0);
println!("{:?}", numbers.3);
let l0 = 'V';
println!("{l0}");
let s: String = "Vilmar, o gatinho".to_string();
println!("{}", s);
let mut vazia : String = String::new();
vazia.push_str("Vilmar");
vazia.push_str(" ");
vazia.push_str("Catafesta");
println!("{}", vazia);
let nome : String = String::from("vilmar");
println!("{}", nome);
let x: String = "Vilmar".into();
println!("{s}");
let mut cNome = String::new();
println!("Digite : ");
io::stdin().read_line(&mut cNome).expect("Erro: no input");
println!("Digitou {}", cNome);
println!("Letras {}", cNome.trim().len());
}
|
//! Tests auto-converted from "sass-spec/spec/libsass-closed-issues/issue_713"
#[allow(unused)]
use super::rsass;
// From "sass-spec/spec/libsass-closed-issues/issue_713/and.hrx"
// Ignoring "and", error tests are not supported yet.
// From "sass-spec/spec/libsass-closed-issues/issue_713/not.hrx"
// Ignoring "not", error tests are not supported yet.
// From "sass-spec/spec/libsass-closed-issues/issue_713/or.hrx"
// Ignoring "or", error tests are not supported yet.
|
#![feature(test)]
extern crate extended_collections;
extern crate rand;
extern crate test;
use extended_collections::avl_tree::AvlMap;
use extended_collections::skiplist::SkipMap;
use extended_collections::treap::TreapMap;
use self::rand::Rng;
use std::collections::BTreeMap;
use test::Bencher;
const NUM_OF_OPERATIONS: usize = 100;
// Measures TreapMap insertion of NUM_OF_OPERATIONS random key/value pairs;
// the RNG is re-seeded identically each iteration for a stable workload.
#[bench]
fn bench_treapmap_insert(b: &mut Bencher) {
    b.iter(|| {
        let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
        let mut map = TreapMap::new();
        for _ in 0..NUM_OF_OPERATIONS {
            let key = rng.next_u32();
            let val = rng.next_u32();
            map.insert(key, val);
        }
    });
}
// Builds the TreapMap once outside the timed loop, then measures lookups of
// every inserted key; black_box keeps the optimizer from eliding them.
#[bench]
fn bench_treapmap_get(b: &mut Bencher) {
    let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
    let mut map = TreapMap::new();
    let mut values = Vec::new();
    for _ in 0..NUM_OF_OPERATIONS {
        let key = rng.next_u32();
        let val = rng.next_u32();
        map.insert(key, val);
        values.push(key);
    }
    b.iter(|| {
        for key in &values {
            test::black_box(map.get(key));
        }
    });
}
// Measures AvlMap insertion with the same seeded workload as the other maps.
#[bench]
fn bench_avlmap_insert(b: &mut Bencher) {
    b.iter(|| {
        let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
        let mut map = AvlMap::new();
        for _ in 0..NUM_OF_OPERATIONS {
            let key = rng.next_u32();
            let val = rng.next_u32();
            map.insert(key, val);
        }
    });
}
// Measures AvlMap lookups over a pre-populated map (setup is untimed).
#[bench]
fn bench_avlmap_get(b: &mut Bencher) {
    let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
    let mut map = AvlMap::new();
    let mut values = Vec::new();
    for _ in 0..NUM_OF_OPERATIONS {
        let key = rng.next_u32();
        let val = rng.next_u32();
        map.insert(key, val);
        values.push(key);
    }
    b.iter(|| {
        for key in &values {
            test::black_box(map.get(key));
        }
    });
}
// Measures SkipMap insertion with the same seeded workload as the other maps.
#[bench]
fn bench_skipmap_insert(b: &mut Bencher) {
    b.iter(|| {
        let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
        let mut map = SkipMap::new();
        for _ in 0..NUM_OF_OPERATIONS {
            let key = rng.next_u32();
            let val = rng.next_u32();
            map.insert(key, val);
        }
    });
}
// Measures SkipMap lookups over a pre-populated map (setup is untimed).
#[bench]
fn bench_skipmap_get(b: &mut Bencher) {
    let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
    let mut map = SkipMap::new();
    let mut values = Vec::new();
    for _ in 0..NUM_OF_OPERATIONS {
        let key = rng.next_u32();
        let val = rng.next_u32();
        map.insert(key, val);
        values.push(key);
    }
    b.iter(|| {
        for key in &values {
            test::black_box(map.get(key));
        }
    });
}
// Baseline: std BTreeMap insertion with the identical seeded workload.
#[bench]
fn bench_btreemap_insert(b: &mut Bencher) {
    b.iter(|| {
        let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
        let mut map = BTreeMap::new();
        for _ in 0..NUM_OF_OPERATIONS {
            let key = rng.next_u32();
            let val = rng.next_u32();
            map.insert(key, val);
        }
    });
}
// Baseline: std BTreeMap lookups over a pre-populated map (setup is untimed).
#[bench]
fn bench_btreemap_get(b: &mut Bencher) {
    let mut rng: rand::XorShiftRng = rand::SeedableRng::from_seed([1, 1, 1, 1]);
    let mut map = BTreeMap::new();
    let mut values = Vec::new();
    for _ in 0..NUM_OF_OPERATIONS {
        let key = rng.next_u32();
        let val = rng.next_u32();
        map.insert(key, val);
        values.push(key);
    }
    b.iter(|| {
        for key in &values {
            test::black_box(map.get(key));
        }
    });
}
|
/// Performs a raw Linux x86-64 `syscall` instruction.
///
/// The kernel calling convention puts the syscall number in `rax` and the
/// six arguments in `rdi`, `rsi`, `rdx`, `r10`, `r8`, `r9` (note `r10`, not
/// the userspace ABI's `rcx`). The kernel clobbers `rcx` and `r11`, so both
/// are declared as scratch outputs. The return value (or negated errno) is
/// read back from `rax`.
///
/// # Safety
/// `num` must be a valid syscall number and every argument must satisfy that
/// syscall's contract (e.g. pointer arguments must be valid for the kernel
/// to dereference).
pub(crate) unsafe fn raw_syscall(num: i64, arg0: u64, arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64) -> i64 {
    let mut ret = num;
    asm!("syscall",
         inout("rax") ret,
         in("rdi") arg0,
         in("rsi") arg1,
         in("rdx") arg2,
         // BUGFIX: args 4-6 belong in r10/r8/r9. The previous mapping
         // (arg3->r8, arg4->r9, arg5->r10) scrambled every syscall that
         // takes four or more arguments.
         in("r10") arg3,
         in("r8") arg4,
         in("r9") arg5,
         out("r11") _,
         out("rcx") _);
    ret
}
|
use std::{str::FromStr, num::ParseIntError};
use problem::{Problem, solve};
/// One policy/password line of the puzzle input, e.g. `1-3 a: abcde`.
struct Input {
    min_letter: u32,
    max_letter: u32,
    letter: char,
    password: String,
}
impl Input {
    /// Part 1 policy: `letter` must occur between `min_letter` and
    /// `max_letter` times (inclusive) in the password.
    fn is_valid(&self) -> bool {
        // Idiomatic iterator count replaces the manual counting loop.
        let count = self.password.chars().filter(|&c| c == self.letter).count() as u32;
        (self.min_letter..=self.max_letter).contains(&count)
    }
    /// Part 2 policy: exactly one of the (1-based) positions `min_letter`
    /// and `max_letter` holds `letter`.
    ///
    /// Panics if the password is shorter than `max_letter` characters
    /// (unchanged from the original indexing behaviour).
    fn is_valid_2(&self) -> bool {
        let letter_at = |pos: u32| {
            self.password.chars().nth(pos as usize - 1).unwrap() == self.letter
        };
        // XOR semantics: exactly one of the two positions must match.
        letter_at(self.min_letter) != letter_at(self.max_letter)
    }
}
impl FromStr for Input {
    type Err = ParseIntError;
    /// Parses `"<min>-<max> <letter>: <password>"`.
    ///
    /// Only the numeric fields report an `Err`; structurally malformed input
    /// panics on `unwrap`, matching the original behaviour.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut pieces = s.split(' ');
        let mut range = pieces.next().unwrap().split('-');
        let min_letter = range.next().unwrap().parse()?;
        let max_letter = range.next().unwrap().parse()?;
        let letter = pieces.next().unwrap().chars().next().unwrap();
        let password = pieces.next().unwrap().to_string();
        Ok(Input {
            min_letter,
            max_letter,
            letter,
            password,
        })
    }
}
// Uninhabited error type: neither puzzle part can fail.
#[derive(Debug)]
enum Error {}
// Marker type wiring the two puzzle parts into the `Problem` framework.
struct Day2;
impl Problem for Day2 {
    type Input = Vec<Input>;
    type Part1Output = usize;
    type Part2Output = usize;
    type Error = Error;
    // Part 1: count passwords satisfying the occurrence-count policy.
    fn part_1(input: &Self::Input) -> Result<Self::Part1Output, Self::Error> {
        Ok(input.iter().filter(|i| i.is_valid()).count())
    }
    // Part 2: count passwords satisfying the positional (XOR) policy.
    fn part_2(input: &Self::Input) -> Result<Self::Part2Output, Self::Error> {
        Ok(input.iter().filter(|i| i.is_valid_2()).count())
    }
}
fn main() {
    // `solve` loads and parses the "input" file, then runs both parts.
    solve::<Day2>("input").unwrap();
}
use image::{ImageBuffer, Rgb};
extern crate nom;
use nom::{
alt, character::complete::digit0, combinator::eof, do_parse, eof, many_till, map_res,
multi::many_till, named, opt, peek, tag, take_until,
};
use std::{fs::File, io::Read, path::PathBuf, str::FromStr, u32};
/// A group of pixels sharing one colour, as parsed from the text format.
#[derive(Debug)]
struct Cluster {
    pub color: Rgb<u8>,
    pub pixels: Vec<PixelCoord>,
}
// Parses a u8 from a run of ASCII digits (via FromStr).
named!(uint8 <&str, u8>,
    map_res!(digit0, FromStr::from_str)
);
// Parses a u32 from a run of ASCII digits (via FromStr).
named!(uint32 <&str, u32>,
    map_res!(digit0, FromStr::from_str)
);
// Parses a colour line of the form "(r,g,b)\n".
named!(colorf<&str, Rgb<u8>>, do_parse!(
    tag!("(") >>
    r: uint8 >>
    tag!(",") >>
    g: uint8 >>
    tag!(",") >>
    b: uint8 >>
    tag!(")\n") >>
    (Rgb([r, g, b]))
));
// Parses one pixel line "(x,y)..."; anything after the closing paren up to
// the (optional) newline is discarded.
named!(pixelf<&str, PixelCoord>, do_parse!(
    tag!("(") >>
    x: uint32 >>
    tag!(",") >>
    y: uint32 >>
    tag!(")") >>
    take_until!("\n") >>
    opt!(tag!("\n")) >>
    (PixelCoord {x: x, y: y})
));
// Collects pixel lines until end of input or the next "--\n" cluster header
// (peeked, not consumed, so clusterf can match it).
named!(pixelsf < &str, Vec < PixelCoord >>, do_parse!(
    pixels: many_till!(
        pixelf,
        alt!(
            eof!() |
            peek!(tag!("--\n"))
        )) >>
    (pixels.0)
));
// Parses one cluster: "--\n" header, a colour line, "-\n", then pixel lines.
named!(
    clusterf<&str, Cluster>,
    do_parse!(
        tag!("--\n") >>
        color: colorf >>
        tag!("-\n") >>
        pixels: pixelsf >>
        (Cluster { color: color, pixels: pixels })
    )
);
/// A 2-D pixel position (0-based image coordinates).
#[derive(Debug)]
struct PixelCoord {
    pub x: u32,
    pub y: u32,
}
/// Reads the cluster text format from `input_path`, rasterises every pixel
/// into an image, and writes it to `output_path`.
///
/// Panics if the input file cannot be opened/read, fails to parse, or the
/// image cannot be saved.
pub fn txt_to_img(input_path: PathBuf, output_path: PathBuf) {
    let mut input_file = File::open(input_path).expect("Can't open input file");
    let mut content: String = String::from("");
    input_file.read_to_string(&mut content).unwrap();
    // Parse clusters until end of input.
    let (_, (clusters, _)) = many_till(clusterf, eof)(&content).unwrap();
    // Find the largest coordinates; the buffer is allocated one larger in
    // each dimension because coordinates are 0-based.
    let mut width: u32 = 1;
    let mut height: u32 = 1;
    for cluster in &clusters {
        for pixel in &cluster.pixels {
            if pixel.x > width {
                width = pixel.x;
            }
            if pixel.y > height {
                height = pixel.y;
            }
        }
    }
    let mut output_buff = ImageBuffer::new(width + 1, height + 1);
    for cluster in &clusters {
        for pixel in &cluster.pixels {
            output_buff.put_pixel(pixel.x, pixel.y, cluster.color);
        }
    }
    output_buff
        .save(output_path)
        .expect("Can't open output file");
}
|
//! Module defining the `Filters` struct, which represents the possible filters applicable on network traffic.
use crate::{AppProtocol, IpVersion, TransProtocol};
/// Possible filters applicable to network traffic
///
/// A field set to its `Other` variant is treated as "no filter" by
/// `Filters::matches`.
#[derive(Clone, Copy)]
pub struct Filters {
    /// Internet Protocol version
    pub ip: IpVersion,
    /// Transport layer protocol
    pub transport: TransProtocol,
    /// Application layer protocol
    pub application: AppProtocol,
}
impl Default for Filters {
    /// Every protocol defaults to `Other`, i.e. no filtering at all.
    fn default() -> Self {
        Self {
            ip: IpVersion::Other,
            transport: TransProtocol::Other,
            application: AppProtocol::Other,
        }
    }
}
impl Filters {
    /// Checks whether the filters match the current packet's protocols.
    ///
    /// A field set to `Other` acts as a wildcard accepting any value.
    pub fn matches(self, rhs: Self) -> bool {
        let ip_ok = self.ip == IpVersion::Other || self.ip == rhs.ip;
        let transport_ok =
            self.transport == TransProtocol::Other || self.transport == rhs.transport;
        let application_ok =
            self.application == AppProtocol::Other || self.application == rhs.application;
        ip_ok && transport_ok && application_ok
    }
}
|
#[doc = "Register `CIFR` reader"]
pub type R = crate::R<CIFR_SPEC>;
#[doc = "Field `LSI1RDYF` reader - LSI1 ready interrupt flag"]
pub type LSI1RDYF_R = crate::BitReader;
#[doc = "Field `LSERDYF` reader - LSE ready interrupt flag"]
pub type LSERDYF_R = crate::BitReader;
#[doc = "Field `MSIRDYF` reader - MSI ready interrupt flag"]
pub type MSIRDYF_R = crate::BitReader;
#[doc = "Field `HSIRDYF` reader - HSI ready interrupt flag"]
pub type HSIRDYF_R = crate::BitReader;
#[doc = "Field `HSERDYF` reader - HSE ready interrupt flag"]
pub type HSERDYF_R = crate::BitReader;
#[doc = "Field `PLLRDYF` reader - PLL ready interrupt flag"]
pub type PLLRDYF_R = crate::BitReader;
#[doc = "Field `PLLSAI1RDYF` reader - PLLSAI1 ready interrupt flag"]
pub type PLLSAI1RDYF_R = crate::BitReader;
#[doc = "Field `HSECSSF` reader - HSE Clock security system interrupt flag"]
pub type HSECSSF_R = crate::BitReader;
#[doc = "Field `LSECSSF` reader - LSE Clock security system interrupt flag"]
pub type LSECSSF_R = crate::BitReader;
#[doc = "Field `HSI48RDYF` reader - HSI48 ready interrupt flag"]
pub type HSI48RDYF_R = crate::BitReader;
#[doc = "Field `LSI2RDYF` reader - LSI2 ready interrupt flag"]
pub type LSI2RDYF_R = crate::BitReader;
impl R {
    /// Extracts a single flag bit of the register value as a boolean.
    #[inline(always)]
    fn flag(&self, offset: u8) -> bool {
        (self.bits >> offset) & 1 == 1
    }
    #[doc = "Bit 0 - LSI1 ready interrupt flag"]
    #[inline(always)]
    pub fn lsi1rdyf(&self) -> LSI1RDYF_R {
        LSI1RDYF_R::new(self.flag(0))
    }
    #[doc = "Bit 1 - LSE ready interrupt flag"]
    #[inline(always)]
    pub fn lserdyf(&self) -> LSERDYF_R {
        LSERDYF_R::new(self.flag(1))
    }
    #[doc = "Bit 2 - MSI ready interrupt flag"]
    #[inline(always)]
    pub fn msirdyf(&self) -> MSIRDYF_R {
        MSIRDYF_R::new(self.flag(2))
    }
    #[doc = "Bit 3 - HSI ready interrupt flag"]
    #[inline(always)]
    pub fn hsirdyf(&self) -> HSIRDYF_R {
        HSIRDYF_R::new(self.flag(3))
    }
    #[doc = "Bit 4 - HSE ready interrupt flag"]
    #[inline(always)]
    pub fn hserdyf(&self) -> HSERDYF_R {
        HSERDYF_R::new(self.flag(4))
    }
    #[doc = "Bit 5 - PLL ready interrupt flag"]
    #[inline(always)]
    pub fn pllrdyf(&self) -> PLLRDYF_R {
        PLLRDYF_R::new(self.flag(5))
    }
    #[doc = "Bit 6 - PLLSAI1 ready interrupt flag"]
    #[inline(always)]
    pub fn pllsai1rdyf(&self) -> PLLSAI1RDYF_R {
        PLLSAI1RDYF_R::new(self.flag(6))
    }
    #[doc = "Bit 8 - HSE Clock security system interrupt flag"]
    #[inline(always)]
    pub fn hsecssf(&self) -> HSECSSF_R {
        HSECSSF_R::new(self.flag(8))
    }
    #[doc = "Bit 9 - LSE Clock security system interrupt flag"]
    #[inline(always)]
    pub fn lsecssf(&self) -> LSECSSF_R {
        LSECSSF_R::new(self.flag(9))
    }
    #[doc = "Bit 10 - HSI48 ready interrupt flag"]
    #[inline(always)]
    pub fn hsi48rdyf(&self) -> HSI48RDYF_R {
        HSI48RDYF_R::new(self.flag(10))
    }
    #[doc = "Bit 11 - LSI2 ready interrupt flag"]
    #[inline(always)]
    pub fn lsi2rdyf(&self) -> LSI2RDYF_R {
        LSI2RDYF_R::new(self.flag(11))
    }
}
// Marker type describing the CIFR register: 32 bits wide, readable, resets to 0.
#[doc = "Clock interrupt flag register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cifr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CIFR_SPEC;
impl crate::RegisterSpec for CIFR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cifr::R`](R) reader structure"]
impl crate::Readable for CIFR_SPEC {}
#[doc = "`reset()` method sets CIFR to value 0"]
impl crate::Resettable for CIFR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use chrono::{NaiveDateTime, Utc};
use discorsd::BotState;
use discorsd::errors::{BotError, HangmanError};
use discorsd::http::channel::GetMessages;
use discorsd::model::ids::{ChannelId, GuildId, Id, MessageId};
use itertools::Itertools;
use once_cell::sync::Lazy;
use rand::{Rng, thread_rng};
use rand::prelude::SliceRandom;
use reqwest::Client;
use serde::Deserialize;
use discorsd::model::channel::ChannelType;
use crate::Bot;
const MIN_WORD_LEN: usize = 5;
/// Picks a random word from the message history of `channel`.
///
/// Strategy: choose a random moment between the channel's creation and now,
/// synthesise a message ID from that timestamp, fetch up to 100 messages
/// around it, and pick a random word of at least MIN_WORD_LEN purely
/// alphabetic ASCII characters. Returns the lowercased word plus a message
/// link to use as the "source". Errors with `HangmanError::NoWords` when no
/// fetched message contains an eligible word.
///
/// NOTE(review): the `println!` calls look like leftover debug output —
/// consider removing them or routing them through a logger.
pub async fn channel_hist_word(state: &BotState<Bot>, channel: ChannelId, guild: Option<GuildId>) -> Result<(String, String), BotError> {
    // Snowflake-style IDs encode their creation time — lower bound of the range.
    let channel_creation = channel.timestamp().timestamp();
    println!("channel = {:?}", channel);
    let now = Utc::now().timestamp();
    let rand_time = {
        let mut rng = thread_rng();
        rng.gen_range(channel_creation..now)
    };
    println!("rand_time = {:?}", rand_time);
    let time = NaiveDateTime::from_timestamp(rand_time, 0);
    println!("time = {:?}", time);
    // A message ID built from the random timestamp acts as an anchor point.
    let message = MessageId::from(time);
    println!("message = {:?}", message);
    let get = GetMessages::new().limit(100).around(message);
    let messages = state.client.get_messages(channel, get).await?;
    let mut rng = thread_rng();
    messages.into_iter()
        .find_map(|m| {
            // Keep only fully-alphabetic ASCII words long enough for hangman.
            let mut vec = m.content.split_ascii_whitespace()
                .filter(|s| s.chars().all(|c| c.is_ascii_alphabetic()))
                .filter(|s| s.len() >= MIN_WORD_LEN)
                .collect_vec();
            println!("vec = {:?}", vec);
            // Shuffle so the pick isn't biased toward the start of the message.
            vec.shuffle(&mut rng);
            (!vec.is_empty()).then(|| (
                vec.swap_remove(0).to_ascii_lowercase(),
                match guild {
                    Some(guild) => format!("https://discord.com/channels/{guild}/{channel}/{}", m.id),
                    None => format!("https://discord.com/channels/@me/{channel}/{}", m.id)
                }
            ))
        })
        .ok_or_else(|| HangmanError::NoWords(channel, guild).into())
}
/// Picks a history word from a random text channel of `guild`, or from the
/// given channel directly when there is no guild (e.g. a DM).
///
/// NOTE(review): `channels[0]` panics if the guild has no Text/Dm channels,
/// and `unwrap` panics on a cache miss — confirm callers guarantee both.
pub async fn server_hist_word(state: &BotState<Bot>, guild: Result<GuildId, ChannelId>) -> Result<(String, String), BotError> {
    let (channel, guild) = match guild {
        Ok(guild) => {
            let guild = state.cache.guild(guild).await.unwrap();
            let mut channels = guild.channels.iter()
                .filter(|c| matches!(c.variant_type(), ChannelType::Text | ChannelType::Dm))
                .collect_vec();
            // Shuffle then take the first entry = uniform random channel pick.
            channels.shuffle(&mut thread_rng());
            (channels[0].id(), Some(guild.id))
        }
        Err(channel) => (channel, None),
    };
    channel_hist_word(state, channel, guild).await
}
/// Lazily-built Wordnik "random words" request URL, with the API key loaded
/// from `wordnik.txt`. Panics on first use if the file is missing.
static WORDNIK_URL: Lazy<String> = Lazy::new(|| {
    let key = std::fs::read_to_string("wordnik.txt").unwrap();
    // BUGFIX: trim the key — a hand-edited key file almost always ends with
    // a newline, which would otherwise be embedded in the query string and
    // make the API reject the request.
    let key = key.trim();
    format!(
        "https://api.wordnik.com/v4/words.json/randomWords?\
            hasDictionaryDef=true&\
            includePartOfSpeech=noun,adjective,verb,adverb,preposition&\
            minLength={MIN_WORD_LEN}&\
            limit=100&\
            api_key={key}"
    )
});
/// Fetches a batch of random words from the Wordnik API and returns the
/// first purely-alphabetic one, together with its wordnik.com page URL.
///
/// NOTE(review): the `unwrap` panics if none of the returned words is purely
/// alphabetic — unlikely with limit=100, but worth confirming.
pub async fn wordnik_word(client: &Client) -> Result<(String, String), BotError> {
    // Only the `word` field of each response object is needed.
    #[derive(Deserialize, Debug)]
    struct Word {
        word: String,
    }
    let words: Vec<Word> = client.get(&*WORDNIK_URL)
        .send().await?
        .json().await?;
    let word = words.into_iter()
        // all words from wordnik are lowercase
        .find(|w| w.word.chars().all(|c| c.is_ascii_alphabetic()))
        .unwrap()
        .word;
    let source = format!("https://www.wordnik.com/words/{word}");
    Ok((word, source))
}
|
#[doc = "Register `SECCFGR` reader"]
pub type R = crate::R<SECCFGR_SPEC>;
#[doc = "Register `SECCFGR` writer"]
pub type W = crate::W<SECCFGR_SPEC>;
#[doc = "Field `WUP1SEC` reader - WUPx secure protection"]
pub type WUP1SEC_R = crate::BitReader;
#[doc = "Field `WUP1SEC` writer - WUPx secure protection"]
pub type WUP1SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP2SEC` reader - WUPx secure protection"]
pub type WUP2SEC_R = crate::BitReader;
#[doc = "Field `WUP2SEC` writer - WUPx secure protection"]
pub type WUP2SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP3SEC` reader - WUPx secure protection"]
pub type WUP3SEC_R = crate::BitReader;
#[doc = "Field `WUP3SEC` writer - WUPx secure protection"]
pub type WUP3SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP4SEC` reader - WUPx secure protection"]
pub type WUP4SEC_R = crate::BitReader;
#[doc = "Field `WUP4SEC` writer - WUPx secure protection"]
pub type WUP4SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP5SEC` reader - WUPx secure protection"]
pub type WUP5SEC_R = crate::BitReader;
#[doc = "Field `WUP5SEC` writer - WUPx secure protection"]
pub type WUP5SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP6SEC` reader - WUPx secure protection"]
pub type WUP6SEC_R = crate::BitReader;
#[doc = "Field `WUP6SEC` writer - WUPx secure protection"]
pub type WUP6SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP7SEC` reader - WUPx secure protection"]
pub type WUP7SEC_R = crate::BitReader;
#[doc = "Field `WUP7SEC` writer - WUPx secure protection"]
pub type WUP7SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `WUP8SEC` reader - WUPx secure protection"]
pub type WUP8SEC_R = crate::BitReader;
#[doc = "Field `WUP8SEC` writer - WUPx secure protection"]
pub type WUP8SEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RETSEC` reader - retention secure protection"]
pub type RETSEC_R = crate::BitReader;
#[doc = "Field `RETSEC` writer - retention secure protection"]
pub type RETSEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `LPMSEC` reader - low-power modes secure protection"]
pub type LPMSEC_R = crate::BitReader;
#[doc = "Field `LPMSEC` writer - low-power modes secure protection"]
pub type LPMSEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SCMSEC` reader - supply configuration and monitoring secure protection."]
pub type SCMSEC_R = crate::BitReader;
#[doc = "Field `SCMSEC` writer - supply configuration and monitoring secure protection."]
pub type SCMSEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VBSEC` reader - backup domain secure protection"]
pub type VBSEC_R = crate::BitReader;
#[doc = "Field `VBSEC` writer - backup domain secure protection"]
pub type VBSEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VUSBSEC` reader - voltage USB secure protection"]
pub type VUSBSEC_R = crate::BitReader;
#[doc = "Field `VUSBSEC` writer - voltage USB secure protection"]
pub type VUSBSEC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors for SECCFGR: each method extracts a single bit of the cached
// register value. Bits 8-10 have no accessor here — presumably reserved in
// this register; confirm against the reference manual.
impl R {
    #[doc = "Bit 0 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup1sec(&self) -> WUP1SEC_R {
        WUP1SEC_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup2sec(&self) -> WUP2SEC_R {
        WUP2SEC_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup3sec(&self) -> WUP3SEC_R {
        WUP3SEC_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup4sec(&self) -> WUP4SEC_R {
        WUP4SEC_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup5sec(&self) -> WUP5SEC_R {
        WUP5SEC_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup6sec(&self) -> WUP6SEC_R {
        WUP6SEC_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup7sec(&self) -> WUP7SEC_R {
        WUP7SEC_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - WUPx secure protection"]
    #[inline(always)]
    pub fn wup8sec(&self) -> WUP8SEC_R {
        WUP8SEC_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 11 - retention secure protection"]
    #[inline(always)]
    pub fn retsec(&self) -> RETSEC_R {
        RETSEC_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - low-power modes secure protection"]
    #[inline(always)]
    pub fn lpmsec(&self) -> LPMSEC_R {
        LPMSEC_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - supply configuration and monitoring secure protection."]
    #[inline(always)]
    pub fn scmsec(&self) -> SCMSEC_R {
        SCMSEC_R::new(((self.bits >> 13) & 1) != 0)
    }
    #[doc = "Bit 14 - backup domain secure protection"]
    #[inline(always)]
    pub fn vbsec(&self) -> VBSEC_R {
        VBSEC_R::new(((self.bits >> 14) & 1) != 0)
    }
    #[doc = "Bit 15 - voltage USB secure protection"]
    #[inline(always)]
    pub fn vusbsec(&self) -> VUSBSEC_R {
        VUSBSEC_R::new(((self.bits >> 15) & 1) != 0)
    }
}
// Write accessors for SECCFGR: each method returns a writer proxy whose bit
// position is fixed by the second const generic parameter. The raw `bits`
// escape hatch is `unsafe` because it bypasses per-field validity checks.
impl W {
    #[doc = "Bit 0 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup1sec(&mut self) -> WUP1SEC_W<SECCFGR_SPEC, 0> {
        WUP1SEC_W::new(self)
    }
    #[doc = "Bit 1 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup2sec(&mut self) -> WUP2SEC_W<SECCFGR_SPEC, 1> {
        WUP2SEC_W::new(self)
    }
    #[doc = "Bit 2 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup3sec(&mut self) -> WUP3SEC_W<SECCFGR_SPEC, 2> {
        WUP3SEC_W::new(self)
    }
    #[doc = "Bit 3 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup4sec(&mut self) -> WUP4SEC_W<SECCFGR_SPEC, 3> {
        WUP4SEC_W::new(self)
    }
    #[doc = "Bit 4 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup5sec(&mut self) -> WUP5SEC_W<SECCFGR_SPEC, 4> {
        WUP5SEC_W::new(self)
    }
    #[doc = "Bit 5 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup6sec(&mut self) -> WUP6SEC_W<SECCFGR_SPEC, 5> {
        WUP6SEC_W::new(self)
    }
    #[doc = "Bit 6 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup7sec(&mut self) -> WUP7SEC_W<SECCFGR_SPEC, 6> {
        WUP7SEC_W::new(self)
    }
    #[doc = "Bit 7 - WUPx secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn wup8sec(&mut self) -> WUP8SEC_W<SECCFGR_SPEC, 7> {
        WUP8SEC_W::new(self)
    }
    #[doc = "Bit 11 - retention secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn retsec(&mut self) -> RETSEC_W<SECCFGR_SPEC, 11> {
        RETSEC_W::new(self)
    }
    #[doc = "Bit 12 - low-power modes secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn lpmsec(&mut self) -> LPMSEC_W<SECCFGR_SPEC, 12> {
        LPMSEC_W::new(self)
    }
    #[doc = "Bit 13 - supply configuration and monitoring secure protection."]
    #[inline(always)]
    #[must_use]
    pub fn scmsec(&mut self) -> SCMSEC_W<SECCFGR_SPEC, 13> {
        SCMSEC_W::new(self)
    }
    #[doc = "Bit 14 - backup domain secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn vbsec(&mut self) -> VBSEC_W<SECCFGR_SPEC, 14> {
        VBSEC_W::new(self)
    }
    #[doc = "Bit 15 - voltage USB secure protection"]
    #[inline(always)]
    #[must_use]
    pub fn vusbsec(&mut self) -> VUSBSEC_W<SECCFGR_SPEC, 15> {
        VUSBSEC_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // SAFETY contract: the caller must ensure the raw value is valid for
        // every field of SECCFGR.
        self.bits = bits;
        self
    }
}
#[doc = "PWR security configuration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`seccfgr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`seccfgr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SECCFGR_SPEC;
// SECCFGR is accessed as a plain 32-bit word.
impl crate::RegisterSpec for SECCFGR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`seccfgr::R`](R) reader structure"]
impl crate::Readable for SECCFGR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`seccfgr::W`](W) writer structure"]
impl crate::Writable for SECCFGR_SPEC {
    // No write-1-to-clear / write-1-to-set fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SECCFGR to value 0"]
impl crate::Resettable for SECCFGR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use pyo3_ffi::*;
use std::os::raw::c_char;
// Module initializer for the `pyo3_ffi_pure` CPython extension module.
//
// CPython requires the `PyModuleDef` and `PyMethodDef` structures to stay
// alive for the lifetime of the interpreter, so both are deliberately leaked
// via `Box::into_raw`.
//
// NOTE(review): neither `PyModule_Create`, `PyCFunction_NewEx`, nor
// `PyModule_AddObject` return values are checked for NULL/-1 here — this is
// minimal demo code, not production error handling.
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe extern "C" fn PyInit_pyo3_ffi_pure() -> *mut PyObject {
    // NUL-terminated module name as required by the C API.
    let module_name = "pyo3_ffi_pure\0".as_ptr() as *const c_char;
    let init = PyModuleDef {
        m_base: PyModuleDef_HEAD_INIT,
        m_name: module_name,
        m_doc: std::ptr::null(),
        m_size: 0,
        m_methods: std::ptr::null_mut(),
        m_slots: std::ptr::null_mut(),
        m_traverse: None,
        m_clear: None,
        m_free: None,
    };
    // Leaked on purpose: the interpreter holds this pointer forever.
    let mptr = PyModule_Create(Box::into_raw(Box::new(init)));
    let wrapped_sum = PyMethodDef {
        ml_name: "sum\0".as_ptr() as *const c_char,
        // Flags and the union variant must agree: METH_VARARGS|METH_KEYWORDS
        // pairs with the PyCFunctionWithKeywords calling convention.
        ml_meth: PyMethodDefPointer {
            PyCFunctionWithKeywords: sum,
        },
        ml_flags: METH_VARARGS | METH_KEYWORDS,
        ml_doc: std::ptr::null_mut(),
    };
    PyModule_AddObject(
        mptr,
        "sum\0".as_ptr() as *const c_char,
        PyCFunction_NewEx(
            // Also leaked on purpose (see module note above this function).
            Box::into_raw(Box::new(wrapped_sum)),
            std::ptr::null_mut(),
            PyUnicode_InternFromString(module_name),
        ),
    );
    mptr
}
// `sum(a, b)` exposed to Python: adds two integers.
//
// NOTE(review): no error handling — `PyTuple_GetItem` may return NULL when
// fewer than two positional args are passed, and `PyLong_AsLongLong` returns
// -1 with an exception set on non-integer input; neither case is checked.
#[no_mangle]
pub unsafe extern "C" fn sum(
    _self: *mut PyObject,
    args: *mut PyObject,
    _kwds: *mut PyObject,
) -> *mut PyObject {
    // this is a minimal test of compilation, not good example code
    let val_a = PyTuple_GetItem(args, 0);
    let val_b = PyTuple_GetItem(args, 1);
    // Wrapping/overflow semantics follow C `long long` addition here.
    let res: i64 = PyLong_AsLongLong(val_a) + PyLong_AsLongLong(val_b);
    PyLong_FromLongLong(res)
}
|
use proc_macro::TokenStream;
use quote::quote;
use syn;
#[proc_macro_derive(AttributesContainer, attributes(attributes))]
pub fn attributes_container_macro_derive(input: TokenStream) -> TokenStream {
let ast = syn::parse(input).expect("Error parsing input");
impl_attributes_container_macro(&ast)
}
/// Generates the `AttributesContainer` impl for the struct described by `ast`.
///
/// Panics when no field is marked with `#[attributes]`, which surfaces as a
/// compile error at the derive site.
fn impl_attributes_container_macro(ast: &syn::DeriveInput) -> TokenStream {
    let name = &ast.ident;
    // `ast` is already a reference; the extra `&ast` borrow was redundant
    // (clippy::needless_borrow).
    if let Some(field) = find_attrs_field(ast) {
        // Named-fields structs always have an ident here (enforced by
        // find_attrs_field), so unwrap cannot fail.
        let field_name = field.ident.as_ref().unwrap();
        let gen = quote! {
            impl AttributesContainer for #name {
                fn get_attr<T>(&self, name: T) -> Option<&str>
                    where
                        T: Into<String>
                {
                    self.#field_name.get(name).map(|v| v.as_str())
                }
                fn with_attrs<T, K, V>(mut self, attrs: T) -> Self
                    where
                        T: AsRef<[(K, V)]>,
                        K: Into<String> + Clone,
                        V: Into<String> + Clone
                {
                    self.#field_name.extend(attrs);
                    self
                }
            }
        };
        gen.into()
    } else {
        panic!("AttributesContainer should have field marked as #[attributes]");
    }
}
/// Returns the first named field of the struct that carries an
/// `#[attributes]` attribute, or `None` when no field is marked.
///
/// Panics when the derive target is not a struct with named fields.
fn find_attrs_field(ast: &syn::DeriveInput) -> Option<&syn::Field> {
    let fields = match &ast.data {
        syn::Data::Struct(syn::DataStruct {
            fields: syn::Fields::Named(fields),
            ..
        }) => &fields.named,
        _ => panic!("expected a struct with named fields"),
    };
    // `any` states the membership test directly instead of the clippy-flagged
    // `find(..).is_some()` form.
    fields
        .iter()
        .find(|field| field.attrs.iter().any(|attr| attr.path.is_ident("attributes")))
}
|
use std::io;
use std::io::prelude::*;
use std::process;
mod operation;
mod mixed_number;
mod fraction;
mod math;
/// Single evaluation mode: evaluates the given expression once and
/// terminates.
///
/// On failure the error has already been printed by the evaluator; the
/// process then exits with status code 1.
pub fn run_single_evaluation(expression: &str) {
    // `expression` is already `&str`; the previous `&expression` was a
    // needless extra borrow (clippy::needless_borrow).
    if evaluate_expression(expression).is_err() {
        process::exit(1);
    }
}
/// REPL evaluation runs in a loop that terminates only when the user enters 'q'
pub fn run_repl_evaluation() {
    println!("Starting repl mode. Type 'q' to quit\n");
    loop {
        print!("? ");
        // `print!` does not flush, so flush explicitly to show the prompt
        // before blocking on stdin. `.ok().expect(..)` collapsed to the
        // direct `.expect(..)` (clippy::ok_expect).
        io::stdout().flush().expect("Unable to flush stdout!");
        let mut expression = String::new();
        io::stdin()
            .read_line(&mut expression)
            .expect("Failed to read expression");
        if expression.trim() == "q" {
            break;
        }
        // Errors were already logged by the evaluator; discard and re-prompt.
        evaluate_expression(&expression).ok();
    }
}
fn evaluate_expression(expression: &str) -> Result<(), &str> {
let result = operation::Operation::parse_operation(expression);
match result {
Ok(operation) => process_operation(operation),
Err(e) => log_and_propagate_error(e)
}
}
/// Computes the parsed operation; prints the value on success, otherwise logs
/// the failure and propagates a generic application error.
fn process_operation(operation: operation::Operation) -> Result<(), &'static str> {
    match operation.compute() {
        Ok(value) => {
            println!("= {}", value);
            Ok(())
        }
        // `log_and_propagate_error` already yields the Result we want to
        // return, so pass it through directly.
        Err(message) => log_and_propagate_error(message),
    }
}
/// Prints the underlying error to stderr, then surfaces a generic
/// application-level error to the caller.
fn log_and_propagate_error(error: &str) -> Result<(), &'static str> {
    let formatted = format!("Error: {}", error);
    eprintln!("{}", formatted);
    Err("Application error!")
}
|
pub mod stream_mapper;
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use super::enums::*;
// Event opcodes received from the daemon. The numeric values are part of the
// wire protocol (decoded via TryFromPrimitive from the first byte of a
// packet), so they must not be reordered or renumbered.
#[repr(u8)]
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
pub enum OpCode {
    AdvertisementPacket = 0,
    CreateConnectionChannelResponse = 1,
    ConnectionStatusChanged = 2,
    ConnectionChannelRemoved = 3,
    ButtonUpOrDown = 4,
    ButtonClickOrHold = 5,
    ButtonSingleOrDoubleClick = 6,
    ButtonSingleOrDoubleClickOrHold = 7,
    NewVerifiedButton = 8,
    GetInfoResponse = 9,
    NoSpaceForNewConnection = 10,
    GotSpaceForNewConnection = 11,
    BluetoothControllerStateChange = 12,
    PingResponse = 13,
    GetButtonInfoResponse = 14,
    ScanWizardFoundPrivateButton = 15,
    ScanWizardFoundPublicButton = 16,
    ScanWizardButtonConnected = 17,
    ScanWizardCompleted = 18,
    ButtonDeleted = 19,
    BatteryStatus = 20,
}
// Decoded server-to-client events. Variants mirror the `OpCode` values above;
// `NoOp` and `CorruptEvent` are local sentinels with no opcode (used by the
// stream mapper — see `stream_mapper` module).
#[allow(dead_code)]
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Event {
    // Produced locally, not decoded from the wire.
    NoOp,
    // Produced locally when a packet cannot be decoded.
    CorruptEvent,
    AdvertisementPacket {
        scan_id: u32,
        // Bluetooth device address, as a string.
        bd_addr: String,
        name: String,
        rssi: u8,
        is_private: bool,
        already_verified: bool,
        already_connected_to_this_device: bool,
        already_connected_to_other_device: bool,
    },
    CreateConnectionChannelResponse {
        conn_id: u32,
        error: CreateConnectionChannelError,
        connection_status: ConnectionStatus,
    },
    ConnectionStatusChanged {
        conn_id: u32,
        connection_status: ConnectionStatus,
        disconnect_reason: DisconnectReason,
    },
    ConnectionChannelRemoved {
        conn_id: u32,
        removed_reason: RemovedReason,
    },
    // The four Button* variants share the same payload shape; which one is
    // delivered depends on which event types the channel subscribed to.
    ButtonUpOrDown {
        conn_id: u32,
        click_type: ClickType,
        was_queued: bool,
        time_diff: i32,
    },
    ButtonClickOrHold {
        conn_id: u32,
        click_type: ClickType,
        was_queued: bool,
        time_diff: i32,
    },
    ButtonSingleOrDoubleClick {
        conn_id: u32,
        click_type: ClickType,
        was_queued: bool,
        time_diff: i32,
    },
    ButtonSingleOrDoubleClickOrHold {
        conn_id: u32,
        click_type: ClickType,
        was_queued: bool,
        time_diff: i32,
    },
    NewVerifiedButton {
        bd_addr: String,
    },
    GetInfoResponse {
        bluetooth_controller_state: BluetoothControllerState,
        my_bd_addr: String,
        my_bd_addr_type: BdAddrType,
        max_pending_connections: u8,
        max_concurrently_connected_buttons: i16,
        current_pending_connections: u8,
        currently_no_space_for_new_connection: bool,
        bd_addr_of_verified_buttons: Vec<String>,
    },
    NoSpaceForNewConnection {
        max_concurrently_connected_buttons: u8,
    },
    GotSpaceForNewConnection {
        max_concurrently_connected_buttons: u8,
    },
    BluetoothControllerStateChange {
        state: BluetoothControllerState,
    },
    PingResponse {
        ping_id: u32,
    },
    GetButtonInfoResponse {
        bd_addr: String,
        uuid: String,
        color: Option<String>,
        serial_number: Option<String>,
    },
    ScanWizardFoundPrivateButton {
        scan_wizard_id: u32,
    },
    ScanWizardFoundPublicButton {
        scan_wizard_id: u32,
        bd_addr: String,
        name: String,
    },
    ScanWizardButtonConnected {
        scan_wizard_id: u32,
    },
    ScanWizardCompleted {
        scan_wizard_id: u32,
        result: ScanWizardResult,
    },
    ButtonDeleted {
        bd_addr: String,
        deleted_by_this_client: bool,
    },
    BatteryStatus {
        listener_id: u32,
        // NOTE(review): signed — presumably negative values encode "unknown";
        // confirm against the protocol documentation.
        battery_percentage: i8,
        timestamp: u64,
    },
}
|
//! Types used by [`crate::Transport`].
use crate::data::channel;
use crate::data::history;
use crate::data::message::Message;
use crate::data::object::Object;
use crate::data::presence;
use crate::data::timetoken::Timetoken;
use std::collections::HashMap;
/// A response to a publish request.
pub type Publish = Timetoken;
/// A response to a subscribe request.
pub type Subscribe = (Vec<Message>, Timetoken);
/// A response to a set state request.
pub type SetState = ();
/// A response to a get state request.
pub type GetState = Object;
/// A response to a here now request.
pub type HereNow<T> = <T as presence::respond_with::RespondWith>::Response;
/// A response to a global here now request.
pub type GlobalHereNow<T> = presence::GlobalInfo<T>;
/// A response to a where now request. List of channels.
pub type WhereNow = Vec<channel::Name>;
/// A response to a heartbeat request.
pub type Heartbeat = ();
/// A response to a PAMv3 grant request. The granted token.
pub type Grant = String;
/// A response to a get history request.
pub type GetHistory = HashMap<channel::Name, Vec<history::Item>>;
/// A response to a delete history request.
pub type DeleteHistory = ();
// The three message-count responses intentionally share the same shape:
// a per-channel count keyed by channel name.
/// A response to a message counts request.
pub type MessageCounts = HashMap<channel::Name, usize>;
/// A response to a message counts with timetoken request.
pub type MessageCountsWithTimetoken = HashMap<channel::Name, usize>;
/// A response to a message counts with channel timetokens request.
pub type MessageCountsWithChannelTimetokens = HashMap<channel::Name, usize>;
|
#![deny(missing_docs, warnings)]
#![allow(unstable)]
//! `Router` provides a fast router handler for the Iron web framework.
extern crate iron;
extern crate "route-recognizer" as recognizer;
#[cfg(test)] extern crate test;
pub use router::Router;
pub use recognizer::Params;
mod router;
|
/*******************************************************
* Copyright (C) 2019,2020 Jonathan Gerber <jlgerber@gmail.com>
*
* This file is part of packybara.
*
* packybara can not be copied and/or distributed without the express
* permission of Jonathan Gerber
*******************************************************/
use super::args::PbFind;
use packybara::db::traits::*;
use packybara::{
packrat::{Client, PackratDb},
Distribution,
};
use prettytable::{cell, format, row, table};
/// Pretty print the set of changes from the database that match the provided criteria
///
/// # Arguments
/// * `client` - A Client instance used to connect to the database
/// * `cmd` - A PbFind enum instance used to extract the relevant commandline arguments
///
/// # Returns
/// * a Unit if Ok, or a boxed error if Err
pub fn find(client: Client, cmd: PbFind) -> Result<(), Box<dyn std::error::Error>> {
if let PbFind::Changes { transaction_id, .. } = cmd {
let mut pb = PackratDb::new(client);
let mut results = pb.find_all_changes();
let results = results.transaction_id(transaction_id).query()?;
// For now I do this. I need to add packge handling into the query
// either by switching functions or handling the sql on this end
let mut table = table!([bFg => "ID","TX ID", "ACTION", "LEVEL", "ROLE", "PLATFORM","SITE", "PACKAGE", "OLD", "NEW"]);
for result in results {
table.add_row(row![
result.id,
result.transaction_id,
result.action,
result.level,
result.role,
result.platform,
result.site,
result.package,
result.old.unwrap_or(Distribution::empty()),
result.new
]);
}
table.set_format(*format::consts::FORMAT_CLEAN); //FORMAT_NO_LINESEP_WITH_TITLE FORMAT_NO_BORDER_LINE_SEPARATOR
table.printstd();
};
Ok(())
}
|
//! Interacting with debugging agent
//!
//! # Example
//!
//! This example will show how to terminate the QEMU session. The program
//! should be running under QEMU with semihosting enabled
//! (use `-semihosting` flag).
//!
//! Target program:
//!
//! ```no_run
//! use cortex_m_semihosting::debug::{self, EXIT_SUCCESS, EXIT_FAILURE};
//!
//! fn main() {
//! if 2 == 2 {
//! // report success
//! debug::exit(EXIT_SUCCESS);
//! } else {
//! // report failure
//! debug::exit(EXIT_FAILURE);
//! }
//! }
//! ```
/// These values are taken from section 5.5.2 of
/// ADS Debug Target Guide (DUI0058).
// TODO document
#[allow(missing_docs)]
pub enum Exception {
    // Hardware reason codes
    BranchThroughZero = 0x20000,
    UndefinedInstr = 0x20001,
    SoftwareInterrupt = 0x20002,
    PrefetchAbort = 0x20003,
    DataAbort = 0x20004,
    AddressException = 0x20005,
    IRQ = 0x20006,
    FIQ = 0x20007,
    // Software reason codes
    BreakPoint = 0x20020,
    WatchPoint = 0x20021,
    StepComplete = 0x20022,
    RunTimeErrorUnknown = 0x20023,
    InternalError = 0x20024,
    UserInterruption = 0x20025,
    ApplicationExit = 0x20026,
    StackOverflow = 0x20027,
    DivisionByZero = 0x20028,
    OSSpecific = 0x20029,
}
/// Status enum for `exit` syscall.
// `Result` is used (rather than a bespoke enum) so callers can build a status
// from ordinary fallible code.
pub type ExitStatus = Result<(), ()>;
/// Successful execution of a program.
pub const EXIT_SUCCESS: ExitStatus = Ok(());
/// Unsuccessful execution of a program.
pub const EXIT_FAILURE: ExitStatus = Err(());
/// Reports to the debugger that the execution has completed.
///
/// This call can be used to terminate a QEMU session and report back
/// success or failure. If more than one kind of error must be
/// distinguished, use the `report_exception` syscall instead.
///
/// This call should not return. However, it is possible for the debugger
/// to request that the application continue. In that case this call
/// returns normally.
///
pub fn exit(status: ExitStatus) {
    // Ok(()) is EXIT_SUCCESS and Err(()) is EXIT_FAILURE, so a simple
    // success check selects the reason code.
    let reason = if status.is_ok() {
        Exception::ApplicationExit
    } else {
        Exception::RunTimeErrorUnknown
    };
    report_exception(reason);
}
/// Report an exception to the debugger directly.
///
/// Exception handlers can use this SWI at the end of handler chains
/// as the default action, to indicate that the exception has not been handled.
///
/// This call should not return. However, it is possible for the debugger
/// to request that the application continue. In that case this call
/// returns normally.
///
/// # Arguments
///
/// * `reason` - A reason code reported back to the debugger.
///
pub fn report_exception(reason: Exception) {
    // The enum discriminant is the ADS reason code (see `Exception`).
    let code = reason as usize;
    // SAFETY: issues the semihosting REPORT_EXCEPTION call with a valid
    // reason code; the host debugger interprets it, no memory is touched.
    unsafe {
        syscall1!(REPORT_EXCEPTION, code);
    }
}
|
/// Acceptor class handles the acceptance of inbound socket connections. It's
/// used to start listening on a local socket address, to accept incoming
/// connections and to handle network errors.
pub mod acceptor;
/// Async channel that handles the sending of messages across the network.
/// Public interface is used to create new channels, to stop and start
/// a channel, and to send messages.
///
/// Implements message functionality and the message subscriber subsystem.
pub mod channel;
/// Handles the creation of outbound connections. Used to establish an outbound
/// connection.
pub mod connector;
/// Defines a set of common network errors. Used for error handling.
pub mod error;
/// Hosts are a list of network addresses used when establishing an outbound
/// connection. Hosts are shared across the network through the address
/// protocol. When attempting to connect, a node will loop through addresses in
/// the host store until it finds ones to connect to.
pub mod hosts;
/// Generic publish/subscribe class that can dispatch any kind of message to a
/// subscribed list of dispatchers. Dispatchers subscribe to a single
/// message format of any type. This is a generalized version of the simple
/// publish-subscribe class in system::Subscriber.
///
/// Message Subsystem also enables the creation of new message subsystems,
/// adding new dispatchers and clearing inactive channels.
///
/// Message Subsystem maintains a list of dispatchers, which is a generalized
/// version of a subscriber. Pub-sub is called on dispatchers through the
/// functions 'subscribe' and 'notify'. Whereas system::Subscriber only allows
/// messages of a single type, dispatchers can handle any kind of message. This
/// generic message is called a payload and is processed and decoded by the
/// Message Dispatcher.
///
/// The Message Dispatcher is a class of subscribers that implements a
/// generic trait called Message Dispatcher Interface, which allows us to
/// process any kind of payload as a message.
pub mod message_subscriber;
/// Defines how to decode generic messages as well as implementing the common
/// network messages that are sent between nodes as described by the Protocol
/// submodule.
///
/// Implements a type called Packet which is the base message type. Packets are
/// converted into messages and passed to an event loop.
pub mod messages;
/// P2P provides all core functionality to interact with the peer-to-peer
/// network.
///
/// Used to create a network, to start and run it, to broadcast messages across
/// all channels, and to manage the channel store.
///
/// The channel store is a hashmap of channel address that we can use to add and
/// remove channels or check whether a channel is already is in the store.
pub mod p2p;
/// Defines the networking protocol used at each stage in a connection. Consists
/// of a series of messages that are sent across the network at the different
/// connection stages.
///
/// When a node connects to a network for the first time, it must follow a seed
/// protocol, which provides it with a list of network hosts to connect to. To
/// establish a connection to another node, nodes must send version and version
/// acknowledgement messages. During a connection, nodes continually get address
/// and get-address messages to inform each other about what nodes are on the
/// network. Nodes also send out a ping and pong message which keeps the network
/// from shutting down.
///
/// Protocol submodule also implements a jobs manager that handles the
/// asynchronous execution of the protocols.
pub mod protocols;
/// Defines the interaction between nodes during a connection. Consists of an
/// inbound session, which describes how to set up an incoming connection, and
/// an outbound session, which describes setting up an outbound connection. Also
/// describes the seed session, which is the type of connection used when a node
/// connects to the network for the first time. Implements the session trait
/// which describes the common functions across all sessions.
pub mod sessions;
/// Network configuration settings.
pub mod settings;
/// Utility module that defines a sleep function used throughout the network.
pub mod utility;
pub use acceptor::{Acceptor, AcceptorPtr};
pub use channel::{Channel, ChannelPtr};
pub use connector::Connector;
pub use hosts::{Hosts, HostsPtr};
pub use p2p::P2p;
pub use settings::{Settings, SettingsPtr};
|
#[macro_use] extern crate modelone;
#[macro_use] extern crate uione;
use modelone::object::*;
use modelone::history::*;
use modelone::change_string::*;
use modelone::change_vec::*;
use modelone::change_value::*;
use uione::vec2::*;
use uione::*;
use std::sync::Arc;
// A simple two-field record used to demonstrate nested revertable structs.
#[derive(Debug, Clone, PartialEq)]
struct NameRecord {
    first_name: String,
    last_name: String,
}
// Generates NameRecordChange/NameRecordSignal so per-field edits can be
// applied and undone through the modelone change system.
impl_revertable_struct!{NameRecordChange[NameRecordSignal] for NameRecord:
    first_name: StringChange,
    last_name: StringChange,
}
// The document being edited: demonstrates strings, vectors, optional
// references, recursion (sub_editors), and a nested revertable struct.
#[derive(Debug, Clone, PartialEq)]
pub struct Editor {
    editor_title: String,
    names: Vec<String>,
    // Index into `names`; None when nothing is selected.
    name_ref: Option<usize>,
    sub_editors: Vec<Editor>,
    user_name: NameRecord,
}
// Change/signal types for Editor; `sub_editors` boxes its change type to keep
// the recursive EditorChange enum finite in size.
impl_revertable_struct!{EditorChange[EditorSignal] for Editor:
    editor_title: StringChange,
    names: ValueVecChange<String>,
    name_ref: ValueChange<Option<usize>>,
    sub_editors: Box<VecChange<Editor, EditorChange>>,
    user_name: NameRecordChange,
}
/*struct EditorValidator;
impl Validator<Editor, EditorChange> for EditorValidator {
fn validate(&mut self, cxt: &mut ApplyContext<Editor, EditorChange>, changes: &Vec<EditorChange>) -> Result<(), String> {
for change in changes {
if let EditorChange::names(ref names_change) = *change {
let new_ref = names_change.updated_reference(self.name_ref);
cxt.apply(EditorChange::name_ref(ValueChange::Set(new_ref)));
}
}
Ok(())
}
}*/
// Top-level application model: a title plus an Editor wrapped in History,
// which provides undo/redo over EditorChange values.
#[derive(Debug, Clone, PartialEq)]
struct AppModel {
    title: String,
    editor: History<Editor, EditorChange>,
}
impl_changeable_struct!{AppModelChange[AppModelSignal] for AppModel:
    title: StringChange,
    editor: HistoryChange<Editor, EditorChange>,
}
// AppModelChange::wrap_title
/*struct AppModelValidator;
impl Validator<AppModel, AppModelChange> for AppModelValidator {
fn validate(&mut self, cxt: &mut ApplyContext<AppModel, AppModelChange>, changes: &Vec<AppModelChange>) -> Result<(), String> {
for change in changes {
match *change {
AppModelChange::DoNothing => {},
AppModelChange::title(_) => {
if self.title == "My App 2" {
//cxt.apply(AppModelChange::Title(ValueChange("My App 3".into())));
}
},
AppModelChange::editor(ref _editorchange) => {
//let subcxt = &mut SubApplyContext::new(cxt, &mut |c| ModelChange::Tags(c))
//EditorValidator{}.validate(subcxt, change.);
},
}
}
Ok(())
}
}*/
// The UI tree: widgets plus the application model they render. `item_data`
// carries this item's own layout state (size, position, ...).
#[derive(Debug, PartialEq)]
struct AppUi {
    item_data: ItemData,
    title_field: TestWidget1,
    editor_title_field: TestWidget1,
    app_model: AppModel,
    my_button: Button,
}
impl_changeable_struct!{AppUiChange[AppUiSignal] for AppUi:
    item_data: ItemDataChange,
    title_field: TestWidget1Change,
    editor_title_field: TestWidget1Change,
    app_model: AppModelChange,
    my_button: ButtonChange,
}
/*#[derive(Debug, PartialEq, Clone)]
enum EnumThing {
S(String),
V(i32),
}
impl_revertable_enum!{EnumThingChange[EnumThingSignal] for EnumThing:
S: StringChange,
V: ValueChange<i32>,
}*/
impl AppUi {
    /// Builds the UI tree for the given application model.
    fn new(app_model: AppModel) -> AppUi {
        // The label is shared, hence the Arc.
        let mut my_button = Button::new(Arc::new("Press me".into()));
        // Oversized button so it is easy to hit in the demo window.
        my_button.item_data.size.x = 100. * 10.;
        my_button.item_data.size.y = 25. * 5.;
        AppUi {
            item_data: ItemData::new(),
            title_field: TestWidget1::new(),
            editor_title_field: TestWidget1::new(),
            // Field-init shorthand replaces the redundant `x: x` forms
            // (clippy::redundant_field_names).
            app_model,
            my_button,
        }
    }
}
impl Object<AppUiChange> for AppUi {
    // Reacts to a signal: re-anchors the title field, logs button clicks,
    // then forwards the signal to the child widgets via the dispatch macro.
    fn update(&self, cxt: &mut ApplyContext<AppUiChange>, signal: &AppUiSignal) {
        // Fill the parent with a 10px margin and apply the resulting layout
        // change to the title field's item data.
        Anchors::new_fill_margin(AnchorRelation::Parent, self, 10.)
            .apply(&self.title_field.item_data, sub_apply!(cxt, AppUiChange::title_field.TestWidget1Change::item_data));
        if let AppUiSignal::my_button(ButtonSignal::clicked(())) = *signal {
            println!("Clicked!");
        }
        // Route the signal to each listed child; editor_title_field is
        // intentionally excluded (kept commented out below).
        dispatch_struct_update!{AppUiChange[AppUiSignal] for self, cxt, signal:
            item_data: ItemData,
            title_field: TestWidget1,
            my_button: Button,
            //editor_title_field: TestWidget1,
        }
    }
}
// Exposes this widget's ItemData and its visible children to the scene graph.
impl Item for AppUi {
    impl_get_item!(item_data);
    impl_children!{
        title_field,
        my_button,
        //editor_title_field,
    }
}
// Scratch impl kept as a record of the in-progress event-routing design;
// all contents are intentionally commented out.
impl AppUi {
    //fn event(_cxt: &mut ApplyContext<AppUi, AppUiChange>, _event: UiEvent) {
    //for signal in self.title_field.event(consume_apply_wrap!(model, AppModelChange, Title title), event) {
    //for signal in TestWidget1::event(&mut SubApplyContext::new(cxt, &|model| &model.title_field, &mut |change| AppUiChange::title_field(change)), event) {
    // TODO
    //}
    //}
    /*fn event_from_model(cxt: &mut ApplyContext<AppUi, AppUiChange>, model: &AppModel, signal: &AppModelSignal) {
        match *signal {
            AppModelSignal::title(ref _subsignal) => {
                if model.title != self.title_field.content.model.text {
                    //TestWidget1::replace_text(&mut SubApplyContext::new(cxt, &|model| &model.title, &mut |change| AppModelChange::Title(change)), self.title_field);
                }
            },
            AppModelSignal::editor(ref subsignal) => {
                match *subsignal {
                    HistorySignal::Change(ref subsubsignal) => {
                        match *subsubsignal {
                            EditorSignal::editor_title(ref _editortitlesignal) => {
                                //cxt.apply(AppUiChange::EditorTitleField(TestWidget1Change::
                                //self.editor_title_field.update(&model.editor.model.editor_title, editortitlesignal);
                                //TestWidget1::replace_text(&mut SubApplyContext::new(cxt, &|model| &model.editor_title_field, &mut |change| AppUiChange::editor_title_field(change)), model.editor.model.editor_title.clone());
                                TestWidget1::replace_text(sub_apply!(cxt, AppUiChange::editor_title_field), model.editor.model.editor_title.clone());
                            },
                            _ => {},
                        }
                    },
                    HistorySignal::Reset => {
                        let changeable: &Changeable<StringChange> = &model.editor.model.editor_title;
                        for signal in changeable.reset_view_signals() {
                            println!("Reset Editor Signal: {:?}", signal);
                            //self.editor_title_field.update(&model.editor.model.editor_title, &signal);
                        }
                    },
                    _ => {},
                }
            },
        }
    }*/
}
fn main() {
    // Seed model: one editor with a couple of names and a selected entry.
    let app_model = AppModel {
        title: "My App".into(),
        editor: History::new(Editor {
            editor_title: "Blah".into(),
            names: vec!["Hello World".into(), "Cool Beans".into()],
            name_ref: Some(1),
            sub_editors: vec![],
            user_name: NameRecord { first_name: "Joe".into(), last_name: "Bloggs".into() },
        })
    };
    let mut ui = AppUi::new(app_model);
    ui.item_data.size = Vec2f::new(800., 600.);
    // The manager owns the UI state and funnels all changes through it.
    let mut manager = Manager::<_, AppUiChange, _>::new(ui, NoValidator);
    /*for signal in manager.reset_view_signals() {
        println!("Reset Signal: {:?}", signal);
        AppUi::update(&mut manager, &signal);
    }*/
    /*for change in manager.get().reset_view_changes() {
        ui.update(manager.get(), change);
    }*/
    //manager.resolve_signals();
    //println!("{:#?}", manager.get());
    //println!("{:#?}", manager.get());
    // Push one editor-title insertion ("a" at index 0) onto the undo history.
    manager.apply(AppUiChange::app_model(AppModelChange::editor(HistoryChange::Push(EditorChange::editor_title(StringChange{index: 0, len: 0, new: "a".into()})))));
    //ui.event(&mut manager, UiEvent::Text("a".into()));
    //manager.apply(AppModelChange::Editor(HistoryChange::Push(EditorChange::EditorTitle(StringChange{index: 0, len: 0, new: "a".into()}))));
    //manager.resolve_signals();
    //println!("{:#?}", manager.get());
    //manager.apply(AppModelChange::Editor(HistoryChange::Undo));
    //manager.resolve_signals();
    println!("{:#?}", manager.get());
    //let scenegraph = SceneGraph::new();
    println!("Change size: {}", std::mem::size_of::<AppModelChange>());
    //mainloop_gfx_sdl::exec(&mut manager, get_root_item_change);
    // Hand control to the glutin main loop; the closure supplies renderer
    // resources (currently just the font resource).
    mainloop_gl_glutin::exec(manager, get_root_item_change, &|| {
        vec![
            Box::new(font_resource::FontResource::new())
        ]
    });
    //println!("{:05X}", 50);
}
// Adapter used by the main loop: wraps a root ItemData change into the
// top-level AppUiChange so it can flow through the manager.
fn get_root_item_change(item_change: ItemDataChange) -> AppUiChange {
    AppUiChange::item_data(item_change)
}
|
/// A single digital (on/off) output line. `Send` so implementations can be
/// driven from another thread.
pub trait DigitalOutput: Send {
    /// Drives the output high (`true`) or low (`false`).
    fn set_value(&mut self, val: bool);
}
|
use crate::{server::UdpTuple, transport::TransportMsg, Error};
use common::rsip::{self, prelude::*, Transport};
use std::convert::{TryFrom, TryInto};
use std::net::SocketAddr;
//TODO: we probably need better naming here
/// A SIP request together with the peer socket address and the transport it
/// travels over.
/// NOTE(review): whether `peer` is the source or destination is not evident
/// from this file — the type is used for both directions; confirm with callers.
#[derive(Debug, Clone)]
pub struct RequestMsg {
    /// The SIP request being carried.
    pub sip_request: rsip::Request,
    /// The remote peer's socket address.
    pub peer: SocketAddr,
    /// Transport protocol for this message (e.g. UDP).
    pub transport: Transport, //pub ttl: u32
}
impl RequestMsg {
    /// Assembles a `RequestMsg` from its three components.
    pub fn new(sip_request: rsip::Request, peer: SocketAddr, transport: Transport) -> Self {
        Self {
            sip_request,
            peer,
            transport,
        }
    }

    /// The transaction id of the underlying SIP request, converting any
    /// underlying error into this crate's `Error`.
    pub fn transaction_id(&self) -> Result<String, Error> {
        let id = self.sip_request.transaction_id()?;
        Ok(id)
    }
}
impl From<(rsip::Request, SocketAddr, Transport)> for RequestMsg {
    /// Destructures the triple directly in the parameter position instead of
    /// indexing into it.
    fn from((sip_request, peer, transport): (rsip::Request, SocketAddr, Transport)) -> Self {
        Self {
            sip_request,
            peer,
            transport,
        }
    }
}
impl TryFrom<UdpTuple> for RequestMsg {
    type Error = crate::Error;

    /// Parses the raw UDP bytes into a SIP request; the transport is UDP by
    /// construction.
    fn try_from(udp_tuple: UdpTuple) -> Result<Self, Self::Error> {
        let sip_request = udp_tuple.bytes.try_into()?;
        Ok(Self {
            sip_request,
            peer: udp_tuple.peer,
            transport: Transport::Udp,
        })
    }
}
impl TryFrom<TransportMsg> for RequestMsg {
    type Error = crate::Error;

    /// Narrows a generic transport message to a request message; fails when
    /// the inner SIP message is not a request.
    fn try_from(transport_msg: TransportMsg) -> Result<RequestMsg, Self::Error> {
        let sip_request = transport_msg.sip_message.try_into()?;
        Ok(RequestMsg {
            sip_request,
            peer: transport_msg.peer,
            transport: transport_msg.transport,
        })
    }
}
|
extern crate byteorder;
extern crate encoding_rs;
#[macro_use]
extern crate failure;
extern crate goblin;
extern crate image;
extern crate regex;
extern crate rustc_demangle;
#[macro_use]
extern crate serde_derive;
extern crate serde;
extern crate standalone_syn as syn;
extern crate toml;
extern crate zip;
mod assembler;
mod banner;
mod config;
mod demangle;
mod dol;
mod file_source;
mod framework_map;
pub mod iso;
mod key_val_print;
mod linker;
use assembler::Assembler;
use assembler::Instruction;
use banner::Banner;
use config::Config;
use dol::DolFile;
use failure::{err_msg, Error, ResultExt};
use file_source::{FileSource, FileSystem};
use iso::virtual_file_system::Directory;
pub use key_val_print::{DontPrint, KeyValPrint, MessageKind};
use std::collections::HashMap;
use std::fs::{self, File, OpenOptions};
use std::io::{prelude::*, BufReader, BufWriter};
use std::mem;
use std::path::PathBuf;
use std::process::Command;
use zip::{write::FileOptions, ZipArchive, ZipWriter};
/// Compiles the user's crate and packages the result.
///
/// Reads `RomHack.toml` from the current directory, runs `cargo build` for
/// the `powerpc-unknown-linux-gnu` target (release unless `debug` is set),
/// then hands the compiled static library to either `build_patch`
/// (`patch == true`) or `build_and_emit_iso`.
pub fn build<P: KeyValPrint>(printer: &P, debug: bool, patch: bool) -> Result<(), Error> {
    let mut toml_buf = String::new();
    File::open("RomHack.toml")
        .context("Couldn't find \"RomHack.toml\".")?
        .read_to_string(&mut toml_buf)
        .context("Failed to read \"RomHack.toml\".")?;
    let config: Config = toml::from_str(&toml_buf).context("Can't parse RomHack.toml")?;
    printer.print(None, "Compiling", "");
    {
        // Cross-compile for PowerPC; RUSTFLAGS enables extra CPU features for
        // the target processor.
        let mut command = Command::new("cargo");
        command
            .args(&["build", "--target", "powerpc-unknown-linux-gnu"])
            .env("RUSTFLAGS", "-C target-feature=+msync,+fres,+frsqrte");
        if !debug {
            command.arg("--release");
        }
        // The crate to compile may live in a subdirectory configured in [src].
        if let Some(ref src_dir) = config.src.src {
            command.current_dir(src_dir);
        }
        let exit_code = command
            .spawn()
            .context("Couldn't build the project")?
            .wait()?;
        ensure!(exit_code.success(), "Couldn't build the project");
    }
    let path_to_compiled_lib =
        find_compiled_library(debug).context("Couldn't find the compiled static library")?;
    let compiled_lib =
        fs::read(path_to_compiled_lib).context("Couldn't read the compiled static library")?;
    if patch {
        build_patch(printer, compiled_lib, config)
    } else {
        build_and_emit_iso(printer, FileSystem, compiled_lib, config)
    }
}
/// Applies a previously created `.patch` archive to an original game image.
///
/// Extracts the configuration and compiled library from the patch archive,
/// points the config at the caller-supplied input/output paths, and rebuilds
/// the ISO using the archive itself as the file source.
pub fn apply_patch<P: KeyValPrint>(
    printer: &P,
    patch: PathBuf,
    original_game: PathBuf,
    output: PathBuf,
) -> Result<(), Error> {
    printer.print(None, "Parsing", "patch");
    let (zip, compiled_library, mut config) = open_config_from_patch(BufReader::new(
        File::open(patch).context("Couldn't open the patch file")?,
    ))?;
    // The patch carries no usable machine-local paths; they come from the caller.
    config.src.iso = original_game;
    config.build.iso = output;
    build_and_emit_iso(printer, zip, compiled_library, config)
}
/// Opens a patch archive and extracts the pieces needed to rebuild an ISO.
///
/// Returns the zip archive itself (later used as the file source for
/// replacement files), the raw bytes of the compiled static library
/// (`libcompiled.a`), and the parsed `RomHack.toml` index stored in the patch.
pub fn open_config_from_patch<R: Read + Seek>(
    reader: R,
) -> Result<(ZipArchive<R>, Vec<u8>, Config), Error> {
    let mut zip = ZipArchive::new(reader).context("Couldn't parse patch file")?;
    let mut buffer = Vec::new();
    let config: Config = {
        let mut toml_file = zip
            .by_name("RomHack.toml")
            .context("The patch file doesn't contain the patch index")?;
        toml_file
            .read_to_end(&mut buffer)
            .context("Couldn't read the patch index")?;
        toml::from_slice(&buffer).context("Can't parse patch index")?
    };
    {
        // Reuse the same buffer for the library bytes; it is the value
        // returned from this function.
        let mut compiled_library = zip
            .by_name("libcompiled.a")
            .context("The patch file doesn't contain the compiled library")?;
        buffer.clear();
        compiled_library
            .read_to_end(&mut buffer)
            .context("Couldn't read the compiled library")?;
    }
    Ok((zip, buffer, config))
}
/// Packages the compiled library, replacement files and configuration into a
/// distributable patch archive (the configured ISO path with a `.patch`
/// extension).
fn build_patch<P: KeyValPrint>(
    printer: &P,
    compiled_library: Vec<u8>,
    mut config: Config,
) -> Result<(), Error> {
    printer.print(None, "Creating", "patch file");
    config.build.iso.set_extension("patch");
    let mut zip = ZipWriter::new(BufWriter::new(
        File::create(&config.build.iso).context("Couldn't create the patch file")?,
    ));
    printer.print(None, "Storing", "replacement files");
    // Copy every replacement file into the archive under a generated name and
    // rewrite the config's file map to point at the archived copies.
    let mut new_map = HashMap::new();
    for (index, (iso_path, actual_path)) in config.files.iter().enumerate() {
        let zip_path = format!("replace{}.dat", index);
        new_map.insert(iso_path.clone(), PathBuf::from(&zip_path));
        zip.start_file(zip_path, FileOptions::default())
            .context("Failed creating a new patch file entry")?;
        zip.write_all(&fs::read(actual_path).with_context(|_| {
            format!(
                "Couldn't read the file \"{}\" to store it in the patch.",
                actual_path.display()
            )
        })?).context("Failed storing a file in the patch")?;
    }
    config.files = new_map;
    printer.print(None, "Storing", "libraries");
    zip.start_file("libcompiled.a", FileOptions::default())
        .context("Failed creating a new patch file entry for the compiled library")?;
    zip.write_all(&compiled_library)
        .context("Failed storing the compiled library in the patch")?;
    // NOTE(review): unlike `files`, the `link.libs` paths are NOT rewritten to
    // the in-archive `lib{N}.a` names — verify apply_patch can resolve them.
    for (index, lib_path) in config.link.libs.iter().flat_map(|x| x).enumerate() {
        let zip_path = format!("lib{}.a", index);
        zip.start_file(zip_path, FileOptions::default())
            .context("Failed creating a new patch file entry")?;
        let file_buf = fs::read(lib_path).with_context(|_| {
            format!(
                "Couldn't load \"{}\". Did you build the project correctly?",
                lib_path.display()
            )
        })?;
        zip.write_all(&file_buf)
            .context("Failed storing a library in the patch")?;
    }
    // Embed the assembly patch and banner image (when configured) and rewrite
    // their config paths to the in-archive names.
    if let Some(path) = &mut config.src.patch {
        printer.print(None, "Storing", "patch.asm");
        zip.start_file("patch.asm", FileOptions::default())
            .context("Failed to create the patch.asm file in the patch")?;
        let file_buf = fs::read(&*path).context("Couldn't read the patch.asm file")?;
        zip.write_all(&file_buf)
            .context("Failed storing the patch.asm file in the patch")?;
        *path = PathBuf::from("patch.asm");
    }
    if let Some(path) = &mut config.info.image {
        printer.print(None, "Storing", "banner");
        zip.start_file("banner.dat", FileOptions::default())
            .context("Failed to create the banner file in the patch")?;
        let file_buf = fs::read(&*path).context("Couldn't read the banner file")?;
        zip.write_all(&file_buf)
            .context("Failed storing the banner file in the patch")?;
        *path = PathBuf::from("banner.dat");
    }
    printer.print(None, "Storing", "patch index");
    // Strip machine-local paths before serializing the config into the patch.
    config.src.iso = PathBuf::new();
    config.build = Default::default();
    zip.start_file("RomHack.toml", FileOptions::default())
        .context("Failed to create the patch index")?;
    let config = toml::to_vec(&config).context("Couldn't encode the patch index")?;
    zip.write_all(&config)
        .context("Failed storing the patch index")?;
    Ok(())
}
/// Builds the patched virtual ISO file system in memory.
///
/// Replaces configured files, links the compiled code (optionally against the
/// game's parsed symbol map), assembles and applies `patch.asm`, patches the
/// main DOL and the banner, and returns the virtual `Directory` borrowing
/// from `original_iso`.
pub fn build_iso<'a, P: KeyValPrint, F: FileSource>(
    printer: &P,
    mut files: F,
    original_iso: &'a [u8],
    compiled_library: Vec<u8>,
    config: &'a mut Config,
) -> Result<Directory<'a>, Error> {
    let mut iso = iso::reader::load_iso(original_iso).context("Couldn't parse the ISO")?;
    printer.print(None, "Replacing", "files");
    for (iso_path, actual_path) in &config.files {
        iso.resolve_and_create_path(iso_path).data = files
            .read_to_vec(actual_path)
            .with_context(|_| {
                format!(
                    "Couldn't read the file \"{}\" to store it in the ISO.",
                    actual_path.display()
                )
            })?.into();
    }
    // The game's symbol map (when present in the ISO) supplies symbols the
    // linker and assembler can reference.
    let mut original_symbols = HashMap::new();
    if let Some(framework_map) = config.src.map.as_ref().and_then(|m| iso.resolve_path(m)) {
        printer.print(None, "Parsing", "symbol map");
        original_symbols = framework_map::parse(&framework_map.data)
            .context("Couldn't parse the game's symbol map")?;
    } else {
        printer.print(
            Some(MessageKind::Warning),
            "Warning",
            "No symbol map specified or it wasn't found",
        );
    }
    printer.print(None, "Linking", "");
    // Link order: the user's compiled crate, any extra static libraries, then
    // the bundled base library (`linker::BASIC_LIB`).
    let mut libs_to_link = Vec::with_capacity(config.link.libs.as_ref().map_or(0, |x| x.len()) + 2);
    libs_to_link.push(compiled_library);
    for lib_path in config.link.libs.iter().flat_map(|x| x) {
        // NOTE(review): `mut` is unnecessary here — the buffer is never modified.
        let mut file_buf = files.read_to_vec(lib_path).with_context(|_| {
            format!(
                "Couldn't load \"{}\". Did you build the project correctly?",
                lib_path.display()
            )
        })?;
        libs_to_link.push(file_buf);
    }
    libs_to_link.push(linker::BASIC_LIB.to_owned());
    // The base address is written as a Rust integer literal (e.g.
    // "0x8040_1000"), so it is parsed with syn rather than `str::parse`.
    let base_address: syn::LitInt =
        syn::parse_str(&config.link.base).context("Invalid Base Address")?;
    let linked = linker::link(
        printer,
        &libs_to_link,
        base_address.value() as u32,
        config.link.entries.clone(),
        &original_symbols,
    ).context("Couldn't link the Rom Hack")?;
    printer.print(None, "Creating", "symbol map");
    // TODO NLL bind framework_map to local variable
    framework_map::create(
        &config,
        config
            .src
            .map
            .as_ref()
            .and_then(|m| iso.resolve_path(m))
            .map(|f| &*f.data),
        &linked.sections,
    ).context("Couldn't create the new symbol map")?;
    // Assemble the user's patch file (if any) into concrete instructions.
    let mut instructions = Vec::new();
    if let Some(patch) = config.src.patch.take() {
        printer.print(None, "Parsing", "patch");
        let asm = files
            .read_to_string(&patch)
            .with_context(|_| format!("Couldn't read the patch file \"{}\".", patch.display()))?;
        let lines = &asm.lines().collect::<Vec<_>>();
        let mut assembler = Assembler::new(linked.symbol_table, &original_symbols);
        instructions = assembler
            .assemble_all_lines(lines)
            .context("Couldn't assemble the patch file lines")?;
    }
    {
        printer.print(None, "Patching", "game");
        let main_dol = iso
            .main_dol_mut()
            .ok_or_else(|| err_msg("Dol file not found"))?;
        let original = DolFile::parse(&main_dol.data);
        main_dol.data = patch_instructions(original, linked.dol, &instructions)
            .context("Couldn't patch the game")?
            .into();
    }
    {
        printer.print(None, "Patching", "banner");
        if let Some(banner_file) = iso.banner_mut() {
            // TODO Not always true
            let is_japanese = true;
            let mut banner = Banner::parse(is_japanese, &banner_file.data)
                .context("Couldn't parse the banner")?;
            // Override banner metadata with whatever the config provides.
            if let Some(game_name) = config.info.game_name.take() {
                banner.game_name = game_name;
            }
            if let Some(developer_name) = config.info.developer_name.take() {
                banner.developer_name = developer_name;
            }
            if let Some(full_game_name) = config.info.full_game_name.take() {
                banner.full_game_name = full_game_name;
            }
            if let Some(full_developer_name) = config.info.full_developer_name.take() {
                banner.full_developer_name = full_developer_name;
            }
            if let Some(game_description) = config.info.description.take() {
                banner.game_description = game_description;
            }
            if let Some(image_path) = config.info.image.take() {
                let image = files
                    .open_image(image_path)
                    .context("Couldn't open the banner replacement image")?
                    .to_rgba();
                banner.image.copy_from_slice(&image);
            }
            banner_file.data = banner.to_bytes(is_japanese).to_vec().into();
        } else {
            printer.print(Some(MessageKind::Warning), "Warning", "No banner to patch");
        }
    }
    Ok(iso)
}
/// Loads the original game image, builds the patched ISO in memory and
/// writes it to the configured output path.
pub fn build_and_emit_iso<P: KeyValPrint, F: FileSource>(
    printer: &P,
    files: F,
    compiled_library: Vec<u8>,
    mut config: Config,
) -> Result<(), Error> {
    printer.print(None, "Loading", "original game");
    let buf = iso::reader::load_iso_buf(&config.src.iso)
        .with_context(|_| format!("Couldn't find \"{}\".", config.src.iso.display()))?;
    // Take the output path out of the config before `build_iso` borrows it mutably.
    let out_path = mem::replace(&mut config.build.iso, Default::default());
    let iso = build_iso(printer, files, &buf, compiled_library, &mut config)?;
    printer.print(None, "Building", "ISO");
    // Write through a large (4 MiB) buffer.
    iso::writer::write_iso(
        BufWriter::with_capacity(
            4 << 20,
            File::create(out_path).context("Couldn't create the final ISO")?,
        ),
        &iso,
    ).context("Couldn't write the final ISO")?;
    Ok(())
}
/// Scaffolds a new Rom Hack project: a `cargo` library crate plus
/// `RomHack.toml`, a default `patch.asm`, a panic handler module and a
/// `.gitignore`, and appends staticlib settings to the generated `Cargo.toml`.
pub fn new(name: &str) -> Result<(), Error> {
    let exit_code = Command::new("cargo")
        .args(&["new", "--lib", &name])
        .spawn()
        .context("Couldn't create the cargo project")?
        .wait()?;
    ensure!(exit_code.success(), "Couldn't create the cargo project");
    // The project configuration template; {0} is the project name.
    let mut file = File::create(format!("{}/RomHack.toml", name))
        .context("Couldn't create the RomHack.toml")?;
    write!(
        file,
        r#"[info]
game-name = "{0}"
[src]
iso = "game.iso" # Provide the path of the game's ISO
patch = "src/patch.asm"
# Optionally specify the game's symbol map
# map = "maps/framework.map"
[files]
# You may replace or add new files to the game here
# "path/to/file/in/iso" = "path/to/file/on/harddrive"
[build]
map = "target/framework.map"
iso = "target/{0}.iso"
[link]
entries = ["init"] # Enter the exported function names here
base = "0x8040_1000" # Enter the start address of the Rom Hack's code here
"#,
        name.replace('-', "_"),
    ).context("Couldn't write the RomHack.toml")?;
    // Minimal no_std library entry point exporting `init`.
    let mut file = File::create(format!("{}/src/lib.rs", name))
        .context("Couldn't create the lib.rs source file")?;
    write!(
        file,
        "{}",
        r#"#![no_std]
pub mod panic;
#[no_mangle]
pub extern "C" fn init() {}
"#
    ).context("Couldn't write the lib.rs source file")?;
    // Panic handler required for no_std targets.
    let mut file = File::create(format!("{}/src/panic.rs", name))
        .context("Couldn't create the panic.rs source file")?;
    write!(
        file,
        "{}",
        r#"#[cfg(any(target_arch = "powerpc", target_arch = "wasm32"))]
#[panic_handler]
pub fn panic(_info: &::core::panic::PanicInfo) -> ! {
loop {}
}
"#
    ).context("Couldn't write the panic.rs source file")?;
    // Empty assembly patch stub referenced by RomHack.toml above.
    let mut file = File::create(format!("{}/src/patch.asm", name))
        .context("Couldn't create the default patch file")?;
    write!(
        file,
        r#"; You can use this to patch the game's code to call into the Rom Hack's code
"#
    ).context("Couldn't write the default patch file")?;
    // Append staticlib + abort-panic settings to the cargo-generated manifest.
    let mut file = OpenOptions::new()
        .append(true)
        .open(format!("{}/Cargo.toml", name))
        .context("Couldn't open the Cargo.toml")?;
    write!(
        file,
        "{}",
        r#"# Comment this in if you want to use the gcn crate in your rom hack.
# It requires the operating system symbols to be resolved via a map.
# gcn = { git = "https://github.com/CryZe/gcn", features = ["panic"] }
[lib]
crate-type = ["staticlib"]
[profile.dev]
panic = "abort"
opt-level = 1
[profile.release]
panic = "abort"
lto = true
"#
    ).context("Couldn't write into the Cargo.toml")?;
    let mut file = File::create(format!("{}/.gitignore", name))
        .context("Couldn't create the gitignore file")?;
    write!(
        file,
        r#"/target
**/*.rs.bk
"#
    ).context("Couldn't write the gitignore file")?;
    Ok(())
}
/// Merges the freshly linked sections into the original DOL and applies the
/// assembled instruction patches, returning the serialized executable.
fn patch_instructions(
    mut base: DolFile,
    extra: DolFile,
    patches: &[Instruction],
) -> Result<Vec<u8>, Error> {
    // Graft the linked sections onto the original executable first …
    base.append(extra);
    // … then overwrite the instructions requested by the assembly patch.
    base.patch(patches).context("Couldn't patch the DOL")?;
    Ok(base.to_bytes())
}
/// Locates the static library produced by the cross compiler inside
/// `target/powerpc-unknown-linux-gnu/{debug,release}`.
fn find_compiled_library(debug: bool) -> Result<PathBuf, Error> {
    use std::iter::FromIterator;

    let profile = if debug { "debug" } else { "release" };
    let target_dir = PathBuf::from_iter(&["target", "powerpc-unknown-linux-gnu", profile]);
    let entries = fs::read_dir(target_dir)
        .context("Couldn't list entries of the compiler's target directory")?;
    for entry in entries {
        let path = entry
            .context("Couldn't list an entry of the compiler's target directory")?
            .path();
        // The first `*.a` archive found is the compiled crate.
        if path.extension() == Some("a".as_ref()) {
            return Ok(path);
        }
    }
    bail!("None of the files in the compiler's target directory match *.a")
}
|
use super::{operate, BytesArgument};
use nu_engine::CallExt;
use nu_protocol::ast::Call;
use nu_protocol::ast::CellPath;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, Example, PipelineData, ShellError, Signature, Span, SyntaxShape, Value,
};
use std::cmp::Ordering;
/// The `bytes at` command: extracts a sub-slice of binary data by index range.
#[derive(Clone)]
pub struct BytesAt;
/// Arguments resolved from a `bytes at` invocation.
struct Arguments {
    /// Inclusive start index; negative values count from the end.
    start: isize,
    /// Exclusive end index; negative values count from the end,
    /// `isize::max_value()` means "to the end of the input".
    end: isize,
    /// Span of the range argument, used for error reporting.
    arg_span: Span,
    /// Optional cell paths selecting which columns to operate on.
    column_paths: Option<Vec<CellPath>>,
}
impl BytesArgument for Arguments {
    /// Hands the column paths to the generic `operate` driver, leaving `None` behind.
    fn take_column_paths(&mut self) -> Option<Vec<CellPath>> {
        self.column_paths.take()
    }
}
/// Ensure the given `range` is valid, and return the `(start, end, val_span)` triple.
///
/// Accepted forms are a two-element list (e.g. `[3 4]`, with int or string
/// elements) or a comma-separated string (e.g. `"3,4"`). Either bound may be
/// empty or `"_"`: a missing start means `0`, a missing end means "to the end
/// of the input" (encoded as `isize::max_value()`).
fn parse_range(range: Value, head: Span) -> Result<(isize, isize, Span), ShellError> {
    let (start, end, span) = match range {
        Value::List { mut vals, span } => {
            // A list range needs exactly two elements. (The previous message,
            // "More than two indices given", was wrong for short lists.)
            if vals.len() != 2 {
                return Err(ShellError::UnsupportedInput(
                    "Expected exactly two indices".to_string(),
                    span,
                ));
            } else {
                // Pop from the back: end first, then start.
                let end = vals.pop().expect("Already check has size 2");
                let end = match end {
                    Value::Int { val, .. } => val.to_string(),
                    Value::String { val, .. } => val,
                    other => {
                        return Err(ShellError::UnsupportedInput(
                            "could not perform subbytes. Expecting a string or int".to_string(),
                            other.span().unwrap_or(head),
                        ))
                    }
                };
                let start = vals.pop().expect("Already check has size 1");
                let start = match start {
                    Value::Int { val, .. } => val.to_string(),
                    Value::String { val, .. } => val,
                    other => {
                        return Err(ShellError::UnsupportedInput(
                            "could not perform subbytes. Expecting a string or int".to_string(),
                            other.span().unwrap_or(head),
                        ))
                    }
                };
                (start, end, span)
            }
        }
        Value::String { val, span } => {
            // String form must contain a comma separating the two bounds.
            let splitted_result = val.split_once(',');
            match splitted_result {
                Some((start, end)) => (start.to_string(), end.to_string(), span),
                None => {
                    return Err(ShellError::UnsupportedInput(
                        "could not perform subbytes".to_string(),
                        span,
                    ))
                }
            }
        }
        other => {
            return Err(ShellError::UnsupportedInput(
                "could not perform subbytes".to_string(),
                other.span().unwrap_or(head),
            ))
        }
    };
    // An empty or `_` start defaults to the beginning of the input.
    let start: isize = if start.is_empty() || start == "_" {
        0
    } else {
        match start.trim().parse() {
            Ok(s) => s,
            Err(_) => {
                return Err(ShellError::UnsupportedInput(
                    "could not perform subbytes".to_string(),
                    span,
                ))
            }
        }
    };
    // An empty or `_` end means "to the end"; `at` treats `isize::MAX` as unbounded.
    let end: isize = if end.is_empty() || end == "_" {
        isize::max_value()
    } else {
        match end.trim().parse() {
            Ok(s) => s,
            Err(_) => {
                return Err(ShellError::UnsupportedInput(
                    "could not perform subbytes".to_string(),
                    span,
                ))
            }
        }
    };
    Ok((start, end, span))
}
impl Command for BytesAt {
    fn name(&self) -> &str {
        "bytes at"
    }
    /// One required range argument plus optional cell paths for table columns.
    fn signature(&self) -> Signature {
        Signature::build("bytes at")
            .required("range", SyntaxShape::Any, "the indexes to get bytes")
            .rest(
                "rest",
                SyntaxShape::CellPath,
                "optionally get bytes by column paths",
            )
            .category(Category::Bytes)
    }
    fn usage(&self) -> &str {
        "Get bytes defined by a range. Note that the start is included but the end is excluded, and that the first byte is index 0."
    }
    fn search_terms(&self) -> Vec<&str> {
        vec!["slice"]
    }
    /// Parses the range argument, collects optional column paths, and delegates
    /// the per-value slicing to `operate` with the `at` helper.
    fn run(
        &self,
        engine_state: &EngineState,
        stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<PipelineData, ShellError> {
        let range: Value = call.req(engine_state, stack, 0)?;
        let (start, end, arg_span) = parse_range(range, call.head)?;
        let column_paths: Vec<CellPath> = call.rest(engine_state, stack, 1)?;
        // An empty rest list means "apply to the value itself".
        let column_paths = if column_paths.is_empty() {
            None
        } else {
            Some(column_paths)
        };
        let arg = Arguments {
            start,
            end,
            arg_span,
            column_paths,
        };
        operate(at, arg, input, call.head, engine_state.ctrlc.clone())
    }
    fn examples(&self) -> Vec<Example> {
        vec![
            Example {
                // NOTE(review): `[3 4]` is end-exclusive, so the result is only
                // `0x[10]`; the description's `0x[10 01]` looks stale — verify.
                description: "Get a subbytes `0x[10 01]` from the bytes `0x[33 44 55 10 01 13]`",
                example: " 0x[33 44 55 10 01 13] | bytes at [3 4]",
                result: Some(Value::Binary {
                    val: vec![0x10],
                    span: Span::test_data(),
                }),
            },
            Example {
                description: "Alternatively, you can use the form",
                example: " 0x[33 44 55 10 01 13] | bytes at '3,4'",
                result: Some(Value::Binary {
                    val: vec![0x10],
                    span: Span::test_data(),
                }),
            },
            Example {
                description: "Drop the last `n` characters from the string",
                example: " 0x[33 44 55 10 01 13] | bytes at ',-3'",
                result: Some(Value::Binary {
                    val: vec![0x33, 0x44, 0x55],
                    span: Span::test_data(),
                }),
            },
            Example {
                description: "Get the remaining characters from a starting index",
                example: " 0x[33 44 55 10 01 13] | bytes at '3,'",
                result: Some(Value::Binary {
                    val: vec![0x10, 0x01, 0x13],
                    span: Span::test_data(),
                }),
            },
            Example {
                description: "Get the characters from the beginning until ending index",
                example: " 0x[33 44 55 10 01 13] | bytes at ',4'",
                result: Some(Value::Binary {
                    val: vec![0x33, 0x44, 0x55, 0x10],
                    span: Span::test_data(),
                }),
            },
            Example {
                description:
                    "Or the characters from the beginning until ending index inside a table",
                example: r#" [[ColA ColB ColC]; [0x[11 12 13] 0x[14 15 16] 0x[17 18 19]]] | bytes at "1," ColB ColC"#,
                result: Some(Value::List {
                    vals: vec![Value::Record {
                        cols: vec!["ColA".to_string(), "ColB".to_string(), "ColC".to_string()],
                        vals: vec![
                            Value::Binary {
                                val: vec![0x11, 0x12, 0x13],
                                span: Span::test_data(),
                            },
                            Value::Binary {
                                val: vec![0x15, 0x16],
                                span: Span::test_data(),
                            },
                            Value::Binary {
                                val: vec![0x18, 0x19],
                                span: Span::test_data(),
                            },
                        ],
                        span: Span::test_data(),
                    }],
                    span: Span::test_data(),
                }),
            },
        ]
    }
}
/// Extracts the `[start, end)` byte range described by `arg` from `input`.
///
/// Negative indices count from the end of the input; both bounds are clamped
/// to the start of the buffer. Out-of-range or inverted ranges yield an empty
/// binary or an error value respectively.
fn at(input: &[u8], arg: &Arguments, span: Span) -> Value {
    let len: isize = input.len() as isize;
    // Normalize a negative start, clamping at index 0. (Previously an
    // overshooting negative start, e.g. `-100` on a short input, was left
    // negative and — cast to usize in `skip` — produced an empty result
    // instead of slicing from the beginning; `end` was already clamped.)
    let start: isize = if arg.start < 0 {
        std::cmp::max(arg.start + len, 0)
    } else {
        arg.start
    };
    // Normalize a negative end the same way.
    let end: isize = if arg.end < 0 {
        std::cmp::max(len + arg.end, 0)
    } else {
        arg.end
    };
    if start < len && end >= 0 {
        match start.cmp(&end) {
            Ordering::Equal => Value::Binary { val: vec![], span },
            Ordering::Greater => Value::Error {
                error: ShellError::UnsupportedInput(
                    "End must be greater than or equal to Start".to_string(),
                    arg.arg_span,
                ),
            },
            Ordering::Less => Value::Binary {
                val: {
                    let input_iter = input.iter().copied().skip(start as usize);
                    // `isize::MAX` marks an unbounded end ("to the end of input").
                    if end == isize::max_value() {
                        input_iter.collect()
                    } else {
                        input_iter.take((end - start) as usize).collect()
                    }
                },
                span,
            },
        }
    } else {
        Value::Binary { val: vec![], span }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Runs the documented examples through nushell's example checker, which
    /// evaluates each `example` string and compares it to `result`.
    #[test]
    fn test_examples() {
        use crate::test_examples;
        test_examples(BytesAt {})
    }
}
|
use std::hash::Hash;
use std::marker::PhantomData;
use bit_vec::BitVec;
use hash::NthHash;
/// A partitioned Bloom filter: the bit array is split into
/// `number_of_slices` disjoint slices of `slice_bitwidth` bits each, and
/// every inserted value sets exactly one bit per slice.
#[derive(Debug)]
pub struct BloomFilter<T: ?Sized> {
    /// Backing bit array of `slice_bitwidth * number_of_slices` bits.
    bits: BitVec,
    /// Number of bits in each slice.
    slice_bitwidth: usize,
    /// Number of slices, i.e. how many hash values are used per element.
    number_of_slices: usize,
    /// Ties the filter to the element type without storing any elements.
    _value: PhantomData<T>,
}
impl<T: Hash + ?Sized> BloomFilter<T> {
    /// Creates an empty filter with `number_of_slices` slices of
    /// `slice_bitwidth` bits each.
    pub fn new(slice_bitwidth: usize, number_of_slices: usize) -> Self {
        let total_bits = slice_bitwidth * number_of_slices;
        BloomFilter {
            bits: BitVec::new(total_bits),
            slice_bitwidth,
            number_of_slices,
            _value: PhantomData,
        }
    }

    /// Sets one bit per slice for `value`, chosen by the slice's hash function.
    #[inline]
    pub fn insert<H: NthHash>(&mut self, value: &T, hasher: &H) {
        for slice in 0..self.number_of_slices {
            self.bits.insert(self.bit_index(value, hasher, slice));
        }
    }

    /// Returns `true` when every slice has the bit for `value` set, i.e. the
    /// value may have been inserted (false positives are possible).
    #[inline]
    pub fn contains<H: NthHash>(&self, value: &T, hasher: &H) -> bool {
        (0..self.number_of_slices)
            .all(|slice| self.bits.contains(self.bit_index(value, hasher, slice)))
    }

    /// Computes the global bit position for `value` within `slice`.
    #[inline]
    fn bit_index<H: NthHash>(&self, value: &T, hasher: &H, slice: usize) -> usize {
        slice * self.slice_bitwidth + (hasher.nth_hash(value, slice) as usize) % self.slice_bitwidth
    }

    /// Borrows the underlying bit array.
    pub fn bits(&self) -> &BitVec {
        &self.bits
    }

    /// Number of bits per slice.
    pub fn slice_bitwidth(&self) -> usize {
        self.slice_bitwidth
    }

    /// Number of slices.
    pub fn number_of_slices(&self) -> usize {
        self.number_of_slices
    }
}
#[cfg(test)]
mod test {
    use hash::DefaultHasher;
    use super::*;
    /// The bit array size must be slices × width, with metadata preserved.
    #[test]
    fn new_works() {
        let filter = BloomFilter::<()>::new(128, 4);
        assert_eq!(filter.bits().number_of_bits(), 128 * 4);
        assert_eq!(filter.slice_bitwidth(), 128);
        assert_eq!(filter.number_of_slices(), 4);
    }
    /// Each insertion sets exactly one bit per slice (4 here), and inserted
    /// values are subsequently reported as contained.
    #[test]
    fn insert_and_contains_works() {
        let mut filter = BloomFilter::new(128, 4);
        let hasher = &DefaultHasher;
        for (i, x) in ["foo", "bar", "baz"].iter().enumerate() {
            assert!(!filter.contains(x, hasher));
            filter.insert(x, hasher);
            assert!(filter.contains(x, hasher));
            // No hash collisions among these three inputs, so the count grows
            // by exactly `number_of_slices` per insertion.
            assert_eq!(filter.bits().number_of_one_bits(), (i + 1) * 4);
        }
    }
    /// A nearly full, then completely full, filter still answers membership
    /// queries (with false positives once saturated).
    #[test]
    fn dense_filter_works() {
        let mut filter = BloomFilter::new(128, 4);
        let hasher = &DefaultHasher;
        for i in 0..127 {
            filter.insert(&i, hasher);
            // At most `number_of_slices` new bits per insertion (collisions allowed).
            assert!(filter.bits().number_of_one_bits() <= (i + 1) * 4);
        }
        assert!((0..127).all(|i| filter.contains(&i, hasher)));
        assert_ne!(
            filter.bits().number_of_one_bits(),
            filter.bits().number_of_bits()
        );
        // Fills all bits
        for i in 0..1000 {
            filter.insert(&i, hasher);
        }
        assert!(filter.contains(&1001, hasher)); // false positive
        assert_eq!(
            filter.bits().number_of_one_bits(),
            filter.bits().number_of_bits()
        );
    }
}
|
// svd2rust-generated memory map for the RAMECC peripheral: one shared
// interrupt-enable register followed by two monitor register groups.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - RAMECC interrupt enable register"]
    pub ier: IER,
    // Padding: offsets 0x04..0x20.
    _reserved1: [u8; 0x1c],
    #[doc = "0x20 - RAMECC monitor x configuration register"]
    pub m1cr: M1CR,
    #[doc = "0x24 - RAMECC monitor x status register"]
    pub m1sr: M1SR,
    #[doc = "0x28 - RAMECC monitor x failing address register"]
    pub m1far: M1FAR,
    #[doc = "0x2c - RAMECC monitor x failing data low register"]
    pub m1fdrl: M1FDRL,
    #[doc = "0x30 - RAMECC monitor x failing data high register"]
    pub m1fdrh: M1FDRH,
    #[doc = "0x34 - RAMECC monitor x failing ECC error code register"]
    pub m1fecr: M1FECR,
    // Padding: offsets 0x38..0x40.
    _reserved7: [u8; 0x08],
    #[doc = "0x40 - RAMECC monitor x configuration register"]
    pub m2cr: M2CR,
    #[doc = "0x44 - RAMECC monitor x status register"]
    pub m2sr: M2SR,
    #[doc = "0x48 - RAMECC monitor x failing address register"]
    pub m2far: M2FAR,
    #[doc = "0x4c - RAMECC monitor x failing data low register"]
    pub m2fdrl: M2FDRL,
    #[doc = "0x50 - RAMECC monitor x failing data high register"]
    pub m2fdrh: M2FDRH,
    // Padding: offsets 0x54..0x58 (monitor 2 has no mapped FECR predecessor gap filler).
    _reserved12: [u8; 0x04],
    #[doc = "0x58 - RAMECC monitor x failing ECC error code register"]
    pub m2fecr: M2FECR,
}
// Register accessor type aliases and their per-register field modules
// (svd2rust-generated; doc strings are emitted by the generator).
#[doc = "IER (rw) register accessor: RAMECC interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ier::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ier::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ier`]
module"]
pub type IER = crate::Reg<ier::IER_SPEC>;
#[doc = "RAMECC interrupt enable register"]
pub mod ier;
#[doc = "M1CR (rw) register accessor: RAMECC monitor x configuration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1cr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m1cr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1cr`]
module"]
pub type M1CR = crate::Reg<m1cr::M1CR_SPEC>;
#[doc = "RAMECC monitor x configuration register"]
pub mod m1cr;
#[doc = "M2CR (rw) register accessor: RAMECC monitor x configuration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2cr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m2cr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2cr`]
module"]
pub type M2CR = crate::Reg<m2cr::M2CR_SPEC>;
#[doc = "RAMECC monitor x configuration register"]
pub mod m2cr;
#[doc = "M1SR (rw) register accessor: RAMECC monitor x status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1sr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m1sr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1sr`]
module"]
pub type M1SR = crate::Reg<m1sr::M1SR_SPEC>;
#[doc = "RAMECC monitor x status register"]
pub mod m1sr;
#[doc = "M2SR (rw) register accessor: RAMECC monitor x status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2sr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m2sr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2sr`]
module"]
pub type M2SR = crate::Reg<m2sr::M2SR_SPEC>;
#[doc = "RAMECC monitor x status register"]
pub mod m2sr;
#[doc = "M1FAR (r) register accessor: RAMECC monitor x failing address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1far::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1far`]
module"]
pub type M1FAR = crate::Reg<m1far::M1FAR_SPEC>;
#[doc = "RAMECC monitor x failing address register"]
pub mod m1far;
#[doc = "M2FAR (r) register accessor: RAMECC monitor x failing address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2far::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2far`]
module"]
pub type M2FAR = crate::Reg<m2far::M2FAR_SPEC>;
#[doc = "RAMECC monitor x failing address register"]
pub mod m2far;
#[doc = "M1FDRL (r) register accessor: RAMECC monitor x failing data low register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1fdrl::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1fdrl`]
module"]
pub type M1FDRL = crate::Reg<m1fdrl::M1FDRL_SPEC>;
#[doc = "RAMECC monitor x failing data low register"]
pub mod m1fdrl;
#[doc = "M2FDRL (r) register accessor: RAMECC monitor x failing data low register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2fdrl::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2fdrl`]
module"]
pub type M2FDRL = crate::Reg<m2fdrl::M2FDRL_SPEC>;
#[doc = "RAMECC monitor x failing data low register"]
pub mod m2fdrl;
#[doc = "M1FDRH (r) register accessor: RAMECC monitor x failing data high register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1fdrh::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1fdrh`]
module"]
pub type M1FDRH = crate::Reg<m1fdrh::M1FDRH_SPEC>;
#[doc = "RAMECC monitor x failing data high register"]
pub mod m1fdrh;
#[doc = "M2FDRH (rw) register accessor: RAMECC monitor x failing data high register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2fdrh::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m2fdrh::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2fdrh`]
module"]
pub type M2FDRH = crate::Reg<m2fdrh::M2FDRH_SPEC>;
#[doc = "RAMECC monitor x failing data high register"]
pub mod m2fdrh;
#[doc = "M1FECR (rw) register accessor: RAMECC monitor x failing ECC error code register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m1fecr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m1fecr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m1fecr`]
module"]
pub type M1FECR = crate::Reg<m1fecr::M1FECR_SPEC>;
#[doc = "RAMECC monitor x failing ECC error code register"]
pub mod m1fecr;
#[doc = "M2FECR (rw) register accessor: RAMECC monitor x failing ECC error code register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`m2fecr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`m2fecr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`m2fecr`]
module"]
pub type M2FECR = crate::Reg<m2fecr::M2FECR_SPEC>;
#[doc = "RAMECC monitor x failing ECC error code register"]
pub mod m2fecr;
|
use crate_with_external_symbols::test_not_using_extern;
/// Entry point: calls into the dependency crate, then prints a greeting.
fn main() {
    // Presumably exercises linking against a non-`extern` symbol of the
    // dependency (name-based — confirm in `crate_with_external_symbols`).
    test_not_using_extern();
    println!("Hello, world!");
}
|
extern crate itertools;
extern crate rand;
use itertools::Itertools;
use rand::thread_rng;
mod genetic_architecture;
mod population;
mod samplers;
// Simulation of Hardy model
//
// Assumes a single diploid autosome with a single locus.
// There are two alleles, resulting in three possible genotypes.
// There is no mutation and the sex of the parents doesn't matter.
// Mating is random.
/// Runs the Hardy-model simulation: builds a population over a two-chromosome
/// genetic architecture, then alternates reporting genotype counts and random
/// mating for the configured number of generations.
fn main() {
    // Simulation parameters.
    let population_size = 100000;
    let generations = 100;
    let n_gametes = 2usize; // two gametes per chromosome (diploid, per the file header)
    let mut rng = thread_rng();
    let mut genetic_architecture = genetic_architecture::GeneticArchitecture::new(n_gametes);
    // 2 chromosomes (each chromosome has 2 gametes)
    // each chromosome has 2 loci of 2 alleles
    genetic_architecture.add_chromosome(2usize, vec![2u8, 2u8]);
    genetic_architecture.add_chromosome(2usize, vec![2u8, 2u8]);
    let mut sampler = samplers::UniformSampler::new();
    let mut population = population::Population::new(&genetic_architecture,
        population_size,
        &mut sampler,
        &mut rng);
    for i in 0..generations {
        println!("Generation: {}", i);
        // Report genotype tallies per chromosome and locus.
        let genotype_counts = population.genotype_counts();
        for (c, chrom_counts) in genotype_counts.iter().enumerate() {
            println!("Chromosome: {}", c);
            for locus_counts in chrom_counts {
                for (genotype, count) in locus_counts {
                    let genotype_string = genotype.iter().map( |gt| gt.to_string() ).join(",");
                    println!("Genotype: ({}), Count: {}", genotype_string, count);
                }
            }
            println!();
        }
        // for indiv in &population.parents {
        //     println!("{}", individual_to_string(indiv));
        //     println!();
        // }
        // Produce the next generation by random mating, then promote it.
        population.mate(&mut rng);
        population.swap_generations();
    }
}
|
use crate::ast;
use crate::{Parse, ParseError, Peek, Resolve, Spanned, Storage, ToTokens};
use runestick::Source;
use std::borrow::Cow;
/// A path, where each element is separated by a `::`.
/// A path, where each element is separated by a `::`.
///
/// NOTE(review): parsing/iteration behavior comes from the derived `Parse`,
/// `ToTokens` and `Spanned` impls together with the `#[rune(iter)]`
/// attributes — presumably `first`, then repeated `::`-separated components,
/// then an optional trailing `::`; confirm against the derive macros.
#[derive(Debug, Clone, ToTokens, Spanned, Parse)]
pub struct Path {
    /// The first component in the path.
    pub first: ast::Ident,
    /// The rest of the components in the path.
    #[rune(iter)]
    pub rest: Vec<(ast::Scope, ast::Ident)>,
    /// Trailing scope.
    #[rune(iter)]
    pub trailing: Option<ast::Scope>,
}
impl Path {
    /// Borrow the path as a single identifier, as used for field access calls.
    ///
    /// Returns `None` unless the path is exactly one component with no
    /// trailing scope.
    pub fn try_as_ident(&self) -> Option<&ast::Ident> {
        match (self.rest.is_empty(), self.trailing.is_none()) {
            (true, true) => Some(&self.first),
            _ => None,
        }
    }

    /// Iterate over all identifier components in the path, in order.
    pub fn into_components(&self) -> impl Iterator<Item = &'_ ast::Ident> + '_ {
        std::iter::once(&self.first).chain(self.rest.iter().map(|(_, ident)| ident))
    }
}
impl Peek for Path {
    /// A path can begin wherever the next token is an identifier.
    fn peek(t1: Option<ast::Token>, _: Option<ast::Token>) -> bool {
        matches!(peek!(t1).kind, ast::Kind::Ident(..))
    }
}
impl<'a> Resolve<'a> for Path {
    type Output = Vec<Cow<'a, str>>;

    /// Resolve each identifier component of the path into its source text,
    /// in order, short-circuiting on the first resolution error.
    fn resolve(
        &self,
        storage: &Storage,
        source: &'a Source,
    ) -> Result<Vec<Cow<'a, str>>, ParseError> {
        std::iter::once(&self.first)
            .chain(self.rest.iter().map(|(_, ident)| ident))
            .map(|ident| ident.resolve(storage, source))
            .collect()
    }
}
|
// Algorithm submodules, re-exported flat at this module's root.
mod convex_hull;
mod hanoi;
mod kmeans;
mod xorshift;
pub use self::convex_hull::convex_hull_graham;
pub use self::hanoi::hanoi;
// k-means is provided in both f32 and f64 precision.
pub use self::kmeans::f32::kmeans as kmeans_f32;
pub use self::kmeans::f64::kmeans as kmeans_f64;
pub use self::xorshift::Rand;
|
use std::cmp::{ Eq, PartialEq };
/// A key identifying an object by its type plus an optional name.
#[derive(Eq, PartialEq, Debug)]
pub struct ObjectKey {
    // Type discriminator, e.g. "foo".
    object_type: String,
    // Human-readable name; empty until `named` is called.
    named: String,
}

impl ObjectKey {
    /// Create a key of the given type with an empty name.
    pub fn new(object_type: &str) -> ObjectKey {
        ObjectKey {
            object_type: object_type.to_string(),
            named: "".to_string(),
        }
    }

    /// Set the key's name, returning `&mut Self` so calls can be chained and
    /// the result compared directly.
    ///
    /// Bug fix: this previously returned `()`, so assertions like
    /// `assert_ne!(a.named("x"), b.named("y"))` compared `()` with `()` —
    /// always equal — making the inequality test impossible to pass. Existing
    /// callers that ignore the return value are unaffected.
    pub fn named(&mut self, name: &str) -> &mut Self {
        self.named = name.to_string();
        self
    }
}
#[cfg(test)]
mod test {
    // NOTE(review): 2015-edition crate-relative path; would be
    // `crate::objectkey::ObjectKey` on edition 2018+.
    use objectkey::ObjectKey;
    #[test]
    fn default_named_is_empty() {
        let key = ObjectKey::new("foo");
        assert_eq!(key.named, "");
    }
    #[test]
    fn object_equality_default_named_eq() {
        let typed = "typed";
        assert_eq!(
            ObjectKey::new(typed),
            ObjectKey::new(typed));
    }
    #[test]
    fn object_equality_default_named_ne() {
        assert_ne!(
            ObjectKey::new("typed"),
            ObjectKey::new(""));
    }
    #[test]
    fn object_equality_named_eq() {
        // NOTE(review): `named` returns `()`, so this compares `()` with `()`
        // and passes vacuously — it does not actually compare the keys.
        let typed = "typed";
        let name = "name";
        assert_eq!(
            ObjectKey::new(typed).named(name),
            ObjectKey::new(typed).named(name));
    }
    #[test]
    fn object_equality_named_ne() {
        // NOTE(review): as above, both sides are `()`; `debug_assert_ne!` on
        // equal units will fail in debug builds. `named` should return the key
        // (builder style) for these comparisons to be meaningful.
        let typed = "typed";
        debug_assert_ne!(
            ObjectKey::new(typed).named("name"),
            ObjectKey::new(typed).named("bar"));
    }
}
|
//! Classification of structurally significant JSON bytes.
//!
//! Provides the [`Structural`] struct and [`StructuralIterator`] trait
//! that allow effectively iterating over structural characters in a JSON document.
//!
//! Classifying [`Commas`](`Structural::Comma`) and [`Colons`](`Structural::Colon`) is disabled by default.
//! It can be enabled on demand by calling
//! [`StructuralIterator::turn_commas_on`]/[`StructuralIterator::turn_colons_on`].
//! This configuration is persisted across [`stop`](StructuralIterator::stop) and
//! [`resume`](StructuralIterator::resume) calls.
//!
//! A structural classifier needs ownership over a base
//! [`QuoteClassifiedIterator`](`crate::classification::quotes::QuoteClassifiedIterator`).
//!
//! # Examples
//! ```rust
//! use rsonpath::classification::structural::{BracketType, Structural, classify_structural_characters};
//! use rsonpath::input::{Input, OwnedBytes};
//! use rsonpath::result::empty::EmptyRecorder;
//! use rsonpath::FallibleIterator;
//!
//! let json = r#"{"x": [{"y": 42}, {}]}""#.to_owned();
//! let aligned = OwnedBytes::try_from(json).unwrap();
//! let iter = aligned.iter_blocks::<_, 64>(&EmptyRecorder);
//! let expected = vec![
//! Structural::Opening(BracketType::Curly, 0),
//! Structural::Opening(BracketType::Square, 6),
//! Structural::Opening(BracketType::Curly, 7),
//! Structural::Closing(BracketType::Curly, 15),
//! Structural::Opening(BracketType::Curly, 18),
//! Structural::Closing(BracketType::Curly, 19),
//! Structural::Closing(BracketType::Square, 20),
//! Structural::Closing(BracketType::Curly, 21)
//! ];
//! let quote_classifier = rsonpath::classification::quotes::classify_quoted_sequences(iter);
//! let actual = classify_structural_characters(quote_classifier).collect::<Vec<Structural>>().unwrap();
//! assert_eq!(expected, actual);
//! ```
//! ```rust
//! use rsonpath::classification::structural::{BracketType, Structural, classify_structural_characters};
//! use rsonpath::classification::quotes::classify_quoted_sequences;
//! use rsonpath::input::{Input, OwnedBytes};
//! use rsonpath::result::empty::EmptyRecorder;
//! use rsonpath::FallibleIterator;
//!
//! let json = r#"{"x": "[\"\"]"}""#.to_owned();
//! let aligned = OwnedBytes::try_from(json).unwrap();
//! let iter = aligned.iter_blocks::<_, 64>(&EmptyRecorder);
//! let expected = vec![
//! Structural::Opening(BracketType::Curly, 0),
//! Structural::Closing(BracketType::Curly, 14)
//! ];
//! let quote_classifier = classify_quoted_sequences(iter);
//! let actual = classify_structural_characters(quote_classifier).collect::<Vec<Structural>>().unwrap();
//! assert_eq!(expected, actual);
//! ```
use crate::{
classification::{quotes::QuoteClassifiedIterator, ResumeClassifierState},
input::{error::InputError, InputBlockIterator},
FallibleIterator, MaskType, BLOCK_SIZE,
};
use cfg_if::cfg_if;
/// Defines the kinds of brackets that can be identified as structural.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
#[repr(u8)]
pub enum BracketType {
    /// Square brackets, '[' and ']'.
    Square,
    /// Curly braces, '{' and '}'.
    Curly,
}
/// Defines structural characters in JSON documents.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum Structural {
    /// Represents the closing square or curly brace, ']' or '}'.
    Closing(BracketType, usize),
    /// Represents the colon ':' character.
    Colon(usize),
    /// Represents the opening square or curly brace, '[' or '{'.
    Opening(BracketType, usize),
    /// Represents the comma ',' character.
    Comma(usize),
}
use Structural::*;
impl Structural {
    /// Returns the byte index of the character in the document, counting from 0.
    #[inline(always)]
    #[must_use]
    pub fn idx(self) -> usize {
        match self {
            Closing(_, i) | Colon(i) | Opening(_, i) | Comma(i) => i,
        }
    }
    /// Add a given amount to the structural's index.
    ///
    /// # Examples
    /// ```rust
    /// # use rsonpath::classification::structural::Structural;
    ///
    /// let structural = Structural::Colon(42);
    /// let offset_structural = structural.offset(10);
    ///
    /// assert_eq!(structural.idx(), 42);
    /// assert_eq!(offset_structural.idx(), 52);
    /// ```
    #[inline(always)]
    #[must_use]
    pub fn offset(self, amount: usize) -> Self {
        // Compute the shifted index once, then rebuild the same variant.
        let shifted = self.idx() + amount;
        match self {
            Closing(bracket, _) => Closing(bracket, shifted),
            Colon(_) => Colon(shifted),
            Opening(bracket, _) => Opening(bracket, shifted),
            Comma(_) => Comma(shifted),
        }
    }
    /// Check if the structural represents a closing character,
    /// i.e. a [`Closing`] with either of the [`BracketType`] variants.
    ///
    /// # Examples
    /// ```rust
    /// # use rsonpath::classification::structural::{BracketType, Structural};
    ///
    /// assert!(Structural::Closing(BracketType::Curly, 42).is_closing());
    /// assert!(Structural::Closing(BracketType::Square, 43).is_closing());
    /// assert!(!Structural::Comma(44).is_closing());
    /// ```
    #[inline(always)]
    #[must_use]
    pub fn is_closing(&self) -> bool {
        matches!(self, Closing(..))
    }
    /// Check if the structural represents an opening character,
    /// i.e. an [`Opening`] with either of the [`BracketType`] variants.
    ///
    /// # Examples
    /// ```rust
    /// # use rsonpath::classification::structural::{BracketType, Structural};
    ///
    /// assert!(Structural::Opening(BracketType::Curly, 42).is_opening());
    /// assert!(Structural::Opening(BracketType::Square, 43).is_opening());
    /// assert!(!Structural::Comma(44).is_opening());
    /// ```
    #[inline(always)]
    #[must_use]
    pub fn is_opening(&self) -> bool {
        matches!(self, Opening(..))
    }
}
/// Trait for classifier iterators, i.e. finite iterators of [`Structural`] characters
/// that hold a reference to the JSON document valid for `'a`.
pub trait StructuralIterator<'i, I, Q, M, const N: usize>:
    FallibleIterator<Item = Structural, Error = InputError>
where
    I: InputBlockIterator<'i, N>,
{
    /// Stop classification and return a state object that can be used to resume
    /// a classifier from the place in which the current one was stopped.
    fn stop(self) -> ResumeClassifierState<'i, I, Q, M, N>;
    /// Resume classification from a state retrieved by stopping a classifier.
    fn resume(state: ResumeClassifierState<'i, I, Q, M, N>) -> Self;
    /// Turn classification of [`Structural::Colon`] characters off.
    fn turn_colons_off(&mut self);
    /// Turn classification of [`Structural::Colon`] characters on.
    ///
    /// The `idx` passed should be the index of the byte in the input
    /// from which colons are to be classified. Passing an `idx` that
    /// does not match the index which the internal [`QuoteClassifiedIterator`]
    /// reached may result in incorrect results.
    fn turn_colons_on(&mut self, idx: usize);
    /// Turn classification of [`Structural::Comma`] characters off.
    fn turn_commas_off(&mut self);
    /// Turn classification of [`Structural::Comma`] characters on.
    ///
    /// The `idx` passed should be the index of the byte in the input
    /// from which commas are to be classified. Passing an `idx` that
    /// does not match the index which the internal [`QuoteClassifiedIterator`]
    /// reached may result in incorrect results.
    fn turn_commas_on(&mut self, idx: usize);
    /// Turn classification of both [`Structural::Comma`] and [`Structural::Colon`]
    /// characters on. This is generally faster than calling
    /// [`turn_colons_on`](`StructuralIterator::turn_colons_on`) and
    /// [`turn_commas_on`](`StructuralIterator::turn_commas_on`)
    /// in sequence.
    fn turn_colons_and_commas_on(&mut self, idx: usize);
    /// Turn classification of both [`Structural::Comma`] and [`Structural::Colon`]
    /// characters off. This is generally faster than calling
    /// [`turn_colons_off`](`StructuralIterator::turn_colons_off`) and
    /// [`turn_commas_off`](`StructuralIterator::turn_commas_off`)
    /// in sequence.
    fn turn_colons_and_commas_off(&mut self);
}
// Per-architecture classifier implementations; exactly one is selected by the
// `cfg_if` chain below.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod avx2_32;
#[cfg(target_arch = "x86_64")]
mod avx2_64;
mod nosimd;
mod shared;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
mod ssse3_32;
#[cfg(target_arch = "x86_64")]
mod ssse3_64;
// Select the concrete classifier implementation at compile time, falling back
// to the sequential (nosimd) classifier when SIMD is disabled or when building
// documentation.
cfg_if! {
    if #[cfg(any(doc, not(feature = "simd")))] {
        type ClassifierImpl<'a, I, Q, const N: usize> = nosimd::SequentialClassifier<'a, I, Q, N>;
    }
    else if #[cfg(all(simd = "avx2_64", target_arch = "x86_64"))] {
        type ClassifierImpl<'a, I, Q> = avx2_64::Avx2Classifier64<'a, I, Q>;
    }
    else if #[cfg(all(simd = "avx2_32", any(target_arch = "x86_64", target_arch = "x86")))] {
        type ClassifierImpl<'a, I, Q> = avx2_32::Avx2Classifier32<'a, I, Q>;
    }
    else if #[cfg(all(simd = "ssse3_64", target_arch = "x86_64"))] {
        type ClassifierImpl<'a, I, Q> = ssse3_64::Ssse3Classifier64<'a, I, Q>;
    }
    else if #[cfg(all(simd = "ssse3_32", any(target_arch = "x86_64", target_arch = "x86")))] {
        type ClassifierImpl<'a, I, Q> = ssse3_32::Ssse3Classifier32<'a, I, Q>;
    }
    else {
        compile_error!("Target architecture is not supported by SIMD features of this crate. Disable the default `simd` feature.");
    }
}
/// Walk through the JSON document represented by `bytes` and iterate over all
/// occurrences of structural characters in it.
///
/// Comma and colon classification start disabled; enable them via the
/// [`StructuralIterator`] `turn_*_on` methods.
#[inline(always)]
pub fn classify_structural_characters<'i, I, Q>(iter: Q) -> impl StructuralIterator<'i, I, Q, MaskType, BLOCK_SIZE>
where
    I: InputBlockIterator<'i, BLOCK_SIZE>,
    Q: QuoteClassifiedIterator<'i, I, MaskType, BLOCK_SIZE>,
{
    ClassifierImpl::new(iter)
}
/// Resume classification using a state retrieved from a previously
/// used classifier via the `stop` function.
///
/// The comma/colon configuration persisted in the state is restored.
#[inline(always)]
pub fn resume_structural_classification<'i, I, Q>(
    state: ResumeClassifierState<'i, I, Q, MaskType, BLOCK_SIZE>,
) -> impl StructuralIterator<'i, I, Q, MaskType, BLOCK_SIZE>
where
    I: InputBlockIterator<'i, BLOCK_SIZE>,
    Q: QuoteClassifiedIterator<'i, I, MaskType, BLOCK_SIZE>,
{
    ClassifierImpl::resume(state)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        classification::quotes::classify_quoted_sequences,
        input::{Input, OwnedBytes},
        result::empty::EmptyRecorder,
    };
    // All tests classify the same document and verify that stopping a
    // classifier and resuming from its state continues at the correct byte
    // position, with the comma/colon configuration preserved across resume.
    #[test]
    fn resumption_without_commas_or_colons() {
        use BracketType::*;
        use Structural::*;
        let json = r#"{"a": [42, 36, { "b": { "c": 1, "d": 2 } }]}"#;
        let json_string = json.to_owned();
        let input = OwnedBytes::new(&json_string).unwrap();
        let iter = input.iter_blocks(&EmptyRecorder);
        let quotes = classify_quoted_sequences(iter);
        let mut classifier = classify_structural_characters(quotes);
        assert_eq!(Some(Opening(Curly, 0)), classifier.next().unwrap());
        assert_eq!(Some(Opening(Square, 6)), classifier.next().unwrap());
        let resume_state = classifier.stop();
        let mut resumed_classifier = resume_structural_classification(resume_state);
        assert_eq!(Some(Opening(Curly, 15)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Opening(Curly, 22)), resumed_classifier.next().unwrap());
    }
    #[test]
    fn resumption_with_commas_but_no_colons() {
        use BracketType::*;
        use Structural::*;
        let json = r#"{"a": [42, 36, { "b": { "c": 1, "d": 2 } }]}"#;
        let json_string = json.to_owned();
        let input = OwnedBytes::new(&json_string).unwrap();
        let iter = input.iter_blocks(&EmptyRecorder);
        let quotes = classify_quoted_sequences(iter);
        let mut classifier = classify_structural_characters(quotes);
        classifier.turn_commas_on(0);
        assert_eq!(Some(Opening(Curly, 0)), classifier.next().unwrap());
        assert_eq!(Some(Opening(Square, 6)), classifier.next().unwrap());
        assert_eq!(Some(Comma(9)), classifier.next().unwrap());
        assert_eq!(Some(Comma(13)), classifier.next().unwrap());
        let resume_state = classifier.stop();
        let mut resumed_classifier = resume_structural_classification(resume_state);
        // Comma classification must survive the stop/resume round-trip.
        assert_eq!(Some(Opening(Curly, 15)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Opening(Curly, 22)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Comma(30)), resumed_classifier.next().unwrap());
    }
    #[test]
    fn resumption_with_colons_but_no_commas() {
        use BracketType::*;
        use Structural::*;
        let json = r#"{"a": [42, 36, { "b": { "c": 1, "d": 2 } }]}"#;
        let json_string = json.to_owned();
        let input = OwnedBytes::new(&json_string).unwrap();
        let iter = input.iter_blocks(&EmptyRecorder);
        let quotes = classify_quoted_sequences(iter);
        let mut classifier = classify_structural_characters(quotes);
        classifier.turn_colons_on(0);
        assert_eq!(Some(Opening(Curly, 0)), classifier.next().unwrap());
        assert_eq!(Some(Colon(4)), classifier.next().unwrap());
        assert_eq!(Some(Opening(Square, 6)), classifier.next().unwrap());
        let resume_state = classifier.stop();
        let mut resumed_classifier = resume_structural_classification(resume_state);
        // Colon classification must survive the stop/resume round-trip.
        assert_eq!(Some(Opening(Curly, 15)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Colon(20)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Opening(Curly, 22)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Colon(27)), resumed_classifier.next().unwrap());
    }
    #[test]
    fn resumption_with_commas_and_colons() {
        use BracketType::*;
        use Structural::*;
        let json = r#"{"a": [42, 36, { "b": { "c": 1, "d": 2 } }]}"#;
        let json_string = json.to_owned();
        let input = OwnedBytes::new(&json_string).unwrap();
        let iter = input.iter_blocks(&EmptyRecorder);
        let quotes = classify_quoted_sequences(iter);
        let mut classifier = classify_structural_characters(quotes);
        classifier.turn_commas_on(0);
        classifier.turn_colons_on(0);
        assert_eq!(Some(Opening(Curly, 0)), classifier.next().unwrap());
        assert_eq!(Some(Colon(4)), classifier.next().unwrap());
        assert_eq!(Some(Opening(Square, 6)), classifier.next().unwrap());
        assert_eq!(Some(Comma(9)), classifier.next().unwrap());
        assert_eq!(Some(Comma(13)), classifier.next().unwrap());
        let resume_state = classifier.stop();
        let mut resumed_classifier = resume_structural_classification(resume_state);
        // Both comma and colon classification must survive the round-trip.
        assert_eq!(Some(Opening(Curly, 15)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Colon(20)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Opening(Curly, 22)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Colon(27)), resumed_classifier.next().unwrap());
        assert_eq!(Some(Comma(30)), resumed_classifier.next().unwrap());
    }
}
|
use std::env;
use std::str::FromStr;
/// Greatest common divisor of `x` and `y` via Euclid's algorithm.
///
/// # Panics
/// Panics if either argument is zero.
fn gcd(mut x: u64, mut y: u64) -> u64 {
    assert!(x != 0 && y != 0);
    while x != 0 {
        // Keep the larger value in `x` so the remainder step shrinks it.
        if x < y {
            std::mem::swap(&mut x, &mut y);
        }
        x %= y;
    }
    y
}
/// Entry point: parse all command-line arguments as `u64` and print their GCD.
fn main() {
    // Collect every argument, failing loudly on anything non-numeric.
    let numbers: Vec<u64> = env::args()
        .skip(1)
        .map(|arg| u64::from_str(&arg).expect("Expected a number."))
        .collect();
    if numbers.is_empty() {
        eprintln!("Usage: program NUMBER1 NUMBER2 ...");
        std::process::exit(1);
    }
    // Fold the GCD across all inputs, seeded by the first.
    let result = numbers[1..].iter().fold(numbers[0], |acc, &n| gcd(acc, n));
    println!("GCD of {:?} is {}", numbers, result);
}
// Sanity checks: coprime inputs give 1; shared prime factors multiply out.
#[test]
fn test_gcd() {
    assert_eq!(gcd(14, 15), 1);
    assert_eq!(gcd(2 * 3 * 5 * 11 * 17, 3 * 7 * 11 * 13 * 19), 3 * 11);
}
|
// Copyright (C) 2021 Sebastian Dröge <sebastian@centricular.com>
//
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use nom::bytes::complete::take_while;
use nom::character::complete::space0;
use nom::character::is_alphanumeric;
use nom::{Err, IResult, Needed};
use std::str;
/// Combinator: run `parser` only when `cond` matches first.
///
/// Returns `Ok(Some(output))` of `parser` when `cond` succeeds, and
/// `Ok(None)` without consuming input when `cond` fails with a recoverable
/// error; failures and incomplete errors are propagated unchanged.
pub(super) fn cond_parser<I, O1, O2, E: nom::error::ParseError<I>, F, G>(
    mut cond: F,
    mut parser: G,
) -> impl FnMut(I) -> IResult<I, Option<O2>, E>
where
    F: nom::Parser<I, O1, E>,
    G: nom::Parser<I, O2, E>,
    I: Clone,
{
    move |input: I| {
        // Try `cond` on a clone of the input so we can back off if it fails.
        let (input, res) = match cond.parse(input.clone()) {
            Ok((input, output)) => Ok((input, Some(output))),
            // A recoverable error just means the condition didn't match.
            Err(Err::Error(_)) => Ok((input, None)),
            Err(err) => Err(err),
        }?;
        if res.is_some() {
            let (input, res) = parser.parse(input)?;
            Ok((input, Some(res)))
        } else {
            Ok((input, None))
        }
    }
}
/// Combinator: wrap `parser`, skipping optional whitespace (nom's `space0`,
/// i.e. spaces and tabs) on both sides of the match.
pub(super) fn trim<I, O, E: nom::error::ParseError<I>, F>(
    mut parser: F,
) -> impl FnMut(I) -> IResult<I, O, E>
where
    F: nom::Parser<I, O, E>,
    I: nom::InputTakeAtPosition,
    <I as nom::InputTakeAtPosition>::Item: nom::AsChar + Clone,
{
    move |input: I| {
        let (input, _) = space0(input)?;
        let (input, val) = parser.parse(input)?;
        let (input, _) = space0(input)?;
        Ok((input, val))
    }
}
/// Parse an RTSP token: a run of alphanumerics and the RTSP token symbols.
/// Note that `take_while` accepts zero matching bytes, so an empty token
/// parses successfully.
pub(super) fn token(input: &[u8]) -> IResult<&[u8], &[u8]> {
    pub(super) fn is_token_char(i: u8) -> bool {
        is_alphanumeric(i) || b"!#$%&'*+-.^_`|~".contains(&i)
    }
    take_while(is_token_char)(input)
}
/// Parse a run of RTSP "unreserved" characters (alphanumerics plus the
/// unreserved symbol set). Like `token`, an empty match is accepted.
pub(super) fn rtsp_unreserved(input: &[u8]) -> IResult<&[u8], &[u8]> {
    pub(super) fn is_rtsp_unreserved_char(i: u8) -> bool {
        // rtsp_unreserved
        is_alphanumeric(i) || b"$-_.+!*'()".contains(&i)
    }
    take_while(is_rtsp_unreserved_char)(input)
}
/// Parse a double-quoted string with backslash escapes, returning the quoted
/// span *including* both quotes as the output, and the rest as the remainder.
///
/// Returns `Err::Error` if the input does not start with `"`, and
/// `Err::Incomplete` if the closing quote (or the byte following a trailing
/// backslash) has not arrived yet.
pub(super) fn quoted_string(input: &[u8]) -> IResult<&[u8], &[u8]> {
    use std::num::NonZeroUsize;
    // Must begin with an opening quote.
    if !input.starts_with(b"\"") {
        return Err(Err::Error(nom::error::Error::new(
            input,
            nom::error::ErrorKind::Tag,
        )));
    }
    // Scan past the opening quote until an unescaped closing quote.
    let i = &input[1..];
    let mut o = i;
    while !o.is_empty() {
        if o.len() >= 2 && o.starts_with(b"\\") {
            // Backslash escape: skip the escaped byte as well.
            o = &o[2..];
        } else if o.starts_with(b"\\") {
            // Trailing lone backslash: the escaped byte hasn't arrived yet.
            return Err(Err::Incomplete(Needed::Size(NonZeroUsize::new(1).unwrap())));
        } else if !o.starts_with(b"\"") {
            o = &o[1..];
        } else {
            // Closing quote, also include it
            o = &o[1..];
            break;
        }
    }
    let (fst, snd) = input.split_at(input.len() - o.len());
    // Did not end with a closing quote. The `fst.len() < 2` check fixes the
    // degenerate input consisting of a lone `"`: previously the opening quote
    // itself satisfied `ends_with`, so an unterminated string was accepted.
    if fst.len() < 2 || !fst.ends_with(b"\"") {
        return Err(Err::Incomplete(Needed::Size(NonZeroUsize::new(1).unwrap())));
    }
    // Must have the starting quote
    assert!(fst.starts_with(b"\""));
    Ok((snd, fst))
}
// FIXME: Remove once str::split_once is stabilized
/// Split `s` at the first occurrence of `d`, dropping the delimiter itself.
///
/// Returns `None` when `d` does not occur. The delimiter is skipped by its
/// UTF-8 encoded width (`len_utf8`); the previous implementation fell back to
/// skipping 1 byte when the delimiter was the final character, which split
/// inside a multi-byte character boundary and panicked.
pub(super) fn split_once(s: &str, d: char) -> Option<(&str, &str)> {
    let idx = s.find(d)?;
    let fst = &s[..idx];
    let snd = &s[idx + d.len_utf8()..];
    Some((fst, snd))
}
|
use crate::{
Load,
MemmyGenerator
};
use notices::{
DiagnosticSourceBuilder,
DiagnosticLevel
};
use ty::Ty;
use ir::{
Chunk,
hir::HIRInstruction
};
use ir_traits::ReadInstruction;
impl Load for Ty{
    type Output = Ty;
    /// Deserialize a `Ty` from `chunk`: a source position, an instruction tag
    /// (read and discarded), and the type's identifier string.
    ///
    /// On a position read failure, emits an error diagnostic through `memmy`
    /// and returns `Err(())`.
    fn load(chunk: &Chunk, memmy: &MemmyGenerator) -> Result<Self::Output, ()> {
        let pos = match chunk.read_pos(){
            Ok(pos) => pos,
            Err(msg) => {
                // Surface the failure as a compiler diagnostic before bailing.
                let diagnosis = DiagnosticSourceBuilder::new(memmy.module_name.clone(), 0)
                    .level(DiagnosticLevel::Error)
                    .message(msg)
                    .build();
                memmy.emit_diagnostic(&[], &[diagnosis]);
                return Err(())
            }
        };
        // NOTE(review): `as Option<HIRInstruction>` is a non-primitive cast,
        // which rustc normally rejects — this looks like it was meant to be a
        // type annotation or turbofish on `read_instruction`; confirm this
        // compiles as written.
        let _ins = chunk.read_instruction() as Option<HIRInstruction>;
        let ident = chunk.read_string().to_owned();
        Ok(
            Ty{
                pos,
                ident
            }
        )
    }
}
use sdl::video::{SurfaceFlag, VideoFlag};
/// Initialize SDL video and set a double-buffered hardware-surface video mode
/// sized to the current best mode.
///
/// Returns the screen surface together with the video info queried *after*
/// the mode switch.
///
/// # Panics
/// Panics if the video mode cannot be set.
pub fn init() -> (sdl::video::Surface, sdl::video::VideoInfo) {
    // NOTE(review): the result of sdl::init is ignored here — confirm whether
    // an initialization failure should be handled instead.
    sdl::init(&[sdl::InitFlag::Video]);
    // Query the current best mode to size our own mode request.
    let best = sdl::video::get_video_info();
    let screen = match sdl::video::set_video_mode(
        best.width,
        best.height,
        best.format.bpp as isize,
        &[SurfaceFlag::HWSurface],
        &[VideoFlag::DoubleBuf],
    ) {
        Ok(screen) => screen,
        Err(err) => panic!("failed to set video mode: {}", err),
    };
    (screen, sdl::video::get_video_info())
}
|
use std::cell::RefCell;
use std::mem;
use std::rc::{Rc, Weak};
use iter_exact::{ChainExactExt, CollectExactExt};
use geometry::primitive::{Facet, Plane, Point, SimplexSubset};
use linalg::{Scalar, Vect, VectorNorm};
use num::traits::Float;
use typehack::prelude::*;
// TODO: This quickhull implementation is not well-suited for spitting out the resulting point set,
// unordered, without any faceting. This functionality is reasonable to have.
/// Extension trait: compute the convex hull of a point set via quickhull.
pub trait QuickHullExt<T: Scalar, D: Dim> {
    /// Compute the convex hull of `self` in `dim` dimensions.
    // Fix: the parameter was anonymous (`fn quick_hull(&self, D)`), a
    // deprecated 2015-edition form; naming it is source-compatible with the
    // existing impl below.
    fn quick_hull(&self, dim: D) -> ConvexHull<T, D>;
}
/// A convex hull over a borrowed point set.
///
/// Facets are stored as indices into `source` rather than owned points; use
/// the `cloned_*` accessors to materialize them.
#[derive(Debug, Clone)]
pub struct ConvexHull<'a, T: Scalar + 'a, D: Dim + 'a> {
    // The point set the hull was computed from.
    source: &'a [Point<T, D>],
    // Each facet as a set of point indices into `source`.
    facets: Vec<FacetIndices<D>>,
}
impl<'a, T: Clone + Scalar, D: Dim> ConvexHull<'a, T, D> {
    /// Owned copies of every distinct point on the hull.
    pub fn cloned_points(&self) -> Vec<Point<T, D>> {
        self.point_indices().into_iter().map(|idx| self.source[idx].clone()).collect()
    }
    /// Owned copies of every facet, materialized from the source points.
    pub fn cloned_facets(&self) -> Vec<Facet<T, D>> {
        self.facets
            .iter()
            .map(|facet| facet.iter().map(|&PointIdx(idx)| self.source[idx].clone()).collect_exact())
            .collect()
    }
    /// Sorted, deduplicated indices (into `source`) of all points that appear
    /// on some facet of the hull.
    pub fn point_indices(&self) -> Vec<usize> {
        let mut pt_list: Vec<usize> =
            self.facets.iter().flat_map(|facet| facet.iter().map(|&PointIdx(idx)| idx)).collect();
        pt_list.sort();
        pt_list.dedup();
        pt_list
    }
}
/// Newtype index into a point set, ordered and hashable like its `usize`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct PointIdx(pub usize);

impl From<usize> for PointIdx {
    fn from(raw: usize) -> Self {
        Self(raw)
    }
}

impl From<PointIdx> for usize {
    fn from(PointIdx(raw): PointIdx) -> Self {
        raw
    }
}
/// A facet node in the quickhull face graph.
#[derive(Debug)]
struct QhElement<T: Scalar, D: Dim> {
    // Indices of this facet's vertices.
    facet: FacetIndices<D>,
    // Supporting hyperplane; oriented so the interior point has non-positive
    // signed distance (flipped during construction when needed).
    plane: Plane<T, D>,
    // Conflict list: candidate points strictly outside this facet.
    outside: Vec<PointIdx>,
    // Adjacent facets, each paired with the shared ridge.
    neighbors: Vec<(QhFacetWeak<T, D>, RidgeIndices<D>)>,
    // Traversal flags used by the horizon search.
    visited: bool,
    dead: bool,
    // Strong self-handle keeping this node alive while it is in the graph.
    self_ref: Option<QhFacetRef<T, D>>,
}
// Convenience aliases over the quickhull node graph.
// A facet has D vertex indices; a ridge (facet of a facet) has D - 1.
type FacetIndices<D: Dim> = Data<PointIdx, D>;
type RidgeIndices<D: Dim> = Data<PointIdx, D::Pred>;
// Shared/weak handles to facet nodes; weak links in neighbor lists avoid
// reference cycles between adjacent facets.
type QhFacetRef<T: Scalar, D: Dim> = Rc<RefCell<QhElement<T, D>>>;
type QhFacetWeak<T: Scalar, D: Dim> = Weak<RefCell<QhElement<T, D>>>;
impl<T: Scalar + Float, D: Dim> QuickHullExt<T, D> for [Point<T, D>] {
fn quick_hull(&self, dim: D) -> ConvexHull<T, D> {
debug!("Beginning quickhull with {} points in {} dimensions.",
self.len(),
dim.reify());
let mut pt_indices: Vec<PointIdx> = (0..self.len()).map(PointIdx).collect();
debug!("Generated vector of {} point indices. Checking for extremes...",
pt_indices.len());
// We build an initial simplex by first finding the furthest pair of points in the set.
// First, we find the points with minimum and maximum coordinates:
let extremes: Data<PointIdx, D::Double> = {
let iter = self.iter().enumerate();
// We store minimums in even elements, and maximums in odd ones.
let mut initial = Data::from_elem(dim.double(), &PointIdx(0));
for (i, pt) in iter {
for j in 0..dim.reify() {
if pt[j] < self[initial[2 * j].0][j] {
initial[2 * j] = PointIdx(i);
}
if pt[j] > self[initial[2 * j + 1].0][j] {
initial[2 * j + 1] = PointIdx(i);
}
}
}
initial
};
debug!("Extreme points: {:?}.", extremes);
// Now, we find the pair of these points which is furthest apart, via brute force, and add
// those to our initial simplex:
let mut simplex: SimplexSubset<T, D> = SimplexSubset::with_capacity(dim.succ());
debug!("Simplex: {:?}", &*simplex);
let mut simplex_indices: DataVec<PointIdx, D::Succ> = DataVec::with_capacity(dim.succ());
{
let mut max_dist = T::zero();
let mut initial = (PointIdx(0), PointIdx(0));
for &PointIdx(i) in extremes.iter() {
for &PointIdx(j) in extremes.iter() {
let dist = (&self[i] - &self[j]).norm();
if dist > max_dist {
initial = (PointIdx(i), PointIdx(j));
max_dist = dist;
}
}
}
debug!("Furthest points determined to be {:?} and {:?}, with a distance of {:?}.",
initial.0,
initial.1,
max_dist);
simplex.push(&self[usize::from(initial.0)]);
simplex_indices.push(initial.0);
simplex.push(&self[usize::from(initial.1)]);
simplex_indices.push(initial.1);
if initial.0 > initial.1 {
pt_indices.remove(usize::from(initial.0));
pt_indices.remove(usize::from(initial.1));
} else {
pt_indices.remove(usize::from(initial.1));
pt_indices.remove(usize::from(initial.0));
}
}
debug!("Simplex: {:?}, with {} remaining potentially outside points. Building rest of \
simplex...",
&*simplex,
pt_indices.len());
// Now, until our simplex is full, we find points far from the simplex and add them in.
while !simplex.is_full() {
let max_idx = {
let (mut max_dist, mut max_idx) = {
let idx = 0;
(simplex.distance(&self[usize::from(pt_indices[idx])], dim), idx)
};
for idx in 0..pt_indices.len() {
let dist = simplex.distance(&self[usize::from(pt_indices[idx])], dim);
if dist > max_dist {
max_dist = dist;
max_idx = idx;
}
}
max_idx
};
debug!("Furthest point from simplex of {} points determined to be {:?}. Removing \
from point index list...",
simplex.len(),
pt_indices[max_idx]);
simplex.push(&self[usize::from(pt_indices[max_idx])]);
simplex_indices.push(pt_indices[max_idx]);
pt_indices.remove(max_idx);
}
// The simplex is a valid convex hull. By taking the average of all its vertices, we are
// guaranteed an interior point.
let guaranteed_interior_point: Point<T, D> = {
let sum: Vect<T, D> = simplex.iter().map(|&p| Vect::from(p.clone())).sum();
let vect = sum / T::from_usize(simplex.len());
vect.into()
};
debug!("Calculated {:?} to be a guaranteed interior point by averaging all points in the \
initial simplex.",
guaranteed_interior_point);
// We now have our full initial simplex. We can now generate our first few facets from it.
let mut elements: Vec<QhFacetWeak<T, D>> = Vec::with_capacity(dim.succ()
.reify());
let d = dim.succ().reify();
for i in 0..d {
debug!("Building initial facet {} from simplex (excluding {}th point from simplex).",
i,
i);
let facet: FacetIndices<D> =
(0..i).chain_exact(i + 1..d).map(|j| simplex_indices[j]).collect_exact();
debug!("Facet points: {:?}. Constructing plane from facet...",
facet);
let mut plane: Plane<T, D> = facet.iter()
.map(|&PointIdx(i)| self[i].clone())
.collect_exact::<Facet<T, D>>()
.into();
if plane.signed_distance(&guaranteed_interior_point).gt_zero() {
plane.n = -plane.n;
debug!("Plane signed distance has the wrong sign, flipping.");
}
debug!("Plane constructed from facet: {:?}.", plane);
let element: QhFacetRef<T, D> = Rc::new(RefCell::new(QhElement {
facet: facet,
plane: plane,
outside: Vec::new(),
neighbors: Vec::new(),
visited: false,
dead: false,
self_ref: None,
}));
element.borrow_mut().self_ref = Some(element.clone());
debug!("Constructing neighbors for facet ({} neighbors to link.)",
elements.len());
for (j, &ref neighbor) in elements.iter().enumerate() {
let neighbor = neighbor.upgrade()
.expect("Facets should not be linked to buried neighbors!");
let ridge: RidgeIndices<D> = (0..j)
.chain_exact(j + 1..i)
.chain_exact(i + 1..d)
.map(|i| simplex_indices[i])
.collect_exact();
debug!("Linking {:?} with its neighbor {:?}, with ridge {:?}.",
element.borrow().facet,
neighbor.borrow().facet,
ridge);
neighbor.borrow_mut().neighbors.push((Rc::downgrade(&element), ridge.clone()));
element.borrow_mut().neighbors.push((Rc::downgrade(&neighbor), ridge));
}
debug!("Facet {} constructed successfully.", i);
elements.push(Rc::downgrade(&element));
}
// We must now generate the conflict sets for our first facets. We do so by looping through
// our facets, and assigning all points above them to their conflict sets.
debug!("Generating conflict sets... {} points to assign.",
pt_indices.len());
for ref element in elements.iter_mut() {
let element = element.upgrade()
.expect("Initial simplex points should not yet have had a chance to be buried!");
let mut i = 0;
while i < pt_indices.len() {
let PointIdx(idx) = pt_indices[i];
debug!("Checking point {:?} against the hyperplane of element {:?}. Signed \
distance: {:?}",
PointIdx(idx),
element.borrow().facet,
element.borrow().plane.signed_distance(&self[idx]));
if element.borrow().plane.signed_distance(&self[idx]).gt_zero() {
element.borrow_mut().outside.push(pt_indices.swap_remove(i));
} else {
i += 1;
}
}
}
// Any points remaining in pt_indices are guaranteed to be interior points.
debug!("Filtering for nonempty facets...");
let mut nonempty_facets: Vec<_> = elements.iter()
.cloned()
.filter_map(|ref facet| {
let facet = facet.upgrade()
.expect("Initial simplex points should not yet have had a chance to be \
buried!");
debug!("Facet {:?} has {} conflict points;",
facet.borrow().facet,
facet.borrow().outside.len());
if !facet.borrow().outside.is_empty() {
Some(facet)
} else {
None
}
})
.collect();
debug!("Entering refinement loop:");
let mut iteration = 0;
while let Some(facet) = nonempty_facets.pop() {
debug!("********* ITERATION {} \
********************************************************",
iteration);
iteration += 1;
debug!("Selected facet with nonempty conflict list (facet: {:?}, {} conflicts.)",
facet.borrow().facet,
facet.borrow().outside.len());
// We select the furthest point p of our facet f's outside set.
let PointIdx(p_idx) = {
let mut max_dist = T::zero();
let mut max_idx = 0;
for (idx, &PointIdx(pt_idx)) in facet.borrow().outside.iter().enumerate() {
let dist = facet.borrow().plane.signed_distance(&self[pt_idx]);
if dist > max_dist {
max_dist = dist;
max_idx = idx;
}
}
facet.borrow_mut().outside.swap_remove(max_idx)
};
debug!("Selected {:?} as the furthest point of the conflict set.",
PointIdx(p_idx));
let p = &self[p_idx];
// We must find the "horizon". This is the set of ridges which form the boundary
// between the "visible" and "non-visible" facets. We do this by building a subgraph
// where the nodes are all visible facets; the ridges are then all edges where one node
// is in the subgraph and the other is not.
let mut visit_stack = vec![facet.clone()];
let mut visible = vec![facet.clone()];
let mut horizon = Vec::new();
facet.borrow_mut().visited = true;
facet.borrow_mut().dead = true;
debug!("Beginning horizon search...");
while let Some(facet) = visit_stack.pop() {
debug!("Searching facet {:?}.", facet.borrow().facet);
for (neighbor, ridge) in
facet.borrow().neighbors.iter().map(|&(ref nb, ref ridge)| (nb.clone(), ridge)) {
let neighbor = neighbor.upgrade()
.expect("Facet neighbors should not be buried yet!");
if !neighbor.borrow().visited {
debug!("Checking unvisited neighbor {:?}...",
neighbor.borrow().facet);
neighbor.borrow_mut().visited = true;
if neighbor.borrow().plane.signed_distance(p).gt_zero() {
debug!("Neighbor found to be visible; pushing to visible set and \
search stack.");
neighbor.borrow_mut().dead = true;
visible.push(neighbor.clone());
visit_stack.push(neighbor);
} else {
debug!("Neighbor found to not be visible; pushing to horizon set \
with ridge {:?}.",
ridge);
horizon.push((neighbor, ridge.clone()));
}
} else if !neighbor.borrow().dead {
debug!("Neighbor revisited, but not visible; pushing to horizon set \
with unique ridge {:?}.",
ridge);
horizon.push((neighbor, ridge.clone()));
} else {
debug!("Neighbor {:?} already visited and visible... skipping...",
neighbor.borrow().facet);
}
}
}
debug!("Resetting all horizon `visited` flags.");
for &(ref neighbor, _) in horizon.iter() {
neighbor.borrow_mut().visited = false;
}
let mut new_facets: Vec<QhFacetRef<T, D>> = Vec::new();
debug!("Generating new facets from {} horizon ridges.",
horizon.len());
for (neighbor, ridge) in horizon {
debug!("Generating new facet set...");
let new_facet: Data<PointIdx, D> = {
debug!("Collecting ridge points...");
let mut facet: DataVec<PointIdx, D> = ridge.iter().cloned().collect_exact();
debug!("Pushing eye point...");
facet.push(PointIdx(p_idx));
debug!("Converting DataVec into Data..");
facet.into()
};
debug!("Generating new facet hyperplane...");
let new_plane = {
let mut plane: Plane<T, D> = new_facet.iter()
.map(|&PointIdx(i)| self[i].clone())
.collect_exact::<Facet<T, D>>()
.into();
if plane.signed_distance(&guaranteed_interior_point)
.gt_zero() {
plane.n = -plane.n;
}
plane
};
debug!("Generating singleton neighbor set with neighbor {:?} and ridge {:?}...",
neighbor.borrow().facet,
ridge);
let new_element_rc = Rc::new(RefCell::new(QhElement {
facet: new_facet.clone(),
plane: new_plane,
outside: Vec::new(),
neighbors: vec![(Rc::downgrade(&neighbor), ridge.clone())],
visited: false,
dead: false,
self_ref: None,
}));
new_element_rc.borrow_mut().self_ref = Some(new_element_rc.clone());
debug!("Linking singleton neighbor...");
neighbor.borrow_mut().neighbors.push((Rc::downgrade(&new_element_rc), ridge));
elements.push(Rc::downgrade(&new_element_rc));
{
let mut new_element = new_element_rc.borrow_mut();
'finding_new_neighbors: for &ref facet in new_facets.iter() {
let mut skipped = None;
debug!("Checking potential neighbor {:?} for {:?}.",
facet.borrow().facet,
new_element.facet);
for (i, ref p_idx) in new_facet.iter().enumerate() {
if !facet.borrow().facet.contains(p_idx) {
if skipped.is_none() {
skipped = Some(i);
} else {
continue 'finding_new_neighbors;
}
}
}
let skipped_idx = match skipped {
Some(skipped_idx) => skipped_idx,
None => continue 'finding_new_neighbors,
};
debug!("Neighbor discovered, containing all elements but the {}th.",
skipped_idx);
let new_ridge = new_facet.clone().contract(skipped_idx);
debug!("Neighbor discovered: {:?} with ridge {:?}.",
facet.borrow().facet,
new_ridge);
facet.borrow_mut()
.neighbors
.push((Rc::downgrade(&new_element_rc), new_ridge.clone()));
new_element.neighbors.push((Rc::downgrade(&facet), new_ridge));
}
}
debug!("Generated new facet {:?} with {} neighbors.",
new_element_rc.borrow().facet,
new_element_rc.borrow().neighbors.len());
new_facets.push(new_element_rc);
}
debug!("Collecting points from facets to be deleted...");
let mut unassigned_pts = Vec::new();
for old_facet in visible.into_iter() {
debug!("Exterminating facet {:?}...", old_facet.borrow().facet);
// Unlink this old facet from all its neighbors.
for &(ref old_neighbor, _) in old_facet.borrow().neighbors.iter() {
let old_neighbor = old_neighbor.upgrade()
.expect("All neighbors to a given facet (even a dead one) should be \
valid.");
debug!("Unlinking neighbor {:?}...", old_neighbor.borrow().facet);
old_neighbor.borrow_mut().neighbors.retain(|&(ref reflexive, _)| {
let reflexive = reflexive.upgrade()
.expect("When searching for a reference in an old-neighbor list, no \
facets should be dead yet!");
(reflexive.as_ref() as *const _) != (old_facet.as_ref() as *const _)
});
}
// All references to this facet from other facets should be gone now.
// We took this facet by value, so it should be dead now as well. No cycles. One
// reference left.
let mut old_facet = old_facet.borrow_mut();
unassigned_pts.extend(old_facet.outside.drain(..));
mem::drop(old_facet.self_ref.take()); // We destroy this old facet by forcibly taking
// and dropping its self-reference - this will cause the Rc to have no more
// strong references, and be deallocated.
}
debug!("Assigning collected points to new facets... {} points to be assigned.",
unassigned_pts.len());
// For each new facet: we steal unassigned points from facets in V. Then, add facets
// with newly assigned facets into the nomepty_facets queue. We then push them into the
// nonempty facet queue.
for new_facet in new_facets.into_iter() {
let mut i = 0;
while i < unassigned_pts.len() {
if new_facet.borrow()
.plane
.signed_distance(&self[unassigned_pts[i].0])
.gt_zero() {
new_facet.borrow_mut().outside.push(unassigned_pts.swap_remove(i));
} else {
i += 1;
}
}
if new_facet.borrow().outside.len() > 0 {
nonempty_facets.push(new_facet);
}
}
}
debug!("Quickhull finished. Generating filtered point list...");
let facet_list: Vec<_> = elements.into_iter()
.filter_map(|e| e.upgrade())
.map(|e| {
mem::drop(e.borrow_mut().self_ref.take().unwrap());
Rc::try_unwrap(e)
.expect("Facet should only have one strong reference!")
.into_inner()
.facet
})
.collect();
ConvexHull {
source: self,
facets: facet_list,
}
}
}
// Unit tests for the quickhull implementation above. `Point!`, `data!`,
// `B2`/`B3` and `quick_hull` come from the surrounding crate (`super` /
// `typehack`), so these tests only build inside the project.
#[cfg(test)]
mod tests {
extern crate env_logger;
use super::*;
use super::PointIdx;
use typehack::prelude::*;
// Asserts that the hull contains the edge {a, b} in either vertex order.
macro_rules! assert_edge {
($hull:expr, $a:expr => $b:expr) => (assert!($hull.facets.contains(&data![PointIdx($a), PointIdx($b)]) ||
$hull.facets.contains(&data![PointIdx($b), PointIdx($a)])));
}
// Asserts that the hull contains the triangle {a, b, c} in any of its six
// possible vertex orderings.
macro_rules! assert_face {
($hull:expr, $a:expr => $b:expr => $c:expr) => (
assert!($hull.facets.contains(&data![PointIdx($a), PointIdx($b), PointIdx($c)]) ||
$hull.facets.contains(&data![PointIdx($a), PointIdx($c), PointIdx($b)]) ||
$hull.facets.contains(&data![PointIdx($b), PointIdx($a), PointIdx($c)]) ||
$hull.facets.contains(&data![PointIdx($b), PointIdx($c), PointIdx($a)]) ||
$hull.facets.contains(&data![PointIdx($c), PointIdx($a), PointIdx($b)]) ||
$hull.facets.contains(&data![PointIdx($c), PointIdx($b), PointIdx($a)])));
}
// Asserts that every listed index is present in a sorted point-index list.
macro_rules! assert_pts {
($points:expr, $($p:expr),*) => ({
$(assert!($points.binary_search(&$p).is_ok());)*
});
}
// Asserts that every listed index is absent from a sorted point-index list.
macro_rules! assert_not_pts {
($points:expr, $($p:expr),*) => ({
$(assert!($points.binary_search(&$p).is_err());)*
});
}
// Smoke test: a bare triangle is its own hull; just check nothing panics.
#[test]
fn qhull_2d_trivial_nondegenerate_1() {
// `let _` tolerates the logger already having been initialised by another test.
let _ = env_logger::init();
let triangle = vec![Point![1., 2.], Point![2., 3.], Point![4., 1.]];
let _ = triangle.quick_hull(B2::as_data());
}
#[test]
fn qhull_2d_trivial_nondegenerate_2() {
let _ = env_logger::init();
let triangle = vec![Point![0., -2.], Point![-2., 3.], Point![2., 4.]];
let _ = triangle.quick_hull(B2::as_data());
}
// Quadrilateral with all four points on the hull.
#[test]
fn qhull_2d_nontrivial_nondegenerate_1() {
let _ = env_logger::init();
// No interior points.
let points = vec![Point![0., -2.], Point![-1., 3.], Point![-2., 0.], Point![2., 2.]];
let hull = points.quick_hull(B2::as_data());
assert_edge!(hull, 1 => 3);
assert_edge!(hull, 1 => 2);
assert_edge!(hull, 0 => 2);
assert_edge!(hull, 0 => 3);
}
// Decagon-like outline; even-indexed vertices of the ring form the hull.
#[test]
fn qhull_2d_nontrivial_nondegenerate_2() {
let _ = env_logger::init();
// No interior points.
let points = vec![Point![0., -2.],
Point![2., -1.],
Point![1., 1.],
Point![3., 2.],
Point![1., 3.],
Point![0., 4.],
Point![-1., 3.],
Point![-3., 2.],
Point![-1., 1.],
Point![-2., 0.]];
let hull = points.quick_hull(B2::as_data());
debug!("hull: {:?}", hull);
assert_edge!(hull, 0 => 1);
assert_edge!(hull, 1 => 3);
assert_edge!(hull, 3 => 5);
assert_edge!(hull, 5 => 7);
assert_edge!(hull, 7 => 9);
assert_edge!(hull, 9 => 0);
}
// 40 random-looking 2-D points; checks exactly which indices land on the hull.
#[test]
fn qhull_2d_nontrivial_nondegenerate_3() {
let _ = env_logger::init();
// No interior points.
let points = vec![Point![0.3215348546593775, 0.03629583077160248], // 0
Point![0.02402358131857918, -0.2356728797179394], // 1
Point![0.04590851212470659, -0.4156409924995536], // 2
Point![0.3218384001607433, 0.1379850698988746], // 3
Point![0.11506479756447, -0.1059521474930943], // 4
Point![0.2622539999543261, -0.29702873322836], // 5
Point![-0.161920957418085, -0.4055339716426413], // 6
Point![0.1905378631228002, 0.3698601009043493], // 7
Point![0.2387090918968516, -0.01629827079949742], // 8
Point![0.07495888748668034, -0.1659825110491202], // 9
Point![0.3319341836794598, -0.1821814101954749], // 10
Point![0.07703635755650362, -0.2499430638271785], // 11
Point![0.2069242999022122, -0.2232970760420869], // 12
Point![0.04604079532068295, -0.1923573186549892], // 13
Point![0.05054295812784038, 0.4754929463150845], // 14
Point![-0.3900589168910486, 0.2797829520700341], // 15
Point![0.3120693385713448, -0.0506329867529059], // 16
Point![0.01138812723698857, 0.4002504701728471], // 17
Point![0.009645149586391732, 0.1060251100976254], // 18
Point![-0.03597933197019559, 0.2953639456959105], // 19
Point![0.1818290866742182, 0.001454397571696298], // 20
Point![0.444056063372694, 0.2502497166863175], // 21
Point![-0.05301752458607545, -0.06553921621808712], // 22
Point![0.4823896228171788, -0.4776170002088109], // 23
Point![-0.3089226845734964, -0.06356112199235814], // 24
Point![-0.271780741188471, 0.1810810595574612], // 25
Point![0.4293626522918815, 0.2980897964891882], // 26
Point![-0.004796652127799228, 0.382663812844701], // 27
Point![0.430695573269106, -0.2995073500084759], // 28
Point![0.1799668387323309, -0.2973467472915973], // 29
Point![0.4932166845474547, 0.4928094162538735], // 30
Point![-0.3521487911717489, 0.4352656197131292], // 31
Point![-0.4907368011686362, 0.1865826865533206], // 32
Point![-0.1047924716070224, -0.247073392148198], // 33
Point![0.4374961861758457, -0.001606279519951237], // 34
Point![0.003256207800708899, -0.2729194320486108], // 35
Point![0.04310378203457577, 0.4452604050238248], // 36
Point![0.4916198379282093, -0.345391701297268], // 37
Point![0.001675087028811806, 0.1531837672490476], // 38
Point![-0.4404289572876217, -0.2894855991839297]]; /* 39 */
let hull = points.quick_hull(B2::as_data());
debug!("hull: {:?}", hull);
let point_indices = hull.point_indices();
assert_pts!(point_indices, 6, 14, 23, 30, 31, 32, 37, 39);
assert_not_pts!(point_indices,
0,
1,
2,
3,
4,
5,
7,
8,
9,
10,
11,
12,
13,
15,
16,
17,
18,
19,
20,
21,
22,
24,
25,
26,
27,
28,
29,
33,
34,
35,
36,
38);
}
// Five 3-D points in convex position: all are hull vertices.
#[test]
fn qhull_3d_nontrivial_nondegenerate_1() {
let _ = env_logger::init();
let tetrahedron = vec![Point![1., 1., 1.],
Point![2., 3., 1.],
Point![-2., 0., 1.],
Point![1., 4., 2.],
Point![2., 1., 2.]];
let hull = tetrahedron.quick_hull(B3::as_data());
let point_indices = hull.point_indices();
debug!("hull: {:?}", hull);
assert_pts!(point_indices, 0, 1, 2, 3, 4);
assert_face!(hull, 1 => 3 => 2);
assert_face!(hull, 0 => 1 => 2);
assert_face!(hull, 3 => 4 => 2);
assert_face!(hull, 4 => 0 => 2);
assert_face!(hull, 4 => 3 => 1);
assert_face!(hull, 0 => 4 => 1);
}
// Point 4 lies inside the tetrahedron and must be excluded from the hull.
#[test]
fn qhull_3d_nontrivial_nondegenerate_2() {
let _ = env_logger::init();
let tetrahedron = vec![Point![1., 1., 1.],
Point![2., 3., 1.],
Point![-2., 0., 1.],
Point![1., 4., 2.],
Point![0.5, 3., 1.7]];
let hull = tetrahedron.quick_hull(B3::as_data());
let point_indices = hull.point_indices();
debug!("hull: {:?}", hull);
assert_pts!(point_indices, 0, 1, 2, 3);
assert_not_pts!(point_indices, 4);
assert_face!(hull, 1 => 3 => 2);
assert_face!(hull, 0 => 1 => 2);
assert_face!(hull, 3 => 0 => 2);
assert_face!(hull, 0 => 3 => 1);
}
#[test]
fn qhull_3d_nontrivial_nondegenerate_3() {
let _ = env_logger::init();
let tetrahedron = vec![Point![0.346987, 0.594300, 0.395053],
Point![0.472077, 0.063314, 0.029606],
Point![0.606915, 0.641988, 0.167560],
Point![0.554433, 0.549847, 0.032239],
Point![0.118838, 0.496147, 0.367041]];
let hull = tetrahedron.quick_hull(B3::as_data());
let point_indices = hull.point_indices();
debug!("hull: {:?}", hull);
assert_pts!(point_indices, 0, 1, 2, 3, 4);
assert_face!(hull, 2 => 0 => 4);
assert_face!(hull, 0 => 1 => 4);
assert_face!(hull, 1 => 0 => 2);
assert_face!(hull, 3 => 2 => 4);
assert_face!(hull, 1 => 3 => 4);
assert_face!(hull, 3 => 1 => 2);
}
// Six points with one interior point (index 0).
#[test]
fn qhull_3d_nontrivial_nondegenerate_4() {
let _ = env_logger::init();
let tetrahedron = vec![Point![0.177014, 0.572769, 0.201412],
Point![0.064319, 0.555407, 0.114194],
Point![0.494991, 0.666792, 0.947249],
Point![0.046340, 0.320490, 0.377621],
Point![0.946863, 0.737976, 0.371916],
Point![0.829540, 0.636103, 0.085375]];
let hull = tetrahedron.quick_hull(B3::as_data());
let point_indices = hull.point_indices();
debug!("hull: {:?}", hull);
assert_pts!(point_indices, 1, 2, 3, 4, 5);
assert_not_pts!(point_indices, 0);
assert_face!(hull, 2 => 4 => 3);
assert_face!(hull, 4 => 5 => 3);
assert_face!(hull, 1 => 2 => 3);
assert_face!(hull, 5 => 1 => 3);
assert_face!(hull, 2 => 1 => 4);
assert_face!(hull, 1 => 5 => 4);
}
// 40-point 3-D fixture; the commented list below records the expected hull
// vertices, and the face assertions pin the exact triangulation.
#[test]
fn qhull_3d_nontrivial_nondegenerate_5() {
let _ = env_logger::init();
let points = vec![Point![0.3215426810286406, 0.1678336189760208, -0.2203710966001927], /* 0 */
Point![0.2229772524190855, -0.4213242506806965, -0.1966818060695024], /* 1 */
Point![0.3688830163971363, -0.1831502133823468, -0.2056387967482571], /* 2 */
Point![-0.1712592515826777, -0.3542439228428937, 0.2223876390814666], /* 3 */
Point![-0.3309556113844324, -0.370961861099081, 0.2439994981922204], /* 4 */
Point![-0.1004397059794885, -0.09014152417903909, -0.008600084584765189], /* 5 */
Point![0.458374538420117, -0.09914027349943322, -0.2505798421339875], /* 6 */
Point![-0.4954086979808367, -0.3339869997780649, -0.3195065691317492], /* 7 */
Point![0.053091190339151, 0.3036317017894533, 0.1380056861210668], /* 8 */
Point![0.4615616439483703, 0.4665423151725366, 0.1766835406205464], /* 9 */
Point![-0.4797380864431505, 0.0419809916447671, -0.4254776681079321], /* 10 */
Point![-0.003168473023146823, -0.2525299883005488, -0.27151530400991], /* 11 */
Point![-0.3577162826971303, -0.1375644040643837, -0.04494194644032229], /* 12 */
Point![-0.3392973838740004, 0.4288679723896719, -0.01599531622230571], /* 13 */
Point![0.1667164640191164, 0.003605551555385444, -0.4014989499947977], /* 14 */
Point![0.00714666676441833, 0.1140243407469469, 0.407090128778564], /* 15 */
Point![-0.03621271768232132, 0.3728502838619522, 0.4947140370446388], /* 16 */
Point![-0.3411871756810576, -0.3328629143842151, -0.4270033635450559], /* 17 */
Point![0.3544683273457627, -0.450828987127942, -0.0827870439577727], /* 18 */
Point![-0.4018510635028137, 0.08917494033386464, -0.2367824197158054], /* 19 */
Point![0.3978697768392692, -0.002667689232777493, 0.1641431727112673], /* 20 */
Point![-0.245701439441835, 0.495905311308713, -0.3194406286994373], /* 21 */
Point![0.161352035739787, -0.1563404972258401, 0.3852604361113724], /* 22 */
Point![0.07214279572678994, -0.4960366976410492, 0.1112227161519441], /* 23 */
Point![0.3201855824516951, 0.359077846965825, 0.02136723140381946], /* 24 */
Point![0.1190541238701475, -0.05734495917087884, 0.2032677509852384], /* 25 */
Point![0.3210853052521919, 0.4807189479290684, 0.4433501688235907], /* 26 */
Point![0.3862800354941562, 0.2085496142586224, 0.09336129957191763], /* 27 */
Point![0.1233572616459404, 0.265491605052251, 0.117400122450106], /* 28 */
Point![0.1438531872293476, -0.2594872752758556, -0.2026374435076839], /* 29 */
Point![0.2724846394476338, -0.3506708492996831, 0.2750346518820475], /* 30 */
Point![-0.4926118841325975, -0.3279366743079728, 0.3683135596740186], /* 31 */
Point![0.2459906458351674, 0.3647787136629026, -0.1641662355178652], /* 32 */
Point![-0.141922976953837, -0.2994764654892278, -0.3009570467294725], /* 33 */
Point![-0.1850859398814719, 0.2606059478228967, 0.004159106876849283], /* 34 */
Point![-0.09789466634196664, -0.3156603563722785, -0.303610991503681], /* 35 */
Point![0.2100642609503719, -0.4499717643018549, 0.3245569875692548], /* 36 */
Point![-0.1707163766685095, -0.2301452446078371, -0.05112823569320907], /* 37 */
Point![-0.312260808713977, -0.1674135249735914, 0.2808831662692904], /* 38 */
Point![-0.1966306233747216, 0.2291105671125563, -0.3387042454804333]]; /* 39 */
// 0.3215426810286406 0.1678336189760208 -0.2203710966001927 // 0
// 0.2229772524190855 -0.4213242506806965 -0.1966818060695024 // 1
// 0.458374538420117 -0.09914027349943322 -0.2505798421339875 // 6
// -0.4954086979808367 -0.3339869997780649 -0.3195065691317492 // 7
// 0.4615616439483703 0.4665423151725366 0.1766835406205464 // 9
// -0.4797380864431505 0.0419809916447671 -0.4254776681079321 // 10
// -0.3392973838740004 0.4288679723896719 -0.01599531622230571 // 13
// 0.1667164640191164 0.003605551555385444 -0.4014989499947977 // 14
// -0.03621271768232132 0.3728502838619522 0.4947140370446388 // 16
// -0.3411871756810576 -0.3328629143842151 -0.4270033635450559 // 17
// 0.3544683273457627 -0.450828987127942 -0.0827870439577727 // 18
// 0.3978697768392692 -0.002667689232777493 0.1641431727112673 // 20
// -0.245701439441835 0.495905311308713 -0.3194406286994373 // 21
// 0.161352035739787 -0.1563404972258401 0.3852604361113724 // 22
// 0.07214279572678994 -0.4960366976410492 0.1112227161519441 // 23
// 0.3210853052521919 0.4807189479290684 0.4433501688235907 // 24
// 0.2724846394476338 -0.3506708492996831 0.2750346518820475 // 30
// -0.4926118841325975 -0.3279366743079728 0.3683135596740186 // 31
// 0.2459906458351674 0.3647787136629026 -0.1641662355178652 // 32
// 0.2100642609503719 -0.4499717643018549 0.3245569875692548 // 36
let hull = points.quick_hull(B3::as_data());
debug!("hull: {:?}", hull);
assert_face!(hull, 31 => 23 => 7);
assert_face!(hull, 23 => 17 => 7);
assert_face!(hull, 1 => 17 => 23);
assert_face!(hull, 21 => 32 => 9);
assert_face!(hull, 31 => 13 => 16);
assert_face!(hull, 26 => 21 => 9);
assert_face!(hull, 13 => 26 => 16);
assert_face!(hull, 26 => 13 => 21);
assert_face!(hull, 6 => 20 => 9);
assert_face!(hull, 20 => 26 => 9);
assert_face!(hull, 26 => 20 => 30);
assert_face!(hull, 36 => 26 => 30);
assert_face!(hull, 31 => 36 => 23);
assert_face!(hull, 17 => 10 => 7);
assert_face!(hull, 13 => 10 => 21);
assert_face!(hull, 10 => 31 => 7);
assert_face!(hull, 10 => 13 => 31);
assert_face!(hull, 14 => 32 => 21);
assert_face!(hull, 10 => 14 => 21);
assert_face!(hull, 14 => 10 => 17);
assert_face!(hull, 14 => 1 => 6);
assert_face!(hull, 14 => 17 => 1);
assert_face!(hull, 22 => 31 => 16);
assert_face!(hull, 22 => 36 => 31);
assert_face!(hull, 26 => 22 => 16);
assert_face!(hull, 36 => 22 => 26);
assert_face!(hull, 1 => 18 => 6);
assert_face!(hull, 18 => 36 => 30);
assert_face!(hull, 18 => 1 => 23);
assert_face!(hull, 36 => 18 => 23);
assert_face!(hull, 18 => 20 => 6);
assert_face!(hull, 20 => 18 => 30);
assert_face!(hull, 0 => 14 => 6);
assert_face!(hull, 14 => 0 => 32);
assert_face!(hull, 0 => 6 => 9);
assert_face!(hull, 32 => 0 => 9);
}
}
|
use std::{error::Error, fmt, io};
/// Errors that can occur while decoding or handling LoRaWAN packets.
#[derive(Debug)]
pub enum LoraWanError {
/// The packet carried an unrecognized packet-type value (stored for display).
InvalidPacketType(u8),
/// FPort 0 was used together with FOpts, which is rejected as invalid.
InvalidFPortForFopts,
/// An underlying I/O error, convertible via the `From<io::Error>` impl.
Io(io::Error),
}
impl fmt::Display for LoraWanError {
    /// Formats a human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // `{:#04x}` yields `0x` plus two zero-padded hex digits (e.g. `0x05`).
            // The previous `{:#02x}` was a bug: with the `#` flag the `0x` prefix
            // counts toward the width, so a width of 2 left no room for padding
            // and single-digit values printed as `0x5`.
            LoraWanError::InvalidPacketType(v) => write!(f, "Invalid packet type: {:#04x}", v),
            LoraWanError::InvalidFPortForFopts => write!(f, "Invalid: fport 0 with fopts"),
            LoraWanError::Io(err) => err.fmt(f),
        }
    }
}
// Marker impl: the required `Display` + `Debug` bounds are already satisfied,
// so no methods need overriding.
impl Error for LoraWanError {}
impl From<io::Error> for LoraWanError {
fn from(err: io::Error) -> Self {
LoraWanError::Io(err)
}
}
|
// NOTE(review): svd2rust-generated peripheral layout (STGENR). The field
// order, the `#[repr(C)]`, and the `_reserved2` gap encode the hardware
// register offsets given in the doc attributes — do not hand-edit; regenerate
// from the SVD instead.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - the control interface must clear the STGEN_CNTCR.EN bit before writing to this register."]
pub stgenr_cntcvl: STGENR_CNTCVL,
#[doc = "0x04 - the control interface must clear the STGEN_CNTCR.EN bit before writing to this register."]
pub stgenr_cntcvu: STGENR_CNTCVU,
_reserved2: [u8; 0x0fc8],
#[doc = "0xfd0 - STGENR peripheral ID4 register"]
pub stgenr_pidr4: STGENR_PIDR4,
#[doc = "0xfd4 - STGENR peripheral ID5 register"]
pub stgenr_pidr5: STGENR_PIDR5,
#[doc = "0xfd8 - STGENR peripheral ID6 register"]
pub stgenr_pidr6: STGENR_PIDR6,
#[doc = "0xfdc - STGENR peripheral ID7 register"]
pub stgenr_pidr7: STGENR_PIDR7,
#[doc = "0xfe0 - STGENR peripheral ID0 register"]
pub stgenr_pidr0: STGENR_PIDR0,
#[doc = "0xfe4 - STGENR peripheral ID1 register"]
pub stgenr_pidr1: STGENR_PIDR1,
#[doc = "0xfe8 - STGENR peripheral ID2 register"]
pub stgenr_pidr2: STGENR_PIDR2,
#[doc = "0xfec - STGENR peripheral ID3 register"]
pub stgenr_pidr3: STGENR_PIDR3,
#[doc = "0xff0 - STGENR component ID0 register"]
pub stgenr_cidr0: STGENR_CIDR0,
#[doc = "0xff4 - STGENR component ID1 register"]
pub stgenr_cidr1: STGENR_CIDR1,
#[doc = "0xff8 - STGENR component ID2 register"]
pub stgenr_cidr2: STGENR_CIDR2,
#[doc = "0xffc - STGENR component ID3 register"]
pub stgenr_cidr3: STGENR_CIDR3,
}
// NOTE(review): auto-generated (svd2rust) register accessor type aliases and
// their submodule declarations. Kept byte-identical — edits here would be
// overwritten on the next code generation pass.
#[doc = "STGENR_CNTCVL (r) register accessor: the control interface must clear the STGEN_CNTCR.EN bit before writing to this register.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cntcvl::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cntcvl`]
module"]
pub type STGENR_CNTCVL = crate::Reg<stgenr_cntcvl::STGENR_CNTCVL_SPEC>;
#[doc = "the control interface must clear the STGEN_CNTCR.EN bit before writing to this register."]
pub mod stgenr_cntcvl;
#[doc = "STGENR_CNTCVU (r) register accessor: the control interface must clear the STGEN_CNTCR.EN bit before writing to this register.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cntcvu::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cntcvu`]
module"]
pub type STGENR_CNTCVU = crate::Reg<stgenr_cntcvu::STGENR_CNTCVU_SPEC>;
#[doc = "the control interface must clear the STGEN_CNTCR.EN bit before writing to this register."]
pub mod stgenr_cntcvu;
#[doc = "STGENR_PIDR4 (r) register accessor: STGENR peripheral ID4 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr4::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr4`]
module"]
pub type STGENR_PIDR4 = crate::Reg<stgenr_pidr4::STGENR_PIDR4_SPEC>;
#[doc = "STGENR peripheral ID4 register"]
pub mod stgenr_pidr4;
#[doc = "STGENR_PIDR5 (r) register accessor: STGENR peripheral ID5 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr5::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr5`]
module"]
pub type STGENR_PIDR5 = crate::Reg<stgenr_pidr5::STGENR_PIDR5_SPEC>;
#[doc = "STGENR peripheral ID5 register"]
pub mod stgenr_pidr5;
#[doc = "STGENR_PIDR6 (r) register accessor: STGENR peripheral ID6 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr6::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr6`]
module"]
pub type STGENR_PIDR6 = crate::Reg<stgenr_pidr6::STGENR_PIDR6_SPEC>;
#[doc = "STGENR peripheral ID6 register"]
pub mod stgenr_pidr6;
#[doc = "STGENR_PIDR7 (r) register accessor: STGENR peripheral ID7 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr7::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr7`]
module"]
pub type STGENR_PIDR7 = crate::Reg<stgenr_pidr7::STGENR_PIDR7_SPEC>;
#[doc = "STGENR peripheral ID7 register"]
pub mod stgenr_pidr7;
#[doc = "STGENR_PIDR0 (r) register accessor: STGENR peripheral ID0 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr0::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr0`]
module"]
pub type STGENR_PIDR0 = crate::Reg<stgenr_pidr0::STGENR_PIDR0_SPEC>;
#[doc = "STGENR peripheral ID0 register"]
pub mod stgenr_pidr0;
#[doc = "STGENR_PIDR1 (r) register accessor: STGENR peripheral ID1 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr1`]
module"]
pub type STGENR_PIDR1 = crate::Reg<stgenr_pidr1::STGENR_PIDR1_SPEC>;
#[doc = "STGENR peripheral ID1 register"]
pub mod stgenr_pidr1;
#[doc = "STGENR_PIDR2 (r) register accessor: STGENR peripheral ID2 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr2::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr2`]
module"]
pub type STGENR_PIDR2 = crate::Reg<stgenr_pidr2::STGENR_PIDR2_SPEC>;
#[doc = "STGENR peripheral ID2 register"]
pub mod stgenr_pidr2;
#[doc = "STGENR_PIDR3 (r) register accessor: STGENR peripheral ID3 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_pidr3::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_pidr3`]
module"]
pub type STGENR_PIDR3 = crate::Reg<stgenr_pidr3::STGENR_PIDR3_SPEC>;
#[doc = "STGENR peripheral ID3 register"]
pub mod stgenr_pidr3;
#[doc = "STGENR_CIDR0 (r) register accessor: STGENR component ID0 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cidr0::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cidr0`]
module"]
pub type STGENR_CIDR0 = crate::Reg<stgenr_cidr0::STGENR_CIDR0_SPEC>;
#[doc = "STGENR component ID0 register"]
pub mod stgenr_cidr0;
#[doc = "STGENR_CIDR1 (r) register accessor: STGENR component ID1 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cidr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cidr1`]
module"]
pub type STGENR_CIDR1 = crate::Reg<stgenr_cidr1::STGENR_CIDR1_SPEC>;
#[doc = "STGENR component ID1 register"]
pub mod stgenr_cidr1;
#[doc = "STGENR_CIDR2 (r) register accessor: STGENR component ID2 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cidr2::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cidr2`]
module"]
pub type STGENR_CIDR2 = crate::Reg<stgenr_cidr2::STGENR_CIDR2_SPEC>;
#[doc = "STGENR component ID2 register"]
pub mod stgenr_cidr2;
#[doc = "STGENR_CIDR3 (r) register accessor: STGENR component ID3 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`stgenr_cidr3::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`stgenr_cidr3`]
module"]
pub type STGENR_CIDR3 = crate::Reg<stgenr_cidr3::STGENR_CIDR3_SPEC>;
#[doc = "STGENR component ID3 register"]
pub mod stgenr_cidr3;
|
use std::ops::{Add, Sub};
/// A 2-D point with `f32` coordinates.
#[derive(Copy, Clone, Debug)]
pub struct Point {
    pub x: f32,
    pub y: f32,
}

impl Point {
    /// Creates a point from explicit coordinates.
    pub fn new(x: f32, y: f32) -> Point {
        // Field-init shorthand instead of the redundant `x: x, y: y`.
        Point { x, y }
    }

    /// Creates a point from an `(x, y)` tuple.
    pub fn from_tuple(t: (f32, f32)) -> Point {
        Point::new(t.0, t.1)
    }

    /// The origin, `(0, 0)`.
    pub fn zero() -> Point {
        Point::new(0f32, 0f32)
    }

    /// Returns the coordinates as an `(x, y)` tuple.
    pub fn to_tuple(&self) -> (f32, f32) {
        (self.x, self.y)
    }
}
impl Add for Point {
type Output = Point;
fn add(self, rhs: Point) -> Point {
Point::new(self.x + rhs.x, self.y + rhs.y)
}
}
impl Sub for Point {
type Output = Point;
fn sub(self, rhs: Point) -> Point {
Point::new(self.x - rhs.x, self.y - rhs.y)
}
}
/// A 2-D size (width × height) in `f32`.
#[derive(Copy, Clone, Debug)]
pub struct Size {
    pub w: f32,
    pub h: f32,
}

impl Size {
    /// Creates a size from explicit width and height.
    pub fn new(w: f32, h: f32) -> Size {
        // Field-init shorthand instead of the redundant `w: w, h: h`.
        Size { w, h }
    }

    /// A zero-area size.
    pub fn zero() -> Size {
        Size::new(0.0, 0.0)
    }

    /// Creates a size from a `(w, h)` tuple.
    pub fn from_tuple(t: (f32, f32)) -> Size {
        Size::new(t.0, t.1)
    }

    /// Returns the size as a `(w, h)` tuple.
    pub fn to_tuple(&self) -> (f32, f32) {
        (self.w, self.h)
    }
}
/// An ARGB color with `f32` channels.
#[derive(Copy, Clone, Debug)]
pub struct Color {
    pub a: f32,
    pub r: f32,
    pub g: f32,
    pub b: f32,
}

impl Color {
    /// Creates a color from alpha, red, green and blue channel values.
    pub fn argb(a: f32, r: f32, g: f32, b: f32) -> Color {
        // Field-init shorthand instead of `a: a, r: r, ...`.
        Color { a, r, g, b }
    }

    /// Returns the channels as `(a, r, g, b)`.
    pub fn to_tuple_argb(&self) -> (f32, f32, f32, f32) {
        (self.a, self.r, self.g, self.b)
    }

    /// Returns the color channels as `(r, g, b)`, discarding alpha.
    pub fn to_tuple_rgb(&self) -> (f32, f32, f32) {
        (self.r, self.g, self.b)
    }
}
/// An axis-aligned rectangle described by its top-left position and size.
///
/// The accessors show a y-down convention: `bottom() = pos.y + size.h`.
#[derive(Copy, Clone, Debug)]
pub struct Rect {
    pub pos: Point,
    pub size: Size,
}

impl Rect {
    /// Creates a rectangle from a position and a size.
    pub fn pos_size(pos: Point, size: Size) -> Rect {
        // Field-init shorthand instead of `pos: pos, size: size`.
        Rect { pos, size }
    }

    /// Creates a rectangle of the given size positioned at the origin.
    pub fn from_size(size: Size) -> Rect {
        Rect {
            pos: Point::zero(),
            size,
        }
    }

    /// Creates a rectangle from its four edge coordinates.
    ///
    /// NOTE: the argument order is `(right, top, left, bottom)` — unusual,
    /// but existing callers depend on it, so it is preserved.
    pub fn from_bounds(right: f32, top: f32, left: f32, bottom: f32) -> Rect {
        Rect {
            pos: Point::new(left, top),
            size: Size::new(right - left, bottom - top),
        }
    }

    /// An empty rectangle at the origin.
    pub fn zero() -> Rect {
        Rect {
            pos: Point::zero(),
            size: Size::zero(),
        }
    }

    /// X coordinate of the left edge.
    pub fn left(&self) -> f32 {
        self.pos.x
    }

    /// X coordinate of the right edge.
    pub fn right(&self) -> f32 {
        self.pos.x + self.size.w
    }

    /// Y coordinate of the bottom edge (top + height; y grows downward).
    pub fn bottom(&self) -> f32 {
        self.pos.y + self.size.h
    }

    /// Y coordinate of the top edge.
    pub fn top(&self) -> f32 {
        self.pos.y
    }

    /// Returns `((x, y), (w, h))`.
    pub fn to_pos_size_tuple(&self) -> ((f32, f32), (f32, f32)) {
        (self.pos.to_tuple(), self.size.to_tuple())
    }

    /// Converts `rect` from this rectangle's local coordinates into the
    /// outer space by offsetting its position by `self.pos`.
    pub fn transform_to_outer(&self, rect: Rect) -> Rect {
        Rect::pos_size(self.pos + rect.pos, rect.size)
    }

    /// Inverse of `transform_to_outer`: converts `rect` from the outer
    /// space into this rectangle's local coordinates.
    pub fn transform_to_inner(&self, rect: Rect) -> Rect {
        Rect::pos_size(rect.pos - self.pos, rect.size)
    }

    /// Grows the rectangle outward by `margin` on every side.
    pub fn expand(&self, margin: Thickness) -> Rect {
        Rect::from_bounds(self.right() + margin.right, self.top() - margin.top,
                          self.left() - margin.left, self.bottom() + margin.bottom)
    }

    /// Shrinks the rectangle inward by `margin` on every side.
    pub fn inset(&self, margin: Thickness) -> Rect {
        Rect::from_bounds(self.right() - margin.right, self.top() + margin.top,
                          self.left() + margin.left, self.bottom() - margin.bottom)
    }
}
#[derive(Copy, Clone, Debug)]
pub struct Thickness {
pub right: f32,
pub top: f32,
pub left: f32,
pub bottom: f32,
}
impl Thickness {
pub fn new(right: f32, top: f32, left: f32, bottom: f32) -> Thickness {
Thickness {
right: right,
top: top,
left: left,
bottom: bottom,
}
}
pub fn hv(horizontal: f32, vertical: f32) -> Thickness {
Thickness::new(horizontal, vertical, horizontal, vertical)
}
pub fn rect_in(&self, container: Size) -> Rect {
Rect::from_bounds(container.w - self.right, self.top, self.left, container.h - self.bottom)
}
} |
// NOTE(review): auto-generated (svd2rust) reader/writer type aliases for the
// ADC ISR register fields. Kept byte-identical — regenerate from the SVD
// rather than hand-editing.
#[doc = "Register `ISR` reader"]
pub type R = crate::R<ISR_SPEC>;
#[doc = "Register `ISR` writer"]
pub type W = crate::W<ISR_SPEC>;
#[doc = "Field `ADRDY` reader - ADC ready flag"]
pub type ADRDY_R = crate::BitReader;
#[doc = "Field `ADRDY` writer - ADC ready flag"]
pub type ADRDY_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOSMP` reader - ADC group regular end of sampling flag"]
pub type EOSMP_R = crate::BitReader;
#[doc = "Field `EOSMP` writer - ADC group regular end of sampling flag"]
pub type EOSMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOC` reader - ADC group regular end of unitary conversion flag"]
pub type EOC_R = crate::BitReader;
#[doc = "Field `EOC` writer - ADC group regular end of unitary conversion flag"]
pub type EOC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOS` reader - ADC group regular end of sequence conversions flag"]
pub type EOS_R = crate::BitReader;
#[doc = "Field `EOS` writer - ADC group regular end of sequence conversions flag"]
pub type EOS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OVR` reader - ADC group regular overrun flag"]
pub type OVR_R = crate::BitReader;
#[doc = "Field `OVR` writer - ADC group regular overrun flag"]
pub type OVR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JEOC` reader - ADC group injected end of unitary conversion flag"]
pub type JEOC_R = crate::BitReader;
#[doc = "Field `JEOC` writer - ADC group injected end of unitary conversion flag"]
pub type JEOC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JEOS` reader - ADC group injected end of sequence conversions flag"]
pub type JEOS_R = crate::BitReader;
#[doc = "Field `JEOS` writer - ADC group injected end of sequence conversions flag"]
pub type JEOS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD1` reader - ADC analog watchdog 1 flag"]
pub type AWD1_R = crate::BitReader;
#[doc = "Field `AWD1` writer - ADC analog watchdog 1 flag"]
pub type AWD1_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD2` reader - ADC analog watchdog 2 flag"]
pub type AWD2_R = crate::BitReader;
#[doc = "Field `AWD2` writer - ADC analog watchdog 2 flag"]
pub type AWD2_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD3` reader - ADC analog watchdog 3 flag"]
pub type AWD3_R = crate::BitReader;
#[doc = "Field `AWD3` writer - ADC analog watchdog 3 flag"]
pub type AWD3_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JQOVF` reader - ADC group injected contexts queue overflow flag"]
pub type JQOVF_R = crate::BitReader;
#[doc = "Field `JQOVF` writer - ADC group injected contexts queue overflow flag"]
pub type JQOVF_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors for the ISR register: each extracts one flag bit from
// the cached register value. A single-bit mask yields 0 or 1, so testing
// `== 1` is equivalent to the usual `!= 0`.
impl R {
    #[doc = "Bit 0 - ADC ready flag"]
    #[inline(always)]
    pub fn adrdy(&self) -> ADRDY_R {
        ADRDY_R::new(self.bits & 1 == 1)
    }
    #[doc = "Bit 1 - ADC group regular end of sampling flag"]
    #[inline(always)]
    pub fn eosmp(&self) -> EOSMP_R {
        EOSMP_R::new((self.bits >> 1) & 1 == 1)
    }
    #[doc = "Bit 2 - ADC group regular end of unitary conversion flag"]
    #[inline(always)]
    pub fn eoc(&self) -> EOC_R {
        EOC_R::new((self.bits >> 2) & 1 == 1)
    }
    #[doc = "Bit 3 - ADC group regular end of sequence conversions flag"]
    #[inline(always)]
    pub fn eos(&self) -> EOS_R {
        EOS_R::new((self.bits >> 3) & 1 == 1)
    }
    #[doc = "Bit 4 - ADC group regular overrun flag"]
    #[inline(always)]
    pub fn ovr(&self) -> OVR_R {
        OVR_R::new((self.bits >> 4) & 1 == 1)
    }
    #[doc = "Bit 5 - ADC group injected end of unitary conversion flag"]
    #[inline(always)]
    pub fn jeoc(&self) -> JEOC_R {
        JEOC_R::new((self.bits >> 5) & 1 == 1)
    }
    #[doc = "Bit 6 - ADC group injected end of sequence conversions flag"]
    #[inline(always)]
    pub fn jeos(&self) -> JEOS_R {
        JEOS_R::new((self.bits >> 6) & 1 == 1)
    }
    #[doc = "Bit 7 - ADC analog watchdog 1 flag"]
    #[inline(always)]
    pub fn awd1(&self) -> AWD1_R {
        AWD1_R::new((self.bits >> 7) & 1 == 1)
    }
    #[doc = "Bit 8 - ADC analog watchdog 2 flag"]
    #[inline(always)]
    pub fn awd2(&self) -> AWD2_R {
        AWD2_R::new((self.bits >> 8) & 1 == 1)
    }
    #[doc = "Bit 9 - ADC analog watchdog 3 flag"]
    #[inline(always)]
    pub fn awd3(&self) -> AWD3_R {
        AWD3_R::new((self.bits >> 9) & 1 == 1)
    }
    #[doc = "Bit 10 - ADC group injected contexts queue overflow flag"]
    #[inline(always)]
    pub fn jqovf(&self) -> JQOVF_R {
        JQOVF_R::new((self.bits >> 10) & 1 == 1)
    }
}
// svd2rust-generated write proxies: each method returns a field writer
// bound to this `W` at that field's bit offset (the const generic arg).
impl W {
    #[doc = "Bit 0 - ADC ready flag"]
    #[inline(always)]
    #[must_use]
    pub fn adrdy(&mut self) -> ADRDY_W<ISR_SPEC, 0> {
        ADRDY_W::new(self)
    }
    #[doc = "Bit 1 - ADC group regular end of sampling flag"]
    #[inline(always)]
    #[must_use]
    pub fn eosmp(&mut self) -> EOSMP_W<ISR_SPEC, 1> {
        EOSMP_W::new(self)
    }
    #[doc = "Bit 2 - ADC group regular end of unitary conversion flag"]
    #[inline(always)]
    #[must_use]
    pub fn eoc(&mut self) -> EOC_W<ISR_SPEC, 2> {
        EOC_W::new(self)
    }
    #[doc = "Bit 3 - ADC group regular end of sequence conversions flag"]
    #[inline(always)]
    #[must_use]
    pub fn eos(&mut self) -> EOS_W<ISR_SPEC, 3> {
        EOS_W::new(self)
    }
    #[doc = "Bit 4 - ADC group regular overrun flag"]
    #[inline(always)]
    #[must_use]
    pub fn ovr(&mut self) -> OVR_W<ISR_SPEC, 4> {
        OVR_W::new(self)
    }
    #[doc = "Bit 5 - ADC group injected end of unitary conversion flag"]
    #[inline(always)]
    #[must_use]
    pub fn jeoc(&mut self) -> JEOC_W<ISR_SPEC, 5> {
        JEOC_W::new(self)
    }
    #[doc = "Bit 6 - ADC group injected end of sequence conversions flag"]
    #[inline(always)]
    #[must_use]
    pub fn jeos(&mut self) -> JEOS_W<ISR_SPEC, 6> {
        JEOS_W::new(self)
    }
    #[doc = "Bit 7 - ADC analog watchdog 1 flag"]
    #[inline(always)]
    #[must_use]
    pub fn awd1(&mut self) -> AWD1_W<ISR_SPEC, 7> {
        AWD1_W::new(self)
    }
    #[doc = "Bit 8 - ADC analog watchdog 2 flag"]
    #[inline(always)]
    #[must_use]
    pub fn awd2(&mut self) -> AWD2_W<ISR_SPEC, 8> {
        AWD2_W::new(self)
    }
    #[doc = "Bit 9 - ADC analog watchdog 3 flag"]
    #[inline(always)]
    #[must_use]
    pub fn awd3(&mut self) -> AWD3_W<ISR_SPEC, 9> {
        AWD3_W::new(self)
    }
    #[doc = "Bit 10 - ADC group injected contexts queue overflow flag"]
    #[inline(always)]
    #[must_use]
    pub fn jqovf(&mut self) -> JQOVF_W<ISR_SPEC, 10> {
        JQOVF_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Caller is responsible for the raw value being valid for this
        // register; no field-level checking is performed here.
        self.bits = bits;
        self
    }
}
#[doc = "ADC interrupt and status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`isr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`isr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ISR_SPEC;
// The underlying register is 32 bits wide.
impl crate::RegisterSpec for ISR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`isr::R`](R) reader structure"]
impl crate::Readable for ISR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`isr::W`](W) writer structure"]
impl crate::Writable for ISR_SPEC {
    // Both bitmaps are zero — presumably no bits of this register need a
    // fixed 0/1 written back during `modify` — confirm against svd2rust docs.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ISR to value 0"]
impl crate::Resettable for ISR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::{
gui::{BuildContext, Ui, UiMessage, UiNode},
scene::commands::{
light::{
SetSpotLightDistanceCommand, SetSpotLightFalloffAngleDeltaCommand,
SetSpotLightHotspotCommand,
},
SceneCommand,
},
send_sync_message,
sidebar::{make_f32_input_field, make_section, make_text_mark, COLUMN_WIDTH, ROW_HEIGHT},
Message,
};
use rg3d::{
core::pool::Handle,
gui::{
grid::{Column, GridBuilder, Row},
message::{MessageDirection, NumericUpDownMessage, UiMessageData, WidgetMessage},
widget::WidgetBuilder,
},
scene::{light::Light, node::Node},
};
use std::sync::mpsc::Sender;
/// Sidebar section exposing the editable parameters of a spot light node.
pub struct SpotLightSection {
    /// Root widget of the section; its visibility is toggled depending on
    /// whether the selected node is a spot light.
    pub section: Handle<UiNode>,
    /// Numeric input field for the hotspot cone angle.
    hotspot: Handle<UiNode>,
    /// Numeric input field for the falloff angle delta.
    falloff_delta: Handle<UiNode>,
    /// Numeric input field for the light distance (labelled "Radius" in the UI).
    distance: Handle<UiNode>,
    /// Channel used to submit editor messages (undoable scene commands).
    sender: Sender<Message>,
}
impl SpotLightSection {
    /// Builds the section UI: a three-row, two-column grid of label +
    /// numeric input field for hotspot angle, falloff delta and distance.
    pub fn new(ctx: &mut BuildContext, sender: Sender<Message>) -> Self {
        let hotspot;
        let falloff_delta;
        let distance;
        let section = make_section(
            "Light Properties",
            GridBuilder::new(
                WidgetBuilder::new()
                    .with_child(make_text_mark(ctx, "Hotspot", 0))
                    .with_child({
                        // Hotspot cone angle limited to [0, PI] radians, 0.1 step.
                        hotspot = make_f32_input_field(ctx, 0, 0.0, std::f32::consts::PI, 0.1);
                        hotspot
                    })
                    .with_child(make_text_mark(ctx, "Falloff Delta", 1))
                    .with_child({
                        // Falloff delta limited to [0, PI] radians, finer 0.01 step.
                        falloff_delta =
                            make_f32_input_field(ctx, 1, 0.0, std::f32::consts::PI, 0.01);
                        falloff_delta
                    })
                    .with_child(make_text_mark(ctx, "Radius", 2))
                    .with_child({
                        // Distance is unbounded above (any non-negative f32).
                        distance = make_f32_input_field(ctx, 2, 0.0, std::f32::MAX, 0.1);
                        distance
                    }),
            )
            .add_column(Column::strict(COLUMN_WIDTH))
            .add_column(Column::stretch())
            .add_row(Row::strict(ROW_HEIGHT))
            .add_row(Row::strict(ROW_HEIGHT))
            .add_row(Row::strict(ROW_HEIGHT))
            .build(ctx),
            ctx,
        );
        Self {
            section,
            hotspot,
            falloff_delta,
            distance,
            sender,
        }
    }
    /// Pushes the current spot light parameters into the input fields and
    /// shows the section only when `node` actually is a spot light.
    pub fn sync_to_model(&mut self, node: &Node, ui: &mut Ui) {
        let visible = if let Node::Light(Light::Spot(spot)) = node {
            // NOTE(review): `send_sync_message` presumably prevents these
            // programmatic updates from echoing back as user edits — confirm.
            send_sync_message(
                ui,
                NumericUpDownMessage::value(
                    self.hotspot,
                    MessageDirection::ToWidget,
                    spot.hotspot_cone_angle(),
                ),
            );
            send_sync_message(
                ui,
                NumericUpDownMessage::value(
                    self.falloff_delta,
                    MessageDirection::ToWidget,
                    spot.falloff_angle_delta(),
                ),
            );
            send_sync_message(
                ui,
                NumericUpDownMessage::value(
                    self.distance,
                    MessageDirection::ToWidget,
                    spot.distance(),
                ),
            );
            true
        } else {
            false
        };
        send_sync_message(
            ui,
            WidgetMessage::visibility(self.section, MessageDirection::ToWidget, visible),
        );
    }
    /// Translates numeric-field edits into undoable scene commands for the
    /// spot light `node` addressed by `handle`.
    pub fn handle_message(&mut self, message: &UiMessage, node: &Node, handle: Handle<Node>) {
        if let Node::Light(Light::Spot(spot)) = node {
            if let UiMessageData::NumericUpDown(NumericUpDownMessage::Value(value)) =
                *message.data()
            {
                // Each branch emits a command only when the value actually
                // changed, avoiding no-op entries in the undo stack.
                if message.destination() == self.hotspot && spot.hotspot_cone_angle().ne(&value) {
                    self.sender
                        .send(Message::DoSceneCommand(SceneCommand::SetSpotLightHotspot(
                            SetSpotLightHotspotCommand::new(handle, value),
                        )))
                        .unwrap();
                } else if message.destination() == self.falloff_delta
                    && spot.falloff_angle_delta().ne(&value)
                {
                    self.sender
                        .send(Message::DoSceneCommand(
                            SceneCommand::SetSpotLightFalloffAngleDelta(
                                SetSpotLightFalloffAngleDeltaCommand::new(handle, value),
                            ),
                        ))
                        .unwrap();
                } else if message.destination() == self.distance && spot.distance().ne(&value) {
                    self.sender
                        .send(Message::DoSceneCommand(SceneCommand::SetSpotLightDistance(
                            SetSpotLightDistanceCommand::new(handle, value),
                        )))
                        .unwrap();
                }
            }
        }
    }
}
|
mod q2v1q;
mod q2v2q;
mod q2v3q;
mod q2v4q;
mod q2v5q;
mod q2v6q;
mod q2v7q;
mod q2v8q;
mod q2v9q;
use q2v1q::vec_macro;
use q2v2q::new_pixel_buffer;
use q2v3q::new_vec;
use q2v4q::it_vec;
use q2v5q::palindrome;
use q2v6q::capacity;
use q2v7q::in_rm_vec;
use q2v8q::pop_vec;
use q2v9q::imp_or_fun;
/// Runs every exercise module in order, separating the noisier ones with
/// blank lines on stdout.
fn main() {
    vec_macro();
    println!("{:?}", new_pixel_buffer(2, 3));
    println!("{:?}", new_pixel_buffer(3, 4));
    new_vec();
    it_vec();
    // `println!()` emits a bare newline; the previous `println!("")` did the
    // same but is non-idiomatic (clippy: `println_empty_string`).
    println!();
    palindrome();
    println!();
    capacity();
    println!();
    in_rm_vec();
    println!();
    pop_vec();
    println!();
    imp_or_fun();
}
|
use super::*;
use crate::resources::ResourceType;
use crate::{requests, ReadonlyString};
use azure_core::HttpClient;
/// A client for Cosmos trigger resources.
#[derive(Debug, Clone)]
pub struct TriggerClient {
    /// Client for the collection this trigger belongs to; also provides
    /// access to the database and account-level clients.
    collection_client: CollectionClient,
    /// The trigger's name within the collection.
    trigger_name: ReadonlyString,
}
impl TriggerClient {
    /// Create a new trigger client
    pub(crate) fn new<S: Into<ReadonlyString>>(
        collection_client: CollectionClient,
        trigger_name: S,
    ) -> Self {
        Self {
            collection_client,
            trigger_name: trigger_name.into(),
        }
    }
    /// Get a [`CosmosClient`]
    pub fn cosmos_client(&self) -> &CosmosClient {
        self.collection_client.cosmos_client()
    }
    /// Get a [`DatabaseClient`]
    pub fn database_client(&self) -> &DatabaseClient {
        self.collection_client.database_client()
    }
    /// Get a [`CollectionClient`]
    pub fn collection_client(&self) -> &CollectionClient {
        &self.collection_client
    }
    /// Get the trigger name
    pub fn trigger_name(&self) -> &str {
        // Relies on `ReadonlyString` deref-coercing to `str`.
        &self.trigger_name
    }
    /// Create a trigger
    pub fn create_trigger(&self) -> requests::CreateOrReplaceTriggerBuilder<'_> {
        // `true` selects "create" mode on the shared builder.
        requests::CreateOrReplaceTriggerBuilder::new(self, true)
    }
    /// Replace a trigger
    pub fn replace_trigger(&self) -> requests::CreateOrReplaceTriggerBuilder<'_> {
        // `false` selects "replace" mode on the shared builder.
        requests::CreateOrReplaceTriggerBuilder::new(self, false)
    }
    /// Delete a trigger
    pub fn delete_trigger(&self) -> requests::DeleteTriggerBuilder<'_, '_> {
        requests::DeleteTriggerBuilder::new(self)
    }
    // The HTTP client shared by the whole Cosmos client hierarchy.
    pub(crate) fn http_client(&self) -> &dyn HttpClient {
        self.cosmos_client().http_client()
    }
    // Builds a request addressing this specific trigger:
    // `dbs/{db}/colls/{coll}/triggers/{trigger}`.
    pub(crate) fn prepare_request_with_trigger_name(
        &self,
        method: http::Method,
    ) -> http::request::Builder {
        self.cosmos_client().prepare_request(
            &format!(
                "dbs/{}/colls/{}/triggers/{}",
                self.database_client().database_name(),
                self.collection_client().collection_name(),
                self.trigger_name()
            ),
            method,
            ResourceType::Triggers,
        )
    }
    // Builds a request addressing the collection's triggers feed:
    // `dbs/{db}/colls/{coll}/triggers`.
    pub(crate) fn prepare_request(&self, method: http::Method) -> http::request::Builder {
        self.cosmos_client().prepare_request(
            &format!(
                "dbs/{}/colls/{}/triggers",
                self.database_client().database_name(),
                self.collection_client().collection_name(),
            ),
            method,
            ResourceType::Triggers,
        )
    }
}
|
//! Helper types for working with GCS
use crate::error::Error;
use std::borrow::Cow;
/// A wrapper around strings meant to be used as bucket names,
/// to validate they conform to [Bucket Name Requirements](https://cloud.google.com/storage/docs/naming#requirements)
#[derive(Debug)]
pub struct BucketName<'a> {
    /// The bucket name; borrowed when built from `&str`, owned when built
    /// from `String` or via `non_validated` on an owned source.
    name: Cow<'a, str>,
}
impl<'a> BucketName<'a> {
/// Creates a [`BucketName`] without validating it, meaning
/// that invalid names will result in API failures when
/// requests are actually made to GCS instead.
pub fn non_validated<S: AsRef<str> + ?Sized>(name: &'a S) -> Self {
Self {
name: Cow::Borrowed(name.as_ref()),
}
}
/// Validates the string is a syntactically valid bucket name
fn validate(name: &str) -> Result<(), Error> {
let count = name.chars().count();
// Bucket names must contain 3 to 63 characters.
if !(3..=63).contains(&count) {
return Err(Error::InvalidCharacterCount {
len: count,
min: 3,
max: 63,
});
}
let last = count - 1;
for (i, c) in name.chars().enumerate() {
if c.is_ascii_uppercase() {
return Err(Error::InvalidCharacter(i, c));
}
match c {
'a'..='z' | '0'..='9' => {}
'-' | '_' => {
// Bucket names must start and end with a number or letter.
if i == 0 || i == last {
return Err(Error::InvalidCharacter(i, c));
}
}
c => {
return Err(Error::InvalidCharacter(i, c));
}
}
}
// Bucket names cannot begin with the "goog" prefix.
if name.starts_with("goog") {
return Err(Error::InvalidPrefix("goog"));
}
// Bucket names cannot contain "google" or close misspellings, such as "g00gle".
// They don't really specify what counts as a "close" misspelling, so just check
// the ones they say, and let the API deny the rest
if name.contains("google") || name.contains("g00gle") {
return Err(Error::InvalidSequence("google"));
}
Ok(())
}
}
// Displays as the raw bucket name.
impl<'a> std::fmt::Display for BucketName<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.name.fmt(f)
    }
}
// Borrow the name as a string slice.
impl<'a> AsRef<str> for BucketName<'a> {
    fn as_ref(&self) -> &str {
        self.name.as_ref()
    }
}
// Borrow the name as raw bytes.
impl<'a> AsRef<[u8]> for BucketName<'a> {
    fn as_ref(&self) -> &[u8] {
        self.name.as_bytes()
    }
}
// Validating conversion from a borrowed string; borrows on success.
impl<'a> TryFrom<&'a str> for BucketName<'a> {
    type Error = Error;
    fn try_from(n: &'a str) -> Result<Self, Self::Error> {
        Self::validate(n)?;
        Ok(Self {
            name: Cow::Borrowed(n),
        })
    }
}
// Validating conversion from an owned string; takes ownership on success.
impl<'a> TryFrom<String> for BucketName<'a> {
    type Error = Error;
    fn try_from(n: String) -> Result<Self, Self::Error> {
        Self::validate(&n)?;
        Ok(Self {
            name: Cow::Owned(n),
        })
    }
}
/// A wrapper for strings meant to be used as object names, to validate
/// that they follow [Object Name Requirements](https://cloud.google.com/storage/docs/naming#objectnames)
#[derive(Debug)]
pub struct ObjectName<'a> {
    /// The object name; borrowed when built from `&str`, owned when built
    /// from `String`.
    name: Cow<'a, str>,
}
impl<'a> ObjectName<'a> {
    /// Creates an `ObjectName` without validating it, meaning
    /// that invalid names will result in API failures when
    /// requests are actually made to GCS instead.
    pub fn non_validated<S: AsRef<str> + ?Sized>(name: &'a S) -> Self {
        Self {
            name: Cow::Borrowed(name.as_ref()),
        }
    }
    /// Validates the string is a syntactically valid object name,
    /// reporting the first violation found.
    fn validate(name: &str) -> Result<(), Error> {
        // Object names can contain any sequence of valid Unicode characters, of length 1-1024 bytes when UTF-8 encoded.
        if name.is_empty() || name.len() > 1024 {
            return Err(Error::InvalidLength {
                min: 1,
                max: 1024,
                len: name.len(),
            });
        }
        // Objects cannot be named . or ..
        // BUGFIX: this previously compared against "..." (three dots), which
        // wrongly accepted the forbidden name ".." and wrongly rejected the
        // legal name "...".
        if name == "." || name == ".." {
            return Err(Error::InvalidPrefix("."));
        }
        #[allow(clippy::match_same_arms)]
        for (i, c) in name.chars().enumerate() {
            match c {
                // Object names cannot contain Carriage Return or Line Feed characters.
                '\r' | '\n' => {}
                // Avoid using "#" in your object names: gsutil interprets object names ending
                // with #<numeric string> as version identifiers, so including "#" in object names
                // can make it difficult or impossible to perform operations on such versioned
                // objects using gsutil (see Object Versioning and Concurrency Control).
                // Avoid using "[", "]", "*", or "?" in your object names: gsutil interprets
                // these characters as wildcards, so including them in object names can make
                // it difficult or impossible to perform wildcard operations using gsutil.
                '#' | '[' | ']' | '*' | '?' => {}
                // Avoid using control characters that are illegal in XML 1.0 (#x7F–#x84 and #x86–#x9F):
                // these characters will cause XML listing issues when you try to list your objects.
                '\u{7F}'..='\u{84}' | '\u{86}'..='\u{9F}' => {}
                _ => {
                    continue;
                }
            }
            // Falling out of the match (rather than hitting `continue`) means
            // `c` matched one of the rejected classes above.
            return Err(Error::InvalidCharacter(i, c));
        }
        // Object names cannot start with .well-known/acme-challenge.
        if name.starts_with(".well-known/acme-challenge") {
            return Err(Error::InvalidPrefix(".well-known/acme-challenge"));
        }
        Ok(())
    }
}
// Displays as the raw object name.
impl<'a> std::fmt::Display for ObjectName<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.name.fmt(f)
    }
}
// Borrow the name as a string slice.
impl<'a> AsRef<str> for ObjectName<'a> {
    fn as_ref(&self) -> &str {
        self.name.as_ref()
    }
}
// Borrow the name as raw bytes.
impl<'a> AsRef<[u8]> for ObjectName<'a> {
    fn as_ref(&self) -> &[u8] {
        self.name.as_bytes()
    }
}
// Validating conversion from a borrowed string; borrows on success.
impl<'a> TryFrom<&'a str> for ObjectName<'a> {
    type Error = Error;
    fn try_from(n: &'a str) -> Result<Self, Self::Error> {
        Self::validate(n)?;
        Ok(Self {
            name: Cow::Borrowed(n),
        })
    }
}
// Validating conversion from an owned string; takes ownership on success.
impl<'a> TryFrom<String> for ObjectName<'a> {
    type Error = Error;
    fn try_from(n: String) -> Result<Self, Self::Error> {
        Self::validate(&n)?;
        Ok(Self {
            name: Cow::Owned(n),
        })
    }
}
// Allow a (&bucket, &object) pair to lend each of its halves, so the
// blanket `ObjectIdentifier` impl below applies to it.
impl<'a> AsRef<BucketName<'a>> for (&'a BucketName<'a>, &'a ObjectName<'a>) {
    fn as_ref(&self) -> &BucketName<'a> {
        self.0
    }
}
impl<'a> AsRef<ObjectName<'a>> for (&'a BucketName<'a>, &'a ObjectName<'a>) {
    fn as_ref(&self) -> &ObjectName<'a> {
        self.1
    }
}
/// Anything that can name both a bucket and an object within it.
pub trait ObjectIdentifier<'a> {
    /// The bucket the object resides in.
    fn bucket(&self) -> &BucketName<'a>;
    /// The object's name within the bucket.
    fn object(&self) -> &ObjectName<'a>;
}
// Blanket impl: any type able to lend both a bucket name and an object
// name is an object identifier.
impl<'a, T> ObjectIdentifier<'a> for T
where
    T: AsRef<BucketName<'a>> + AsRef<ObjectName<'a>>,
{
    fn bucket(&self) -> &BucketName<'a> {
        self.as_ref()
    }
    fn object(&self) -> &ObjectName<'a> {
        self.as_ref()
    }
}
/// A concrete object id which contains a valid bucket and object name
/// which fully specifies an object
pub struct ObjectId<'a> {
    /// The bucket the object resides in.
    pub bucket: BucketName<'a>,
    /// The object's name within the bucket.
    pub object: ObjectName<'a>,
}
impl<'a> ObjectId<'a> {
    /// Builds an id from anything convertible into validated bucket and
    /// object names, propagating the first validation error encountered.
    pub fn new<B, O>(bucket: B, object: O) -> Result<Self, Error>
    where
        B: std::convert::TryInto<BucketName<'a>, Error = Error> + ?Sized,
        O: std::convert::TryInto<ObjectName<'a>, Error = Error> + ?Sized,
    {
        Ok(Self {
            bucket: bucket.try_into()?,
            object: object.try_into()?,
        })
    }
}
// Lend the bucket half, enabling the blanket `ObjectIdentifier` impl.
impl<'a> AsRef<BucketName<'a>> for ObjectId<'a> {
    fn as_ref(&self) -> &BucketName<'a> {
        &self.bucket
    }
}
// Lend the object half, enabling the blanket `ObjectIdentifier` impl.
impl<'a> AsRef<ObjectName<'a>> for ObjectId<'a> {
    fn as_ref(&self) -> &ObjectName<'a> {
        &self.object
    }
}
// Unit tests covering each `BucketName::validate` rule: length bounds,
// character classes, positional '-'/'_' rules, and the goog/google checks.
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn disallows_too_small() {
        assert_eq!(
            BucketName::try_from("no").unwrap_err(),
            Error::InvalidCharacterCount {
                len: 2,
                min: 3,
                max: 63,
            }
        );
    }
    #[test]
    fn disallows_too_big() {
        // 64 characters: one past the maximum.
        assert_eq!(
            BucketName::try_from(
                "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            )
            .unwrap_err(),
            Error::InvalidCharacterCount {
                len: 64,
                min: 3,
                max: 63
            }
        );
    }
    #[test]
    fn disallows_uppercase() {
        assert_eq!(
            BucketName::try_from("uhOH").unwrap_err(),
            Error::InvalidCharacter(2, 'O')
        );
    }
    #[test]
    fn disallows_dots() {
        assert_eq!(
            BucketName::try_from("uh.oh").unwrap_err(),
            Error::InvalidCharacter(2, '.')
        );
    }
    #[test]
    fn disallows_hyphen_or_underscore_at_start() {
        assert_eq!(
            BucketName::try_from("_uhoh").unwrap_err(),
            Error::InvalidCharacter(0, '_')
        );
    }
    #[test]
    fn disallows_hyphen_or_underscore_at_end() {
        assert_eq!(
            BucketName::try_from("uhoh-").unwrap_err(),
            Error::InvalidCharacter(4, '-')
        );
    }
    #[test]
    fn disallows_goog_at_start() {
        assert_eq!(
            BucketName::try_from("googuhoh").unwrap_err(),
            Error::InvalidPrefix("goog")
        );
    }
    #[test]
    fn disallows_google_sequence() {
        assert_eq!(
            BucketName::try_from("uhohg00gleuhoh").unwrap_err(),
            Error::InvalidSequence("google")
        );
    }
}
|
/// Root AST node: an optional declaration section followed by the
/// program's statement list.
#[derive(Debug, PartialEq, Clone)]
pub struct Program {
    /// Declarations, when the program has a declaration section.
    pub declarations: Option<Declarations>,
    /// The executable statements.
    pub commands: Commands,
}
/// The declaration section of a program.
pub type Declarations = Vec<Declaration>;
/// A single declaration: a scalar variable, or an array with an explicit
/// start/end index range.
#[derive(Debug, PartialEq, Clone)]
pub enum Declaration {
    Var { name: String },
    Array { name: String, start: i64, end: i64 },
}
impl Declaration {
    /// The declared identifier, regardless of variant.
    pub fn name(&self) -> &str {
        match self {
            Declaration::Var { name } | Declaration::Array { name, .. } => name,
        }
    }
}
/// A sequence of statements.
pub type Commands = Vec<Command>;
/// A single statement of the language.
#[derive(Debug, PartialEq, Clone)]
pub enum Command {
    /// Conditional with both a positive and a negative branch.
    IfElse {
        condition: Condition,
        positive: Commands,
        negative: Commands,
    },
    /// Conditional with only a positive branch.
    If {
        condition: Condition,
        positive: Commands,
    },
    /// Loop with the condition stated before the body.
    While {
        condition: Condition,
        commands: Commands,
    },
    /// Loop with the condition stated after the body.
    Do {
        commands: Commands,
        condition: Condition,
    },
    /// Counted loop over `counter` from `from` to `to`; `ascending`
    /// selects the iteration direction.
    For {
        counter: String,
        ascending: bool,
        from: Value,
        to: Value,
        commands: Commands,
    },
    /// Reads input into `target`.
    Read {
        target: Identifier,
    },
    /// Writes `value` to output.
    Write {
        value: Value,
    },
    /// Assigns the result of `expr` to `target`.
    Assign {
        target: Identifier,
        expr: Expression,
    },
}
/// Binary arithmetic operators.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ExprOp {
    Plus,
    Minus,
    Times,
    Div,
    Mod,
}
/// Right-hand side of an assignment: either a bare value or a single
/// binary operation on two values.
#[derive(Debug, PartialEq, Clone)]
pub enum Expression {
    Simple {
        value: Value,
    },
    Compound {
        left: Value,
        op: ExprOp,
        right: Value,
    },
}
/// Relational operators used in conditions.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum RelOp {
    EQ,
    NEQ,
    LEQ,
    LT,
    GEQ,
    GT,
}
/// A comparison between two values.
#[derive(Debug, PartialEq, Clone)]
pub struct Condition {
    pub left: Value,
    pub op: RelOp,
    pub right: Value,
}
/// A literal number or an identifier access.
#[derive(Debug, PartialEq, Clone)]
pub enum Value {
    Num(i64),
    Identifier(Identifier),
}
/// A reference to a storage location: a scalar variable, an array indexed
/// by another variable, or an array indexed by a constant.
#[derive(Debug, PartialEq, Clone)]
pub enum Identifier {
    VarAccess { name: String },
    ArrAccess { name: String, index: String },
    ArrConstAccess { name: String, index: i64 },
}
impl Identifier {
    /// The name of the variable or array being accessed.
    pub fn name(&self) -> &str {
        use Identifier::*;
        match self {
            VarAccess { name } | ArrAccess { name, .. } | ArrConstAccess { name, .. } => name,
        }
    }
    /// Every identifier name this access mentions: the target itself plus,
    /// for variable-indexed array accesses, the index variable.
    pub fn all_names(&self) -> Vec<&str> {
        let mut names = vec![self.name()];
        if let Identifier::ArrAccess { index, .. } = self {
            names.push(index);
        }
        names
    }
}
|
// Copyright 2016 FullContact, Inc
// Copyright 2017 Jason Lingle
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Near-zero-cost, mostly-safe idiomatic bindings to LMDB.
//!
//! This crate provides an interface to LMDB which as much as possible is not
//! abstracted from the model LMDB itself provides, except as necessary to
//! integrate with the borrow checker. This means that you don't get easy
//! iterators, but also that you can do almost anything with LMDB through these
//! bindings as you can through C.
//!
//! # Example
//!
//! ```
//! extern crate lmdb_zero as lmdb;
//! extern crate tempdir;
//!
//! # fn main() {
//! # let tmp = tempdir::TempDir::new_in(".", "lmdbzero").unwrap();
//! # example(tmp.path().to_str().unwrap());
//! # }
//! #
//! fn example(path: &str) {
//! // Create the environment, that is, the file containing the database(s).
//! // This is unsafe because you need to ensure certain things about the
//! // host environment that these bindings can't help you with.
//! let env = unsafe {
//! lmdb::EnvBuilder::new().unwrap().open(
//! path, lmdb::open::Flags::empty(), 0o600).unwrap()
//! };
//! // Open the default database.
//! let db = lmdb::Database::open(
//! &env, None, &lmdb::DatabaseOptions::defaults())
//! .unwrap();
//! {
//! // Write some data in a transaction
//! let txn = lmdb::WriteTransaction::new(&env).unwrap();
//! // An accessor is used to control memory access.
//! // NB You can only have one live accessor from a particular transaction
//! // at a time. Violating this results in a panic at runtime.
//! {
//! let mut access = txn.access();
//! access.put(&db, "Germany", "Berlin", lmdb::put::Flags::empty()).unwrap();
//! access.put(&db, "France", "Paris", lmdb::put::Flags::empty()).unwrap();
//! access.put(&db, "Latvia", "Rīga", lmdb::put::Flags::empty()).unwrap();
//! }
//! // Commit the changes so they are visible to later transactions
//! txn.commit().unwrap();
//! }
//!
//! {
//! // Now let's read the data back
//! let txn = lmdb::ReadTransaction::new(&env).unwrap();
//! let access = txn.access();
//!
//! // Get the capital of Latvia. Note that the string is *not* copied; the
//! // reference actually points into the database memory, and is valid
//! // until the transaction is dropped or the accessor is mutated.
//! let capital_of_latvia: &str = access.get(&db, "Latvia").unwrap();
//! assert_eq!("Rīga", capital_of_latvia);
//!
//! // We can also use cursors to move over the contents of the database.
//! let mut cursor = txn.cursor(&db).unwrap();
//! assert_eq!(("France", "Paris"), cursor.first(&access).unwrap());
//! assert_eq!(("Germany", "Berlin"), cursor.next(&access).unwrap());
//! assert_eq!(("Latvia", "Rīga"), cursor.next(&access).unwrap());
//! assert!(cursor.next::<str,str>(&access).is_err());
//! }
//! }
//! ```
//!
//! # Anatomy of this crate
//!
//! `Environment` is the top-level structure. It is created with an
//! `EnvBuilder`. An `Environment` is a single file (by default in a
//! subdirectory) which stores the actual data of all the databases it
//! contains. It corresponds to an `MDB_env` in the C API.
//!
//! A `Database` is a single table of key/value pairs within an environment.
//! Each environment has a single anonymous database, and may contain a number
//! of named databases. Note that if you want to use named databases, you need
//! to use `EnvBuilder::set_maxdbs` before creating the `Environment` to make
//! room for the handles. A database can only have one `Database` handle per
//! environment at a time.
//!
//! All accesses to the data within an environment are done through
//! transactions. For this, there are the `ReadTransaction` and
//! `WriteTransaction` structs. Both of these deref to a `ConstTransaction`,
//! which provides most of the read-only functionality and can be used for
//! writing code that can run within either type of transaction. Note that read
//! transactions are far cheaper than write transactions.
//!
//! `ReadTransaction`s can be reused by using `reset()` to turn them into
//! `ResetTransaction`s and then `refresh()` to turn them into fresh
//! `ReadTransaction`s.
//!
//! One unusual property of this crate is the `ConstAccessor` and
//! `WriteAccessor` structs, which are obtained once from a transaction and
//! used to perform actual data manipulation. These are needed to work with the
//! borrow checker: Cursors have a lifetime bound by their transaction and thus
//! borrow it, so we need something else to permit borrowing mutable data. The
//! accessors reflect this borrowing: Reading from the database requires an
//! immutable borrow of the accessor, while writing (which may invalidate
//! pointers) requires a mutable borrow of the accessor, thus causing the
//! borrow checker to ensure that all read accesses are disposed before any
//! write.
//!
//! Finally, the `Cursor` struct can be created from a transaction to permit
//! more flexible access to a database. Each `Cursor` corresponds to a
//! `MDB_cursor`. Accessing data through a cursor requires borrowing
//! appropriately from the accessor of the transaction owning the cursor.
//!
//! If you want to define your own types to store in the database, see the
//! `lmdb_zero::traits` submodule.
//!
//! # Lifetimes and Ownership
//!
//! Lmdb-zero heavily uses lifetime parameters to allow user code to safely
//! retain handles into LMDB without extra runtime overhead.
//!
//! While this makes the library very flexible, it also makes it somewhat
//! harder to use when its types need to be referenced explicitly, for example
//! as struct members. The documentation for each type with lifetime parameters
//! therefore includes a short discussion of how the lifetimes are intended to
//! interact and how best to work with them.
//!
//! It is also possible to opt-out of compile-time lifetime tracking and
//! instead use `Arc` or `Rc` around various handles. In this case, all the
//! lifetime parameters simply become `'static`. See the next section for
//! details.
//!
//! ## Ownership Modes
//!
//! As of version 0.4.0, most APIs which construct a value which holds on to
//! some "parent" value (e.g., creating a `Database` within an `Environment`)
//! accept anything that can be converted into a [`Supercow`](https://docs.rs/supercow/0.1.0/supercow/).
//! Deep understanding of `Supercow` itself is not required to use `lmdb-zero`.
//! The only thing you need to know is that an `Into<Supercow<T>>` means that
//! you can pass in one of three classes of arguments:
//!
//! - `&T`. This is "borrowed mode". The majority of the documentation in this
//! crate uses borrowed mode. This is zero-overhead and is statically
//! verifiable (i.e., all usage is checked at compile-time), so it is
//! recommended that borrowed mode be used whenever reasonably possible. This
//! mode causes the "child" value to hold a normal reference to the parent,
//! which means that lifetimes must be tracked in the lifetime parameters. But
//! because of this, this mode can be inflexible; for example, you cannot use
//! safe Rust to create a `struct` holding both an `Environment` and its
//! `Database`s using borrowed mode.
//!
//! - `Arc<T>`. This is "shared mode". For `NonSyncSupercow`, `Rc<T>` may also
//! be used. The child will hold the `Arc` or `Rc`, thus ensuring the parent
//! lives at least as long as the child. Because of this, the related lifetime
//! parameters can simply be written as `'static`. It also means that
//! `Arc`/`Rc` references to the child and parent can be placed together in the
//! same struct with safe Rust. This comes at a cost: Constructing values in
//! shared mode incurs allocation; additionally, the ability to statically
//! verify the lifetime of the parent values is lost.
//!
//! - `T`. This is "owned mode". The parent is moved into the child value and
//! owned by the child from thereon. This is most useful when you only ever
//! want one child and don't care about retaining ownership of the parent. As
//! with shared mode, it also allows simply using `'static` as the relevant
//! lifetime parameters.
//!
//! # Major Differences from the LMDB C API
//!
//! Databases cannot be created or destroyed within a transaction due to the
//! awkward memory management semantics. For similar reasons, opening a
//! database more than once is not permitted (though note that LMDB doesn't
//! strictly allow this either --- it just silently returns an existing
//! handle).
//!
//! Access to data within the environment is guarded by transaction-specific
//! "accessors", which must be used in conjunction with the cursor or
//! transaction. This is how these bindings integrate with the borrow checker.
//!
//! APIs which obtain a reference to the owner of an object are not supported.
//!
//! Various APIs which radically change behaviour (including memory semantics)
//! in response to flags are separated into different calls which each express
//! their memory semantics clearly.
//!
//! # Non-Zero Cost
//!
//! There are three general areas where this wrapper adds non-zero-cost
//! abstractions:
//!
//! - Opening and closing databases adds locking overhead, since in LMDB it is
//! unsynchronised. This shouldn't have much impact since one rarely opens
//! and closes databases at a very high rate.
//!
//! - There is additional overhead in tracking what database handles are
//! currently open so that attempts to reopen one can be prevented.
//!
//! - Cursors and transactions track their owners separately. Additionally,
//! when two are used in conjunction, a runtime test is required to ensure
//! that they actually can be used together. This means that the handle
//! values are slightly larger and some function calls have an extra (very
//! predictable) branch if the optimiser does not optimise the branch away
//! entirely.
//!
//! - Using ownership modes other than borrowed (i.e., mundane references)
//!   incurs extra allocations in addition to the overhead inherent in that
//! ownership mode.
//!
//! # Using Zero-Copy
//!
//! This crate is primarily focussed on supporting zero-copy on all operations
//! where this is possible. The examples above demonstrate one aspect of this:
//! the `&str`s returned when querying for items are pointers into the database
//! itself, valid as long as the accessor is.
//!
//! The main traits to look at are `lmdb_zero::traits::AsLmdbBytes` and
//! `lmdb_zero::traits::FromLmdbBytes`, which are used to cast between byte
//! arrays and the types to be stored in the database.
//! `lmdb_zero::traits::FromReservedLmdbBytes` is used if you want to use the
//! `reserve` methods (in which you write the key only to the database and get
//! a pointer to a value to fill in after the fact). If you have a
//! `#[repr(C)]`, `Copy` struct, you can also use `lmdb_zero::traits::LmdbRaw`
//! if you just want to shove the raw struct itself into the database. All of
//! these have caveats which can be found on the struct documentation.
//!
//! Be aware that using zero-copy to save anything more interesting than byte
//! strings means your databases will not be portable to other architectures.
//! This mainly concerns byte-order, but types like `usize` whose size varies
//! by platform can also cause problems.
//!
//! # Notes on Memory Safety
//!
//! It is not possible to use lmdb-zero without at least one unsafe block,
//! because doing anything with a memory-mapped file requires making
//! assumptions about the host environment. Lmdb-zero is not in a position to
//! decide these assumptions, and so they are passed up to the caller.
//!
//! However, if these assumptions are met, it should be impossible to cause
//! memory unsafety (eg, aliasing mutable references; dangling pointers; buffer
//! under/overflows) by use of lmdb-zero's safe API.
//!
//! # Unavailable LMDB APIs
//!
//! - `mdb_env_copy`, `mdb_env_copyfd`: Only the `2`-suffixed versions that
//! take flags are exposed.
//!
//! - `mdb_env_set_userctx`, `mdb_env_get_userctx`: Not generally useful for
//! Rust; unclear how ownership would be expressed; would likely end up forcing
//! an almost-never-used generic arg on `Environment` on everyone.
//!
//! - `mdb_env_set_assert`: Does not seem useful enough to expose.
//!
//! - `mdb_txn_env`, `mdb_cursor_txn`, `mdb_cursor_dbi`: Would allow violating
//! borrow semantics.
//!
//! - `mdb_cmp`, `mdb_dcmp`: Doesn't seem useful; this would basically be a
//! reinterpret cast from the input values to whatever the table comparator
//! expects and then invoking the `Ord` implementation. If the types match,
//! this is strictly inferior to just using `Ord` directly; if they don't, it
//! at best is obfuscating, and at worst completely broken.
//!
//! - `mdb_set_relfunc`, `mdb_set_relctx`: Currently a noop in LMDB. Even if it
//! weren't, it is unlikely that there is any remotely safe or convenient way
//! to provide an interface to it.
//!
//! - `mdb_reader_list`: Doesn't seem useful enough to expose.
#![deny(missing_docs)]
extern crate liblmdb_sys as ffi;
extern crate libc;
extern crate supercow;
#[macro_use] extern crate bitflags;
#[cfg(test)] extern crate tempdir;
use std::ffi::CStr;
pub use ffi::mdb_mode_t as FileMode;
pub use ffi::mdb_filehandle_t as Fd;
/// Invokes an LMDB FFI call and early-returns `Error::Code` wrapping the
/// status code if it is non-zero. Only usable inside functions returning
/// `Result<_, $crate::Error>`.
macro_rules! lmdb_call {
    ($x:expr) => { {
        let code = $x;
        if 0 != code {
            return Err($crate::Error::Code(code));
        }
    } }
}
/// Returns the LMDB library version as a human-readable static string.
///
/// Falls back to `"(invalid)"` if the C string is not valid UTF-8.
pub fn version_str() -> &'static str {
    let (mut major, mut minor, mut rev): (libc::c_int, libc::c_int, libc::c_int) = (0, 0, 0);
    // SAFETY: `mdb_version` returns a pointer to a static NUL-terminated string.
    let raw = unsafe { ffi::mdb_version(&mut major, &mut minor, &mut rev) };
    unsafe { CStr::from_ptr(raw) }.to_str().unwrap_or("(invalid)")
}
/// Returns the LMDB library version as `(major, minor, revision)`.
pub fn version() -> (i32, i32, i32) {
    let (mut major, mut minor, mut rev): (libc::c_int, libc::c_int, libc::c_int) = (0, 0, 0);
    // SAFETY: the three out-pointers are valid for the duration of the call.
    unsafe {
        ffi::mdb_version(&mut major, &mut minor, &mut rev);
    }
    (major as i32, minor as i32, rev as i32)
}
/// Empty type used to indicate "don't care" when reading values from LMDB.
///
/// `FromLmdbBytes` is implemented for this type by simply returning its only
/// value without inspecting anything. Useful when only the keys (or only the
/// values) of a database are of interest.
pub struct Ignore;
mod mdb_vals;
mod ffi2;
#[cfg(test)] mod test_helpers;
pub mod error;
pub use error::{Error, LmdbResultExt, Result};
mod env;
pub use env::{open, copy, EnvBuilder, Environment, Stat, EnvInfo};
mod dbi;
pub use dbi::{db, Database, DatabaseOptions};
pub mod traits;
mod unaligned;
pub use unaligned::{Unaligned, unaligned};
mod tx;
pub use tx::{ConstTransaction, ReadTransaction, WriteTransaction};
pub use tx::ResetTransaction;
pub use tx::{ConstAccessor, WriteAccessor};
pub use tx::{put, del};
mod cursor;
pub use cursor::{StaleCursor, Cursor};
mod iter;
pub use iter::{CursorIter, MaybeOwned};
|
/// A simple record describing a person.
#[derive(Clone)]
struct Person {
    name: String,
    age: i32,    // years
    height: i32, // presumably inches — TODO confirm against the values in main()
    weight: i32, // presumably pounds — TODO confirm
}
/// Heap-allocates a new `Person` with the given attributes.
fn person_create(name: String, age: i32, height: i32, weight: i32) -> Box<Person> {
    // Expression-style return; the intermediate binding and explicit
    // `return` were redundant.
    Box::new(Person { name, age, height, weight })
}
/// Prints a `Person`'s attributes to stdout, consuming the box.
///
/// Note: `println!` already appends a newline, so the trailing `\n` in each
/// format string yields a blank line after every field; kept as-is to
/// preserve the program's existing output.
fn person_print(who: Box<Person>) {
    // The redundant `-> ()` return type annotation was removed.
    println!("Name: {}\n", who.name);
    println!("\tAge: {}\n", who.age);
    println!("\tHeight: {}\n", who.height);
    println!("\tWeight: {}\n", who.weight);
}
fn main() {
let mut joe: Box<Person> = person_create(String::from("Joe Alex"), 32, 64, 140);
let mut frank: Box<Person> = person_create(String::from("Frank Blank"), 20, 72, 180);
println!("Joe is at memory location {:p}:\n", &joe);
person_print(joe.clone());
println!("Frank is at memory location {:p}:\n", &frank);
person_print(frank.clone());
joe.age += 20;
joe.height -= 2;
joe.weight += 40;
person_print(joe);
frank.age += 20;
frank.weight +=20;
person_print(frank);
}
|
/// Marker type for the parameterless `shutdown` request.
struct ShutdownRequest;
impl Request for ShutdownRequest {
    // Builds the message envelope for a `shutdown` request.
    // NOTE(review): this constructs an `IncompleteRequestMessage` while the
    // signature promises `RequestMessage<ShutdownRequest>` — presumably an
    // implicit conversion or staged builder; confirm against the `Request`
    // trait definition.
    fn make_message() -> RequestMessage<ShutdownRequest> {
        IncompleteRequestMessage {
            method: "shutdown",
            params: ShutdownRequest
        }
    }
}
/// Marker type for the (empty) shutdown response.
/// NOTE(review): casing differs from `ShutdownRequest` ("ShutDown" vs
/// "Shutdown"); consider unifying if no external code depends on the name.
struct ShutDownResponse;
|
#![feature(proc_macro)]
// import macbuild!() and #[register]
extern crate macbuild_macros;
use macbuild_macros::*;
// this generates code to import bootstrap()
macbuild!();
/// Entry point: runs every function registered via `#[register]`.
fn main() {
    bootstrap(); // generated by `macbuild!()`; calls all `#[register]` functions
}
/// First registered function; prints "A" when `bootstrap()` runs.
#[register]
pub fn a() {
    println!("A");
}
/// this module contains the second registered function
mod b;
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use juno::ast::{Node, NodeKind, NodePtr, SourceLoc, SourceRange, Visitor};
mod validate;
/// Allocates a new AST node of the given kind with a zeroed source range.
pub fn node(kind: NodeKind) -> Box<Node> {
    Box::new(Node {
        range: SourceRange {
            file: 0,
            start: SourceLoc { line: 0, col: 0 },
            end: SourceLoc { line: 0, col: 0 },
        },
        kind,
    })
}
/// Builds a two-statement AST by hand and checks that a visitor observes
/// both numeric literals, in order, each with an `ExpressionStatement` parent.
#[test]
#[allow(clippy::float_cmp)]
fn test_visit() {
    use NodeKind::*;
    // Dummy range, we don't care about ranges in this test.
    let range = SourceRange {
        file: 0,
        start: SourceLoc { line: 0, col: 0 },
        end: SourceLoc { line: 0, col: 0 },
    };
    // AST under test: a block containing `1.0;` and `2.0;`.
    let ast = Box::new(Node {
        range,
        kind: BlockStatement {
            body: vec![
                NodePtr::new(Node {
                    range,
                    kind: ExpressionStatement {
                        expression: Box::new(Node {
                            range,
                            kind: NumericLiteral { value: 1.0 },
                        }),
                        directive: None,
                    },
                }),
                NodePtr::new(Node {
                    range,
                    kind: ExpressionStatement {
                        expression: Box::new(Node {
                            range,
                            kind: NumericLiteral { value: 2.0 },
                        }),
                        directive: None,
                    },
                }),
            ],
        },
    });
    // Accumulates the numbers found in the AST.
    struct NumberFinder {
        acc: Vec<f64>,
    }
    impl<'a> Visitor<'a> for NumberFinder {
        fn call(&mut self, node: &'a Node, parent: Option<&'a Node>) {
            if let NumericLiteral { value } = &node.kind {
                assert!(matches!(parent.unwrap().kind, ExpressionStatement { .. }));
                self.acc.push(*value);
            }
            // Recurse so literals nested inside statements are reached.
            node.visit_children(self);
        }
    }
    let mut visitor = NumberFinder { acc: vec![] };
    ast.visit(&mut visitor, None);
    assert_eq!(visitor.acc, [1.0, 2.0]);
}
|
fn main() {
    // A function pointer lets us store `plus_one` in a variable and call it
    // through that variable, as long as the signature matches.
    let f: fn(i32) -> i32 = plus_one;
    let seven = f(6);
    println!("{}", seven);
}
/// Returns the successor of `i`.
fn plus_one(i: i32) -> i32 {
    1 + i
}
|
#![allow(dead_code)]
#![allow(unused_variables)]
fn main() {
    /*
       Methods are like functions, but they are defined within the context
       of a struct (or enum / trait) and take `self` as their first
       parameter. Associated functions (below) omit `self`.
    */
    let rc1 = Rect {width: 25, height: 40};
    let rc2 = Rect {width: 15, height: 30};
    let rc3 = Rect {width: 40, height: 50};
    // Method call on an instance.
    println!("rc1: {}", rc1.area());
    // Methods can take other instances by reference.
    println!("{}", rc1.can_hold(&rc2));
    println!("{}", rc1.can_hold(&rc3));
    // Associated function ("static method"): called on the type, not an instance.
    println!("{:#?}", Rect::square(25)); // {:?} would print on one line
    /*
       Summary (from the official book):
       - Structs keep associated pieces of data connected.
       - Methods define the behavior instances of a struct have.
       - Associated functions namespace functionality without an instance.
    */
}
/* ----- ----- ----- structs ----- ----- ----- */
/// An axis-aligned rectangle with integer dimensions.
#[derive(Debug)]
struct Rect {
    width: u32,
    height: u32,
}
/* ----- ----- ----- structs EOF ----- ----- ----- */
/* ----- ----- ----- impl #1 ----- ----- ----- */
impl Rect {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rect) -> bool {
(
self.width > other.width && // 'other' is just a inst of Rect
self.height > other.height // ... just don't overthink of it
)
}
fn square(size: u32) -> Rect {
/*
This one does NOT take 'self' as the 1st param.
It was called "associated functions".
Also, it was called 'static method' in other prog-langs.
How do ya call it?
appeared before String::from
call ur own func STRUCT::Function(param)
*/
Rect {width: size, height: size}
}
}
/* ----- ----- ----- impl EOF ----- ----- ----- */
/* ----- ----- ----- impl #2 ----- ----- ----- */
impl Rect {
    /*
       Each struct may have multiple `impl` blocks; splitting them up is
       equivalent to defining everything in a single block.
    */
    /// Prints a greeting (associated function, no instance required).
    fn greeting() {
        // `println!("{}", "literal")` was redundant (clippy `print_literal`);
        // printing the literal directly produces identical output.
        println!("Hallo! ich bin Julia.");
    }
}
impl Rect {
    /// Prints a farewell (associated function).
    fn goodbye() {
        // Same simplification as `greeting`: print the literal directly.
        println!("Auf Wiedersehen.");
    }
}
/* ----- ----- ----- impl EOF ----- ----- ----- */ |
use std::{
collections::HashMap,
fmt,
future::Future,
io::{self, Read, Seek, SeekFrom},
mem,
pin::Pin,
process::exit,
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
task::{Context, Poll},
thread,
time::{Duration, Instant},
};
use futures_util::{
future, future::FusedFuture, stream::futures_unordered::FuturesUnordered, StreamExt,
TryFutureExt,
};
use parking_lot::Mutex;
use symphonia::core::io::MediaSource;
use tokio::sync::{mpsc, oneshot};
use crate::{
audio::{
AudioDecrypt, AudioFile, StreamLoaderController, READ_AHEAD_BEFORE_PLAYBACK,
READ_AHEAD_DURING_PLAYBACK,
},
audio_backend::Sink,
config::{Bitrate, NormalisationMethod, NormalisationType, PlayerConfig},
convert::Converter,
core::{util::SeqGenerator, Error, Session, SpotifyId},
decoder::{AudioDecoder, AudioPacket, AudioPacketPosition, SymphoniaDecoder},
metadata::audio::{AudioFileFormat, AudioFiles, AudioItem},
mixer::VolumeGetter,
};
#[cfg(feature = "passthrough-decoder")]
use crate::decoder::PassthroughDecoder;
use crate::SAMPLES_PER_SECOND;
// Start preloading the next track this long before the current one ends.
const PRELOAD_NEXT_TRACK_BEFORE_END_DURATION_MS: u32 = 30000;
// Voltage (amplitude) ratios use 20·log10 for dB conversion.
pub const DB_VOLTAGE_RATIO: f64 = 20.0;
// Full-scale PCM amplitude (0.0 dBFS).
pub const PCM_AT_0DBFS: f64 = 1.0;
// Spotify inserts a custom Ogg packet at the start with custom metadata values, that you would
// otherwise expect in Vorbis comments. This packet isn't well-formed and players may balk at it.
const SPOTIFY_OGG_HEADER_END: u64 = 0xa7;
/// Result type returned by player operations.
pub type PlayerResult = Result<(), Error>;
/// Public handle to the player: owns the command channel and the internal
/// thread's join handle. Both are dropped on shutdown (see `Drop`).
pub struct Player {
    commands: Option<mpsc::UnboundedSender<PlayerCommand>>,
    thread_handle: Option<thread::JoinHandle<()>>,
}
/// Lifecycle state of the audio sink.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum SinkStatus {
    Running,
    Closed,
    TemporarilyClosed,
}
/// Callback invoked when the sink status changes.
pub type SinkEventCallback = Box<dyn Fn(SinkStatus) + Send>;
/// State owned by the player thread; driven as a future via `block_on`
/// (see `Player::new`).
struct PlayerInternal {
    session: Session,
    config: PlayerConfig,
    // Receiving end of the `Player` handle's command channel.
    commands: mpsc::UnboundedReceiver<PlayerCommand>,
    // Join handles of in-flight track-load threads, keyed by thread id.
    load_handles: Arc<Mutex<HashMap<thread::ThreadId, thread::JoinHandle<()>>>>,
    state: PlayerState,
    preload: PlayerPreload,
    sink: Box<dyn Sink>,
    sink_status: SinkStatus,
    sink_event_callback: Option<SinkEventCallback>,
    volume_getter: Box<dyn VolumeGetter + Send>,
    // Every registered listener receives a copy of each PlayerEvent.
    event_senders: Vec<mpsc::UnboundedSender<PlayerEvent>>,
    converter: Converter,
    // Running state of the dynamic-normalisation limiter.
    normalisation_integrator: f64,
    normalisation_peak: f64,
    auto_normalise_as_album: bool,
    player_id: usize,
    play_request_id_generator: SeqGenerator<u64>,
}
// Monotonic counter used to assign each Player instance a unique id.
static PLAYER_COUNTER: AtomicUsize = AtomicUsize::new(0);
/// Messages sent from the public `Player` handle to the player thread.
enum PlayerCommand {
    Load {
        track_id: SpotifyId,
        play: bool,
        position_ms: u32,
    },
    Preload {
        track_id: SpotifyId,
    },
    Play,
    Pause,
    Stop,
    Seek(u32),
    SetSession(Session),
    AddEventSender(mpsc::UnboundedSender<PlayerEvent>),
    SetSinkEventCallback(Option<SinkEventCallback>),
    EmitVolumeChangedEvent(u16),
    SetAutoNormaliseAsAlbum(bool),
    // The Emit* commands only re-broadcast external state changes as events.
    EmitSessionDisconnectedEvent {
        connection_id: String,
        user_name: String,
    },
    EmitSessionConnectedEvent {
        connection_id: String,
        user_name: String,
    },
    EmitSessionClientChangedEvent {
        client_id: String,
        client_name: String,
        client_brand_name: String,
        client_model_name: String,
    },
    EmitFilterExplicitContentChangedEvent(bool),
    EmitShuffleChangedEvent(bool),
    EmitRepeatChangedEvent(bool),
    EmitAutoPlayChangedEvent(bool),
}
/// Events broadcast by the player to every registered event sender
/// (see `Player::get_player_event_channel`).
#[derive(Debug, Clone)]
pub enum PlayerEvent {
    // Play request id changed
    PlayRequestIdChanged {
        play_request_id: u64,
    },
    // Fired when the player is stopped (e.g. by issuing a "stop" command to the player).
    Stopped {
        play_request_id: u64,
        track_id: SpotifyId,
    },
    // The player is delayed by loading a track.
    Loading {
        play_request_id: u64,
        track_id: SpotifyId,
        position_ms: u32,
    },
    // The player is preloading a track.
    Preloading {
        track_id: SpotifyId,
    },
    // The player is playing a track.
    // This event is issued at the start of playback of whenever the position must be communicated
    // because it is out of sync. This includes:
    // start of a track
    // un-pausing
    // after a seek
    // after a buffer-underrun
    Playing {
        play_request_id: u64,
        track_id: SpotifyId,
        position_ms: u32,
    },
    // The player entered a paused state.
    Paused {
        play_request_id: u64,
        track_id: SpotifyId,
        position_ms: u32,
    },
    // The player thinks it's a good idea to issue a preload command for the next track now.
    // This event is intended for use within spirc.
    TimeToPreloadNextTrack {
        play_request_id: u64,
        track_id: SpotifyId,
    },
    // The player reached the end of a track.
    // This event is intended for use within spirc. Spirc will respond by issuing another command.
    EndOfTrack {
        play_request_id: u64,
        track_id: SpotifyId,
    },
    // The player was unable to load the requested track.
    Unavailable {
        play_request_id: u64,
        track_id: SpotifyId,
    },
    // The mixer volume was set to a new level.
    VolumeChanged {
        volume: u16,
    },
    // Emitted when the playback position had to be corrected.
    PositionCorrection {
        play_request_id: u64,
        track_id: SpotifyId,
        position_ms: u32,
    },
    // Emitted after a seek completed.
    Seeked {
        play_request_id: u64,
        track_id: SpotifyId,
        position_ms: u32,
    },
    // The track being played has changed; carries its full metadata.
    TrackChanged {
        audio_item: Box<AudioItem>,
    },
    SessionConnected {
        connection_id: String,
        user_name: String,
    },
    SessionDisconnected {
        connection_id: String,
        user_name: String,
    },
    SessionClientChanged {
        client_id: String,
        client_name: String,
        client_brand_name: String,
        client_model_name: String,
    },
    ShuffleChanged {
        shuffle: bool,
    },
    RepeatChanged {
        repeat: bool,
    },
    AutoPlayChanged {
        auto_play: bool,
    },
    FilterExplicitContentChanged {
        filter: bool,
    },
}
impl PlayerEvent {
    /// Returns the `play_request_id` carried by this event, or `None` for
    /// event kinds that are not tied to a particular play request.
    pub fn get_play_request_id(&self) -> Option<u64> {
        use PlayerEvent::*;
        match self {
            Loading {
                play_request_id, ..
            }
            | Unavailable {
                play_request_id, ..
            }
            | Playing {
                play_request_id, ..
            }
            | TimeToPreloadNextTrack {
                play_request_id, ..
            }
            | EndOfTrack {
                play_request_id, ..
            }
            | Paused {
                play_request_id, ..
            }
            | Stopped {
                play_request_id, ..
            }
            | PositionCorrection {
                play_request_id, ..
            }
            | Seeked {
                play_request_id, ..
            } => Some(*play_request_id),
            _ => None,
        }
    }
}
pub type PlayerEventChannel = mpsc::UnboundedReceiver<PlayerEvent>;
/// Converts a decibel value to a linear voltage ratio.
pub fn db_to_ratio(db: f64) -> f64 {
    10.0_f64.powf(db / DB_VOLTAGE_RATIO)
}
/// Converts a linear voltage ratio to decibels.
pub fn ratio_to_db(ratio: f64) -> f64 {
    DB_VOLTAGE_RATIO * ratio.log10()
}
/// Converts a smoothing time constant to a per-sample filter coefficient.
pub fn duration_to_coefficient(duration: Duration) -> f64 {
    (-1.0 / (duration.as_secs_f64() * SAMPLES_PER_SECOND as f64)).exp()
}
/// Inverse of `duration_to_coefficient`: recovers the time constant.
pub fn coefficient_to_duration(coefficient: f64) -> Duration {
    Duration::from_secs_f64(-1.0 / coefficient.ln() / SAMPLES_PER_SECOND as f64)
}
/// ReplayGain-style gain and peak values for a track and its album.
#[derive(Clone, Copy, Debug)]
pub struct NormalisationData {
    // Spotify provides these as `f32`, but audio metadata can contain up to `f64`.
    // Also, this negates the need for casting during sample processing.
    pub track_gain_db: f64,
    pub track_peak: f64,
    pub album_gain_db: f64,
    pub album_peak: f64,
}
impl Default for NormalisationData {
    // Neutral defaults: 0 dB gain and full-scale peak leave audio untouched.
    fn default() -> Self {
        Self {
            track_gain_db: 0.0,
            track_peak: 1.0,
            album_gain_db: 0.0,
            album_peak: 1.0,
        }
    }
}
impl NormalisationData {
    /// Reads Spotify's custom normalisation block from the head of an Ogg file.
    ///
    /// The block is four consecutive little-endian `f32` values at a fixed
    /// offset. On a failed seek (e.g. the file is shorter than the header),
    /// this logs and falls back to `NormalisationData::default()` rather
    /// than returning an error; I/O errors are propagated.
    fn parse_from_ogg<T: Read + Seek>(mut file: T) -> io::Result<NormalisationData> {
        const SPOTIFY_NORMALIZATION_HEADER_START_OFFSET: u64 = 144;
        const NORMALISATION_DATA_SIZE: usize = 16;
        let newpos = file.seek(SeekFrom::Start(SPOTIFY_NORMALIZATION_HEADER_START_OFFSET))?;
        if newpos != SPOTIFY_NORMALIZATION_HEADER_START_OFFSET {
            // Bug fix: the message previously named a non-existent
            // "parse_from_file", making the log misleading.
            error!(
                "NormalisationData::parse_from_ogg seeking to {} but position is now {}",
                SPOTIFY_NORMALIZATION_HEADER_START_OFFSET, newpos
            );
            error!("Falling back to default (non-track and non-album) normalisation data.");
            return Ok(NormalisationData::default());
        }
        let mut buf = [0u8; NORMALISATION_DATA_SIZE];
        file.read_exact(&mut buf)?;
        // Widen each f32 to f64 so later sample processing needs no casts.
        let track_gain_db = f32::from_le_bytes([buf[0], buf[1], buf[2], buf[3]]) as f64;
        let track_peak = f32::from_le_bytes([buf[4], buf[5], buf[6], buf[7]]) as f64;
        let album_gain_db = f32::from_le_bytes([buf[8], buf[9], buf[10], buf[11]]) as f64;
        let album_peak = f32::from_le_bytes([buf[12], buf[13], buf[14], buf[15]]) as f64;
        Ok(Self {
            track_gain_db,
            track_peak,
            album_gain_db,
            album_peak,
        })
    }
    /// Computes the linear gain factor to apply for `data` under `config`.
    ///
    /// Returns 1.0 (unity gain) when normalisation is disabled.
    fn get_factor(config: &PlayerConfig, data: NormalisationData) -> f64 {
        if !config.normalisation {
            return 1.0;
        }
        let (gain_db, gain_peak) = if config.normalisation_type == NormalisationType::Album {
            (data.album_gain_db, data.album_peak)
        } else {
            (data.track_gain_db, data.track_peak)
        };
        // As per the ReplayGain 1.0 & 2.0 (proposed) spec:
        // https://wiki.hydrogenaud.io/index.php?title=ReplayGain_1.0_specification#Clipping_prevention
        // https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Clipping_prevention
        let normalisation_factor = if config.normalisation_method == NormalisationMethod::Basic {
            // For Basic Normalisation, factor = min(ratio of (ReplayGain + PreGain), 1.0 / peak level).
            // https://wiki.hydrogenaud.io/index.php?title=ReplayGain_1.0_specification#Peak_amplitude
            // https://wiki.hydrogenaud.io/index.php?title=ReplayGain_2.0_specification#Peak_amplitude
            // We then limit that to 1.0 as not to exceed dBFS (0.0 dB).
            let factor = f64::min(
                db_to_ratio(gain_db + config.normalisation_pregain_db),
                PCM_AT_0DBFS / gain_peak,
            );
            if factor > PCM_AT_0DBFS {
                info!(
                    "Lowering gain by {:.2} dB for the duration of this track to avoid potentially exceeding dBFS.",
                    ratio_to_db(factor)
                );
                PCM_AT_0DBFS
            } else {
                factor
            }
        } else {
            // For Dynamic Normalisation it's up to the player to decide,
            // factor = ratio of (ReplayGain + PreGain).
            // We then let the dynamic limiter handle gain reduction.
            let factor = db_to_ratio(gain_db + config.normalisation_pregain_db);
            let threshold_ratio = db_to_ratio(config.normalisation_threshold_dbfs);
            if factor > PCM_AT_0DBFS {
                let factor_db = gain_db + config.normalisation_pregain_db;
                let limiting_db = factor_db + config.normalisation_threshold_dbfs.abs();
                // Message typo fixed: "it's" -> "its".
                warn!(
                    "This track may exceed dBFS by {:.2} dB and be subject to {:.2} dB of dynamic limiting at its peak.",
                    factor_db, limiting_db
                );
            } else if factor > threshold_ratio {
                let limiting_db = gain_db
                    + config.normalisation_pregain_db
                    + config.normalisation_threshold_dbfs.abs();
                info!(
                    "This track may be subject to {:.2} dB of dynamic limiting at its peak.",
                    limiting_db
                );
            }
            factor
        };
        debug!("Normalisation Data: {:?}", data);
        debug!(
            "Calculated Normalisation Factor for {:?}: {:.2}%",
            config.normalisation_type,
            normalisation_factor * 100.0
        );
        normalisation_factor
    }
}
impl Player {
    /// Spawns the player thread and returns a handle to it.
    ///
    /// `sink_builder` is invoked on the player thread to construct the audio
    /// sink. The internal future is driven by a dedicated Tokio runtime on
    /// that thread because it contains blocking code.
    pub fn new<F>(
        config: PlayerConfig,
        session: Session,
        volume_getter: Box<dyn VolumeGetter + Send>,
        sink_builder: F,
    ) -> Arc<Self>
    where
        F: FnOnce() -> Box<dyn Sink> + Send + 'static,
    {
        let (cmd_tx, cmd_rx) = mpsc::unbounded_channel();
        // Log the effective normalisation configuration up front.
        if config.normalisation {
            debug!("Normalisation Type: {:?}", config.normalisation_type);
            debug!(
                "Normalisation Pregain: {:.1} dB",
                config.normalisation_pregain_db
            );
            debug!(
                "Normalisation Threshold: {:.1} dBFS",
                config.normalisation_threshold_dbfs
            );
            debug!("Normalisation Method: {:?}", config.normalisation_method);
            if config.normalisation_method == NormalisationMethod::Dynamic {
                // as_millis() has rounding errors (truncates)
                debug!(
                    "Normalisation Attack: {:.0} ms",
                    coefficient_to_duration(config.normalisation_attack_cf).as_secs_f64() * 1000.
                );
                debug!(
                    "Normalisation Release: {:.0} ms",
                    coefficient_to_duration(config.normalisation_release_cf).as_secs_f64() * 1000.
                );
                debug!("Normalisation Knee: {} dB", config.normalisation_knee_db);
            }
        }
        let handle = thread::spawn(move || {
            let player_id = PLAYER_COUNTER.fetch_add(1, Ordering::AcqRel);
            debug!("new Player [{}]", player_id);
            let converter = Converter::new(config.ditherer);
            let internal = PlayerInternal {
                session,
                config,
                commands: cmd_rx,
                load_handles: Arc::new(Mutex::new(HashMap::new())),
                state: PlayerState::Stopped,
                preload: PlayerPreload::None,
                sink: sink_builder(),
                sink_status: SinkStatus::Closed,
                sink_event_callback: None,
                volume_getter,
                event_senders: vec![],
                converter,
                normalisation_peak: 0.0,
                normalisation_integrator: 0.0,
                auto_normalise_as_album: false,
                player_id,
                play_request_id_generator: SeqGenerator::new(0),
            };
            // While PlayerInternal is written as a future, it still contains blocking code.
            // It must be run by using block_on() in a dedicated thread.
            let runtime = tokio::runtime::Runtime::new().expect("Failed to create Tokio runtime");
            runtime.block_on(internal);
            debug!("PlayerInternal thread finished.");
        });
        Arc::new(Self {
            commands: Some(cmd_tx),
            thread_handle: Some(handle),
        })
    }
    /// Returns true once the player thread has exited (or never existed).
    pub fn is_invalid(&self) -> bool {
        if let Some(handle) = self.thread_handle.as_ref() {
            return handle.is_finished();
        }
        true
    }
    // Sends a command to the player thread; failures are logged, not propagated.
    fn command(&self, cmd: PlayerCommand) {
        if let Some(commands) = self.commands.as_ref() {
            if let Err(e) = commands.send(cmd) {
                error!("Player Commands Error: {}", e);
            }
        }
    }
    /// Loads `track_id`, optionally starting playback at `position_ms`.
    pub fn load(&self, track_id: SpotifyId, start_playing: bool, position_ms: u32) {
        self.command(PlayerCommand::Load {
            track_id,
            play: start_playing,
            position_ms,
        });
    }
    /// Hints the player to start fetching `track_id` ahead of time.
    pub fn preload(&self, track_id: SpotifyId) {
        self.command(PlayerCommand::Preload { track_id });
    }
    /// Resumes playback.
    pub fn play(&self) {
        self.command(PlayerCommand::Play)
    }
    /// Pauses playback.
    pub fn pause(&self) {
        self.command(PlayerCommand::Pause)
    }
    /// Stops playback.
    pub fn stop(&self) {
        self.command(PlayerCommand::Stop)
    }
    /// Seeks within the current track.
    pub fn seek(&self, position_ms: u32) {
        self.command(PlayerCommand::Seek(position_ms));
    }
    /// Replaces the session used for subsequent loads.
    pub fn set_session(&self, session: Session) {
        self.command(PlayerCommand::SetSession(session));
    }
    /// Registers and returns a new channel that receives all player events.
    pub fn get_player_event_channel(&self) -> PlayerEventChannel {
        let (event_sender, event_receiver) = mpsc::unbounded_channel();
        self.command(PlayerCommand::AddEventSender(event_sender));
        event_receiver
    }
    /// Resolves once the current track ends or playback is stopped.
    pub async fn await_end_of_track(&self) {
        let mut channel = self.get_player_event_channel();
        while let Some(event) = channel.recv().await {
            if matches!(
                event,
                PlayerEvent::EndOfTrack { .. } | PlayerEvent::Stopped { .. }
            ) {
                return;
            }
        }
    }
    /// Installs (or clears) the callback notified on sink status changes.
    pub fn set_sink_event_callback(&self, callback: Option<SinkEventCallback>) {
        self.command(PlayerCommand::SetSinkEventCallback(callback));
    }
    /// Re-broadcasts a mixer volume change as a player event.
    pub fn emit_volume_changed_event(&self, volume: u16) {
        self.command(PlayerCommand::EmitVolumeChangedEvent(volume));
    }
    /// Toggles album-based normalisation for auto-play contexts.
    pub fn set_auto_normalise_as_album(&self, setting: bool) {
        self.command(PlayerCommand::SetAutoNormaliseAsAlbum(setting));
    }
    /// Re-broadcasts an explicit-content filter change as a player event.
    pub fn emit_filter_explicit_content_changed_event(&self, filter: bool) {
        self.command(PlayerCommand::EmitFilterExplicitContentChangedEvent(filter));
    }
    /// Re-broadcasts a session-connected notification as a player event.
    pub fn emit_session_connected_event(&self, connection_id: String, user_name: String) {
        self.command(PlayerCommand::EmitSessionConnectedEvent {
            connection_id,
            user_name,
        });
    }
    /// Re-broadcasts a session-disconnected notification as a player event.
    pub fn emit_session_disconnected_event(&self, connection_id: String, user_name: String) {
        self.command(PlayerCommand::EmitSessionDisconnectedEvent {
            connection_id,
            user_name,
        });
    }
    /// Re-broadcasts a client-changed notification as a player event.
    pub fn emit_session_client_changed_event(
        &self,
        client_id: String,
        client_name: String,
        client_brand_name: String,
        client_model_name: String,
    ) {
        self.command(PlayerCommand::EmitSessionClientChangedEvent {
            client_id,
            client_name,
            client_brand_name,
            client_model_name,
        });
    }
    /// Re-broadcasts a shuffle-state change as a player event.
    pub fn emit_shuffle_changed_event(&self, shuffle: bool) {
        self.command(PlayerCommand::EmitShuffleChangedEvent(shuffle));
    }
    /// Re-broadcasts a repeat-state change as a player event.
    pub fn emit_repeat_changed_event(&self, repeat: bool) {
        self.command(PlayerCommand::EmitRepeatChangedEvent(repeat));
    }
    /// Re-broadcasts an auto-play change as a player event.
    pub fn emit_auto_play_changed_event(&self, auto_play: bool) {
        self.command(PlayerCommand::EmitAutoPlayChangedEvent(auto_play));
    }
}
impl Drop for Player {
    // Dropping the command sender closes the channel, which signals the
    // internal loop to exit; then we join the player thread.
    fn drop(&mut self) {
        debug!("Shutting down player thread ...");
        self.commands = None;
        if let Some(handle) = self.thread_handle.take() {
            if let Err(e) = handle.join() {
                error!("Player thread Error: {:?}", e);
            }
        }
    }
}
/// Everything produced by loading a track, handed from the loader to the
/// playback states.
struct PlayerLoadedTrackData {
    decoder: Decoder,
    normalisation_data: NormalisationData,
    stream_loader_controller: StreamLoaderController,
    audio_item: AudioItem,
    bytes_per_second: usize,
    duration_ms: u32,
    stream_position_ms: u32,
    is_explicit: bool,
}
/// Preload progress for the upcoming track.
enum PlayerPreload {
    None,
    // A load future is in flight.
    Loading {
        track_id: SpotifyId,
        loader: Pin<Box<dyn FusedFuture<Output = Result<PlayerLoadedTrackData, ()>> + Send>>,
    },
    // The track is fully prepared for handover.
    Ready {
        track_id: SpotifyId,
        loaded_track: Box<PlayerLoadedTrackData>,
    },
}
/// Boxed decoder trait object shared by all playback states.
type Decoder = Box<dyn AudioDecoder + Send>;
/// Playback state machine of the internal player.
///
/// `Invalid` is only ever held transiently while a transition helper
/// deconstructs the previous state (see `mem::replace` in the impl below);
/// observing it anywhere else is a fatal bug.
enum PlayerState {
    Stopped,
    Loading {
        track_id: SpotifyId,
        play_request_id: u64,
        start_playback: bool,
        loader: Pin<Box<dyn FusedFuture<Output = Result<PlayerLoadedTrackData, ()>> + Send>>,
    },
    Paused {
        track_id: SpotifyId,
        play_request_id: u64,
        decoder: Decoder,
        audio_item: AudioItem,
        normalisation_data: NormalisationData,
        normalisation_factor: f64,
        stream_loader_controller: StreamLoaderController,
        bytes_per_second: usize,
        duration_ms: u32,
        stream_position_ms: u32,
        suggested_to_preload_next_track: bool,
        is_explicit: bool,
    },
    Playing {
        track_id: SpotifyId,
        play_request_id: u64,
        decoder: Decoder,
        normalisation_data: NormalisationData,
        audio_item: AudioItem,
        normalisation_factor: f64,
        stream_loader_controller: StreamLoaderController,
        bytes_per_second: usize,
        duration_ms: u32,
        stream_position_ms: u32,
        reported_nominal_start_time: Option<Instant>,
        suggested_to_preload_next_track: bool,
        is_explicit: bool,
    },
    EndOfTrack {
        track_id: SpotifyId,
        play_request_id: u64,
        loaded_track: PlayerLoadedTrackData,
    },
    Invalid,
}
impl PlayerState {
    /// True only in the `Playing` state; exits the process if `Invalid`.
    fn is_playing(&self) -> bool {
        use self::PlayerState::*;
        match *self {
            Stopped | EndOfTrack { .. } | Paused { .. } | Loading { .. } => false,
            Playing { .. } => true,
            Invalid => {
                error!("PlayerState::is_playing in invalid state");
                exit(1);
            }
        }
    }
    #[allow(dead_code)]
    fn is_stopped(&self) -> bool {
        use self::PlayerState::*;
        matches!(self, Stopped)
    }
    #[allow(dead_code)]
    fn is_loading(&self) -> bool {
        use self::PlayerState::*;
        matches!(self, Loading { .. })
    }
    /// Mutable access to the decoder while paused or playing; `None` otherwise.
    fn decoder(&mut self) -> Option<&mut Decoder> {
        use self::PlayerState::*;
        match *self {
            Stopped | EndOfTrack { .. } | Loading { .. } => None,
            Paused {
                ref mut decoder, ..
            }
            | Playing {
                ref mut decoder, ..
            } => Some(decoder),
            Invalid => {
                error!("PlayerState::decoder in invalid state");
                exit(1);
            }
        }
    }
    /// Transition `Playing` -> `EndOfTrack`, carrying over the loaded track
    /// so it can be reused. Exits the process in any other state.
    fn playing_to_end_of_track(&mut self) {
        use self::PlayerState::*;
        // `Invalid` is parked in `self` while the old state is deconstructed.
        let new_state = mem::replace(self, Invalid);
        match new_state {
            Playing {
                track_id,
                play_request_id,
                decoder,
                duration_ms,
                bytes_per_second,
                normalisation_data,
                stream_loader_controller,
                stream_position_ms,
                is_explicit,
                audio_item,
                ..
            } => {
                *self = EndOfTrack {
                    track_id,
                    play_request_id,
                    loaded_track: PlayerLoadedTrackData {
                        decoder,
                        normalisation_data,
                        stream_loader_controller,
                        audio_item,
                        bytes_per_second,
                        duration_ms,
                        stream_position_ms,
                        is_explicit,
                    },
                };
            }
            _ => {
                error!(
                    "Called playing_to_end_of_track in non-playing state: {:?}",
                    new_state
                );
                exit(1);
            }
        }
    }
    /// Transition `Paused` -> `Playing`, back-dating the nominal start time
    /// by the current stream position. Exits the process in any other state.
    fn paused_to_playing(&mut self) {
        use self::PlayerState::*;
        let new_state = mem::replace(self, Invalid);
        match new_state {
            Paused {
                track_id,
                play_request_id,
                decoder,
                audio_item,
                normalisation_data,
                normalisation_factor,
                stream_loader_controller,
                duration_ms,
                bytes_per_second,
                stream_position_ms,
                suggested_to_preload_next_track,
                is_explicit,
            } => {
                *self = Playing {
                    track_id,
                    play_request_id,
                    decoder,
                    audio_item,
                    normalisation_data,
                    normalisation_factor,
                    stream_loader_controller,
                    duration_ms,
                    bytes_per_second,
                    stream_position_ms,
                    // checked_sub: may be None if the position exceeds the
                    // process uptime.
                    reported_nominal_start_time: Instant::now()
                        .checked_sub(Duration::from_millis(stream_position_ms as u64)),
                    suggested_to_preload_next_track,
                    is_explicit,
                };
            }
            _ => {
                error!(
                    "PlayerState::paused_to_playing in invalid state: {:?}",
                    new_state
                );
                exit(1);
            }
        }
    }
    /// Transition `Playing` -> `Paused`, dropping the nominal start time.
    /// Exits the process in any other state.
    fn playing_to_paused(&mut self) {
        use self::PlayerState::*;
        let new_state = mem::replace(self, Invalid);
        match new_state {
            Playing {
                track_id,
                play_request_id,
                decoder,
                audio_item,
                normalisation_data,
                normalisation_factor,
                stream_loader_controller,
                duration_ms,
                bytes_per_second,
                stream_position_ms,
                suggested_to_preload_next_track,
                is_explicit,
                ..
            } => {
                *self = Paused {
                    track_id,
                    play_request_id,
                    decoder,
                    audio_item,
                    normalisation_data,
                    normalisation_factor,
                    stream_loader_controller,
                    duration_ms,
                    bytes_per_second,
                    stream_position_ms,
                    suggested_to_preload_next_track,
                    is_explicit,
                };
            }
            _ => {
                error!(
                    "PlayerState::playing_to_paused in invalid state: {:?}",
                    new_state
                );
                exit(1);
            }
        }
    }
}
/// Helper that resolves and opens tracks for playback.
struct PlayerTrackLoader {
    session: Session,
    config: PlayerConfig,
}
impl PlayerTrackLoader {
    /// Returns a playable `AudioItem`: the item itself when it is available
    /// and has files, otherwise the first available alternative, else `None`.
    async fn find_available_alternative(&self, audio_item: AudioItem) -> Option<AudioItem> {
        if let Err(e) = audio_item.availability {
            error!("Track is unavailable: {}", e);
            None
        } else if !audio_item.files.is_empty() {
            Some(audio_item)
        } else if let Some(alternatives) = &audio_item.alternatives {
            // Fetch all alternatives concurrently and take the first that
            // resolves successfully and is available.
            let alternatives: FuturesUnordered<_> = alternatives
                .iter()
                .map(|alt_id| AudioItem::get_file(&self.session, *alt_id))
                .collect();
            alternatives
                .filter_map(|x| future::ready(x.ok()))
                .filter(|x| future::ready(x.availability.is_ok()))
                .next()
                .await
        } else {
            error!("Track should be available, but no alternatives found.");
            None
        }
    }
    /// Estimated stream data rate in bytes per second for `format`,
    /// derived from the nominal bitrate (kbit/s ÷ 8 → KiB/s, × 1024 → B/s).
    fn stream_data_rate(&self, format: AudioFileFormat) -> usize {
        let kbps = match format {
            AudioFileFormat::OGG_VORBIS_96 => 12,
            AudioFileFormat::OGG_VORBIS_160 => 20,
            AudioFileFormat::OGG_VORBIS_320 => 40,
            AudioFileFormat::MP3_256 => 32,
            AudioFileFormat::MP3_320 => 40,
            AudioFileFormat::MP3_160 => 20,
            AudioFileFormat::MP3_96 => 12,
            AudioFileFormat::MP3_160_ENC => 20,
            AudioFileFormat::AAC_24 => 3,
            AudioFileFormat::AAC_48 => 6,
            AudioFileFormat::FLAC_FLAC => 112, // assume 900 kbit/s on average
        };
        kbps * 1024
    }
async fn load_track(
&self,
spotify_id: SpotifyId,
position_ms: u32,
) -> Option<PlayerLoadedTrackData> {
let audio_item = match AudioItem::get_file(&self.session, spotify_id).await {
Ok(audio) => match self.find_available_alternative(audio).await {
Some(audio) => audio,
None => {
warn!(
"<{}> is not available",
spotify_id.to_uri().unwrap_or_default()
);
return None;
}
},
Err(e) => {
error!("Unable to load audio item: {:?}", e);
return None;
}
};
info!(
"Loading <{}> with Spotify URI <{}>",
audio_item.name, audio_item.uri
);
// (Most) podcasts seem to support only 96 kbps Ogg Vorbis, so fall back to it
let formats = match self.config.bitrate {
Bitrate::Bitrate96 => [
AudioFileFormat::OGG_VORBIS_96,
AudioFileFormat::MP3_96,
AudioFileFormat::OGG_VORBIS_160,
AudioFileFormat::MP3_160,
AudioFileFormat::MP3_256,
AudioFileFormat::OGG_VORBIS_320,
AudioFileFormat::MP3_320,
],
Bitrate::Bitrate160 => [
AudioFileFormat::OGG_VORBIS_160,
AudioFileFormat::MP3_160,
AudioFileFormat::OGG_VORBIS_96,
AudioFileFormat::MP3_96,
AudioFileFormat::MP3_256,
AudioFileFormat::OGG_VORBIS_320,
AudioFileFormat::MP3_320,
],
Bitrate::Bitrate320 => [
AudioFileFormat::OGG_VORBIS_320,
AudioFileFormat::MP3_320,
AudioFileFormat::MP3_256,
AudioFileFormat::OGG_VORBIS_160,
AudioFileFormat::MP3_160,
AudioFileFormat::OGG_VORBIS_96,
AudioFileFormat::MP3_96,
],
};
let (format, file_id) =
match formats
.iter()
.find_map(|format| match audio_item.files.get(format) {
Some(&file_id) => Some((*format, file_id)),
_ => None,
}) {
Some(t) => t,
None => {
warn!(
"<{}> is not available in any supported format",
audio_item.name
);
return None;
}
};
let bytes_per_second = self.stream_data_rate(format);
// This is only a loop to be able to reload the file if an error occurred
// while opening a cached file.
loop {
let encrypted_file = AudioFile::open(&self.session, file_id, bytes_per_second);
let encrypted_file = match encrypted_file.await {
Ok(encrypted_file) => encrypted_file,
Err(e) => {
error!("Unable to load encrypted file: {:?}", e);
return None;
}
};
let is_cached = encrypted_file.is_cached();
let stream_loader_controller = encrypted_file.get_stream_loader_controller().ok()?;
// Not all audio files are encrypted. If we can't get a key, try loading the track
// without decryption. If the file was encrypted after all, the decoder will fail
// parsing and bail out, so we should be safe from outputting ear-piercing noise.
let key = match self.session.audio_key().request(spotify_id, file_id).await {
Ok(key) => Some(key),
Err(e) => {
warn!("Unable to load key, continuing without decryption: {}", e);
None
}
};
let mut decrypted_file = AudioDecrypt::new(key, encrypted_file);
let is_ogg_vorbis = AudioFiles::is_ogg_vorbis(format);
let (offset, mut normalisation_data) = if is_ogg_vorbis {
// Spotify stores normalisation data in a custom Ogg packet instead of Vorbis comments.
let normalisation_data =
NormalisationData::parse_from_ogg(&mut decrypted_file).ok();
(SPOTIFY_OGG_HEADER_END, normalisation_data)
} else {
(0, None)
};
let audio_file = match Subfile::new(
decrypted_file,
offset,
stream_loader_controller.len() as u64,
) {
Ok(audio_file) => audio_file,
Err(e) => {
error!("PlayerTrackLoader::load_track error opening subfile: {}", e);
return None;
}
};
let mut symphonia_decoder = |audio_file, format| {
SymphoniaDecoder::new(audio_file, format).map(|mut decoder| {
// For formats other that Vorbis, we'll try getting normalisation data from
// ReplayGain metadata fields, if present.
if normalisation_data.is_none() {
normalisation_data = decoder.normalisation_data();
}
Box::new(decoder) as Decoder
})
};
#[cfg(feature = "passthrough-decoder")]
let decoder_type = if self.config.passthrough {
PassthroughDecoder::new(audio_file, format).map(|x| Box::new(x) as Decoder)
} else {
symphonia_decoder(audio_file, format)
};
#[cfg(not(feature = "passthrough-decoder"))]
let decoder_type = symphonia_decoder(audio_file, format);
let normalisation_data = normalisation_data.unwrap_or_else(|| {
warn!("Unable to get normalisation data, continuing with defaults.");
NormalisationData::default()
});
let mut decoder = match decoder_type {
Ok(decoder) => decoder,
Err(e) if is_cached => {
warn!(
"Unable to read cached audio file: {}. Trying to download it.",
e
);
match self.session.cache() {
Some(cache) => {
if cache.remove_file(file_id).is_err() {
error!("Error removing file from cache");
return None;
}
}
None => {
error!("If the audio file is cached, a cache should exist");
return None;
}
}
// Just try it again
continue;
}
Err(e) => {
error!("Unable to read audio file: {}", e);
return None;
}
};
let duration_ms = audio_item.duration_ms;
// Don't try to seek past the track's duration.
// If the position is invalid just start from
// the beginning of the track.
let position_ms = if position_ms > duration_ms {
warn!("Invalid start position of {} ms exceeds track's duration of {} ms, starting track from the beginning", position_ms, duration_ms);
0
} else {
position_ms
};
// Ensure the starting position. Even when we want to play from the beginning,
// the cursor may have been moved by parsing normalisation data. This may not
// matter for playback (but won't hurt either), but may be useful for the
// passthrough decoder.
let stream_position_ms = match decoder.seek(position_ms) {
Ok(new_position_ms) => new_position_ms,
Err(e) => {
error!(
"PlayerTrackLoader::load_track error seeking to starting position {}: {}",
position_ms, e
);
return None;
}
};
// Ensure streaming mode now that we are ready to play from the requested position.
stream_loader_controller.set_stream_mode();
let is_explicit = audio_item.is_explicit;
info!("<{}> ({} ms) loaded", audio_item.name, duration_ms);
return Some(PlayerLoadedTrackData {
decoder,
normalisation_data,
stream_loader_controller,
audio_item,
bytes_per_second,
duration_ms,
stream_position_ms,
is_explicit,
});
}
}
}
/// The player's main event loop, expressed as a never-completing future.
///
/// Each iteration: (1) drains one pending command, (2) advances any track
/// load in progress, (3) advances any preload in progress, (4) if playing,
/// decodes and emits one packet, (5) suggests preloading near track end.
/// It returns `Poll::Pending` only when nothing is playing and no future
/// made progress; while playing it loops continuously (decoding drives it).
impl Future for PlayerInternal {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        // While this is written as a future, it still contains blocking code.
        // It must be run on its own thread.
        let passthrough = self.config.passthrough;
        loop {
            // Becomes false whenever some sub-future yielded work this pass.
            let mut all_futures_completed_or_not_ready = true;

            // process commands that were sent to us
            let cmd = match self.commands.poll_recv(cx) {
                Poll::Ready(None) => return Poll::Ready(()), // client has disconnected - shut down.
                Poll::Ready(Some(cmd)) => {
                    all_futures_completed_or_not_ready = false;
                    Some(cmd)
                }
                _ => None,
            };
            if let Some(cmd) = cmd {
                if let Err(e) = self.handle_command(cmd) {
                    error!("Error handling command: {}", e);
                }
            }

            // Handle loading of a new track to play
            if let PlayerState::Loading {
                ref mut loader,
                track_id,
                start_playback,
                play_request_id,
            } = self.state
            {
                // The loader may be terminated if we are trying to load the same track
                // as before, and that track failed to open before.
                if !loader.as_mut().is_terminated() {
                    match loader.as_mut().poll(cx) {
                        Poll::Ready(Ok(loaded_track)) => {
                            self.start_playback(
                                track_id,
                                play_request_id,
                                loaded_track,
                                start_playback,
                            );
                            // start_playback() must leave Loading; staying in it
                            // would loop forever, so treat it as fatal.
                            if let PlayerState::Loading { .. } = self.state {
                                error!("The state wasn't changed by start_playback()");
                                exit(1);
                            }
                        }
                        Poll::Ready(Err(e)) => {
                            error!(
                                "Skipping to next track, unable to load track <{:?}>: {:?}",
                                track_id, e
                            );
                            self.send_event(PlayerEvent::Unavailable {
                                track_id,
                                play_request_id,
                            })
                        }
                        Poll::Pending => (),
                    }
                }
            }

            // handle pending preload requests.
            if let PlayerPreload::Loading {
                ref mut loader,
                track_id,
            } = self.preload
            {
                match loader.as_mut().poll(cx) {
                    Poll::Ready(Ok(loaded_track)) => {
                        self.send_event(PlayerEvent::Preloading { track_id });
                        self.preload = PlayerPreload::Ready {
                            track_id,
                            loaded_track: Box::new(loaded_track),
                        };
                    }
                    Poll::Ready(Err(_)) => {
                        debug!("Unable to preload {:?}", track_id);
                        self.preload = PlayerPreload::None;
                        // Let Spirc know that the track was unavailable.
                        if let PlayerState::Playing {
                            play_request_id, ..
                        }
                        | PlayerState::Paused {
                            play_request_id, ..
                        } = self.state
                        {
                            self.send_event(PlayerEvent::Unavailable {
                                track_id,
                                play_request_id,
                            });
                        }
                    }
                    Poll::Pending => (),
                }
            }

            if self.state.is_playing() {
                self.ensure_sink_running();
                if let PlayerState::Playing {
                    track_id,
                    play_request_id,
                    ref mut decoder,
                    normalisation_factor,
                    ref mut stream_position_ms,
                    ref mut reported_nominal_start_time,
                    ..
                } = self.state
                {
                    // NOTE: decoding is blocking; this is why poll() runs on
                    // its own thread.
                    match decoder.next_packet() {
                        Ok(result) => {
                            if let Some((ref packet_position, ref packet)) = result {
                                let new_stream_position_ms = packet_position.position_ms;
                                // Record the new position and keep the old one to
                                // detect skips below.
                                let expected_position_ms = std::mem::replace(
                                    &mut *stream_position_ms,
                                    new_stream_position_ms,
                                );
                                if !passthrough {
                                    match packet.samples() {
                                        Ok(_) => {
                                            let new_stream_position = Duration::from_millis(
                                                new_stream_position_ms as u64,
                                            );
                                            let now = Instant::now();
                                            // Only notify if we're skipped some packets *or* we are behind.
                                            // If we're ahead it's probably due to a buffer of the backend
                                            // and we're actually in time.
                                            let notify_about_position =
                                                match *reported_nominal_start_time {
                                                    None => true,
                                                    Some(reported_nominal_start_time) => {
                                                        let mut notify = false;
                                                        if packet_position.skipped {
                                                            // Skipped more than a second ahead?
                                                            if let Some(ahead) = new_stream_position
                                                                .checked_sub(Duration::from_millis(
                                                                    expected_position_ms as u64,
                                                                ))
                                                            {
                                                                notify |=
                                                                    ahead >= Duration::from_secs(1)
                                                            }
                                                        }
                                                        // Lagging more than a second behind wall time?
                                                        if let Some(lag) = now
                                                            .checked_duration_since(
                                                                reported_nominal_start_time,
                                                            )
                                                        {
                                                            if let Some(lag) =
                                                                lag.checked_sub(new_stream_position)
                                                            {
                                                                notify |=
                                                                    lag >= Duration::from_secs(1)
                                                            }
                                                        }
                                                        notify
                                                    }
                                                };
                                            if notify_about_position {
                                                // Re-anchor nominal start time so future lag
                                                // comparisons are relative to now.
                                                *reported_nominal_start_time =
                                                    now.checked_sub(new_stream_position);
                                                self.send_event(PlayerEvent::PositionCorrection {
                                                    play_request_id,
                                                    track_id,
                                                    position_ms: new_stream_position_ms,
                                                });
                                            }
                                        }
                                        Err(e) => {
                                            error!("Skipping to next track, unable to decode samples for track <{:?}>: {:?}", track_id, e);
                                            self.send_event(PlayerEvent::EndOfTrack {
                                                track_id,
                                                play_request_id,
                                            })
                                        }
                                    }
                                }
                            }
                            // `None` result signals end of track; handle_packet deals
                            // with both cases.
                            self.handle_packet(result, normalisation_factor);
                        }
                        Err(e) => {
                            error!("Skipping to next track, unable to get next packet for track <{:?}>: {:?}", track_id, e);
                            self.send_event(PlayerEvent::EndOfTrack {
                                track_id,
                                play_request_id,
                            })
                        }
                    }
                } else {
                    // is_playing() returned true but the state isn't Playing:
                    // broken invariant, abort.
                    error!("PlayerInternal poll: Invalid PlayerState");
                    exit(1);
                };
            }

            // Suggest preloading the next track shortly before this one ends,
            // once the remainder of the current file is already buffered.
            if let PlayerState::Playing {
                track_id,
                play_request_id,
                duration_ms,
                stream_position_ms,
                ref mut stream_loader_controller,
                ref mut suggested_to_preload_next_track,
                ..
            }
            | PlayerState::Paused {
                track_id,
                play_request_id,
                duration_ms,
                stream_position_ms,
                ref mut stream_loader_controller,
                ref mut suggested_to_preload_next_track,
                ..
            } = self.state
            {
                if (!*suggested_to_preload_next_track)
                    && ((duration_ms as i64 - stream_position_ms as i64)
                        < PRELOAD_NEXT_TRACK_BEFORE_END_DURATION_MS as i64)
                    && stream_loader_controller.range_to_end_available()
                {
                    // Fire at most once per track.
                    *suggested_to_preload_next_track = true;
                    self.send_event(PlayerEvent::TimeToPreloadNextTrack {
                        track_id,
                        play_request_id,
                    });
                }
            }

            if (!self.state.is_playing()) && all_futures_completed_or_not_ready {
                return Poll::Pending;
            }
        }
    }
}
impl PlayerInternal {
/// Starts the audio sink if it is not already running.
///
/// The sink-event callback is notified first (as before), then the sink is
/// started; a failed start is logged and the player falls back to pause.
fn ensure_sink_running(&mut self) {
    if self.sink_status == SinkStatus::Running {
        return;
    }
    trace!("== Starting sink ==");
    if let Some(callback) = self.sink_event_callback.as_mut() {
        callback(SinkStatus::Running);
    }
    if let Err(e) = self.sink.start() {
        error!("{}", e);
        self.handle_pause();
    } else {
        self.sink_status = SinkStatus::Running;
    }
}
/// Stops the audio sink, either temporarily (may be restarted for the same
/// session) or fully. Notifies the sink-event callback of the new status.
/// A sink that fails to stop is treated as fatal.
fn ensure_sink_stopped(&mut self, temporarily: bool) {
    // Status we will transition to when coming from Running.
    let target = if temporarily {
        SinkStatus::TemporarilyClosed
    } else {
        SinkStatus::Closed
    };
    match self.sink_status {
        SinkStatus::Running => {
            trace!("== Stopping sink ==");
            if let Err(e) = self.sink.stop() {
                error!("{}", e);
                exit(1);
            }
            self.sink_status = target;
            if let Some(callback) = self.sink_event_callback.as_mut() {
                callback(target);
            }
        }
        // Only a temporary close can be "upgraded" to a full close.
        SinkStatus::TemporarilyClosed if !temporarily => {
            self.sink_status = SinkStatus::Closed;
            if let Some(callback) = self.sink_event_callback.as_mut() {
                callback(SinkStatus::Closed);
            }
        }
        // Already closed (or temporarily closed and that is all we need).
        _ => (),
    }
}
/// Fully stops playback: closes the sink, emits `Stopped`, and transitions
/// to `PlayerState::Stopped` from any active state. A no-op when already
/// stopped; an `Invalid` state aborts the process (broken invariant).
fn handle_player_stop(&mut self) {
    match self.state {
        // All four active states carry the ids needed for the Stopped event.
        PlayerState::Playing {
            track_id,
            play_request_id,
            ..
        }
        | PlayerState::Paused {
            track_id,
            play_request_id,
            ..
        }
        | PlayerState::EndOfTrack {
            track_id,
            play_request_id,
            ..
        }
        | PlayerState::Loading {
            track_id,
            play_request_id,
            ..
        } => {
            // `false`: close the sink for good, not temporarily.
            self.ensure_sink_stopped(false);
            self.send_event(PlayerEvent::Stopped {
                track_id,
                play_request_id,
            });
            self.state = PlayerState::Stopped;
        }
        PlayerState::Stopped => (),
        PlayerState::Invalid => {
            error!("PlayerInternal::handle_player_stop in invalid state");
            exit(1);
        }
    }
}
/// Resume playback: flips a paused player to playing, or flags a loading
/// track to start automatically once it is ready. Any other state is an
/// error (logged, not fatal).
fn handle_play(&mut self) {
    match self.state {
        // Still loading: just remember that playback should begin on load.
        PlayerState::Loading {
            ref mut start_playback,
            ..
        } => *start_playback = true,
        PlayerState::Paused {
            track_id,
            play_request_id: request_id,
            stream_position_ms: position_ms,
            ..
        } => {
            self.state.paused_to_playing();
            // Announce the transition before spinning the sink back up.
            self.send_event(PlayerEvent::Playing {
                track_id,
                play_request_id: request_id,
                position_ms,
            });
            self.ensure_sink_running();
        }
        _ => error!("Player::play called from invalid state: {:?}", self.state),
    }
}
/// Pause playback: stops the sink and emits `Paused`. If a track is still
/// loading, it is flagged to come up paused instead of auto-playing.
fn handle_pause(&mut self) {
    match self.state {
        // Load in progress: make sure it does not auto-start when ready.
        PlayerState::Loading {
            ref mut start_playback,
            ..
        } => *start_playback = false,
        // Already paused: just make sure the sink really is stopped.
        PlayerState::Paused { .. } => self.ensure_sink_stopped(false),
        PlayerState::Playing {
            track_id,
            play_request_id: request_id,
            stream_position_ms: position_ms,
            ..
        } => {
            self.state.playing_to_paused();
            self.ensure_sink_stopped(false);
            self.send_event(PlayerEvent::Paused {
                track_id,
                play_request_id: request_id,
                position_ms,
            });
        }
        _ => error!("Player::pause called from invalid state: {:?}", self.state),
    }
}
/// Applies normalisation/limiting and volume to a decoded packet and writes
/// it to the sink. `None` signals end of track, which transitions the state
/// machine and emits `EndOfTrack`.
///
/// The dynamic-normalisation branch runs a feedforward limiter per sample;
/// its statement order (gain, peak detector update, make-up, volume) is part
/// of the algorithm and must not be rearranged.
fn handle_packet(
    &mut self,
    packet: Option<(AudioPacketPosition, AudioPacket)>,
    normalisation_factor: f64,
) {
    match packet {
        Some((_, mut packet)) => {
            if !packet.is_empty() {
                if let AudioPacket::Samples(ref mut data) = packet {
                    // Get the volume for the packet.
                    // In the case of hardware volume control this will
                    // always be 1.0 (no change).
                    let volume = self.volume_getter.attenuation_factor();

                    // For the basic normalisation method, a normalisation factor of 1.0 indicates that
                    // there is nothing to normalise (all samples should pass unaltered). For the
                    // dynamic method, there may still be peaks that we want to shave off.
                    // No matter the case we apply volume attenuation last if there is any.
                    if !self.config.normalisation {
                        if volume < 1.0 {
                            for sample in data.iter_mut() {
                                *sample *= volume;
                            }
                        }
                    } else if self.config.normalisation_method == NormalisationMethod::Basic
                        && (normalisation_factor < 1.0 || volume < 1.0)
                    {
                        for sample in data.iter_mut() {
                            *sample *= normalisation_factor * volume;
                        }
                    } else if self.config.normalisation_method == NormalisationMethod::Dynamic {
                        // zero-cost shorthands
                        let threshold_db = self.config.normalisation_threshold_dbfs;
                        let knee_db = self.config.normalisation_knee_db;
                        let attack_cf = self.config.normalisation_attack_cf;
                        let release_cf = self.config.normalisation_release_cf;

                        for sample in data.iter_mut() {
                            *sample *= normalisation_factor;

                            // Feedforward limiter in the log domain
                            // After: Giannoulis, D., Massberg, M., & Reiss, J.D. (2012). Digital Dynamic
                            // Range Compressor Design—A Tutorial and Analysis. Journal of The Audio
                            // Engineering Society, 60, 399-408.

                            // Some tracks have samples that are precisely 0.0. That's silence
                            // and we know we don't need to limit that, in which we can spare
                            // the CPU cycles.
                            //
                            // Also, calling `ratio_to_db(0.0)` returns `inf` and would get the
                            // peak detector stuck. Also catch the unlikely case where a sample
                            // is decoded as `NaN` or some other non-normal value.
                            let limiter_db = if sample.is_normal() {
                                // step 1-4: half-wave rectification and conversion into dB
                                // and gain computer with soft knee and subtractor
                                let bias_db = ratio_to_db(sample.abs()) - threshold_db;
                                let knee_boundary_db = bias_db * 2.0;

                                if knee_boundary_db < -knee_db {
                                    0.0
                                } else if knee_boundary_db.abs() <= knee_db {
                                    // The textbook equation:
                                    // ratio_to_db(sample.abs()) - (ratio_to_db(sample.abs()) - (bias_db + knee_db / 2.0).powi(2) / (2.0 * knee_db))
                                    // Simplifies to:
                                    // ((2.0 * bias_db) + knee_db).powi(2) / (8.0 * knee_db)
                                    // Which in our case further simplifies to:
                                    // (knee_boundary_db + knee_db).powi(2) / (8.0 * knee_db)
                                    // because knee_boundary_db is 2.0 * bias_db.
                                    (knee_boundary_db + knee_db).powi(2) / (8.0 * knee_db)
                                } else {
                                    // Textbook:
                                    // ratio_to_db(sample.abs()) - threshold_db, which is already our bias_db.
                                    bias_db
                                }
                            } else {
                                0.0
                            };

                            // Spare the CPU unless (1) the limiter is engaged, (2) we
                            // were in attack or (3) we were in release, and that attack/
                            // release wasn't finished yet.
                            if limiter_db > 0.0
                                || self.normalisation_integrator > 0.0
                                || self.normalisation_peak > 0.0
                            {
                                // step 5: smooth, decoupled peak detector
                                // Textbook:
                                // release_cf * self.normalisation_integrator + (1.0 - release_cf) * limiter_db
                                // Simplifies to:
                                // release_cf * self.normalisation_integrator - release_cf * limiter_db + limiter_db
                                self.normalisation_integrator = f64::max(
                                    limiter_db,
                                    release_cf * self.normalisation_integrator
                                        - release_cf * limiter_db
                                        + limiter_db,
                                );
                                // Textbook:
                                // attack_cf * self.normalisation_peak + (1.0 - attack_cf) * self.normalisation_integrator
                                // Simplifies to:
                                // attack_cf * self.normalisation_peak - attack_cf * self.normalisation_integrator + self.normalisation_integrator
                                self.normalisation_peak = attack_cf * self.normalisation_peak
                                    - attack_cf * self.normalisation_integrator
                                    + self.normalisation_integrator;

                                // step 6: make-up gain applied later (volume attenuation)
                                // Applying the standard normalisation factor here won't work,
                                // because there are tracks with peaks as high as 6 dB above
                                // the default threshold, so that would clip.

                                // steps 7-8: conversion into level and multiplication into gain stage
                                *sample *= db_to_ratio(-self.normalisation_peak);
                            }

                            *sample *= volume;
                        }
                    }
                }

                // A failed sink write pauses playback rather than crashing.
                if let Err(e) = self.sink.write(packet, &mut self.converter) {
                    error!("{}", e);
                    self.handle_pause();
                }
            }
        }
        None => {
            // Decoder exhausted: move to EndOfTrack and notify listeners.
            self.state.playing_to_end_of_track();
            if let PlayerState::EndOfTrack {
                track_id,
                play_request_id,
                ..
            } = self.state
            {
                self.send_event(PlayerEvent::EndOfTrack {
                    track_id,
                    play_request_id,
                })
            } else {
                error!("PlayerInternal handle_packet: Invalid PlayerState");
                exit(1);
            }
        }
    }
}
/// Transitions from a freshly loaded track into `Playing` or `Paused`.
///
/// Emits `TrackChanged` first, then either starts the sink and emits
/// `Playing`, or stops the sink and emits `Paused`. The event/state ordering
/// differs deliberately between the two branches: `Playing` is sent before
/// the state is installed, `Paused` after.
fn start_playback(
    &mut self,
    track_id: SpotifyId,
    play_request_id: u64,
    loaded_track: PlayerLoadedTrackData,
    start_playback: bool,
) {
    let audio_item = Box::new(loaded_track.audio_item.clone());
    self.send_event(PlayerEvent::TrackChanged { audio_item });

    let position_ms = loaded_track.stream_position_ms;

    // Resolve `Auto` normalisation into a concrete Album/Track choice.
    let mut config = self.config.clone();
    if config.normalisation_type == NormalisationType::Auto {
        if self.auto_normalise_as_album {
            config.normalisation_type = NormalisationType::Album;
        } else {
            config.normalisation_type = NormalisationType::Track;
        }
    };
    let normalisation_factor =
        NormalisationData::get_factor(&config, loaded_track.normalisation_data);

    if start_playback {
        self.ensure_sink_running();
        self.send_event(PlayerEvent::Playing {
            track_id,
            play_request_id,
            position_ms,
        });
        self.state = PlayerState::Playing {
            track_id,
            play_request_id,
            decoder: loaded_track.decoder,
            audio_item: loaded_track.audio_item,
            normalisation_data: loaded_track.normalisation_data,
            normalisation_factor,
            stream_loader_controller: loaded_track.stream_loader_controller,
            duration_ms: loaded_track.duration_ms,
            bytes_per_second: loaded_track.bytes_per_second,
            stream_position_ms: loaded_track.stream_position_ms,
            // Anchor wall-clock start so poll() can detect playback lag.
            reported_nominal_start_time: Instant::now()
                .checked_sub(Duration::from_millis(position_ms as u64)),
            suggested_to_preload_next_track: false,
            is_explicit: loaded_track.is_explicit,
        };
    } else {
        self.ensure_sink_stopped(false);
        self.state = PlayerState::Paused {
            track_id,
            play_request_id,
            decoder: loaded_track.decoder,
            audio_item: loaded_track.audio_item,
            normalisation_data: loaded_track.normalisation_data,
            normalisation_factor,
            stream_loader_controller: loaded_track.stream_loader_controller,
            duration_ms: loaded_track.duration_ms,
            bytes_per_second: loaded_track.bytes_per_second,
            stream_position_ms: loaded_track.stream_position_ms,
            suggested_to_preload_next_track: false,
            is_explicit: loaded_track.is_explicit,
        };
        self.send_event(PlayerEvent::Paused {
            track_id,
            play_request_id,
            position_ms,
        });
    }
}
/// Loads `track_id` and starts it (playing or paused, per `play`) at
/// `position_ms`, reusing already-loaded data where possible.
///
/// Fast paths, in order: (1) the same track just ended (repeat), (2) the
/// same track is currently playing/paused (seek in place), (3) the track has
/// been preloaded. Otherwise a (possibly in-flight preload) loader is used
/// and the player enters `Loading`.
///
/// The fast paths extract state via `mem::replace(.., Invalid)`; each path
/// must install a valid state again before returning.
fn handle_command_load(
    &mut self,
    track_id: SpotifyId,
    play_request_id_option: Option<u64>,
    play: bool,
    position_ms: u32,
) -> PlayerResult {
    let play_request_id =
        play_request_id_option.unwrap_or(self.play_request_id_generator.get());
    self.send_event(PlayerEvent::PlayRequestIdChanged { play_request_id });

    // Without gapless playback the sink is cycled on every load.
    if !self.config.gapless {
        self.ensure_sink_stopped(play);
    }

    if matches!(self.state, PlayerState::Invalid { .. }) {
        return Err(Error::internal(format!(
            "Player::handle_command_load called from invalid state: {:?}",
            self.state
        )));
    }

    // Now we check at different positions whether we already have a pre-loaded version
    // of this track somewhere. If so, use it and return.

    // Check if there's a matching loaded track in the EndOfTrack player state.
    // This is the case if we're repeating the same track again.
    if let PlayerState::EndOfTrack {
        track_id: previous_track_id,
        ..
    } = self.state
    {
        if previous_track_id == track_id {
            let mut loaded_track = match mem::replace(&mut self.state, PlayerState::Invalid) {
                PlayerState::EndOfTrack { loaded_track, .. } => loaded_track,
                _ => {
                    return Err(Error::internal(format!("PlayerInternal::handle_command_load repeating the same track: invalid state: {:?}", self.state)));
                }
            };

            if position_ms != loaded_track.stream_position_ms {
                // This may be blocking.
                loaded_track.stream_position_ms = loaded_track.decoder.seek(position_ms)?;
            }
            self.preload = PlayerPreload::None;
            self.start_playback(track_id, play_request_id, loaded_track, play);
            if let PlayerState::Invalid = self.state {
                return Err(Error::internal(format!("PlayerInternal::handle_command_load repeating the same track: start_playback() did not transition to valid player state: {:?}", self.state)));
            }
            return Ok(());
        }
    }

    // Check if we are already playing the track. If so, just do a seek and update our info.
    if let PlayerState::Playing {
        track_id: current_track_id,
        ref mut stream_position_ms,
        ref mut decoder,
        ..
    }
    | PlayerState::Paused {
        track_id: current_track_id,
        ref mut stream_position_ms,
        ref mut decoder,
        ..
    } = self.state
    {
        if current_track_id == track_id {
            // we can use the current decoder. Ensure it's at the correct position.
            if position_ms != *stream_position_ms {
                // This may be blocking.
                *stream_position_ms = decoder.seek(position_ms)?;
            }

            // Move the info from the current state into a PlayerLoadedTrackData so we can use
            // the usual code path to start playback.
            let old_state = mem::replace(&mut self.state, PlayerState::Invalid);

            if let PlayerState::Playing {
                stream_position_ms,
                decoder,
                audio_item,
                stream_loader_controller,
                bytes_per_second,
                duration_ms,
                normalisation_data,
                is_explicit,
                ..
            }
            | PlayerState::Paused {
                stream_position_ms,
                decoder,
                audio_item,
                stream_loader_controller,
                bytes_per_second,
                duration_ms,
                normalisation_data,
                is_explicit,
                ..
            } = old_state
            {
                let loaded_track = PlayerLoadedTrackData {
                    decoder,
                    normalisation_data,
                    stream_loader_controller,
                    audio_item,
                    bytes_per_second,
                    duration_ms,
                    stream_position_ms,
                    is_explicit,
                };

                self.preload = PlayerPreload::None;
                self.start_playback(track_id, play_request_id, loaded_track, play);

                if let PlayerState::Invalid = self.state {
                    return Err(Error::internal(format!("PlayerInternal::handle_command_load already playing this track: start_playback() did not transition to valid player state: {:?}", self.state)));
                }

                return Ok(());
            } else {
                return Err(Error::internal(format!("PlayerInternal::handle_command_load already playing this track: invalid state: {:?}", self.state)));
            }
        }
    }

    // Check if the requested track has been preloaded already. If so use the preloaded data.
    if let PlayerPreload::Ready {
        track_id: loaded_track_id,
        ..
    } = self.preload
    {
        if track_id == loaded_track_id {
            let preload = std::mem::replace(&mut self.preload, PlayerPreload::None);
            if let PlayerPreload::Ready {
                track_id,
                mut loaded_track,
            } = preload
            {
                if position_ms != loaded_track.stream_position_ms {
                    // This may be blocking
                    loaded_track.stream_position_ms = loaded_track.decoder.seek(position_ms)?;
                }
                self.start_playback(track_id, play_request_id, *loaded_track, play);
                return Ok(());
            } else {
                return Err(Error::internal(format!("PlayerInternal::handle_command_loading preloaded track: invalid state: {:?}", self.state)));
            }
        }
    }

    // We need to load the track - either from scratch or by completing a preload.
    // In any case we go into a Loading state to load the track.
    self.ensure_sink_stopped(play);

    self.send_event(PlayerEvent::Loading {
        track_id,
        play_request_id,
        position_ms,
    });

    // Try to extract a pending loader from the preloading mechanism
    let loader = if let PlayerPreload::Loading {
        track_id: loaded_track_id,
        ..
    } = self.preload
    {
        // Only reusable when loading from the start; a preload always
        // begins at position 0.
        if (track_id == loaded_track_id) && (position_ms == 0) {
            let mut preload = PlayerPreload::None;
            std::mem::swap(&mut preload, &mut self.preload);
            if let PlayerPreload::Loading { loader, .. } = preload {
                Some(loader)
            } else {
                None
            }
        } else {
            None
        }
    } else {
        None
    };

    self.preload = PlayerPreload::None;

    // If we don't have a loader yet, create one from scratch.
    let loader = loader.unwrap_or_else(|| Box::pin(self.load_track(track_id, position_ms)));

    // Set ourselves to a loading state.
    self.state = PlayerState::Loading {
        track_id,
        play_request_id,
        start_playback: play,
        loader,
    };

    Ok(())
}
/// Kicks off a background preload of `track_id`, unless that track is
/// already queued for preload, finished preloading, or loaded as the
/// active track. A preload of a *different* track is cancelled either way.
fn handle_command_preload(&mut self, track_id: SpotifyId) {
    debug!("Preloading track");

    // Is this track already being (or done being) preloaded?
    if let PlayerPreload::Loading {
        track_id: queued_track_id,
        ..
    }
    | PlayerPreload::Ready {
        track_id: queued_track_id,
        ..
    } = self.preload
    {
        if queued_track_id == track_id {
            // Exactly this track is already in flight - nothing to do.
            return;
        }
        // Something else is queued; cancel it in favour of the new request.
        self.preload = PlayerPreload::None;
    }

    // Is the requested track already the active one?
    if let PlayerState::Playing {
        track_id: active_track_id,
        ..
    }
    | PlayerState::Paused {
        track_id: active_track_id,
        ..
    }
    | PlayerState::EndOfTrack {
        track_id: active_track_id,
        ..
    } = self.state
    {
        if active_track_id == track_id {
            return;
        }
    }

    // Schedule the actual preload, always from the start of the track.
    let loader = self.load_track(track_id, 0);
    self.preload = PlayerPreload::Loading {
        track_id,
        loader: Box::pin(loader),
    }
}
/// Seeks the current track to `position_ms`.
///
/// If still loading, the load is restarted with the new start position.
/// Otherwise the decoder seeks (possibly blocking), `Seeked` is emitted with
/// the position the decoder actually reached, a read-ahead buffer is
/// requested, and the nominal start time is re-anchored for lag detection.
fn handle_command_seek(&mut self, position_ms: u32) -> PlayerResult {
    // When we are still loading, the user may immediately ask to
    // seek to another position yet the decoder won't be ready for
    // that. In this case just restart the loading process but
    // with the requested position.
    if let PlayerState::Loading {
        track_id,
        play_request_id,
        start_playback,
        ..
    } = self.state
    {
        return self.handle_command_load(
            track_id,
            Some(play_request_id),
            start_playback,
            position_ms,
        );
    }

    if let Some(decoder) = self.state.decoder() {
        match decoder.seek(position_ms) {
            Ok(new_position_ms) => {
                if let PlayerState::Playing {
                    ref mut stream_position_ms,
                    track_id,
                    play_request_id,
                    ..
                }
                | PlayerState::Paused {
                    ref mut stream_position_ms,
                    track_id,
                    play_request_id,
                    ..
                } = self.state
                {
                    // Report the decoder's actual landing position, which may
                    // differ from the requested one.
                    *stream_position_ms = new_position_ms;
                    self.send_event(PlayerEvent::Seeked {
                        play_request_id,
                        track_id,
                        position_ms: new_position_ms,
                    });
                }
            }
            Err(e) => error!("PlayerInternal::handle_command_seek error: {}", e),
        }
    } else {
        error!("Player::seek called from invalid state: {:?}", self.state);
    }

    // ensure we have a bit of a buffer of downloaded data
    self.preload_data_before_playback()?;

    if let PlayerState::Playing {
        ref mut reported_nominal_start_time,
        ..
    } = self.state
    {
        *reported_nominal_start_time =
            Instant::now().checked_sub(Duration::from_millis(position_ms as u64));
    }
    Ok(())
}
/// Central command dispatcher: maps each `PlayerCommand` one-to-one to its
/// handler or to the event it re-emits. Kept as a single flat match so it
/// stays in lockstep with the `PlayerCommand` definition.
fn handle_command(&mut self, cmd: PlayerCommand) -> PlayerResult {
    debug!("command={:?}", cmd);
    match cmd {
        PlayerCommand::Load {
            track_id,
            play,
            position_ms,
        } => self.handle_command_load(track_id, None, play, position_ms)?,
        PlayerCommand::Preload { track_id } => self.handle_command_preload(track_id),
        PlayerCommand::Seek(position_ms) => self.handle_command_seek(position_ms)?,
        PlayerCommand::Play => self.handle_play(),
        PlayerCommand::Pause => self.handle_pause(),
        PlayerCommand::Stop => self.handle_player_stop(),
        PlayerCommand::SetSession(session) => self.session = session,
        PlayerCommand::AddEventSender(sender) => self.event_senders.push(sender),
        PlayerCommand::SetSinkEventCallback(callback) => self.sink_event_callback = callback,
        // The Emit* commands simply forward a state change as a PlayerEvent.
        PlayerCommand::EmitVolumeChangedEvent(volume) => {
            self.send_event(PlayerEvent::VolumeChanged { volume })
        }
        PlayerCommand::EmitRepeatChangedEvent(repeat) => {
            self.send_event(PlayerEvent::RepeatChanged { repeat })
        }
        PlayerCommand::EmitShuffleChangedEvent(shuffle) => {
            self.send_event(PlayerEvent::ShuffleChanged { shuffle })
        }
        PlayerCommand::EmitAutoPlayChangedEvent(auto_play) => {
            self.send_event(PlayerEvent::AutoPlayChanged { auto_play })
        }
        PlayerCommand::EmitSessionClientChangedEvent {
            client_id,
            client_name,
            client_brand_name,
            client_model_name,
        } => self.send_event(PlayerEvent::SessionClientChanged {
            client_id,
            client_name,
            client_brand_name,
            client_model_name,
        }),
        PlayerCommand::EmitSessionConnectedEvent {
            connection_id,
            user_name,
        } => self.send_event(PlayerEvent::SessionConnected {
            connection_id,
            user_name,
        }),
        PlayerCommand::EmitSessionDisconnectedEvent {
            connection_id,
            user_name,
        } => self.send_event(PlayerEvent::SessionDisconnected {
            connection_id,
            user_name,
        }),
        PlayerCommand::SetAutoNormaliseAsAlbum(setting) => {
            self.auto_normalise_as_album = setting
        }
        PlayerCommand::EmitFilterExplicitContentChangedEvent(filter) => {
            self.send_event(PlayerEvent::FilterExplicitContentChanged { filter });

            // Enabling the filter while an explicit track is loaded skips it.
            if filter {
                if let PlayerState::Playing {
                    track_id,
                    play_request_id,
                    is_explicit,
                    ..
                }
                | PlayerState::Paused {
                    track_id,
                    play_request_id,
                    is_explicit,
                    ..
                } = self.state
                {
                    if is_explicit {
                        warn!("Currently loaded track is explicit, which client setting forbids -- skipping to next track.");
                        self.send_event(PlayerEvent::EndOfTrack {
                            track_id,
                            play_request_id,
                        })
                    }
                }
            }
        }
    };
    Ok(())
}
/// Broadcasts `event` to every registered listener, pruning any sender
/// whose receiving end has been dropped.
fn send_event(&mut self, event: PlayerEvent) {
    let mut live = Vec::with_capacity(self.event_senders.len());
    for sender in self.event_senders.drain(..) {
        // A failed send means the receiver is gone; drop that sender.
        if sender.send(event.clone()).is_ok() {
            live.push(sender);
        }
    }
    self.event_senders = live;
}
/// Spawns a background thread that loads `spotify_id` and returns a fused
/// one-shot future resolving to the loaded track data (or `Err(())` if the
/// load failed and the sender was dropped).
///
/// Threads register themselves in `load_handles` so `Drop` can join them;
/// each thread deregisters itself when done.
fn load_track(
    &mut self,
    spotify_id: SpotifyId,
    position_ms: u32,
) -> impl FusedFuture<Output = Result<PlayerLoadedTrackData, ()>> + Send + 'static {
    // This method creates a future that returns the loaded stream and associated info.
    // Ideally all work should be done using asynchronous code. However, seek() on the
    // audio stream is implemented in a blocking fashion. Thus, we can't turn it into future
    // easily. Instead we spawn a thread to do the work and return a one-shot channel as the
    // future to work with.
    let loader = PlayerTrackLoader {
        session: self.session.clone(),
        config: self.config.clone(),
    };

    let (result_tx, result_rx) = oneshot::channel();

    let load_handles_clone = self.load_handles.clone();
    let handle = tokio::runtime::Handle::current();
    let load_handle = thread::spawn(move || {
        let data = handle.block_on(loader.load_track(spotify_id, position_ms));
        if let Some(data) = data {
            // Receiver may have been dropped (load cancelled); ignore.
            let _ = result_tx.send(data);
        }

        // Deregister ourselves so Drop does not try to join a finished thread.
        let mut load_handles = load_handles_clone.lock();
        load_handles.remove(&thread::current().id());
    });

    // NOTE(review): if the spawned thread finishes before this insert runs,
    // its remove() is a no-op and a finished handle lingers in the map until
    // Drop joins it - harmless, but worth confirming it is intentional.
    let mut load_handles = self.load_handles.lock();
    load_handles.insert(load_handle.thread().id(), load_handle);

    result_rx.map_err(|_| ())
}
/// Makes sure a read-ahead window of audio data is buffered before playback
/// continues. Requests `READ_AHEAD_DURING_PLAYBACK` worth of bytes and
/// blocks until `READ_AHEAD_BEFORE_PLAYBACK` worth is actually available.
/// A no-op unless the player is in the `Playing` state.
fn preload_data_before_playback(&mut self) -> PlayerResult {
    let (bytes_per_second, stream_loader_controller) = match self.state {
        PlayerState::Playing {
            bytes_per_second,
            ref mut stream_loader_controller,
            ..
        } => (bytes_per_second, stream_loader_controller),
        _ => return Ok(()),
    };

    // Convert the read-ahead durations into byte counts for this stream.
    let request_data_length =
        (READ_AHEAD_DURING_PLAYBACK.as_secs_f32() * bytes_per_second as f32) as usize;
    let wait_for_data_length =
        (READ_AHEAD_BEFORE_PLAYBACK.as_secs_f32() * bytes_per_second as f32) as usize;

    stream_loader_controller
        .fetch_next_and_wait(request_data_length, wait_for_data_length)
        .map_err(Into::into)
}
}
impl Drop for PlayerInternal {
fn drop(&mut self) {
debug!("drop PlayerInternal[{}]", self.player_id);
let handles: Vec<thread::JoinHandle<()>> = {
// waiting for the thread while holding the mutex would result in a deadlock
let mut load_handles = self.load_handles.lock();
load_handles
.drain()
.map(|(_thread_id, handle)| handle)
.collect()
};
for handle in handles {
let _ = handle.join();
}
}
}
/// Hand-written `Debug` for `PlayerCommand`: payloads that cannot derive
/// `Debug` (sessions, senders, callbacks) are elided, everything else is
/// shown as a tuple. The exact output text is relied on by log readers, so
/// only documentation is added here.
impl fmt::Debug for PlayerCommand {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            PlayerCommand::Load {
                track_id,
                play,
                position_ms,
                ..
            } => f
                .debug_tuple("Load")
                .field(&track_id)
                .field(&play)
                .field(&position_ms)
                .finish(),
            PlayerCommand::Preload { track_id } => {
                f.debug_tuple("Preload").field(&track_id).finish()
            }
            PlayerCommand::Play => f.debug_tuple("Play").finish(),
            PlayerCommand::Pause => f.debug_tuple("Pause").finish(),
            PlayerCommand::Stop => f.debug_tuple("Stop").finish(),
            PlayerCommand::Seek(position) => f.debug_tuple("Seek").field(&position).finish(),
            // Payloads below are intentionally not printed (not Debug).
            PlayerCommand::SetSession(_) => f.debug_tuple("SetSession").finish(),
            PlayerCommand::AddEventSender(_) => f.debug_tuple("AddEventSender").finish(),
            PlayerCommand::SetSinkEventCallback(_) => {
                f.debug_tuple("SetSinkEventCallback").finish()
            }
            PlayerCommand::EmitVolumeChangedEvent(volume) => f
                .debug_tuple("EmitVolumeChangedEvent")
                .field(&volume)
                .finish(),
            PlayerCommand::SetAutoNormaliseAsAlbum(setting) => f
                .debug_tuple("SetAutoNormaliseAsAlbum")
                .field(&setting)
                .finish(),
            PlayerCommand::EmitFilterExplicitContentChangedEvent(filter) => f
                .debug_tuple("EmitFilterExplicitContentChangedEvent")
                .field(&filter)
                .finish(),
            PlayerCommand::EmitSessionConnectedEvent {
                connection_id,
                user_name,
            } => f
                .debug_tuple("EmitSessionConnectedEvent")
                .field(&connection_id)
                .field(&user_name)
                .finish(),
            PlayerCommand::EmitSessionDisconnectedEvent {
                connection_id,
                user_name,
            } => f
                .debug_tuple("EmitSessionDisconnectedEvent")
                .field(&connection_id)
                .field(&user_name)
                .finish(),
            PlayerCommand::EmitSessionClientChangedEvent {
                client_id,
                client_name,
                client_brand_name,
                client_model_name,
            } => f
                .debug_tuple("EmitSessionClientChangedEvent")
                .field(&client_id)
                .field(&client_name)
                .field(&client_brand_name)
                .field(&client_model_name)
                .finish(),
            PlayerCommand::EmitShuffleChangedEvent(shuffle) => f
                .debug_tuple("EmitShuffleChangedEvent")
                .field(&shuffle)
                .finish(),
            PlayerCommand::EmitRepeatChangedEvent(repeat) => f
                .debug_tuple("EmitRepeatChangedEvent")
                .field(&repeat)
                .finish(),
            PlayerCommand::EmitAutoPlayChangedEvent(auto_play) => f
                .debug_tuple("EmitAutoPlayChangedEvent")
                .field(&auto_play)
                .finish(),
        }
    }
}
impl fmt::Debug for PlayerState {
    /// Compact debug form: only the identifying fields (`track_id`,
    /// `play_request_id`) are printed; the large payloads hidden behind
    /// `..` in each variant are intentionally omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use PlayerState::*;
        match self {
            Stopped => f.debug_struct("Stopped").finish(),
            Loading {
                track_id,
                play_request_id,
                ..
            } => f
                .debug_struct("Loading")
                .field("track_id", track_id)
                .field("play_request_id", play_request_id)
                .finish(),
            Paused {
                track_id,
                play_request_id,
                ..
            } => f
                .debug_struct("Paused")
                .field("track_id", track_id)
                .field("play_request_id", play_request_id)
                .finish(),
            Playing {
                track_id,
                play_request_id,
                ..
            } => f
                .debug_struct("Playing")
                .field("track_id", track_id)
                .field("play_request_id", play_request_id)
                .finish(),
            EndOfTrack {
                track_id,
                play_request_id,
                ..
            } => f
                .debug_struct("EndOfTrack")
                .field("track_id", track_id)
                .field("play_request_id", play_request_id)
                .finish(),
            Invalid => f.debug_struct("Invalid").finish(),
        }
    }
}
/// A windowed view over an underlying stream: positions reported to the
/// consumer are relative to `offset` within `stream`.
struct Subfile<T: Read + Seek> {
    stream: T,
    offset: u64, // absolute start of the window within `stream`
    length: u64, // byte length reported via `MediaSource::byte_len`
}
impl<T: Read + Seek> Subfile<T> {
    /// Creates a windowed view over `stream`, first seeking the stream to
    /// `offset` so that reads begin at the start of the window.
    ///
    /// Returns any I/O error produced by the initial seek.
    pub fn new(mut stream: T, offset: u64, length: u64) -> Result<Subfile<T>, io::Error> {
        stream.seek(SeekFrom::Start(offset))?;
        Ok(Subfile {
            stream,
            offset,
            length,
        })
    }
}
impl<T: Read + Seek> Read for Subfile<T> {
    // Delegates directly to the underlying stream.
    // NOTE(review): reads are not clamped to `length`, so a consumer that
    // ignores `byte_len` could read past the logical end of the subfile —
    // confirm callers always respect `byte_len`.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.stream.read(buf)
    }
}
impl<T: Read + Seek> Seek for Subfile<T> {
    /// Translates window-relative seek positions into absolute positions on
    /// the underlying stream, and reports the result relative to `offset`.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        let pos = match pos {
            // Start-relative: shift by the window's absolute offset.
            SeekFrom::Start(offset) => SeekFrom::Start(offset + self.offset),
            SeekFrom::End(offset) => {
                // Guard against end-relative seeks landing before the
                // window start.
                // NOTE(review): `length - offset` with a (typically
                // negative) End offset looks like it should be
                // `length + offset`; also assumes `length` is the absolute
                // end of the underlying stream — verify both.
                if (self.length as i64 - offset) < self.offset as i64 {
                    return Err(io::Error::new(
                        io::ErrorKind::InvalidInput,
                        "newpos would be < self.offset",
                    ));
                }
                pos
            }
            // Current-relative seeks pass through unchanged.
            _ => pos,
        };
        let newpos = self.stream.seek(pos)?;
        // Report the position relative to the window start.
        // NOTE(review): underflows if the stream position ends up before
        // `offset` (possible via SeekFrom::Current with a large negative
        // delta) — confirm callers cannot trigger this.
        Ok(newpos - self.offset)
    }
}
impl<R> MediaSource for Subfile<R>
where
    R: Read + Seek + Send + Sync,
{
    /// A subfile wraps a seekable stream, so it is always seekable.
    fn is_seekable(&self) -> bool {
        true
    }

    /// Reports the window length handed to `Subfile::new`.
    fn byte_len(&self) -> Option<u64> {
        self.length.into()
    }
}
|
use std::collections::HashMap;
use actix_web::{
web::{Data, Path, Query},
Responder,
};
use tracing::info;
use tracing::instrument;
use htsget_http::{get, Endpoint};
use htsget_search::htsget::HtsGet;
use crate::AppState;
use super::handle_response;
/// GET handler for the htsget `reads` endpoint.
///
/// Merges the query-string parameters with the `id` taken from the URL
/// path, then delegates to `htsget_http::get` with `Endpoint::Reads`.
#[instrument(skip(app_state))]
pub async fn reads<H: HtsGet + Send + Sync + 'static>(
    request: Query<HashMap<String, String>>,
    path: Path<String>,
    app_state: Data<AppState<H>>,
) -> impl Responder {
    let mut query = request.into_inner();
    // The path segment is the record id; it joins the query parameters.
    query.insert("id".to_string(), path.into_inner());
    info!(query = ?query, "reads endpoint GET request");
    let searcher = app_state.get_ref().htsget.clone();
    handle_response(get(searcher, query, Endpoint::Reads).await)
}
/// GET handler for the htsget `variants` endpoint.
///
/// Merges the query-string parameters with the `id` taken from the URL
/// path, then delegates to `htsget_http::get` with `Endpoint::Variants`.
#[instrument(skip(app_state))]
pub async fn variants<H: HtsGet + Send + Sync + 'static>(
    request: Query<HashMap<String, String>>,
    path: Path<String>,
    app_state: Data<AppState<H>>,
) -> impl Responder {
    let mut query = request.into_inner();
    // The path segment is the record id; it joins the query parameters.
    query.insert("id".to_string(), path.into_inner());
    info!(query = ?query, "variants endpoint GET request");
    let searcher = app_state.get_ref().htsget.clone();
    handle_response(get(searcher, query, Endpoint::Variants).await)
}
|
use super::request;
use std::fmt;
/// A textual key chord, e.g. "C-l" (Ctrl-l) or "A-p" (Alt-p).
/// An empty string means "unbound".
#[derive(Debug, PartialEq, Clone, Eq, Hash)]
pub struct KeyType(String);
impl fmt::Display for KeyType {
    /// Renders the chord exactly as stored (e.g. "C-l").
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.0)
    }
}
impl From<Key> for KeyType {
    /// Builds the textual chord for a decoded key, using the same
    /// "C-"/"A-"/"S-" prefix notation the default bindings use.
    fn from(k: Key) -> Self {
        use self::Mod::*;
        let prefix = match k.modifier {
            Ctrl => "C-",
            Alt => "A-",
            Shift => "S-",
            None => "",
        };
        let mut buf = String::from(prefix);
        match k.kind {
            KeyKind::Num(n) => buf.push_str(&n.to_string()),
            KeyKind::Char(ch) | KeyKind::Other(ch) => buf.push(ch),
            // Backspace/Enter/Tab have no chord text representation.
            _ => {}
        }
        KeyType(buf)
    }
}
impl From<String> for KeyType {
fn from(s: String) -> Self {
KeyType(s)
}
}
impl<'a> From<&'a str> for KeyType {
fn from(s: &'a str) -> Self {
KeyType(s.into())
}
}
/// Ordered list of (action, chord) pairs. A Vec is used instead of a map so
/// lookups in both directions stay simple and iteration order is stable.
#[derive(Debug, PartialEq, Clone)]
pub struct Keybinds(Vec<(KeyRequest, KeyType)>);
impl Keybinds {
    /// Resolves a chord to its bound request, if any.
    pub fn get(&self, key: &KeyType) -> Option<&KeyRequest> {
        // Unbound actions are stored with an empty chord; never match those.
        if key.0.is_empty() {
            return None;
        }
        self.0.iter().find(|(_, k)| k == key).map(|(req, _)| req)
    }
    /// Binds `key` to `req`, replacing any existing binding for `req`.
    pub fn insert(&mut self, key: impl Into<KeyType>, req: KeyRequest) {
        let key = key.into();
        let mut found = false;
        for (v, k) in &mut self.0 {
            if *v == req {
                *k = key.clone();
                found = true;
            }
        }
        // BUG FIX: previously a binding for a request with no existing
        // entry was silently dropped; append it instead.
        if !found {
            self.0.push((req, key));
        }
    }
    /// Finds the chord bound to `req` (first match wins).
    pub fn lookup(&self, req: KeyRequest) -> Option<&KeyType> {
        self.0.iter().find(|(r, _)| *r == req).map(|(_, key)| key)
    }
    /// Finds the request bound to `key` (first match wins). Unlike `get`,
    /// an empty chord is matched literally.
    pub fn lookup_key(&self, key: impl Into<KeyType>) -> Option<&KeyRequest> {
        let key = key.into();
        self.0.iter().find(|(_, k)| *k == key).map(|(req, _)| req)
    }
    /// Iterates over all (request, chord) pairs in insertion order.
    pub fn iter(&self) -> impl Iterator<Item = &(KeyRequest, KeyType)> {
        self.0.iter()
    }
}
impl Default for Keybinds {
fn default() -> Self {
let map = vec![
(KeyRequest::Clear, "C-l".into()),
(KeyRequest::RecallBackward, "A-p".into()),
(KeyRequest::RecallForward, "A-n".into()),
//
(KeyRequest::ToggleNickList, "A-k".into()),
//
(KeyRequest::MoveForward, "C-f".into()),
(KeyRequest::MoveBackward, "C-b".into()),
(KeyRequest::MoveForwardWord, "A-f".into()),
(KeyRequest::MoveBackwardWord, "A-b".into()),
(KeyRequest::MoveStart, "C-a".into()),
(KeyRequest::MoveEnd, "C-e".into()),
//
(KeyRequest::DeleteForward, "C-d".into()),
(KeyRequest::DeleteForwardWord, "A-d".into()),
(KeyRequest::DeleteBackwardWord, "C-w".into()),
(KeyRequest::DeleteBackward, "A-w".into()),
(KeyRequest::DeleteStart, "C-u".into()),
(KeyRequest::DeleteEnd, "C-k".into()),
//
(KeyRequest::SwapCaseForward, "".into()),
(KeyRequest::SwapCaseForwardWord, "A-u".into()),
(KeyRequest::SwapCaseBackwardWord, "".into()),
(KeyRequest::SwapCaseBackward, "".into()),
(KeyRequest::SwapCaseStart, "".into()),
(KeyRequest::SwapCaseEnd, "".into()),
//
(KeyRequest::PrevBuffer, "C-p".into()),
(KeyRequest::NextBuffer, "C-n".into()),
//
(KeyRequest::SwitchBuffer0, "C-0".into()),
(KeyRequest::SwitchBuffer1, "C-1".into()),
(KeyRequest::SwitchBuffer2, "C-2".into()),
(KeyRequest::SwitchBuffer3, "C-3".into()),
(KeyRequest::SwitchBuffer4, "C-4".into()),
(KeyRequest::SwitchBuffer5, "C-5".into()),
(KeyRequest::SwitchBuffer6, "C-6".into()),
(KeyRequest::SwitchBuffer7, "C-7".into()),
(KeyRequest::SwitchBuffer8, "C-8".into()),
(KeyRequest::SwitchBuffer9, "C-9".into()),
];
Self { 0: map }
}
}
// Every action is a distinct variant (fully enumerated, including the ten
// SwitchBufferN variants) so each one can appear individually in the
// user's config file.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum KeyRequest {
    Clear,
    ToggleNickList,
    PrevBuffer,
    NextBuffer,
    RecallBackward,
    RecallForward,
    // Cursor movement within the input line.
    MoveForward,
    MoveBackward,
    MoveForwardWord,
    MoveBackwardWord,
    MoveStart,
    MoveEnd,
    // Deletion within the input line.
    DeleteForward,
    DeleteBackward,
    DeleteForwardWord,
    DeleteBackwardWord,
    DeleteStart,
    DeleteEnd,
    // Case toggling within the input line.
    SwapCaseForward,
    SwapCaseBackward,
    SwapCaseForwardWord,
    SwapCaseBackwardWord,
    SwapCaseStart,
    SwapCaseEnd,
    // Direct buffer switching (0-9).
    SwitchBuffer0,
    SwitchBuffer1,
    SwitchBuffer2,
    SwitchBuffer3,
    SwitchBuffer4,
    SwitchBuffer5,
    SwitchBuffer6,
    SwitchBuffer7,
    SwitchBuffer8,
    SwitchBuffer9,
}
impl fmt::Display for KeyRequest {
    /// Formats the variant name as snake_case for the config file, e.g.
    /// `MoveForward` -> "move_forward", `SwitchBuffer0` -> "switch_buffer_0".
    /// Inverse of `KeyRequest::parse`'s `unsnakecase`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = format!("{:?}", self);
        let mut out = String::new();
        for (i, ch) in name.chars().enumerate() {
            // Insert a separator before every uppercase letter or digit,
            // except at the very start of the name.
            if i > 0 && (ch.is_numeric() || ch.is_uppercase()) {
                out.push('_');
            }
            out.push(ch.to_ascii_lowercase());
        }
        write!(f, "{}", out)
    }
}
impl KeyRequest {
    /// Parses a snake_case config name (e.g. "move_forward") back into its
    /// `KeyRequest` variant. Returns `None` for unknown names.
    /// Inverse of the `Display` impl above.
    pub fn parse(s: impl AsRef<str>) -> Option<Self> {
        use self::KeyRequest::*;
        // snake_case -> CamelCase: uppercase the first char and every char
        // that follows an underscore, dropping the underscores themselves.
        // (A leading underscore is kept verbatim, which simply fails the
        // match below.)
        fn unsnakecase(s: &str) -> String {
            let mut buf = String::new();
            let mut prev = false; // true when the previous char was '_'
            for (i, c) in s.chars().enumerate() {
                if i == 0 || prev {
                    buf.push(c.to_ascii_uppercase());
                    prev = false;
                    continue;
                }
                if c == '_' {
                    prev = true;
                    continue;
                }
                buf.push(c)
            }
            buf
        }
        // Exhaustive reverse lookup against the Debug/variant names.
        let res = match unsnakecase(s.as_ref()).as_str() {
            "Clear" => Clear,
            "RecallBackward" => RecallBackward,
            "RecallForward" => RecallForward,
            "ToggleNickList" => ToggleNickList,
            "PrevBuffer" => PrevBuffer,
            "NextBuffer" => NextBuffer,
            "MoveForward" => MoveForward,
            "MoveBackward" => MoveBackward,
            "MoveForwardWord" => MoveForwardWord,
            "MoveBackwardWord" => MoveBackwardWord,
            "MoveStart" => MoveStart,
            "MoveEnd" => MoveEnd,
            "DeleteForward" => DeleteForward,
            "DeleteBackward" => DeleteBackward,
            "DeleteForwardWord" => DeleteForwardWord,
            "DeleteBackwardWord" => DeleteBackwardWord,
            "DeleteStart" => DeleteStart,
            "DeleteEnd" => DeleteEnd,
            "SwapCaseForward" => SwapCaseForward,
            "SwapCaseBackward" => SwapCaseBackward,
            "SwapCaseForwardWord" => SwapCaseForwardWord,
            "SwapCaseBackwardWord" => SwapCaseBackwardWord,
            "SwapCaseStart" => SwapCaseStart,
            "SwapCaseEnd" => SwapCaseEnd,
            "SwitchBuffer0" => SwitchBuffer0,
            "SwitchBuffer1" => SwitchBuffer1,
            "SwitchBuffer2" => SwitchBuffer2,
            "SwitchBuffer3" => SwitchBuffer3,
            "SwitchBuffer4" => SwitchBuffer4,
            "SwitchBuffer5" => SwitchBuffer5,
            "SwitchBuffer6" => SwitchBuffer6,
            "SwitchBuffer7" => SwitchBuffer7,
            "SwitchBuffer8" => SwitchBuffer8,
            "SwitchBuffer9" => SwitchBuffer9,
            _ => return None,
        };
        Some(res)
    }
}
impl request::Request {
    /// Maps a key action to a message-queue `Request`, for actions handled
    /// at the application level (buffer switching, UI toggles).
    /// Returns `None` for actions that are input-line edits instead
    /// (those are handled by `request::Command::parse` below).
    pub fn parse(kr: KeyRequest) -> Option<Self> {
        use self::KeyRequest::*;
        use super::request::Request;
        // for msg queue requests
        let res = match kr {
            Clear => Request::Clear(true),
            ToggleNickList => Request::ToggleNickList,
            PrevBuffer => Request::PrevBuffer,
            NextBuffer => Request::NextBuffer,
            SwitchBuffer0 => Request::SwitchBuffer(0),
            SwitchBuffer1 => Request::SwitchBuffer(1),
            SwitchBuffer2 => Request::SwitchBuffer(2),
            SwitchBuffer3 => Request::SwitchBuffer(3),
            SwitchBuffer4 => Request::SwitchBuffer(4),
            SwitchBuffer5 => Request::SwitchBuffer(5),
            SwitchBuffer6 => Request::SwitchBuffer(6),
            SwitchBuffer7 => Request::SwitchBuffer(7),
            SwitchBuffer8 => Request::SwitchBuffer(8),
            SwitchBuffer9 => Request::SwitchBuffer(9),
            _ => return None,
        };
        Some(res)
    }
}
impl request::Command {
    /// Maps a key action to an input-line editing `Command`
    /// (movement, deletion, case swapping, history recall).
    /// Returns `None` for application-level actions
    /// (those are handled by `request::Request::parse` above).
    pub fn parse(kr: KeyRequest) -> Option<Self> {
        use self::KeyRequest::*;
        use super::request::Command::*;
        use super::request::Move::*;
        // for input commands
        let res = match kr {
            RecallForward => Recall(Forward),
            RecallBackward => Recall(Backward),
            MoveForward => Move(Forward),
            MoveBackward => Move(Backward),
            MoveForwardWord => Move(ForwardWord),
            MoveBackwardWord => Move(BackwardWord),
            MoveStart => Move(StartOfLine),
            MoveEnd => Move(EndOfLine),
            DeleteForward => Delete(Forward),
            DeleteBackward => Delete(Backward),
            DeleteForwardWord => Delete(ForwardWord),
            DeleteBackwardWord => Delete(BackwardWord),
            DeleteStart => Delete(StartOfLine),
            DeleteEnd => Delete(EndOfLine),
            SwapCaseForward => SwapCase(Forward),
            SwapCaseBackward => SwapCase(Backward),
            SwapCaseForwardWord => SwapCase(ForwardWord),
            SwapCaseBackwardWord => SwapCase(BackwardWord),
            SwapCaseStart => SwapCase(StartOfLine),
            SwapCaseEnd => SwapCase(EndOfLine),
            _ => return None,
        };
        Some(res)
    }
}
/// Modifier attached to a key press. At most one modifier is tracked per
/// key (see `KeyKind::new`).
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Mod {
    Ctrl,
    Alt,
    Shift,
    None,
}
/// A fully decoded key press: its modifier plus the base key.
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct Key {
    pub modifier: Mod,
    pub kind: KeyKind,
}
impl Key {
    /// Decodes a raw 16-bit key code into a `Key`, extracting the modifier
    /// as a side effect of `KeyKind::new`.
    pub fn parse(v: u16) -> Option<Self> {
        let mut modifier = Mod::None;
        KeyKind::new(v, &mut modifier).map(|kind| Key { modifier, kind })
    }
}
/// The base (unmodified) key of a key press.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum KeyKind {
    Backspace,
    Enter,
    Tab,
    Num(usize),  // digit keys 0-9
    Char(char),  // letter keys, stored lowercase
    Other(char), // TODO determine if Space (0x20) is a Char or an Other
}
#[rustfmt::skip]
impl KeyKind {
pub fn new(v: u16, m: &mut Mod) -> Option<KeyKind> {
use self::KeyKind::*;
let key = match v {
0xECF8 => { *m = Mod::Alt; Backspace }
0x007F => { *m = Mod::Ctrl; Backspace }
0x0008 => { *m = Mod::None; Backspace }
0xECEE => { *m = Mod::Alt; Enter }
0xED11 => { *m = Mod::Ctrl; Enter }
0x000A => { *m = Mod::None; Enter }
// Alt-tab won't work for .. obvious reasons
0xECE2 => { *m = Mod::Ctrl; Tab }
// this is a fake key
0xECED => { debug!("fake key, maybe not a tab");
*m = Mod::Shift; Tab }
0x0009 => { *m = Mod::None; Tab }
0xEC97...0xECA0 => { *m = Mod::Alt; Num((v - 0xEC97) as usize) }
0xED37...0xED40 => { *m = Mod::Ctrl; Num((v - 0xED37) as usize) }
0xECA1...0xECBA => { *m = Mod::Alt; Char(((v as u8) - 0x40) as char) },
0x0001...0x001A if v != 0x000A => { *m = Mod::Ctrl; Char(((v as u8) + 0x60) as char) },
0x0061...0x007A | 0x0040 => { *m = Mod::None; Char((v as u8) as char) },
0x0041...0x005A => { *m = Mod::Shift; Char(((v as u8) + 0x20) as char) },
_ => { *m = Mod::None; Other((v as u8) as char) },
};
eprintln!("0x{:04X} | {:>6} | {:?}, {:?}", v, v, m, key);
Some(key)
}
}
/// Function keys F1-F12.
/// NOTE(review): not produced by `KeyKind::new` in this file — presumably
/// decoded elsewhere or reserved for future use; confirm before removing.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum FKey {
    F1,
    F2,
    F3,
    F4,
    F5,
    F6,
    F7,
    F8,
    F9,
    F10,
    F11,
    F12,
}
|
extern crate url_serde;
extern crate serde_json;
use std::mem;
use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use chrono::prelude::*;
use sha2::{Sha256, Digest};
use byteorder::{BigEndian, WriteBytesExt};
use url::Url;
use futures::{Future, Stream};
use hyper::{Client, Chunk};
use tokio_core::reactor::Core;
// TODO: Remove iron from this module.
use iron::IronResult;
//
// Blockchain data types
//
/// The full node state: the block chain itself, the transactions waiting to
/// be mined into the next block, and the set of known peer nodes.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Blockchain {
    pub chain: Vec<Block>,
    pub current_transactions: Vec<Transaction>,
    pub nodes: HashSet<Node>,
}
// Create an initialized blockchain.
pub fn new_blockchain() -> Blockchain {
let mut bc = Blockchain { ..Default::default() };
// add genesis block
bc.new_block(100, Some(1));
bc
}
impl Default for Blockchain {
    /// An empty chain with no pending transactions and no known peers.
    /// Note: contains no genesis block — use `new_blockchain` for a usable
    /// instance.
    fn default() -> Blockchain {
        Blockchain {
            chain: vec![],
            current_transactions: vec![],
            nodes: HashSet::new(),
        }
    }
}
impl Blockchain {
// Creates a new Block and adds it to the chain
pub fn new_block(&mut self, proof: u64, previous_hash: Option<u64>) {
let previous_hash = previous_hash.unwrap_or_else(|| Blockchain::hash(self.last_block()));
let mut previous_transactions = Vec::new();
mem::swap(&mut self.current_transactions, &mut previous_transactions);
let block = Block {
index: self.chain.len() + 1,
timestamp: Utc::now(),
transactions: previous_transactions,
proof: proof,
previous_hash: previous_hash,
};
self.chain.push(block);
}
// Adds a new transaction to the list of transactions
pub fn new_transaction(&mut self, transaction: Transaction) -> usize {
self.current_transactions.push(transaction);
self.last_block().index + 1
}
// Hashes a Block
fn hash(block: &Block) -> u64 {
let mut s = DefaultHasher::new();
block.hash(&mut s);
s.finish()
}
// Returns the last Block in the chain
pub fn last_block(&self) -> &Block {
&self.chain[self.chain.len() - 1]
}
pub fn proof_of_work(last_proof: u64) -> u64 {
let mut proof: u64 = 0;
while Blockchain::valid_proof(last_proof, proof) == false {
proof += 1;
}
proof
}
fn valid_proof(last_proof: u64, proof: u64) -> bool {
let mut wtr = vec![];
wtr.write_u64::<BigEndian>(last_proof).unwrap();
wtr.write_u64::<BigEndian>(proof).unwrap();
let mut hasher = Sha256::default();
hasher.input(&wtr[..]);
hasher.result()[..2] == b"00"[..2]
}
// register a new node (idempotent)
pub fn register_node(&mut self, node: Node) {
self.nodes.insert(node);
}
// Consensus
// Determine if the passed in chain is valid.
pub fn valid_chain(chain: &Vec<Block>) -> bool {
for i in 1..chain.len() {
let last_block = &chain[i - 1];
let block = &chain[i];
println!("last_block: {:?}", last_block);
println!("block: {:?}", block);
// Check that the hash of the block is correct.
if block.previous_hash != Blockchain::hash(last_block) {
return false;
}
// Check that the Proof of Work is correct.
if !Blockchain::valid_proof(last_block.proof, block.proof) {
return false;
}
}
// If all checks pass, the chain is valid.
true
}
// Consensus algorithm, resolving conflicts by using the longest chain in
// the network. Performs network calls to all other known nodes.
pub fn resolve_conflicts(&mut self) -> IronResult<bool> {
let cur_len = self.chain.len();
let mut max_len = cur_len;
let mut core = itry!(Core::new());
let client = Client::new(&core.handle());
for node in self.nodes.iter() {
info!("calling node: {:?}", node);
let mut target = node.address.to_owned();
target.set_path("/chain");
let uri = itry!(target.into_string().parse());
let work = client.get(uri).and_then(|res| {
res.body().concat2().and_then(move |body: Chunk| {
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ChainResp {
chain: Vec<Block>,
}
// Error handling for passing is handled later.
Ok(serde_json::from_slice::<ChainResp>(&body))
})
});
let chain = itry!(itry!(core.run(work))).chain;
let new_len = chain.len();
if new_len > cur_len && Blockchain::valid_chain(&chain) {
debug!("Found a better chain of len {} from: {}", new_len, node.address);
max_len = new_len;
self.chain = chain;
}
}
info!("max_len: {}, cur_len: {}", max_len, cur_len);
Ok(max_len > cur_len)
}
}
/// A mined block: its position in the chain, the transactions it sealed,
/// the proof of work, and the hash of its predecessor.
#[derive(Hash, Debug, Clone, Serialize, Deserialize)]
pub struct Block {
    index: usize, // 1-based position in the chain
    timestamp: DateTime<Utc>,
    transactions: Vec<Transaction>,
    pub proof: u64,
    previous_hash: u64, // Blockchain::hash of the previous block (1 for genesis)
}
/// A single transfer of `amount` from `sender` to `recipient`.
#[derive(Hash, Debug, Clone, Serialize, Deserialize)]
pub struct Transaction {
    pub sender: String,
    pub recipient: String,
    pub amount: i64,
}
/// A peer node, identified by its base URL. Eq/Hash make the `nodes`
/// HashSet registration idempotent.
#[derive(Hash, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct Node {
    #[serde(with = "url_serde")]
    address: Url,
}
|
/*
 * Copyright 2019-2023 Didier Plaindoux
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#[cfg(test)]
mod tests {
    use celma_lang::meta::syntax::ASTParsec::{PChar, PIdent};
    use celma_lang::meta::token::First;
    use celma_lang::meta::token::Token::{AllAtom, Atom};
    // The FIRST set of an identifier parser is the unrestricted AllAtom.
    #[test]
    fn it_compute_first_for_ident() {
        assert_eq!(vec![AllAtom], PIdent(String::from("test")).first());
    }
    // The FIRST set of a single-character parser is exactly that character.
    #[test]
    fn it_compute_first_for_char() {
        assert_eq!(vec![Atom('a')], PChar('a').first());
    }
}
|
#![feature(in_band_lifetimes, cell_update)]
mod camera;
mod inspect;
mod render;
mod transform;
mod world;
use futures::executor::block_on;
use imgui::{im_str, ComboBox, Condition, FontSource, ImString};
use imgui_inspect::{InspectArgsStruct, InspectRenderStruct};
use inspect::IntoInspect;
use log::info;
use nalgebra::Matrix4;
use render::{
binding, frame, model, renderpass, state, texture,
traits::{DrawFramebuffer, DrawGrid, DrawLight, Vertex},
};
use winit::{
dpi::LogicalPosition,
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use anyhow::*;
/// GPU-side light description, copied verbatim into a uniform buffer.
#[repr(C)]
#[derive(Copy, Clone)]
struct Light {
    position: nalgebra::Vector3<f32>, // world-space position
    ty: f32, // light type selector — shader-side meaning; 0.0 used in `Engine::new`
    color: nalgebra::Vector3<f32>, // RGB color
}
// SAFETY: Light is #[repr(C)] and built only from f32 components
// (assuming nalgebra's Vector3<f32> is a plain [f32; 3], which holds for
// its array storage): 12 + 4 + 12 bytes with 4-byte alignment leaves no
// padding, so every bit pattern — including all-zeroes — is valid.
unsafe impl bytemuck::Pod for Light {}
unsafe impl bytemuck::Zeroable for Light {}
/// Per-frame camera uniforms, copied verbatim into a uniform buffer.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct Uniforms {
    view_position: nalgebra::Vector4<f32>, // camera eye (homogeneous)
    view_proj: nalgebra::Matrix4<f32>,     // combined view-projection from the camera
    view: nalgebra::Matrix4<f32>,          // view transform alone
}
// SAFETY: #[repr(C)] and composed solely of f32 vectors/matrices
// (4 + 16 + 16 floats, 4-byte alignment, no padding), so every bit
// pattern — including all-zeroes — is valid.
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
impl Uniforms {
fn new() -> Self {
Self {
view_position: nalgebra::zero(),
view_proj: nalgebra::Matrix4::identity(),
view: nalgebra::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &camera::Camera) {
self.view_position = camera.eye.to_homogeneous();
self.view_proj = camera.view_proj;
self.view = camera.view_transform().to_homogeneous();
}
}
/// All engine state: window, wgpu device state, pipelines, camera, GPU
/// buffers, loaded models, imgui integration and the ECS world.
struct Engine {
    window: Window,
    state: state::WgpuState,
    pipelines: render::Pipelines,
    camera: camera::Camera,
    camera_controller: camera::flycam::FlyCamController,
    // CPU copy of the camera uniforms plus its GPU buffer/bind group.
    uniforms: Uniforms,
    uniform_buffer: binding::Buffer,
    uniform_group: binding::BufferGroup,
    depth_texture: texture::Texture,
    obj_model: model::Model,
    // Light data plus its GPU buffer/bind group.
    light_buffer: binding::Buffer,
    light_group: binding::BufferGroup,
    light: Light,
    // Mouse tracking for the fly-cam (delta = current - last).
    last_mouse_pos: LogicalPosition<f64>,
    current_mouse_pos: LogicalPosition<f64>,
    mouse_pressed: bool,
    imgui: imgui::Context,
    imgui_renderer: imgui_wgpu::Renderer,
    last_cursor: Option<imgui::MouseCursor>,
    platform: imgui_winit_support::WinitPlatform,
    light_depth_map: texture::Texture,
    // Framebuffer used to visualize the depth texture in a corner viewport.
    framebuffer: frame::Framebuffer,
    layouts: render::Layouts,
    world: world::World,
    grid: render::grid::Grid,
}
impl Engine {
    /// Builds the whole engine for `window`: wgpu device/swapchain,
    /// bind-group layouts, camera, uniform/light buffers, the four render
    /// pipelines (forward, light, depth-debug, grid), models, imgui
    /// integration, and the initial world entities.
    async fn new(window: Window) -> Result<Self> {
        let state = state::WgpuState::new(&window, wgpu::TextureFormat::Bgra8UnormSrgb)
            .await
            .unwrap();
        info!("Wgpu initialized");
        // Bind-group layouts shared by the pipelines created below.
        let layouts = render::Layouts {
            material: render::material_layout(&state),
            uniforms: render::uniforms_layout(&state),
            light: render::light_layout(&state),
            frame: render::frame_layout(&state),
            grid: render::grid_layout(&state),
        };
        // Eye at (0, 5, 10) looking at the origin; 75° fov, 0.1..100 clip.
        let camera = camera::Camera::new(
            [0.0, 5.0, 10.0].into(),
            [0.0, 0.0, 0.0].into(),
            camera::projection::Projection::new(state.width(), state.height(), 75.0, 0.1, 100.0),
        );
        let camera_controller = camera::flycam::FlyCamController::new(4.0, 100.0);
        info!("Camera and controller initialized");
        let mut uniforms = Uniforms::new();
        uniforms.update_view_proj(&camera);
        let uniform_buffer = binding::Buffer::new_init(
            &state,
            "uniforms",
            &[uniforms],
            binding::BufferUsage::Uniform,
        );
        let uniform_group = binding::BufferGroup::from_buffer(
            &state,
            "uniforms",
            &layouts.uniforms,
            &[&uniform_buffer],
        );
        // Single white light near the origin (ty 0.0; shader-side meaning).
        let light = Light {
            position: [-0.25, 0.25, -0.25].into(),
            ty: 0.0,
            color: [1.0, 1.0, 1.0].into(),
        };
        let light_buffer =
            binding::Buffer::new_init(&state, "light", &[light], binding::BufferUsage::Uniform);
        let light_group =
            binding::BufferGroup::from_buffer(&state, "light", &layouts.light, &[&light_buffer]);
        let depth_texture = texture::Texture::create_depth_texture(&state, "depth_texture");
        // Pipeline layouts: which bind-group layouts each pipeline sees.
        let forward_layout = state.create_pipeline_layout(
            "forward",
            &[&layouts.material, &layouts.uniforms, &layouts.light],
        )?;
        let light_layout =
            state.create_pipeline_layout("light", &[&layouts.uniforms, &layouts.light])?;
        let depth_layout =
            state.create_pipeline_layout("depth", &[&layouts.frame, &layouts.uniforms])?;
        let grid_layout =
            state.create_pipeline_layout("grid", &[&layouts.uniforms, &layouts.grid])?;
        // Main scene pipeline: opaque, depth-tested, instanced models.
        let forward = state.create_render_pipeline(
            &forward_layout,
            "forward_pipeline",
            state.format(),
            wgpu::BlendDescriptor::REPLACE,
            wgpu::BlendDescriptor::REPLACE,
            (texture::Texture::DEPTH_FORMAT, true),
            &[model::ModelVertex::desc(), transform::InstanceRaw::desc()],
            "shader.vert.spv",
            "shader.frag.spv",
            true,
        )?;
        // Debug visualization of the light's position.
        let light_pipeline = state.create_render_pipeline(
            &light_layout,
            "light_pipeline",
            state.format(),
            wgpu::BlendDescriptor::REPLACE,
            wgpu::BlendDescriptor::REPLACE,
            (texture::Texture::DEPTH_FORMAT, true),
            &[model::ModelVertex::desc()],
            "light.vert.spv",
            "light.frag.spv",
            true,
        )?;
        // Fullscreen-quad pipeline used to display the depth texture.
        let depth_pipeline = state.create_render_pipeline(
            &depth_layout,
            "depth_pipeline",
            state.format(),
            wgpu::BlendDescriptor::REPLACE,
            wgpu::BlendDescriptor::REPLACE,
            None,
            &[frame::FrameVertex::desc()],
            "depth_frame.vert.spv",
            "depth_frame.frag.spv",
            true,
        )?;
        // Translucent editor grid: standard alpha blending, depth read-only.
        let grid_pipeline = state.create_render_pipeline(
            &grid_layout,
            "grid_pipeline",
            state.format(),
            wgpu::BlendDescriptor {
                src_factor: wgpu::BlendFactor::SrcAlpha,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            wgpu::BlendDescriptor {
                src_factor: wgpu::BlendFactor::SrcAlpha,
                dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                operation: wgpu::BlendOperation::Add,
            },
            (texture::Texture::DEPTH_FORMAT, false),
            &[render::grid::GridVertex::desc()],
            "grid.vert.spv",
            "grid.frag.spv",
            false,
        )?;
        let pipelines = render::Pipelines {
            forward,
            light: light_pipeline,
            depth: depth_pipeline,
            grid: grid_pipeline,
        };
        // Assets are staged into OUT_DIR/res by the build script.
        let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
        let obj_model = model::Model::load(&state, &layouts.material, res_dir.join("cube.obj"))?;
        // imgui setup: winit platform glue, fonts scaled for the monitor's
        // DPI, and the wgpu renderer backend.
        let mut imgui = imgui::Context::create();
        let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
        platform.attach_window(
            imgui.io_mut(),
            &window,
            imgui_winit_support::HiDpiMode::Default,
        );
        imgui.set_ini_filename(None);
        let hidpi_factor = window.scale_factor();
        let font_size = (13.0 * hidpi_factor) as f32;
        imgui.io_mut().font_global_scale = (1.0 / hidpi_factor) as f32;
        imgui.fonts().add_font(&[FontSource::DefaultFontData {
            config: Some(imgui::FontConfig {
                oversample_h: 1,
                pixel_snap_h: true,
                size_pixels: font_size,
                ..Default::default()
            }),
        }]);
        let imgui_renderer = imgui_wgpu::Renderer::new(
            &mut imgui,
            &state.device(),
            &state.queue(),
            imgui_wgpu::RendererConfig {
                texture_format: state.format(),
                ..Default::default()
            },
        );
        let light_depth_map = texture::Texture::create_depth_texture(&state, "light_depth_map");
        // Framebuffer wrapping the scene depth texture, for the debug view.
        let framebuffer = frame::Framebuffer::new(
            &state,
            "depth_framebuffer",
            &layouts.frame,
            &[&depth_texture],
        );
        // Seed the world with two entities: a cube at the origin and one
        // offset along -x.
        let mut world = world::World::new();
        let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
        world.load_model(&state, &layouts, "block", res_dir.join("cube.obj"))?;
        world.load_model(
            &state,
            &layouts,
            "pizza_box",
            res_dir.join("14037_Pizza_Box_v2_L1.obj"),
        )?;
        world.push_entity((
            world::ModelIdent("block".into()),
            transform::Transform::new(&state, "block_transform"),
        ))?;
        let mut transform = transform::Transform::new(&state, "block_transform");
        transform.set_position(nalgebra::Translation3::new(-2.5, 0.0, 0.0));
        world.push_entity((world::ModelIdent("block".into()), transform))?;
        world.update_collision_world();
        let grid = render::grid::Grid::new(&state, "grid", &layouts.grid);
        Ok(Self {
            window,
            state,
            pipelines,
            camera,
            camera_controller,
            uniforms,
            uniform_buffer,
            uniform_group,
            depth_texture,
            obj_model,
            light_buffer,
            light_group,
            light,
            last_mouse_pos: (0.0, 0.0).into(),
            current_mouse_pos: (0.0, 0.0).into(),
            mouse_pressed: false,
            imgui,
            imgui_renderer,
            last_cursor: None,
            platform,
            light_depth_map,
            framebuffer,
            layouts,
            world,
            grid,
        })
    }
    /// Current swapchain size in physical pixels.
    fn size(&self) -> winit::dpi::PhysicalSize<u32> {
        winit::dpi::PhysicalSize::<u32> {
            width: self.state.width(),
            height: self.state.height(),
        }
    }
    /// Handles a window resize: updates the camera aspect, recreates the
    /// swapchain and depth texture, and rewires the depth-debug framebuffer.
    fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        info!(
            "Resize from {:?} to {:?}",
            (self.state.width() as u32, self.state.height() as u32),
            (new_size.width as u32, new_size.height as u32)
        );
        self.camera.resize(new_size.width, new_size.height);
        self.state
            .recreate_swapchain(new_size.width, new_size.height);
        self.depth_texture = texture::Texture::create_depth_texture(&self.state, "depth_texture");
        self.framebuffer
            .update_textures(&self.state, &self.layouts.frame, &[&self.depth_texture]);
    }
    /// Routes window input to the camera controller and mouse tracking.
    /// Returns true when the event was consumed.
    fn input(&mut self, event: &WindowEvent) -> bool {
        match event {
            WindowEvent::KeyboardInput {
                input:
                    KeyboardInput {
                        virtual_keycode: Some(key),
                        state,
                        ..
                    },
                ..
            } => self.camera_controller.process_keyboard(*key, *state),
            /*WindowEvent::MouseWheel { delta, .. } => {
                self.camera_controller.process_scroll(delta);
                true
            }*/
            WindowEvent::MouseInput {
                button: MouseButton::Left,
                state,
                ..
            } => {
                self.mouse_pressed = *state == ElementState::Pressed;
                true
            }
            WindowEvent::CursorMoved { position, .. } => {
                self.current_mouse_pos = LogicalPosition {
                    x: position.to_logical::<f64>(self.state.width() as f64).x,
                    y: position.to_logical::<f64>(self.state.height() as f64).y,
                };
                true
            }
            _ => false,
        }
    }
    /// Per-frame simulation step: advances imgui's clock and refreshes the
    /// collision world.
    fn update(&mut self, dt: std::time::Duration) {
        /*let old_position: cgmath::Vector3<_> = self.light.position.into();
        self.light.position = cgmath::Quaternion::from_axis_angle(
            (0.0, 1.0, 0.0).into(),
            cgmath::Deg(60.0 * dt.as_secs_f32()),
        ) * old_position;
        self.light_buffer.write(&self.state, &[self.light]);*/
        self.imgui.io_mut().update_delta_time(dt);
        self.world.update_collision_world();
    }
    /// Renders one frame: builds the imgui UI (including the inspector for
    /// the entity under the mouse ray), updates the camera uniforms, then
    /// records three passes — the 3D scene (models + light + grid), the
    /// depth-texture debug viewport, and the imgui overlay — and submits.
    fn render(&mut self, dt: std::time::Duration) -> Result<(), wgpu::SwapChainError> {
        // Data captured for the UI closures below.
        struct UIData<'a> {
            entry: Option<legion::world::Entry<'a>>,
            models: Vec<String>,
        }
        let mut encoder = self.state.encoder();
        let sc = self.state.frame()?.output;
        // Pick the entity under the camera ray, if any, for the inspector.
        let raycast = self.world.raycast(&self.camera.ray(), 1024.0);
        let models = self
            .world
            .models
            .keys()
            .map(|m| m.0.clone())
            .collect::<Vec<_>>();
        let entry = if let Some(entity) = raycast {
            if let Some(entry) = self.world.entry(entity) {
                Some(entry)
            } else {
                None
            }
        } else {
            None
        };
        let ui_data = UIData { entry, models };
        let mut updated_transform = false;
        let ui = self.imgui.frame();
        {
            // Fullscreen, invisible host window showing FPS/mouse stats;
            // it also hosts the per-entity "Inspect" window.
            let window = imgui::Window::new(im_str!("Hello Imgui from WGPU!"));
            window
                .size(
                    [self.state.width() as f32, self.state.height() as f32],
                    Condition::Always,
                )
                .title_bar(false)
                .position([0.0, 0.0], Condition::Always)
                .draw_background(false)
                .menu_bar(false)
                .bring_to_front_on_focus(false)
                .mouse_inputs(false)
                .build(&ui, || {
                    ui.text(im_str!("FPS: {}", (1.0 / dt.as_secs_f32()).round()));
                    ui.separator();
                    let mouse_pos = ui.io().mouse_pos;
                    ui.text(im_str!(
                        "Mouse Position: ({:.1}, {:.1})",
                        mouse_pos[0],
                        mouse_pos[1],
                    ));
                    if ui_data.entry.is_some() {
                        let inspect_window = imgui::Window::new(im_str!("Inspect"));
                        inspect_window.always_auto_resize(true).build(&ui, || {
                            if let Some(mut entry) = ui_data.entry {
                                {
                                    // Editable transform widget; writes back
                                    // only when a field actually changed.
                                    let transform =
                                        entry.get_component_mut::<transform::Transform>().ok();
                                    if let Some(mut transform) = transform {
                                        let mut inspect = transform.into_inspect();
                                        let init_inspect = inspect.clone();
                                        <inspect::InspectTransform as InspectRenderStruct<
                                            inspect::InspectTransform,
                                        >>::render_mut(
                                            &mut [&mut inspect],
                                            "Transform",
                                            &ui,
                                            &InspectArgsStruct::default(),
                                        );
                                        if inspect != init_inspect {
                                            transform
                                                .set_position(inspect.position())
                                                .set_rotation(inspect.rotation())
                                                .set_scale(inspect.scale());
                                            updated_transform = true;
                                            transform.dirty = true;
                                        }
                                    }
                                }
                                {
                                    // Combo box to swap the entity's model.
                                    ui.text("Model");
                                    let model = entry.get_component_mut::<world::ModelIdent>().ok();
                                    if let Some(mut model) = model {
                                        let mut index = ui_data
                                            .models
                                            .iter()
                                            .enumerate()
                                            .find(|(_, m)| *m == &model.0)
                                            .map(|(i, _)| i)
                                            .expect("Must have model");
                                        let init = index;
                                        let imstrs = ui_data
                                            .models
                                            .iter()
                                            .map(|m| im_str!("{}", m))
                                            .collect::<Vec<_>>();
                                        ComboBox::new(im_str!("model")).build_simple(
                                            &ui,
                                            &mut index,
                                            imstrs.as_slice(),
                                            &|s: &ImString| s.into(),
                                        );
                                        if init != index {
                                            model.0 = ui_data.models[index].clone();
                                            updated_transform = true;
                                        }
                                    }
                                }
                            }
                        });
                    }
                });
        }
        if updated_transform {
            // raycast must be Some here: updated_transform is only set
            // inside the inspector, which requires a raycast hit.
            self.world
                .update_entity_world_transform(raycast.unwrap())
                .expect("Internal err")
        }
        // Fly-cam look: only while dragging and when imgui isn't using the
        // mouse.
        if self.mouse_pressed && !ui.io().want_capture_mouse {
            let mouse_dx = self.current_mouse_pos.x - self.last_mouse_pos.x;
            let mouse_dy = self.current_mouse_pos.y - self.last_mouse_pos.y;
            self.camera_controller.process_mouse(mouse_dx, mouse_dy);
        }
        self.camera_controller.update_camera(&mut self.camera, dt);
        self.last_mouse_pos = self.current_mouse_pos;
        self.uniforms.update_view_proj(&self.camera);
        self.uniform_buffer.write(&self.state, &[self.uniforms]);
        {
            // Pass 1: clear and draw the 3D scene (models, light marker,
            // grid) with depth testing.
            let color_attachments: &[&dyn renderpass::IntoColorAttachment] = &[&(
                &sc.view,
                wgpu::LoadOp::Clear(wgpu::Color {
                    r: 0.1,
                    g: 0.2,
                    b: 0.3,
                    a: 1.0,
                }),
            )];
            let depth_attachment: &dyn renderpass::IntoDepthAttachment =
                &(&self.depth_texture.view, wgpu::LoadOp::Clear(1.0));
            let mut render_pass =
                renderpass::render_pass(&mut encoder, color_attachments, depth_attachment);
            render_pass.set_pipeline(&self.pipelines.forward);
            self.world
                .render(
                    &self.state,
                    &mut render_pass,
                    &self.uniform_group,
                    &self.light_group,
                )
                .expect("Error rendering");
            render_pass.set_pipeline(&self.pipelines.light);
            render_pass.draw_light_model(&self.obj_model, &self.uniform_group, &self.light_group);
            render_pass.set_pipeline(&self.pipelines.grid);
            render_pass.draw_grid(&self.grid, &self.uniform_group);
        }
        {
            // Pass 2: blit the depth texture into a 200x200 debug viewport.
            let color_attachments: &[&dyn renderpass::IntoColorAttachment] =
                &[&(&sc.view, wgpu::LoadOp::Load)];
            let mut render_pass = renderpass::render_pass(&mut encoder, color_attachments, None);
            render_pass.set_viewport(0.0, 0.0, 200.0, 200.0, 0.0, 1.0);
            render_pass.set_pipeline(&self.pipelines.depth);
            render_pass.draw_framebuffer(&self.framebuffer, &self.uniform_group);
        }
        {
            // Pass 3: imgui overlay on top of everything.
            if self.last_cursor != ui.mouse_cursor() {
                self.last_cursor = ui.mouse_cursor();
                self.platform.prepare_render(&ui, &self.window);
            }
            let color_attachments: &[&dyn renderpass::IntoColorAttachment] =
                &[&(&sc.view, wgpu::LoadOp::Load)];
            let mut render_pass = renderpass::render_pass(&mut encoder, color_attachments, None);
            self.imgui_renderer
                .render(
                    ui.render(),
                    &self.state.queue(),
                    &self.state.device(),
                    &mut render_pass,
                )
                .expect("Failed to render UI!");
        }
        self.state.queue().submit(std::iter::once(encoder.finish()));
        Ok(())
    }
    #[allow(dead_code)]
    pub fn set_title(&self, title: &str) {
        self.window.set_title(title);
    }
    /// Asks winit for another redraw (called every MainEventsCleared).
    pub fn request_redraw(&self) {
        self.window.request_redraw();
    }
    pub fn window_id(&self) -> winit::window::WindowId {
        self.window.id()
    }
    /// Forwards a raw winit event to the imgui platform layer.
    /// NOTE(review): name looks like a typo for `imgui_event` — renaming
    /// would require updating callers.
    pub fn inmgui_event<T>(&mut self, event: &Event<T>) {
        self.platform
            .handle_event(self.imgui.io_mut(), &self.window, event)
    }
}
/// Application entry point: initializes logging, creates the window and the
/// engine, then drives the winit event loop until the user exits.
///
/// Fixes over the previous version: corrected the "intialized" typo in the
/// startup log message and replaced bare `unwrap()`s on fallible setup steps
/// with `expect`s that state what failed.
fn main() {
    simplelog::TermLogger::init(
        log::LevelFilter::Info,
        simplelog::Config::default(),
        simplelog::TerminalMode::Mixed,
    )
    .expect("failed to initialize terminal logger");
    let event_loop = EventLoop::new();
    let window = WindowBuilder::new()
        .with_title("Nodas engine")
        .build(&event_loop)
        .expect("failed to create window");
    info!("Window initialized");
    let mut engine = block_on(Engine::new(window)).expect("failed to initialize engine");
    let mut last_render_time = std::time::Instant::now();
    event_loop.run(move |event, _, control_flow| {
        match event {
            Event::RedrawRequested(_) => {
                // Compute the frame delta used by camera/physics updates.
                let now = std::time::Instant::now();
                let dt = now - last_render_time;
                last_render_time = now;
                engine.update(dt);
                match engine.render(dt) {
                    Ok(_) => {}
                    // Swap chain was lost: recreate it by resizing to the current size.
                    Err(wgpu::SwapChainError::Lost) => engine.resize(engine.size()),
                    // Out of GPU memory: nothing sensible left to do but quit.
                    Err(wgpu::SwapChainError::OutOfMemory) => *control_flow = ControlFlow::Exit,
                    // Other errors (Outdated, Timeout, ...) should resolve by the next frame.
                    Err(e) => eprintln!("{:?}", e),
                }
            }
            Event::MainEventsCleared => {
                // Redraw continuously instead of only on OS-driven invalidation.
                engine.request_redraw();
            }
            Event::WindowEvent {
                ref event,
                window_id,
            } if window_id == engine.window_id() => {
                // Let the engine consume input first; fall back to window handling.
                if !engine.input(event) {
                    match event {
                        WindowEvent::Resized(physical_size) => {
                            engine.resize(*physical_size);
                        }
                        WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
                            engine.resize(**new_inner_size);
                        }
                        WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
                        WindowEvent::KeyboardInput { input, .. } => match input {
                            KeyboardInput {
                                state: ElementState::Pressed,
                                virtual_keycode: Some(VirtualKeyCode::Escape),
                                ..
                            } => *control_flow = ControlFlow::Exit,
                            _ => {}
                        },
                        _ => {}
                    }
                }
            }
            _ => {}
        }
        // The imgui backend needs to see every event, even consumed ones.
        engine.inmgui_event(&event);
    });
}
|
mod q02asf1q;
use q02asf1q::gcd;
/// Demonstrates `gcd` by printing the greatest common divisor of 12 and 16.
fn main() {
    let divisor = gcd(12, 16);
    println!("greatest common divisor of 12 and 16 is {}", divisor);
}
|
use super::Provider;
use crate::{BoxFuture, Result};
/// Factory that asynchronously constructs [`Provider`] instances.
///
/// The `Send + Sync + 'static` bounds allow a factory to be shared across
/// threads and stored for the lifetime of the program.
pub trait ProviderFactory: Send + Sync + 'static {
    /// The concrete provider type this factory produces.
    type Provider: Provider;
    /// Asynchronously creates a new provider, returning a boxed future that
    /// resolves to the provider or an error.
    fn create_provider(&self) -> BoxFuture<'_, Result<Self::Provider>>;
}
|
//! # Storage initialization
//!
//! This modules initializes the storage, by inserting values into the node using the ICS26
//! interface.
//!
//! The initial values are taken from the configuration (see `config` module).
use std::str::FromStr;
use ibc::{
ics02_client::client_state::AnyClientState, ics02_client::client_type::ClientType,
ics02_client::context::ClientKeeper, ics07_tendermint::client_state::AllowUpdate,
ics07_tendermint::client_state::ClientState, ics24_host::identifier::ClientId, Height,
};
use tendermint::trust_threshold::TrustThresholdFraction;
use crate::config::{Client, Config};
/// Initializes the client keeper by registering every client declared in the
/// configuration.
pub fn init<T: ClientKeeper>(keeper: &mut T, config: &Config) {
    config
        .clients
        .iter()
        .for_each(|client| add_client(keeper, client, config));
}
/// Registers a single client with the keeper: stores its state (built from
/// the configuration) and marks it as a Tendermint client.
///
/// # Panics
///
/// Panics if the configured client id is invalid or if the keeper rejects
/// the stored state/type. The bare `unwrap()`s were replaced with `expect`s
/// so a failure identifies which store operation went wrong.
fn add_client<T: ClientKeeper>(keeper: &mut T, client: &Client, config: &Config) {
    let client_id = ClientId::from_str(&client.id)
        .unwrap_or_else(|_| panic!("Invalid client id: {}", &client.id));
    let client_state = new_client_state(config);
    keeper
        .store_client_state(client_id.clone(), client_state)
        .expect("failed to store client state");
    keeper
        .store_client_type(client_id, ClientType::Tendermint)
        .expect("failed to store client type");
}
/// Builds a Tendermint client state from the configuration.
///
/// Uses a fixed 30-day duration for the trusting/unbonding periods and max
/// clock drift, and a fixed initial height of (1, 1).
///
/// # Panics
///
/// Panics if the configured chain id fails to parse or the 1/3 trust
/// threshold is rejected (the latter is a constant and should never fail).
fn new_client_state(config: &Config) -> AnyClientState {
    // 30 days, applied uniformly to all duration-typed fields below.
    let duration = std::time::Duration::new(3600 * 24 * 30, 0);
    let height = Height::new(1, 1);
    let client_state = ClientState {
        // Fix: parse the chain id directly instead of cloning it through
        // `String::from(&config.chain_id)` first.
        chain_id: config.chain_id.parse().unwrap(),
        trusting_period: duration,
        trust_level: TrustThresholdFraction::new(1, 3).unwrap(),
        unbonding_period: duration,
        max_clock_drift: duration,
        frozen_height: height,
        latest_height: height,
        upgrade_path: vec![String::from("path")],
        allow_update: AllowUpdate {
            after_expiry: false,
            after_misbehaviour: false,
        },
    };
    AnyClientState::Tendermint(client_state)
}
|
// File used to re-export modules.
// This way all the algorithms are available as crate::algorithms::<name>
pub mod local_search;
pub mod copkmeans;
pub mod generational_genetic;
pub mod steady_genetic;
pub mod memetic;
pub mod multistart_local_search;
pub mod iterative_local_search;
pub mod simulated_annealing;
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// NOTE(review): this file is AutoRust-generated (see the crate attributes
/// above); prefer regenerating over hand-editing these models.
/// A REST API operation exposed by the resource provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<operation::Display>,
}
/// Companion types for [`Operation`].
pub mod operation {
    use super::*;
    /// Human-readable display metadata for an operation.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Display {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub provider: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub resource: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub operation: Option<String>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
    }
}
/// A list of [`Operation`]s returned by the provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
}
/// A Machine Learning workspace resource: the flattened ARM `Resource`
/// fields plus workspace-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Workspace {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// Properties of a Machine Learning workspace (associated resources,
/// networking flags, provisioning state, ...).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceProperties {
    #[serde(rename = "workspaceId", default, skip_serializing_if = "Option::is_none")]
    pub workspace_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<String>,
    #[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")]
    pub application_insights: Option<String>,
    #[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")]
    pub container_registry: Option<String>,
    #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<String>,
    #[serde(rename = "discoveryUrl", default, skip_serializing_if = "Option::is_none")]
    pub discovery_url: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<workspace_properties::ProvisioningState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub encryption: Option<EncryptionProperty>,
    #[serde(rename = "hbiWorkspace", default, skip_serializing_if = "Option::is_none")]
    pub hbi_workspace: Option<bool>,
    #[serde(rename = "serviceProvisionedResourceGroup", default, skip_serializing_if = "Option::is_none")]
    pub service_provisioned_resource_group: Option<String>,
    #[serde(rename = "privateLinkCount", default, skip_serializing_if = "Option::is_none")]
    pub private_link_count: Option<i32>,
    #[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
    pub image_build_compute: Option<String>,
    #[serde(rename = "allowPublicAccessWhenBehindVnet", default, skip_serializing_if = "Option::is_none")]
    pub allow_public_access_when_behind_vnet: Option<bool>,
    #[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
    pub public_network_access: Option<workspace_properties::PublicNetworkAccess>,
    #[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
    pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
    #[serde(rename = "sharedPrivateLinkResources", default, skip_serializing_if = "Vec::is_empty")]
    pub shared_private_link_resources: Vec<SharedPrivateLinkResource>,
    #[serde(rename = "notebookInfo", default, skip_serializing_if = "Option::is_none")]
    pub notebook_info: Option<NotebookResourceInfo>,
    #[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
    pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
    #[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub primary_user_assigned_identity: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "storageHnsEnabled", default, skip_serializing_if = "Option::is_none")]
    pub storage_hns_enabled: Option<bool>,
    #[serde(rename = "mlFlowTrackingUri", default, skip_serializing_if = "Option::is_none")]
    pub ml_flow_tracking_uri: Option<String>,
}
/// Enumerations used by [`WorkspaceProperties`].
pub mod workspace_properties {
    use super::*;
    /// Provisioning lifecycle state of the workspace.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Unknown,
        Updating,
        Creating,
        Deleting,
        Succeeded,
        Failed,
        Canceled,
    }
    /// Public network access setting for the workspace.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PublicNetworkAccess {
        Enabled,
        Disabled,
    }
}
/// Parameters for updating an existing workspace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspacePropertiesUpdateParameters>,
}
/// The subset of workspace properties that can be changed on update.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspacePropertiesUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
    pub friendly_name: Option<String>,
    #[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
    pub image_build_compute: Option<String>,
    #[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
    pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
    #[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub primary_user_assigned_identity: Option<String>,
    #[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
    pub public_network_access: Option<workspace_properties_update_parameters::PublicNetworkAccess>,
}
/// Enumerations used by [`WorkspacePropertiesUpdateParameters`].
pub mod workspace_properties_update_parameters {
    use super::*;
    /// Public network access setting for the workspace.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum PublicNetworkAccess {
        Enabled,
        Disabled,
    }
}
/// Name of a usage metric, with an optional localized variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
/// A usage measurement: current value against a limit for a metric.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
    pub aml_workspace_location: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<usage::Unit>,
    #[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
    pub current_value: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<UsageName>,
}
/// Enumerations used by [`Usage`].
pub mod usage {
    use super::*;
    /// Unit of the usage measurement.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
/// A paged list of [`Usage`] values.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListUsagesResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Usage>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Description of a virtual machine size (CPU/GPU/memory specs and pricing).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSize {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    #[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")]
    pub v_cp_us: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gpus: Option<i32>,
    #[serde(rename = "osVhdSizeMB", default, skip_serializing_if = "Option::is_none")]
    pub os_vhd_size_mb: Option<i32>,
    #[serde(rename = "maxResourceVolumeMB", default, skip_serializing_if = "Option::is_none")]
    pub max_resource_volume_mb: Option<i32>,
    #[serde(rename = "memoryGB", default, skip_serializing_if = "Option::is_none")]
    pub memory_gb: Option<f64>,
    #[serde(rename = "lowPriorityCapable", default, skip_serializing_if = "Option::is_none")]
    pub low_priority_capable: Option<bool>,
    #[serde(rename = "premiumIO", default, skip_serializing_if = "Option::is_none")]
    pub premium_io: Option<bool>,
    #[serde(rename = "estimatedVMPrices", default, skip_serializing_if = "Option::is_none")]
    pub estimated_vm_prices: Option<EstimatedVmPrices>,
    #[serde(rename = "supportedComputeTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_compute_types: Vec<String>,
}
/// Estimated prices for a VM size, in a single currency/unit of measure.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrices {
    #[serde(rename = "billingCurrency")]
    pub billing_currency: estimated_vm_prices::BillingCurrency,
    #[serde(rename = "unitOfMeasure")]
    pub unit_of_measure: estimated_vm_prices::UnitOfMeasure,
    pub values: Vec<EstimatedVmPrice>,
}
/// Enumerations used by [`EstimatedVmPrices`].
pub mod estimated_vm_prices {
    use super::*;
    /// Currency the prices are expressed in.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum BillingCurrency {
        #[serde(rename = "USD")]
        Usd,
    }
    /// Time unit the prices are expressed per.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UnitOfMeasure {
        OneHour,
    }
}
/// A single price point for an OS type and VM tier combination.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrice {
    #[serde(rename = "retailPrice")]
    pub retail_price: f64,
    #[serde(rename = "osType")]
    pub os_type: estimated_vm_price::OsType,
    #[serde(rename = "vmTier")]
    pub vm_tier: estimated_vm_price::VmTier,
}
/// Enumerations used by [`EstimatedVmPrice`].
pub mod estimated_vm_price {
    use super::*;
    /// Operating system the price applies to.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OsType {
        Linux,
        Windows,
    }
    /// Pricing tier of the VM.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum VmTier {
        Standard,
        LowPriority,
        Spot,
    }
}
/// A list of [`VirtualMachineSize`] entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSizeListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<VirtualMachineSize>,
}
/// A paged list of [`Workspace`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Workspace>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Base properties of a quota entry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaBaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<quota_base_properties::Unit>,
}
/// Enumerations used by [`QuotaBaseProperties`].
pub mod quota_base_properties {
    use super::*;
    /// Unit of the quota.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
/// Parameters for a quota update request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaUpdateParameters {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<QuotaBaseProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}
/// Per-category option payloads for a workspace diagnose request.
/// (Each field is a free-form JSON object; the schema is defined by the
/// service, not by this crate.)
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnoseRequestProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub udr: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub nsg: Option<serde_json::Value>,
    #[serde(rename = "resourceLock", default, skip_serializing_if = "Option::is_none")]
    pub resource_lock: Option<serde_json::Value>,
    #[serde(rename = "dnsResolution", default, skip_serializing_if = "Option::is_none")]
    pub dns_resolution: Option<serde_json::Value>,
    #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<serde_json::Value>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<serde_json::Value>,
    #[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")]
    pub container_registry: Option<serde_json::Value>,
    #[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")]
    pub application_insights: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub others: Option<serde_json::Value>,
}
/// Wrapper around [`DiagnoseRequestProperties`] for the diagnose call.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnoseWorkspaceParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<DiagnoseRequestProperties>,
}
/// A single diagnostic finding: code, severity level, and message.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnoseResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub level: Option<diagnose_result::Level>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Enumerations used by [`DiagnoseResult`].
pub mod diagnose_result {
    use super::*;
    /// Severity of a diagnostic finding.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Level {
        Warning,
        Error,
        Information,
    }
}
/// Response wrapper for a diagnose operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnoseResponseResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<diagnose_response_result::Value>,
}
/// Companion types for [`DiagnoseResponseResult`].
pub mod diagnose_response_result {
    use super::*;
    /// Diagnostic findings grouped per checked category.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Value {
        #[serde(rename = "userDefinedRouteResults", default, skip_serializing_if = "Vec::is_empty")]
        pub user_defined_route_results: Vec<DiagnoseResult>,
        #[serde(rename = "networkSecurityRuleResults", default, skip_serializing_if = "Vec::is_empty")]
        pub network_security_rule_results: Vec<DiagnoseResult>,
        #[serde(rename = "resourceLockResults", default, skip_serializing_if = "Vec::is_empty")]
        pub resource_lock_results: Vec<DiagnoseResult>,
        #[serde(rename = "dnsResolutionResults", default, skip_serializing_if = "Vec::is_empty")]
        pub dns_resolution_results: Vec<DiagnoseResult>,
        #[serde(rename = "storageAccountResults", default, skip_serializing_if = "Vec::is_empty")]
        pub storage_account_results: Vec<DiagnoseResult>,
        #[serde(rename = "keyVaultResults", default, skip_serializing_if = "Vec::is_empty")]
        pub key_vault_results: Vec<DiagnoseResult>,
        #[serde(rename = "containerRegistryResults", default, skip_serializing_if = "Vec::is_empty")]
        pub container_registry_results: Vec<DiagnoseResult>,
        #[serde(rename = "applicationInsightsResults", default, skip_serializing_if = "Vec::is_empty")]
        pub application_insights_results: Vec<DiagnoseResult>,
        #[serde(rename = "otherResults", default, skip_serializing_if = "Vec::is_empty")]
        pub other_results: Vec<DiagnoseResult>,
    }
}
/// A paged list of [`UpdateWorkspaceQuotas`] outcomes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotasResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<UpdateWorkspaceQuotas>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Outcome of a single workspace-quota update.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateWorkspaceQuotas {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<update_workspace_quotas::Unit>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<update_workspace_quotas::Status>,
}
/// Enumerations used by [`UpdateWorkspaceQuotas`].
pub mod update_workspace_quotas {
    use super::*;
    /// Unit of the quota.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
    /// Result status of the quota update.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Undefined,
        Success,
        Failure,
        InvalidQuotaBelowClusterMinimum,
        InvalidQuotaExceedsSubscriptionLimit,
        #[serde(rename = "InvalidVMFamilyName")]
        InvalidVmFamilyName,
        OperationNotSupportedForSku,
        OperationNotEnabledForRegion,
    }
}
/// Name of a resource, with an optional localized variant.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
    pub localized_value: Option<String>,
}
/// Quota assigned to a resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceQuota {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
    pub aml_workspace_location: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<ResourceName>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub limit: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub unit: Option<resource_quota::Unit>,
}
/// Enumerations used by [`ResourceQuota`].
pub mod resource_quota {
    use super::*;
    /// Unit of the quota.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Unit {
        Count,
    }
}
/// A paged list of [`ResourceQuota`] entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceQuotas {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ResourceQuota>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Managed identity attached to a resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Identity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<identity::Type>,
    #[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
    pub user_assigned_identities: Option<UserAssignedIdentities>,
}
/// Enumerations used by [`Identity`].
pub mod identity {
    use super::*;
    /// Kind of managed identity.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        SystemAssigned,
        #[serde(rename = "SystemAssigned,UserAssigned")]
        SystemAssignedUserAssigned,
        UserAssigned,
        None,
    }
}
/// Map of user-assigned identities (keys are resource ids; generated as an
/// empty struct — the schema carries no typed fields here).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentities {}
/// A single user-assigned identity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAssignedIdentity {
    #[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
    pub principal_id: Option<String>,
    #[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
    pub tenant_id: Option<String>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
}
/// A bare ARM resource id (required field).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceId {
    pub id: String,
}
/// Keys and credentials associated with a workspace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListWorkspaceKeysResult {
    #[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
    pub user_storage_key: Option<String>,
    #[serde(rename = "userStorageResourceId", default, skip_serializing_if = "Option::is_none")]
    pub user_storage_resource_id: Option<String>,
    #[serde(rename = "appInsightsInstrumentationKey", default, skip_serializing_if = "Option::is_none")]
    pub app_insights_instrumentation_key: Option<String>,
    #[serde(rename = "containerRegistryCredentials", default, skip_serializing_if = "Option::is_none")]
    pub container_registry_credentials: Option<RegistryListCredentialsResult>,
    #[serde(rename = "notebookAccessKeys", default, skip_serializing_if = "Option::is_none")]
    pub notebook_access_keys: Option<ListNotebookKeysResult>,
}
/// Access token for the workspace notebook endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotebookAccessTokenResult {
    #[serde(rename = "notebookResourceId", default, skip_serializing_if = "Option::is_none")]
    pub notebook_resource_id: Option<String>,
    #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
    pub host_name: Option<String>,
    #[serde(rename = "publicDns", default, skip_serializing_if = "Option::is_none")]
    pub public_dns: Option<String>,
    #[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
    pub access_token: Option<String>,
    #[serde(rename = "tokenType", default, skip_serializing_if = "Option::is_none")]
    pub token_type: Option<String>,
    #[serde(rename = "expiresIn", default, skip_serializing_if = "Option::is_none")]
    pub expires_in: Option<i32>,
    #[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
    pub refresh_token: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
}
/// Credentials for a container registry: username plus passwords.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegistryListCredentialsResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub passwords: Vec<Password>,
}
/// A named password entry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Password {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// A paged list of [`ComputeResource`] entries.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaginatedComputeResourcesList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ComputeResource>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A compute resource attached to a workspace; the compute-specific payload
/// is kept as untyped JSON via the flattened `serde_json::Value`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// Base fields shared by every compute type (`computeType` is required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Compute {
    #[serde(rename = "computeType")]
    pub compute_type: ComputeType,
    #[serde(rename = "computeLocation", default, skip_serializing_if = "Option::is_none")]
    pub compute_location: Option<String>,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<compute::ProvisioningState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
    pub created_on: Option<String>,
    #[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")]
    pub modified_on: Option<String>,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    #[serde(rename = "provisioningErrors", default, skip_serializing_if = "Vec::is_empty")]
    pub provisioning_errors: Vec<ErrorResponse>,
    #[serde(rename = "isAttachedCompute", default, skip_serializing_if = "Option::is_none")]
    pub is_attached_compute: Option<bool>,
    #[serde(rename = "disableLocalAuth", default, skip_serializing_if = "Option::is_none")]
    pub disable_local_auth: Option<bool>,
}
/// Enumerations used by [`Compute`].
pub mod compute {
    use super::*;
    /// Provisioning lifecycle state of the compute.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ProvisioningState {
        Unknown,
        Updating,
        Creating,
        Deleting,
        Succeeded,
        Failed,
        Canceled,
    }
}
/// An AKS compute: base [`Compute`] fields plus an untyped JSON payload.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Aks {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// A Kubernetes compute: base [`Compute`] fields plus the typed schema.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Kubernetes {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub kubernetes_schema: KubernetesSchema,
}
/// Wrapper holding the optional [`KubernetesProperties`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KubernetesSchema {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<KubernetesProperties>,
}
/// Connection and configuration details of a Kubernetes compute.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KubernetesProperties {
    #[serde(rename = "relayConnectionString", default, skip_serializing_if = "Option::is_none")]
    pub relay_connection_string: Option<String>,
    #[serde(rename = "serviceBusConnectionString", default, skip_serializing_if = "Option::is_none")]
    pub service_bus_connection_string: Option<String>,
    #[serde(rename = "extensionPrincipalId", default, skip_serializing_if = "Option::is_none")]
    pub extension_principal_id: Option<String>,
    #[serde(rename = "extensionInstanceReleaseTrain", default, skip_serializing_if = "Option::is_none")]
    pub extension_instance_release_train: Option<String>,
    #[serde(rename = "vcName", default, skip_serializing_if = "Option::is_none")]
    pub vc_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    #[serde(rename = "defaultInstanceType", default, skip_serializing_if = "Option::is_none")]
    pub default_instance_type: Option<String>,
    #[serde(rename = "instanceTypes", default, skip_serializing_if = "Option::is_none")]
    pub instance_types: Option<serde_json::Value>,
}
/// Properties of an AML compute cluster (VM size, scaling, networking).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeProperties {
    #[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
    pub os_type: Option<aml_compute_properties::OsType>,
    #[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
    pub vm_size: Option<String>,
    #[serde(rename = "vmPriority", default, skip_serializing_if = "Option::is_none")]
    pub vm_priority: Option<aml_compute_properties::VmPriority>,
    #[serde(rename = "virtualMachineImage", default, skip_serializing_if = "Option::is_none")]
    pub virtual_machine_image: Option<VirtualMachineImage>,
    #[serde(rename = "isolatedNetwork", default, skip_serializing_if = "Option::is_none")]
    pub isolated_network: Option<bool>,
    #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")]
    pub scale_settings: Option<ScaleSettings>,
    #[serde(rename = "userAccountCredentials", default, skip_serializing_if = "Option::is_none")]
    pub user_account_credentials: Option<UserAccountCredentials>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<ResourceId>,
    #[serde(rename = "remoteLoginPortPublicAccess", default, skip_serializing_if = "Option::is_none")]
    pub remote_login_port_public_access: Option<aml_compute_properties::RemoteLoginPortPublicAccess>,
    #[serde(rename = "allocationState", default, skip_serializing_if = "Option::is_none")]
    pub allocation_state: Option<aml_compute_properties::AllocationState>,
    #[serde(rename = "allocationStateTransitionTime", default, skip_serializing_if = "Option::is_none")]
    pub allocation_state_transition_time: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub errors: Vec<ErrorResponse>,
    #[serde(rename = "currentNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub current_node_count: Option<i32>,
    #[serde(rename = "targetNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub target_node_count: Option<i32>,
    #[serde(rename = "nodeStateCounts", default, skip_serializing_if = "Option::is_none")]
    pub node_state_counts: Option<NodeStateCounts>,
    #[serde(rename = "enableNodePublicIp", default, skip_serializing_if = "Option::is_none")]
    pub enable_node_public_ip: Option<bool>,
}
/// Enumerations used by [`AmlComputeProperties`].
pub mod aml_compute_properties {
    use super::*;
    /// Operating system of the compute nodes.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OsType {
        Linux,
        Windows,
    }
    /// Priority tier of the VMs.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum VmPriority {
        Dedicated,
        LowPriority,
    }
    /// Whether the remote-login port is publicly accessible.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RemoteLoginPortPublicAccess {
        Enabled,
        Disabled,
        NotSpecified,
    }
    /// Allocation state of the cluster.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AllocationState {
        Steady,
        Resizing,
    }
}
/// An AML compute: base [`Compute`] fields plus an untyped JSON payload.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlCompute {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Properties of a single-user compute instance: VM size, networking,
/// SSH settings, installed applications, creator, state and the last
/// lifecycle operation. All optional fields are omitted from JSON when
/// `None`; `Vec` fields are omitted when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceProperties {
    #[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
    pub vm_size: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subnet: Option<ResourceId>,
    #[serde(rename = "applicationSharingPolicy", default, skip_serializing_if = "Option::is_none")]
    pub application_sharing_policy: Option<compute_instance_properties::ApplicationSharingPolicy>,
    #[serde(rename = "sshSettings", default, skip_serializing_if = "Option::is_none")]
    pub ssh_settings: Option<ComputeInstanceSshSettings>,
    #[serde(rename = "connectivityEndpoints", default, skip_serializing_if = "Option::is_none")]
    pub connectivity_endpoints: Option<ComputeInstanceConnectivityEndpoints>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub applications: Vec<ComputeInstanceApplication>,
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<ComputeInstanceCreatedBy>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub errors: Vec<ErrorResponse>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<ComputeInstanceState>,
    #[serde(rename = "computeInstanceAuthorizationType", default, skip_serializing_if = "Option::is_none")]
    pub compute_instance_authorization_type: Option<compute_instance_properties::ComputeInstanceAuthorizationType>,
    #[serde(rename = "personalComputeInstanceSettings", default, skip_serializing_if = "Option::is_none")]
    pub personal_compute_instance_settings: Option<PersonalComputeInstanceSettings>,
    #[serde(rename = "setupScripts", default, skip_serializing_if = "Option::is_none")]
    pub setup_scripts: Option<SetupScripts>,
    #[serde(rename = "lastOperation", default, skip_serializing_if = "Option::is_none")]
    pub last_operation: Option<ComputeInstanceLastOperation>,
}
/// Enum types referenced by [`ComputeInstanceProperties`].
pub mod compute_instance_properties {
    use super::*;
    /// Whether applications on the instance are private to the creator
    /// or shared. Variants serialize under their Rust names.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ApplicationSharingPolicy {
        Personal,
        Shared,
    }
    /// Authorization mode; the wire value is lowercase `"personal"`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ComputeInstanceAuthorizationType {
        #[serde(rename = "personal")]
        Personal,
    }
}
/// A compute-instance compute target; base `Compute` fields flattened,
/// remaining wire properties captured in `serde_json_value`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstance {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// A virtual-machine compute target; same flatten/catch-all layout as
/// the other compute variants in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachine {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Connection properties for an attached HDInsight cluster: SSH port,
/// address and administrator credentials. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsightProperties {
    #[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")]
    pub ssh_port: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub address: Option<String>,
    #[serde(rename = "administratorAccount", default, skip_serializing_if = "Option::is_none")]
    pub administrator_account: Option<VirtualMachineSshCredentials>,
}
/// An HDInsight compute target; base `Compute` fields flattened,
/// unmodeled wire properties captured in `serde_json_value`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsight {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// A Data Factory compute target. Unlike the sibling compute variants
/// it carries no extra catch-all value — only the base `Compute` fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFactory {
    #[serde(flatten)]
    pub compute: Compute,
}
/// Connection properties for an attached Databricks workspace.
/// NOTE(review): `databricksAccessToken` is a credential — avoid logging
/// values of this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksProperties {
    #[serde(rename = "databricksAccessToken", default, skip_serializing_if = "Option::is_none")]
    pub databricks_access_token: Option<String>,
    #[serde(rename = "workspaceUrl", default, skip_serializing_if = "Option::is_none")]
    pub workspace_url: Option<String>,
}
/// A Databricks compute target; flatten/catch-all layout as above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Databricks {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// A Data Lake Analytics compute target; flatten/catch-all layout as above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeAnalytics {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// A Synapse Spark pool compute target. Base `Compute` fields are
/// flattened; pool-specific settings live under `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SynapseSpark {
    #[serde(flatten)]
    pub compute: Compute,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<synapse_spark::Properties>,
}
/// Nested property type for [`SynapseSpark`].
pub mod synapse_spark {
    use super::*;
    /// Spark pool configuration: scaling/pausing behavior, node sizing
    /// and the identifiers locating the pool. All fields optional.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Properties {
        #[serde(rename = "autoScaleProperties", default, skip_serializing_if = "Option::is_none")]
        pub auto_scale_properties: Option<AutoScaleProperties>,
        #[serde(rename = "autoPauseProperties", default, skip_serializing_if = "Option::is_none")]
        pub auto_pause_properties: Option<AutoPauseProperties>,
        #[serde(rename = "sparkVersion", default, skip_serializing_if = "Option::is_none")]
        pub spark_version: Option<String>,
        #[serde(rename = "nodeCount", default, skip_serializing_if = "Option::is_none")]
        pub node_count: Option<i32>,
        #[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")]
        pub node_size: Option<String>,
        #[serde(rename = "nodeSizeFamily", default, skip_serializing_if = "Option::is_none")]
        pub node_size_family: Option<String>,
        #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
        pub subscription_id: Option<String>,
        #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")]
        pub resource_group: Option<String>,
        #[serde(rename = "workspaceName", default, skip_serializing_if = "Option::is_none")]
        pub workspace_name: Option<String>,
        #[serde(rename = "poolName", default, skip_serializing_if = "Option::is_none")]
        pub pool_name: Option<String>,
    }
}
/// Service-principal credentials (client id + secret). Both fields are
/// required on the wire. NOTE(review): `client_secret` is a credential —
/// avoid logging values of this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicePrincipalCredentials {
    #[serde(rename = "clientId")]
    pub client_id: String,
    #[serde(rename = "clientSecret")]
    pub client_secret: String,
}
/// A system service running on a compute resource: its type, public IP
/// and version. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemService {
    #[serde(rename = "systemServiceType", default, skip_serializing_if = "Option::is_none")]
    pub system_service_type: Option<String>,
    #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// TLS/SSL configuration for scoring endpoints: certificate material,
/// CNAME / leaf domain label and overwrite flag. NOTE(review): `key`
/// presumably holds private-key PEM data — treat as a secret.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SslConfiguration {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<ssl_configuration::Status>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cert: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cname: Option<String>,
    #[serde(rename = "leafDomainLabel", default, skip_serializing_if = "Option::is_none")]
    pub leaf_domain_label: Option<String>,
    #[serde(rename = "overwriteExistingDomain", default, skip_serializing_if = "Option::is_none")]
    pub overwrite_existing_domain: Option<bool>,
}
/// Enum types referenced by [`SslConfiguration`].
pub mod ssl_configuration {
    use super::*;
    /// SSL enablement state; `Auto` presumably lets the service manage
    /// the certificate — confirm against the REST API docs.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Disabled,
        Enabled,
        Auto,
    }
}
/// Advanced networking settings for an AKS compute target: subnet and
/// the CIDR/IP values used by Kubernetes networking. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksNetworkingConfiguration {
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
    #[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")]
    pub service_cidr: Option<String>,
    #[serde(rename = "dnsServiceIP", default, skip_serializing_if = "Option::is_none")]
    pub dns_service_ip: Option<String>,
    #[serde(rename = "dockerBridgeCidr", default, skip_serializing_if = "Option::is_none")]
    pub docker_bridge_cidr: Option<String>,
}
/// Admin account for cluster nodes. `adminUserName` is required; the
/// SSH key and password are optional alternatives. NOTE(review):
/// `admin_user_password` is a credential — avoid logging this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAccountCredentials {
    #[serde(rename = "adminUserName")]
    pub admin_user_name: String,
    #[serde(rename = "adminUserSshPublicKey", default, skip_serializing_if = "Option::is_none")]
    pub admin_user_ssh_public_key: Option<String>,
    #[serde(rename = "adminUserPassword", default, skip_serializing_if = "Option::is_none")]
    pub admin_user_password: Option<String>,
}
/// Cluster scale settings. `maxNodeCount` is required; min count and
/// idle-before-scale-down interval (a duration string) are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScaleSettings {
    #[serde(rename = "maxNodeCount")]
    pub max_node_count: i32,
    #[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub min_node_count: Option<i32>,
    // Presumably an ISO-8601 duration string — confirm against the API.
    #[serde(rename = "nodeIdleTimeBeforeScaleDown", default, skip_serializing_if = "Option::is_none")]
    pub node_idle_time_before_scale_down: Option<String>,
}
/// Reference to a VM image by resource id (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImage {
    pub id: String,
}
/// Per-state node counts for a compute cluster. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NodeStateCounts {
    #[serde(rename = "idleNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub idle_node_count: Option<i32>,
    #[serde(rename = "runningNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub running_node_count: Option<i32>,
    #[serde(rename = "preparingNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub preparing_node_count: Option<i32>,
    #[serde(rename = "unusableNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub unusable_node_count: Option<i32>,
    #[serde(rename = "leavingNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub leaving_node_count: Option<i32>,
    #[serde(rename = "preemptedNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub preempted_node_count: Option<i32>,
}
/// Wrapper carrying [`ScaleSettings`] in update payloads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScaleSettingsInformation {
    #[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")]
    pub scale_settings: Option<ScaleSettings>,
}
/// Properties body of a cluster-update request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClusterUpdateProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ScaleSettingsInformation>,
}
/// Top-level cluster-update request envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClusterUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ClusterUpdateProperties>,
}
/// Paged list of compute-node details; `nextLink` points to the next
/// page when present.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeNodesInformation {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub nodes: Vec<AmlComputeNodeInformation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Details of a single compute node: id, addresses, state and the run
/// currently assigned to it. All fields optional. Note `port` is `f64`
/// as generated from the API schema, not an integer type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeNodeInformation {
    #[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")]
    pub node_id: Option<String>,
    #[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
    #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<f64>,
    #[serde(rename = "nodeState", default, skip_serializing_if = "Option::is_none")]
    pub node_state: Option<aml_compute_node_information::NodeState>,
    #[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
    pub run_id: Option<String>,
}
/// Enum types referenced by [`AmlComputeNodeInformation`].
pub mod aml_compute_node_information {
    use super::*;
    /// Node lifecycle state; wire values are lowercase.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum NodeState {
        #[serde(rename = "idle")]
        Idle,
        #[serde(rename = "running")]
        Running,
        #[serde(rename = "preparing")]
        Preparing,
        #[serde(rename = "unusable")]
        Unusable,
        #[serde(rename = "leaving")]
        Leaving,
        #[serde(rename = "preempted")]
        Preempted,
    }
}
/// SSH credentials for a virtual machine: username plus password and/or
/// key pair. NOTE(review): `password` and `private_key_data` are
/// secrets — avoid logging values of this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSshCredentials {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "publicKeyData", default, skip_serializing_if = "Option::is_none")]
    pub public_key_data: Option<String>,
    #[serde(rename = "privateKeyData", default, skip_serializing_if = "Option::is_none")]
    pub private_key_data: Option<String>,
}
/// Base type for compute secrets; the required `computeType` acts as
/// the discriminator flattened into the concrete secret types below.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeSecrets {
    #[serde(rename = "computeType")]
    pub compute_type: ComputeType,
}
/// Secrets specific to an AKS compute: kubeconfigs and the image-pull
/// secret name. NOTE(review): kubeconfig contents are credentials —
/// avoid logging values of this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksComputeSecretsProperties {
    #[serde(rename = "userKubeConfig", default, skip_serializing_if = "Option::is_none")]
    pub user_kube_config: Option<String>,
    #[serde(rename = "adminKubeConfig", default, skip_serializing_if = "Option::is_none")]
    pub admin_kube_config: Option<String>,
    #[serde(rename = "imagePullSecretName", default, skip_serializing_if = "Option::is_none")]
    pub image_pull_secret_name: Option<String>,
}
/// AKS secrets payload: base [`ComputeSecrets`] plus the AKS-specific
/// properties, both flattened into one JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksComputeSecrets {
    #[serde(flatten)]
    pub compute_secrets: ComputeSecrets,
    #[serde(flatten)]
    pub aks_compute_secrets_properties: AksComputeSecretsProperties,
}
/// VM secrets payload; unmodeled properties captured in `serde_json_value`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSecrets {
    #[serde(flatten)]
    pub compute_secrets: ComputeSecrets,
    #[serde(flatten)]
    pub serde_json_value: serde_json::Value,
}
/// Databricks secret: the access token. NOTE(review): credential —
/// avoid logging values of this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksComputeSecretsProperties {
    #[serde(rename = "databricksAccessToken", default, skip_serializing_if = "Option::is_none")]
    pub databricks_access_token: Option<String>,
}
/// Databricks secrets payload: base plus properties, flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksComputeSecrets {
    #[serde(flatten)]
    pub compute_secrets: ComputeSecrets,
    #[serde(flatten)]
    pub databricks_compute_secrets_properties: DatabricksComputeSecretsProperties,
}
/// Discriminator for the kind of compute target. Most variants
/// serialize under their Rust names; `AKS` and `HDInsight` keep the
/// service's capitalization via explicit renames.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
    #[serde(rename = "AKS")]
    Aks,
    Kubernetes,
    AmlCompute,
    ComputeInstance,
    DataFactory,
    VirtualMachine,
    #[serde(rename = "HDInsight")]
    HdInsight,
    Databricks,
    DataLakeAnalytics,
    SynapseSpark,
}
/// SKU of a resource: name and tier, both optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
}
/// List result of private-endpoint connections. No paging link here,
/// unlike the paginated list types elsewhere in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PrivateEndpointConnection>,
}
/// A private-endpoint connection on the workspace: flattened base
/// `Resource` fields plus connection properties and common ARM
/// envelope fields (identity, location, tags, sku, systemData).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateEndpointConnectionProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// Connection properties: the endpoint reference, the (required)
/// approval state of the connection, and provisioning state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
    #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub private_endpoint: Option<PrivateEndpoint>,
    #[serde(rename = "privateLinkServiceConnectionState")]
    pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
/// Reference to a private endpoint resource and its subnet ARM id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "subnetArmId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_arm_id: Option<String>,
}
/// Approval state of a private-link connection, with a free-text
/// description and any actions required from the consumer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<PrivateEndpointServiceConnectionStatus>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
    pub actions_required: Option<String>,
}
/// Approval status values for a private-endpoint service connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
    Pending,
    Approved,
    Rejected,
    Disconnected,
    Timeout,
}
/// Provisioning state of a private-endpoint connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
    Succeeded,
    Creating,
    Deleting,
    Failed,
}
/// List result of private-link resources (no paging link).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PrivateLinkResource>,
}
/// A private-link resource: flattened base `Resource` fields plus
/// link-specific properties and common ARM envelope fields — same
/// shape as [`PrivateEndpointConnection`] apart from `properties`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateLinkResourceProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<Identity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
    pub system_data: Option<SystemData>,
}
/// Private-link resource properties: group id plus required member and
/// DNS-zone names. Vec fields are omitted from JSON when empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
    #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
    pub group_id: Option<String>,
    #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
    pub required_members: Vec<String>,
    #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
    pub required_zone_names: Vec<String>,
}
/// A shared private-link resource attached to the workspace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedPrivateLinkResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SharedPrivateLinkResourceProperty>,
}
/// Properties of a shared private-link resource: target resource id,
/// group id, the request message sent to the owner, and approval status.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedPrivateLinkResourceProperty {
    #[serde(rename = "privateLinkResourceId", default, skip_serializing_if = "Option::is_none")]
    pub private_link_resource_id: Option<String>,
    #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
    pub group_id: Option<String>,
    #[serde(rename = "requestMessage", default, skip_serializing_if = "Option::is_none")]
    pub request_message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<PrivateEndpointServiceConnectionStatus>,
}
/// Customer-managed-key (CMK) encryption settings. `status` and
/// `keyVaultProperties` are required; the identity used to access the
/// key vault is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProperty {
    pub status: encryption_property::Status,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub identity: Option<IdentityForCmk>,
    #[serde(rename = "keyVaultProperties")]
    pub key_vault_properties: KeyVaultProperties,
}
/// Enum types referenced by [`EncryptionProperty`].
pub mod encryption_property {
    use super::*;
    /// Whether CMK encryption is enabled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Enabled,
        Disabled,
    }
}
/// Key Vault reference for CMK: vault ARM id and key identifier are
/// required; the client id of the identity used is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultProperties {
    #[serde(rename = "keyVaultArmId")]
    pub key_vault_arm_id: String,
    #[serde(rename = "keyIdentifier")]
    pub key_identifier: String,
    #[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")]
    pub identity_client_id: Option<String>,
}
/// User-assigned identity used for customer-managed-key access.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityForCmk {
    #[serde(rename = "userAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
    pub user_assigned_identity: Option<String>,
}
/// Resource requests/limits for a container: CPU, memory (GB), GPU and
/// FPGA counts. All fields optional; CPU/memory are fractional (`f64`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerResourceRequirements {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cpu: Option<f64>,
    #[serde(rename = "cpuLimit", default, skip_serializing_if = "Option::is_none")]
    pub cpu_limit: Option<f64>,
    #[serde(rename = "memoryInGB", default, skip_serializing_if = "Option::is_none")]
    pub memory_in_gb: Option<f64>,
    #[serde(rename = "memoryInGBLimit", default, skip_serializing_if = "Option::is_none")]
    pub memory_in_gb_limit: Option<f64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub gpu: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fpga: Option<i32>,
}
/// SSH access settings for a compute instance: public-access toggle,
/// admin user name, port and public key. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceSshSettings {
    #[serde(rename = "sshPublicAccess", default, skip_serializing_if = "Option::is_none")]
    pub ssh_public_access: Option<compute_instance_ssh_settings::SshPublicAccess>,
    #[serde(rename = "adminUserName", default, skip_serializing_if = "Option::is_none")]
    pub admin_user_name: Option<String>,
    #[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")]
    pub ssh_port: Option<i32>,
    #[serde(rename = "adminPublicKey", default, skip_serializing_if = "Option::is_none")]
    pub admin_public_key: Option<String>,
}
/// Enum types referenced by [`ComputeInstanceSshSettings`].
pub mod compute_instance_ssh_settings {
    use super::*;
    /// Whether SSH is reachable over the public network.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SshPublicAccess {
        Enabled,
        Disabled,
    }
}
/// Lifecycle state of a compute instance. Variants serialize under
/// their Rust names.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeInstanceState {
    Creating,
    CreateFailed,
    Deleting,
    Running,
    Restarting,
    JobRunning,
    SettingUp,
    SetupFailed,
    Starting,
    Stopped,
    Stopping,
    UserSettingUp,
    UserSetupFailed,
    Unknown,
    Unusable,
}
/// The most recent lifecycle operation on a compute instance: which
/// operation, when, and its outcome. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceLastOperation {
    #[serde(rename = "operationName", default, skip_serializing_if = "Option::is_none")]
    pub operation_name: Option<compute_instance_last_operation::OperationName>,
    #[serde(rename = "operationTime", default, skip_serializing_if = "Option::is_none")]
    pub operation_time: Option<String>,
    #[serde(rename = "operationStatus", default, skip_serializing_if = "Option::is_none")]
    pub operation_status: Option<compute_instance_last_operation::OperationStatus>,
}
/// Enum types referenced by [`ComputeInstanceLastOperation`].
pub mod compute_instance_last_operation {
    use super::*;
    /// Kind of lifecycle operation performed.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OperationName {
        Create,
        Start,
        Stop,
        Restart,
        Reimage,
        Delete,
    }
    /// Outcome of the operation; one failure variant per operation kind.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OperationStatus {
        InProgress,
        Succeeded,
        CreateFailed,
        StartFailed,
        StopFailed,
        RestartFailed,
        ReimageFailed,
        DeleteFailed,
    }
}
/// An application exposed by a compute instance (display name + URI).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceApplication {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")]
    pub endpoint_uri: Option<String>,
}
/// Public and private IP addresses of a compute instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceConnectivityEndpoints {
    #[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub public_ip_address: Option<String>,
    #[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")]
    pub private_ip_address: Option<String>,
}
/// Identity of the user who created a compute instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstanceCreatedBy {
    #[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
    pub user_name: Option<String>,
    #[serde(rename = "userOrgId", default, skip_serializing_if = "Option::is_none")]
    pub user_org_id: Option<String>,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,
}
/// Settings for a personal (single-user) compute instance.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PersonalComputeInstanceSettings {
    #[serde(rename = "assignedUser", default, skip_serializing_if = "Option::is_none")]
    pub assigned_user: Option<AssignedUser>,
}
/// The AAD user assigned to a personal compute instance; both the
/// object id and tenant id are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AssignedUser {
    #[serde(rename = "objectId")]
    pub object_id: String,
    #[serde(rename = "tenantId")]
    pub tenant_id: String,
}
/// Settings for service-managed resources backing the workspace.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceManagedResourcesSettings {
    #[serde(rename = "cosmosDb", default, skip_serializing_if = "Option::is_none")]
    pub cosmos_db: Option<CosmosDbSettings>,
}
/// Cosmos DB settings: collections throughput (RU/s, presumably —
/// confirm against the service docs).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CosmosDbSettings {
    #[serde(rename = "collectionsThroughput", default, skip_serializing_if = "Option::is_none")]
    pub collections_throughput: Option<i32>,
}
/// Notebook resource attached to the workspace: FQDN, resource id and
/// any preparation error. All fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotebookResourceInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub fqdn: Option<String>,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
    #[serde(rename = "notebookPreparationError", default, skip_serializing_if = "Option::is_none")]
    pub notebook_preparation_error: Option<NotebookPreparationError>,
}
/// Error encountered preparing the notebook resource (message + HTTP-style
/// status code).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotebookPreparationError {
    #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
    #[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")]
    pub status_code: Option<i32>,
}
/// Primary/secondary access keys for the notebook resource.
/// NOTE(review): these are credentials — avoid logging this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListNotebookKeysResult {
    #[serde(rename = "primaryAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_access_key: Option<String>,
    #[serde(rename = "secondaryAccessKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_access_key: Option<String>,
}
/// Storage-account key result. NOTE(review): credential — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListStorageAccountKeysResult {
    #[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
    pub user_storage_key: Option<String>,
}
/// Paged list of workspace connections; `nextLink` points to the next
/// page when present.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PaginatedWorkspaceConnectionsList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<WorkspaceConnection>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A workspace connection resource envelope. `type_` is renamed from
/// the wire field `type`, which is a Rust keyword.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceConnection {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<WorkspaceConnectionProps>,
}
/// Connection details: category, target, auth type, the stored value
/// and the format that value is encoded in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceConnectionProps {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub category: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(rename = "authType", default, skip_serializing_if = "Option::is_none")]
    pub auth_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "valueFormat", default, skip_serializing_if = "Option::is_none")]
    pub value_format: Option<workspace_connection_props::ValueFormat>,
}
/// Enum types referenced by [`WorkspaceConnectionProps`].
pub mod workspace_connection_props {
    use super::*;
    /// Encoding of the connection `value`; only `"JSON"` is modeled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ValueFormat {
        #[serde(rename = "JSON")]
        Json,
    }
}
/// Container for compute-instance setup scripts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SetupScripts {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scripts: Option<ScriptsToExecute>,
}
/// Scripts run on the instance: one at every startup, one at creation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScriptsToExecute {
    #[serde(rename = "startupScript", default, skip_serializing_if = "Option::is_none")]
    pub startup_script: Option<ScriptReference>,
    #[serde(rename = "creationScript", default, skip_serializing_if = "Option::is_none")]
    pub creation_script: Option<ScriptReference>,
}
/// Reference to a setup script: where it comes from, its content or
/// location, arguments, and a timeout. All fields are free-form strings
/// per the generated schema.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScriptReference {
    #[serde(rename = "scriptSource", default, skip_serializing_if = "Option::is_none")]
    pub script_source: Option<String>,
    #[serde(rename = "scriptData", default, skip_serializing_if = "Option::is_none")]
    pub script_data: Option<String>,
    #[serde(rename = "scriptArguments", default, skip_serializing_if = "Option::is_none")]
    pub script_arguments: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timeout: Option<String>,
}
/// Autoscale bounds for a Synapse Spark pool (see [`SynapseSpark`]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutoScaleProperties {
    #[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub min_node_count: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(rename = "maxNodeCount", default, skip_serializing_if = "Option::is_none")]
    pub max_node_count: Option<i32>,
}
/// Auto-pause settings for a Synapse Spark pool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutoPauseProperties {
    #[serde(rename = "delayInMinutes", default, skip_serializing_if = "Option::is_none")]
    pub delay_in_minutes: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
/// Kubernetes instance-type definition: a node selector (free-form
/// JSON) plus resource requests/limits.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceTypeSchema {
    #[serde(rename = "nodeSelector", default, skip_serializing_if = "Option::is_none")]
    pub node_selector: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resources: Option<instance_type_schema::Resources>,
}
/// Nested types for [`InstanceTypeSchema`].
pub mod instance_type_schema {
    use super::*;
    /// Resource requests and limits for the instance type.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Resources {
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub requests: Option<InstanceResourceSchema>,
        #[serde(default, skip_serializing_if = "Option::is_none")]
        pub limits: Option<InstanceResourceSchema>,
    }
}
/// Empty marker type generated from a schema with no declared
/// properties; (de)serializes as an empty JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceResourceSchema {}
/// A single port exposed by an FQDN endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FqdnEndpointDetail {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<i32>,
}
/// A domain name together with its exposed endpoint details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FqdnEndpoint {
#[serde(rename = "domainName", default, skip_serializing_if = "Option::is_none")]
pub domain_name: Option<String>,
#[serde(rename = "endpointDetails", default, skip_serializing_if = "Vec::is_empty")]
pub endpoint_details: Vec<FqdnEndpointDetail>,
}
/// A category of FQDN endpoints and its member endpoints.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FqdnEndpointsProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub endpoints: Vec<FqdnEndpoint>,
}
/// Wrapper carrying FQDN endpoint properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FqdnEndpoints {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<FqdnEndpointsProperties>,
}
/// List response of FQDN endpoint groups.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExternalFqdnResponse {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<FqdnEndpoints>,
}
/// A user-visible feature flag: id, display name, and description.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlUserFeature {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
/// One page of user features; `next_link` points at the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListAmlUserFeatureResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<AmlUserFeature>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// One page of workspace SKUs; `next_link` points at the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<WorkspaceSku>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A name/value capability advertised by a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SkuCapability {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
/// A restriction limiting where/how a SKU may be used.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Restriction {
// "type" is a Rust keyword, hence the rename + trailing underscore.
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
#[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
pub reason_code: Option<restriction::ReasonCode>,
}
pub mod restriction {
use super::*;
/// Why a restriction applies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReasonCode {
NotSpecified,
NotAvailableForRegion,
NotAvailableForSubscription,
}
}
/// Availability of a SKU in one location, with zone details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuLocationInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
pub zone_details: Vec<ResourceSkuZoneDetails>,
}
/// Zone names and the capabilities available in them.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuZoneDetails {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub name: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
}
/// A SKU offered for workspaces: locations, tier, capabilities, restrictions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkspaceSku {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
#[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
pub location_info: Vec<ResourceSkuLocationInfo>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<Restriction>,
}
/// Top-level error envelope returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDetail>,
}
/// Error body: code/message/target plus nested details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
// Recursive: child errors use the same shape.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
#[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
pub additional_info: Vec<ErrorAdditionalInfo>,
}
/// Typed extra error info; `info` is free-form JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub info: Option<serde_json::Value>,
}
/// Audit metadata: who created/last modified the resource and when.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<system_data::CreatedByType>,
// Timestamps kept as strings; format not constrained by this type.
#[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<system_data::LastModifiedByType>,
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
pub mod system_data {
use super::*;
/// Kind of identity that created the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreatedByType {
User,
Application,
ManagedIdentity,
Key,
}
/// Kind of identity that last modified the resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastModifiedByType {
User,
Application,
ManagedIdentity,
Key,
}
}
/// Common ARM resource envelope: id, name, and resource type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
|
use rand::rngs::StdRng;
use rand::{RngCore, SeedableRng};
use hybridpir::server::HybridPirServer;
// Demo server entry point: builds a 2^22-entry database of random 8-byte
// rows (with a recognizable marker row at the midpoint) and serves it over
// HybridPIR on port 7000 + server id.
fn main() {
env_logger::init();
// First CLI argument is this replica's server id; it also selects the port.
let id = std::env::args().nth(1).unwrap().parse().unwrap();
// Fixed seed so every server replica generates an identical database.
let mut prng = StdRng::seed_from_u64(1234);
let size = 1 << 22;
let raidpir_servers = 2;
let raidpir_redundancy = 2;
let raidpir_size = 1 << 12;
let mut db: Vec<Vec<u8>> = Vec::with_capacity(size);
for _i in 0..size {
let mut buffer = vec![0; 8];
prng.fill_bytes(&mut buffer);
db.push(buffer);
}
// Plant a known value in the middle so retrieval can be spot-checked.
db[size >> 1] = b"deadbeef".to_vec();
// NOTE(review): the trailing parameters (2048, 12, 2) look like the
// SealPIR-side configuration — confirm against HybridPirServer::new.
let server = HybridPirServer::new(&db,
id, raidpir_servers, raidpir_redundancy, raidpir_size, false,
2048, 12, 2);
server.accept_connections(("0.0.0.0", (7000 + id) as u16)).unwrap();
}
|
// Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs;
use std::io::{Read, Write};
use std::sync::Arc;
use rocksdb::*;
use super::tempdir_with_prefix;
/// Builds an SST file at `path` containing the given key/value pairs,
/// removing any pre-existing file first.
///
/// When `cf` is `Some`, the writer is bound to that column family's options.
/// Panics on writer errors — acceptable for a test helper.
pub fn gen_sst(
    opt: ColumnFamilyOptions,
    cf: Option<&CFHandle>,
    path: &str,
    data: &[(&[u8], &[u8])],
) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    // `match` instead of is_some()/unwrap(): one binding, no panic path.
    let mut writer = match cf {
        Some(cf) => SstFileWriter::new_cf(env_opt, opt, cf),
        None => SstFileWriter::new(env_opt, opt),
    };
    writer.open(path).unwrap();
    for &(k, v) in data {
        writer.put(k, v).unwrap();
    }
    writer.finish().unwrap();
}
/// Writes a fixed three-key SST (k1..k3 -> a..c) at `path`, removing any
/// previous file first. `cf` selects a column-family-bound writer.
fn gen_sst_put(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    // `match` instead of is_some()/unwrap(): one binding, no panic path.
    let mut writer = match cf {
        Some(cf) => SstFileWriter::new_cf(env_opt, opt, cf),
        None => SstFileWriter::new(env_opt, opt),
    };
    writer.open(path).unwrap();
    writer.put(b"k1", b"a").unwrap();
    writer.put(b"k2", b"b").unwrap();
    writer.put(b"k3", b"c").unwrap();
    writer.finish().unwrap();
}
/// Writes an SST containing a single merge record (k3 += "d") at `path`,
/// removing any previous file first.
fn gen_sst_merge(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    // `match` instead of is_some()/unwrap(): one binding, no panic path.
    let mut writer = match cf {
        Some(cf) => SstFileWriter::new_cf(env_opt, opt, cf),
        None => SstFileWriter::new(env_opt, opt),
    };
    writer.open(path).unwrap();
    writer.merge(b"k3", b"d").unwrap();
    writer.finish().unwrap();
}
/// Writes an SST containing a single delete tombstone for k3 at `path`,
/// removing any previous file first.
fn gen_sst_delete(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    // `match` instead of is_some()/unwrap(): one binding, no panic path.
    let mut writer = match cf {
        Some(cf) => SstFileWriter::new_cf(env_opt, opt, cf),
        None => SstFileWriter::new(env_opt, opt),
    };
    writer.open(path).unwrap();
    writer.delete(b"k3").unwrap();
    writer.finish().unwrap();
}
/// Merge operator that concatenates the existing value with every operand,
/// in order. Used to verify that ingested merge records are applied.
///
/// Note: `size_hint().0` counts operands, not bytes, so the reservation is
/// only a lower bound (same as the original).
fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
    let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
    // Vec<u8> implements Extend<&u8>, so the bytes are copied without the
    // original per-byte push loops (and without the empty None match arm).
    if let Some(v) = existing_val {
        result.extend(v);
    }
    for op in operands {
        result.extend(op);
    }
    result
}
// Ingests SSTs into the default CF and a custom CF, then checks that a
// snapshot taken before the last ingestion keeps seeing the older values.
#[test]
fn test_ingest_external_file() {
let path = tempdir_with_prefix("_rust_rocksdb_ingest_sst");
let mut db = create_default_database(&path);
db.create_cf("cf1").unwrap();
let handle = db.cf_handle("cf1").unwrap();
let gen_path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_gen");
let test_sstfile = gen_path.path().join("test_sst_file");
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")],
);
let mut ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
// move_files is off by default, so the source SST must still exist.
assert!(test_sstfile.exists());
assert_eq!(db.get(b"k1").unwrap().unwrap(), b"v1");
assert_eq!(db.get(b"k2").unwrap().unwrap(), b"v2");
// Re-generate with new values and ingest into cf1 instead.
gen_sst(
ColumnFamilyOptions::new(),
None,
test_sstfile_str,
&[(b"k1", b"v3"), (b"k2", b"v4")],
);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"v3");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"v4");
// Snapshot before the third ingestion; it must not see later writes.
let snap = db.snapshot();
gen_sst(
ColumnFamilyOptions::new(),
None,
test_sstfile_str,
&[(b"k2", b"v5"), (b"k3", b"v6")],
);
ingest_opt.move_files(true);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"v3");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"v5");
assert_eq!(db.get_cf(handle, b"k3").unwrap().unwrap(), b"v6");
// Snapshot isolation: pre-ingestion values (and absence of k3) persist.
assert_eq!(snap.get_cf(handle, b"k1").unwrap().unwrap(), b"v3");
assert_eq!(snap.get_cf(handle, b"k2").unwrap().unwrap(), b"v4");
assert!(snap.get_cf(handle, b"k3").unwrap().is_none());
}
#[test]
fn test_ingest_external_file_new() {
let path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_new");
let path_str = path.path().to_str().unwrap();
let mut opts = DBOptions::new();
opts.create_if_missing(true);
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("merge operator", concat_merge);
let db = DB::open_cf(opts, path_str, vec![("default", cf_opts)]).unwrap();
let gen_path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_gen_new");
let test_sstfile = gen_path.path().join("test_sst_file_new");
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst_put(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
);
let mut ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
assert!(test_sstfile.exists());
assert_eq!(db.get(b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get(b"k2").unwrap().unwrap(), b"b");
assert_eq!(db.get(b"k3").unwrap().unwrap(), b"c");
let snap = db.snapshot();
let default_options = db.get_options();
gen_sst_merge(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
);
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get(b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get(b"k2").unwrap().unwrap(), b"b");
assert_eq!(db.get(b"k3").unwrap().unwrap(), b"cd");
let default_options = db.get_options();
gen_sst_delete(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
);
ingest_opt.move_files(true);
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get(b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get(b"k2").unwrap().unwrap(), b"b");
assert!(db.get(b"k3").unwrap().is_none());
assert_eq!(snap.get(b"k1").unwrap().unwrap(), b"a");
assert_eq!(snap.get(b"k2").unwrap().unwrap(), b"b");
assert_eq!(snap.get(b"k3").unwrap().unwrap(), b"c");
}
// Same put/merge/delete ingestion sequence as above, but against a newly
// created column family that carries the concatenating merge operator.
#[test]
fn test_ingest_external_file_new_cf() {
let path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_new_cf");
let mut db = create_default_database(&path);
let gen_path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_gen_new_cf");
let test_sstfile = gen_path.path().join("test_sst_file_new_cf");
let test_sstfile_str = test_sstfile.to_str().unwrap();
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("merge operator", concat_merge);
db.create_cf(("cf1", cf_opts)).unwrap();
let handle = db.cf_handle("cf1").unwrap();
let mut ingest_opt = IngestExternalFileOptions::new();
gen_sst_put(ColumnFamilyOptions::new(), None, test_sstfile_str);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert!(test_sstfile.exists());
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
assert_eq!(db.get_cf(handle, b"k3").unwrap().unwrap(), b"c");
let snap = db.snapshot();
ingest_opt.move_files(true);
gen_sst_merge(ColumnFamilyOptions::new(), None, test_sstfile_str);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
// concat_merge applied in the CF: "c" + "d".
assert_eq!(db.get_cf(handle, b"k3").unwrap().unwrap(), b"cd");
gen_sst_delete(ColumnFamilyOptions::new(), None, test_sstfile_str);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
assert!(db.get_cf(handle, b"k3").unwrap().is_none());
// Snapshot predates the merge and delete ingestions.
assert_eq!(snap.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(snap.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
assert_eq!(snap.get_cf(handle, b"k3").unwrap().unwrap(), b"c");
}
/// Asserts that each key maps to the expected value in the given column
/// family (`None` expectation = key absent). `cf = None` means the
/// "default" column family.
fn check_kv(db: &DB, cf: Option<&CFHandle>, data: &[(&[u8], Option<&[u8]>)]) {
    // Resolve the handle once, lazily. The original used the eager
    // `unwrap_or`, which performed the "default" lookup on every key even
    // when `cf` was already Some.
    let handle = cf.unwrap_or_else(|| db.cf_handle("default").unwrap());
    for &(k, v) in data {
        match v {
            None => assert!(db.get_cf(handle, k).unwrap().is_none()),
            Some(v) => assert_eq!(db.get_cf(handle, k).unwrap().unwrap(), v),
        }
    }
}
// Seeds the CF with four keys, deletes k1 and k3, then dumps the CF's live
// contents to an SST at `path` — so ingesting it should yield only k2/k4.
fn put_delete_and_generate_sst_cf(opt: ColumnFamilyOptions, db: &DB, cf: &CFHandle, path: &str) {
db.put_cf(cf, b"k1", b"v1").unwrap();
db.put_cf(cf, b"k2", b"v2").unwrap();
db.put_cf(cf, b"k3", b"v3").unwrap();
db.put_cf(cf, b"k4", b"v4").unwrap();
db.delete_cf(cf, b"k1").unwrap();
db.delete_cf(cf, b"k3").unwrap();
gen_sst_from_cf(opt, db, cf, path);
}
// Copies every live key/value of `cf` into an SST file at `path`, then
// sanity-checks the writer's file info against the CF's iterator bounds.
fn gen_sst_from_cf(opt: ColumnFamilyOptions, db: &DB, cf: &CFHandle, path: &str) {
let env_opt = EnvOptions::new();
let mut writer = SstFileWriter::new_cf(env_opt, opt, cf);
writer.open(path).unwrap();
let mut iter = db.iter_cf(cf);
iter.seek(SeekKey::Start).unwrap();
while iter.valid().unwrap() {
writer.put(iter.key(), iter.value()).unwrap();
iter.next().unwrap();
}
let info = writer.finish().unwrap();
assert_eq!(info.file_path().to_str().unwrap(), path);
// Smallest/largest keys reported by the writer must match the CF's
// first and last keys.
iter.seek(SeekKey::Start).unwrap();
assert_eq!(info.smallest_key(), iter.key());
iter.seek(SeekKey::End).unwrap();
assert_eq!(info.largest_key(), iter.key());
// A freshly written external SST carries global sequence number 0.
assert_eq!(info.sequence_number(), 0);
assert!(info.file_size() > 0);
assert!(info.num_entries() > 0);
}
/// Opens (creating if necessary) a DB with default settings rooted at the
/// given temporary directory.
fn create_default_database(path: &tempfile::TempDir) -> DB {
    let mut options = DBOptions::new();
    options.create_if_missing(true);
    let dir = path.path().to_str().unwrap();
    DB::open(options, dir).unwrap()
}
/// Creates every listed column family, skipping the implicit "default" one.
fn create_cfs(db: &mut DB, cfs: &[&str]) {
    for &name in cfs.iter().filter(|&&name| name != "default") {
        db.create_cf(name).unwrap();
    }
}
// End-to-end scenario across three CFs: populate db, export each CF to an
// SST, ingest into a second db (with move_files), re-export from db2, and
// ingest those files back into the first db — checking k1/k3 stay deleted
// and k2/k4 keep their values at every step.
#[test]
fn test_ingest_simulate_real_world() {
const ALL_CFS: [&str; 3] = ["lock", "write", "default"];
let path = tempdir_with_prefix("_rust_rocksdb_ingest_real_world_1");
let mut db = create_default_database(&path);
let gen_path = tempdir_with_prefix("_rust_rocksdb_ingest_real_world_new_cf");
create_cfs(&mut db, &ALL_CFS);
for cf in &ALL_CFS {
let handle = db.cf_handle(cf).unwrap();
let cf_opts = ColumnFamilyOptions::new();
put_delete_and_generate_sst_cf(
cf_opts,
&db,
&handle,
gen_path.path().join(cf).to_str().unwrap(),
);
}
let path2 = tempdir_with_prefix("_rust_rocksdb_ingest_real_world_2");
let mut db2 = create_default_database(&path2);
for cf in &ALL_CFS {
if *cf != "default" {
db2.create_cf(*cf).unwrap();
}
}
for cf in &ALL_CFS {
let handle = db2.cf_handle(cf).unwrap();
let mut ingest_opt = IngestExternalFileOptions::new();
// move_files consumes the generated SSTs when ingesting into db2.
ingest_opt.move_files(true);
db2.ingest_external_file_cf(
handle,
&ingest_opt,
&[gen_path.path().join(cf).to_str().unwrap()],
)
.unwrap();
// Source db still holds the expected post-delete state.
check_kv(
&db,
db.cf_handle(cf),
&[
(b"k1", None),
(b"k2", Some(b"v2")),
(b"k3", None),
(b"k4", Some(b"v4")),
],
);
// Regenerate the SST from db2 so it can be ingested back into db.
let cf_opts = ColumnFamilyOptions::new();
gen_sst_from_cf(
cf_opts,
&db2,
&handle,
gen_path.path().join(cf).to_str().unwrap(),
);
}
for cf in &ALL_CFS {
let handle = db.cf_handle(cf).unwrap();
let ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file_cf(
handle,
&ingest_opt,
&[gen_path.path().join(cf).to_str().unwrap()],
)
.unwrap();
check_kv(
&db,
db.cf_handle(cf),
&[
(b"k1", None),
(b"k2", Some(b"v2")),
(b"k3", None),
(b"k4", Some(b"v4")),
],
);
}
}
// Writes an SST through an in-memory Env (nothing touches disk), copies its
// bytes to a real file, ingests that, and finally checks the mem-Env file
// can be deleted.
#[test]
fn test_mem_sst_file_writer() {
let path = tempdir_with_prefix("_rust_mem_sst_file_writer");
let db = create_default_database(&path);
let env = Arc::new(Env::new_mem());
let mut opts = db.get_options().clone();
// Route all file I/O of the writer through the in-memory Env.
opts.set_env(env.clone());
let mem_sst_path = path.path().join("mem_sst");
let mem_sst_str = mem_sst_path.to_str().unwrap();
gen_sst(
opts,
None,
mem_sst_str,
&[(b"k1", b"v1"), (b"k2", b"v2"), (b"k3", b"v3")],
);
// Check that the file is not on disk.
assert!(!mem_sst_path.exists());
// Read the SST bytes back out of the in-memory Env.
let mut buf = Vec::new();
let mut sst = env
.new_sequential_file(mem_sst_str, EnvOptions::new())
.unwrap();
sst.read_to_end(&mut buf).unwrap();
// Write the data to a temp file.
let sst_path = path.path().join("temp_sst_path");
fs::File::create(&sst_path)
.unwrap()
.write_all(&buf)
.unwrap();
// Ingest the temp file to check the test kvs.
let ingest_opts = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opts, &[sst_path.to_str().unwrap()])
.unwrap();
check_kv(
&db,
None,
&[
(b"k1", Some(b"v1")),
(b"k2", Some(b"v2")),
(b"k3", Some(b"v3")),
],
);
// The in-memory file is independently removable.
assert!(env.file_exists(mem_sst_str).is_ok());
assert!(env.delete_file(mem_sst_str).is_ok());
assert!(env.file_exists(mem_sst_str).is_err());
}
// Rewrites an external SST's global sequence number in place and checks the
// returned previous value, then resets it to 0 so ingestion still works.
#[test]
fn test_set_external_sst_file_global_seq_no() {
let db_path = tempdir_with_prefix("_rust_rocksdb_set_external_sst_file_global_seq_no_db");
let db = create_default_database(&db_path);
let path = tempdir_with_prefix("_rust_rocksdb_set_external_sst_file_global_seq_no");
let file = path.path().join("sst_file");
let sstfile_str = file.to_str().unwrap();
gen_sst(
ColumnFamilyOptions::new(),
Some(db.cf_handle("default").unwrap()),
sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")],
);
let handle = db.cf_handle("default").unwrap();
let seq_no = 1;
// verify changing seq_no: the call returns the previous value, which
// must differ from the one just requested
let r1 = set_external_sst_file_global_seq_no(&db, &handle, sstfile_str, seq_no);
assert!(r1.unwrap() != seq_no);
// verify the seq_no now matches: setting it again returns the same value
let r2 = set_external_sst_file_global_seq_no(&db, &handle, sstfile_str, seq_no);
assert!(r2.unwrap() == seq_no);
// change seq_no back to 0 so that it can be ingested
assert!(set_external_sst_file_global_seq_no(&db, &handle, sstfile_str, 0).is_ok());
db.ingest_external_file(&IngestExternalFileOptions::new(), &[sstfile_str])
.unwrap();
check_kv(&db, None, &[(b"k1", Some(b"v1")), (b"k2", Some(b"v2"))]);
}
// The optimized ingest path reports whether it had to flush the memtable:
// no flush when the SST's keys don't overlap the memtable, a flush when
// they do.
#[test]
fn test_ingest_external_file_optimized() {
let path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_optimized");
let db = create_default_database(&path);
let gen_path = tempdir_with_prefix("_rust_rocksdb_ingest_sst_gen_new_cf");
let test_sstfile = gen_path.path().join("test_sst_file_optimized");
let test_sstfile_str = test_sstfile.to_str().unwrap();
let handle = db.cf_handle("default").unwrap();
let ingest_opt = IngestExternalFileOptions::new();
gen_sst_put(ColumnFamilyOptions::new(), None, test_sstfile_str);
db.put_cf(handle, b"k0", b"k0").unwrap();
// No overlap with the memtable.
let has_flush = db
.ingest_external_file_optimized(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert!(!has_flush);
assert!(test_sstfile.exists());
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
assert_eq!(db.get_cf(handle, b"k3").unwrap().unwrap(), b"c");
db.put_cf(handle, b"k1", b"k1").unwrap();
// Overlap with the memtable.
let has_flush = db
.ingest_external_file_optimized(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert!(has_flush);
assert!(test_sstfile.exists());
// Ingested values win over the overlapping memtable write.
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"a");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"b");
assert_eq!(db.get_cf(handle, b"k3").unwrap().unwrap(), b"c");
}
// Reads a generated SST back with SstFileReader: checksum, table
// properties, and full iteration contents.
#[test]
fn test_read_sst() {
let dir = tempdir_with_prefix("_rust_rocksdb_test_read_sst");
let sst_path = dir.path().join("sst");
let sst_path_str = sst_path.to_str().unwrap();
gen_sst_put(ColumnFamilyOptions::new(), None, sst_path_str);
let mut reader = SstFileReader::new(ColumnFamilyOptions::default());
reader.open(sst_path_str).unwrap();
reader.verify_checksum().unwrap();
reader.read_table_properties(|props| {
assert_eq!(props.num_entries(), 3);
});
// Iterating from the start must yield exactly the three written pairs.
let mut it = reader.iter();
it.seek(SeekKey::Start).unwrap();
assert_eq!(
it.collect::<Vec<_>>(),
vec![
(b"k1".to_vec(), b"a".to_vec()),
(b"k2".to_vec(), b"b".to_vec()),
(b"k3".to_vec(), b"c".to_vec()),
]
);
}
/// Corrupting a single byte of an SST must make checksum verification fail.
#[test]
fn test_read_invalid_sst() {
    let dir = tempdir_with_prefix("_rust_rocksdb_test_read_invalid_sst");
    let sst_path = dir.path().join("sst");
    let sst_path_str = sst_path.to_str().unwrap();
    gen_sst_put(ColumnFamilyOptions::new(), None, sst_path_str);
    // corrupt one byte. write_all (not write) is required here: `write` may
    // legally write zero bytes and return Ok, leaving the file uncorrupted
    // and the test vacuously passing.
    {
        use std::io::{Seek, SeekFrom};
        let mut f = fs::OpenOptions::new().write(true).open(&sst_path).unwrap();
        f.seek(SeekFrom::Start(9)).unwrap();
        f.write_all(b"!").unwrap();
    }
    let mut reader = SstFileReader::new(ColumnFamilyOptions::default());
    reader.open(sst_path_str).unwrap();
    let error_message = reader.verify_checksum().unwrap_err();
    assert!(error_message.contains("checksum mismatch"));
}
/// Round-trips the write_global_seqno flag through its setter and getter.
#[test]
fn test_ingest_external_file_options() {
    let mut ingest_opt = IngestExternalFileOptions::new();
    ingest_opt.set_write_global_seqno(false);
    // assert!(..) over assert_eq!(bool, ..): idiomatic boolean assertions.
    assert!(!ingest_opt.get_write_global_seqno());
    ingest_opt.set_write_global_seqno(true);
    assert!(ingest_opt.get_write_global_seqno());
}
|
//this will be the global state; including what the current data, index, registers, etc. are at any time
//this will get tricky I think, as in C++ we use ptrs to manipulate data and indices, which we won't be doing here
/// Mutable machine state shared across the interpreter: the current cell
/// index, the data cells, both registers, and the result of the most recent
/// register comparison.
pub struct GlobalState {
    pub current_index: usize,
    pub data: [char; 8],
    pub x_register: char,
    pub y_register: char,
    pub register_check_passed: bool,
}

impl GlobalState {
    /// Fresh state: index 0, every data cell set to 'A', NUL registers,
    /// and the comparison flag initially set.
    pub fn new() -> GlobalState {
        GlobalState {
            current_index: 0,
            data: ['A'; 8], // 'A' == 65 as char
            x_register: '\0',
            y_register: '\0',
            register_check_passed: true,
        }
    }
}
extern crate rand;
use std::fs::File;
use std::io::{Write};
use std::time::Instant;
use std::process::exit;
extern crate serde;
use serde::Deserialize;
extern crate serde_json;
extern crate rmp_serde;
use rmp_serde::Deserializer;
extern crate rayon;
use rayon::prelude::*;
#[macro_use(value_t)]
extern crate clap;
use clap::{Arg,ArgMatches,App,SubCommand};
extern crate coinjoin_analyzer;
use coinjoin_analyzer::{Partition,Distribution,SubsetSumsFilter,PartitionsSubsetSumsFilter,SumFilteredPartitionIterator,Run};
// CLI entry point: dispatch to the `auto` or `analyze` subcommand; fall
// back to printing help when neither was given.
fn main() {
let matches= get_app().get_matches();
match matches.subcommand() {
("auto", Some(options)) => auto(options),
("analyze", Some(options)) => analyze(options),
_ => { let _ = get_app().print_help(); }
}
}
/// `analyze` subcommand: parses comma-separated input and output coin values
/// from the CLI, enumerates all sum-compatible partitions of each side, and
/// prints every input/output partition pairing whose set sums match.
/// Exits the process on unparsable values.
fn analyze(options: &ArgMatches) {
    let inputs: Vec<u64> = value_t!(options.value_of("inputs"), String)
        .unwrap_or_else(|e| e.exit())
        .split(",")
        .map(|i| i.parse::<u64>().unwrap_or_else(|e|{ println!("Invalid input value {}: {}", i, e); exit(1) }) )
        .collect();
    let outputs: Vec<u64> = value_t!(options.value_of("outputs"), String)
        .unwrap_or_else(|e| e.exit())
        .split(",")
        .map(|o| o.parse::<u64>().unwrap_or_else(|e|{ println!("Invalid output value {}: {}", o, e); exit(1) }) )
        .collect();
    // Input partitions are pre-filtered against output subset sums, and
    // output partitions against the surviving input partitions.
    let in_partitions: Vec<Partition> = {
        SumFilteredPartitionIterator::new(inputs.clone(), &SubsetSumsFilter::new(&outputs)).collect()
    };
    let out_partitions: Vec<Partition> = {
        SumFilteredPartitionIterator::new(outputs.clone(), &PartitionsSubsetSumsFilter::new(&in_partitions)).collect()
    };
    let mut partition_tuples: Vec<(Partition, Partition)> = Vec::new();
    for in_partition in in_partitions {
        // Borrow instead of the original `out_partitions.clone()`, which
        // cloned the entire vector once per input partition.
        for out_partition in &out_partitions {
            if partitions_match(&in_partition, out_partition) {
                partition_tuples.push((in_partition.clone(), out_partition.clone()));
            }
        }
    }
    for &(ref input_sets, ref output_sets) in partition_tuples.iter() {
        println!("Input sets: {:?} Output sets: {:?}", input_sets, output_sets);
    }
}
/// `auto` subcommand: generates `runs` simulated CoinJoin transactions in
/// parallel according to the coin-value distribution loaded from
/// `--distribution`, analyzes each, and writes all results as JSON.
fn auto(options: &ArgMatches) {
    let parallelism = value_t!(options.value_of("parallelism"), usize)
        .unwrap_or_else(|e| e.exit());
    let _ = rayon::initialize(rayon::Configuration::new().set_num_threads(parallelism));
    let distribution_file_name = match options.value_of("distribution") {
        Some(string) => string,
        None => return print!("No distribution file given!")
    };
    let distribution = match read_distribution(&distribution_file_name) {
        Ok(dist) => dist,
        Err(err) => return print!("Error while reading distribution: {}\n", err)
    };
    let transactions = value_t!(options.value_of("transactions"), u64)
        .unwrap_or_else(|e| e.exit());
    let transaction_size = value_t!(options.value_of("size"), u64)
        .unwrap_or_else(|e| e.exit());
    let runs = value_t!(options.value_of("runs"), usize)
        .unwrap_or_else(|e| e.exit());
    let shuffled = value_t!(options.value_of("shuffled"), String)
        .unwrap_or_else(|e| e.exit());
    // clap's possible_values should already reject other strings; kept as a
    // defensive check for callers that bypass the CLI definition.
    if shuffled != "none" && shuffled != "input" && shuffled != "output" && shuffled != "distributed" {
        return print!("Passed invalid value for shuffled parameter")
    }
    let result_file_name = match options.value_of("output") {
        Some(string) => string.to_string(),
        None => format!("result-{}-t-{}-s-{}-r-{}.json", shuffled, transactions, transaction_size, runs)
    };
    let mut result: Vec<Run> = Vec::new();
    (0 .. runs)
        .into_par_iter()
        .weight_max()
        .map(|_| run(&distribution, transactions, transaction_size, &shuffled) )
        .collect_into(&mut result);
    let mut file = File::create(result_file_name).unwrap();
    let json_string = serde_json::to_string(&result).unwrap();
    // write_all instead of write: a bare `write` may succeed after writing
    // only part of the buffer, silently truncating the JSON. The result is
    // still deliberately ignored (best-effort output, as before).
    let _ = file.write_all(json_string.as_bytes());
}
// Builds the clap CLI definition: the `auto` subcommand (simulation
// parameters) and the `analyze` subcommand (explicit coin lists).
fn get_app<'a>() -> App<'a, 'a> {
App::new("cja")
.author("Felix Konstantin Maurer <maufl@maufl.de>")
.about("This program generates and analyses CoinJoin transactions.")
.version("v0.1")
.subcommand(SubCommand::with_name("auto")
.about("generate and analyze CoinJoin transactions for various parameters")
// -t: number of transactions merged into one CoinJoin.
.arg(Arg::with_name("transactions")
.short("t")
.default_value("4")
.takes_value(true))
// -s: inputs per transaction.
.arg(Arg::with_name("size")
.short("s")
.default_value("3")
.takes_value(true))
// -S: which side(s) of the transaction get shuffled values.
.arg(Arg::with_name("shuffled")
.short("S")
.default_value("none")
.takes_value(true)
.possible_values(&["none", "output", "input", "distributed"]))
// -r: how many independent simulation runs to perform.
.arg(Arg::with_name("runs")
.short("r")
.default_value("5")
.takes_value(true))
// -p: rayon worker thread count.
.arg(Arg::with_name("parallelism")
.short("p")
.default_value("5")
.takes_value(true))
// -d: msgpack file holding the coin value distribution.
.arg(Arg::with_name("distribution")
.short("d")
.default_value("distribution.bin")
.takes_value(true))
// -o: result JSON path; a name is derived from parameters if absent.
.arg(Arg::with_name("output")
.short("o")
.takes_value(true))
)
.subcommand(SubCommand::with_name("analyze")
.about("analyze single CoinJoin transaction for given inputs and outputs ")
.arg(Arg::with_name("inputs")
.short("i")
.takes_value(true))
.arg(Arg::with_name("outputs")
.short("o")
.takes_value(true))
)
}
/// One simulation run: generates a CoinJoin of `num_transactions`
/// transactions (values drawn per the distribution and the `shuffled`
/// mode), times the partition analysis, and packages everything in a `Run`.
///
/// `shuffled` is now `&str` (was `&String`); existing `&String` call sites
/// still work via deref coercion.
fn run(distribution: &Distribution, num_transactions: u64, transaction_size: u64, shuffled: &str) -> Run {
    let (transactions, in_coins, out_coins) = match shuffled {
        "output" => distribution.random_coinjoin_transaction_shuffled(num_transactions, transaction_size),
        "input" => distribution.random_coinjoin_transaction_input_shuffled(num_transactions, transaction_size),
        "distributed" => distribution.random_coinjoin_transaction_distributed_shuffled(num_transactions, transaction_size),
        "none" => distribution.random_coinjoin_transaction(num_transactions, transaction_size),
        _ => panic!("Invalid value for shuffled options")
    };
    // Only the partition analysis is timed, not transaction generation.
    let now = Instant::now();
    let in_partitions: Vec<Partition> = {
        SumFilteredPartitionIterator::new(in_coins.clone(), &SubsetSumsFilter::new(&out_coins)).collect()
    };
    let out_partitions: Vec<Partition> = {
        SumFilteredPartitionIterator::new(out_coins.clone(), &PartitionsSubsetSumsFilter::new(&in_partitions)).collect()
    };
    let mut partition_tuples: Vec<(Partition, Partition)> = Vec::new();
    for in_partition in in_partitions {
        // Borrow instead of the original `out_partitions.clone()`, which
        // cloned the entire vector once per input partition.
        for out_partition in &out_partitions {
            if partitions_match(&in_partition, out_partition) {
                partition_tuples.push((in_partition.clone(), out_partition.clone()));
            }
        }
    }
    let duration = now.elapsed();
    Run {
        num_transactions: num_transactions,
        num_inputs_per_transaction: transaction_size,
        original_transactions: transactions,
        in_coins: in_coins,
        out_coins: out_coins,
        partition_tuples: partition_tuples,
        duration_secs: duration.as_secs(),
        duration_nano: duration.subsec_nanos()
    }
}
/// Loads a msgpack-encoded `Distribution` from `file_name`, mapping both
/// the open failure and the decode failure to human-readable messages.
fn read_distribution(file_name: &str) -> Result<Distribution, String> {
    let file = File::open(file_name)
        .map_err(|err| format!("Error while opening file: {}", err))?;
    Deserialize::deserialize(&mut Deserializer::new(file))
        .map_err(|e| format!("Could not parse distribution: {}", e))
}
/// True when every subset in `a` has a subset in `b` with the same element sum.
///
/// The check is one-directional: subsets of `b` without a partner in `a` do
/// not make this return false.
fn partitions_match(a: &Partition, b: &Partition) -> bool {
    a.into_iter().all(|set_a| {
        let target: u64 = set_a.iter().sum();
        b.into_iter().any(|set_b| set_b.iter().sum::<u64>() == target)
    })
}
|
use sqlx::{query_file, query_file_as, Error, PgPool};
use uuid::Uuid;
use super::image::{Image, NewImage};
/// A row of the `app_user` table.
pub struct AppUser {
    pub id: Uuid,
    pub created: time::OffsetDateTime,
    pub email: String,
    // Stored hash only; hashing itself happens elsewhere (not in this module).
    pub password_hash: String,
    pub is_admin: bool,
}
impl AppUser {
    /// Inserts a new user row and returns the generated id.
    pub async fn new(
        pool: &PgPool,
        email: &str,
        password_hash: &str,
        admin: bool,
    ) -> Result<Uuid, sqlx::Error> {
        Ok(
            query_file!("queries/app_user/create.sql", email, password_hash, admin)
                .fetch_one(pool)
                .await
                .map(|v| v.id)?,
        )
    }

    /// Looks up a user by primary key; `Ok(None)` when no row matches.
    ///
    /// `fetch_optional` maps the zero-row case to `None` directly, instead of
    /// fetching one row and pattern-matching on `Error::RowNotFound`.
    pub async fn by_id(id: Uuid, pool: &PgPool) -> Result<Option<AppUser>, sqlx::Error> {
        query_file_as!(AppUser, "queries/app_user/get_by_id.sql", id)
            .fetch_optional(pool)
            .await
    }

    /// Looks up a user by email; `Ok(None)` when no row matches.
    pub async fn by_email(email: &str, pool: &PgPool) -> Result<Option<AppUser>, sqlx::Error> {
        query_file_as!(AppUser, "queries/app_user/get_by_email.sql", email)
            .fetch_optional(pool)
            .await
    }
}
/// Methods for an instance
impl AppUser {
    /// Attaches a new image owned by this user; returns the image's id.
    pub async fn add_image(&self, image: NewImage, pool: &PgPool) -> Result<Uuid, sqlx::Error> {
        Image::new(self.id, image, pool).await
    }
    /// Persists the mutable fields (email, password hash, admin flag), keyed
    /// by this user's id.
    pub async fn save(&self, pool: &PgPool) -> Result<(), sqlx::Error> {
        query_file!(
            "queries/app_user/update.sql",
            &self.id,
            &self.email,
            &self.password_hash,
            &self.is_admin
        )
        .execute(pool)
        .await?;
        Ok(())
    }
}
|
// 2019-01-17 Moved this module into its own file
// A struct is made public with `pub`, but its fields are not
// No need to announce it with `mod plante { }` here:
// that was already done with `mod plante;` in main.rs
#[derive(Debug)] // allows printing instances with {:?}
pub struct Legume {
    // A public struct: callers outside the module can use it.
    pub nom: String, // the vegetable's name is public
    id: i32,         // its ID is module-private
}

impl Legume {
    /// Builds a `Legume` from a borrowed name; the ID is fixed at 1.
    pub fn new(nom: &str) -> Legume {
        Legume {
            nom: nom.to_string(),
            id: 1,
        }
    }
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
//! An example that tracks thread pedigree using local state.
use std::io;
use std::mem;
use bitvec::bitvec;
use bitvec::order::Msb0;
use bitvec::vec::BitVec;
use libc::pid_t;
use nix::unistd::Pid;
use serde::Deserialize;
use serde::Serialize;
/// Helper function that finds the longest run of repeating bits in a bitvec
///
/// Returns `(start_index, length)` of the longest run; ties go to the
/// earliest run. An empty sequence yields `(0, 0)`.
fn longest_run(sequence: &BitVec) -> (usize, usize) {
    let mut prev_bit = false;
    let mut prev_count = 1;
    let mut max_count = 0;
    let mut max_start = 0;
    for (index, bit) in sequence.iter().enumerate() {
        // Extend the current run only when this bit equals the previous one;
        // index 0 always starts a fresh run (the `index > 0` guard).
        let count = if index > 0 && prev_bit == *bit {
            prev_count + 1
        } else {
            1
        };
        if count > max_count {
            max_count = count;
            // A run of length `count` ending at `index` starts here.
            max_start = index + 1 - count;
        }
        prev_count = count;
        prev_bit = *bit;
    }
    (max_start, max_count)
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
/// Unbounded bitstring representation of process pedigree (i.e. tree path or tree index)
/// which can be forked and converted to a deterministic virtual PID.
///
/// As a binary tree-index, a Pedigree can be viewed as a series of "left"/"right"
/// directions for how to navigate the tree. Therefore a zero-length Pedigree refers to
/// the root. For convenience, we refer to "parent/child" rather than "left/right",
/// following the normal conventions of process or thread forking. Note that this
/// pedigree datatype does not represent "joins" within a set of running processes, nor
/// does it otherwise represent dependencies or "happens before" edges.
///
/// TODO: Add serialization / deserialization
pub struct Pedigree {
    // Tree path from the root: a 0 bit is a "parent" step, a 1 bit a "child"
    // step (see `fork_mut`).
    pedigree: BitVec,
}
impl Pedigree {
/// Create a new root pedigree representing the top of a tree of processes or threads.
pub fn new() -> Self {
Pedigree {
pedigree: bitvec![0],
}
}
/// Split a pedigree into a pedigree for the two execution points
/// after the fork: `(parent,child)`. I.e. both tree-paths
/// returned are one level deeper from the root than the input was.
pub fn fork(&self) -> (Self, Self) {
let mut parent = self.clone();
let child = parent.fork_mut();
(parent, child)
}
/// Fork a pedigree, destructively.
/// Mutates parent pedigree, returns new child pedigree.
///
/// Since parent pedigree is being copied to the child, this function will
/// have O(n) complexity with respect to pedigree length.
pub fn fork_mut(&mut self) -> Self {
let mut child_pedigree = self.pedigree.clone();
child_pedigree.push(true);
self.pedigree.push(false);
Pedigree {
pedigree: child_pedigree,
}
}
/// Get pedigree's inner BitVec representation
pub fn raw(&self) -> BitVec {
self.pedigree.clone()
}
}
/// Attempts to convert the pedigree bitstring into a deterministic virtual PID
///
/// The 32-bit PID is packed as: 1 zero sign bit | 16 "tree" bits (the
/// pedigree with its longest run excised) | 4 bits run start index | 1 bit
/// run value | 10 bits run length. Fails when the pedigree does not fit.
impl TryFrom<&Pedigree> for Pid {
    type Error = io::Error;
    fn try_from(pedigree: &Pedigree) -> Result<Self, Self::Error> {
        // Define mapping of pedigree bits -> PID bits
        const MSB_ZERO_BITS: usize = 1;
        const TREE_BITS: usize = 16;
        const RUN_INDEX_BITS: usize = 4;
        const RUN_TYPE_BITS: usize = 1;
        const RUN_LENGTH_BITS: usize = 10;
        // The five fields must exactly fill a pid_t (32 bits).
        debug_assert!(
            MSB_ZERO_BITS + TREE_BITS + RUN_INDEX_BITS + RUN_TYPE_BITS + RUN_LENGTH_BITS
                == mem::size_of::<pid_t>() * 8
        );
        // Trim off any trailing P's from pedigree, i.e. viewing it as
        // a sequence of 'P' (parent) and 'C' (child) directions.
        let mut sequence = pedigree.raw();
        while sequence.len() > 1 && sequence.last() == Some(&false) {
            sequence.pop();
        }
        // Find longest run in pedigree sequence
        let (index, len) = longest_run(&sequence);
        // Make sure pedigree will fit into the bit encoding
        if index >= 2_usize.pow(RUN_INDEX_BITS as u32)
            || len >= 2_usize.pow(RUN_LENGTH_BITS as u32)
            || sequence.len() - len > TREE_BITS
        {
            Err(Self::Error::new(
                io::ErrorKind::Other,
                "Pedigree is too large or complex to be deterministically converted into virtual PID.",
            ))
        } else {
            // Extract the longest run of bits from pedigree: split into
            // (prefix, run, suffix) and rejoin prefix+suffix as the "tree".
            let mut lower_tree = sequence.split_off(index + len);
            let run = sequence.split_off(index);
            let mut tree = sequence;
            tree.append(&mut lower_tree);
            // Construct a BitVec which will be interpreted as a pid_t
            let mut vpid_bits: BitVec<Msb0, u32> =
                BitVec::with_capacity(mem::size_of::<pid_t>() * 8);
            // pid_t is signed, so MSB must always be zero or it will be interpreted as error
            // when returned from fork, clone, etc.
            vpid_bits.push(false);
            // Pack the rest of the bits, using asserts to make sure the bitfield sizing
            // is correct. Any errors here are fatal bugs, so assert seems acceptable.
            let mut tree_bits: BitVec<Msb0, u32> = BitVec::repeat(false, TREE_BITS - tree.len());
            tree_bits.append(&mut tree);
            debug_assert!(tree_bits.len() == TREE_BITS);
            vpid_bits.append(&mut tree_bits);
            // Keep only the low RUN_INDEX_BITS of the run start index.
            let mut run_index_bits = BitVec::<Msb0, u32>::from_element(index as u32);
            run_index_bits = run_index_bits.split_off(run_index_bits.len() - RUN_INDEX_BITS);
            debug_assert!(run_index_bits.len() == RUN_INDEX_BITS);
            vpid_bits.append(&mut run_index_bits);
            // One bit records whether the excised run was 0s or 1s.
            let mut run_type_bits: BitVec<Msb0, u32> = BitVec::new();
            run_type_bits.push(run[0]);
            debug_assert!(run_type_bits.len() == RUN_TYPE_BITS);
            vpid_bits.append(&mut run_type_bits);
            let mut run_length_bits = BitVec::<Msb0, u32>::from_element(len as u32);
            run_length_bits = run_length_bits.split_off(run_length_bits.len() - RUN_LENGTH_BITS);
            debug_assert!(run_length_bits.len() == RUN_LENGTH_BITS);
            vpid_bits.append(&mut run_length_bits);
            debug_assert!(vpid_bits.len() == mem::size_of::<pid_t>() * 8);
            Ok(Pid::from_raw(vpid_bits.into_vec()[0] as i32))
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn test_longest_run() {
        // Ties between equal-length runs go to the earliest one.
        let sequence = bitvec![1, 1, 0, 0, 0];
        let (index, len) = longest_run(&sequence);
        assert_eq!((index, len), (2, 3));
        let sequence = bitvec![1, 1, 1, 0, 0];
        let (index, len) = longest_run(&sequence);
        assert_eq!((index, len), (0, 3));
        let sequence = bitvec![1, 1, 0, 0, 1, 1];
        let (index, len) = longest_run(&sequence);
        assert_eq!((index, len), (0, 2));
        let sequence = bitvec![1, 0, 0, 0, 0, 1];
        let (index, len) = longest_run(&sequence);
        assert_eq!((index, len), (1, 4));
        // Alternating bits: every run has length 1.
        let sequence = bitvec![1, 0, 1, 0, 1, 0];
        let (index, len) = longest_run(&sequence);
        assert_eq!((index, len), (0, 1));
    }
    #[test]
    fn test_pedigree_basic() {
        // FIXME: These tests are dependent on the bit widths used to convert
        // pedigree into PID, but the tests below assume that these values
        // do not change.
        let mut parent = Pedigree::new();
        // Root pedigree = P
        assert_eq!(Pid::try_from(&parent).unwrap(), Pid::from_raw(0x1));
        let child = parent.fork_mut();
        // Parent pedigree = PP
        assert_eq!(Pid::try_from(&parent).unwrap(), Pid::from_raw(0x1));
        // Child pedigree == PC
        assert_eq!(Pid::try_from(&child).unwrap(), Pid::from_raw(0x00008001));
        let child2 = parent.fork_mut();
        // Parent pedigree == PPP
        assert_eq!(Pid::try_from(&parent).unwrap(), Pid::from_raw(0x1));
        // Child pedigree == PPC
        assert_eq!(Pid::try_from(&child2).unwrap(), Pid::from_raw(0x00008002));
    }
    #[test]
    fn test_pedigree_many_forks() {
        // 1023 parent steps then one child step still fits the encoding
        // (run length field is 10 bits).
        let mut many_forks_bitstring = BitVec::repeat(false, 1023);
        many_forks_bitstring.push(true);
        let many_forks_pedigree = Pedigree {
            pedigree: many_forks_bitstring,
        };
        assert_eq!(
            Pid::try_from(&many_forks_pedigree).unwrap(),
            Pid::from_raw(0x000083FF)
        );
    }
    #[test]
    fn test_pedigree_overflow() {
        // One more fork than fits in the run-length field must fail.
        let mut many_forks_bitstring = BitVec::repeat(false, 1024);
        many_forks_bitstring.push(true);
        let many_forks_pedigree = Pedigree {
            pedigree: many_forks_bitstring,
        };
        assert!(Pid::try_from(&many_forks_pedigree).is_err());
    }
}
|
use crate::schema::shopify_connections;
use crate::utils::now;
use chrono::naive::NaiveDateTime;
use diesel::prelude::*;
/// A row of `shopify_connections`: one OAuth handshake with a Shopify shop.
#[derive(Debug, Identifiable, Queryable)]
#[table_name = "shopify_connections"]
pub struct ShopifyConnection {
    pub id: i32,
    pub shop: String,
    // Random value issued at the start of the OAuth flow — presumably echoed
    // back by Shopify for forgery protection; confirm against the auth handler.
    pub nonce: String,
    // None until the flow completes (see `update_access_token`).
    pub access_token: Option<String>,
    pub created_at: NaiveDateTime,
    pub updated_at: Option<NaiveDateTime>,
    pub deleted_at: Option<NaiveDateTime>,
    pub active: bool,
}
/// Insertable counterpart of `ShopifyConnection` (no `id`; the database
/// assigns it).
#[derive(Insertable)]
#[table_name = "shopify_connections"]
pub struct NewShopifyConnection {
    pub shop: String,
    pub nonce: String,
    pub access_token: Option<String>,
    pub created_at: NaiveDateTime,
    pub updated_at: Option<NaiveDateTime>,
    pub deleted_at: Option<NaiveDateTime>,
    pub active: bool,
}
impl NewShopifyConnection {
    /// A fresh, active connection with no access token yet, stamped with the
    /// current time.
    pub fn new(shop: String, nonce: String) -> Self {
        NewShopifyConnection {
            shop,
            nonce,
            access_token: None,
            created_at: now(),
            updated_at: None,
            deleted_at: None,
            active: true,
        }
    }
    /// Convenience wrapper over the free function `create`.
    pub fn insert(&self, conn: &PgConnection) -> ShopifyConnection {
        create(conn, self)
    }
}
/// Inserts `new_shopify_connection` and returns the stored row.
///
/// Panics on database errors, consistent with the other accessors in this
/// module.
pub fn create(
    conn: &PgConnection,
    new_shopify_connection: &NewShopifyConnection,
) -> ShopifyConnection {
    diesel::insert_into(shopify_connections::table)
        .values(new_shopify_connection)
        .get_result(conn)
        .expect("Error saving new shopify_connection")
}
/// Loads every row of `shopify_connections`. Panics on database errors.
pub fn read(conn: &PgConnection) -> Vec<ShopifyConnection> {
    shopify_connections::table
        .load::<ShopifyConnection>(conn)
        .expect("Error loading shopify_connection")
}
/// Loads all connections for the given shop name. Panics on database errors.
pub fn read_by_shop(conn: &PgConnection, shop: String) -> Vec<ShopifyConnection> {
    shopify_connections::table
        .filter(shopify_connections::shop.eq(shop))
        .load::<ShopifyConnection>(conn)
        .expect("Error loading shopify_connection")
}
/// Loads connections matching both shop name and OAuth nonce.
/// Panics on database errors.
pub fn read_by_shop_and_nonce(
    conn: &PgConnection,
    shop: String,
    nonce: String,
) -> Vec<ShopifyConnection> {
    shopify_connections::table
        .filter(shopify_connections::shop.eq(shop))
        .filter(shopify_connections::nonce.eq(nonce))
        .load::<ShopifyConnection>(conn)
        .expect("Error loading shopify_connection")
}
/// Stores `access_token` on the given connection and bumps `updated_at`.
/// Returns the number of updated rows.
pub fn update_access_token(
    conn: &PgConnection,
    shopify_connection: &ShopifyConnection,
    access_token: String,
) -> QueryResult<usize> {
    diesel::update(shopify_connection)
        .set((
            shopify_connections::access_token.eq(access_token),
            shopify_connections::updated_at.eq(now()),
        ))
        .execute(conn)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::establish_connection_test;
    /// Deletes every row so the next test starts from an empty table.
    fn cleanup_table(conn: &PgConnection) {
        diesel::delete(shopify_connections::table)
            .execute(conn)
            .unwrap();
    }
    /// Fixture: a new connection with a fixed shop name and nonce.
    fn mock_struct() -> NewShopifyConnection {
        NewShopifyConnection::new(
            String::from("ShopName"),
            String::from("00a329c0648769a73afac7f9381e08fb43dbea72"),
        )
    }
    #[test]
    fn it_creates_a_shopify_connection() {
        let conn = establish_connection_test();
        create(&conn, &mock_struct());
        let shopify_connection = shopify_connections::table
            .load::<ShopifyConnection>(&conn)
            .expect("Error loading shopify_connection");
        assert_eq!(1, shopify_connection.len());
        cleanup_table(&conn);
    }
    #[test]
    fn it_reads_a_shopify_connection() {
        let conn = establish_connection_test();
        let new_shopify_connection = mock_struct();
        let created_shopify_connection = diesel::insert_into(shopify_connections::table)
            .values(&new_shopify_connection)
            .get_result::<ShopifyConnection>(&conn)
            .expect("Error saving new shopify_connection");
        let shopify_connection = read(&conn);
        assert!(0 < shopify_connection.len());
        let my_shopify_connection = shopify_connection
            .iter()
            .find(|&x| x.shop == new_shopify_connection.shop);
        assert!(
            my_shopify_connection.is_some(),
            "Could not find the created shopify_connection in the database!"
        );
        cleanup_table(&conn);
    }
    #[test]
    fn it_reads_a_shopify_connection_by_shop() {
        let conn = establish_connection_test();
        let shop = String::from("ShopNameBaby");
        // make 2 shopify_connections, each with different categories
        let mut new_shopify_connection = mock_struct();
        create(&conn, &new_shopify_connection);
        new_shopify_connection.shop = shop.clone();
        create(&conn, &new_shopify_connection);
        // Only the second row should match the filtered query.
        let shopify_connection = read_by_shop(&conn, shop.clone());
        assert_eq!(1, shopify_connection.len());
        let my_shopify_connection = shopify_connection.iter().find(|x| x.shop == shop);
        assert!(
            my_shopify_connection.is_some(),
            "Could not find the created shopify_connection in the database!"
        );
        cleanup_table(&conn);
    }
    #[test]
    fn it_reads_a_shopify_connection_by_shop_and_nonce() {
        let conn = establish_connection_test();
        let nonce =
            String::from("0cd1136c6702de4410d06d3ae80f592c9b2132ea232011bcc78fb53862cbd9ee");
        // make 2 shopify_connections, each with different categories
        let mut new_shopify_connection = mock_struct();
        create(&conn, &new_shopify_connection);
        new_shopify_connection.nonce = nonce.clone();
        create(&conn, &new_shopify_connection);
        // Both rows share the shop name, so the nonce disambiguates.
        let shopify_connection =
            read_by_shop_and_nonce(&conn, String::from("ShopName"), nonce.clone());
        assert_eq!(1, shopify_connection.len());
        let my_shopify_connection = shopify_connection.iter().find(|x| x.nonce == nonce);
        assert!(
            my_shopify_connection.is_some(),
            "Could not find the created shopify_connection in the database!"
        );
        cleanup_table(&conn);
    }
    #[test]
    fn it_updates_a_shopify_connection_access_token() {
        let conn = establish_connection_test();
        let shopify_connection = create(&conn, &mock_struct());
        let access_token = String::from("super ssssecret");
        update_access_token(&conn, &shopify_connection, access_token.clone());
        let shopify_connections = read_by_shop(&conn, shopify_connection.shop);
        assert_eq!(1, shopify_connections.len());
        let my_shopify_connection = shopify_connections
            .iter()
            .find(|x| x.access_token.as_ref().unwrap() == &access_token);
        assert!(
            my_shopify_connection.is_some(),
            "Could not find the created shopify_connection in the database!"
        );
        cleanup_table(&conn);
    }
}
|
// `pub(crate)` is the idiomatic spelling of `pub(in crate)` (identical
// semantics: visible throughout this crate only).
pub(crate) mod empty_iterator;
pub(crate) mod generator_iterator;
|
use std::fs;
use aoc20::days::day3::{Geology};
/// Loads the day-3 example grid from disk and parses it into a `Geology`.
fn geology() -> Geology {
    let contents = fs::read_to_string("data/day3example.txt")
        .expect("Something went wrong reading the file");
    Geology::new(&contents)
}
#[test]
fn day3_parse() {
    // The example grid is 11 columns by 11 rows.
    let geo = geology();
    assert_eq!(geo.width(), 11);
    assert_eq!(geo.height(), 11);
}
#[test]
fn day3_check_trees() {
    let geo = geology();
    assert_eq!(geo.is_tree(10, 10), true);
    assert_eq!(geo.is_tree(2, 1), true);
    assert_eq!(geo.is_tree(0, 1), false);
    // x=13 is beyond the grid width, so the pattern must repeat horizontally.
    assert_eq!(geo.is_tree(0, 13), true);
}
#[test]
fn day3_hit_counts() {
    let geo = geology();
    // provided slopes and hit counts
    assert_eq!(geo.hit_trees(1, 1), 2);
    assert_eq!(geo.hit_trees(1, 3), 7);
    assert_eq!(geo.hit_trees(1, 5), 3);
    assert_eq!(geo.hit_trees(1, 7), 4);
    assert_eq!(geo.hit_trees(2, 1), 2);
}
use http::request::Parts;
/// Derives a "root path" key from the request URI.
///
/// "/" maps to "default"; any other path drops its final segment and joins
/// the remaining segments with underscores (e.g. "/a/b/c" -> "a_b").
pub fn get_root_path(parts: &Parts) -> String {
    let path = parts.uri.path();
    if path == "/" {
        return "default".to_owned();
    }
    let segments: Vec<&str> = path.split('/').collect();
    // Index 0 is the empty segment before the leading '/'; the last segment
    // is dropped, and the middle is glued back with underscores.
    segments[1..segments.len() - 1].join("_")
}
|
//! This module contains general interrupt handlers.
//!
//! None of the contained interrupt handlers should be architecture specific.
//! They should instead
//! be called by the architecture specific interrupt handlers.
use arch::{self, schedule, Architecture};
use memory::VirtualAddress;
use multitasking::CURRENT_THREAD;
/// The timer interrupt handler for the system.
///
/// Simply hands control to the scheduler on every tick.
pub fn timer_interrupt() {
    schedule();
}
/// The keyboard interrupt handler.
///
/// Scancode 1 halts the machine (presumably the escape key — confirm against
/// the scancode set in use); every other key is just logged.
pub fn keyboard_interrupt(scancode: u8) {
    if scancode == 1 {
        // Stop the scheduler from switching away, then spin forever.
        unsafe { ::sync::disable_preemption() };
        loop {}
    }
    info!("Key: <{}>", scancode);
}
/// The page fault handler.
///
/// Logs the faulting thread, address and page flags, then halts; there is no
/// recovery path (e.g. demand paging) here.
pub fn page_fault_handler(address: VirtualAddress, program_counter: VirtualAddress) {
    // Prevent a context switch while the fault is being reported.
    unsafe { ::sync::disable_preemption() };
    let current_thread = CURRENT_THREAD.lock();
    error!(
        "Page fault in {:?} {:?} at address {:?} (PC: {:?})",
        current_thread.pid, current_thread.id, address, program_counter
    );
    error!("Page flags: {:?}", arch::Current::get_page_flags(address));
    loop {}
}
|
use once_cell::sync::Lazy;
/// Drawing primitives imported from the embedding wasm host.
#[link(wasm_import_module = "host")]
extern "C" {
    // The host takes strings as (pointer, byte length); see Canvas::draw_str.
    #[link_name = "draw_str"]
    fn draw_str_low_level(ptr: i32, len: i32, x: f32, y: f32);
    fn draw_rect(x: f32, y: f32, width: f32, height: f32);
    fn save();
    fn clip_rect(x: f32, y: f32, width: f32, height: f32);
    fn draw_rrect(x: f32, y: f32, width: f32, height: f32, radius: f32);
    fn translate(x: f32, y: f32);
    fn restore();
}
/// A (pointer, length) pair describing a string buffer, used to pass strings
/// across the wasm FFI boundary. `repr(C)` fixes the field layout for the host.
#[repr(C)]
pub struct PointerLengthString {
    pub ptr: usize,
    pub len: usize,
}
impl From<String> for PointerLengthString {
    /// Transfers ownership of the string buffer to the descriptor.
    ///
    /// The original captured `s.as_ptr()` and then let `s` drop at the end of
    /// the function, so the returned pointer dangled. Shrinking to a boxed
    /// `str` first also guarantees capacity == len, which is what the inverse
    /// conversion (`String::from_raw_parts(ptr, len, len)`) assumes.
    fn from(s: String) -> Self {
        let boxed = s.into_boxed_str();
        let len = boxed.len();
        // Leak the buffer; ownership now lives in the returned descriptor.
        let ptr = Box::into_raw(boxed) as *mut u8 as usize;
        Self { ptr, len }
    }
}
impl From<PointerLengthString> for String {
    /// Reclaims ownership of the buffer described by `s`.
    // SAFETY: assumes `s.ptr`/`s.len` describe a valid, uniquely-owned UTF-8
    // buffer whose capacity equals its length — TODO confirm against the
    // host-side allocation contract.
    fn from(s: PointerLengthString) -> Self {
        unsafe { String::from_raw_parts(s.ptr as *mut u8, s.len, s.len) }
    }
}
/// Zero-sized safe wrapper around the host's drawing FFI.
pub struct Canvas {
}
impl Canvas {
    pub fn new() -> Self {
        Self {
        }
    }
    /// Draws `s` at (x, y) by passing the host a raw (pointer, length) pair.
    pub fn draw_str(&self, s: &str, x: f32, y: f32) {
        unsafe {
            draw_str_low_level(s.as_ptr() as i32, s.len() as i32, x, y);
        }
    }
    /// Fills a rectangle at (x, y) with the given size.
    pub fn draw_rect(&self, x: f32, y: f32, width: f32, height: f32) {
        unsafe {
            draw_rect(x, y, width, height);
        }
    }
    /// Pushes the current drawing state (pair with `restore`).
    pub fn save(&self) {
        unsafe {
            save();
        }
    }
    /// Restricts subsequent drawing to the given rectangle.
    pub fn clip_rect(&self, x: f32, y: f32, width: f32, height: f32) {
        unsafe {
            clip_rect(x, y, width, height);
        }
    }
    /// Fills a rounded rectangle with the given corner radius.
    pub fn draw_rrect(&self, x: f32, y: f32, width: f32, height: f32, radius: f32) {
        unsafe {
            draw_rrect(x, y, width, height, radius);
        }
    }
    /// Offsets the drawing origin by (x, y).
    pub fn translate(&self, x: f32, y: f32) {
        unsafe {
            translate(x, y);
        }
    }
    /// Pops the drawing state pushed by `save`.
    pub fn restore(&self) {
        unsafe {
            restore();
        }
    }
}
/// Contract a wasm applet implements so the `app!` macro can expose it to
/// the host via the exported `draw`/`on_click`/`get_state`/`set_state` hooks.
pub trait App {
    /// Snapshot of the app, serialized as JSON by the `app!` macro exports.
    type State;
    fn init() -> Self;
    fn draw(&mut self);
    fn on_click(&mut self);
    fn get_state(&self) -> Self::State;
    fn set_state(&mut self, state: Self::State);
}
mod macros {
    /// Wires an `App` implementation up to the wasm exports the host expects:
    /// `on_click`, `get_state`, `set_state` and `draw`.
    #[macro_export]
    macro_rules! app {
        ($app:ident) => {
            use once_cell::sync::Lazy;
            use crate::framework::{PointerLengthString};
            // Use the macro argument, not a hard-coded `Counter`, so the
            // macro works for any App type.
            static mut APP: Lazy<$app> = Lazy::new(|| $app::init());
            #[no_mangle]
            pub extern "C" fn on_click() {
                unsafe { APP.on_click() }
            }
            #[no_mangle]
            pub extern "C" fn get_state() -> *const PointerLengthString {
                let s = serde_json::to_string(unsafe { &APP.get_state() }).unwrap();
                let p: PointerLengthString = s.into();
                // Leak the descriptor so it outlives this call; returning
                // `&p as *const _` would hand the host a dangling stack pointer.
                Box::into_raw(Box::new(p))
            }
            #[no_mangle]
            pub extern "C" fn set_state(ptr: i32, len: i32) {
                let s = String::from(PointerLengthString { ptr: ptr as usize, len: len as usize });
                // Let inference pick this App's State type instead of
                // hard-coding `isize`.
                let state = serde_json::from_str(&s).unwrap();
                unsafe { APP.set_state(state) }
            }
            #[no_mangle]
            pub extern "C" fn draw() {
                unsafe { APP.draw() }
            }
        };
    }
}
|
use yew::prelude::*;
use yew_router::component;
use yew_router::route;
use yew_router::{Route, Router};
use crate::page_not_found::PageNotFound;
/// Demo component that renders a label and nests a Router under "/a".
pub struct AComp {}
/// AComp handles no messages.
pub enum Msg {}
impl Component for AComp {
    type Message = Msg;
    type Properties = ();
    fn create(_props: Self::Properties, _link: ComponentLink<Self>) -> Self {
        AComp {}
    }
    // Always request a re-render on update and on property change.
    fn update(&mut self, _msg: Self::Message) -> ShouldRender {
        true
    }
    fn change(&mut self, _props: Self::Properties) -> ShouldRender {
        true
    }
    fn destroy(&mut self) {
        log::info!("AComp destroyed")
    }
}
impl Renderable<AComp> for AComp {
    /// Renders a label plus a nested Router mapping any path under "/a/"
    /// to the PageNotFound component.
    fn view(&self) -> Html<Self> {
        html! {
            <>
                <div>
                    { "I am the A component"}
                </div>
                <div>
                    <Router>
                        <Route matcher=route!("/a/{*}") render=component::<PageNotFound>() />
                    </Router>
                </div>
            </>
        }
    }
}
|
use nix::unistd::Pid;
use std::fs::File;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::process::{Child, Command, Stdio};
use crate::configs::FunctionConfig;
use crate::request::Request;
use crate::request;
use cgroups::{cgroup_builder::CgroupBuilder, Cgroup};
use log::{info, warn, error};
/// Lifecycle states of a VM.
pub enum VmStatus{
    NotReady,
    // Pinned to 65 — presumably matches a byte the firerunner process emits
    // when boot completes (see Vm::new); TODO confirm.
    Ready = 65,
    Unresponsive,
    Crashed,
}
/// Filesystem configuration for a VM's application.
#[derive(Debug)]
pub struct VmAppConfig {
    pub rootfs: String,
    pub appfs: String,
    // Snapshot directories: load from / dump to, when set.
    pub load_dir: Option<PathBuf>,
    pub dump_dir: Option<PathBuf>,
}
/// A running firerunner VM, communicated with over the child's stdin/stdout.
#[derive(Debug)]
pub struct Vm {
    pub id: usize,
    pub memory: usize, // MB
    pub function_name: String,
    // Handle to the firerunner child process.
    process: Child,
    /*
    pub process: Pid,
    cgroup_name: PathBuf,
    pub cpu_share: usize,
    pub vcpu_count: usize,
    pub kernel: String,
    pub kernel_args: String,
    pub ready_notifier: File, // Vm writes to this File when setup finishes
    pub app_config: VmAppConfig,
    */
}
impl Vm {
    /// Launch a vm instance and return a Vm value
    /// When this function returns, the VM has finished booting and is ready
    /// to accept requests.
    ///
    /// Returns `None` when the firerunner process cannot be spawned or no
    /// ready byte is received on its stdout.
    pub fn new(id: usize, function_config: &FunctionConfig) -> Option<Vm> {
        let mut vm_process = Command::new("target/release/firerunner")
            .args(&[
                "--id",
                &id.to_string(),
                "--kernel",
                "/etc/snapfaas/vmlinux",
                "--kernel_args",
                "quiet",
                "--mem_size",
                &function_config.memory.to_string(),
                "--vcpu_count",
                &function_config.vcpus.to_string(),
                "--rootfs",
                &function_config.runtimefs,
                "--appfs",
                &function_config.appfs,
            ])
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .spawn();
        if vm_process.is_err() {
            return None;
        }
        let mut vm_process = vm_process.unwrap();
        //let mut ready_msg = String::new();
        // One byte is enough: the guest writes a single byte when it is up.
        let mut ready_msg = vec![0;1];
        {
            let stdout = vm_process.stdout.as_mut().unwrap();
            //stdout.read_to_string(&mut ready_msg);
            // If no ready message is received, kill the child process and
            // return None.
            // TODO: have a timeout here in case the firerunner process does
            // not die but hangs
            match stdout.read(&mut ready_msg) {
                Ok(_) => (), //info!("vm {:?} is ready", ready_msg),
                Err(e) => {
                    error!("No ready message received from {:?}, with {:?}", vm_process, e);
                    // NOTE(review): the Result from kill() is ignored here.
                    vm_process.kill();
                    return None;
                }
            }
        }
        return Some(Vm {
            id: id,
            memory: function_config.memory,
            process: vm_process,
            function_name: function_config.name.clone(),
        });
    }
    /// Send request to vm and wait for its response
    ///
    /// Writes the request payload to the VM's stdin and blocks reading the
    /// response from its stdout.
    pub fn process_req(&mut self, req: Request) -> Result<String, String> {
        let req_str = req.payload_as_string();
        let mut req_sender = self.process.stdin.as_mut().unwrap();
        let buf = req_str.as_bytes();
        // NOTE(review): the write result is ignored; a failed write will
        // surface as a read failure below.
        request::write_u8_vm(&buf, &mut req_sender);
        return match request::read_u8_vm(&mut self.process.stdout.as_mut().unwrap()) {
            Ok(rsp_buf) => Ok(String::from_utf8(rsp_buf).unwrap()),
            Err(e) => Err(String::from("failed to read from vm")),
        }
    }
    /// shutdown this vm
    pub fn shutdown(&mut self) {
        // TODO: not sure if kill() waits for the child process to terminate
        // before returning. This is relevant for shutdown latency measurement.
        // TODO: std::process::Child.kill() is equivalent to sending a SIGKILL
        // on unix platforms which means the child process won't be able to run
        // its clean up process. Previously, we shutdown VMs through SIGTERM
        // which does allow a shutdown process. We need to make sure using
        // SIGKILL won't create any issues with vms.
        self.process.kill();
    }
}
|
use crate::dither::{Ditherer, DithererBuilder};
use zerocopy::AsBytes;
/// A 24-bit signed PCM sample stored as 3 bytes in native byte order.
#[derive(AsBytes, Copy, Clone, Debug)]
#[allow(non_camel_case_types)]
#[repr(transparent)]
pub struct i24([u8; 3]);
impl i24 {
    /// Packs an S24 sample (24-bit value carried in an `i32`) into 3 bytes,
    /// keeping the platform's native byte order.
    fn from_s24(sample: i32) -> Self {
        // trim the padding in the most significant byte
        #[allow(unused_variables)]
        let [a, b, c, d] = sample.to_ne_bytes();
        // Little endian: the padding byte is `d`; big endian: it is `a`.
        #[cfg(target_endian = "little")]
        return Self([a, b, c]);
        #[cfg(target_endian = "big")]
        return Self([b, c, d]);
    }
}
/// Converts f64 PCM samples to narrower sample formats, optionally applying
/// dither noise before quantization.
pub struct Converter {
    ditherer: Option<Box<dyn Ditherer>>,
}
impl Converter {
    /// Builds a converter, instantiating a ditherer from `dither_config`
    /// when one is supplied.
    pub fn new(dither_config: Option<DithererBuilder>) -> Self {
        match dither_config {
            Some(ditherer_builder) => {
                let ditherer = (ditherer_builder)();
                info!("Converting with ditherer: {}", ditherer.name());
                Self {
                    ditherer: Some(ditherer),
                }
            }
            None => Self { ditherer: None },
        }
    }
    /// To convert PCM samples from floating point normalized as `-1.0..=1.0`
    /// to 32-bit signed integer, multiply by 2147483648 (0x80000000) and
    /// saturate at the bounds of `i32`.
    const SCALE_S32: f64 = 2147483648.;
    /// To convert PCM samples from floating point normalized as `-1.0..=1.0`
    /// to 24-bit signed integer, multiply by 8388608 (0x800000) and saturate
    /// at the bounds of `i24`.
    const SCALE_S24: f64 = 8388608.;
    /// To convert PCM samples from floating point normalized as `-1.0..=1.0`
    /// to 16-bit signed integer, multiply by 32768 (0x8000) and saturate at
    /// the bounds of `i16`. When the samples were encoded using the same
    /// scaling factor, like the reference Vorbis encoder does, this makes
    /// conversions transparent.
    const SCALE_S16: f64 = 32768.;
    /// Scales a normalized sample by `factor`, adding dither noise when
    /// configured, and rounds to the nearest integral value.
    pub fn scale(&mut self, sample: f64, factor: f64) -> f64 {
        // From the many float to int conversion methods available, match what
        // the reference Vorbis implementation uses: sample * 32768 (for 16 bit)
        // Casting float to integer rounds towards zero by default, i.e. it
        // truncates, and that generates larger error than rounding to nearest.
        match self.ditherer.as_mut() {
            Some(d) => (sample * factor + d.noise()).round(),
            None => (sample * factor).round(),
        }
    }
    // Special case for samples packed in a word of greater bit depth (e.g.
    // S24): clamp between min and max to ensure that the most significant
    // byte is zero. Otherwise, dithering may cause an overflow. This is not
    // necessary for other formats, because casting to integer will saturate
    // to the bounds of the primitive.
    pub fn clamping_scale(&mut self, sample: f64, factor: f64) -> f64 {
        let int_value = self.scale(sample, factor);
        // In two's complement, there are more negative than positive values.
        let min = -factor;
        let max = factor - 1.0;
        int_value.clamp(min, max)
    }
    /// Narrows f64 samples to f32 with a plain cast (no scaling or dither).
    pub fn f64_to_f32(&mut self, samples: &[f64]) -> Vec<f32> {
        samples.iter().map(|sample| *sample as f32).collect()
    }
    /// Converts to 32-bit signed PCM.
    pub fn f64_to_s32(&mut self, samples: &[f64]) -> Vec<i32> {
        samples
            .iter()
            .map(|sample| self.scale(*sample, Self::SCALE_S32) as i32)
            .collect()
    }
    // S24 is 24-bit PCM packed in an upper 32-bit word
    pub fn f64_to_s24(&mut self, samples: &[f64]) -> Vec<i32> {
        samples
            .iter()
            .map(|sample| self.clamping_scale(*sample, Self::SCALE_S24) as i32)
            .collect()
    }
    // S24_3 is 24-bit PCM in a 3-byte array
    pub fn f64_to_s24_3(&mut self, samples: &[f64]) -> Vec<i24> {
        samples
            .iter()
            .map(|sample| i24::from_s24(self.clamping_scale(*sample, Self::SCALE_S24) as i32))
            .collect()
    }
    /// Converts to 16-bit signed PCM.
    pub fn f64_to_s16(&mut self, samples: &[f64]) -> Vec<i16> {
        samples
            .iter()
            .map(|sample| self.scale(*sample, Self::SCALE_S16) as i16)
            .collect()
    }
}
|
// Biquadratic (BiQuad) Infinite Impulse Response (IIR) Filter.
/// Generic vector for integer IIR filter.
/// This struct is used to hold the x/y input/output data vector or the b/a coefficient
/// vector.
pub type Vec5 = [i32; 5];
/// Main IIR struct holds coefficient vector and a shift value which defines the fixed point position
pub struct Iir {
    pub ba: Vec5, // b and a coefficients can be changed. [b0,b1,b2,a1,a2]
    pub shift: i32, // shift for fixed point pos
    pub xy: Vec5, // x and y internal filter states [x0,x1,y0,y1,y2]
}
impl Iir {
    /// Filter tick. Takes a new input sample and returns a new output sample.
    pub fn tick(&mut self, x0: i32) -> i32 {
        // shift in x0: [x0,x1,y0,y1,y2] -> [x0_new, x0, x1, y0, y1],
        // i.e. element-wise the state now holds [x0,x1,x2,y1,y2].
        self.xy.copy_within(0..4, 1);
        self.xy[0] = x0;
        // rounding bias: half of one LSB at the fixed point position
        let y0 = 1 << ((self.shift) - 1);
        // accumulate b0*x0 + b1*x1 + b2*x2 + a1*y1 + a2*y2 in i64
        // (the feedback terms are added, so a1/a2 are presumably stored
        // pre-negated — TODO confirm against coefficient generation)
        let y = &self.xy
            .iter()
            .zip(&self.ba)
            .map(|(xi, ai)| *xi as i64 * *ai as i64)
            .fold(y0, |y, xa| y + xa);
        // store the output where x2 just expired; after the next tick's shift
        // it sits in the y1 slot, restoring the [x0,x1,y0,y1,y2] layout.
        self.xy[2] = (y >> self.shift) as i32;
        self.xy[2]
    }
}
|
use super::responses::GetTipsResponse;
use crate::Result;
use reqwest::Client;
/// Returns the list of tips from the node at `uri` via the `getTips` command.
pub async fn get_tips(client: Client, uri: String) -> Result<GetTipsResponse> {
    let body = json!({
        "command": "getTips",
    });
    Ok(client
        .post(&uri)
        // "Content-Type" is the real HTTP header name; the previous
        // "ContentType" is not a recognized header and is ignored by servers.
        .header("Content-Type", "application/json")
        .header("X-IOTA-API-Version", "1")
        .body(body.to_string())
        .send()?
        .json()?)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.