text
stringlengths
8
4.13M
mod error; mod srcstats; use std::path::PathBuf; use structopt::StructOpt; use srcstats::get_summary_src_stats; use error::StatsError; /// TODO: USE GPIO IN TG2560 PROJECT #[derive(Debug, StructOpt)] #[structopt( name = "RStat", about = "This tool generates statistics of Rust projects", )] struct Opt { #[structopt(name = "source directory", parse(from_os_str))] dir: PathBuf, #[structopt(name = "mode", long, short = "m")] mode: String, } fn main() -> Result<(), StatsError> { let opt = Opt::from_args(); let mode = &opt.mode[..]; match mode { "src" => { let stats = get_summary_src_stats(&opt.dir)?; println!("Total stats: {:?}", stats) } _ => println!("No stats!"), } Ok(()) }
use crate::format_context::FormatContext; use crate::order::{ filter_input::FilterInput, filter_output::FilterOutput, input::Input, input_kind::InputKind, output::Output, output_kind::OutputKind, stream::Stream, Filter, Order, OutputResult::Entry, ParameterValue, }; use crate::probe::deep::{CheckParameterValue, LoudnessResult, StreamProbeResult}; use ffmpeg_sys_next::log10; use std::collections::HashMap; pub fn create_graph<S: ::std::hash::BuildHasher>( filename: &str, params: &HashMap<String, CheckParameterValue, S>, ) -> Result<Order, String> { let mut inputs = vec![]; let mut outputs = vec![]; let mut filters = vec![]; let metadata_param = ParameterValue::Bool(true); let peak_param = ParameterValue::String("true".to_string()); let mut loudnessdetect_params: HashMap<String, ParameterValue> = HashMap::new(); loudnessdetect_params.insert("metadata".to_string(), metadata_param); loudnessdetect_params.insert("peak".to_string(), peak_param); match params.get("pairing_list") { Some(pairing_list) => { if let Some(pairs) = pairing_list.pairs.clone() { for (iter, pair) in pairs.iter().enumerate() { let mut amerge_params: HashMap<String, ParameterValue> = HashMap::new(); let mut amerge_input = vec![]; let mut input_streams_vec = vec![]; let mut lavfi_keys = vec!["lavfi.r128.I".to_string(), "lavfi.r128.LRA".to_string()]; let output_label = format!("output_label_{iter:?}"); for track in pair { let input_label = format!("input_label_{}", track.index); amerge_input.push(FilterInput { kind: InputKind::Stream, stream_label: input_label.clone(), }); input_streams_vec.push(Stream { index: track.index as u32, label: Some(input_label), }); } let channel: u8 = if pair.len() == 1 { pair[0].channel } else { pair.len() as u8 }; for ch in 0..channel { let key = format!("lavfi.r128.true_peaks_ch{ch}"); if !lavfi_keys.contains(&key) { lavfi_keys.push(key); } } inputs.push(Input::Streams { id: iter as u32, path: filename.to_string(), streams: input_streams_vec, }); outputs.push(Output { 
kind: Some(OutputKind::AudioMetadata), keys: lavfi_keys, stream: Some(output_label.clone()), path: None, streams: vec![], parameters: HashMap::new(), }); amerge_params.insert( "inputs".to_string(), ParameterValue::Int64(pair.len() as i64), ); filters.push(Filter { name: "amerge".to_string(), label: Some(format!("amerge_filter_{iter:?}")), parameters: amerge_params, inputs: Some(amerge_input), outputs: None, }); filters.push(Filter { name: "ebur128".to_string(), label: Some(format!("loudness_filter_{iter:?}")), parameters: loudnessdetect_params.clone(), inputs: None, outputs: None, }); filters.push(Filter { name: "aformat".to_string(), label: Some(format!("aformat_filter_{iter:?}")), parameters: HashMap::new(), inputs: None, outputs: Some(vec![FilterOutput { stream_label: output_label, }]), }); } } } None => { return Err("No input message for the loudness analysis (audio qualification)".to_string()) } } Order::new(inputs, filters, outputs) } pub fn detect_loudness<S: ::std::hash::BuildHasher>( filename: &str, streams: &mut [StreamProbeResult], audio_indexes: Vec<u32>, params: HashMap<String, CheckParameterValue, S>, ) { match create_graph(filename, &params) { Ok(mut order) => { if let Err(msg) = order.setup() { error!("{:?}", msg); return; } for index in audio_indexes { streams[index as usize].detected_loudness = Some(vec![]); } match order.process() { Ok(results) => { info!("END OF PROCESS"); info!("-> {:?} frames processed", results.len()); let mut context = FormatContext::new(filename).unwrap(); if let Err(msg) = context.open_input() { context.close_input(); error!("{:?}", msg); return; } for result in results { if let Entry(entry_map) = result { if let Some(stream_id) = entry_map.get("stream_id") { let index: i32 = stream_id.parse().unwrap(); if streams[(index) as usize].detected_loudness.is_none() { error!("Error : unexpected detection on stream ${index}"); break; } let detected_loudness = streams[(index) as usize] .detected_loudness .as_mut() .unwrap(); let 
mut loudness = LoudnessResult { range: -99.9, integrated: -99.9, true_peaks: vec![], }; let mut channel_start = 0; let mut channel_end = 0; if let Some(value) = entry_map.get("lavfi.r128.I") { let x = value.parse::<f64>().unwrap(); loudness.integrated = (x * 100.0).round() / 100.0; if loudness.integrated == -70.0 { loudness.integrated = -99.0; } } if let Some(value) = entry_map.get("lavfi.r128.LRA") { let y = value.parse::<f64>().unwrap(); loudness.range = (y * 100.0).round() / 100.0; } match params.get("pairing_list") { Some(pairing_list) => { if let Some(pairs) = &pairing_list.pairs { for pair in pairs { for (pos, track) in pair.iter().enumerate() { if index == track.index as i32 { if pair.len() == 1 { channel_start = 0; channel_end = track.channel; } else { channel_start = pos as u8; channel_end = (pos + 1) as u8; } } } } } } None => warn!("No input message for the loudness analysis (audio qualification)"), } for i in channel_start..channel_end { let str_tpk_key = format!("lavfi.r128.true_peaks_ch{i}"); if let Some(value) = entry_map.get(&str_tpk_key) { let energy = value.parse::<f64>().unwrap(); unsafe { let mut tpk = 20.0 * log10(energy); tpk = (tpk * 100.0).round() / 100.0; if tpk == std::f64::NEG_INFINITY { tpk = -99.00; } loudness.true_peaks.push(tpk); } } } detected_loudness.drain(..); detected_loudness.push(loudness); } } } } Err(msg) => { error!("ERROR: {}", msg); } } } Err(error) => error!("{:?}", error), } }
// Auto-generated (svd2rust-style) accessor for the CLKOUT register.
// `R`/`W` wrap the raw u32 register value; field `CKEN` (bit 7) enables the
// CLKOUT signal and field `CKSEL` (bits 0:5) selects its source clock.
// Generated code — do not hand-edit the match tables below.
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::CLKOUT { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `CKEN`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CKENR { #[doc = "Disable CLKOUT value."] DIS, #[doc = "Enable CLKOUT value."] EN, } impl CKENR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { CKENR::DIS => false, CKENR::EN => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> CKENR { match value { false => CKENR::DIS, true => CKENR::EN, } } #[doc = "Checks if the value of the field is `DIS`"] #[inline] pub fn is_dis(&self) -> bool { *self == CKENR::DIS } #[doc = "Checks if the value of the field is `EN`"] #[inline] pub fn is_en(&self) -> bool { *self == CKENR::EN } } #[doc = "Possible values of the field `CKSEL`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CKSELR { #[doc = "LFRC value."] LFRC, #[doc = "XT / 2 value."] XT_DIV2, #[doc = "XT / 4 value."] XT_DIV4, #[doc = "XT / 8 value."] XT_DIV8, #[doc = "XT / 16 value."] XT_DIV16, #[doc = "XT / 32 value."] XT_DIV32, #[doc = "1 Hz as selected in RTC value."] RTC_1HZ, #[doc = "XT / 2^21 value."] XT_DIV2M, #[doc = "XT value."] XT, #[doc = "100 Hz as selected in CLKGEN value."] CG_100HZ, #[doc = "HFRC value."] HFRC, #[doc = "HFRC / 4 value."] HFRC_DIV4, #[doc = "HFRC / 8 value."] HFRC_DIV8, #[doc = "HFRC / 16 value."] HFRC_DIV16, #[doc = "HFRC / 64 value."] HFRC_DIV64, #[doc = "HFRC / 128 value."] HFRC_DIV128, #[doc = "HFRC / 256 value."] HFRC_DIV256, #[doc = "HFRC / 512 value."] HFRC_DIV512, #[doc = "Flash Clock value."] FLASH_CLK, #[doc = "LFRC / 2 value."] LFRC_DIV2, #[doc = "LFRC / 32 value."] LFRC_DIV32, #[doc = "LFRC / 512 value."] LFRC_DIV512, #[doc = "LFRC / 32768 value."] LFRC_DIV32K, #[doc = "XT / 256 value."] XT_DIV256, #[doc = "XT / 8192 value."] XT_DIV8K, #[doc = "XT / 2^16 value."] XT_DIV64K, #[doc = "Uncal LFRC / 16 value."] ULFRC_DIV16, #[doc = "Uncal LFRC / 128 value."] ULFRC_DIV128, #[doc = "Uncal LFRC / 1024 value."] ULFRC_1HZ, #[doc = "Uncal LFRC / 4096 value."] ULFRC_DIV4K, #[doc = "Uncal LFRC / 2^20 value."] ULFRC_DIV1M, #[doc = "HFRC / 2^16 value."] HFRC_DIV64K, #[doc = "HFRC / 2^24 value."] HFRC_DIV16M, #[doc = "LFRC / 2^20 value."] LFRC_DIV1M, #[doc = "HFRC (not autoenabled) value."] HFRCNE, #[doc = "HFRC / 8 (not autoenabled) value."] HFRCNE_DIV8, #[doc = "XT (not autoenabled) value."] XTNE, #[doc = "XT / 16 (not autoenabled) value."] XTNE_DIV16, #[doc = "LFRC / 32 (not autoenabled) value."] LFRCNE_DIV32, #[doc = "LFRC (not autoenabled) - Default for undefined values value."] LFRCNE, #[doc = r" Reserved"] _Reserved(u8), } impl CKSELR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { CKSELR::LFRC => 0, CKSELR::XT_DIV2 => 1, CKSELR::XT_DIV4 => 2, CKSELR::XT_DIV8 => 3, CKSELR::XT_DIV16 => 4, CKSELR::XT_DIV32 => 5, CKSELR::RTC_1HZ => 16, CKSELR::XT_DIV2M => 22, CKSELR::XT => 23, CKSELR::CG_100HZ => 24, CKSELR::HFRC => 25,
// Encodings are sparse (gaps in the numeric ranges); unlisted values map to
// `_Reserved` in `_from` below.
CKSELR::HFRC_DIV4 => 26, CKSELR::HFRC_DIV8 => 27, CKSELR::HFRC_DIV16 => 28, CKSELR::HFRC_DIV64 => 29, CKSELR::HFRC_DIV128 => 30, CKSELR::HFRC_DIV256 => 31, CKSELR::HFRC_DIV512 => 32, CKSELR::FLASH_CLK => 34, CKSELR::LFRC_DIV2 => 35, CKSELR::LFRC_DIV32 => 36, CKSELR::LFRC_DIV512 => 37, CKSELR::LFRC_DIV32K => 38, CKSELR::XT_DIV256 => 39, CKSELR::XT_DIV8K => 40, CKSELR::XT_DIV64K => 41, CKSELR::ULFRC_DIV16 => 42, CKSELR::ULFRC_DIV128 => 43, CKSELR::ULFRC_1HZ => 44, CKSELR::ULFRC_DIV4K => 45, CKSELR::ULFRC_DIV1M => 46, CKSELR::HFRC_DIV64K => 47, CKSELR::HFRC_DIV16M => 48, CKSELR::LFRC_DIV1M => 49, CKSELR::HFRCNE => 50, CKSELR::HFRCNE_DIV8 => 51, CKSELR::XTNE => 53, CKSELR::XTNE_DIV16 => 54, CKSELR::LFRCNE_DIV32 => 55, CKSELR::LFRCNE => 57, CKSELR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> CKSELR { match value { 0 => CKSELR::LFRC, 1 => CKSELR::XT_DIV2, 2 => CKSELR::XT_DIV4, 3 => CKSELR::XT_DIV8, 4 => CKSELR::XT_DIV16, 5 => CKSELR::XT_DIV32, 16 => CKSELR::RTC_1HZ, 22 => CKSELR::XT_DIV2M, 23 => CKSELR::XT, 24 => CKSELR::CG_100HZ, 25 => CKSELR::HFRC, 26 => CKSELR::HFRC_DIV4, 27 => CKSELR::HFRC_DIV8, 28 => CKSELR::HFRC_DIV16, 29 => CKSELR::HFRC_DIV64, 30 => CKSELR::HFRC_DIV128, 31 => CKSELR::HFRC_DIV256, 32 => CKSELR::HFRC_DIV512, 34 => CKSELR::FLASH_CLK, 35 => CKSELR::LFRC_DIV2, 36 => CKSELR::LFRC_DIV32, 37 => CKSELR::LFRC_DIV512, 38 => CKSELR::LFRC_DIV32K, 39 => CKSELR::XT_DIV256, 40 => CKSELR::XT_DIV8K, 41 => CKSELR::XT_DIV64K, 42 => CKSELR::ULFRC_DIV16, 43 => CKSELR::ULFRC_DIV128, 44 => CKSELR::ULFRC_1HZ, 45 => CKSELR::ULFRC_DIV4K, 46 => CKSELR::ULFRC_DIV1M, 47 => CKSELR::HFRC_DIV64K, 48 => CKSELR::HFRC_DIV16M, 49 => CKSELR::LFRC_DIV1M, 50 => CKSELR::HFRCNE, 51 => CKSELR::HFRCNE_DIV8, 53 => CKSELR::XTNE, 54 => CKSELR::XTNE_DIV16, 55 => CKSELR::LFRCNE_DIV32, 57 => CKSELR::LFRCNE, i => CKSELR::_Reserved(i), } } #[doc = "Checks if the value of the field is `LFRC`"] #[inline] pub fn is_lfrc(&self) -> bool { *self
== CKSELR::LFRC } #[doc = "Checks if the value of the field is `XT_DIV2`"] #[inline] pub fn is_xt_div2(&self) -> bool { *self == CKSELR::XT_DIV2 } #[doc = "Checks if the value of the field is `XT_DIV4`"] #[inline] pub fn is_xt_div4(&self) -> bool { *self == CKSELR::XT_DIV4 } #[doc = "Checks if the value of the field is `XT_DIV8`"] #[inline] pub fn is_xt_div8(&self) -> bool { *self == CKSELR::XT_DIV8 } #[doc = "Checks if the value of the field is `XT_DIV16`"] #[inline] pub fn is_xt_div16(&self) -> bool { *self == CKSELR::XT_DIV16 } #[doc = "Checks if the value of the field is `XT_DIV32`"] #[inline] pub fn is_xt_div32(&self) -> bool { *self == CKSELR::XT_DIV32 } #[doc = "Checks if the value of the field is `RTC_1HZ`"] #[inline] pub fn is_rtc_1hz(&self) -> bool { *self == CKSELR::RTC_1HZ } #[doc = "Checks if the value of the field is `XT_DIV2M`"] #[inline] pub fn is_xt_div2m(&self) -> bool { *self == CKSELR::XT_DIV2M } #[doc = "Checks if the value of the field is `XT`"] #[inline] pub fn is_xt(&self) -> bool { *self == CKSELR::XT } #[doc = "Checks if the value of the field is `CG_100HZ`"] #[inline] pub fn is_cg_100hz(&self) -> bool { *self == CKSELR::CG_100HZ } #[doc = "Checks if the value of the field is `HFRC`"] #[inline] pub fn is_hfrc(&self) -> bool { *self == CKSELR::HFRC } #[doc = "Checks if the value of the field is `HFRC_DIV4`"] #[inline] pub fn is_hfrc_div4(&self) -> bool { *self == CKSELR::HFRC_DIV4 } #[doc = "Checks if the value of the field is `HFRC_DIV8`"] #[inline] pub fn is_hfrc_div8(&self) -> bool { *self == CKSELR::HFRC_DIV8 } #[doc = "Checks if the value of the field is `HFRC_DIV16`"] #[inline] pub fn is_hfrc_div16(&self) -> bool { *self == CKSELR::HFRC_DIV16 } #[doc = "Checks if the value of the field is `HFRC_DIV64`"] #[inline] pub fn is_hfrc_div64(&self) -> bool { *self == CKSELR::HFRC_DIV64 } #[doc = "Checks if the value of the field is `HFRC_DIV128`"] #[inline] pub fn is_hfrc_div128(&self) -> bool { *self == CKSELR::HFRC_DIV128 } #[doc = "Checks if the value of the field is `HFRC_DIV256`"] #[inline] pub fn is_hfrc_div256(&self) -> bool { *self == CKSELR::HFRC_DIV256 } #[doc = "Checks if the value of the field is `HFRC_DIV512`"] #[inline] pub fn is_hfrc_div512(&self) -> bool { *self == CKSELR::HFRC_DIV512 } #[doc = "Checks if the value of the field is `FLASH_CLK`"] #[inline] pub fn is_flash_clk(&self) -> bool { *self == CKSELR::FLASH_CLK } #[doc = "Checks if the value of the field is `LFRC_DIV2`"] #[inline] pub fn is_lfrc_div2(&self) -> bool { *self == CKSELR::LFRC_DIV2 } #[doc = "Checks if the value of the field is `LFRC_DIV32`"] #[inline] pub fn is_lfrc_div32(&self) -> bool { *self == CKSELR::LFRC_DIV32 } #[doc = "Checks if the value of the field is `LFRC_DIV512`"] #[inline] pub fn is_lfrc_div512(&self) -> bool { *self == CKSELR::LFRC_DIV512 } #[doc = "Checks if the value of the field is `LFRC_DIV32K`"] #[inline] pub fn is_lfrc_div32k(&self) -> bool { *self == CKSELR::LFRC_DIV32K } #[doc = "Checks if the value of the field is `XT_DIV256`"] #[inline] pub fn is_xt_div256(&self) -> bool { *self == CKSELR::XT_DIV256 } #[doc = "Checks if the value of the field is `XT_DIV8K`"] #[inline] pub fn is_xt_div8k(&self) -> bool { *self == CKSELR::XT_DIV8K } #[doc = "Checks if the value of the field is `XT_DIV64K`"] #[inline] pub fn is_xt_div64k(&self) -> bool { *self == CKSELR::XT_DIV64K } #[doc = "Checks if the value of the field is `ULFRC_DIV16`"] #[inline] pub fn is_ulfrc_div16(&self) -> bool { *self == CKSELR::ULFRC_DIV16 } #[doc = "Checks if the value of the field is `ULFRC_DIV128`"] #[inline] pub fn is_ulfrc_div128(&self) -> bool { *self == CKSELR::ULFRC_DIV128 } #[doc = "Checks if the value of the field is `ULFRC_1HZ`"] #[inline] pub fn is_ulfrc_1hz(&self) -> bool { *self == CKSELR::ULFRC_1HZ } #[doc = "Checks if the value of the field is `ULFRC_DIV4K`"] #[inline] pub fn is_ulfrc_div4k(&self) -> bool { *self == CKSELR::ULFRC_DIV4K } #[doc = "Checks if the value of the field is `ULFRC_DIV1M`"] #[inline] pub fn
is_ulfrc_div1m(&self) -> bool { *self == CKSELR::ULFRC_DIV1M } #[doc = "Checks if the value of the field is `HFRC_DIV64K`"] #[inline] pub fn is_hfrc_div64k(&self) -> bool { *self == CKSELR::HFRC_DIV64K } #[doc = "Checks if the value of the field is `HFRC_DIV16M`"] #[inline] pub fn is_hfrc_div16m(&self) -> bool { *self == CKSELR::HFRC_DIV16M } #[doc = "Checks if the value of the field is `LFRC_DIV1M`"] #[inline] pub fn is_lfrc_div1m(&self) -> bool { *self == CKSELR::LFRC_DIV1M } #[doc = "Checks if the value of the field is `HFRCNE`"] #[inline] pub fn is_hfrcne(&self) -> bool { *self == CKSELR::HFRCNE } #[doc = "Checks if the value of the field is `HFRCNE_DIV8`"] #[inline] pub fn is_hfrcne_div8(&self) -> bool { *self == CKSELR::HFRCNE_DIV8 } #[doc = "Checks if the value of the field is `XTNE`"] #[inline] pub fn is_xtne(&self) -> bool { *self == CKSELR::XTNE } #[doc = "Checks if the value of the field is `XTNE_DIV16`"] #[inline] pub fn is_xtne_div16(&self) -> bool { *self == CKSELR::XTNE_DIV16 } #[doc = "Checks if the value of the field is `LFRCNE_DIV32`"] #[inline] pub fn is_lfrcne_div32(&self) -> bool { *self == CKSELR::LFRCNE_DIV32 } #[doc = "Checks if the value of the field is `LFRCNE`"] #[inline] pub fn is_lfrcne(&self) -> bool { *self == CKSELR::LFRCNE } } #[doc = "Values that can be written to the field `CKEN`"] pub enum CKENW { #[doc = "Disable CLKOUT value."] DIS, #[doc = "Enable CLKOUT value."] EN, } impl CKENW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { CKENW::DIS => false, CKENW::EN => true, } } } #[doc = r" Proxy"] pub struct _CKENW<'a> { w: &'a mut W, } impl<'a> _CKENW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: CKENW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Disable CLKOUT value."] #[inline] pub fn dis(self) -> &'a mut W { self.variant(CKENW::DIS) } #[doc = "Enable CLKOUT value."] #[inline] pub fn en(self) -> &'a mut W { self.variant(CKENW::EN)
} #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `CKSEL`"] pub enum CKSELW { #[doc = "LFRC value."] LFRC, #[doc = "XT / 2 value."] XT_DIV2, #[doc = "XT / 4 value."] XT_DIV4, #[doc = "XT / 8 value."] XT_DIV8, #[doc = "XT / 16 value."] XT_DIV16, #[doc = "XT / 32 value."] XT_DIV32, #[doc = "1 Hz as selected in RTC value."] RTC_1HZ, #[doc = "XT / 2^21 value."] XT_DIV2M, #[doc = "XT value."] XT, #[doc = "100 Hz as selected in CLKGEN value."] CG_100HZ, #[doc = "HFRC value."] HFRC, #[doc = "HFRC / 4 value."] HFRC_DIV4, #[doc = "HFRC / 8 value."] HFRC_DIV8, #[doc = "HFRC / 16 value."] HFRC_DIV16, #[doc = "HFRC / 64 value."] HFRC_DIV64, #[doc = "HFRC / 128 value."] HFRC_DIV128, #[doc = "HFRC / 256 value."] HFRC_DIV256, #[doc = "HFRC / 512 value."] HFRC_DIV512, #[doc = "Flash Clock value."] FLASH_CLK, #[doc = "LFRC / 2 value."] LFRC_DIV2, #[doc = "LFRC / 32 value."] LFRC_DIV32, #[doc = "LFRC / 512 value."] LFRC_DIV512, #[doc = "LFRC / 32768 value."] LFRC_DIV32K, #[doc = "XT / 256 value."] XT_DIV256, #[doc = "XT / 8192 value."] XT_DIV8K, #[doc = "XT / 2^16 value."] XT_DIV64K, #[doc = "Uncal LFRC / 16 value."] ULFRC_DIV16, #[doc = "Uncal LFRC / 128 value."] ULFRC_DIV128, #[doc = "Uncal LFRC / 1024 value."] ULFRC_1HZ, #[doc = "Uncal LFRC / 4096 value."] ULFRC_DIV4K, #[doc = "Uncal LFRC / 2^20 value."] ULFRC_DIV1M, #[doc = "HFRC / 2^16 value."] HFRC_DIV64K, #[doc = "HFRC / 2^24 value."] HFRC_DIV16M, #[doc = "LFRC / 2^20 value."] LFRC_DIV1M, #[doc = "HFRC (not autoenabled) value."] HFRCNE, #[doc = "HFRC / 8 (not autoenabled) value."] HFRCNE_DIV8, #[doc = "XT (not autoenabled) value."] XTNE, #[doc = "XT / 16 (not autoenabled) value."] XTNE_DIV16, #[doc = "LFRC / 32 (not autoenabled) value."] LFRCNE_DIV32, #[doc = "LFRC (not autoenabled) - Default for undefined values value."] LFRCNE, } impl CKSELW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { CKSELW::LFRC => 0, CKSELW::XT_DIV2 => 1, CKSELW::XT_DIV4 => 2, CKSELW::XT_DIV8 => 3, CKSELW::XT_DIV16 => 4, CKSELW::XT_DIV32 => 5, CKSELW::RTC_1HZ => 16, CKSELW::XT_DIV2M => 22, CKSELW::XT => 23, CKSELW::CG_100HZ => 24, CKSELW::HFRC => 25, CKSELW::HFRC_DIV4 => 26, CKSELW::HFRC_DIV8 => 27, CKSELW::HFRC_DIV16 => 28, CKSELW::HFRC_DIV64 => 29, CKSELW::HFRC_DIV128 => 30, CKSELW::HFRC_DIV256 => 31, CKSELW::HFRC_DIV512 => 32, CKSELW::FLASH_CLK => 34, CKSELW::LFRC_DIV2 => 35, CKSELW::LFRC_DIV32 => 36, CKSELW::LFRC_DIV512 => 37, CKSELW::LFRC_DIV32K => 38, CKSELW::XT_DIV256 => 39, CKSELW::XT_DIV8K => 40, CKSELW::XT_DIV64K => 41, CKSELW::ULFRC_DIV16 => 42, CKSELW::ULFRC_DIV128 => 43, CKSELW::ULFRC_1HZ => 44, CKSELW::ULFRC_DIV4K => 45, CKSELW::ULFRC_DIV1M => 46, CKSELW::HFRC_DIV64K => 47, CKSELW::HFRC_DIV16M => 48, CKSELW::LFRC_DIV1M => 49, CKSELW::HFRCNE => 50, CKSELW::HFRCNE_DIV8 => 51, CKSELW::XTNE => 53, CKSELW::XTNE_DIV16 => 54, CKSELW::LFRCNE_DIV32 => 55, CKSELW::LFRCNE => 57, } } } #[doc = r" Proxy"] pub struct _CKSELW<'a> { w: &'a mut W, } impl<'a> _CKSELW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: CKSELW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "LFRC value."] #[inline] pub fn lfrc(self) -> &'a mut W { self.variant(CKSELW::LFRC) } #[doc = "XT / 2 value."] #[inline] pub fn xt_div2(self) -> &'a mut W { self.variant(CKSELW::XT_DIV2) } #[doc = "XT / 4 value."] #[inline] pub fn xt_div4(self) -> &'a mut W { self.variant(CKSELW::XT_DIV4) } #[doc = "XT / 8 value."] #[inline] pub fn xt_div8(self) -> &'a mut W { self.variant(CKSELW::XT_DIV8) } #[doc = "XT / 16 value."] #[inline] pub fn
xt_div16(self) -> &'a mut W { self.variant(CKSELW::XT_DIV16) } #[doc = "XT / 32 value."] #[inline] pub fn xt_div32(self) -> &'a mut W { self.variant(CKSELW::XT_DIV32) } #[doc = "1 Hz as selected in RTC value."] #[inline] pub fn rtc_1hz(self) -> &'a mut W { self.variant(CKSELW::RTC_1HZ) } #[doc = "XT / 2^21 value."] #[inline] pub fn xt_div2m(self) -> &'a mut W { self.variant(CKSELW::XT_DIV2M) } #[doc = "XT value."] #[inline] pub fn xt(self) -> &'a mut W { self.variant(CKSELW::XT) } #[doc = "100 Hz as selected in CLKGEN value."] #[inline] pub fn cg_100hz(self) -> &'a mut W { self.variant(CKSELW::CG_100HZ) } #[doc = "HFRC value."] #[inline] pub fn hfrc(self) -> &'a mut W { self.variant(CKSELW::HFRC) } #[doc = "HFRC / 4 value."] #[inline] pub fn hfrc_div4(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV4) } #[doc = "HFRC / 8 value."] #[inline] pub fn hfrc_div8(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV8) } #[doc = "HFRC / 16 value."] #[inline] pub fn hfrc_div16(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV16) } #[doc = "HFRC / 64 value."] #[inline] pub fn hfrc_div64(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV64) } #[doc = "HFRC / 128 value."] #[inline] pub fn hfrc_div128(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV128) } #[doc = "HFRC / 256 value."] #[inline] pub fn hfrc_div256(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV256) } #[doc = "HFRC / 512 value."] #[inline] pub fn hfrc_div512(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV512) } #[doc = "Flash Clock value."] #[inline] pub fn flash_clk(self) -> &'a mut W { self.variant(CKSELW::FLASH_CLK) } #[doc = "LFRC / 2 value."] #[inline] pub fn lfrc_div2(self) -> &'a mut W { self.variant(CKSELW::LFRC_DIV2) } #[doc = "LFRC / 32 value."] #[inline] pub fn lfrc_div32(self) -> &'a mut W { self.variant(CKSELW::LFRC_DIV32) } #[doc = "LFRC / 512 value."] #[inline] pub fn lfrc_div512(self) -> &'a mut W { self.variant(CKSELW::LFRC_DIV512) } #[doc = "LFRC / 32768 value."] #[inline] pub fn
lfrc_div32k(self) -> &'a mut W { self.variant(CKSELW::LFRC_DIV32K) } #[doc = "XT / 256 value."] #[inline] pub fn xt_div256(self) -> &'a mut W { self.variant(CKSELW::XT_DIV256) } #[doc = "XT / 8192 value."] #[inline] pub fn xt_div8k(self) -> &'a mut W { self.variant(CKSELW::XT_DIV8K) } #[doc = "XT / 2^16 value."] #[inline] pub fn xt_div64k(self) -> &'a mut W { self.variant(CKSELW::XT_DIV64K) } #[doc = "Uncal LFRC / 16 value."] #[inline] pub fn ulfrc_div16(self) -> &'a mut W { self.variant(CKSELW::ULFRC_DIV16) } #[doc = "Uncal LFRC / 128 value."] #[inline] pub fn ulfrc_div128(self) -> &'a mut W { self.variant(CKSELW::ULFRC_DIV128) } #[doc = "Uncal LFRC / 1024 value."] #[inline] pub fn ulfrc_1hz(self) -> &'a mut W { self.variant(CKSELW::ULFRC_1HZ) } #[doc = "Uncal LFRC / 4096 value."] #[inline] pub fn ulfrc_div4k(self) -> &'a mut W { self.variant(CKSELW::ULFRC_DIV4K) } #[doc = "Uncal LFRC / 2^20 value."] #[inline] pub fn ulfrc_div1m(self) -> &'a mut W { self.variant(CKSELW::ULFRC_DIV1M) } #[doc = "HFRC / 2^16 value."] #[inline] pub fn hfrc_div64k(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV64K) } #[doc = "HFRC / 2^24 value."] #[inline] pub fn hfrc_div16m(self) -> &'a mut W { self.variant(CKSELW::HFRC_DIV16M) } #[doc = "LFRC / 2^20 value."] #[inline] pub fn lfrc_div1m(self) -> &'a mut W { self.variant(CKSELW::LFRC_DIV1M) } #[doc = "HFRC (not autoenabled) value."] #[inline] pub fn hfrcne(self) -> &'a mut W { self.variant(CKSELW::HFRCNE) } #[doc = "HFRC / 8 (not autoenabled) value."] #[inline] pub fn hfrcne_div8(self) -> &'a mut W { self.variant(CKSELW::HFRCNE_DIV8) } #[doc = "XT (not autoenabled) value."] #[inline] pub fn xtne(self) -> &'a mut W { self.variant(CKSELW::XTNE) } #[doc = "XT / 16 (not autoenabled) value."] #[inline] pub fn xtne_div16(self) -> &'a mut W { self.variant(CKSELW::XTNE_DIV16) } #[doc = "LFRC / 32 (not autoenabled) value."] #[inline] pub fn lfrcne_div32(self) -> &'a mut W { self.variant(CKSELW::LFRCNE_DIV32) } #[doc = "LFRC (not autoenabled) - Default for undefined values value."] #[inline] pub fn lfrcne(self) -> &'a mut W { self.variant(CKSELW::LFRCNE) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 63; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 7 - Enable the CLKOUT signal"] #[inline] pub fn cken(&self) -> CKENR { CKENR::_from({ const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bits 0:5 - CLKOUT signal select"] #[inline] pub fn cksel(&self) -> CKSELR { CKSELR::_from({ const MASK: u8 = 63; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 7 - Enable the CLKOUT signal"] #[inline] pub fn cken(&mut self) -> _CKENW { _CKENW { w: self } } #[doc = "Bits 0:5 - CLKOUT signal select"] #[inline] pub fn cksel(&mut self) -> _CKSELW { _CKSELW { w: self } } }
// Module façade: declare the submodules and re-export all of their public
// items so callers can use this module's path without naming submodules.
// `r#const` uses a raw identifier because `const` is a Rust keyword.
pub mod r#const;
pub mod create;
pub mod id;
pub mod meta;
pub mod obj;
pub mod ptr;
pub mod tx;
// Flattened re-exports of every submodule's public API.
pub use self::create::*;
pub use self::id::*;
pub use self::meta::*;
pub use self::obj::*;
pub use self::ptr::*;
pub use self::r#const::*;
pub use self::tx::*;
//! run rust code on the rust-lang playground mod api; mod util; mod microbench; mod misc_commands; mod play_eval; mod procmacro; pub use microbench::*; pub use misc_commands::*; pub use play_eval::*; pub use procmacro::*; use super::reply_potentially_long_text;
#[doc = "Reader of register LE_PING_TIMER_WRAP_COUNT"]
pub type R = crate::R<u32, super::LE_PING_TIMER_WRAP_COUNT>;
#[doc = "Reader of field `CONN_SEC_CURRENT_WRAP`"]
pub type CONN_SEC_CURRENT_WRAP_R = crate::R<u16, u16>;
impl R {
    #[doc = "Bits 0:15 - This register holds the current position of the Ping timer."]
    #[inline(always)]
    pub fn conn_sec_current_wrap(&self) -> CONN_SEC_CURRENT_WRAP_R {
        // Extract the low 16 bits of the 32-bit register value.
        CONN_SEC_CURRENT_WRAP_R::new((self.bits & 0xffff) as u16)
    }
}
#![no_std]

use core::fmt::Debug;

pub use minitest_macros::tests;

/// Private implementation details used by the proc macro.
#[doc(hidden)]
pub mod export;

// Sealed-trait pattern: only `()` and `Result` can ever implement
// `TestOutcome`, because downstream crates cannot name `sealed::Sealed`.
mod sealed {
    pub trait Sealed {}
    impl Sealed for () {}
    impl<T, E> Sealed for Result<T, E> {}
}

/// Indicates whether a test succeeded or failed.
///
/// This is comparable to the `Termination` trait in libstd, except stable and tailored towards the
/// needs of defmt-test. It is implemented for `()`, which always indicates success, and `Result`,
/// where `Ok` indicates success.
pub trait TestOutcome: Debug + sealed::Sealed {
    fn is_success(&self) -> bool;
}

impl TestOutcome for () {
    // `()` carries no failure information, so it always counts as success.
    fn is_success(&self) -> bool {
        true
    }
}

impl<T: Debug, E: Debug> TestOutcome for Result<T, E> {
    // `Ok` means success, `Err` means failure.
    fn is_success(&self) -> bool {
        self.is_ok()
    }
}

// Log through whichever transport feature is enabled (semihosting and/or RTT).
// With neither feature enabled, borrow the arguments into a discarded tuple so
// they are still "used" and do not produce unused-variable warnings.
#[macro_export]
macro_rules! log {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            #[cfg(feature = "semihosting")]
            ::cortex_m_semihosting::hprintln!($s $(, $x)*);
            #[cfg(feature = "rtt")]
            ::rtt_target::rprintln!($s $(, $x)*);
            #[cfg(not(any(feature = "semihosting", feature="rtt")))]
            let _ = ($( & $x ),*);
        }
    };
}

/// Stop all tests without failure.
pub fn exit() -> ! {
    // With RTT there is no host-side exit channel; halt at a breakpoint so the
    // attached debugger notices.
    #[cfg(feature = "rtt")]
    cortex_m::asm::bkpt();
    #[cfg(feature = "semihosting")]
    cortex_m_semihosting::debug::exit(cortex_m_semihosting::debug::EXIT_SUCCESS);
    // Neither call above is diverging in the type system; satisfy `-> !`.
    unreachable!()
}

/// Stop all tests and report a failure.
pub fn fail() -> ! {
    // `udf` is an undefined-instruction fault and diverges on its own, hence
    // the `unreachable!()` below is gated out under the "rtt" feature.
    #[cfg(feature = "rtt")]
    cortex_m::asm::udf();
    #[cfg(feature = "semihosting")]
    cortex_m_semihosting::debug::exit(cortex_m_semihosting::debug::EXIT_FAILURE);
    #[cfg(not(feature = "rtt"))]
    unreachable!()
}
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ //! `TracedTask` and its methods. use std::collections::BTreeMap; use std::collections::HashMap; use std::fmt; use std::ops::DerefMut; use std::pin::Pin; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::task::Context; use std::task::Poll; use async_trait::async_trait; use futures::future; use futures::future::Either; use futures::future::Future; use futures::future::FutureExt; use futures::future::TryFutureExt; use nix::sys::mman::ProtFlags; use nix::sys::signal::Signal; use reverie::syscalls::Addr; use reverie::syscalls::AddrMut; use reverie::syscalls::MemoryAccess; use reverie::syscalls::Mprotect; use reverie::syscalls::Syscall; use reverie::syscalls::SyscallArgs; use reverie::syscalls::SyscallInfo; use reverie::syscalls::Sysno; use reverie::Backtrace; use reverie::Errno; use reverie::ExitStatus; use reverie::Frame; use reverie::GlobalRPC; use reverie::GlobalTool; use reverie::Guest; use reverie::Never; use reverie::Pid; #[cfg(target_arch = "x86_64")] use reverie::Rdtsc; use reverie::Subscription; use reverie::Tid; use reverie::TimerSchedule; use reverie::Tool; use safeptrace::ChildOp; use safeptrace::Error as TraceError; use safeptrace::Event; use safeptrace::Running; use safeptrace::Stopped; use safeptrace::Wait; use tokio::sync::broadcast; use tokio::sync::mpsc; use tokio::sync::oneshot; use tokio::sync::Mutex; use tokio::sync::Notify; use tokio::task::JoinError; use tokio::task::JoinHandle; use crate::children; use crate::cp; use crate::error::Error; use crate::gdbstub::BreakpointType; use crate::gdbstub::CoreRegs; use crate::gdbstub::GdbRequest; use crate::gdbstub::GdbServer; use crate::gdbstub::ResumeAction; use crate::gdbstub::ResumeInferior; use 
crate::gdbstub::StopEvent;
use crate::gdbstub::StopReason;
use crate::gdbstub::StoppedInferior;
use crate::regs::Reg;
use crate::regs::RegAccess;
use crate::stack::GuestStack;
use crate::timer::HandleFailure;
use crate::timer::Timer;
use crate::timer::TimerEventRequest;
use crate::vdso;

// Bookkeeping for a task parked in SIGSTOP (used by gdb's all-stop handling).
#[derive(Debug)]
struct Suspended {
    // Sender used to signal the suspended task (payload is a Pid) — the name
    // suggests a wake-up channel; confirm at the send site.
    waker: Option<mpsc::Sender<Pid>>,
    // Shared flag mirroring whether the task is currently suspended.
    suspended: Arc<AtomicBool>,
}

/// Expected resume action sent by gdb client, when the task is in a gdb stop.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ExpectedGdbResume {
    /// Expecting a normal gdb resume, either single step, until or continue
    Resume,
    /// Expecting a gdb step over, this happens the underlying task hit a sw
    /// breakpoint, gdb then needs to restore the original instruction --
    /// which implies deleting the breakpoint, single-step, then restore
    /// the breakpoint. This is a special case because we need to serialize
    /// the whole operation, otherwise when there's a different thread in
    /// the same process group which share the same breakpoint, removing
    /// breakpoint can cause the 2nd thread to miss the breakpoint.
    StepOver,
    /// Force single-step, even if Resume(continue) is requested. This
    /// is a workaround when fork/vfork/clone event is reported to gdb,
    /// gdb could then issue an `vCont;p<pid>:-1` to resume all threads in
    /// the thread group, which could cause the main thread to miss events.
    StepOnly,
}

/// Handle to a spawned child task; awaiting it yields the child's exit status.
pub struct Child {
    id: Pid,
    /// Task is suspended, either stopped by gdb (client), or received
    /// SIGSTOP sent by other threads in the same process group.
    suspended: Arc<AtomicBool>,
    /// Notify a task reached SIGSTOP.
    wait_all_stop_tx: Option<mpsc::Sender<(Pid, Suspended)>>,
    /// Channel to receive if a child task is becoming a daemon, when
    /// `daemonize()` is called.
    pub(crate) daemonizer_rx: Option<mpsc::Receiver<broadcast::Receiver<()>>>,
    /// Join handle to let child task exit gracefully.
    pub(crate) handle: JoinHandle<ExitStatus>,
}

impl Child {
    /// Child task identifier.
    pub fn id(&self) -> Pid {
        self.id
    }
}

impl fmt::Debug for Child {
    // Only the id is shown; the channels/handle carry no useful debug info.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Child").field("id", &self.id).finish()
    }
}

// Awaiting a `Child` awaits its tokio join handle.
impl Future for Child {
    type Output = Result<ExitStatus, JoinError>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
        self.handle.poll_unpin(cx)
    }
}

pub type Children = children::Children<Child>;

// Outcome of a signal-handling step.
enum HandleSignalResult {
    /// Signal is suppressed with task resumed.
    SignalSuppressed(Wait),
    /// signal needs to be delivered.
    SignalToDeliver(Stopped, Signal),
}

/// All the info needed to be able to interact with the global state.
struct GlobalState<G: GlobalTool> {
    /// The tool's static configuration data.
    cfg: G::Config,
    /// Reference to the tool's global state. This is used to send it "rpc" messages.
    gs_ref: Arc<G>,
    /// Events the tool is subscripted (like interception)
    subscriptions: Arc<Subscription>,
    /// guests are sequentialized already (by detcore for example), gdbserver
    /// should avoid sequentialize threads.
    sequentialized_guest: Arc<bool>,
}

// Hand-written `Clone`: every field is an `Arc` bump except the config clone,
// so cloned states all share the same underlying global state.
impl<G: GlobalTool> Clone for GlobalState<G> {
    fn clone(&self) -> Self {
        Self {
            cfg: self.cfg.clone(),
            gs_ref: self.gs_ref.clone(),
            subscriptions: self.subscriptions.clone(),
            sequentialized_guest: self.sequentialized_guest.clone(),
        }
    }
}

/// Our runtime representation of what Reverie knows about a guest thread. Its
/// lifetime matches the lifetime of the thread.
pub struct TracedTask<L: Tool> {
    /// Thread ID.
    tid: Pid,
    /// Process ID.
    pid: Pid,
    /// Parent process ID.
    ppid: Option<Pid>,
    /// State associated with the thread. Unique for each thread.
    thread_state: L::ThreadState,
    /// State associated with the process. This is shared among threads in the
    /// same thread group.
    process_state: Arc<L>,
    /// Global state. This is shared among all threads in a process tree.
    global_state: GlobalState<L::GlobalState>,
    /// True if we can intercept CPUID, false otherwise.
has_cpuid_interception: bool, /// Set to `Some` if the syscall has not been injected yet. `None` if it has. pending_syscall: Option<(Sysno, SyscallArgs)>, /// pending signal to deliver. This can happen when /// syscall got interrupted (by signal) pending_signal: Option<Signal>, /// A channel to allow short-circuiting the next state to main run loop. This /// is useful inside of `inject` or `tail_inject` where we might need to /// cancel a future early. next_state: mpsc::Sender<Result<Wait, TraceError>>, /// The receiving end of the next_state channel. next_state_rx: Option<mpsc::Receiver<Result<Wait, TraceError>>>, /// The timer tracking this task. Used to trigger RCB-based `timeouts`. timer: Timer, /// A notifier used to cancel `handle_syscall_event` futures. For example, /// `tail_inject` should never return to the handler. notifier: Arc<Notify>, /// Child processes to wait on. When one of the children exits, it should be /// removed from this list. child_procs: Arc<Mutex<Children>>, /// Child threads to wait on. When one of the child threads exits, it should /// be removed from this list. child_threads: Arc<Mutex<Children>>, /// Channel to send child processes to that are left over by the time this /// task exits. orphanage: mpsc::Sender<Child>, /// broadcast to kill all daemons daemon_kill_switch: broadcast::Sender<()>, /// Channel to damonize a process daemonizer: mpsc::Sender<broadcast::Receiver<()>>, /// The rx end of `daemonizer`. daemonizer_rx: Option<mpsc::Receiver<broadcast::Receiver<()>>>, /// Total number of tasks ntasks: Arc<AtomicUsize>, /// Total number of daemons ndaemons: Arc<AtomicUsize>, /// Task is a daemon is_a_daemon: bool, /// Software breakpoints. // NB: For multi-threaded programs, sw breakpoints apply to all threads // because they're in the same address space. Hence removing sw // breakpoint in one thread also remove it for the rest of the threads // in the same process group. 
*However*, our model is slightly different // because we use different tx/rx channels even the threads are in the // same process group, hence each threads owns `breakpoints: HashMap` // instead of `Arc<Mutex<..>>`. breakpoints: HashMap<u64, u64>, /// Notify gdbserver start accepting incoming packets. gdbserver_start_tx: Option<oneshot::Sender<()>>, /// task is suspended (received SIGSTOP) suspended: Arc<AtomicBool>, /// Notify gdbserver there's a new stop event. gdb_stop_tx: Option<mpsc::Sender<StoppedInferior>>, /// Task is attached by gdb. // NB: gdb doesn't always attach everything, when fork/clone is called. // gdb also allows detach from a task, and re-attach again. attached_by_gdb: bool, /// Task is resumed by gdb. // NB: gdb doesn't always attach everything, when fork/clone is called. // gdb also allows detach from a task, and re-attach again. resumed_by_gdb: Option<ResumeAction>, /// GDB resume request, gdbstub is the sender gdb_resume_tx: Option<mpsc::Sender<ResumeInferior>>, /// GDB resume request, reverie is the receiver gdb_resume_rx: Option<mpsc::Receiver<ResumeInferior>>, /// Request sent by gdb. the tx channel is used by gdb instead of /// `TracedTask`. gdb_request_tx: Option<mpsc::Sender<GdbRequest>>, /// Receiver to receive gdb request. gdb_request_rx: Option<mpsc::Receiver<GdbRequest>>, /// Wait to be resumed when in sigstop due to all stop mode. exit_suspend_tx: Option<mpsc::Sender<Pid>>, /// Wait to be resumed when in sigstop due to all stop mode. exit_suspend_rx: Option<mpsc::Receiver<Pid>>, /// Suspended task when hitting swbp. This is used to implement gdb's /// all stop mode. suspended_tasks: BTreeMap<Pid, Suspended>, /// Task needs (single) step over the swbp instruciton when a swbp is /// hit. unless this is done, if is not safe for other threads running /// in parallel to report breakpoint, otherwise there're could be an /// interleaved step-over, which might remove the breakpoint, hence /// causing others to miss the breakpoint. 
needs_step_over: Arc<Mutex<()>>, /// Whether or not the tool is currently holding a handle on the guest Stack (and thus /// potentially using actual stack memory within the guest). stack_checked_out: Arc<AtomicBool>, } impl<L: Tool> fmt::Debug for TracedTask<L> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("TracedTask") .field("tid", &self.tid) .field("pid", &self.pid) .field("ppid", &self.ppid) .finish() } } impl<L: Tool> TracedTask<L> { /// Create a new TracedTask. pub fn new( tid: Pid, cfg: <L::GlobalState as GlobalTool>::Config, gs_ref: Arc<L::GlobalState>, events: &Subscription, orphanage: mpsc::Sender<Child>, daemon_kill_switch: broadcast::Sender<()>, mut gdbserver: Option<GdbServer>, ) -> Self { let process_state = Arc::new(L::new(tid, &cfg)); let global_state = GlobalState { gs_ref, cfg, subscriptions: Arc::new(events.clone()), sequentialized_guest: Arc::new( gdbserver .as_ref() .map(|s| s.sequentialized_guest) .unwrap_or(false), ), }; let thread_state = process_state.init_thread_state(tid, None); let (next_state, next_state_rx) = mpsc::channel(1); let (daemonizer, daemonizer_rx) = mpsc::channel(1); let (gdb_resume_tx, gdb_resume_rx) = mpsc::channel(1); let (gdb_request_tx, gdb_request_rx) = mpsc::channel(1); let (exit_suspend_tx, exit_suspend_rx) = mpsc::channel(16); Self { tid, pid: tid, ppid: None, thread_state, process_state, global_state, has_cpuid_interception: false, pending_syscall: None, next_state, next_state_rx: Some(next_state_rx), timer: Timer::new(tid, tid), notifier: Arc::new(Notify::new()), pending_signal: None, child_procs: Arc::new(Mutex::new(Children::new())), child_threads: Arc::new(Mutex::new(Children::new())), orphanage, daemon_kill_switch, daemonizer, daemonizer_rx: Some(daemonizer_rx), ntasks: Arc::new(AtomicUsize::new(1)), ndaemons: Arc::new(AtomicUsize::new(0)), is_a_daemon: false, gdbserver_start_tx: gdbserver.as_mut().and_then(|s| s.server_tx.take()), gdb_stop_tx: gdbserver .as_mut() .and_then(|s| 
s.inferior_attached_tx.take()), attached_by_gdb: false, resumed_by_gdb: None, gdb_resume_tx: Some(gdb_resume_tx), gdb_resume_rx: Some(gdb_resume_rx), breakpoints: HashMap::new(), suspended: Arc::new(AtomicBool::new(false)), gdb_request_tx: Some(gdb_request_tx), gdb_request_rx: Some(gdb_request_rx), exit_suspend_tx: Some(exit_suspend_tx), exit_suspend_rx: Some(exit_suspend_rx), needs_step_over: Arc::new(Mutex::new(())), suspended_tasks: BTreeMap::new(), stack_checked_out: Arc::new(AtomicBool::new(false)), } } /// Create a child TracedTask corresponding to a clone() fn cloned(&self, child: Pid) -> Self { let global_state = self.global_state.clone(); let process_state = self.process_state.clone(); let thread_state = process_state.init_thread_state(child, Some((self.tid, &self.thread_state))); let (next_state, next_state_rx) = mpsc::channel(1); let (daemonizer, daemonizer_rx) = mpsc::channel(1); let (gdb_resume_tx, gdb_resume_rx) = mpsc::channel(1); let (gdb_request_tx, gdb_request_rx) = mpsc::channel(1); let (exit_suspend_tx, exit_suspend_rx) = mpsc::channel(16); self.ntasks.fetch_add(1, Ordering::SeqCst); Self { tid: child, pid: self.pid, ppid: self.ppid, thread_state, process_state, global_state, has_cpuid_interception: self.has_cpuid_interception, pending_syscall: None, next_state, next_state_rx: Some(next_state_rx), timer: Timer::new(self.pid, child), notifier: Arc::new(Notify::new()), pending_signal: None, child_procs: self.child_procs.clone(), child_threads: self.child_threads.clone(), orphanage: self.orphanage.clone(), daemon_kill_switch: self.daemon_kill_switch.clone(), daemonizer, daemonizer_rx: Some(daemonizer_rx), ntasks: self.ntasks.clone(), ndaemons: self.ndaemons.clone(), is_a_daemon: self.is_a_daemon, gdbserver_start_tx: None, gdb_stop_tx: None, attached_by_gdb: self.attached_by_gdb, resumed_by_gdb: self.resumed_by_gdb, gdb_resume_tx: Some(gdb_resume_tx), gdb_resume_rx: Some(gdb_resume_rx), breakpoints: self.breakpoints.clone(), suspended: 
Arc::new(AtomicBool::new(false)), gdb_request_tx: Some(gdb_request_tx), gdb_request_rx: Some(gdb_request_rx), exit_suspend_tx: Some(exit_suspend_tx), exit_suspend_rx: Some(exit_suspend_rx), needs_step_over: self.needs_step_over.clone(), suspended_tasks: BTreeMap::new(), stack_checked_out: Arc::new(AtomicBool::new(false)), } } /// Create a child TracedTask corresponding to a fork() fn forked(&self, child: Pid) -> Self { let process_state = Arc::new(L::new(child, &self.global_state.cfg)); let thread_state = process_state.init_thread_state(child, Some((self.tid, &self.thread_state))); let (next_state, next_state_rx) = mpsc::channel(1); let (daemonizer, daemonizer_rx) = mpsc::channel(1); let (gdb_resume_tx, gdb_resume_rx) = mpsc::channel(1); let (gdb_request_tx, gdb_request_rx) = mpsc::channel(1); let (exit_suspend_tx, exit_suspend_rx) = mpsc::channel(16); self.ntasks.fetch_add(1, Ordering::SeqCst); Self { tid: child, pid: child, ppid: Some(self.pid), thread_state, process_state, global_state: self.global_state.clone(), has_cpuid_interception: self.has_cpuid_interception, pending_syscall: None, next_state, next_state_rx: Some(next_state_rx), timer: Timer::new(child, child), notifier: Arc::new(Notify::new()), pending_signal: None, child_procs: Arc::new(Mutex::new(Children::new())), child_threads: Arc::new(Mutex::new(Children::new())), orphanage: self.orphanage.clone(), daemon_kill_switch: self.daemon_kill_switch.clone(), daemonizer, daemonizer_rx: Some(daemonizer_rx), ntasks: self.ntasks.clone(), ndaemons: self.ndaemons.clone(), // NB: if daemon forks, then its child's parent pid is no longer 1. 
is_a_daemon: false, gdbserver_start_tx: None, gdb_stop_tx: None, attached_by_gdb: self.attached_by_gdb, resumed_by_gdb: None, gdb_resume_tx: Some(gdb_resume_tx), gdb_resume_rx: Some(gdb_resume_rx), breakpoints: self.breakpoints.clone(), suspended: Arc::new(AtomicBool::new(false)), gdb_request_tx: Some(gdb_request_tx), gdb_request_rx: Some(gdb_request_rx), exit_suspend_tx: Some(exit_suspend_tx), exit_suspend_rx: Some(exit_suspend_rx), needs_step_over: Arc::new(Mutex::new(())), suspended_tasks: BTreeMap::new(), stack_checked_out: Arc::new(AtomicBool::new(false)), } } fn get_syscall(&self, task: &Stopped) -> Result<Syscall, TraceError> { let regs = task.getregs()?; let nr = Sysno::from(regs.orig_syscall() as i32); let args = regs.args(); Ok(Syscall::from_raw( nr, SyscallArgs::new( args.0 as usize, args.1 as usize, args.2 as usize, args.3 as usize, args.4 as usize, args.5 as usize, ), )) } } fn set_ret(task: &Stopped, ret: Reg) -> Result<Reg, TraceError> { let mut regs = task.getregs()?; let old = regs.ret(); *regs.ret_mut() = ret; task.setregs(regs)?; Ok(old) } /// Handles a potentially internal error, converting it to an exit status. async fn handle_internal_error(err: Error) -> Result<ExitStatus, reverie::Error> { match err { Error::Internal(TraceError::Died(zombie)) => Ok(zombie.reap().await), Error::Internal(TraceError::Errno(errno)) => Err(errno.into()), Error::External(err) => Err(err), } } /// Helper for canceling handlers. async fn cancellable<F>(notifier: Arc<Notify>, f: F) -> Option<F::Output> where F: Future, { futures::select! 
{
    // If the notifier fires first, `f` is dropped (i.e. the handler is cancelled).
    () = notifier.notified().fuse() => None,
    result = f.fuse() => Some(result),
} }

#[cfg(target_arch = "x86_64")]
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
enum SegfaultTrapInfo {
    Cpuid,
    Rdtscs(Rdtsc),
}

#[cfg(target_arch = "x86_64")]
impl SegfaultTrapInfo {
    /// Check if segfault is called by cpuid/rdtsc trap
    ///
    /// Matches the little-endian instruction bytes at RIP against the x86
    /// opcodes: `0F A2` = CPUID, `0F 31` = RDTSC, `0F 01 F9` = RDTSCP.
    pub fn decode_segfault(insn_at_rip: u64) -> Option<SegfaultTrapInfo> {
        if insn_at_rip & 0xffffu64 == 0xa20fu64 {
            Some(SegfaultTrapInfo::Cpuid)
        } else if insn_at_rip & 0xffffu64 == 0x310fu64 {
            Some(SegfaultTrapInfo::Rdtscs(Rdtsc::Tsc))
        } else if insn_at_rip & 0xffffffu64 == 0xf9010fu64 {
            Some(SegfaultTrapInfo::Rdtscs(Rdtsc::Tscp))
        } else {
            None
        }
    }
}

// restore syscall context when it returns. This is needed because we might
// have injected a different syscall (or arguments) in handle_seccomp.
fn restore_context(
    task: &Stopped,
    context: libc::user_regs_struct,
    retval: Option<Reg>,
) -> Result<(), TraceError> {
    let mut regs = task.getregs()?;
    // Optionally override the syscall's return-value register.
    if let Some(ret) = retval {
        *regs.ret_mut() = ret;
    }
    // Restore instruction pointer.
    *regs.ip_mut() = context.ip();
    // Restore syscall arguments.
    regs.set_args(context.args());
    // This is needed when syscall is interrupted by a signal (ERESTARTSYS)
    // we need restore the original syscall number as well because it is
    // possible syscall is reinjected as a different variant, like vfork ->
    // clone, which accepts different arguments.
    *regs.orig_syscall_mut() = context.orig_syscall();
    // NB: syscall also clobbers %rcx/%r11, but we're not required to restore
    // them, because the syscall is finished and they're supposed to change.
    // TL&DR: do not restore %rcx/%r11 here.
task.setregs(regs) } impl<L: Tool + 'static> TracedTask<L> { #[cfg(target_arch = "x86_64")] async fn intercept_cpuid(&mut self) -> Result<(), Errno> { use reverie::syscalls::ArchPrctl; use reverie::syscalls::ArchPrctlCmd; self.inject_with_retry(ArchPrctl::new().with_cmd(ArchPrctlCmd::ARCH_SET_CPUID(0))) .await .map(|_| ()) } /// Perform the very first setup of a fresh tracee process: /// /// (1) Set up the special reverie/guest shared page in the tracee. /// /// (2) Also disables vdso within the guest /// /// Warning: this function MUTATES guest code to accomplish the modifications, even though this /// mutation is undone before it returns. As a result, it has an extra precondition. /// /// Precondition: all threads in the guest process are stopped. Otherwise a guest state may be /// executing the instructions that are mutated and may crash (due to problems with incoherent /// instruction fetch resulting in non-atomic writes to instructions that straddle cache line /// boundaries). /// /// Precondition: the caller is entitled to execute (blocking, destructive) waitpids against the /// target tracee. This must not race with concurrent asynchronous tasks operating on the same /// TID. /// /// Postcondition: the guest registers and code memory are restored to their original state, /// including RIP, but the vdso page and special shared page are modified accordingly. pub async fn tracee_preinit(&mut self, task: Stopped) -> Result<Stopped, TraceError> { type SavedInstructions = [u8; 8]; /// Helper function for tracee_preinit that does the core work. async fn setup_special_mmap_page( task: Stopped, saved_regs: &libc::user_regs_struct, ) -> Result<Stopped, TraceError> { // NOTE: This point in the code assumes that a specific instruction // sequence "SYSCALL; INT3", has been patched into the guest, and // that RIP points to the syscall. 
let mut regs = saved_regs.clone(); let page_addr = cp::PRIVATE_PAGE_OFFSET; *regs.syscall_mut() = Sysno::mmap as Reg; *regs.orig_syscall_mut() = regs.syscall(); regs.set_args(( page_addr as Reg, cp::PRIVATE_PAGE_SIZE as Reg, (libc::PROT_READ | libc::PROT_WRITE | libc::PROT_EXEC) as Reg, (libc::MAP_PRIVATE | libc::MAP_FIXED | libc::MAP_ANONYMOUS) as Reg, -1i64 as Reg, 0, )); task.setregs(regs)?; // Execute the injected mmap call. let mut running = task.step(None)?; // loop until second breakpoint hit after injected syscall. let task = loop { let (task, event) = running.next_state().await?.assume_stopped(); match event { Event::Signal(Signal::SIGTRAP) => break task, Event::Signal(sig) => { // We can catch spurious signals here, such as SIGWINCH. // All we can do is skip over them. tracing::debug!( "[{}] Skipping {:?} during initialization", task.pid(), event ); running = task.resume(sig)?; } Event::Seccomp => { // Injected mmap trapped. We may not necessarily // intercept a seccomp event here if the tool hasn't // subscribed to the mmap syscall. running = task.resume(None)?; } unknown => { panic!("task {} returned unknown event {:?}", task.pid(), unknown); } } }; // Make sure we got our desired address. assert_eq!( Errno::from_ret(task.getregs()?.ret() as usize)?, page_addr, "Could not mmap address {}", page_addr ); cp::populate_mmap_page(task.pid().into(), page_addr).map_err(|err| err)?; // Restore our saved registers, including our instruction pointer. task.setregs(*saved_regs)?; Ok(task) } /// Put the guest into the weird state where it has an /// "INT3;SYSCALL;INT3" patched into the code wherever RIP happens to be /// pointing. It leaves RIP pointing at the syscall instruction. This /// allows forcible injection of syscalls into the guest. 
async fn establish_injection_state( mut task: Stopped, ) -> Result<(Stopped, libc::user_regs_struct, SavedInstructions), TraceError> { #[cfg(target_arch = "x86_64")] const SYSCALL_BP: SavedInstructions = [ 0x0f, 0x05, // syscall 0xcc, // int3 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // padding ]; #[cfg(target_arch = "aarch64")] const SYSCALL_BP: SavedInstructions = [ 0x01, 0x00, 0x00, 0xd4, // svc 0 0x20, 0x00, 0x20, 0xd4, // brk 1 ]; // Save the original registers so we can restore them later. let regs = task.getregs()?; // Saved instruction memory let ip = AddrMut::from_raw(regs.ip() as usize).unwrap(); let saved: SavedInstructions = task.read_value(ip)?; // Patch the tracee at the current instruction pointer. // // NOTE: `process_vm_writev` cannot write to write-protected pages, // but `PTRACE_POKEDATA` can! Thus, we need to make sure we only // write one word-sized chunk at a time. Luckily, the instructions // we want to inject fit inside of just one 64-bit word. task.write_value(ip.cast(), &SYSCALL_BP)?; Ok((task, regs, saved)) } /// Undo the effects of `establish_injection_state` and put the program /// code memory and instruction pointer back to normal. fn remove_injection_state( task: &mut Stopped, regs: libc::user_regs_struct, saved: SavedInstructions, ) -> Result<(), TraceError> { // NOTE: Again, because `process_vm_writev` cannot write to // write-protected pages, we must write in word-sized chunks with // PTRACE_POKEDATA. let ip = AddrMut::from_raw(regs.ip() as usize).unwrap(); task.write_value(ip, &saved)?; task.setregs(regs)?; Ok(()) } let (task, regs, prev_state) = establish_injection_state(task).await?; let mut task = setup_special_mmap_page(task, &regs).await?; // Restore registers after adding our temporary injection state. remove_injection_state(&mut task, regs, prev_state)?; vdso::vdso_patch(self).await.expect("unable to patch vdso"); // Protect our trampoline page from being written to. 
We won't need to // change this again for the lifetime of the guest process. self.inject_with_retry( Mprotect::new() .with_addr(AddrMut::from_raw(cp::TRAMPOLINE_BASE)) .with_len(cp::TRAMPOLINE_SIZE) .with_protection(ProtFlags::PROT_READ | ProtFlags::PROT_EXEC), ) .await?; // Try to intercept cpuid instructions on x86_64 #[cfg(target_arch = "x86_64")] if self.global_state.subscriptions.has_cpuid() { self.has_cpuid_interception = self.intercept_cpuid().await.map_err(|err| { match err { Errno::ENODEV => tracing::warn!( "Unable to intercept CPUID: Underlying hardware does not support CPUID faulting" ), err => tracing::warn!("Unable to intercept CPUID: {}", err), } err }).is_ok(); } // Restore registers again after we've injected syscalls so that we // don't leave the return value register (%rax) in a dirty state. task.setregs(regs)?; Ok(task) } #[cfg(target_arch = "x86_64")] async fn handle_cpuid( &mut self, mut regs: libc::user_regs_struct, ) -> Result<libc::user_regs_struct, TraceError> { let eax = regs.rax as u32; let ecx = regs.rcx as u32; let cpuid = self .process_state .clone() .handle_cpuid_event(self, eax, ecx) .await?; regs.rax = cpuid.eax as u64; regs.rbx = cpuid.ebx as u64; regs.rcx = cpuid.ecx as u64; regs.rdx = cpuid.edx as u64; regs.rip += 2; self.timer.finalize_requests(); Ok(regs) } #[cfg(target_arch = "x86_64")] async fn handle_rdtscs( &mut self, mut regs: libc::user_regs_struct, request: Rdtsc, ) -> Result<libc::user_regs_struct, TraceError> { let retval = self .process_state .clone() .handle_rdtsc_event(self, request) .await?; regs.rax = retval.tsc & 0xffff_ffffu64; regs.rdx = retval.tsc >> 32; match request { Rdtsc::Tsc => { regs.rip += 2; } Rdtsc::Tscp => { regs.rip += 3; regs.rcx = retval.aux.unwrap_or(0) as u64; } } self.timer.finalize_requests(); Ok(regs) } /// Returns `true` if the signal was actually meant for the timer, and /// therefore should not be forwarded to the tool / guest. 
    async fn handle_timer(&mut self, task: Stopped) -> Result<(bool, Stopped), TraceError> {
        let task = match self.timer.handle_signal(task).await {
            // Signal was not ours: hand it back for normal delivery.
            Err(HandleFailure::ImproperSignal(task)) => return Ok((false, task)),
            // Timer event was cancelled; swallow the signal anyway.
            Err(HandleFailure::Cancelled(task)) => return Ok((true, task)),
            Err(HandleFailure::TraceError(e)) => return Err(e),
            // A different ptrace event surfaced; short-circuit the run loop.
            Err(HandleFailure::Event(wait)) => self.abort(Ok(wait)).await,
            Ok(task) => task,
        };
        self.process_state.clone().handle_timer_event(self).await;
        self.timer.finalize_requests();
        Ok((true, task))
    }

    /// Handle a state change in the guest, and leave it in a stopped state.
    /// Return the signal that the process would be resumed with, if any.
    ///
    /// Preconditions:
    /// * running on the ptracer pthread
    ///
    /// Postconditions:
    /// * guest thread may or may not be stopped, depending on value of GuestNext
    ///
    async fn handle_stop_event(&mut self, stopped: Stopped, event: Event) -> Result<Wait, Error> {
        self.timer.observe_event();

        // A task is processed by this loop on any state change, so we must
        // handle all possibilities here:
        Ok(match event {
            Event::Signal(sig) => self.handle_signal(stopped, sig).await?,
            // A state we reach in the middle, between the prehook (before exec
            // syscall) and the exec completing (posthook).
            Event::Exec(_new_pid) => self.handle_exec_event(stopped).await?,
            // A regular old system call.
            Event::Seccomp => self.handle_seccomp(stopped).await?,
            // A new thread/process was created (clone/fork/vfork).
            Event::NewChild(op, child) => self.handle_new_task(op, stopped, child, None).await?,
            Event::VforkDone => self.handle_vfork_done_event(stopped).await?,
            task_state => panic!("unknown task state: {:?}", task_state),
        })
    }

    /// Find this thread's entry in `child_threads` and take its all-stop
    /// notification channel (installed by `freeze_all`), if present.
    async fn get_stop_tx(&self) -> Option<(Arc<AtomicBool>, mpsc::Sender<(Pid, Suspended)>)> {
        for child in self.child_threads.lock().await.deref_mut().into_iter() {
            if child.id() == self.tid() {
                return Some((child.suspended.clone(), child.wait_all_stop_tx.take()?));
            }
        }
        None
    }

    /// Handle a SIGTRAP, which is either one of our gdb software breakpoints,
    /// a gdb single-step completing, or an unrelated trap to pass along.
    async fn handle_sigtrap(&mut self, task: Stopped) -> Result<HandleSignalResult, TraceError> {
        let resumed_by_gdb_step = self
            .resumed_by_gdb
            .map_or(false, |action| matches!(action, ResumeAction::Step(_)));
        let mut regs = task.getregs()?;
        // The trap fires *after* the 1-byte 0xcc breakpoint instruction, so
        // the breakpoint's address is ip - 1.
        let rip_minus_one = regs.ip() - 1;
        Ok(if self.breakpoints.contains_key(&rip_minus_one) {
            // Rewind over the breakpoint byte before resuming.
            *regs.ip_mut() = rip_minus_one;
            let next_state = self.resume_from_swbreak(task, regs).await?;
            HandleSignalResult::SignalSuppressed(next_state)
        } else if resumed_by_gdb_step {
            // A gdb-requested single step completed; report the stop and wait
            // for gdb to resume us again.
            self.notify_gdb_stop(StopReason::stopped(
                task.pid(),
                self.pid(),
                StopEvent::Signal(Signal::SIGTRAP),
                regs.into(),
            ))
            .await?;
            let running = self
                .await_gdb_resume(task, ExpectedGdbResume::Resume)
                .await?;
            HandleSignalResult::SignalSuppressed(running.next_state().await?)
        } else {
            let running = task.resume(None)?;
            HandleSignalResult::SignalSuppressed(running.next_state().await?)
        })
    }

    /// Handle a SIGSTOP, which `freeze_all` uses to suspend sibling threads:
    /// notify the freezer that we are stopped, then block until woken before
    /// resuming the guest.
    async fn handle_sigstop(&mut self, task: Stopped) -> Result<HandleSignalResult, TraceError> {
        let resumed_by_gdb_step = self
            .resumed_by_gdb
            .map_or(false, |action| matches!(action, ResumeAction::Step(_)));
        debug_assert!(!resumed_by_gdb_step);
        if let Some((suspended_flag, stop_tx)) = self.get_stop_tx().await {
            let notify_stop_tx = stop_tx
                .send((
                    task.pid(),
                    Suspended {
                        waker: self.exit_suspend_tx.clone(),
                        suspended: suspended_flag,
                    },
                ))
                .await;
            drop(stop_tx);
            if notify_stop_tx.is_ok() {
                // Block here until the freezing thread wakes us back up.
                if let Some(rx) = self.exit_suspend_rx.as_mut() {
                    let _resumed_by = rx.recv().await.unwrap();
                }
            }
        }
        Ok(HandleSignalResult::SignalSuppressed(
            task.resume(None)?.next_state().await?,
        ))
    }

    /// Handle SIGSEGV, which may actually be a trapped `cpuid` or
    /// `rdtsc`/`rdtscp` instruction we should emulate. Genuine segfaults
    /// are delivered to the guest.
    #[cfg(target_arch = "x86_64")]
    async fn handle_sigsegv(&mut self, task: Stopped) -> Result<HandleSignalResult, TraceError> {
        let regs = task.getregs()?;
        // Decode the faulting instruction to see whether it is one we intercept.
        let trap_info = Addr::from_raw(regs.rip as usize)
            .and_then(|addr| task.read_value(addr).ok())
            .and_then(SegfaultTrapInfo::decode_segfault);
        Ok(match trap_info {
            Some(SegfaultTrapInfo::Cpuid) => {
                let regs = self.handle_cpuid(regs).await?;
                task.setregs(regs)?;
                HandleSignalResult::SignalSuppressed(task.resume(None)?.next_state().await?)
            }
            Some(SegfaultTrapInfo::Rdtscs(req)) => {
                let regs = self.handle_rdtscs(regs, req).await?;
                task.setregs(regs)?;
                HandleSignalResult::SignalSuppressed(task.resume(None)?.next_state().await?)
            }
            None => HandleSignalResult::SignalToDeliver(task, Signal::SIGSEGV),
        })
    }

    /// Non-x86_64 fallback: no instruction interception, always deliver.
    #[cfg(not(target_arch = "x86_64"))]
    async fn handle_sigsegv(&mut self, task: Stopped) -> Result<HandleSignalResult, TraceError> {
        Ok(HandleSignalResult::SignalToDeliver(task, Signal::SIGSEGV))
    }

    // handle ptrace signal delivery stop
    async fn handle_signal(&mut self, task: Stopped, sig: Signal) -> Result<Wait, TraceError> {
        tracing::debug!("[{}] handle_signal: received signal {}", task.pid(), sig);
        // Dispatch on the specific signal first; anything we don't handle
        // internally is delivered to the guest (after the tool sees it below).
        let result = match sig {
            Signal::SIGSEGV => self.handle_sigsegv(task).await?,
            Signal::SIGSTOP => self.handle_sigstop(task).await?,
            Signal::SIGTRAP => self.handle_sigtrap(task).await?,
            sig if sig == Timer::signal_type() => {
                let (was_timer, task) = self.handle_timer(task).await?;
                if was_timer {
                    HandleSignalResult::SignalSuppressed(task.resume(None)?.next_state().await?)
                } else {
                    HandleSignalResult::SignalToDeliver(task, sig)
                }
            }
            sig => HandleSignalResult::SignalToDeliver(task, sig),
        };
        match result {
            HandleSignalResult::SignalSuppressed(wait) => Ok(wait),
            HandleSignalResult::SignalToDeliver(task, sig) => {
                // Let the tool observe (and possibly replace or suppress) the
                // signal before it reaches the guest.
                let sig = self
                    .process_state
                    .clone()
                    .handle_signal_event(self, sig)
                    .await?;
                self.timer.finalize_requests();
                Ok(task.resume(sig)?.next_state().await?)
            }
        }
    }

    // handle ptrace exec event
    async fn handle_exec_event(&mut self, task: Stopped) -> Result<Wait, TraceError> {
        // execve/execveat are tail injected, however, after exec, the new
        // program start as a clean slate, hence it is actually ok to do either
        // inject or tail inject after execve succeeded.
        self.pending_syscall = None;

        // TODO: Update PID? Need to write a test checking this.

        // Step the tracee to get the SIGTRAP that immediately follows the
        // PTRACE_EVENT_EXEC. We can't call `tracee_preinit` until after this
        // because when it tries to step the tracee, it'll get this SIGTRAP
        // signal instead.
        let (task, event) = task
            .step(None)?
            .wait_for_signal(Signal::SIGTRAP)
            .await?
            .assume_stopped();
        assert_eq!(event, Event::Signal(Signal::SIGTRAP));
        // Re-run pre-init setup against the fresh post-exec address space.
        let task = self.tracee_preinit(task).await?;
        self.process_state.clone().handle_post_exec(self).await?;
        self.timer.finalize_requests();

        if self.attached_by_gdb {
            let request_tx = self.gdb_request_tx.clone();
            let resume_tx = self.gdb_resume_tx.clone();
            let proc_exe = format!("/proc/{}/exe", task.pid());
            let exe = std::fs::read_link(&proc_exe[..]).unwrap();
            let stopped = StoppedInferior {
                reason: StopReason::stopped(
                    task.pid(),
                    self.pid(),
                    StopEvent::Exec(exe),
                    task.getregs()?.into(),
                ),
                request_tx: request_tx.unwrap(),
                resume_tx: resume_tx.unwrap(),
            };
            // NB: notify initial gdb stop, this is the first time we can
            // tell gdb tracee is ready, because a new memory map has been
            // loaded (due to execve). Otherwise gdb may try to manipulate
            // old process' address space.
            if let Some(attach_tx) = self.gdb_stop_tx.as_ref() {
                let _ = attach_tx.send(stopped).await.unwrap();
            }
            let running = self
                .await_gdb_resume(task, ExpectedGdbResume::Resume)
                .await?;
            Ok(running.next_state().await?)
        } else {
            Ok(task.step(None)?.next_state().await?)
        }
    }

    /// Handle a seccomp stop: report the pending syscall to the tool, skip
    /// the original syscall if the tool didn't re-inject it, and write the
    /// tool's return value back into the guest.
    async fn handle_seccomp(&mut self, mut task: Stopped) -> Result<Wait, Error> {
        let syscall = self.get_syscall(&task)?;
        let (nr, args) = syscall.into_parts();
        self.pending_syscall = Some((nr, args));
        // The tool callback is cancellable (e.g. by a tail inject).
        let retval = cancellable(self.notifier.clone(), async {
            self.process_state
                .clone()
                .handle_syscall_event(self, syscall)
                .await
        })
        .await;

        // If no syscall was injected, then we need to suppress the implicit
        // syscall.
        if self.pending_syscall.is_some() {
            task = self.skip_seccomp_syscall(task).await?;
        }

        // Finalize timer requests after `skip_seccomp_syscall`, which may step
        self.timer.finalize_requests();

        if let Some(retval) = retval {
            let ret = match retval {
                Ok(x) => x as u64,
                // Errors are handed to the guest as negated errno values.
                Err(err) => (-(err.into_errno()?.into_raw() as i64)) as u64,
            };
            set_ret(&task, ret)?;
        }

        // Finally, resume the guest.
        let sig = self.pending_signal.take();
        Ok(task.resume(sig)?.next_state().await?)
    }

    /// Register and spawn a driver task for a newly created child
    /// thread/process. `context`, if given, is the parent's saved register
    /// state to restore in both parent and child (used when the fork came
    /// from an injected syscall).
    async fn handle_new_task(
        &mut self,
        op: ChildOp,
        parent: Stopped,
        child: Running,
        context: Option<libc::user_regs_struct>,
    ) -> Result<Wait, TraceError> {
        tracing::debug!(
            "[scheduler] handling fork from parent {} to child {}: {:?}",
            parent.pid(),
            child.pid(),
            op
        );
        // Clone (thread) vs fork/vfork (process) produce differently
        // initialized task states.
        let mut child_task = match op {
            ChildOp::Clone => self.cloned(child.pid()),
            ChildOp::Fork => self.forked(child.pid()),
            ChildOp::Vfork => self.forked(child.pid()),
        };
        let (child_stop_tx, child_stop_rx) = mpsc::channel(1);
        child_task.gdb_stop_tx = Some(child_stop_tx);
        let daemonizer_rx = child_task.daemonizer_rx.take();
        let child_resume_tx = child_task.gdb_resume_tx.clone();
        let child_request_tx = child_task.gdb_request_tx.clone();
        let suspended = child_task.suspended.clone();
        if let Some(context) = context {
            // NOTE(review): the third argument appears to be a return-value
            // override (the child's pid, matching the fork convention) —
            // confirm against `restore_context`'s definition.
            restore_context(&parent, context, Some(child.pid().as_raw() as u64))?;
        }
        let id = child.pid();
        let task = tokio::task::spawn_local(async move {
            // The child could potentially exit here. In most cases the first
            // event we get here should be `Event::Signal(Signal::SIGSTOP)`, but
            // we can also receive `Event::Exit` if a thread is created via
            // `clone`, but immediately killed via an `exit_group`. We have to
            // handle that rare case here.
            //
            // NOTE: It is okay to call `wait` instead of the async `next_state`
            // here because the notifier is not yet aware of the new process.
            let (child, event) = child.wait().unwrap().assume_stopped();
            assert!(
                event == Event::Signal(Signal::SIGSTOP) || event == Event::Exit,
                "Got unexpected event {:?}",
                event
            );
            if let Some(context) = context {
                // Restore context, but only if the child hasn't arrived at
                // `Event::Exit`.
                if event == Event::Signal(Signal::SIGSTOP) {
                    restore_context(&child, context, None).unwrap();
                }
            }
            if child_task.is_a_daemon {
                child_task.ndaemons.fetch_add(1, Ordering::SeqCst);
            }
            let tid = child.pid();
            // Drive the child to completion; this spawned task's value is the
            // child's exit status.
            match child_task.run(child).await {
                Err(err) => {
                    tracing::error!("Error in tracee tid {}: {}", tid, err);

                    // We assume the tracee is stopped since this error likely
                    // originated from the tool itself when the tracee is
                    // already stopped. If the tracee is not in a stopped state,
                    // that's fine too and ignore the detach error.
                    let running = match Stopped::new_unchecked(tid).detach(None) {
                        Err(err) => {
                            // If we get an error here, the child process may
                            // not be in a ptrace stop.
                            tracing::error!("Failed to detach from {}: {}", tid, err);
                            return ExitStatus::Exited(1);
                        }
                        Ok(running) => running,
                    };

                    // Reap the process and get its exit status.
                    let (_pid, exit_status) = running.next_state().await.unwrap().assume_exited();
                    exit_status
                }
                Ok(exit_status) => exit_status,
            }
        });
        // Threads and processes are tracked in separate lists.
        if op == ChildOp::Clone {
            let mut child_threads = self.child_threads.lock().await;
            child_threads.push(Child {
                id,
                suspended,
                wait_all_stop_tx: None,
                daemonizer_rx,
                handle: task,
            });
        } else {
            let mut child_procs = self.child_procs.lock().await;
            child_procs.push(Child {
                id,
                suspended,
                wait_all_stop_tx: None,
                daemonizer_rx,
                handle: task,
            });
        }
        let parent_regs = parent.getregs()?;
        if self.attached_by_gdb {
            // NB: We report T05;create event (for clone). However gdbserver
            // from binutils-gdb doesn't report it, even after toggling
            // QThreadEvents, as mentioned in https://sourceware.org/gdb/onlinedocs/gdb/General-Query-Packets.html#QThreadEvents
            // We report `create` event anyway.
            self.notify_gdb_stop(StopReason::new_task(
                self.tid(),
                self.pid(),
                id,
                parent_regs.into(),
                op,
                child_request_tx,
                child_resume_tx,
                Some(child_stop_rx),
            ))
            .await?;

            // We just reported a new event, wait for gdb resume.
            let running = self
                .await_gdb_resume(parent, ExpectedGdbResume::StepOnly)
                .await?;
            // NB: We could potentially hit a breakpoint after above resume,
            // make sure we don't miss the breakpoint and await for gdb
            // resume (once again). This is possible because result of
            // handle_new_task in from_task_state is ignored, while it could
            // be a valid state like SIGTRAP, which could be a breakpoint is
            // hit.
            running
                .next_state()
                .and_then(|wait| self.check_swbreak(wait))
                .await
        } else {
            Ok(parent.step(None)?.next_state().await?)
        }
    }

    /// The vfork parent can run again; just resume it.
    async fn handle_vfork_done_event(&mut self, stopped: Stopped) -> Result<Wait, TraceError> {
        Ok(stopped.resume(None)?.next_state().await?)
    }

    /// Handle a ptrace exit event by reaping the final exit status.
    async fn handle_exit_event(task: Stopped) -> Result<ExitStatus, TraceError> {
        // Nothing to do but resume and wait for the final exit status.
        let wait = task.resume(None)?.next_state().await?;
        let (_pid, exit_status) = wait.assume_exited();
        Ok(exit_status)
    }

    /// Aborts the current handler. This just sends a result through a channel to
    /// the `run_loop`, which should cause the current future to be dropped and
    /// canceled. Thus, this function will never return so that execution of the
    /// current future doesn't proceed any further.
    async fn abort(&mut self, result: Result<Wait, TraceError>) -> ! {
        self.next_state.send(result).await.unwrap();

        // Wait on a future that will never complete. This pending future will
        // be dropped when the channel receives the event just sent.
        future::pending().await
    }

    /// Marks the current task as exited via a channel. The receiver end of the
    /// channel should cause the current future to be dropped and canceled. Thus,
    /// this function will never return so that execution doesn't proceed any
    /// further.
    async fn exit(&mut self, exit_status: ExitStatus) -> ! {
        self.abort(Ok(Wait::Exited(self.tid(), exit_status))).await
    }

    /// Marks the current task as having successfully called `execve` and so it
    /// should never return.
    async fn execve(&mut self, next_state: Wait) -> ! {
        self.abort(Ok(next_state)).await
    }

    /// Triggers the tool exit callbacks.
    async fn tool_exit(self, exit_status: ExitStatus) -> Result<(), reverie::Error> {
        if self.is_main_thread() {
            // Wait for all child threads to fully exit. This *must* happen before
            // the main thread can exit.
            // TODO: Use FuturesUnordered instead of `join_all` for better
            // performance.
            {
                let children = self.child_threads.lock().await.take_inner();
                future::join_all(children).await;
            }

            // Check if there are any children who's futures are still pending. If
            // this is the case, then they shall be considered "orphans" and are
            // "adopted" by the tracer process who shall then wait for them to exit
            // and get their final exit code. Normally, when not running under
            // ptrace, orphans are adopted by the init process who should
            // automatically reap them by waiting for the final exit status.
            let (orphans, _) = {
                let mut child_procs = self.child_procs.lock().await;
                child_procs.deref_mut().await
            };

            for orphan in orphans.into_inner() {
                // Bon voyage.
                self.orphanage.send(orphan).await.unwrap();
            }

            let _ = self
                .notify_gdb_stop(StopReason::Exited(self.pid(), exit_status))
                .await;

            let wrapped = WrappedFrom(self.tid, &self.global_state);

            // Thread exit
            self.process_state
                .on_exit_thread(self.tid, &wrapped, self.thread_state, exit_status)
                .await?;

            // The try_unwrap and subsequent unwrap are safe to do. ptrace
            // guarantees that all threads in the thread group have exited
            // before the main thread.
            let process_state = Arc::try_unwrap(self.process_state).unwrap_or_else(|_| {
                // If you end up seeing this panic, make sure that all clones of
                // `process_state` are dropped before reaching this point.
                panic!("Reverie internal invariant broken. try_unwrap on process state failed")
            });

            let wrapped = WrappedFrom(self.tid, &self.global_state);
            process_state
                .on_exit_process(self.tid, &wrapped, exit_status)
                .await?;

            let ntasks_remaining = self.ntasks.fetch_sub(1, Ordering::SeqCst);
            let ndaemons = self.ndaemons.load(Ordering::SeqCst);
            if self.is_a_daemon {
                self.ndaemons.fetch_sub(1, Ordering::SeqCst);
            }
            // Only daemons remain; pull the kill switch so they shut down too.
            if ntasks_remaining == 1 + ndaemons {
                // daemonize() might not get called, this is not an error.
                let _ = self.daemon_kill_switch.send(());
            }
        } else {
            let _ = self
                .notify_gdb_stop(StopReason::ThreadExited(
                    self.tid(),
                    self.pid(),
                    exit_status,
                ))
                .await;

            let wrapped = WrappedFrom(self.tid, &self.global_state);

            // Drop our entry from the live-thread list.
            self.child_threads
                .lock()
                .await
                .retain(|child| child.id() != self.tid);

            // Thread exit
            self.process_state
                .on_exit_thread(self.tid, &wrapped, self.thread_state, exit_status)
                .await?;

            self.ntasks.fetch_sub(1, Ordering::SeqCst);
            if self.is_a_daemon {
                self.ndaemons.fetch_sub(1, Ordering::SeqCst);
            }
        }

        Ok(())
    }

    /// Run the guest, converting internal errors into exit events that the
    /// `run` select loop can observe.
    async fn run_loop(&mut self, task: Stopped) -> Result<ExitStatus, reverie::Error> {
        match self.run_loop_internal(task).await {
            Ok(exit_status) => Ok(exit_status),
            Err(err) => {
                // Note: Calling handle_internal_error cannot happen in the
                // `select!()` of the `run` function because then the exit
                // events that get generated in here cannot be caught by the
                // `select!()`.
                handle_internal_error(err).await
            }
        }
    }

    /// Core event loop for one guest thread: resume, wait for the next
    /// ptrace state change, dispatch it, repeat until the thread exits.
    async fn run_loop_internal(&mut self, task: Stopped) -> Result<ExitStatus, Error> {
        // This is the beginning of the life of the guest. Allow the tool to
        // inject syscalls as soon as the thread starts.
        if let Some(Err(err)) = cancellable(self.notifier.clone(), async {
            self.process_state.clone().handle_thread_start(self).await
        })
        .await
        {
            // Propagate user errors. Don't care about the result of syscall injections.
            err.into_errno()?;
        }

        self.timer.finalize_requests();

        // Resume the guest for the first time. Note that the root task and
        // child tasks start out in a stopped state for different reasons: The
        // root task is stopped because of the SIGSTOP raised inside of `fork()`
        // after calling `traceme`. Child tasks start out in a running state,
        // but we wait for them to stop in `Event::NewChild`.
        //
        // NB: await_gdb_resume == resume if not attached_by_gdb.
        let running = self
            .await_gdb_resume(task, ExpectedGdbResume::Resume)
            .await?;

        // Notify gdb server (if any) that tracee is ready.
        if let Some(server_tx) = self.gdbserver_start_tx.take() {
            self.attached_by_gdb = true;
            server_tx.send(()).unwrap();
        }

        let mut task_state = running.next_state().await?;
        let mut next_state_rx = self.next_state_rx.take().unwrap();

        loop {
            match task_state {
                Wait::Stopped(stopped, event) => {
                    // Allow short-circuiting of the event stream. This makes it
                    // easier to send exit and execve events directly to the run
                    // loop from within `inject` or `tail_inject`.
                    let fut1 = next_state_rx.recv().fuse();
                    let fut2 = self.handle_stop_event(stopped, event).fuse();
                    futures::pin_mut!(fut1, fut2);
                    task_state = futures::select_biased! {
                        next_state = fut1 => {
                            if let Some(next_state) = next_state {
                                next_state.map_err(Error::Internal)
                            } else {
                                panic!()
                            }
                        }
                        next_state = fut2 => next_state,
                    }?;
                }
                Wait::Exited(pid, exit_status) => {
                    self.notify_gdb_stop(StopReason::Exited(pid, exit_status))
                        .await?;
                    break Ok(exit_status);
                }
            }
        }
    }

    /// Drive a single guest thread to completion. Returns the final exit code
    /// when that guest thread exits.
    pub async fn run(mut self, child: Stopped) -> Result<ExitStatus, reverie::Error> {
        let exit_status = {
            // Race the run loop against an out-of-band exit event.
            let exit_event = child.exit_event().fuse();
            let run_loop = self.run_loop(child).fuse();
            futures::pin_mut!(exit_event, run_loop);
            futures::select_biased! {
                task = exit_event => match Self::handle_exit_event(task).await {
                    Ok(exit_status) => exit_status,
                    Err(err) => handle_internal_error(err.into()).await?,
                },
                exit_status = run_loop => exit_status?,
            }
        };
        self.tool_exit(exit_status).await?;
        Ok(exit_status)
    }

    /// Skip the syscall which is about to happen in the tracee, switching the tracee
    /// from Seccomp() state to Stopped(SIGTRAP) state.
    ///
    /// This uses the convention that setting the syscall number to -1 causes the
    /// kernel to skip it. This function takes as argument the current register state
    /// and restores it after stepping over the skipped syscall instruction.
    ///
    /// Preconditions:
    /// Ptrace tracee is in a (seccomp) stopped state.
    /// The tracee was stopped with the RIP pointing just after a syscall instruction (+2).
    ///
    /// Postconditions:
    /// Set tracee state to Stopped/SIGTRAP.
    /// Restore the registers to the state specified by the regs arg.
    async fn skip_seccomp_syscall(&mut self, task: Stopped) -> Result<Stopped, TraceError> {
        // So here we are, at ptrace seccomp stop, if we simply resume, the kernel
        // would do the syscall, without our patch. We change the syscall number to
        // -1, so that kernel would simply skip the syscall, so that we can jump to
        // our patched syscall on the first run. Please note after calling this
        // function, the task state will no longer be in ptrace event seccomp.
        #[cfg(target_arch = "x86_64")]
        let regs = task.getregs()?;
        #[cfg(target_arch = "x86_64")]
        {
            let mut new_regs = regs;
            *new_regs.orig_syscall_mut() = -1i64 as u64;
            task.setregs(new_regs)?;
        }
        #[cfg(target_arch = "aarch64")]
        task.set_syscall(-1)?;
        let mut running = task.step(None)?;

        // After the step, wait for the next transition. Note that this can return
        // an exited state if there is a group exit while some thread is blocked on
        // a syscall.
        loop {
            match running.next_state().await? {
                Wait::Stopped(task, Event::Signal(Signal::SIGTRAP)) => {
                    #[cfg(target_arch = "x86_64")]
                    task.setregs(regs)?;
                    break Ok(task);
                }
                Wait::Stopped(task, Event::Signal(sig)) => {
                    // We can get a spurious signal here, such as SIGWINCH. Skip
                    // past them until the tracee eventually arrives at SIGTRAP.
                    running = task.step(sig)?;
                }
                Wait::Stopped(task, event) => {
                    panic!(
                        "skip_seccomp_syscall: PID {} got unexpected event: {:?}",
                        task.pid(),
                        event
                    );
                }
                Wait::Exited(_pid, exit_status) => {
                    break self.exit(exit_status).await;
                }
            }
        }
    }

    /// inject syscall for given tracee
    ///
    /// NB: limitations:
    /// - tracee must be in stopped state.
    /// - the tracee must have returned from PTRACE_EXEC_EVENT
    /// - must be called on the ptracer thread
    ///
    /// Side effects:
    /// - mutates contexts
    async fn untraced_syscall(
        &mut self,
        task: Stopped,
        nr: Sysno,
        args: SyscallArgs,
    ) -> Result<Result<i64, Errno>, TraceError> {
        tracing::trace!(
            "[scheduler/tool] (pid = {}) untraced syscall: {:?}",
            task.pid(),
            nr
        );
        let mut regs = task.getregs()?;
        let oldregs = regs;

        *regs.syscall_mut() = nr as Reg;
        *regs.orig_syscall_mut() = nr as Reg;
        regs.set_args((
            args.arg0 as Reg,
            args.arg1 as Reg,
            args.arg2 as Reg,
            args.arg3 as Reg,
            args.arg4 as Reg,
            args.arg5 as Reg,
        ));

        // Jump to our private page to run the syscall instruction there. See
        // `populate_mmap_page` for details.
        *regs.ip_mut() = cp::PRIVATE_PAGE_OFFSET as Reg;
        task.setregs(regs)?;

        // Step to run the syscall instruction.
        let wait = task.step(None)?.next_state().await?;

        // Get the result of the syscall to return to the caller.
        self.from_task_state(wait, Some(oldregs)).await
    }

    // Helper function: skip the pending seccomp syscall, then run the
    // requested syscall from our private page.
    async fn private_inject(
        &mut self,
        task: Stopped,
        nr: Sysno,
        args: SyscallArgs,
    ) -> Result<Result<i64, Errno>, TraceError> {
        let task = self.skip_seccomp_syscall(task).await?;
        self.untraced_syscall(task, nr, args).await
    }

    /// Interpret the wait status following an injected syscall and recover
    /// the syscall's return value (or propagate exit/exec/fork events that
    /// happened instead).
    async fn from_task_state(
        &mut self,
        wait_status: Wait,
        context: Option<libc::user_regs_struct>,
    ) -> Result<Result<i64, Errno>, TraceError> {
        match wait_status {
            Wait::Stopped(stopped, event) => match event {
                Event::Signal(_sig) if context.is_none() => {
                    let regs = stopped.getregs()?;
                    Ok(Ok(regs.ret() as i64))
                }
                Event::Signal(sig) => {
                    let mut regs = stopped.getregs()?;
                    // NB: it is possible to get interrupted by signal (such as
                    // SIGCHLD) before single step finishes (in that case rip ==
                    // 0x7000_0000u64).
                    debug_assert!(
                        regs.ip() as usize == cp::PRIVATE_PAGE_OFFSET + cp::SYSCALL_INSTR_SIZE
                            || regs.ip() as usize == cp::PRIVATE_PAGE_OFFSET
                    );
                    // interrupted by signal, return -ERESTARTSYS so that tracee can do a
                    // restart_syscall.
                    if sig != Signal::SIGTRAP {
                        *regs.ret_mut() = (-(Errno::ERESTARTSYS.into_raw()) as i64) as u64;
                        self.pending_signal = Some(sig);
                    }
                    if let Some(context) = context {
                        // Restore syscall args to original values. This is
                        // needed when we convert syscalls like SYS_open ->
                        // SYS_openat, syscall args are modified need to restore
                        // it back.
                        restore_context(&stopped, context, None)?;
                    }
                    Ok(Errno::from_ret(regs.ret() as usize).map(|x| x as i64))
                }
                Event::NewChild(op, child) => {
                    // The injected syscall created a new task; register it and
                    // return the child pid as the syscall result.
                    let ret = child.pid().as_raw() as i64;
                    let _ = self.handle_new_task(op, stopped, child, context).await?;
                    Ok(Ok(ret))
                }
                Event::Exec(_new_pid) => {
                    // This should never return.
                    let next_state = self.handle_exec_event(stopped).await?;
                    self.execve(next_state).await
                }
                Event::Syscall => {
                    let regs = stopped.getregs()?;
                    Ok(Errno::from_ret(regs.ret() as usize).map(|x| x as i64))
                }
                st => panic!("untraced_syscall returned unknown state: {:?}", st),
            },
            Wait::Exited(_pid, exit_status) => self.exit(exit_status).await,
        }
    }

    /// Inject a syscall on behalf of the tool, aborting the current handler
    /// on trace errors.
    async fn do_inject(&mut self, nr: Sysno, args: SyscallArgs) -> Result<i64, Errno> {
        match self.inner_inject(nr, args).await {
            Ok(ret) => ret,
            Err(err) => self.abort(Err(err)).await,
        }
    }

    async fn inner_inject(
        &mut self,
        nr: Sysno,
        args: SyscallArgs,
    ) -> Result<Result<i64, Errno>, TraceError> {
        let task = self.assume_stopped();
        tracing::debug!(
            "[tool] (tid {}) beginning inject of syscall: {}, args {:?}",
            self.tid(),
            nr,
            args,
        );
        if self.pending_syscall.take() == Some((nr, args)) {
            // If we're reinjecting the same syscall with the same arguments,
            // then we can just let the tracee continue and stop at sysexit.
            let wait = task.syscall(None)?.next_state().await?;
            self.from_task_state(wait, None).await
        } else {
            self.private_inject(task, nr, args).await
        }
    }

    /// Inject a syscall as the final action of the current handler; this
    /// never returns to the caller.
    async fn do_tail_inject(&mut self, nr: Sysno, args: SyscallArgs) -> ! {
        match self.inner_tail_inject(nr, args).await {
            Ok(_) => {
                // Drop the handle_syscall_event future.
                self.notifier.notify_one();
                future::pending().await
            }
            Err(err) => self.abort(Err(err)).await,
        }
    }

    async fn inner_tail_inject(
        &mut self,
        nr: Sysno,
        args: SyscallArgs,
    ) -> Result<Result<i64, Errno>, TraceError> {
        let tid = self.tid();
        tracing::info!(
            "[tool] (tid {}) beginning tail_inject of syscall: {}",
            &tid,
            nr,
        );
        let task = self.assume_stopped();
        if self.pending_syscall.take() == Some((nr, args)) {
            // We're reinjecting the same syscall with the same arguments.
            // Nothing to actually do but let the tracee resume.
            // The return value here doesn't matter.
            Ok(Ok(0))
        } else {
            // Syscall has already been injected. Can't do the optimization.
            self.private_inject(task, nr, args).await
        }
    }

    /// Get a ptrace stub which can do ptrace operations
    // Assumption: Task is in stopped state as long as we have a valid
    // reference to `TracedTask`.
    fn assume_stopped(&self) -> Stopped {
        Stopped::new_unchecked(self.tid())
    }

    /// Forward a stop report to the attached gdb client, if any.
    async fn notify_gdb_stop(&self, reason: StopReason) -> Result<(), TraceError> {
        if !self.attached_by_gdb {
            return Ok(());
        }
        if let Some(stop_tx) = self.gdb_stop_tx.as_ref() {
            let request_tx = self.gdb_request_tx.clone();
            let resume_tx = self.gdb_resume_tx.clone();
            let stop = StoppedInferior {
                reason,
                request_tx: request_tx.unwrap(),
                resume_tx: resume_tx.unwrap(),
            };
            let _ = stop_tx.send(stop).await.unwrap();
        }
        Ok(())
    }

    /// Service a single request from the gdb server, replying on the
    /// request's own oneshot channel.
    async fn handle_gdb_request(&mut self, request: Option<GdbRequest>) {
        if let Some(request) = request {
            match request {
                GdbRequest::SetBreakpoint(bkpt, reply_tx) => {
                    // Only software breakpoints are supported here.
                    if bkpt.ty == BreakpointType::Software {
                        let result = self.add_breakpoint(bkpt.addr).await;
                        reply_tx.send(result).unwrap();
                    }
                }
                GdbRequest::RemoveBreakpoint(bkpt, reply_tx) => {
                    if bkpt.ty == BreakpointType::Software {
                        let result = self.remove_breakpoint(bkpt.addr).await;
                        reply_tx.send(result).unwrap();
                    }
                }
                GdbRequest::ReadInferiorMemory(addr, length, reply_tx) => {
                    let result = self.read_inferior_memory(addr, length);
                    reply_tx.send(result).unwrap();
                }
                GdbRequest::WriteInferiorMemory(addr, length, data, reply_tx) => {
                    let result = self.write_inferior_memory(addr, length, data);
                    reply_tx.send(result).unwrap();
                }
                GdbRequest::ReadRegisters(reply_tx) => {
                    let result = self.read_registers();
                    reply_tx.send(result).unwrap();
                }
                GdbRequest::WriteRegisters(core_regs, reply_tx) => {
                    let result = self.write_registers(core_regs);
                    reply_tx.send(result).unwrap();
                }
            }
        }
    }

    /// Translate a gdb resume request into the corresponding ptrace resume
    /// (single step vs continue), constrained by what the caller expects.
    async fn handle_gdb_resume(
        resume: Option<ResumeInferior>,
        task: Stopped,
        resume_action: ExpectedGdbResume,
    ) -> Result<(Running, Option<ResumeInferior>), TraceError> {
        match resume {
            None => Ok((task.resume(None)?, None)),
            Some(resume) => {
                let is_resume = resume_action == ExpectedGdbResume::Resume || resume.detach;
                let is_step_only = resume_action == ExpectedGdbResume::StepOnly;
                let running = match resume.action {
                    ResumeAction::Step(sig) => task.step(sig)?,
                    ResumeAction::Continue(sig) if is_resume => task.resume(sig)?,
                    // Caller only wants a step: downgrade a continue to a step.
                    ResumeAction::Continue(sig) if is_step_only => task.step(sig)?,
                    action => panic!(
                        "[pid = {}] unexpected resume action {:?}, expecting: {:?}",
                        task.pid(),
                        action,
                        resume_action,
                    ),
                };
                Ok((running, Some(resume)))
            }
        }
    }

    /// Resume the tracee. If a gdb client is attached, first service gdb
    /// requests until the client sends the actual resume command.
    async fn await_gdb_resume(
        &mut self,
        task: Stopped,
        resume_action: ExpectedGdbResume,
    ) -> Result<Running, TraceError> {
        if !self.attached_by_gdb {
            return task.resume(None);
        }
        // Temporarily take ownership of both receivers for the select loop.
        let mut resume_rx = self.gdb_resume_rx.take().unwrap();
        let mut gdb_request_rx = self.gdb_request_rx.take().unwrap();
        let mut resume_future = Box::pin(resume_rx.recv());
        let (running, resumed) = loop {
            let request_future = Box::pin(gdb_request_rx.recv());
            match future::select(request_future, resume_future).await {
                Either::Left((gdb_request, pending_resume_future)) => {
                    self.handle_gdb_request(gdb_request).await;
                    resume_future = pending_resume_future;
                }
                Either::Right((resume_request, _)) => {
                    break Self::handle_gdb_resume(resume_request, task, resume_action).await?;
                }
            }
        };
        self.gdb_request_rx = Some(gdb_request_rx);
        self.gdb_resume_rx = Some(resume_rx);
        if let Some(resumed) = resumed {
            if resumed.detach {
                // no longer report stop event to gdb
                // self.gdb_stop_tx = None;
                self.attached_by_gdb = false;
            }
            self.resumed_by_gdb = Some(resumed.action);
        }
        Ok(running)
    }

    /// Resume from a software breakpoint set by gdb. The resume action is
    /// initiated from gdb (client).
    // NB: the caller has already rewound %rip to the breakpoint address
    // (see `handle_sigtrap` / `check_swbreak`).
    async fn resume_from_swbreak(
        &mut self,
        task: Stopped,
        regs: libc::user_regs_struct,
    ) -> Result<Wait, TraceError> {
        task.setregs(regs)?;
        // Task could be hitting a breakpoint, after previously suspended by
        // a different task, need to notify this task is fully stopped.
        self.suspended.store(true, Ordering::SeqCst);
        if let Some((suspended_flag, stop_tx)) = self.get_stop_tx().await {
            let _ = stop_tx
                .send((
                    self.tid(),
                    Suspended {
                        waker: None,
                        suspended: suspended_flag,
                    },
                ))
                .await
                .unwrap();
        }
        // When resuming from breakpoint, gdb (client) needs to remove the
        // breakpoint (implying restore the original instruction), do a
        // single-step (step-over), and re-insert the breakpoint.
        // Because removing (sw) breakpoint modifies the instructions, other
        // thread might miss the breakpoint after the breakpoint is removed
        // and before the breakpoint is (re-)inserted. Hence we must
        // serialize this sequence.
        let needs_step_over = self.needs_step_over.clone();
        let _guard = needs_step_over.lock().await;
        self.notify_gdb_stop(StopReason::stopped(
            task.pid(),
            self.pid(),
            StopEvent::SwBreak,
            regs.into(),
        ))
        .await?;
        // Stop all sibling threads while the breakpoint is temporarily
        // removed for the step-over.
        self.freeze_all().await?;
        let running = self
            .await_gdb_resume(task, ExpectedGdbResume::StepOver)
            .await?;
        let wait = running.next_state().await?.assume_stopped();
        let mut task = wait.0;
        let mut event = wait.1;
        // Detached by client.
        if !self.attached_by_gdb {
            self.thaw_all().await?;
            return Ok(Wait::Stopped(task, event));
        }
        // Step past spurious stops until the single-step's SIGTRAP arrives.
        task = loop {
            match event {
                Event::Signal(Signal::SIGTRAP) => break task,
                Event::Signal(Signal::SIGSTOP) => {
                    let running = task.step(None)?;
                    let wait = running.next_state().await?.assume_stopped();
                    task = wait.0;
                    event = wait.1;
                }
                // TODO: combine with handle_signal!
                Event::Signal(Signal::SIGCHLD) => {
                    // Re-deliver SIGCHLD while stepping.
                    let running = task.step(Signal::SIGCHLD)?;
                    let wait = running.next_state().await?.assume_stopped();
                    task = wait.0;
                    event = wait.1;
                }
                unknown => panic!("[pid = {}] got unexpected event {:?}", self.tid(), unknown),
            }
        };
        self.notify_gdb_stop(StopReason::stopped(
            task.pid(),
            self.pid(),
            StopEvent::Signal(Signal::SIGTRAP),
            task.getregs()?.into(),
        ))
        .await?;
        let running = self
            .await_gdb_resume(task, ExpectedGdbResume::Resume)
            .await?;
        let wait = running.next_state().await?;
        self.thaw_all().await?;
        Ok(wait)
    }

    /// check if the stop is caused by sw breakpoint.
    async fn check_swbreak(&mut self, wait: Wait) -> Result<Wait, TraceError> {
        match wait {
            Wait::Stopped(task, event) if event == Event::Signal(Signal::SIGTRAP) => {
                let mut regs = task.getregs()?;
                // The trap fires after the 1-byte breakpoint instruction.
                let rip_minus_one = regs.ip() - 1;
                if self.breakpoints.contains_key(&rip_minus_one) {
                    // Rewind over the breakpoint and resume via the swbreak path.
                    *regs.ip_mut() = rip_minus_one;
                    self.resume_from_swbreak(task, regs).await
                } else {
                    Ok(Wait::Stopped(task, event))
                }
            }
            other => Ok(other),
        }
    }

    /// Install a software breakpoint: save the original word at `addr` and
    /// patch its low byte to 0xcc (int3).
    async fn add_breakpoint(&mut self, addr: u64) -> Result<(), TraceError> {
        if let Some(bkpt_addr) = AddrMut::from_raw(addr as usize) {
            let mut task = self.assume_stopped();
            let saved_insn: u64 = task.read_value(bkpt_addr)?;
            // Replace only the low byte, keeping the rest of the word intact.
            let insn = (saved_insn & !0xffu64) | 0xccu64;
            task.write_value(bkpt_addr, &insn)?;
            self.breakpoints.insert(addr, saved_insn);
        }
        Ok(())
    }

    /// thaw all threads.
    async fn thaw_all(&mut self) -> Result<(), TraceError> {
        for (_pid, suspended_task) in core::mem::take(&mut self.suspended_tasks) {
            if let Some(tx) = suspended_task.waker.as_ref() {
                suspended_task.suspended.store(false, Ordering::SeqCst);
                let _sent = tx.try_send(self.tid());
            }
        }
        Ok(())
    }

    /// freeze all threads, except the caller.
    async fn freeze_all(&mut self) -> Result<(), TraceError> {
        // The tool have chosen to sequentialize thread execution, gdbserver
        // should avoid doing its own thread serialization, otherwise this
        // could lead to deadlock.
if *self.global_state.sequentialized_guest { return Ok(()); } let (stop_tx, mut stop_rx) = mpsc::channel(1); for child in self.child_threads.lock().await.deref_mut().into_iter() { if child.id() != self.tid() && !child.suspended.load(Ordering::SeqCst) { let killed = Errno::result(unsafe { libc::syscall(libc::SYS_tgkill, self.pid(), child.id(), Signal::SIGSTOP) }); if killed.is_ok() { child.suspended.store(true, Ordering::SeqCst); child.wait_all_stop_tx = Some(stop_tx.clone()); } } } drop(stop_tx); while let Some((pid, suspended_task)) = stop_rx.recv().await { self.suspended_tasks.insert(pid, suspended_task); } Ok(()) } async fn remove_breakpoint(&mut self, addr: u64) -> Result<(), TraceError> { let insn = self.breakpoints.remove(&addr).ok_or(Errno::ENOENT)?; let mut task = self.assume_stopped(); if let Some(bkpt_addr) = AddrMut::from_raw(addr as usize) { task.write_value(bkpt_addr, &insn)?; } Ok(()) } fn read_inferior_memory(&self, addr: u64, mut size: usize) -> Result<Vec<u8>, TraceError> { let task = self.assume_stopped(); // NB: dont' trust size to be sane blindly. if size > 0x8000 { size = 0x8000; } let mut res = vec![0; size]; if let Some(addr) = Addr::from_raw(addr as usize) { let nb = task.read(addr, &mut res)?; res.resize(nb, 0); } // There could be a software breakpoint within the address requested, // we should return the orignal contents without the breakpoint insn. // This is *not* documented in gdb remote protocol, however, both // gdbserver and rr does this. see: // rr: https://github.com/rr-debugger/rr/blob/master/src/GdbServer.cc#L561 // gdbserver: https://github.com/bminor/binutils-gdb/blob/master/gdbserver/mem-break.cc#L1914 for (bkpt, saved_insn) in self.breakpoints.iter() { if (addr..addr + res.len() as u64).contains(bkpt) { // This abuses bkpt insn 0xcc is single byte. 
res[*bkpt as usize - addr as usize] = *saved_insn as u8; } } Ok(res) } fn write_inferior_memory( &self, addr: u64, size: usize, data: Vec<u8>, ) -> Result<(), TraceError> { let mut task = self.assume_stopped(); let size = std::cmp::min(size, data.len()); let addr = AddrMut::from_raw(addr as usize).ok_or(Errno::EFAULT)?; task.write(addr, &data[..size])?; Ok(()) } fn read_registers(&self) -> Result<CoreRegs, TraceError> { let task = self.assume_stopped(); let regs = task.getregs()?; let fpregs = task.getfpregs()?; let core_regs = CoreRegs::from_parts(regs, fpregs); Ok(core_regs) } fn write_registers(&self, core_regs: CoreRegs) -> Result<(), TraceError> { let task = self.assume_stopped(); let (regs, fpregs) = core_regs.into_parts(); task.setregs(regs)?; task.setfpregs(fpregs)?; Ok(()) } } #[async_trait] impl<L: Tool + 'static> Guest<L> for TracedTask<L> { type Memory = Stopped; type Stack = GuestStack; #[inline] fn tid(&self) -> Pid { self.tid } #[inline] fn pid(&self) -> Pid { self.pid } #[inline] fn ppid(&self) -> Option<Pid> { self.ppid } fn memory(&self) -> Self::Memory { self.assume_stopped() } async fn regs(&mut self) -> libc::user_regs_struct { let task = self.assume_stopped(); match task.getregs() { Ok(ret) => ret, Err(err) => self.abort(Err(err)).await, } } async fn stack(&mut self) -> Self::Stack { match GuestStack::new(self.tid, self.stack_checked_out.clone()) { Ok(ret) => ret, Err(err) => self.abort(Err(err)).await, } } fn thread_state_mut(&mut self) -> &mut L::ThreadState { &mut self.thread_state } fn thread_state(&self) -> &L::ThreadState { &self.thread_state } async fn daemonize(&mut self) { let pid = self.pid(); self.ndaemons.fetch_add(1, Ordering::SeqCst); self.is_a_daemon = true; tracing::info!("[reverie] daemonizing pid {} ..", pid); self.daemonizer .send(self.daemon_kill_switch.subscribe()) .await .unwrap(); if self.ndaemons.load(Ordering::SeqCst) == self.ntasks.load(Ordering::SeqCst) { self.daemon_kill_switch.send(()).unwrap(); } } async fn 
inject<S: SyscallInfo>(&mut self, syscall: S) -> Result<i64, Errno> { // Call a non-templatized function to reduce code bloat. let (nr, args) = syscall.into_parts(); self.do_inject(nr, args).await } #[allow(unreachable_code)] async fn tail_inject<S: SyscallInfo>(&mut self, syscall: S) -> Never { // Call a non-templatized function to reduce code bloat. let (nr, args) = syscall.into_parts(); self.do_tail_inject(nr, args).await } fn set_timer(&mut self, sched: TimerSchedule) -> Result<(), reverie::Error> { let rcbs = match sched { TimerSchedule::Rcbs(r) => r, TimerSchedule::Time(dur) => Timer::as_ticks(dur), //if timer is imprecise there is no really a point in trying to single step any further than r TimerSchedule::RcbsAndInstructions(r, _) => r, }; self.timer .request_event(TimerEventRequest::Imprecise(rcbs))?; Ok(()) } fn set_timer_precise(&mut self, sched: TimerSchedule) -> Result<(), reverie::Error> { match sched { TimerSchedule::Rcbs(r) => self.timer.request_event(TimerEventRequest::Precise(r))?, TimerSchedule::Time(dur) => self .timer .request_event(TimerEventRequest::Precise(Timer::as_ticks(dur)))?, TimerSchedule::RcbsAndInstructions(r, i) => self .timer .request_event(TimerEventRequest::PreciseInstruction(r, i))?, }; Ok(()) } fn read_clock(&mut self) -> Result<u64, reverie::Error> { Ok(self.timer.read_clock()) } fn backtrace(&mut self) -> Option<Backtrace> { use unwind::Accessors; use unwind::AddressSpace; use unwind::Byteorder; use unwind::Cursor; use unwind::PTraceState; use unwind::RegNum; let mut frames = Vec::new(); let space = AddressSpace::new(Accessors::ptrace(), Byteorder::DEFAULT).ok()?; let state = PTraceState::new(self.tid.as_raw() as u32).ok()?; let mut cursor = Cursor::remote(&space, &state).ok()?; loop { let ip = cursor.register(RegNum::IP).ok()?; let is_signal = cursor.is_signal_frame().ok()?; frames.push(Frame { ip, is_signal }); if !cursor.step().ok()? 
{ break; } } // TODO: Take a snapshot of `/proc/self/maps` so the backtrace can be // processed offline? Some(Backtrace::new(self.tid(), frames)) } fn has_cpuid_interception(&self) -> bool { self.has_cpuid_interception } } #[async_trait] impl<L: Tool + 'static> GlobalRPC<L::GlobalState> for TracedTask<L> { async fn send_rpc<'a>( &'a self, args: <L::GlobalState as GlobalTool>::Request, ) -> <L::GlobalState as GlobalTool>::Response { let wrapped = WrappedFrom(self.tid(), &self.global_state); wrapped.send_rpc(args).await } fn config(&self) -> &<L::GlobalState as GlobalTool>::Config { &self.global_state.cfg } } /// Wrap a GlobalState with a Tid from which the messages originate. This enables the /// GlobalRPC instance below. struct WrappedFrom<'a, G: GlobalTool>(Tid, &'a GlobalState<G>); #[async_trait] impl<'a, G: GlobalTool> GlobalRPC<G> for WrappedFrom<'a, G> { async fn send_rpc(&self, args: G::Request) -> G::Response { // In debugging mode we round-trip through a serialized representation // to make sure it works. let deserial = if cfg!(debug_assertions) { let serial = bincode::serialize(&args).unwrap(); bincode::deserialize(&serial).unwrap() } else { args }; self.1.gs_ref.receive_rpc(self.0, deserial).await } fn config(&self) -> &G::Config { &self.1.cfg } }
// NOTE(review): svd2rust-generated register accessors for DAC_DHR12L2 —
// regenerate from the SVD rather than hand-editing.
#[doc = "Register `DAC_DHR12L2` reader"]
pub type R = crate::R<DAC_DHR12L2_SPEC>;
#[doc = "Register `DAC_DHR12L2` writer"]
pub type W = crate::W<DAC_DHR12L2_SPEC>;
#[doc = "Field `DACC2DHR` reader - DAC channel2 12-bit left-aligned data These bits are written by software which specify 12-bit data for DAC channel2."]
pub type DACC2DHR_R = crate::FieldReader<u16>;
#[doc = "Field `DACC2DHR` writer - DAC channel2 12-bit left-aligned data These bits are written by software which specify 12-bit data for DAC channel2."]
pub type DACC2DHR_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 12, O, u16>;
#[doc = "Field `DACC2DHRB` reader - DAC channel2 12-bit left-aligned data B"]
pub type DACC2DHRB_R = crate::FieldReader<u16>;
#[doc = "Field `DACC2DHRB` writer - DAC channel2 12-bit left-aligned data B"]
pub type DACC2DHRB_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 12, O, u16>;
impl R {
    #[doc = "Bits 4:15 - DAC channel2 12-bit left-aligned data These bits are written by software which specify 12-bit data for DAC channel2."]
    #[inline(always)]
    pub fn dacc2dhr(&self) -> DACC2DHR_R {
        DACC2DHR_R::new(((self.bits >> 4) & 0x0fff) as u16)
    }
    #[doc = "Bits 20:31 - DAC channel2 12-bit left-aligned data B"]
    #[inline(always)]
    pub fn dacc2dhrb(&self) -> DACC2DHRB_R {
        DACC2DHRB_R::new(((self.bits >> 20) & 0x0fff) as u16)
    }
}
impl W {
    #[doc = "Bits 4:15 - DAC channel2 12-bit left-aligned data These bits are written by software which specify 12-bit data for DAC channel2."]
    #[inline(always)]
    #[must_use]
    pub fn dacc2dhr(&mut self) -> DACC2DHR_W<DAC_DHR12L2_SPEC, 4> {
        DACC2DHR_W::new(self)
    }
    #[doc = "Bits 20:31 - DAC channel2 12-bit left-aligned data B"]
    #[inline(always)]
    #[must_use]
    pub fn dacc2dhrb(&mut self) -> DACC2DHRB_W<DAC_DHR12L2_SPEC, 20> {
        DACC2DHRB_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DAC channel2 12-bit left aligned data holding register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dac_dhr12l2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dac_dhr12l2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DAC_DHR12L2_SPEC;
impl crate::RegisterSpec for DAC_DHR12L2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`dac_dhr12l2::R`](R) reader structure"]
impl crate::Readable for DAC_DHR12L2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dac_dhr12l2::W`](W) writer structure"]
impl crate::Writable for DAC_DHR12L2_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DAC_DHR12L2 to value 0"]
impl crate::Resettable for DAC_DHR12L2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
// Copyright 2018 Vlad Yermakov // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::convert::From; use std::fmt::{self, Display, Formatter}; use std::ops::{Add, Div, Mul, Neg, Sub}; #[derive(PartialOrd, Debug, Copy, Clone)] pub struct Real(f64); // TODO: --//-- const EPS: Real = Real(1e-14); impl Real { pub fn new<T: Into<f64>>(real: T) -> Real { Real(real.into()) } pub fn zero() -> Real { Real::new(0) } pub fn value(&self) -> f64 { self.0 } pub fn abs(&self) -> Real { Real::new(self.0.abs()) } } impl Display for Real { fn fmt(&self, f: &mut Formatter) -> fmt::Result { self.0.fmt(f) } } impl PartialEq for Real { fn eq(&self, other: &Real) -> bool { (*self - *other).abs() < EPS } } impl<T: Into<f64>> From<T> for Real { fn from(some: T) -> Real { Real::new(some.into()) } } impl Neg for Real { type Output = Real; fn neg(self) -> Real { Real::new(-self.0) } } impl_std_ops_for_tuple_struct! { Real: @all } #[macro_export] macro_rules! real { ($a:expr) => { $crate::numbers::Real::new($a as f64) }; } impl_default! { Real, real!(0) }
// This file was generated by gir (https://github.com/gtk-rs/gir @ fbb95f4) // from gir-files (https://github.com/gtk-rs/gir-files @ 77d1f70) // DO NOT EDIT use Action; use ffi; use glib::object::IsA; use glib::translate::*; use glib_ffi; use gobject_ffi; use std::mem; use std::ptr; glib_wrapper! { pub struct ActionMap(Object<ffi::GActionMap, ffi::GActionMapInterface>); match fn { get_type => || ffi::g_action_map_get_type(), } } pub trait ActionMapExt { fn add_action<P: IsA<Action>>(&self, action: &P); //fn add_action_entries<P: Into<Option</*Unimplemented*/Fundamental: Pointer>>>(&self, entries: /*Ignored*/&[&ActionEntry], user_data: P); fn lookup_action(&self, action_name: &str) -> Option<Action>; fn remove_action(&self, action_name: &str); } impl<O: IsA<ActionMap>> ActionMapExt for O { fn add_action<P: IsA<Action>>(&self, action: &P) { unsafe { ffi::g_action_map_add_action(self.to_glib_none().0, action.to_glib_none().0); } } //fn add_action_entries<P: Into<Option</*Unimplemented*/Fundamental: Pointer>>>(&self, entries: /*Ignored*/&[&ActionEntry], user_data: P) { // unsafe { TODO: call ffi::g_action_map_add_action_entries() } //} fn lookup_action(&self, action_name: &str) -> Option<Action> { unsafe { from_glib_none(ffi::g_action_map_lookup_action(self.to_glib_none().0, action_name.to_glib_none().0)) } } fn remove_action(&self, action_name: &str) { unsafe { ffi::g_action_map_remove_action(self.to_glib_none().0, action_name.to_glib_none().0); } } }
// NOTE(review): svd2rust-generated register accessors for DBG_APB_FZ1 —
// regenerate from the SVD rather than hand-editing.
#[doc = "Register `DBG_APB_FZ1` reader"]
pub type R = crate::R<DBG_APB_FZ1_SPEC>;
#[doc = "Register `DBG_APB_FZ1` writer"]
pub type W = crate::W<DBG_APB_FZ1_SPEC>;
#[doc = "Field `DBG_TIM3_STOP` reader - Clocking of TIM3 counter when the core is halted This bit enables/disables the clock to the counter of TIM3 when the core is halted:"]
pub type DBG_TIM3_STOP_R = crate::BitReader;
#[doc = "Field `DBG_TIM3_STOP` writer - Clocking of TIM3 counter when the core is halted This bit enables/disables the clock to the counter of TIM3 when the core is halted:"]
pub type DBG_TIM3_STOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBG_RTC_STOP` reader - Clocking of RTC counter when the core is halted This bit enables/disables the clock to the counter of RTC when the core is halted:"]
pub type DBG_RTC_STOP_R = crate::BitReader;
#[doc = "Field `DBG_RTC_STOP` writer - Clocking of RTC counter when the core is halted This bit enables/disables the clock to the counter of RTC when the core is halted:"]
pub type DBG_RTC_STOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBG_WWDG_STOP` reader - Clocking of WWDG counter when the core is halted This bit enables/disables the clock to the counter of WWDG when the core is halted:"]
pub type DBG_WWDG_STOP_R = crate::BitReader;
#[doc = "Field `DBG_WWDG_STOP` writer - Clocking of WWDG counter when the core is halted This bit enables/disables the clock to the counter of WWDG when the core is halted:"]
pub type DBG_WWDG_STOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBG_IWDG_STOP` reader - Clocking of IWDG counter when the core is halted This bit enables/disables the clock to the counter of IWDG when the core is halted:"]
pub type DBG_IWDG_STOP_R = crate::BitReader;
#[doc = "Field `DBG_IWDG_STOP` writer - Clocking of IWDG counter when the core is halted This bit enables/disables the clock to the counter of IWDG when the core is halted:"]
pub type DBG_IWDG_STOP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DBG_I2C1_SMBUS_TIMEOUT` reader - SMBUS timeout when core is halted"]
pub type DBG_I2C1_SMBUS_TIMEOUT_R = crate::BitReader;
#[doc = "Field `DBG_I2C1_SMBUS_TIMEOUT` writer - SMBUS timeout when core is halted"]
pub type DBG_I2C1_SMBUS_TIMEOUT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 1 - Clocking of TIM3 counter when the core is halted This bit enables/disables the clock to the counter of TIM3 when the core is halted:"]
    #[inline(always)]
    pub fn dbg_tim3_stop(&self) -> DBG_TIM3_STOP_R {
        DBG_TIM3_STOP_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 10 - Clocking of RTC counter when the core is halted This bit enables/disables the clock to the counter of RTC when the core is halted:"]
    #[inline(always)]
    pub fn dbg_rtc_stop(&self) -> DBG_RTC_STOP_R {
        DBG_RTC_STOP_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - Clocking of WWDG counter when the core is halted This bit enables/disables the clock to the counter of WWDG when the core is halted:"]
    #[inline(always)]
    pub fn dbg_wwdg_stop(&self) -> DBG_WWDG_STOP_R {
        DBG_WWDG_STOP_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - Clocking of IWDG counter when the core is halted This bit enables/disables the clock to the counter of IWDG when the core is halted:"]
    #[inline(always)]
    pub fn dbg_iwdg_stop(&self) -> DBG_IWDG_STOP_R {
        DBG_IWDG_STOP_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 21 - SMBUS timeout when core is halted"]
    #[inline(always)]
    pub fn dbg_i2c1_smbus_timeout(&self) -> DBG_I2C1_SMBUS_TIMEOUT_R {
        DBG_I2C1_SMBUS_TIMEOUT_R::new(((self.bits >> 21) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 1 - Clocking of TIM3 counter when the core is halted This bit enables/disables the clock to the counter of TIM3 when the core is halted:"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_tim3_stop(&mut self) -> DBG_TIM3_STOP_W<DBG_APB_FZ1_SPEC, 1> {
        DBG_TIM3_STOP_W::new(self)
    }
    #[doc = "Bit 10 - Clocking of RTC counter when the core is halted This bit enables/disables the clock to the counter of RTC when the core is halted:"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_rtc_stop(&mut self) -> DBG_RTC_STOP_W<DBG_APB_FZ1_SPEC, 10> {
        DBG_RTC_STOP_W::new(self)
    }
    #[doc = "Bit 11 - Clocking of WWDG counter when the core is halted This bit enables/disables the clock to the counter of WWDG when the core is halted:"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_wwdg_stop(&mut self) -> DBG_WWDG_STOP_W<DBG_APB_FZ1_SPEC, 11> {
        DBG_WWDG_STOP_W::new(self)
    }
    #[doc = "Bit 12 - Clocking of IWDG counter when the core is halted This bit enables/disables the clock to the counter of IWDG when the core is halted:"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_iwdg_stop(&mut self) -> DBG_IWDG_STOP_W<DBG_APB_FZ1_SPEC, 12> {
        DBG_IWDG_STOP_W::new(self)
    }
    #[doc = "Bit 21 - SMBUS timeout when core is halted"]
    #[inline(always)]
    #[must_use]
    pub fn dbg_i2c1_smbus_timeout(&mut self) -> DBG_I2C1_SMBUS_TIMEOUT_W<DBG_APB_FZ1_SPEC, 21> {
        DBG_I2C1_SMBUS_TIMEOUT_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DBG APB freeze register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dbg_apb_fz1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dbg_apb_fz1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DBG_APB_FZ1_SPEC;
impl crate::RegisterSpec for DBG_APB_FZ1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`dbg_apb_fz1::R`](R) reader structure"]
impl crate::Readable for DBG_APB_FZ1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dbg_apb_fz1::W`](W) writer structure"]
impl crate::Writable for DBG_APB_FZ1_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DBG_APB_FZ1 to value 0"]
impl crate::Resettable for DBG_APB_FZ1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
// https://adventofcode.com/2017/day/14
//
// Part 1: count used squares in a 128x128 grid whose rows are the bits of
// knot hashes. Part 2: count 4-connected regions of used squares.

fn main() {
    let input = "ljoxqyyw";

    // Build the grid row by row and tally used squares as we go.
    let mut grid = Vec::new();
    let mut used_space = 0;
    for i in 0..128 {
        // Row i is derived from the knot hash of "<input>-<i>".
        let row_input = format!("{}-{}", input, i).into_bytes();
        let hash = knot_hash(&row_input);
        grid.push(hash_to_bits(&hash));
        // count_ones() replaces the hand-rolled nibble popcount table.
        used_space += hash.iter().map(|&b| b.count_ones()).sum::<u32>();
    }
    // Assert to facilitate further tweaks
    assert_eq!(8316, used_space);
    println!("{} spaces used", used_space);

    // Depth-first search through the grid to count distinct regions.
    let mut visited = [[false; 128]; 128];
    let mut regions = 0;
    for (j, row) in grid.iter().enumerate() {
        for (i, &cell) in row.iter().enumerate() {
            if cell && !visited[j][i] {
                // Unvisited used square: flood-fill its whole region.
                regions += 1;
                let mut stack = vec![(i, j)];
                while let Some((x, y)) = stack.pop() {
                    visited[y][x] = true;
                    if grid[y][x] {
                        for &(nx, ny) in get_neighbours(x, y).iter() {
                            if !visited[ny][nx] {
                                stack.push((nx, ny));
                            }
                        }
                    }
                }
            }
        }
    }
    // Assert to facilitate further tweaks
    assert_eq!(1074, regions);
    println!("{} regions present", regions);
}

/// 64-round knot hash (AoC 2017 day 10) of `input`: returns the 16-byte
/// dense hash obtained by XOR-ing each 16-byte chunk of the sparse hash.
fn knot_hash(input: &[u8]) -> Vec<u8> {
    // 0..=255 covers the full u8 range directly; the previous `0..255`
    // collect plus `push(255)` workaround is unnecessary.
    let mut list: Vec<u8> = (0..=255).collect();

    // Perform the knots.
    let mut slice_start = 0;
    let mut skip_size = 0;
    for _ in 0..64 {
        for &length in input.iter().chain(KNOT_PAD) {
            let slice_end = slice_start + length as usize;
            if slice_end <= 256 {
                // Slice doesn't wrap around: reverse it in place.
                // (Previously written as `&list[..].reverse()`, which took a
                // useless borrow of the unit return value.)
                list[slice_start..slice_end].reverse();
            } else {
                // Reverse a slice that wraps around the end of the list.
                for i in 0..(length / 2) as usize {
                    let e1 = (slice_start + i) % 256;
                    let e2 = (slice_start + length as usize - 1 - i) % 256;
                    list.swap(e1, e2);
                }
            }
            slice_start = (slice_start + length as usize + skip_size) % 256;
            skip_size += 1;
        }
    }

    // XOR 16-byte chunks to form the final dense hash.
    list.chunks(16)
        .map(|c| c.iter().fold(0, |acc, &x| acc ^ x))
        .collect()
}

/// Expand a hash into one bool per bit, most significant bit first.
fn hash_to_bits(hash: &[u8]) -> Vec<bool> {
    hash.iter()
        .flat_map(|&b| (0..8).rev().map(move |i| (b >> i) & 1 == 1))
        .collect()
}

/// The four 4-connected neighbours of (x, y), clamped to the 128x128 grid.
/// On a border the clamped entry degenerates to the cell itself, which is
/// harmless because the DFS skips already-visited cells.
fn get_neighbours(x: usize, y: usize) -> [(usize, usize); 4] {
    [
        (x, y.saturating_sub(1)),
        (if x + 1 < 128 { x + 1 } else { 127 }, y),
        (x, if y + 1 < 128 { y + 1 } else { 127 }),
        (x.saturating_sub(1), y),
    ]
}

/// Standard length suffix appended to the input lengths every round
/// (AoC 2017 day 10 specification).
static KNOT_PAD: &'static [u8] = &[17, 31, 73, 47, 23];
use std::collections::HashMap;

/// Lists custom emoji for a team.
///
/// Wraps https://api.slack.com/methods/emoji.list
#[derive(Clone, Debug, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ListResponse {
    // Whether the API call reported success.
    ok: bool,
    // Map from emoji name to its value — presumably an image URL or an
    // alias per the emoji.list API docs; verify against the Slack response.
    pub emoji: Option<HashMap<String, String>>,
    // Cache timestamp returned by the API, when present.
    cache_ts: Option<::Timestamp>,
}
// Environment (ABI) component of a target triple, e.g. the `gnu` in
// `x86_64-unknown-linux-gnu`. The `enum_impl!` macro (defined elsewhere)
// generates the enum together with its string conversions; the `Mingw32`
// variant lists two strings, which apparently lets it parse from either
// spelling — confirm against the macro definition.
enum_impl! {
    /// Environment
    Env {
        /// Unknown environment
        Unknown => "unknown",
        /// None
        None => "none",
        /// Android
        Android => "android",
        /// CODE16
        CODE16 => "code16",
        /// CoreCLR
        CoreCLR => "coreclr",
        /// Cygnus
        Cygnus => "cygnus",
        /// EABI
        EABI => "eabi",
        /// EABIHF
        EABIHF => "eabihf",
        /// GNU
        GNU => "gnu",
        /// GNUABI64
        GNUABI64 => "gnuabi64",
        /// GNUABIN32
        GNUABIN32 => "gnuabin32",
        /// GNUEABI
        GNUEABI => "gnueabi",
        /// GNUEABIHF
        GNUEABIHF => "gnueabihf",
        /// GNUX32
        GNUX32 => "gnux32",
        /// GNUILP32
        GNUILP32 => "gnu_ilp32",
        /// Itanium
        Itanium => "itanium",
        /// MSVC
        MSVC => "msvc",
        /// MacABI
        MacABI => "macabi",
        /// Mingw32
        Mingw32 => "mingw32" "mingw",
        /// Musl
        Musl => "musl",
        /// MuslEABI
        MuslEABI => "musleabi",
        /// MuslEABIHF
        MuslEABIHF => "musleabihf",
        /// Simulator
        Simulator => "simulator",
    }
}
use crate::platter::Platter;
use crate::error::{err, Error};

/// Virtual machine state: eight registers, a collection of platter arrays
/// (array 0 holds the running program), a free-list of abandoned array
/// slots, and the execution finger (index into array 0).
// NOTE(review): the terminology (platters, scrolls, orthography) suggests the
// ICFP 2006 Universal Machine — confirm against the spec.
pub struct Program {
    registers: [Platter; 8],
    platters: Vec<Vec<Platter>>,
    free: Vec<usize>,
    finger: usize
}

impl Program {
    /// Fresh machine: zeroed registers, an empty program array, no finger.
    pub fn new() -> Program {
        Program {
            registers: [Platter::from(0); 8],
            platters: vec![vec![]],
            free: vec![],
            finger: 0
        }
    }

    /// Current value of the execution finger.
    pub fn instruction_index(&self) -> usize {
        self.finger
    }

    /// Number of platters in the program array (array 0).
    pub fn instruction_count(&self) -> usize {
        self.platters[0].len()
    }

    /// Decode `scrolls` as big-endian 32-bit platters into array 0.
    /// A trailing chunk shorter than 4 bytes is silently ignored.
    pub fn load_program(&mut self, scrolls: &[u8]) {
        let mut program_vec = vec![];
        for chunks in scrolls.chunks(4) {
            if let &[a,b,c,d] = chunks {
                let val = (a as u32) << 24 | (b as u32) << 16 | (c as u32) << 8 | (d as u32);
                program_vec.push(Platter::from(val));
            } else {
                continue;
            }
        }
        self.platters[0] = program_vec;
    }

    /// If a step asks for input, we are given back an Inputter, which cannot
    /// otherwise be created. We can pass this inputter here with some input
    /// to complete the action.
    // An absent byte (end of input) stores all-ones in the target register.
    pub fn provide_input(&mut self, inputter: Inputter, ascii: Option<u8>) {
        if let Some(val) = ascii {
            self.registers[inputter.register] = Platter::from(val as u32);
        } else {
            self.registers[inputter.register] = Platter::from(!0);
        }
    }

    /// Fetch the platter under the finger, advance the finger, execute it.
    // NOTE(review): `?` on `Vec::get`'s Option implies a conversion to
    // `Error` provided elsewhere in the crate — see crate::error.
    pub fn step(&mut self) -> Result<StepResult,Error> {
        let program = &self.platters[0];
        // get operator
        let platter = *program.get(self.finger)?;
        // advance finger
        self.finger += 1;
        // apply operator
        self.apply_operator(platter)
    }

    /// Decode and execute a single operator. The operator number is in bits
    /// 28..=31; for standard operators the register indexes A, B, C are in
    /// bits 6..=8, 3..=5 and 0..=2 respectively (operator 13 instead encodes
    /// A in bits 25..=27 and a 25-bit immediate).
    fn apply_operator(&mut self, op: Platter) -> Result<StepResult,Error> {
        let op_val = op.to_u32();
        let op_num = (op_val >> 28) & 15;
        // Lazy decoders for the three register fields.
        let a = || ((op_val >> 6) & 7) as usize;
        let b = || ((op_val >> 3) & 7) as usize;
        let c = || (op_val & 7) as usize;
        match op_num {
            0 /* Conditional Move */ => {
                if self.registers[c()] != Platter::from(0) {
                    self.registers[a()] = self.registers[b()];
                }
            },
            1 /* Array Index */ => {
                let array = self.platters.get(self.registers[b()].to_pos())?;
                let val = array.get(self.registers[c()].to_pos())?;
                self.registers[a()] = *val;
            },
            2 /* Array Amendment */ => {
                let array = self.platters.get_mut(self.registers[a()].to_pos())?;
                let offset = self.registers[b()].to_pos();
                *array.get_mut(offset)? = self.registers[c()];
            },
            3 /* Addition */ => {
                self.registers[a()] = self.registers[b()].wrapping_add(self.registers[c()]);
            },
            4 /* Multiplication */ => {
                self.registers[a()] = self.registers[b()].wrapping_mul(self.registers[c()]);
            },
            5 /* Division */ => {
                let c_val = self.registers[c()];
                if c_val == Platter::from(0) {
                    return Err(err("divide by 0"));
                }
                self.registers[a()] = self.registers[b()] / c_val;
            },
            6 /* Not-And */ => {
                // NAND via De Morgan: !(b & c) == !b | !c.
                self.registers[a()] = !self.registers[b()] | !self.registers[c()];
            },
            7 /* Halt */ => {
                return Ok(StepResult::Halted)
            },
            8 /* Allocation */ => {
                // Reuse an abandoned slot when possible; otherwise grow.
                let size = self.registers[c()].to_pos();
                let pos = if let Some(idx) = self.free.pop() {
                    self.platters[idx] = vec![Platter::from(0); size];
                    idx
                } else {
                    let idx = self.platters.len();
                    self.platters.push(vec![Platter::from(0); size]);
                    idx
                };
                self.registers[b()] = Platter::from(pos as u32);
            },
            9 /* Abandonment */ => {
                // Empty the array and recycle its slot via the free-list.
                let idx = self.registers[c()].to_pos();
                *self.platters.get_mut(self.registers[c()].to_pos())? = vec![];
                self.free.push(idx);
            },
            10 /* Output */ => {
                return Ok(StepResult::Output{ ascii: self.registers[c()].to_u8() });
            },
            11 /* Input */ => {
                return Ok(StepResult::InputNeeded{ inputter: Inputter{ register: c() } });
            },
            12 /* LoadProgram */ => {
                // Array 0 is already the program; only clone when pos != 0.
                let pos = self.registers[b()].to_pos();
                if pos != 0 {
                    self.platters[0] = self.platters.get(pos)?.clone();
                }
                self.finger = self.registers[c()].to_pos();
            },
            13 /* Orthography */ => {
                // A in bits 25..=27, immediate value in the low 25 bits.
                let a = (op_val >> 25) & 7;
                let value = op_val & 0b0000_000_1111111111111111111111111;
                self.registers[a as usize] = Platter::from(value);
            },
            _ /* invalid op */ => {
                return Err(err("Invalid op"))
            }
        }
        Ok(StepResult::Continue)
    }
}

/// If a step succeeds we get back a result which describes
/// anything that needs to happen.
pub enum StepResult {
    // Op 7 (Halt): execution has finished.
    Halted,
    // Op 10 (Output): `ascii` should be written to the output stream.
    Output{ ascii: u8 },
    // Op 11 (Input): caller must obtain a byte and hand the `inputter`
    // back via `Program::provide_input` before stepping again.
    InputNeeded{ inputter: Inputter },
    // Any other op: nothing to do, keep stepping.
    Continue
}

/// If a step succeeds and asks for input, we get given back
/// this opaque struct which describes what needs to happen
/// with the input when it's passed back.
#[derive(Clone,Copy)]
pub struct Inputter {
    // Index of the register the provided input byte is written into.
    register: usize
}
use error::{Error, Result};
use handlers::Handler;
use uuid::Uuid;

use std::path::{Path, PathBuf};
use std::slice::IterMut;

pub mod dir;
pub mod file;
pub mod stat;

/// A virtual filesystem composed of an ordered list of mount points.
/// Requests are resolved against every mount point whose mount path is
/// a prefix of the requested path; later mounts take precedence (see
/// `open` and `read_dir`).
#[derive(Debug)]
pub struct CellFs {
    mount_points: Vec<MountPoint>,
}

impl CellFs {
    /// Creates an empty filesystem with no mount points.
    pub fn new() -> CellFs {
        CellFs {
            mount_points: vec![],
        }
    }

    /// Mounts `handler` at `path` and returns the id that can later be
    /// passed to `unmount`.
    pub fn mount<P: Into<PathBuf>>(&mut self, path: P, handler: Box<Handler>) -> Uuid {
        let mount_point = MountPoint::new(path, handler);
        let id = mount_point.id();
        self.mount_points.push(mount_point);
        id
    }

    /// Removes the mount point previously returned by `mount`.
    /// A non-existent id is silently ignored (retain drops nothing).
    pub fn unmount(&mut self, id: Uuid) {
        self.mount_points.retain(|mount_point| mount_point.id() != id);
    }

    // Iterator over (mount point, path relative to that mount) for every
    // mount point whose mount path is a prefix of `path`. With
    // `exists == true` the iterator additionally skips mount points whose
    // handler reports the relative path as absent.
    fn search<P: Into<PathBuf>>(&mut self, path: P, exists: bool) -> Search {
        Search::new(path, self, exists)
    }

    /// Returns true if at least one mount point reports `path` as present.
    pub fn exists<P: AsRef<Path>>(&mut self, path: P) -> bool {
        let path = path.as_ref();
        self.search(path, true).count() > 0
    }

    /// Merges the directory listings of every matching mount point.
    /// Iteration is reversed so entries from the most recently added
    /// mount are seen first; `unique_by` keeps the first occurrence,
    /// so later mounts shadow earlier ones on duplicate entries.
    pub fn read_dir<P: AsRef<Path>>(&mut self, path: P) -> Vec<dir::DirEntry> {
        use itertools::Itertools;
        use std::hash::{Hash, SipHasher, Hasher};

        // Hash an entry so unique_by can deduplicate across mounts.
        // NOTE(review): SipHasher is deprecated since Rust 1.13;
        // DefaultHasher would be the modern equivalent — TODO confirm
        // against the toolchain this crate targets.
        fn hash<T: Hash>(t: &T) -> u64 {
            let mut s = SipHasher::new();
            t.hash(&mut s);
            s.finish()
        }

        let path = path.as_ref();
        self.search(path, true).rev()
            .filter_map(|(mount_point, path)| mount_point.read_dir(path).ok())
            .flat_map(|iter| iter)
            .unique_by(hash)
            .collect()
    }

    /// Opens `path` for reading from the last (most recently mounted)
    /// mount point that contains it, or `Error::NotFound` if none do.
    pub fn open<P: AsRef<Path>>(&mut self, path: P) -> Result<file::File<file::Read>> {
        let path = path.as_ref();
        match self.search(path, true).last() {
            Some((mount_point, path)) => mount_point.open(path),
            None => Err(Error::NotFound(path.into())),
        }
    }
}

/// One handler mounted at a path prefix, identified by a random v4 UUID.
#[derive(Debug)]
pub struct MountPoint {
    id: Uuid,
    path: PathBuf,
    handler: Box<Handler>,
}

impl MountPoint {
    fn new<P: Into<PathBuf>>(path: P, handler: Box<Handler>) -> MountPoint {
        MountPoint {
            id: Uuid::new_v4(),
            path: path.into(),
            handler: handler,
        }
    }

    /// Identifier used by `CellFs::unmount`.
    pub fn id(&self) -> Uuid {
        self.id
    }

    /// Path prefix this handler is mounted at.
    pub fn path(&self) -> &Path {
        &self.path
    }

    fn handler(&mut self) -> &mut Handler {
        &mut *self.handler
    }

    // The paths below are relative to the mount point (already stripped
    // of `self.path` by `Search::map_item`).
    fn exists<P: AsRef<Path>>(&mut self, path: P) -> bool {
        self.handler().exists(path.as_ref())
    }

    fn read_dir<P: AsRef<Path>>(&mut self, path: P) -> Result<dir::ReadDir> {
        let path = path.as_ref();
        self.handler().read_dir(path).map(|iter| dir::ReadDir::new(path, iter))
    }

    fn open<P: AsRef<Path>>(&mut self, path: P) -> Result<file::File<file::Read>> {
        self.handler().open(path.as_ref())
    }
}

/// Double-ended iterator over the mount points matching a query path,
/// yielding each matching mount point together with the query path made
/// relative to that mount.
struct Search<'a> {
    path: PathBuf,
    iter: IterMut<'a, MountPoint>,
    exists: bool,
}

impl<'a> Search<'a> {
    fn new<P: Into<PathBuf>>(path: P, fs: &mut CellFs, exists: bool) -> Search {
        Search {
            path: path.into(),
            iter: fs.mount_points.iter_mut(),
            exists: exists,
        }
    }

    // Accepts `mount_point` when its mount path is a prefix of the query
    // path (and, when `self.exists` is set, when the handler reports the
    // relative path as present).
    // NOTE(review): the `.clone()` looks like it exists to end the shared
    // borrow of `mount_point.path` before `mount_point.exists` needs a
    // mutable borrow — presumably required by pre-NLL borrow rules;
    // confirm before removing.
    fn map_item(&self, mount_point: &'a mut MountPoint) -> Option<(&'a mut MountPoint, PathBuf)> {
        self.path.strip_prefix(&mount_point.path.clone()).ok()
            .and_then(|path| {
                if self.exists && !mount_point.exists(&path) {
                    return None;
                }

                Some((mount_point, path.into()))
            })
    }
}

impl<'a> Iterator for Search<'a> {
    type Item = (&'a mut MountPoint, PathBuf);

    // Walks mount points in mount order, skipping non-matches.
    fn next(&mut self) -> Option<Self::Item> {
        while let Some(mount_point) = self.iter.next() {
            if let Some(item) = self.map_item(mount_point) {
                return Some(item);
            }
        }

        None
    }
}

impl<'a> DoubleEndedIterator for Search<'a> {
    // Same filtering, but from the most recently mounted point backwards.
    fn next_back(&mut self) -> Option<Self::Item> {
        while let Some(mount_point) = self.iter.next_back() {
            if let Some(item) = self.map_item(mount_point) {
                return Some(item);
            }
        }

        None
    }
}
extern crate nom; use std::borrow::Cow; use std::str; use nom::bytes::complete::{tag, take}; use nom::character::{complete, is_alphabetic, is_digit, is_hex_digit}; use nom::error::ErrorKind; use nom::IResult; use nom::multi::{many0, separated_nonempty_list}; use nom::sequence::delimited; use crate::ast::*; use crate::misc::*; use crate::predicates::*; // trailer-part = *( header-field CRLF ) pub use headers as trailer_part; // BWS = OWS ; "bad" whitespace pub use ows as bws; //method = token pub use token as method; // field-name = token pub use token as field_name; // chunk-ext-name = token pub use token as chunk_ext_name; // HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive pub fn http_name(i: &[u8]) -> IResult<&[u8], &[u8], (&[u8], ErrorKind)> { tag("HTTP")(i) } // DIGIT = %x30-39 ; 0-9 pub fn digit(i: &[u8]) -> IResult<&[u8], &[u8], (&[u8], ErrorKind)> { char_predicate!( i , is_digit ) } // HEXDIG (hexadecimal 0-9/A-F/a-f) pub fn hex_digit(i: &[u8]) -> IResult<&[u8], &[u8], (&[u8], ErrorKind)> { char_predicate!( i , is_hex_digit ) } // HTTP-version = HTTP-name "/" DIGIT "." 
DIGIT named!(pub http_version <HttpVersion>, do_parse!( http_name >> tag!("/") >> major: digit >> tag!(".") >> minor: digit >> (HttpVersion { major: asci_digit(major), minor: asci_digit(minor)}) )); // SP = %x20 pub fn space(i: &[u8]) -> IResult<&[u8], &[u8], (&[u8], ErrorKind)> { tag(" ")(i) } // CRLF = CR LF ; Internet standard newline named!(pub crlf, tag!("\r\n")); // HTAB = %x09 ; horizontal tab named!(pub htab, tag!("\t")); // VCHAR = %x21-7E ; visible (printing) characters named!(pub vchar, char_predicate!(range(0x21,0x7E))); // obs-text = %x80-FF ; obsolete text named!(pub obs_text, char_predicate!(range(0x80,0xFF))); // OWS = *( SP / HTAB ) ; optional whitespace named!(pub ows, map_res!(many0!(complete!(alt!(space | htab))), join_vec)); // RWS = 1*( SP / HTAB ) ; required whitespace named!(pub rws, map_res!(many1!(complete!(alt!(space | htab))), join_vec)); // DQUOTE = %x22 ; " (Double Quote) named!(pub double_quote, tag!("\"")); // qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text named!(pub quoted_text, alt!(htab | space | char_predicate!(or!(ch(0x21), range(0x23,0x5B), range(0x5D,0x7E))) | obs_text )); // quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text ) named!(pub quoted_pair, preceded!(char!('\\'), alt!(htab | space | vchar | obs_text ))); // quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE named!(pub quoted_string <Cow<str>>, delimited!(double_quote, map_res!(many0!(complete!(alt!(quoted_text | quoted_pair))), to_cow_str), double_quote)); // TODO: full impl named!(pub request_target <&str>, map_res!(is_not!(" "), str::from_utf8)); // tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA named!(pub tchar, char_predicate!(or!(among("!#$%&'*+-.^_`|~"), is_digit, is_alphabetic))); ////token = 1*tchar named!(pub token <&str>, map_res!(map_res!(many1!(complete!(tchar)), join_vec), str::from_utf8)); //request-line = method SP request-target SP HTTP-version CRLF named!(pub request_line <RequestLine>, do_parse!( method: method >> space >> request_target: request_target >> space >> version: http_version >> crlf >> (RequestLine { method: method, request_target: request_target, version: version }) )); //status-code = 3DIGIT named!(pub status_code <u16>, map_res!(map_res!(map_res!(many_m_n!(3,3, complete!(digit)), join_vec), str::from_utf8), parse_u16)); //reason-phrase = *( HTAB / SP / VCHAR / obs-text ) named!(pub reason_phrase <&str>, map_res!(map_res!(many0!(complete!(alt!(htab | space | vchar | obs_text))), join_vec), str::from_utf8)); // status-line = HTTP-version SP status-code SP reason-phrase CRLF named!(pub status_line <StatusLine>, do_parse!( version: http_version >> space >> status: status_code >> space >> reason_phrase:reason_phrase >> crlf >> (StatusLine { version: version, code: status, description: reason_phrase }) )); // start-line = request-line / status-line named!(pub start_line <StartLine>, alt!(map!(request_line, StartLine::RequestLine) | map!(status_line, StartLine::StatusLine))); // field-vchar = VCHAR / obs-text named!(pub field_vchar, alt!(vchar | obs_text)); named!(pub spaces, map_res!(many1!(complete!(alt!(space | htab))), join_vec)); // field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] named!(pub field_content, do_parse!( chr:field_vchar >> optional: opt!(complete!(map_res!(pair!( spaces, field_vchar), join_pair))) >> (match optional { Some(other) => join_slice(chr, other).unwrap(), None => chr, }) )); // obs-fold = CRLF 1*( SP / HTAB ) ; obsolete line folding named!(pub obs_fold, do_parse!( crlf >> spaces >> (Default::default()) )); // field-value = *( field-content / 
obs-fold ) named!(pub field_value <Cow<str>>, map_res!(many0!(complete!(alt!(field_content | obs_fold))), to_cow_str)); // header-field = field-name ":" OWS field-value OWS named!(pub header_field <Header>, do_parse!( name:field_name >> tag!(":") >> ows >> value:field_value >> ows >> (Header::new(name, value)) )); pub fn message_body<'a>(slice: &'a [u8], headers: &Headers<'a>) -> IResult<&'a [u8], MessageBody<'a>> { match headers.content_length() { Some(length) if length > 0 => match take!(slice, length) { Ok((rest, body)) => Ok((rest, MessageBody::Slice(body))), Err(a) => Err(a), }, _ => IResult::Ok((slice, MessageBody::None)) } } named!(pub headers <Headers>, map!(many0!(complete!(terminated!(header_field, crlf))), Headers)); named!(pub message_head <MessageHead> , do_parse!( start_line:start_line >> headers:headers >> crlf >> (MessageHead { start_line:start_line, headers:headers}) )); macro_rules! apply ( ($i:expr, $fun:expr, $($args:expr),* ) => ( $fun( $i, $($args),* ) ); ); // HTTP-message = start-line *( header-field CRLF ) CRLF [ message-body ] named!(pub http_message <HttpMessage> , do_parse!( head:message_head >> body:apply!(message_body, &head.headers) >> (HttpMessage { start_line:head.start_line, headers:head.headers, body:body}) )); // chunk-size = 1*HEXDIG named!(pub chunk_size <u64>, map_res!(map_res!(map_res!(many1!(complete!(hex_digit)), join_vec), str::from_utf8), parse_hex)); // chunk-ext-val = token / quoted-string named!(pub chunk_ext_value <Cow<str>>, alt!(map!(token, Cow::from) | quoted_string)); // chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] ) named!(pub chunk_ext <ChunkExtensions>, map!(many0!(complete!(do_parse!( bws >> char!(';') >> bws >> name:chunk_ext_name >> value:opt!(complete!(preceded!(delimited!(bws, char!('='), bws), chunk_ext_value))) >> (name, value) ))), ChunkExtensions)); // chunk-data = 1*OCTET ; a sequence of chunk-size octets // chunk = chunk-size [ chunk-ext ] CRLF chunk-data CRLF pub fn 
chunk(i: &[u8]) -> nom::IResult<&[u8], Chunk, (&[u8], nom::error::ErrorKind)> { let (i, size) = chunk_size(i)?; if size == 0 { return Err(nom::Err::Error((i, ErrorKind::Complete))); }; let (i, extensions) = chunk_ext(i)?; let (i, _) = crlf(i)?; let (i, data) = take(size)(i)?; let (i, _) = crlf(i)?; Ok((i, Chunk::Slice(extensions, data))) } named!(pub chunk_head <(u64, ChunkExtensions)>, do_parse!( size:chunk_size >> extensions:chunk_ext >> crlf >> (size, extensions) )); // last-chunk = 1*("0") [ chunk-ext ] CRLF named!(pub last_chunk <ChunkExtensions>, do_parse!( many1!(complete!(char!('0'))) >> extensions:chunk_ext >> crlf >> (extensions) )); // chunked-body = *chunk last-chunk trailer-part CRLF pub fn chunked_body(i: &[u8]) -> nom::IResult<&[u8], ChunkedBody, (&[u8], nom::error::ErrorKind)> { let (i, chunks) = many0(chunk)(i)?; let (i, last) = last_chunk(i)?; let (i, trailers) = trailer_part(i)?; let (i, _) = crlf(i)?; Ok((i, ChunkedBody::new(chunks, last, trailers))) } // transfer-parameter = token / token BWS "=" BWS ( token / quoted-string ) named!(pub transfer_parameter <TransferParameter>, do_parse!( name:token >> bws >> char!('=') >> bws >> value:opt!(complete!(alt!(map!(token, Cow::from) | quoted_string))) >> (TransferParameter::new(name, value)) )); // transfer-extension = token *( OWS ";" OWS transfer-parameter ) named!(pub transfer_extension <TransferCoding>, do_parse!( name:token >> params:many0!(complete!(do_parse!(ows >> char!(';') >> ows >> param: transfer_parameter >> (param)))) >> (TransferCoding::Extension(name, params)) )); // transfer-coding = "chunked" / "compress" / "deflate" / "gzip" / transfer-extension named!(pub transfer_coding <TransferCoding>, alt!( value!(TransferCoding::Chunked, tag!("chunked")) | value!(TransferCoding::Compress, tag!("compress")) | value!(TransferCoding::Deflate, tag!("deflate")) | value!(TransferCoding::Gzip, tag!("gzip")) | transfer_extension )); // Transfer-Encoding = 1#transfer-coding // #rule: 1#element => 
element *( OWS "," OWS element ) pub fn transfer_encoding(i: &[u8]) -> nom::IResult<&[u8], Vec<TransferCoding>, (&[u8], nom::error::ErrorKind)> { separated_nonempty_list(delimited(ows, complete::char(','), ows), transfer_coding)(i) } #[cfg(test)] mod tests { use std::borrow::Cow; use crate::ast::*; #[test] fn http_name() { assert_eq!(super::http_name(&b"HTTP"[..]), Ok((&b""[..], &b"HTTP"[..]))); } #[test] fn http_version() { assert_eq!(super::http_version(&b"HTTP/1.1"[..]), Ok((&b""[..], HttpVersion { major: 1, minor: 1 }))); } #[test] fn request_target() { assert_eq!(super::request_target(&b"/where?q=now "[..]), Ok((&b" "[..], "/where?q=now"))); assert_eq!(super::request_target(&b"http://www.example.org/pub/WWW/TheProject.html "[..]), Ok((&b" "[..], "http://www.example.org/pub/WWW/TheProject.html"))); assert_eq!(super::request_target(&b"www.example.com:80 "[..]), Ok((&b" "[..], "www.example.com:80"))); assert_eq!(super::request_target(&b"* "[..]), Ok((&b" "[..], "*"))); } #[test] fn tchar() { assert_eq!(super::tchar(&b"abc"[..]), Ok((&b"bc"[..], &b"a"[..]))); } #[test] #[test] fn token() { assert_eq!(super::token(&b"abc"[..]), Ok((&b""[..], "abc"))); } #[test] fn method() { assert_eq!(super::method(&b"GET"[..]), Ok((&b""[..], "GET"))); } #[test] fn request_line() { assert_eq!(super::request_line(&b"GET /where?q=now HTTP/1.1\r\n"[..]), Ok((&b""[..], RequestLine { method: "GET", request_target: "/where?q=now", version: HttpVersion { major: 1, minor: 1 } }))); } #[test] fn status_code() { assert_eq!(super::status_code(&b"200"[..]), Ok((&b""[..], 200))); } #[test] fn reason_phrase() { assert_eq!(super::reason_phrase(&b"OK"[..]), Ok((&b""[..], "OK"))); assert_eq!(super::reason_phrase(&b"Not Found"[..]), Ok((&b""[..], "Not Found"))); } #[test] fn status_line() { assert_eq!(super::status_line(&b"HTTP/1.1 200 OK\r\n"[..]), Ok((&b""[..], StatusLine { version: HttpVersion { major: 1, minor: 1 }, code: 200, description: "OK" }))); } #[test] fn start_line() { 
assert_eq!(super::start_line(&b"GET /where?q=now HTTP/1.1\r\n"[..]), Ok((&b""[..], StartLine::RequestLine(RequestLine { method: "GET", request_target: "/where?q=now", version: HttpVersion { major: 1, minor: 1 } })))); assert_eq!(super::start_line(&b"HTTP/1.1 200 OK\r\n"[..]), Ok((&b""[..], StartLine::StatusLine(StatusLine { version: HttpVersion { major: 1, minor: 1 }, code: 200, description: "OK" })))); } #[test] fn field_name() { assert_eq!(super::field_name(&b"Content-Type"[..]), Ok((&b""[..], "Content-Type"))); } #[test] fn field_content() { assert_eq!(super::field_content(&b"a b"[..]), Ok((&b""[..], &b"a b"[..]))); assert_eq!(super::field_content(&b"a b"[..]), Ok((&b""[..], &b"a b"[..]))); assert_eq!(super::field_content(&b"a"[..]), Ok((&b""[..], &b"a"[..]))); } #[test] fn field_value() { assert_eq!(super::field_value(&b"plain/text"[..]), Ok((&b""[..], Cow::from("plain/text")))); assert_eq!(super::field_value(&b"Spaces are allowed in the middle"[..]), Ok((&b""[..], Cow::from("Spaces are allowed in the middle")))); assert_eq!(super::field_value(&b"You can al\r\n so wrap onto new lines!"[..]), Ok((&b""[..], Cow::from("You can also wrap onto new lines!")))); } #[test] fn header_field() { assert_eq!(super::header_field(&b"Content-Type:plain/text"[..]), Ok((&b""[..], Header::new("Content-Type", "plain/text")))); assert_eq!(super::header_field(&b"Content-Type: plain/text"[..]), Ok((&b""[..], Header::new("Content-Type", "plain/text")))); assert_eq!(super::header_field(&b"Content-Type: plain/text "[..]), Ok((&b""[..], Header::new("Content-Type", "plain/text")))); assert_eq!(super::header_field(&b"Content-Type: plain/\r\n text "[..]), Ok((&b""[..], Header::new("Content-Type", "plain/text")))); } #[test] fn http_message() { assert_eq!(super::http_message(&b"GET /where?q=now HTTP/1.1\r\nContent-Type:plain/text\r\n\r\n"[..]), Ok((&b""[..], HttpMessage { start_line: StartLine::RequestLine(RequestLine { method: "GET", request_target: "/where?q=now", version: HttpVersion { 
major: 1, minor: 1 } }), headers: Headers(vec!(Header::new("Content-Type", "plain/text"))), body: MessageBody::None, }))); assert_eq!(super::http_message(&b"HTTP/1.1 200 OK\r\nContent-Type:plain/text\r\n\r\n"[..]), Ok((&b""[..], HttpMessage { start_line: StartLine::StatusLine(StatusLine { version: HttpVersion { major: 1, minor: 1 }, code: 200, description: "OK" }), headers: Headers(vec!(Header::new("Content-Type", "plain/text"))), body: MessageBody::None, }))); assert_eq!(super::http_message(&b"HTTP/1.1 200 OK\r\nContent-Type:plain/text\r\nContent-Length:3\r\n\r\nabc"[..]), Ok((&b""[..], HttpMessage { start_line: StartLine::StatusLine(StatusLine { version: HttpVersion { major: 1, minor: 1 }, code: 200, description: "OK" }), headers: Headers(vec!(Header::new("Content-Type", "plain/text"), Header::new("Content-Length", "3"))), body: MessageBody::Slice(&b"abc"[..]), }))); } #[test] fn chunk_size() { assert_eq!(super::chunk_size(&b"4\r\n"[..]), Ok((&b"\r\n"[..], 4))); assert_eq!(super::chunk_size(&b"E\r\n"[..]), Ok((&b"\r\n"[..], 14))); assert_eq!(super::chunk_size(&b"e\r\n"[..]), Ok((&b"\r\n"[..], 14))); } #[test] fn quoted_string() { assert_eq!(super::quoted_string(&b"\"This is a quoted string\""[..]), Ok((&b""[..], Cow::from("This is a quoted string")))); assert_eq!(super::quoted_string(&b"\"This is a \\\"quoted\\\" string\""[..]), Ok((&b""[..], Cow::from("This is a \"quoted\" string")))); } #[test] fn chunk_ext() { assert_eq!(super::chunk_ext(&b";foo=bar"[..]), Ok((&b""[..], ChunkExtensions(vec!(("foo", Some(Cow::from("bar")))))))); assert_eq!(super::chunk_ext(&b";foo"[..]), Ok((&b""[..], ChunkExtensions(vec!(("foo", None)))))); assert_eq!(super::chunk_ext(&b";foo=bar;baz"[..]), Ok((&b""[..], ChunkExtensions(vec!(("foo", Some(Cow::from("bar"))), ("baz", None)))))); assert_eq!(super::chunk_ext(&b" ; foo = bar ; baz"[..]), Ok((&b""[..], ChunkExtensions(vec!(("foo", Some(Cow::from("bar"))), ("baz", None)))))); assert_eq!(super::chunk_ext(&b""[..]), Ok((&b""[..], 
ChunkExtensions(vec!())))); } #[test] fn chunk() { assert_eq!(super::chunk(&b"4;foo=bar\r\nWiki\r\n"[..]), Ok((&b""[..], Chunk::Slice(ChunkExtensions(vec!(("foo", Some(Cow::from("bar"))))), &b"Wiki"[..])))); } #[test] fn chunked_body() { let chunked_body = ChunkedBody::new(vec!( Chunk::Slice(ChunkExtensions(vec!()), &b"Wiki"[..]), Chunk::Slice(ChunkExtensions(vec!()), &b"pedia"[..]), Chunk::Slice(ChunkExtensions(vec!()), &b" in\r\n\r\nchunks."[..])), ChunkExtensions(vec!()), Headers(vec!())); assert_eq!(super::chunked_body(&b"4\r\nWiki\r\n5\r\npedia\r\nE\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n"[..]), Ok((&b""[..], chunked_body))); } #[test] fn message_head() { assert_eq!(super::message_head(&b"POST /where?q=now HTTP/1.1\r\nContent-Type:plain/text\r\nContent-Length:3\r\n\r\nabc"[..]), Ok((&b"abc"[..], MessageHead { start_line: StartLine::RequestLine(RequestLine { method: "POST", request_target: "/where?q=now", version: HttpVersion { major: 1, minor: 1 } }), headers: Headers(vec!(Header::new("Content-Type", "plain/text"), Header::new("Content-Length", "3"))), }))); } #[test] fn transfer_coding() { assert_eq!(super::transfer_coding(&b"chunked"[..]), Ok((&b""[..], TransferCoding::Chunked))); assert_eq!(super::transfer_coding(&b"compress"[..]), Ok((&b""[..], TransferCoding::Compress))); assert_eq!(super::transfer_coding(&b"deflate"[..]), Ok((&b""[..], TransferCoding::Deflate))); assert_eq!(super::transfer_coding(&b"gzip"[..]), Ok((&b""[..], TransferCoding::Gzip))); assert_eq!(super::transfer_coding(&b"cat ; foo=bar"[..]), Ok((&b""[..], TransferCoding::Extension("cat", vec![TransferParameter::new("foo", Some("bar"))])))); } #[test] fn transfer_encoding() { assert_eq!(super::transfer_encoding(&b"gzip, chunked"[..]), Ok((&b""[..], vec![TransferCoding::Gzip, TransferCoding::Chunked]))); assert_eq!(super::transfer_encoding(&b"chunked"[..]), Ok((&b""[..], vec![TransferCoding::Chunked]))); } }
// NOTE(review): this looks like machine-generated (svd2rust-style) register
// accessor code for the MDMA channel-10 interrupt status register — prefer
// regenerating from the SVD over hand-editing. TODO confirm provenance.
#[doc = "Reader of register MDMA_C10ISR"]
pub type R = crate::R<u32, super::MDMA_C10ISR>;
// ---- TEIF10: bit 0, channel transfer error flag ----
#[doc = "TEIF10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEIF10_A {
    #[doc = "0: No transfer error on stream\r\n x"]
    B_0X0 = 0,
    #[doc = "1: A transfer error occurred on stream\r\n x"]
    B_0X1 = 1,
}
impl From<TEIF10_A> for bool {
    #[inline(always)]
    fn from(variant: TEIF10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `TEIF10`"]
pub type TEIF10_R = crate::R<bool, TEIF10_A>;
impl TEIF10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TEIF10_A {
        match self.bits {
            false => TEIF10_A::B_0X0,
            true => TEIF10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TEIF10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TEIF10_A::B_0X1
    }
}
// ---- CTCIF10: bit 1, channel transfer complete flag ----
#[doc = "CTCIF10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CTCIF10_A {
    #[doc = "0: No channel transfer complete event\r\n on channel x"]
    B_0X0 = 0,
    #[doc = "1: A channel transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<CTCIF10_A> for bool {
    #[inline(always)]
    fn from(variant: CTCIF10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `CTCIF10`"]
pub type CTCIF10_R = crate::R<bool, CTCIF10_A>;
impl CTCIF10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CTCIF10_A {
        match self.bits {
            false => CTCIF10_A::B_0X0,
            true => CTCIF10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == CTCIF10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == CTCIF10_A::B_0X1
    }
}
// ---- BRTIF10: bit 2, block repeat transfer complete flag ----
#[doc = "BRTIF10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BRTIF10_A {
    #[doc = "0: No block repeat transfer complete\r\n event on channel x"]
    B_0X0 = 0,
    #[doc = "1: A block repeat transfer complete\r\n event occurred on channel x"]
    B_0X1 = 1,
}
impl From<BRTIF10_A> for bool {
    #[inline(always)]
    fn from(variant: BRTIF10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `BRTIF10`"]
pub type BRTIF10_R = crate::R<bool, BRTIF10_A>;
impl BRTIF10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BRTIF10_A {
        match self.bits {
            false => BRTIF10_A::B_0X0,
            true => BRTIF10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == BRTIF10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == BRTIF10_A::B_0X1
    }
}
// ---- BTIF10: bit 3, block transfer complete flag ----
#[doc = "BTIF10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BTIF10_A {
    #[doc = "0: No block transfer complete event on\r\n channel x"]
    B_0X0 = 0,
    #[doc = "1: A block transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<BTIF10_A> for bool {
    #[inline(always)]
    fn from(variant: BTIF10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `BTIF10`"]
pub type BTIF10_R = crate::R<bool, BTIF10_A>;
impl BTIF10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BTIF10_A {
        match self.bits {
            false => BTIF10_A::B_0X0,
            true => BTIF10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == BTIF10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == BTIF10_A::B_0X1
    }
}
// ---- TCIF10: bit 4, buffer transfer complete flag ----
#[doc = "TCIF10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TCIF10_A {
    #[doc = "0: No buffer transfer complete event on\r\n channel x"]
    B_0X0 = 0,
    #[doc = "1: A buffer transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<TCIF10_A> for bool {
    #[inline(always)]
    fn from(variant: TCIF10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `TCIF10`"]
pub type TCIF10_R = crate::R<bool, TCIF10_A>;
impl TCIF10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TCIF10_A {
        match self.bits {
            false => TCIF10_A::B_0X0,
            true => TCIF10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TCIF10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TCIF10_A::B_0X1
    }
}
// ---- CRQA10: bit 16, channel request active flag ----
#[doc = "CRQA10\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CRQA10_A {
    #[doc = "0: The MDMA transfer RQ is inactive for\r\n channel x."]
    B_0X0 = 0,
    #[doc = "1: The MDMA transfer RQ is active for\r\n channel x"]
    B_0X1 = 1,
}
impl From<CRQA10_A> for bool {
    #[inline(always)]
    fn from(variant: CRQA10_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `CRQA10`"]
pub type CRQA10_R = crate::R<bool, CRQA10_A>;
impl CRQA10_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CRQA10_A {
        match self.bits {
            false => CRQA10_A::B_0X0,
            true => CRQA10_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == CRQA10_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == CRQA10_A::B_0X1
    }
}
// Field accessors: each extracts one bit of the 32-bit register value.
impl R {
    #[doc = "Bit 0 - TEIF10"]
    #[inline(always)]
    pub fn teif10(&self) -> TEIF10_R {
        TEIF10_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - CTCIF10"]
    #[inline(always)]
    pub fn ctcif10(&self) -> CTCIF10_R {
        CTCIF10_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - BRTIF10"]
    #[inline(always)]
    pub fn brtif10(&self) -> BRTIF10_R {
        BRTIF10_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - BTIF10"]
    #[inline(always)]
    pub fn btif10(&self) -> BTIF10_R {
        BTIF10_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - TCIF10"]
    #[inline(always)]
    pub fn tcif10(&self) -> TCIF10_R {
        TCIF10_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 16 - CRQA10"]
    #[inline(always)]
    pub fn crqa10(&self) -> CRQA10_R {
        CRQA10_R::new(((self.bits >> 16) & 0x01) != 0)
    }
}
use crate::providers::{BlockImportProvider, RpcProvider}; use crate::{DomainConfiguration, FullBackend, FullClient}; use cross_domain_message_gossip::ChainTxPoolSink; use domain_client_block_preprocessor::runtime_api_full::RuntimeApiFull; use domain_client_consensus_relay_chain::DomainBlockImport; use domain_client_message_relayer::GossipMessageSink; use domain_client_operator::{Operator, OperatorParams, OperatorStreams}; use domain_runtime_primitives::opaque::Block; use domain_runtime_primitives::{Balance, BlockNumber, DomainCoreApi, Hash, InherentExtrinsicApi}; use futures::channel::mpsc; use futures::Stream; use jsonrpsee::tracing; use pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi; use sc_client_api::{BlockBackend, BlockImportNotification, BlockchainEvents, StateBackendFor}; use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; use sc_rpc_api::DenyUnsafe; use sc_service::{ BuildNetworkParams, Configuration as ServiceConfiguration, NetworkStarter, PartialComponents, SpawnTasksParams, TFullBackend, TaskManager, }; use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}; use sc_utils::mpsc::tracing_unbounded; use serde::de::DeserializeOwned; use sp_api::{ApiExt, BlockT, ConstructRuntimeApi, Metadata, NumberFor, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus::{SelectChain, SyncOracle}; use sp_consensus_slots::Slot; use sp_core::traits::SpawnEssentialNamed; use sp_core::{Decode, Encode}; use sp_domains::{BundleProducerElectionApi, DomainId, DomainsApi}; use sp_messenger::{MessengerApi, RelayerApi}; use sp_offchain::OffchainWorkerApi; use sp_session::SessionKeys; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use std::fmt::{Debug, Display}; use std::marker::PhantomData; use std::str::FromStr; use std::sync::Arc; use subspace_core_primitives::Randomness; use subspace_runtime_primitives::Index as Nonce; use 
subspace_transaction_pool::FullChainApiWrapper; use substrate_frame_rpc_system::AccountNonceApi;
// Shorthand for the block-import type a `BlockImportProvider` yields for a given client.
type BlockImportOf<Block, Client, Provider> = <Provider as BlockImportProvider<Block, Client>>::BI;
// The fully-instantiated operator type for a domain node (domain block + consensus block,
// full client/pool/backend, native executor, and the domain block-import wrapper).
pub type DomainOperator<Block, CBlock, CClient, RuntimeApi, ExecutorDispatch, BI> = Operator<
    Block,
    CBlock,
    FullClient<Block, RuntimeApi, ExecutorDispatch>,
    CClient,
    FullPool<CBlock, CClient, RuntimeApi, ExecutorDispatch>,
    FullBackend<Block>,
    NativeElseWasmExecutor<ExecutorDispatch>,
    DomainBlockImport<BI>,
>;
/// Domain full node along with some other components.
pub struct NewFull<C, CodeExecutor, CBlock, CClient, RuntimeApi, ExecutorDispatch, AccountId, BI>
where
    Block: BlockT,
    CBlock: BlockT,
    NumberFor<CBlock>: From<NumberFor<Block>>,
    CBlock::Hash: From<Hash>,
    CClient: HeaderBackend<CBlock>
        + BlockBackend<CBlock>
        + ProvideRuntimeApi<CBlock>
        + Send
        + Sync
        + 'static,
    CClient::Api: DomainsApi<CBlock, BlockNumber, Hash>,
    RuntimeApi: ConstructRuntimeApi<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>
        + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: ApiExt<Block, StateBackend = StateBackendFor<TFullBackend<Block>, Block>>
        + Metadata<Block>
        + AccountNonceApi<Block, AccountId, Nonce>
        + BlockBuilder<Block>
        + OffchainWorkerApi<Block>
        + SessionKeys<Block>
        + TaggedTransactionQueue<Block>
        + TransactionPaymentRuntimeApi<Block, Balance>
        + DomainCoreApi<Block>
        + MessengerApi<Block, NumberFor<Block>>
        + RelayerApi<Block, AccountId, NumberFor<Block>>,
    ExecutorDispatch: NativeExecutionDispatch + 'static,
    AccountId: Encode + Decode,
{
    /// Task manager.
    pub task_manager: TaskManager,
    /// Full client.
    pub client: C,
    /// Backend.
    pub backend: Arc<FullBackend<Block>>,
    /// Code executor.
    pub code_executor: Arc<CodeExecutor>,
    /// Network service.
    pub network_service: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
    /// Sync service.
    pub sync_service: Arc<sc_network_sync::SyncingService<Block>>,
    /// RPCHandlers to make RPC queries.
    pub rpc_handlers: sc_service::RpcHandlers,
    /// Network starter.
    pub network_starter: NetworkStarter,
    /// Operator.
    pub operator: DomainOperator<Block, CBlock, CClient, RuntimeApi, ExecutorDispatch, BI>,
    /// Transaction pool sink
    pub tx_pool_sink: ChainTxPoolSink,
    // Pins `AccountId` to the struct without storing one.
    _phantom_data: PhantomData<AccountId>,
}
// Pre-validator run against the consensus chain before domain transactions enter the pool.
type DomainTxPreValidator<CBlock, CClient, RuntimeApi, ExecutorDispatch> =
    crate::domain_tx_pre_validator::DomainTxPreValidator<
        Block,
        CBlock,
        FullClient<Block, RuntimeApi, ExecutorDispatch>,
        CClient,
        RuntimeApiFull<FullClient<Block, RuntimeApi, ExecutorDispatch>>,
    >;
// Transaction pool specialised with the domain pre-validator above.
pub type FullPool<CBlock, CClient, RuntimeApi, ExecutorDispatch> =
    subspace_transaction_pool::FullPool<
        Block,
        FullClient<Block, RuntimeApi, ExecutorDispatch>,
        DomainTxPreValidator<CBlock, CClient, RuntimeApi, ExecutorDispatch>,
    >;
/// Constructs a partial domain node.
///
/// Builds the client, backend, telemetry, transaction pool (with the domain
/// pre-validator wired to `consensus_client`), block import and import queue,
/// and returns them bundled as `PartialComponents` for `new_full` to finish.
#[allow(clippy::type_complexity)]
fn new_partial<RuntimeApi, ExecutorDispatch, CBlock, CClient, BIMP>(
    config: &ServiceConfiguration,
    domain_id: DomainId,
    consensus_client: Arc<CClient>,
    block_import_provider: &BIMP,
) -> Result<
    PartialComponents<
        FullClient<Block, RuntimeApi, ExecutorDispatch>,
        FullBackend<Block>,
        (),
        sc_consensus::DefaultImportQueue<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>,
        FullPool<CBlock, CClient, RuntimeApi, ExecutorDispatch>,
        (
            Option<Telemetry>,
            Option<TelemetryWorkerHandle>,
            NativeElseWasmExecutor<ExecutorDispatch>,
            Arc<DomainBlockImport<BIMP::BI>>,
        ),
    >,
    sc_service::Error,
>
where
    CBlock: BlockT,
    NumberFor<CBlock>: From<NumberFor<Block>>,
    CBlock::Hash: From<Hash>,
    CClient: HeaderBackend<CBlock>
        + BlockBackend<CBlock>
        + ProvideRuntimeApi<CBlock>
        + Send
        + Sync
        + 'static,
    CClient::Api: DomainsApi<CBlock, BlockNumber, Hash>,
    RuntimeApi: ConstructRuntimeApi<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>
        + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: TaggedTransactionQueue<Block>
        + MessengerApi<Block, NumberFor<Block>>
        + ApiExt<Block, StateBackend = StateBackendFor<TFullBackend<Block>, Block>>,
    ExecutorDispatch: NativeExecutionDispatch + 'static,
    BIMP: BlockImportProvider<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>,
{
    // Telemetry endpoints are optional; an empty list counts as "no telemetry".
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;
    let executor = sc_service::new_native_or_wasm_executor(config);
    let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts(
        config,
        telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
        executor.clone(),
    )?;
    let client = Arc::new(client);
    // Keep the worker handle before the worker itself is moved onto a task below.
    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager
            .spawn_handle()
            .spawn("telemetry", None, worker.run());
        telemetry
    });
    let domain_tx_pre_validator = DomainTxPreValidator::new(
        domain_id,
        client.clone(),
        Box::new(task_manager.spawn_handle()),
        consensus_client,
        RuntimeApiFull::new(client.clone()),
    );
    let transaction_pool = subspace_transaction_pool::new_full(
        config,
        &task_manager,
        client.clone(),
        domain_tx_pre_validator,
    );
    // Wrap the provider's block import in the domain-specific import, shared via Arc
    // between the returned components and the import queue.
    let block_import = Arc::new(DomainBlockImport::new(BlockImportProvider::block_import(
        block_import_provider,
        client.clone(),
    )));
    let import_queue = domain_client_consensus_relay_chain::import_queue(
        block_import.clone(),
        &task_manager.spawn_essential_handle(),
        config.prometheus_registry(),
    )?;
    let params = PartialComponents {
        backend,
        client,
        import_queue,
        keystore_container,
        task_manager,
        transaction_pool,
        select_chain: (),
        other: (telemetry, telemetry_worker_handle, executor, block_import),
    };
    Ok(params)
}
// Bundle of everything `new_full` needs to build a domain node.
pub struct DomainParams<CBlock, CClient, SC, IBNS, CIBNS, NSNS, AccountId, Provider>
where
    CBlock: BlockT,
{
    pub domain_id: DomainId,
    pub domain_config: DomainConfiguration<AccountId>,
    pub domain_created_at: NumberFor<CBlock>,
    pub consensus_client: Arc<CClient>,
    pub consensus_network_sync_oracle: Arc<dyn SyncOracle + Send + Sync>,
    pub select_chain: SC,
    pub operator_streams: OperatorStreams<CBlock, IBNS, CIBNS, NSNS>,
    pub gossip_message_sink: GossipMessageSink,
    pub provider: Provider,
}
/// Builds service for a domain full node.
///
/// Assembles the partial components, network, RPC, operator, optional relayer,
/// and cross-domain message listener, then returns them as a [`NewFull`].
pub async fn new_full<
    CBlock,
    CClient,
    SC,
    IBNS,
    CIBNS,
    NSNS,
    RuntimeApi,
    ExecutorDispatch,
    AccountId,
    Provider,
>(
    domain_params: DomainParams<CBlock, CClient, SC, IBNS, CIBNS, NSNS, AccountId, Provider>,
) -> sc_service::error::Result<
    NewFull<
        Arc<FullClient<Block, RuntimeApi, ExecutorDispatch>>,
        NativeElseWasmExecutor<ExecutorDispatch>,
        CBlock,
        CClient,
        RuntimeApi,
        ExecutorDispatch,
        AccountId,
        BlockImportOf<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>, Provider>,
    >,
>
where
    CBlock: BlockT,
    NumberFor<CBlock>: From<NumberFor<Block>> + Into<u32>,
    <Block as BlockT>::Hash: From<Hash>,
    CBlock::Hash: From<Hash>,
    CClient: HeaderBackend<CBlock>
        + HeaderMetadata<CBlock, Error = sp_blockchain::Error>
        + BlockBackend<CBlock>
        + ProvideRuntimeApi<CBlock>
        + BlockchainEvents<CBlock>
        + Send
        + Sync
        + 'static,
    CClient::Api: DomainsApi<CBlock, BlockNumber, Hash>
        + BundleProducerElectionApi<CBlock, subspace_runtime_primitives::Balance>,
    SC: SelectChain<CBlock>,
    IBNS: Stream<Item = (NumberFor<CBlock>, mpsc::Sender<()>)> + Send + 'static,
    CIBNS: Stream<Item = BlockImportNotification<CBlock>> + Send + 'static,
    NSNS: Stream<Item = (Slot, Randomness, Option<mpsc::Sender<()>>)> + Send + 'static,
    RuntimeApi: ConstructRuntimeApi<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>
        + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: ApiExt<Block, StateBackend = StateBackendFor<TFullBackend<Block>, Block>>
        + Metadata<Block>
        + BlockBuilder<Block>
        + OffchainWorkerApi<Block>
        + SessionKeys<Block>
        + DomainCoreApi<Block>
        + MessengerApi<Block, NumberFor<Block>>
        + InherentExtrinsicApi<Block>
        + TaggedTransactionQueue<Block>
        + AccountNonceApi<Block, AccountId, Nonce>
        + TransactionPaymentRuntimeApi<Block, Balance>
        + RelayerApi<Block, AccountId, NumberFor<Block>>,
    ExecutorDispatch: NativeExecutionDispatch + 'static,
    AccountId: DeserializeOwned
        + Encode
        + Decode
        + Clone
        + Debug
        + Display
        + FromStr
        + Sync
        + Send
        + 'static,
    Provider: RpcProvider<
            Block,
            FullClient<Block, RuntimeApi, ExecutorDispatch>,
            FullPool<CBlock, CClient, RuntimeApi, ExecutorDispatch>,
            FullChainApiWrapper<
                Block,
                FullClient<Block, RuntimeApi, ExecutorDispatch>,
                DomainTxPreValidator<CBlock, CClient, RuntimeApi, ExecutorDispatch>,
            >,
            TFullBackend<Block>,
            AccountId,
        > + BlockImportProvider<Block, FullClient<Block, RuntimeApi, ExecutorDispatch>>
        + 'static,
{
    let DomainParams {
        domain_id,
        mut domain_config,
        domain_created_at,
        consensus_client,
        consensus_network_sync_oracle,
        select_chain,
        operator_streams,
        gossip_message_sink,
        provider,
    } = domain_params;
    // TODO: Do we even need block announcement on domain node?
    // domain_config.announce_block = false;
    let params = new_partial(
        &domain_config.service_config,
        domain_id,
        consensus_client.clone(),
        &provider,
    )?;
    let (mut telemetry, _telemetry_worker_handle, code_executor, block_import) = params.other;
    let client = params.client.clone();
    let backend = params.backend.clone();
    let transaction_pool = params.transaction_pool.clone();
    let mut task_manager = params.task_manager;
    let mut net_config =
        sc_network::config::FullNetworkConfiguration::new(&domain_config.service_config.network);
    // Register the domain subnet gossip protocol before building the network.
    net_config.add_notification_protocol(
        domain_client_subnet_gossip::domain_subnet_gossip_peers_set_config(),
    );
    let (network_service, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
        crate::build_network(BuildNetworkParams {
            config: &domain_config.service_config,
            net_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue: params.import_queue,
            // TODO: we might want to re-enable this some day.
            block_announce_validator_builder: None,
            warp_sync_params: None,
            block_relay: None,
        })?;
    let is_authority = domain_config.service_config.role.is_authority();
    domain_config.service_config.rpc_id_provider = provider.rpc_id();
    // Build the RPC extension closure; dependencies are cloned into it so it can be
    // invoked later by `spawn_tasks`.
    let rpc_builder = {
        let deps = crate::rpc::FullDeps {
            client: client.clone(),
            pool: transaction_pool.clone(),
            graph: transaction_pool.pool().clone(),
            chain_spec: domain_config.service_config.chain_spec.cloned_box(),
            deny_unsafe: DenyUnsafe::Yes,
            network: network_service.clone(),
            sync: sync_service.clone(),
            is_authority,
            prometheus_registry: domain_config.service_config.prometheus_registry().cloned(),
            database_source: domain_config.service_config.database.clone(),
            task_spawner: task_manager.spawn_handle(),
            backend: backend.clone(),
        };
        let spawn_essential = task_manager.spawn_essential_handle();
        let rpc_deps = provider.deps(deps)?;
        Box::new(move |_, subscription_task_executor| {
            let spawn_essential = spawn_essential.clone();
            provider
                .rpc_builder(
                    rpc_deps.clone(),
                    subscription_task_executor,
                    spawn_essential,
                )
                .map_err(Into::into)
        })
    };
    let rpc_handlers = sc_service::spawn_tasks(SpawnTasksParams {
        rpc_builder,
        client: client.clone(),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        config: domain_config.service_config,
        keystore: params.keystore_container.keystore(),
        backend: backend.clone(),
        network: network_service.clone(),
        system_rpc_tx,
        tx_handler_controller,
        sync_service: sync_service.clone(),
        telemetry: telemetry.as_mut(),
    })?;
    let code_executor = Arc::new(code_executor);
    let spawn_essential = task_manager.spawn_essential_handle();
    let (bundle_sender, _bundle_receiver) = tracing_unbounded("domain_bundle_stream", 100);
    // let domain_confirmation_depth = consensus_client
    //     .runtime_api()
    //     .receipts_pruning_depth(consensus_client.info().best_hash)
    //     .map_err(|err| sc_service::error::Error::Application(Box::new(err)))?
    //     .into();
    // TODO: Implement when block tree is ready.
    // NOTE(review): hard-coded placeholder until the commented-out runtime query above lands.
    let domain_confirmation_depth = 256u32;
    let operator = Operator::new(
        Box::new(spawn_essential.clone()),
        &select_chain,
        OperatorParams {
            domain_id,
            domain_created_at,
            consensus_client: consensus_client.clone(),
            consensus_network_sync_oracle,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            backend: backend.clone(),
            code_executor: code_executor.clone(),
            is_authority,
            keystore: params.keystore_container.keystore(),
            bundle_sender: Arc::new(bundle_sender),
            operator_streams,
            domain_confirmation_depth,
            block_import,
        },
    )
    .await?;
    // The relayer only runs when this node is configured with a relayer identity.
    if let Some(relayer_id) = domain_config.maybe_relayer_id {
        tracing::info!(?domain_id, ?relayer_id, "Starting domain relayer");
        let relayer_worker = domain_client_message_relayer::worker::relay_domain_messages(
            relayer_id,
            client.clone(),
            sync_service.clone(),
            gossip_message_sink,
        );
        spawn_essential.spawn_essential_blocking("domain-relayer", None, Box::pin(relayer_worker));
    }
    let (msg_sender, msg_receiver) = tracing_unbounded("domain_message_channel", 100);
    // Start cross domain message listener for domain
    let domain_listener = cross_domain_message_gossip::start_domain_message_listener(
        domain_id,
        client.clone(),
        params.transaction_pool.clone(),
        msg_receiver,
    );
    spawn_essential.spawn_essential_blocking(
        "domain-message-listener",
        None,
        Box::pin(domain_listener),
    );
    let new_full = NewFull {
        task_manager,
        client,
        backend,
        code_executor,
        network_service,
        sync_service,
        rpc_handlers,
        network_starter,
        operator,
        tx_pool_sink: msg_sender,
        _phantom_data: Default::default(),
    };
    Ok(new_full)
}
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
// NOTE(review): AutoRust-generated Azure Data Lake Store account operations.
// Each operation has a sibling module holding its `Error` enum (and, where the
// service returns multiple success codes, a `Response` enum).
// The `delete` error module is truncated at the end of this chunk and continues below.
pub mod accounts {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    // Lists all Data Lake Store accounts in the subscription; optional OData-style
    // query parameters ($filter/$top/$skip/$select/$orderby/$count) are appended only
    // when supplied.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        filter: Option<&str>,
        top: Option<i32>,
        skip: Option<i32>,
        select: Option<&str>,
        orderby: Option<&str>,
        count: Option<bool>,
    ) -> std::result::Result<DataLakeStoreAccountListResult, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.DataLakeStore/accounts",
            &operation_config.base_path, subscription_id
        );
        let mut req_builder = client.get(uri_str);
        // Bearer-token auth is optional; skipped when no credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(filter) = filter {
            req_builder = req_builder.query(&[("$filter", filter)]);
        }
        if let Some(top) = top {
            req_builder = req_builder.query(&[("$top", top)]);
        }
        if let Some(skip) = skip {
            req_builder = req_builder.query(&[("$skip", skip)]);
        }
        if let Some(select) = select {
            req_builder = req_builder.query(&[("$select", select)]);
        }
        if let Some(orderby) = orderby {
            req_builder = req_builder.query(&[("$orderby", orderby)]);
        }
        if let Some(count) = count {
            req_builder = req_builder.query(&[("$count", count)]);
        }
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccountListResult =
                    serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            // Any non-200 status is surfaced with its raw body for diagnosis.
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                list::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    // Same as `list`, but scoped to a single resource group.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        filter: Option<&str>,
        top: Option<i32>,
        skip: Option<i32>,
        select: Option<&str>,
        orderby: Option<&str>,
        count: Option<bool>,
    ) -> std::result::Result<DataLakeStoreAccountListResult, list_by_resource_group::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts",
            &operation_config.base_path, subscription_id, resource_group_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_resource_group::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(filter) = filter {
            req_builder = req_builder.query(&[("$filter", filter)]);
        }
        if let Some(top) = top {
            req_builder = req_builder.query(&[("$top", top)]);
        }
        if let Some(skip) = skip {
            req_builder = req_builder.query(&[("$skip", skip)]);
        }
        if let Some(select) = select {
            req_builder = req_builder.query(&[("$select", select)]);
        }
        if let Some(orderby) = orderby {
            req_builder = req_builder.query(&[("$orderby", orderby)]);
        }
        if let Some(count) = count {
            req_builder = req_builder.query(&[("$count", count)]);
        }
        let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccountListResult =
                    serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?;
                list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    // Fetches a single account by subscription, resource group, and account name.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<DataLakeStoreAccount, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                get::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    // PUT create: the service answers 200 (already provisioned) or 201 (created),
    // each carrying the account payload — hence the two-variant `Response` enum.
    pub async fn create(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        parameters: &CreateDataLakeStoreAccountParameters,
    ) -> std::result::Result<create::Response, create::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(create::DeserializeError { body })?;
                Ok(create::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create::ResponseBytesError)?;
                create::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod create {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(DataLakeStoreAccount),
            Created201(DataLakeStoreAccount),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    // PATCH update: may answer 200, 201, or 202 (accepted for async processing),
    // each with an account payload.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        parameters: &UpdateDataLakeStoreAccountParameters,
    ) -> std::result::Result<update::Response, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Created201(rsp_value))
            }
            StatusCode::ACCEPTED => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: DataLakeStoreAccount =
                    serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(update::Response::Accepted202(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                update::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(DataLakeStoreAccount),
            Created201(DataLakeStoreAccount),
            Accepted202(DataLakeStoreAccount),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    // DELETE: success statuses carry no body, so `Response` variants are unit-like.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                delete::UnexpectedResponse { status_code, body: body }.fail()
            }
        }
    }
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn enable_key_vault( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<(), enable_key_vault::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/enableKeyVault", &operation_config.base_path, subscription_id, resource_group_name, account_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(enable_key_vault::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(enable_key_vault::BuildRequestError)?; let rsp = client.execute(req).await.context(enable_key_vault::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(()), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(enable_key_vault::ResponseBytesError)?; enable_key_vault::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod enable_key_vault { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: 
reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn check_name_availability( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, parameters: &CheckNameAvailabilityParameters, ) -> std::result::Result<NameAvailabilityInformation, check_name_availability::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeStore/locations/{}/checkNameAvailability", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(check_name_availability::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(check_name_availability::BuildRequestError)?; let rsp = client.execute(req).await.context(check_name_availability::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?; let rsp_value: NameAvailabilityInformation = serde_json::from_slice(&body).context(check_name_availability::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(check_name_availability::ResponseBytesError)?; check_name_availability::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod check_name_availability { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] 
#[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod firewall_rules { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<FirewallRuleListResult, list_by_account::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/firewallRules", &operation_config.base_path, subscription_id, resource_group_name, account_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_account::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_account::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_account::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?; let rsp_value: FirewallRuleListResult = serde_json::from_slice(&body).context(list_by_account::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?; list_by_account::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod 
list_by_account { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, ) -> std::result::Result<FirewallRule, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/firewallRules/{}", &operation_config.base_path, subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: FirewallRule = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use 
snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, parameters: &CreateOrUpdateFirewallRuleParameters, ) -> std::result::Result<FirewallRule, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/firewallRules/{}", &operation_config.base_path, subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: FirewallRule = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; 
create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, parameters: Option<&UpdateFirewallRuleParameters>, ) -> std::result::Result<FirewallRule, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/firewallRules/{}", &operation_config.base_path, subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(parameters) = parameters { req_builder = req_builder.json(parameters); } let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: FirewallRule = serde_json::from_slice(&body).context(update::DeserializeError { body })?; 
Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/firewallRules/{}", &operation_config.base_path, subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, 
body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_network_rules { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<VirtualNetworkRuleListResult, list_by_account::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/virtualNetworkRules", &operation_config.base_path, subscription_id, resource_group_name, account_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_account::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_account::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_account::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?; let rsp_value: VirtualNetworkRuleListResult = 
// --- continuation of `list_by_account` (the `let rsp_value: … =` binding is above) ---
serde_json::from_slice(&body).context(list_by_account::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?;
                // field-init shorthand (was `body: body`; clippy::redundant_field_names)
                list_by_account::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `list_by_account`.
    pub mod list_by_account {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// GET …/virtualNetworkRules/{virtual_network_rule_name}: fetch a single rule.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        virtual_network_rule_name: &str,
    ) -> std::result::Result<VirtualNetworkRule, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/virtualNetworkRules/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, virtual_network_rule_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: VirtualNetworkRule =
                    serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                get::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `get`.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// PUT …/virtualNetworkRules/{virtual_network_rule_name}: create or replace a rule.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        virtual_network_rule_name: &str,
        parameters: &CreateOrUpdateVirtualNetworkRuleParameters,
    ) -> std::result::Result<VirtualNetworkRule, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/virtualNetworkRules/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, virtual_network_rule_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: VirtualNetworkRule =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                create_or_update::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `create_or_update`.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// PATCH …/virtualNetworkRules/{virtual_network_rule_name}; `parameters` is optional.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        virtual_network_rule_name: &str,
        parameters: Option<&UpdateVirtualNetworkRuleParameters>,
    ) -> std::result::Result<VirtualNetworkRule, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/virtualNetworkRules/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, virtual_network_rule_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(parameters) = parameters {
            req_builder = req_builder.json(parameters);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: VirtualNetworkRule =
                    serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                update::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `update`.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// DELETE …/virtualNetworkRules/{virtual_network_rule_name}. 200 and 204 are both success.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        virtual_network_rule_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/virtualNetworkRules/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, virtual_network_rule_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                delete::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Response/error types for `delete`.
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
/// CRUD operations on the trusted identity providers of a Data Lake Store account.
pub mod trusted_id_providers {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// GET …/accounts/{account_name}/trustedIdProviders: list all providers.
    pub async fn list_by_account(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<TrustedIdProviderListResult, list_by_account::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/trustedIdProviders",
            &operation_config.base_path, subscription_id, resource_group_name, account_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_account::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req
// --- continuation of `list_by_account` (the `let req` binding opens above) ---
= req_builder.build().context(list_by_account::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_account::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?;
                let rsp_value: TrustedIdProviderListResult =
                    serde_json::from_slice(&body).context(list_by_account::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_account::ResponseBytesError)?;
                // field-init shorthand (was `body: body`; clippy::redundant_field_names)
                list_by_account::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `list_by_account`.
    pub mod list_by_account {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// GET …/trustedIdProviders/{trusted_id_provider_name}: fetch a single provider.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        trusted_id_provider_name: &str,
    ) -> std::result::Result<TrustedIdProvider, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/trustedIdProviders/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, trusted_id_provider_name
        );
        let mut req_builder = client.get(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: TrustedIdProvider =
                    serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                get::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `get`.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// PUT …/trustedIdProviders/{trusted_id_provider_name}: create or replace a provider.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        trusted_id_provider_name: &str,
        parameters: &CreateOrUpdateTrustedIdProviderParameters,
    ) -> std::result::Result<TrustedIdProvider, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/trustedIdProviders/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, trusted_id_provider_name
        );
        let mut req_builder = client.put(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: TrustedIdProvider =
                    serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                create_or_update::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `create_or_update`.
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// PATCH …/trustedIdProviders/{trusted_id_provider_name}; `parameters` is optional.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        trusted_id_provider_name: &str,
        parameters: Option<&UpdateTrustedIdProviderParameters>,
    ) -> std::result::Result<TrustedIdProvider, update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/trustedIdProviders/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, trusted_id_provider_name
        );
        let mut req_builder = client.patch(uri_str);
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        if let Some(parameters) = parameters {
            req_builder = req_builder.json(parameters);
        }
        let req = req_builder.build().context(update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                let rsp_value: TrustedIdProvider =
                    serde_json::from_slice(&body).context(update::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?;
                update::UnexpectedResponse { status_code, body }.fail()
            }
        }
    }
    /// Error type for `update`.
    pub mod update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes },
            BuildRequestError { source: reqwest::Error },
            ExecuteRequestError { source: reqwest::Error },
            ResponseBytesError { source: reqwest::Error },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    /// DELETE …/trustedIdProviders/{trusted_id_provider_name} (continues past this block).
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        trusted_id_provider_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeStore/accounts/{}/trustedIdProviders/{}",
            &operation_config.base_path, subscription_id, resource_group_name, account_name, trusted_id_provider_name
        );
        let mut req_builder = client.delete(uri_str);
        if let Some(token_credential) =
&operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod operations { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!("{}/providers/Microsoft.DataLakeStore/operations", &operation_config.base_path,); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = 
req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: OperationListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod locations { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get_capability( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, ) -> std::result::Result<CapabilityInformation, get_capability::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeStore/locations/{}/capability", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_capability::GetTokenError)?; req_builder = 
req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_capability::BuildRequestError)?; let rsp = client.execute(req).await.context(get_capability::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_capability::ResponseBytesError)?; let rsp_value: CapabilityInformation = serde_json::from_slice(&body).context(get_capability::DeserializeError { body })?; Ok(rsp_value) } StatusCode::NOT_FOUND => get_capability::NotFound404 {}.fail(), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_capability::ResponseBytesError)?; get_capability::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_capability { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_usage( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, ) -> std::result::Result<UsageListResult, get_usage::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeStore/locations/{}/usages", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_usage::GetTokenError)?; req_builder = 
req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_usage::BuildRequestError)?; let rsp = client.execute(req).await.context(get_usage::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_usage::ResponseBytesError)?; let rsp_value: UsageListResult = serde_json::from_slice(&body).context(get_usage::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_usage::ResponseBytesError)?; get_usage::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_usage { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } }
//! Provides utilities for managing media resources. //! //! Currently, only textures are managed by this implementation, //! but in the future, sounds or models could be loaded as well. use luminance::{ context::GraphicsContext, pixel::RGB32F, texture::{Dim2, Flat, MagFilter, MinFilter, Sampler, Texture}, }; use png::{self, Decoder, OutputInfo}; use std::{ fs::File, path::{Path, PathBuf}, rc::Rc, }; const RESOURCE_PATH: &str = "./res"; /// The master resource manager. /// /// A ``ResourceManager`` has subordinate resource manangers /// that load and store various types of media. pub struct ResourceManager { textures: TextureManager, } impl ResourceManager { /// Load all media and return a ``ResourceManager`` /// that owns the loaded resources. /// /// ``ctx`` is a GraphicsContext that acts as a handle /// for the current OpenGL state. Usually, the GLFW /// window will be supplied for this parameter. pub fn load_all<C: GraphicsContext>(ctx: &mut C) -> ResourceManager { ResourceManager { textures: TextureManager::load_all(ctx), } } /// Return a reference to the ``TextureManager`` for this /// parent resource manager. pub fn texture_mgr(&self) -> &TextureManager { &self.textures } } /// A texture manager. /// /// This ``struct`` owns all textures that are used by the /// game during runtime. /// /// Each texture stored in the manager is behind a ``Rc``. /// Obtaining a reference to the texture thus requires a /// reference count update, but it is not horribly expensive /// since the update is non-atomic. pub struct TextureManager { terrain_tex: Rc<Texture2D>, } impl TextureManager { const TEXTURE_PATH: &'static str = "tex"; const TERRAIN: &'static str = "terrain.png"; /// Load all textures and store them in a new /// ``TextureManager`` instance. /// /// For the ``ctx`` parameter, an instance of /// ``GraphicsContext`` must be specified. This /// parameter represents the OpenGL context. /// The current GLFW window normally should be /// supplied for ``ctx``. 
pub fn load_all<C: GraphicsContext>(ctx: &mut C) -> TextureManager { let tex_path: PathBuf = [RESOURCE_PATH, Self::TEXTURE_PATH].iter().collect(); let terrain_path = tex_path.join(Self::TERRAIN); let mut sampler = Sampler::default(); sampler.min_filter = MinFilter::Nearest; sampler.mag_filter = MagFilter::Nearest; TextureManager { terrain_tex: Rc::new(Texture2D::with_path(ctx, terrain_path, &sampler)), } } pub fn terrain(&self) -> Rc<Texture2D> { Rc::clone(&self.terrain_tex) } } /// The type of a low-level simple 2D texture. /// /// This is an alias to the underlying ``luminance`` /// texture. If you are not talking directly to the /// graphics API, use ``Texture2D`` instead. pub type Tex2DInner = Texture<Flat, Dim2, RGB32F>; /// An individual 2D texture. /// /// A texture is composed of a ``luminance`` texture and /// an ``info`` field that contains size and format /// metadata. pub struct Texture2D { inner: Tex2DInner, info: OutputInfo, } impl Texture2D { /// Create a new 2D texture with the given /// ``luminance`` ``Texture`` and ``OutputInfo``. pub fn new(inner: Tex2DInner, info: OutputInfo) -> Texture2D { Texture2D { inner, info } } /// Create a new 2D texture by loading the texture /// data from ``file``. /// /// The ``sampler`` is used by ``luminance`` and /// customizes how the image is sampled by OpenGL. pub fn from_file<C>(ctx: &mut C, file: File, sampler: &Sampler) -> Texture2D where C: GraphicsContext, { let (inner, info) = load_png(ctx, file, sampler); Self::new(inner, info) } /// Create a new 2D texture by loading the texture /// data from the file located at ``path``. /// /// The ``sampler`` is passed on to ``luminance`` /// to control how the image is sampled by the /// OpenGL backend. pub fn with_path<C>(ctx: &mut C, path: impl AsRef<Path>, sampler: &Sampler) -> Texture2D where C: GraphicsContext, { let file = File::open(path).unwrap(); Self::from_file(ctx, file, sampler) } /// Return the low-level inner ``luminance`` texture. 
pub fn inner(&self) -> &Tex2DInner { &self.inner } /// Return the texture info structure. pub fn info(&self) -> &OutputInfo { &self.info } } /// Load a PNG image from the given ``File``. /// /// The ``sampler`` parameter allows the caller /// to customize how the image data is sampled /// by OpenGL. #[rustfmt::skip] fn load_png<C>(ctx: &mut C, file: File, sampler: &Sampler) -> (Tex2DInner, OutputInfo) where C: GraphicsContext, { let decoder = Decoder::new(file); let (info, mut reader) = decoder.read_info().unwrap(); assert_eq!(info.color_type, png::ColorType::RGB); assert_eq!(info.bit_depth, png::BitDepth::Eight); let mut data = vec![0; info.buffer_size()]; reader.next_frame(&mut data).unwrap(); let mut image = Vec::with_capacity(data.len() / 3); for i in 0..(data.len() / 3) { let idx = i * 3; image.push((data[idx] as f32 / 255., data[idx + 1] as f32 / 255., data[idx + 2] as f32 / 255.)); } let tex = Tex2DInner::new(ctx, [info.width, info.height], 0, sampler).unwrap(); tex.upload(false, &image); (tex, info) }
#![deny(clippy::pedantic)]
#![feature(option_result_unwrap_unchecked)]
#![feature(drain_filter)]

#[macro_use]
extern crate serde_derive_state;

use std::collections::VecDeque;

use necsim_core::{
    cogs::RngCore,
    lineage::{GlobalLineageReference, Lineage},
    reporter::Reporter,
    simulation::Simulation,
};
use necsim_core_bond::NonNegativeF64;
use necsim_impls_cuda::cogs::rng::CudaRng;
use necsim_impls_no_std::cogs::{
    active_lineage_sampler::independent::{
        event_time_sampler::exp::ExpEventTimeSampler, IndependentActiveLineageSampler,
    },
    coalescence_sampler::independent::IndependentCoalescenceSampler,
    dispersal_sampler::in_memory::packed_alias::InMemoryPackedAliasDispersalSampler,
    emigration_exit::never::NeverEmigrationExit,
    event_sampler::independent::IndependentEventSampler,
    immigration_entry::never::NeverImmigrationEntry,
    lineage_store::independent::IndependentLineageStore,
    origin_sampler::{decomposition::DecompositionOriginSampler, pre_sampler::OriginPreSampler},
    rng::wyhash::WyHash,
};
use necsim_partitioning_core::LocalPartition;
use rustcoalescence_algorithms::{Algorithm, AlgorithmArguments};
use rustcoalescence_scenarios::Scenario;
use rust_cuda::{
    common::RustToCuda,
    host::CudaDropWrapper,
    rustacuda::{
        function::{BlockSize, GridSize},
        prelude::{Stream, StreamFlags},
    },
};

mod arguments;
mod cuda;
mod info;
mod kernel;
mod parallelisation;

use arguments::{
    CudaArguments, IsolatedParallelismMode, MonolithicParallelismMode, ParallelismMode,
};
use crate::kernel::SimulationKernel;
use cuda::with_initialised_cuda;

/// Type-level marker for the CUDA-backed coalescence algorithm.
/// It is an empty enum, so it can never be instantiated; it exists only
/// as an `impl` target for the `Algorithm` / `AlgorithmArguments` traits.
#[allow(clippy::module_name_repetitions, clippy::empty_enum)]
pub enum CudaAlgorithm {}

impl AlgorithmArguments for CudaAlgorithm {
    type Arguments = CudaArguments;
}

// The `RustToCuda` bounds require that every scenario component can be
// transferred to the GPU.
#[allow(clippy::type_complexity)]
impl<O: Scenario<CudaRng<WyHash>>> Algorithm<O> for CudaAlgorithm
where
    O::Habitat: RustToCuda,
    O::DispersalSampler<InMemoryPackedAliasDispersalSampler<O::Habitat, CudaRng<WyHash>>>:
        RustToCuda,
    O::TurnoverRate: RustToCuda,
    O::SpeciationProbability: RustToCuda,
{
    type Error = anyhow::Error;
    type LineageReference = GlobalLineageReference;
    type LineageStore = IndependentLineageStore<O::Habitat>;
    type Rng = CudaRng<WyHash>;

    /// Samples the initial lineages according to `args.parallelism_mode`,
    /// assembles the independent-algorithm `Simulation` from the scenario's
    /// components, and launches it on the GPU via `with_initialised_cuda`.
    ///
    /// Returns the pair produced by `parallelisation::monolithic::simulate`
    /// (a `NonNegativeF64` and a `u64`; presumably elapsed simulation time
    /// and a step/event count — TODO confirm against that function's docs).
    fn initialise_and_simulate<I: Iterator<Item = u64>, R: Reporter, P: LocalPartition<R>>(
        args: Self::Arguments,
        seed: u64,
        scenario: O,
        pre_sampler: OriginPreSampler<I>,
        local_partition: &mut P,
    ) -> Result<(NonNegativeF64, u64), Self::Error> {
        // Phase 1: sample the starting lineages. How the habitat is sampled
        // depends on the parallelism mode.
        let lineages: VecDeque<Lineage> = match args.parallelism_mode {
            // Apply no lineage origin partitioning in the `Monolithic` mode
            ParallelismMode::Monolithic(..) => scenario
                .sample_habitat(pre_sampler)
                .map(|indexed_location| Lineage::new(indexed_location, scenario.habitat()))
                .collect(),
            // Apply lineage origin partitioning in the `IsolatedIndividuals` mode
            ParallelismMode::IsolatedIndividuals(IsolatedParallelismMode { partition, .. }) => {
                scenario
                    .sample_habitat(
                        pre_sampler.partition(partition.rank(), partition.partitions().get()),
                    )
                    .map(|indexed_location| Lineage::new(indexed_location, scenario.habitat()))
                    .collect()
            },
            // Apply lineage origin partitioning in the `IsolatedLandscape` mode
            ParallelismMode::IsolatedLandscape(IsolatedParallelismMode { partition, .. }) => {
                DecompositionOriginSampler::new(
                    scenario.sample_habitat(pre_sampler),
                    &O::decompose(scenario.habitat(), partition.rank(), partition.partitions()),
                )
                .map(|indexed_location| Lineage::new(indexed_location, scenario.habitat()))
                .collect()
            },
        };

        // Phase 2: decompose the scenario into its cogs and build the
        // simulation. The independent algorithm uses "never" emigration /
        // immigration components and an exponential event time sampler.
        let (habitat, dispersal_sampler, turnover_rate, speciation_probability) =
            scenario.build::<InMemoryPackedAliasDispersalSampler<O::Habitat, CudaRng<WyHash>>>();
        let rng = CudaRng::from(WyHash::seed_from_u64(seed));
        let lineage_store = IndependentLineageStore::default();
        let emigration_exit = NeverEmigrationExit::default();
        let coalescence_sampler = IndependentCoalescenceSampler::default();
        let event_sampler = IndependentEventSampler::default();
        let immigration_entry = NeverImmigrationEntry::default();
        let active_lineage_sampler =
            IndependentActiveLineageSampler::empty(ExpEventTimeSampler::new(args.delta_t));

        let simulation = Simulation::builder()
            .habitat(habitat)
            .rng(rng)
            .speciation_probability(speciation_probability)
            .dispersal_sampler(dispersal_sampler)
            .lineage_reference(std::marker::PhantomData::<GlobalLineageReference>)
            .lineage_store(lineage_store)
            .emigration_exit(emigration_exit)
            .coalescence_sampler(coalescence_sampler)
            .turnover_rate(turnover_rate)
            .event_sampler(event_sampler)
            .immigration_entry(immigration_entry)
            .active_lineage_sampler(active_lineage_sampler)
            .build();

        // Note: It seems to be more performant to spawn smaller blocks
        let block_size = BlockSize::x(args.block_size);
        let grid_size = GridSize::x(args.grid_size);

        // Every parallelism mode carries an `event_slice`; extract it
        // uniformly with an or-pattern.
        let event_slice = match args.parallelism_mode {
            ParallelismMode::Monolithic(MonolithicParallelismMode { event_slice })
            | ParallelismMode::IsolatedIndividuals(IsolatedParallelismMode { event_slice, .. })
            | ParallelismMode::IsolatedLandscape(IsolatedParallelismMode { event_slice, .. }) => {
                event_slice
            },
        };

        // Phase 3: run the simulation on the selected CUDA device inside an
        // initialised CUDA context, on a non-blocking stream.
        with_initialised_cuda(args.device, || {
            let stream = CudaDropWrapper::from(Stream::new(StreamFlags::NON_BLOCKING, None)?);

            SimulationKernel::with_kernel(args.ptx_jit, |kernel| {
                info::print_kernel_function_attributes(kernel.function());

                parallelisation::monolithic::simulate(
                    simulation,
                    kernel,
                    &stream,
                    (grid_size, block_size, args.dedup_cache, args.step_slice),
                    lineages,
                    event_slice,
                    local_partition,
                )
            })
        })
    }
}
// NOTE(review): this file is written in pre-1.0 Rust (`#![feature(phase)]`,
// `uint`, bare closure-type parameters like `|char| -> bool`, and
// `str::char_range_at`). It will not compile on a modern toolchain and is
// documented here as-is rather than modernised.
#![feature(phase)]

#[phase(plugin, link)]
extern crate log;

pub use cursor::Cursor;
pub use scan_error::{ScanError, NothingMatched, OtherScanError, ScanIoError};
pub use scanner::{Scanner, NegInt};
pub use tokenizer::Tokenizer;
pub use whitespace::Whitespace;

pub mod cursor;
pub mod scan_error;
pub mod scanner;
pub mod tokenizer;
pub mod whitespace;

/// Returns the byte length of the longest prefix of `s` whose chars all
/// satisfy `pred`, or `None` when the very first char fails `pred` (or `s`
/// is empty).
///
/// Implementation: walk `char_indices` while `pred` holds, take the last
/// matching `(byte_index, char)` pair, then use `char_range_at(i).next` to
/// convert the index of the last matching char into the byte offset one
/// past it (i.e. the prefix length).
fn len_while(s: &str, pred: |char| -> bool) -> Option<uint> {
    s.char_indices()
        .take_while(|&(_,ch)| pred(ch))
        .last()
        .map(|(i,_)| {
            let ::std::str::CharRange { ch: _, next } = s.char_range_at(i);
            next
        })
}
// NOTE(review): work-in-progress Lox parser/AST built on `pest`. Several
// items below are incomplete and DO NOT COMPILE; the broken spots are
// flagged with NOTE(review) comments rather than silently "fixed", because
// completing them requires parser semantics that are not visible here.
#![allow(non_snake_case)]
extern crate pest;
#[macro_use]
extern crate pest_derive;

use pest::{Parser, iterators::*};

/// Parser type generated by `pest_derive` from the `lox.pest` grammar.
#[derive(Parser)]
#[grammar = "lox.pest"]
struct Lox;

/// Demo entry point: parses a tiny source string and prints every
/// second-level pair together with its matched text.
fn main() {
    let source = "fun main() { }";
    Lox::parse(Rule::Program, source)
        .unwrap()
        .for_each(|pair| {
            pair.into_inner().for_each(|inner_pair| {
                let inner_span = inner_pair.clone().into_span();
                println!("{}: {}", inner_pair, inner_span.as_str());
            })
        });
}

/// Root of the abstract syntax tree: a program is a list of declarations.
struct AST { decls: Vec<Decl> }

impl AST {
    /// Parses `source` into an AST.
    ///
    /// NOTE(review): `.unwrap()` panics on any syntax error, and the
    /// `.into_iter()` after `.map(...)` is redundant (`Pairs` is already
    /// an iterator).
    fn from(source: &str) -> Self {
        Self {
            decls: Lox::parse(Rule::Program, source)
                .unwrap()
                .map(|pair| Decl::from(pair))
                .into_iter()
                .collect()
        }
    }
}

/// A top-level declaration.
enum Decl { Class(Class), Fun(Fun), Var(Var), Stmt(Stmt), }

impl Decl {
    // NOTE(review): does not compile. `Decl(...)` is not a constructor for
    // an enum, and every arm calls `Class::from`; presumably each arm was
    // meant to build its own variant, e.g.
    // `Rule::Fun => Decl::Fun(Fun::from(pair))` — confirm intended design.
    // The match is also non-exhaustive over `Rule`.
    fn from(pair: Pair<Rule>) -> Self {
        Decl( match pair.as_rule() {
            Rule::Class => Class::from(pair),
            Rule::Fun => Class::from(pair),
            Rule::Var => Class::from(pair),
            Rule::Stmt => Class::from(pair),
        } )
    }
}

/// A class declaration: its fields and methods.
struct Class { fields: Vec<Field>, methods: Vec<Fun>, }

impl Class {
    // NOTE(review): does not compile — the `let` binding is missing the
    // logic that partitions `pair.into_inner()` into fields and methods
    // (and its trailing semicolon), and the struct literal has no field
    // initializer expressions.
    fn from(pair: Pair<Rule>) -> Self {
        let (fields, methods) = pair.into_inner()
        Self {
            fields:
            methods:
        }
    }
}

/// A class field. NOTE(review): currently carries no data.
struct Field { }

/// A function declaration: parameter list and body expression.
struct Fun { params: Vec<Param>, body: Box<Expr>, }

/// A single function parameter.
struct Param { id: Id, }

/// A variable declaration with its initializer.
struct Var { id: Id, val: Expr, }

/// A statement.
enum Stmt { ExprStmt(Expr), For(For), If(If), Print(Print), Return(Return), While(While), Block(Expr), }

/// A C-style `for` loop; all three header clauses are optional.
struct For { init: Option<Expr>, cond: Option<Expr>, after: Option<Expr>, body: Expr, }

/// An `if` statement with optional `else` branch.
struct If { cond: Expr, then_branch: Box<Stmt>, else_branch: Option<Box<Stmt>>, }

/// A `print` statement.
struct Print { expr: Expr, }

/// A `return` statement.
struct Return { expr: Expr, }

/// A `while` loop.
struct While { cond: Expr, body: Box<Stmt>, }

/// An expression.
enum Expr { Assign(Assign), Binary(Binary), Unary(Unary), Call(Call), Primary(Primary), }

/// A call/access chain rooted at a primary expression.
struct Call { expr: Primary, methods: Vec<Method>, }

/// One method segment of a call chain.
struct Method { id: Id, }

/// An assignment: `lval = rval`.
struct Assign { lval: Box<Call>, rval: Box<Expr>, }

/// A binary operation.
struct Binary { l: Box<Expr>, op: BinaryOp, r: Box<Expr>, }

/// Binary operators, roughly in precedence-group order.
enum BinaryOp { Or, And, Eq, Neq, Gt, Geq, Lt, Leq, Add, Sub, Mul, Div, }

/// A unary operation.
struct Unary { op: UnaryOp, expr: Box<Expr> }

/// Unary operators: logical not and arithmetic negation.
enum UnaryOp
{ Not, Neg, }

/// NOTE(review): unused/empty placeholder — possibly for property access.
struct Access { }

/// A primary expression: grouping, literal, identifier, or `super` access.
enum Primary { Expr(Box<Expr>), Lit(Lit), Id(Id), Super(Id), }

/// Identifiers are plain strings.
type Id = String;

/// A literal value.
enum Lit { Float(f32), Int(i32), Str(String), Char(char), Bool(bool), }
// Work-in-progress constraint-satisfaction solver (AC-style arc
// consistency). `SimpleDomain`, `Variable`, and the `Binary`/`Unary`
// constraint traits are project-local and not visible here.
use crate::constraint::Binary;
use crate::constraint::Unary;
use crate::domain::SimpleDomain;
use crate::variable::Variable;
use std::collections::VecDeque;

// Domain values are 64-bit integers.
type Value = i64;

// A value paired with the position (board column/row?) it is assigned to.
// NOTE(review): presumably wraps a queen/piece placement — confirm.
struct ValueWarper {
    value: Value,
    pos: i32,
}

// Placeholder for an "oblique" (diagonal) binary constraint; the trait
// impl below is still commented out.
struct Oblique {}

// impl Binary for Oblique{
//     fn test(self, left: ValueWarper, right: ValueWarper) -> bool{
//         isOblique((left.pos,left.value),(right.pos,right.value))
//     }
// }

// Returns true when the two points are NOT on a common diagonal
// (|dx| != |dy|). The unit test below confirms that all four diagonal
// neighbours of the origin yield `false`.
fn isOblique(left: (i16, i16), right: (i16, i16)) -> bool {
    let a = (left.0 - right.0).abs();
    let b = (left.1 - right.1).abs();
    a != b
}

// Returns true when the two points share their second coordinate but not
// their first — i.e. they lie on the same horizontal line (assuming the
// second tuple element is the row; TODO confirm coordinate convention).
fn isHorizontal(left: (i16, i16), right: (i16, i16)) -> bool {
    (left.0 != right.0) && (left.1 == right.1)
}

// The solver state: per-variable domains and the variables themselves.
struct Mill {
    domains: Vec<SimpleDomain>,
    variables: Vec<Variable>,
    // binaryConstraints: Vec<Binary>,
}

// Enqueues the arcs touching `from`: both (from, i) and (i, from) for every
// i in from..to. NOTE(review): when from < to this also enqueues the
// degenerate self-arc (from, from) — confirm whether that is intended.
fn generate(qeue: &mut VecDeque<(usize, usize)>, from: usize, to: usize) {
    for i in from..to {
        qeue.push_back((from, i));
        qeue.push_back((i, from));
    }
}

impl Mill {
    // fn grind() -> Vec<i16> {
    // }
    // fn addBinaryConstraint(&mut self,constraint: Binary){
    //     // self.push(constraint);
    // }

    // Registers a variable with the solver.
    // NOTE(review): currently a no-op stub.
    fn addVariable(&mut self, variable: Variable) {
        // self.push(variable);
    }

    // Stub: assignment step not yet implemented.
    fn assign() {}

    // Stub constraint check: always satisfied. Real constraints (e.g.
    // Oblique above) are not wired in yet.
    fn checkConstraint(i: Value, j: Value) -> bool {
        true
    }

    // AC "revise" step: removes from vi's domain every value with no
    // support in vj's domain; returns true when something was removed.
    //
    // NOTE(review): `self.variables[vi]` moves/copies the `Variable` out of
    // the vec. This only compiles if `Variable: Copy`, and in that case the
    // `di.remove(...)` calls below mutate a *copy*, so the pruning never
    // reaches `self.variables` — this likely needs `&mut self` and in-place
    // mutation. Confirm against `Variable`'s definition.
    fn revise(&self, vi: usize, vj: usize) -> bool {
        let mut di = self.variables[vi];
        let mut dj = self.variables[vj];
        let mut toDelete = Vec::new();
        for i in di.getDomain() {
            // `i` has support iff some j in dj's domain satisfies the
            // constraint; `all(!check)` means no support at all.
            if dj.getDomain().iter().all(|j| !Mill::checkConstraint(i, *j)) {
                toDelete.push(i);
            }
        }
        if !toDelete.is_empty() {
            toDelete.iter().for_each(|v| {
                di.remove(*v);
            });
            return true;
        }
        false
    }

    // AC-3-style propagation starting from `pos`: processes the arc queue,
    // re-enqueueing the arcs of any variable whose domain was revised.
    // Returns false when a domain is wiped out (no solution possible).
    fn arcConsistency(&self, pos: usize) -> bool {
        let mut q = VecDeque::new();
        generate(&mut q, pos, self.variables.len());
        while let Some((vk, vm)) = q.pop_front() {
            if self.revise(vk, vm) {
                if self.variables[vk].getDomain().is_empty() {
                    return false;
                } else {
                    generate(&mut q, vk, self.variables.len());
                }
            }
        }
        true
    }
}

#[cfg(test)]
mod tests {
    use super::isOblique;

    // Diagonal neighbours of the origin are not "oblique".
    #[test]
    fn isOblique_1() {
        let d = (0, 0);
        assert!(!isOblique(d, (1, 1)));
        assert!(!isOblique(d, (1, -1)));
        assert!(!isOblique(d, (-1, 1)));
        assert!(!isOblique(d, (-1, -1)));
    }
}
use ::memory; use ::core; use ::mantle::KError; use ::mantle::kernel::{PAGE_4K_SIZE, PAGE_2M_SIZE}; use ::core::cell::RefCell; use ::core::cell::RefMut; use ::mantle::concurrency::SingleThreaded; pub struct VRegion { // both page-aligned start: usize, end: usize } impl VRegion { fn new(start: usize, end: usize) -> VRegion { assert!((start & (PAGE_4K_SIZE - 1)) == 0); assert!((end & (PAGE_4K_SIZE - 1)) == 0); assert!(end > start); VRegion { start, end } } pub fn len(&self) -> usize { assert!(self.end >= self.start); self.end - self.start } pub fn start(&self) -> usize { self.start } pub fn is_empty(&self) -> bool { self.len() == 0 } fn chop_len(&mut self, length: usize) -> VRegion { assert!((length & (PAGE_4K_SIZE - 1)) == 0 && length > 0); assert!(self.len() >= length); let out = VRegion::new(self.start, self.start + length); self.start += length; out } pub fn intersection(&self, other: &VRegion) -> Option<VRegion> { let (lower, higher) = if self.start < other.start { (self, other) } else { (other, self) }; if lower.end > higher.start { Some(VRegion { start: higher.start, end: lower.end }) } else { None } } fn join(&mut self, other: VRegion) -> Option<VRegion> { assert!(self.intersection(&other).is_none()); // intersections are BAD if self.end == other.start { self.end = other.end; None } else if self.start == other.end { self.start = other.start; None } else { Some(other) } } pub fn could_join(&self, other: &VRegion) -> bool { assert!(self.intersection(&other).is_none()); // intersections are BAD self.end == other.start || self.start == other.end } pub fn to_4k_address(&self) -> usize { assert!((self.start & (PAGE_4K_SIZE - 1)) == 0); assert!((self.end - self.start) == PAGE_4K_SIZE); self.start } } impl core::fmt::Display for VRegion { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { write!(f, "{:#X}-{:#X}", self.start, self.end) } } #[cfg(target_arch = "x86")] const KERNEL_BASE_VADDR: usize = 0xe0000000usize; #[cfg(target_arch = "x86_64")] const 
KERNEL_BASE_VADDR: usize = 0xffffffff80000000usize; static AVAILABLE_REGIONS: SingleThreaded<RefCell<memory::LinkedList<VRegion>>> = SingleThreaded(RefCell::new(memory::LinkedList::empty())); fn get_avail_regions_list() -> RefMut<'static, memory::LinkedList<VRegion>> { AVAILABLE_REGIONS.get().borrow_mut() } pub fn init_vspace(executable_start: usize, image_len: usize) { let region = &mut *get_avail_regions_list(); region.pushmut(VRegion::new(executable_start + image_len + PAGE_4K_SIZE * 8, KERNEL_BASE_VADDR)); //region.pushmut(VRegion::new(PAGE_2M_SIZE, executable_start)); debug!("self was loaded to: {:#X}-{:#X}", executable_start, executable_start + image_len); } pub fn allocate_vregion(length: usize) -> core::result::Result<VRegion, KError> { assert!((length & (PAGE_4K_SIZE - 1)) == 0 && length > 0); let rl: &mut memory::LinkedList<VRegion> = &mut *get_avail_regions_list(); let (vregion, is_now_empty): (VRegion, bool) = { let h = rl.find_mut(|b| b.len() >= length); if h.is_none() { return Err(KError::NotEnoughMemory); } let head = h.unwrap(); (head.chop_len(length), head.is_empty()) }; if is_now_empty { assert!(rl.remove_mut(|b| b.is_empty()).unwrap().is_empty()); assert!(rl.find(|b| b.is_empty()).is_none()); } debug!("allocated vregion {}", vregion); Ok(vregion) } pub fn free_vregion(mut r: VRegion) { assert!(!r.is_empty()); let rl: &mut memory::LinkedList<VRegion> = &mut *get_avail_regions_list(); let mut cur: &mut memory::LinkedList<VRegion> = rl; loop { let tmp = cur; if tmp.is_empty() { // nope -- cur is the end of the line! just add our stuff. if tmp.pushmut(r).is_err() { panic!("could not free vregion due to OOM condition"); } // added! return; } let (head, ncur) = tmp.nextmut().unwrap(); // try to merge this one if let Some(rest) = head.join(r) { // we can't merge here. we'll try the next element r = rest; } else { // merged! hooray! 
now we need to see if we can merge onto the next one as well let should_do_postjoin = if let Some(adjacent) = ncur.head() { head.could_join(adjacent) } else { false }; if should_do_postjoin { assert!(head.join(ncur.popmut().unwrap()).is_none()); // make sure we're successful } return; } cur = ncur } }
//! Declares this crate's queue-operation response submodules and re-exports
//! their response types (plus `PopReceipt`) at this module's root, so
//! callers can `use ...::ListQueuesResponse` etc. without naming the
//! individual submodules.

// Queue enumeration.
mod list_queues_response;
pub use list_queues_response::ListQueuesResponse;

// Message-level operations.
mod put_message_response;
pub use put_message_response::PutMessageResponse;
mod get_messages_response;
pub use get_messages_response::GetMessagesResponse;
mod peek_messages_response;
pub use peek_messages_response::PeekMessagesResponse;
mod delete_message_response;
pub use delete_message_response::DeleteMessageResponse;
// `PopReceipt` lives alongside the delete response; re-export it too.
pub use delete_message_response::PopReceipt;
mod clear_messages_response;
pub use clear_messages_response::ClearMessagesResponse;

// Queue lifecycle operations.
mod create_queue_response;
pub use create_queue_response::CreateQueueResponse;
mod delete_queue_response;
pub use delete_queue_response::DeleteQueueResponse;
use std::cmp::Ordering;
// BUG FIX: BTreeMap is used by `prime_factor` (and the MultiSet module later in this
// file) but was never imported anywhere, so the file did not compile.
use std::collections::BTreeMap;
use std::io::{stdin, Read, StdinLock};
use std::str::FromStr;

/// Template entry point: locks stdin and wraps it in a `Scanner`.
/// The body is a placeholder to be filled per problem.
fn main() {
    let cin = stdin();
    let cin = cin.lock();
    let mut sc = Scanner::new(cin);
}

// from: http://ir5.hatenablog.com/entry/20171209/1512821837
/// Whitespace-separated token scanner over a locked stdin.
#[allow(dead_code)]
struct Scanner<'a> {
    cin: StdinLock<'a>,
}

#[allow(dead_code)]
impl<'a> Scanner<'a> {
    fn new(cin: StdinLock<'a>) -> Scanner<'a> {
        Scanner { cin }
    }

    /// Reads the next whitespace-delimited token and parses it; `None` on
    /// EOF or parse failure.
    fn read<T: FromStr>(&mut self) -> Option<T> {
        let token = self
            .cin
            .by_ref()
            .bytes()
            .map(|c| c.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect::<String>();
        token.parse::<T>().ok()
    }

    /// Like `read`, but panics on EOF / parse failure (contest convenience).
    fn input<T: FromStr>(&mut self) -> T {
        self.read().unwrap()
    }
}

/// C++-style binary search over a sorted slice.
trait BinarySearch<T> {
    /// Returns the first index whose element is >= `key`.
    fn lower_bound(&self, key: &T) -> usize;
    /// Returns the first index whose element is > `key`.
    fn upper_bound(&self, key: &T) -> usize;
}

impl<T: Ord> BinarySearch<T> for [T] {
    fn lower_bound(&self, key: &T) -> usize {
        // Invariant: self[..=ng] < key (ng may be -1), self[ok..] >= key.
        let mut ng = -1;
        let mut ok = self.len() as i64;
        while (ok - ng).abs() > 1 {
            let mid = (ok + ng) / 2;
            match key.cmp(&self[mid as usize]) {
                Ordering::Less | Ordering::Equal => {
                    ok = mid;
                }
                Ordering::Greater => {
                    ng = mid;
                }
            }
        }
        ok as usize
    }

    fn upper_bound(&self, key: &T) -> usize {
        // Same invariant, but elements equal to `key` stay on the `ng` side.
        let mut ng = -1;
        let mut ok = self.len() as i64;
        while (ok - ng).abs() > 1 {
            let mid = (ok + ng) / 2;
            match key.cmp(&self[mid as usize]) {
                Ordering::Less => {
                    ok = mid;
                }
                Ordering::Equal | Ordering::Greater => {
                    ng = mid;
                }
            }
        }
        ok as usize
    }
}

/// Number of decimal digits of `n` (0 for n == 0).
fn count_digit(n: usize) -> usize {
    let mut n = n;
    let mut ret = 0;
    while n > 0 {
        ret += 1;
        n /= 10;
    }
    ret
}

/// Sum of decimal digits of `n`.
fn digit_sum(n: usize) -> usize {
    let mut n = n;
    let mut ret = 0;
    while n > 0 {
        ret += n % 10;
        n /= 10;
    }
    ret
}

/// All divisors of `n` in unspecified order, via trial division up to sqrt(n).
fn enumerate_divisors(n: usize) -> Vec<usize> {
    let mut ret = Vec::new();
    let mut i = 1;
    while i * i <= n {
        if n % i == 0 {
            ret.push(i);
            if i * i != n {
                ret.push(n / i);
            }
        }
        i += 1;
    }
    ret
}

/// Prime factorization of `n` as prime -> exponent, by trial division.
fn prime_factor(n: usize) -> BTreeMap<usize, usize> {
    let mut ret = BTreeMap::new();
    let mut n = n;
    let mut i: usize = 2;
    while i * i <= n {
        while n % i == 0 {
            *ret.entry(i).or_insert(0) += 1;
            n /= i;
        }
        i += 1;
    }
    if n != 1 {
        // Remaining n is a prime factor > sqrt(original n).
        *ret.entry(n).or_insert(0) += 1;
    }
    ret
}

/// Euclidean GCD; intended for non-negative inputs. `gcd(x, 0) == x`.
fn gcd(m: i64, n: i64) -> i64 {
    if n == 0 {
        m
    } else {
        gcd(n, m % n)
    }
}

/// Least common multiple; divides before multiplying to limit overflow.
fn lcm(m: i64, n: i64) -> i64 {
    m / gcd(m, n) * n
}

/// x^n mod `modulo` by binary exponentiation. Assumes modulo^2 fits in i64.
fn mod_pow(x: i64, n: i64, modulo: i64) -> i64 {
    let mut n = n;
    let mut x = x;
    let mut ret = 1;
    while n > 0 {
        if n & 1 == 1 {
            ret = ret * x % modulo;
        }
        x = x * x % modulo;
        n >>= 1;
    }
    ret
}

/// Disjoint-set union with union by rank and path compression.
struct UnionFind {
    par: Vec<usize>,
    rank: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        let mut vec = vec![0; n];
        for i in 0..n {
            vec[i] = i;
        }
        UnionFind {
            par: vec,
            rank: vec![0; n],
        }
    }

    /// Root of `x`'s component, compressing the path on the way back.
    fn find(&mut self, x: usize) -> usize {
        if x == self.par[x] {
            x
        } else {
            let par = self.par[x];
            let ret = self.find(par);
            self.par[x] = ret;
            ret
        }
    }

    fn is_same(&mut self, a: usize, b: usize) -> bool {
        self.find(a) == self.find(b)
    }

    fn merge(&mut self, a: usize, b: usize) {
        let a_par = self.find(a);
        let b_par = self.find(b);
        if self.rank[a_par] > self.rank[b_par] {
            self.par[b_par] = a_par;
        } else {
            self.par[a_par] = b_par;
            if self.rank[a_par] == self.rank[b_par] {
                self.rank[b_par] += 1;
            }
        }
    }
}

/// Disjoint-set union with union by size; also answers component sizes.
///
/// BUG FIX: this was declared as a second `struct UnionFind`, a duplicate
/// definition (E0428) that made the file uncompilable; renamed so both
/// variants can coexist.
struct UnionFindBySize {
    par: Vec<usize>,
    size: Vec<usize>,
}

impl UnionFindBySize {
    fn new(n: usize) -> Self {
        let mut par = vec![0; n];
        let mut size = vec![0; n];
        for i in 0..n {
            par[i] = i;
            size[i] = 1;
        }
        Self { par, size }
    }

    fn is_same(&mut self, x: usize, y: usize) -> bool {
        self.find_root(x) == self.find_root(y)
    }

    /// Root of `x`'s component, with path compression.
    fn find_root(&mut self, x: usize) -> usize {
        if x != self.par[x] {
            self.par[x] = self.find_root(self.par[x]);
        }
        self.par[x]
    }

    fn merge(&mut self, x: usize, y: usize) {
        let x = self.find_root(x);
        let y = self.find_root(y);
        if x == y {
            return;
        }
        // Attach the smaller tree under the larger one.
        if self.size[x] > self.size[y] {
            self.par[y] = x;
            self.size[x] += self.size[y];
        } else {
            self.par[x] = y;
            self.size[y] += self.size[x];
        }
    }

    /// Size of the component containing `x`.
    fn tree_size(&mut self, x: usize) -> usize {
        let root = self.find_root(x);
        self.size[root]
    }
}
/// Weighted directed edge used by `Graph`.
#[derive(Clone, Debug, PartialEq)]
struct Edge {
    to: usize,
    cost: i64,
}

impl Edge {
    fn new(to: usize, cost: i64) -> Edge {
        Edge { to: to, cost: cost }
    }
}

/// Adjacency-list directed graph over `n` vertices.
#[derive(Clone, Debug)]
struct Graph {
    n: usize,
    adj_list: Vec<Vec<Edge>>,
}

impl Graph {
    fn new(n: usize) -> Self {
        let adj_list = vec![vec![]; n];
        Graph { n, adj_list }
    }

    fn add_edge(&mut self, u: usize, v: Edge) {
        self.adj_list[u].push(v);
    }
}

/// Dijkstra's shortest paths from `start`.
///
/// Returns `(dist, prevs)` — distance and predecessor per vertex; unreachable
/// vertices keep `i64::MAX` / `None`. Edge costs must be non-negative.
fn shortest_path(graph: &Graph, start: usize) -> (Vec<i64>, Vec<Option<usize>>) {
    use std::collections::BinaryHeap;
    let n = graph.n;
    let mut dist: Vec<_> = (0..graph.n).map(|_| std::i64::MAX).collect();
    let mut prevs = vec![None; n];
    let mut heap = BinaryHeap::new();
    dist[start] = 0i64;
    // cmp::Reverse turns the max-heap into a min-heap on (cost, vertex).
    heap.push(std::cmp::Reverse((0i64, start)));
    while let Some(std::cmp::Reverse((cost, cur))) = heap.pop() {
        if cost > dist[cur] {
            // Stale heap entry; a shorter path to `cur` was already settled.
            continue;
        }
        for next in graph.adj_list[cur].iter() {
            if cost + next.cost < dist[next.to] {
                // BUG FIX: this previously stored `cost + 1` while the heap
                // entry used `cost + next.cost`, so popped entries never
                // matched `dist` and relaxation was broken.
                dist[next.to] = cost + next.cost;
                prevs[next.to] = Some(cur);
                heap.push(std::cmp::Reverse((cost + next.cost, next.to)));
            }
        }
    }
    (dist, prevs)
}

/// Reconstructs the intermediate vertices on the shortest path to `to`
/// (both endpoints excluded), in travel order. Panics if `to` has no
/// predecessor (unreachable or equal to start).
fn get_path(to: usize, prevs: &Vec<Option<usize>>) -> Vec<usize> {
    let mut path = vec![];
    let mut cur = prevs[to].unwrap();
    while let Some(next) = prevs[cur] {
        path.push(cur);
        cur = next;
    }
    path.reverse();
    path
}

/// from https://docs.rs/permutohedron/0.2.4/permutohedron/
pub trait LexicalPermutation {
    /// Return `true` if the slice was permuted, `false` if it is already
    /// at the last ordered permutation.
    fn next_permutation(&mut self) -> bool;
    /// Return `true` if the slice was permuted, `false` if it is already
    /// at the first ordered permutation.
    fn prev_permutation(&mut self) -> bool;
}

impl<T> LexicalPermutation for [T]
where
    T: Ord,
{
    /// Original author in Rust: Thomas Backman <serenity@exscape.org>
    fn next_permutation(&mut self) -> bool {
        // These cases only have 1 permutation each, so we can't do anything.
        if self.len() < 2 {
            return false;
        }
        // Step 1: Identify the longest, rightmost weakly decreasing part of the vector
        let mut i = self.len() - 1;
        while i > 0 && self[i - 1] >= self[i] {
            i -= 1;
        }
        // If that is the entire vector, this is the last-ordered permutation.
        if i == 0 {
            return false;
        }
        // Step 2: Find the rightmost element larger than the pivot (i-1)
        let mut j = self.len() - 1;
        while j >= i && self[j] <= self[i - 1] {
            j -= 1;
        }
        // Step 3: Swap that element with the pivot
        self.swap(j, i - 1);
        // Step 4: Reverse the (previously) weakly decreasing part
        self[i..].reverse();
        true
    }

    fn prev_permutation(&mut self) -> bool {
        // These cases only have 1 permutation each, so we can't do anything.
        if self.len() < 2 {
            return false;
        }
        // Step 1: Identify the longest, rightmost weakly increasing part of the vector
        let mut i = self.len() - 1;
        while i > 0 && self[i - 1] <= self[i] {
            i -= 1;
        }
        // If that is the entire vector, this is the first-ordered permutation.
        if i == 0 {
            return false;
        }
        // Step 2: Reverse the weakly increasing part
        self[i..].reverse();
        // Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
        let mut j = self.len() - 1;
        while j >= i && self[j - 1] < self[i - 1] {
            j -= 1;
        }
        // Step 4: Swap that element with the pivot
        self.swap(i - 1, j);
        true
    }
}

/// Precomputed factorials / inverse factorials for nCk, nPk, nHk mod a prime.
struct Combination {
    fac: Vec<usize>,
    finv: Vec<usize>,
    modulo: usize,
}

impl Combination {
    /// Builds tables for arguments up to `max_n - 1`. `modulo` must be prime
    /// (modular inverses are computed via the standard recurrence).
    fn new(max_n: usize, modulo: usize) -> Combination {
        let mut fac = vec![0; max_n];
        let mut finv = vec![0; max_n];
        let mut inv = vec![0; max_n];
        fac[0] = 1;
        fac[1] = 1;
        finv[0] = 1;
        finv[1] = 1;
        inv[1] = 1;
        for i in 2..max_n {
            fac[i] = fac[i - 1] * i % modulo;
            inv[i] = modulo - inv[modulo % i] * (modulo / i) % modulo;
            finv[i] = finv[i - 1] * inv[i] % modulo;
        }
        Combination {
            fac: fac,
            finv: finv,
            modulo: modulo,
        }
    }

    /// nCk mod `modulo`.
    fn get(&self, n: usize, k: usize) -> usize {
        assert!(n >= k);
        self.fac[n] * (self.finv[k] * self.finv[n - k] % self.modulo) % self.modulo
    }

    /// nHk (combinations with repetition) = (n+k-1)Ck.
    #[allow(dead_code)]
    fn h(&self, n: usize, k: usize) -> usize {
        self.get(n + k - 1, k)
    }

    /// nPk = n! / (n-k)! mod `modulo`.
    #[allow(dead_code)]
    fn p(&self, n: usize, k: usize) -> usize {
        assert!(n >= k);
        // BUG FIX: the product was not reduced mod `modulo`, returning a
        // non-canonical (and overflow-prone) value.
        self.fac[n] * self.finv[n - k] % self.modulo
    }
}

#[test]
fn lexical() {
    let mut data = [1, 2, 3];
    data.next_permutation();
    assert_eq!(&data, &[1, 3, 2]);
    data.next_permutation();
    assert_eq!(&data, &[2, 1, 3]);
    data.prev_permutation();
    assert_eq!(&data, &[1, 3, 2]);
    data.prev_permutation();
    assert_eq!(&data, &[1, 2, 3]);
    assert!(!data.prev_permutation());
    let mut c = 0;
    while data.next_permutation() {
        c += 1;
    }
    assert_eq!(c, 5);
}

/// Binary Indexed Tree / Fenwick tree over `n` elements (1-indexed API).
struct Bit {
    // Binary Indexed Tree (1-indexed)
    n: usize,
    data: Vec<i64>,
}

impl Bit {
    fn new(n: usize) -> Bit {
        // Backing storage is rounded up to a power of two.
        let mut m = 1;
        while m < n {
            m *= 2;
        }
        Bit {
            n: n,
            data: vec![0; m],
        }
    }

    /// Prefix sum of positions 1..=i.
    fn sum(&self, i: usize) -> i64 {
        let mut i = i;
        let mut ret = 0;
        while i > 0 {
            ret += self.data[i - 1];
            // Strip the lowest set bit to walk toward the root.
            i -= (i as i64 & -(i as i64)) as usize;
        }
        ret
    }

    /// Adds `x` at 1-indexed position `i` (1 <= i <= n).
    fn add(&mut self, i: usize, x: i64) {
        let mut i = i;
        // BUG FIX: the loop previously ran `while i < self.n`, silently
        // dropping any update at position `n` (1-indexed BIT updates must
        // run while i <= n).
        while i <= self.n {
            self.data[i - 1] += x;
            i += (i as i64 & -(i as i64)) as usize;
        }
    }
}

/// Integer modulo `MOD` with arithmetic operators; assumes MOD is prime
/// (required by `inv`, which uses Fermat's little theorem).
#[derive(Clone, Copy, Debug)]
struct ModInt(usize);

const MOD: usize = 1_000_000_007;

#[allow(dead_code)]
impl ModInt {
    fn new(n: usize) -> ModInt {
        ModInt(n % MOD)
    }

    fn zero() -> ModInt {
        ModInt(0)
    }

    fn one() -> ModInt {
        ModInt(1)
    }

    /// self^n by binary exponentiation.
    fn pow(self, mut n: usize) -> ModInt {
        let mut ret = ModInt::one();
        let mut x = self;
        while n > 0 {
            if n & 1 == 1 {
                ret *= x;
            }
            x *= x;
            n >>= 1;
        }
        ret
    }

    /// Multiplicative inverse via Fermat: self^(MOD-2). Panics on zero.
    fn inv(self) -> ModInt {
        assert!(self.0 > 0);
        self.pow(MOD - 2)
    }
}

impl std::ops::Add for ModInt {
    type Output = ModInt;
    fn add(self, rhs: ModInt) -> Self::Output {
        let mut d = self.0 + rhs.0;
        if d >= MOD {
            d -= MOD;
        }
        ModInt(d)
    }
}

impl std::ops::AddAssign for ModInt {
    fn add_assign(&mut self, rhs: ModInt) {
        *self = *self + rhs;
    }
}

impl std::ops::Sub for ModInt {
    type Output = ModInt;
    fn sub(self, rhs: ModInt) -> Self::Output {
        // Add MOD first so the subtraction cannot underflow.
        let mut d = self.0 + MOD - rhs.0;
        if d >= MOD {
            d -= MOD;
        }
        ModInt(d)
    }
}

impl std::ops::SubAssign for ModInt {
    fn sub_assign(&mut self, rhs: ModInt) {
        *self = *self - rhs;
    }
}

impl std::ops::Mul for ModInt {
    type Output = ModInt;
    fn mul(self, rhs: ModInt) -> Self::Output {
        ModInt(self.0 * rhs.0 % MOD)
    }
}

impl std::ops::MulAssign for ModInt {
    fn mul_assign(&mut self, rhs: ModInt) {
        *self = *self * rhs;
    }
}

impl std::ops::Neg for ModInt {
    type Output = ModInt;
    fn neg(self) -> Self::Output {
        ModInt(if self.0 == 0 { 0 } else { MOD - self.0 })
    }
}

impl std::fmt::Display for ModInt {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl std::str::FromStr for ModInt {
    type Err = std::num::ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let val = s.parse::<usize>()?;
        Ok(ModInt::new(val))
    }
}

impl From<usize> for ModInt {
    fn from(n: usize) -> Self {
        Self::new(n)
    }
}

// Min-monoid instance for usize, used with SegmentTree for range-minimum queries.
impl Magma for usize {
    fn op(&self, rhs: &Self) -> Self {
        *self.min(rhs)
    }
}

impl Associative for usize {}

impl Unital for usize {
    fn identity() -> Self {
        std::usize::MAX
    }
}

/// A set with a binary operation.
pub trait Magma: Sized + Clone {
    fn op(&self, rhs: &Self) -> Self;
}

/// Marker: the magma's operation is associative.
pub trait Associative: Magma {}

/// A magma with an identity element.
pub trait Unital: Magma {
    fn identity() -> Self;
}

/// Monoid = associative magma with identity; blanket-derived below.
pub trait Monoid: Magma + Associative + Unital {}

impl<T: Magma + Associative + Unital> Monoid for T {}

/// Iterative segment tree over a monoid; point update, range fold.
pub struct SegmentTree<T: Monoid> {
    node: Vec<T>,
    sz: usize,
}

impl<T: Monoid> SegmentTree<T> {
    /// Builds the tree from `vec`, padding up to the next power of two with
    /// the monoid identity.
    pub fn init(vec: Vec<T>) -> Self {
        let mut sz = 1;
        while sz < vec.len() {
            sz *= 2;
        }
        let mut node = vec![T::identity(); sz << 1];
        for i in 0..vec.len() {
            node[i + sz] = vec[i].clone();
        }
        for i in (1..sz).rev() {
            node[i] = node[i << 1].op(&node[(i << 1) + 1]);
        }
        SegmentTree { node: node, sz: sz }
    }

    /// Sets element `i` to `x` and recomputes the ancestors.
    pub fn update(&mut self, i: usize, x: T) {
        let mut idx = i + self.sz;
        self.node[idx] = x;
        while idx > 1 {
            idx = idx >> 1;
            self.node[idx] = self.node[idx << 1].op(&self.node[(idx << 1) + 1]);
        }
    }

    /// Folds the inclusive index range [left, right].
    pub fn fold(&self, left: usize, right: usize) -> T {
        let mut lx = T::identity();
        let mut rx = T::identity();
        let mut l = left + self.sz;
        let mut r = right + self.sz;
        while l < r {
            if (l & 1) == 1 {
                lx = lx.op(&self.node[l]);
            }
            if (r & 1) == 0 {
                rx = self.node[r].op(&rx);
            }
            l = (l + 1) >> 1;
            r = (r - 1) >> 1;
        }
        if l == r {
            lx = lx.op(&self.node[l]);
        }
        lx.op(&rx)
    }
}

// Multiset-on-BTreeMap helper (kept with its original, non-snake-case name so
// existing paths keep working).
#[allow(non_snake_case)]
mod MultiSet {
    // BUG FIX: this import was missing, leaving `BTreeMap` unresolved inside
    // the module.
    use std::collections::BTreeMap;

    /// Multiset semantics layered over a count map.
    trait MultiSet<T> {
        fn add(&mut self, key: T);
        fn del(&mut self, key: T);
        fn least(&self) -> Option<T>;
        fn most(&self) -> Option<T>;
    }

    impl<T: Ord + Clone> MultiSet<T> for BTreeMap<T, usize> {
        fn add(&mut self, key: T) {
            *self.entry(key).or_insert(0) += 1;
        }

        fn del(&mut self, key: T) {
            if let Some(value) = self.get_mut(&key) {
                *value -= 1;
                // Drop the key entirely when its multiplicity reaches zero.
                if *value == 0 {
                    self.remove(&key);
                }
            }
        }

        fn least(&self) -> Option<T> {
            self.iter().next().map(|(k, _)| k.clone())
        }

        fn most(&self) -> Option<T> {
            self.iter().next_back().map(|(k, _)| k.clone())
        }
    }
}
// svd2rust-generated register accessor for the ETH MAC Timestamp Status
// register (ETH_MACTSSR): a read-only u32 with single-bit status flags and two
// multi-bit fields (ATSSTN, ATSNS). Do not hand-edit beyond comments; regenerate
// from the SVD instead.
#[doc = "Register `ETH_MACTSSR` reader"]
pub type R = crate::R<ETH_MACTSSR_SPEC>;
#[doc = "Field `TSSOVF` reader - TSSOVF"]
pub type TSSOVF_R = crate::BitReader;
#[doc = "Field `TSTARGT0` reader - TSTARGT0"]
pub type TSTARGT0_R = crate::BitReader;
#[doc = "Field `AUXTSTRIG` reader - AUXTSTRIG"]
pub type AUXTSTRIG_R = crate::BitReader;
#[doc = "Field `TSTRGTERR0` reader - TSTRGTERR0"]
pub type TSTRGTERR0_R = crate::BitReader;
#[doc = "Field `TXTSSIS` reader - TXTSSIS"]
pub type TXTSSIS_R = crate::BitReader;
#[doc = "Field `ATSSTN` reader - ATSSTN"]
pub type ATSSTN_R = crate::FieldReader;
#[doc = "Field `ATSSTM` reader - ATSSTM"]
pub type ATSSTM_R = crate::BitReader;
#[doc = "Field `ATSNS` reader - ATSNS"]
pub type ATSNS_R = crate::FieldReader;
impl R {
    #[doc = "Bit 0 - TSSOVF"]
    #[inline(always)]
    pub fn tssovf(&self) -> TSSOVF_R {
        TSSOVF_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - TSTARGT0"]
    #[inline(always)]
    pub fn tstargt0(&self) -> TSTARGT0_R {
        TSTARGT0_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - AUXTSTRIG"]
    #[inline(always)]
    pub fn auxtstrig(&self) -> AUXTSTRIG_R {
        AUXTSTRIG_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - TSTRGTERR0"]
    #[inline(always)]
    pub fn tstrgterr0(&self) -> TSTRGTERR0_R {
        TSTRGTERR0_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 15 - TXTSSIS"]
    #[inline(always)]
    pub fn txtssis(&self) -> TXTSSIS_R {
        TXTSSIS_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bits 16:19 - ATSSTN"]
    #[inline(always)]
    pub fn atsstn(&self) -> ATSSTN_R {
        ATSSTN_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bit 24 - ATSSTM"]
    #[inline(always)]
    pub fn atsstm(&self) -> ATSSTM_R {
        ATSSTM_R::new(((self.bits >> 24) & 1) != 0)
    }
    #[doc = "Bits 25:29 - ATSNS"]
    #[inline(always)]
    pub fn atsns(&self) -> ATSNS_R {
        ATSNS_R::new(((self.bits >> 25) & 0x1f) as u8)
    }
}
#[doc = "The Timestamp Status register is present only when the IEEE 1588 Timestamp feature is selected. All bits except Bits\\[27:25\\] gets cleared when the application reads this register.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`eth_mactssr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ETH_MACTSSR_SPEC;
impl crate::RegisterSpec for ETH_MACTSSR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`eth_mactssr::R`](R) reader structure"]
impl crate::Readable for ETH_MACTSSR_SPEC {}
#[doc = "`reset()` method sets ETH_MACTSSR to value 0"]
impl crate::Resettable for ETH_MACTSSR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
#[cfg(test)] mod tests { use super::*; #[test] fn it_works() { assert_eq!(2 + 2, 4); } #[test] fn exploration() { assert_eq!(3+6, 7); } #[test] fn another() { panic!("Make this test fail"); } #[test] fn larger_can_hold_smaller() { let a = Rectangle { width: 10, height: 8 }; let b = Rectangle { width: 7, height: 6 }; assert!(a.can_hold(&b)); } #[test] fn smaller_cannot_hold_larger() { let a = Rectangle { width: 10, height: 8 }; let b = Rectangle { width: 13, height: 6 }; assert!(!a.can_hold(&b)); } #[test] fn it_add_two() { assert_eq!(5, add_two(3)); } #[test] fn greeting_contains_name() { // assert!(greeting("Rust").contains("Rust")); let result = greeting("Rust"); assert!( result.contains("Rust2"), "value is `{}`", result ); } #[test] fn greater_than_100() { Guess::new(130); } #[test] fn it_works2() -> Result<(), String> { if 2 + 2 == 4 { Ok(()) } else { Err(String::from("two plus two does not equal four")) } } } #[derive(Debug)] struct Rectangle { width: u32, height: u32, } impl Rectangle { fn can_hold(&self, other: &Rectangle) -> bool { self.width > other.width && self.height > other.height } } pub fn add_two(a: i32) -> i32 { a + 2 } pub fn greeting(name: &str) -> String { format!("hi {}!", name) } // #[derive(Debug)] pub struct Guess { value: i32, } impl Guess { pub fn new(value: i32) -> Guess { // if value < 1 || value > 100 { // panic!("Guess value must be between 1 and 100, got {}", value); // } if value < 1 { panic!("Guess value must be greater than or equal to 1, got {}", value); } else if value > 100 { panic!("Guess value must be less than or equal to 100, got {}", value); } Guess { value } } }
use std::{borrow::Cow, sync::Arc};

use tiberius::ColumnData;

/// Converts a Rust value into a tiberius `ColumnData` for use as a SQL
/// parameter. `Send + Sync` so parameter lists can cross threads.
pub trait ToSql: Send + Sync {
    fn to_sql(&self) -> ColumnData;
}

/// Produces the typed NULL `ColumnData` for a Rust type; needed so
/// `Option<T>::None` still carries the correct SQL type.
pub trait ToSqlNull {
    fn to_sql_null() -> ColumnData<'static>;
}

// References delegate to the pointee.
impl<T: ToSql> ToSql for &T {
    fn to_sql(&self) -> ColumnData {
        (**self).to_sql()
    }
}

// Option maps Some to the value's conversion and None to the typed NULL.
impl<T> ToSql for Option<T>
where
    T: ToSql + ToSqlNull,
{
    fn to_sql(&self) -> ColumnData {
        match self.as_ref() {
            Some(v) => v.to_sql(),
            None => T::to_sql_null(),
        }
    }
}

// Generates ToSql + ToSqlNull impl pairs. Arms:
//   borrowed  — wraps a borrow in Cow::Borrowed (strings / byte slices)
//   copied    — copies the value out (Copy scalars)
//   deref     — delegates through Deref (smart pointers), with/without a lifetime
//   transform — defers to tiberius' own ToSql (chrono / uuid types)
macro_rules! to_sql {
    (borrowed $t:ty => $n:ident) => {
        impl ToSql for $t {
            #[inline]
            fn to_sql(&self) -> ColumnData {
                ColumnData::$n(Some(Cow::Borrowed(&self)))
            }
        }

        impl ToSqlNull for $t {
            #[inline]
            fn to_sql_null() -> ColumnData<'static> {
                ColumnData::$n(None)
            }
        }
    };
    (copied $t:ty => $n:ident) => {
        impl ToSql for $t {
            #[inline]
            fn to_sql(&self) -> ColumnData {
                ColumnData::$n(Some(*self))
            }
        }

        impl ToSqlNull for $t {
            #[inline]
            fn to_sql_null() -> ColumnData<'static> {
                ColumnData::$n(None)
            }
        }
    };
    (deref<'a> $t:ty, $n:ident) => {
        impl<'a> ToSql for $t {
            #[inline]
            fn to_sql(&self) -> ColumnData {
                (**self).to_sql()
            }
        }

        impl<'a> ToSqlNull for $t {
            #[inline]
            fn to_sql_null() -> ColumnData<'static> {
                ColumnData::$n(None)
            }
        }
    };
    (deref $t:ty, $n:ident) => {
        impl ToSql for $t {
            #[inline]
            fn to_sql(&self) -> ColumnData {
                (**self).to_sql()
            }
        }

        impl ToSqlNull for $t {
            #[inline]
            fn to_sql_null() -> ColumnData<'static> {
                ColumnData::$n(None)
            }
        }
    };
    (transform $t:ty) => {
        impl ToSql for $t {
            #[inline]
            fn to_sql(&self) -> ColumnData {
                tiberius::ToSql::to_sql(self)
            }
        }

        impl ToSqlNull for $t {
            #[inline]
            fn to_sql_null() -> ColumnData<'static> {
                tiberius::ToSql::to_sql(&Option::<$t>::None)
            }
        }
    };
}

// String/binary types wrap a borrow.
to_sql!(borrowed &[u8] => Binary);
to_sql!(borrowed &str => String);
to_sql!(borrowed String => String);
to_sql!(borrowed Vec<u8> => Binary);
to_sql!(borrowed [u8] => Binary);
to_sql!(borrowed str => String);
// Copy scalars.
to_sql!(copied bool => Bit);
to_sql!(copied f32 => F32);
to_sql!(copied f64 => F64);
to_sql!(copied i16 => I16);
to_sql!(copied i32 => I32);
to_sql!(copied i64 => I64);
to_sql!(copied u8 => U8);
// Smart pointers delegate to the pointee's impl.
to_sql!(deref Arc<[u8]>, Binary);
to_sql!(deref Arc<str>, String);
to_sql!(deref Box<[u8]>, Binary);
to_sql!(deref Box<str>, String);
to_sql!(deref<'a> Cow<'a, [u8]>, Binary);
to_sql!(deref<'a> Cow<'a, str>, String);
// Date/time and UUID use tiberius' own conversions.
to_sql!(transform chrono::DateTime<chrono::FixedOffset>);
to_sql!(transform chrono::DateTime<chrono::Utc>);
to_sql!(transform chrono::NaiveDate);
to_sql!(transform chrono::NaiveDateTime);
to_sql!(transform chrono::NaiveTime);
to_sql!(transform uuid::Uuid);

// Decimal support is feature-gated; NULL maps to a Numeric NULL.
#[cfg(feature = "dec19x5")]
impl ToSql for dec19x5crate::Decimal {
    #[inline]
    fn to_sql(&self) -> ColumnData {
        tiberius::ToSql::to_sql(self)
    }
}

#[cfg(feature = "dec19x5")]
impl ToSqlNull for dec19x5crate::Decimal {
    #[inline]
    fn to_sql_null() -> ColumnData<'static> {
        ColumnData::Numeric(None)
    }
}
use reon::isa::IsaResult; use reon::isa::encoding::{InstructionType, EncodeCursor, DecodeCursor, DecodeContext}; use reon::isa::w65816::*; #[inline(never)] fn decode(r: &mut DecodeCursor<'_>, ctx: &DecodeContext) -> IsaResult<Instruction> { Instruction::decode(r, &ctx) } #[inline(never)] fn encode(i: &Instruction, w: &mut EncodeCursor<'_>) -> IsaResult<()> { i.encode(w) } fn disasm(data: &[u8]) { let ctx = DecodeContext { emulation_mode: false, a_8_bit: false, xy_8_bit: false }; let mut idx = 0; let mut cursor = DecodeCursor::new(data); let mut vec = Vec::new(); while idx < data.len() { let instr = decode(&mut cursor, &ctx).unwrap(); println!("$00:{:04x} {:?}", 0x8000+idx, instr); idx += instr.instruction_len() as usize; vec.push(instr); } let mut buf = Vec::new(); let mut cursor = EncodeCursor::new(&mut buf); for instr in vec { encode(&instr, &mut cursor).unwrap(); } println!(); println!("Input and output equal: {}", buf.as_slice() == data); println!("Instr size: {}/{}", std::mem::size_of::<Instruction>(), std::mem::align_of::<Instruction>()); } fn main() { disasm(include_bytes!("../tests/arch-65816.sfc")); }
fn main() { println!("Hello from RPlag!"); }
use serde::{Deserialize, Serialize};
use std::{
    collections::{HashMap, HashSet},
    hash::{Hash, Hasher},
};

// serde skip_serializing_if helper: omit `false` booleans from output.
fn is_false(value: &bool) -> bool {
    !value
}

/// Serialized description of one Rust type (struct or enum) in the Ignite
/// type system, plus its namespace, generic parameters, and attribute metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct IgniteTypeDefinition {
    #[serde(default, skip_serializing_if = "String::is_empty")]
    pub namespace: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub generic_args: Vec<String>,
    pub variant: IgniteTypeVariant,
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub meta: HashMap<String, IgniteAttribMeta>,
    #[serde(default, skip_serializing_if = "is_false")]
    pub is_proxy: bool,
}

impl IgniteTypeDefinition {
    /// Name of the described type (delegates to the variant).
    pub fn name(&self) -> String {
        self.variant.name()
    }

    /// Names of all other types this definition refers to.
    pub fn referenced(&self) -> HashSet<String> {
        self.variant.referenced()
    }
}

// Manual Hash: `meta` is excluded because IgniteAttribMeta holds f64 values
// and does not implement Hash; `is_proxy` is likewise left out of identity.
impl Hash for IgniteTypeDefinition {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        self.namespace.hash(state);
        self.generic_args.hash(state);
        self.variant.hash(state);
    }
}

/// Shape of a type definition: unit/named/tuple struct, or enum.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub enum IgniteTypeVariant {
    StructUnit(String),
    StructNamed(IgniteNamed),
    StructUnnamed(IgniteUnnamed),
    Enum(IgniteEnum),
}

impl IgniteTypeVariant {
    pub fn name(&self) -> String {
        match self {
            Self::StructUnit(name) => name.clone(),
            Self::StructNamed(value) => value.name.clone(),
            Self::StructUnnamed(value) => value.name.clone(),
            Self::Enum(value) => value.name.clone(),
        }
    }

    pub fn referenced(&self) -> HashSet<String> {
        match self {
            Self::StructUnit(_) => HashSet::new(),
            Self::StructNamed(value) => value.referenced(),
            Self::StructUnnamed(value) => value.referenced(),
            Self::Enum(value) => value.referenced(),
        }
    }
}

/// A struct (or enum variant) with named fields.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub struct IgniteNamed {
    pub name: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub fields: Vec<IgniteNamedField>,
}

impl IgniteNamed {
    /// Union of type names referenced by all fields.
    pub fn referenced(&self) -> HashSet<String> {
        self.fields
            .iter()
            .flat_map(|field| field.referenced())
            .collect()
    }
}

/// A struct (or enum variant) with positional (tuple) fields.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub struct IgniteUnnamed {
    pub name: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub fields: Vec<IgniteUnnamedField>,
}

impl IgniteUnnamed {
    /// Union of type names referenced by all fields.
    pub fn referenced(&self) -> HashSet<String> {
        self.fields
            .iter()
            .flat_map(|field| field.referenced())
            .collect()
    }
}

/// A named struct field with its type, optional remapping, and metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct IgniteNamedField {
    pub name: String,
    pub typename: IgniteType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub mapping: Option<String>,
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub meta: HashMap<String, IgniteAttribMeta>,
}

impl IgniteNamedField {
    /// When a `mapping` override is present it wins over the declared type:
    /// only the last dot-separated segment of the mapping path is recorded.
    pub fn referenced(&self) -> HashSet<String> {
        if let Some(mapping) = &self.mapping {
            let mut result = HashSet::new();
            if let Some(mapping) = mapping.split('.').last() {
                result.insert(mapping.to_owned());
            }
            result
        } else {
            self.typename.referenced()
        }
    }
}

// Manual Hash: `mapping` and `meta` excluded (meta contains f64; mapping is
// presumably not part of field identity — TODO confirm).
impl Hash for IgniteNamedField {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        self.name.hash(state);
        self.typename.hash(state);
    }
}

/// A positional struct field with its type, optional remapping, and metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct IgniteUnnamedField {
    pub typename: IgniteType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub mapping: Option<String>,
    #[serde(default, skip_serializing_if = "HashMap::is_empty")]
    pub meta: HashMap<String, IgniteAttribMeta>,
}

impl IgniteUnnamedField {
    /// Same mapping-override rule as `IgniteNamedField::referenced`.
    pub fn referenced(&self) -> HashSet<String> {
        if let Some(mapping) = &self.mapping {
            let mut result = HashSet::new();
            if let Some(mapping) = mapping.split('.').last() {
                result.insert(mapping.to_owned());
            }
            result
        } else {
            self.typename.referenced()
        }
    }
}

// Manual Hash mirroring IgniteNamedField (no name to hash here).
impl Hash for IgniteUnnamedField {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        self.typename.hash(state);
    }
}

/// An enum definition with its variants.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub struct IgniteEnum {
    pub name: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub variants: Vec<IgniteVariant>,
}

impl IgniteEnum {
    /// Union of type names referenced by all variants.
    pub fn referenced(&self) -> HashSet<String> {
        self.variants
            .iter()
            .flat_map(|variant| variant.referenced())
            .collect()
    }
}

/// One enum variant: unit, struct-like, or tuple-like.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub enum IgniteVariant {
    Unit(String),
    Named(IgniteNamed),
    Unnamed(IgniteUnnamed),
}

impl IgniteVariant {
    pub fn referenced(&self) -> HashSet<String> {
        match self {
            Self::Unit(_) => HashSet::new(),
            Self::Named(value) => value.referenced(),
            Self::Unnamed(value) => value.referenced(),
        }
    }
}

/// A type expression: unit, a plain named type, a tuple, a fixed-size array,
/// or a generic instantiation.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub enum IgniteType {
    Unit,
    Atom(String),
    Tuple(Vec<IgniteType>),
    Array(IgniteTypeArray),
    Generic(IgniteTypeGeneric),
}

impl IgniteType {
    /// All type names appearing anywhere in this type expression.
    pub fn referenced(&self) -> HashSet<String> {
        match self {
            Self::Unit => HashSet::new(),
            Self::Atom(name) => {
                let mut result = HashSet::new();
                result.insert(name.clone());
                result
            }
            Self::Tuple(value) => value.iter().flat_map(|item| item.referenced()).collect(),
            Self::Array(value) => value.referenced(),
            Self::Generic(value) => value.referenced(),
        }
    }
}

/// Fixed-size array type: element type plus length.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub struct IgniteTypeArray {
    pub typename: Box<IgniteType>,
    pub size: usize,
}

impl IgniteTypeArray {
    pub fn referenced(&self) -> HashSet<String> {
        self.typename.referenced()
    }
}

/// Generic type instantiation, e.g. `Vec<T>`: base name plus arguments.
#[derive(Debug, Serialize, Deserialize, Hash)]
pub struct IgniteTypeGeneric {
    pub name: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub arguments: Vec<IgniteType>,
}

impl IgniteTypeGeneric {
    /// The base name plus everything referenced by the arguments.
    pub fn referenced(&self) -> HashSet<String> {
        std::iter::once(self.name.clone())
            .chain(self.arguments.iter().flat_map(|arg| arg.referenced()))
            .collect()
    }
}

/// Value of an attribute metadata entry. Holds f64, hence no Hash derive —
/// which is why the containing types exclude `meta` from their Hash impls.
#[derive(Debug, Serialize, Deserialize)]
pub enum IgniteAttribMeta {
    None,
    Bool(bool),
    String(String),
    Integer(i64),
    Float(f64),
}

impl Default for IgniteAttribMeta {
    fn default() -> Self {
        Self::None
    }
}

/// Implemented (typically via derive/codegen) by types that can describe
/// themselves as an `IgniteTypeDefinition`.
pub trait Ignite {
    fn generate_type_definition() -> IgniteTypeDefinition;
}
use super::SnapshotsState;

mod builder;
mod change;
mod init;

// Helper struct to help with managing changes to state
pub struct StateBuilder<'a> {
    state: &'a mut SnapshotsState, // The state to modify
}

#[cfg(test)]
mod tests {
    use crate::hash::Hash;
    use crate::snapshots::SnapshotsState;
    use crate::AtomicUpdate;
    use testspace::TestSpace;

    // Test-only equality: compares just the working/latest snapshot hashes,
    // deliberately ignoring root/recent/end lists.
    impl PartialEq for SnapshotsState {
        fn eq(&self, other: &Self) -> bool {
            if self.working_snapshot != other.working_snapshot {
                return false;
            }
            if self.latest_snapshot != other.latest_snapshot {
                return false;
            }
            return true;
        }
    }

    #[test]
    fn test_create_new_state() {
        // Tests that the snapshot state file is correctly initialized and loaded
        let mut ts = TestSpace::new();
        ts.create_dir("snapshots");
        let base_path = ts.get_path().to_path_buf();
        // Note: This is inconsistent, new will assume a directory and filename while load_state assumes nothing
        let initial =
            SnapshotsState::new(base_path.as_path()).expect("Failed to initialize the repository");
        let loaded = SnapshotsState::load(base_path.as_path())
            .expect("Failed to load the repository that was just initialized");
        assert_eq!(initial, loaded);
    }

    #[test]
    fn test_get_set_latest_hash() {
        // 64-byte fixture: the pattern AB 11 0A FF repeated 16 times.
        let known_hash: [u8; 64] = [
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
        ];
        let latest_hash = Hash::from(known_hash.as_ref());
        let ts = TestSpace::new();
        let mut ts2 = ts.create_child();
        ts2.create_dir("snapshots");
        let working_path = ts.get_path();
        let repository_path = ts2.get_path();
        // Shadowing: `known_hash` is now a Hash copy kept for the final assert.
        let known_hash = latest_hash.clone();
        // Scope ensures the updater/state are dropped (files flushed) before reload.
        {
            let mut au = AtomicUpdate::new(repository_path, working_path)
                .expect("Failed to initialize atomic updater");
            let mut initial =
                SnapshotsState::new(repository_path).expect("Failed to initialize state");
            initial
                .change_state(&mut au, |state| {
                    state.set_latest_snapshot(Some(latest_hash));
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to update files atomically");
        }
        let result = SnapshotsState::load(repository_path).expect("Failed to load state");
        let result_hash = result
            .get_latest_snapshot()
            .expect("Get latest snapshot returned none after a hash was added to latest snapshot");
        assert_eq!(result_hash, known_hash);
    }

    #[test]
    fn test_get_set_working_hash() {
        // Same round-trip as above but for the *working* snapshot hash.
        let known_hash: [u8; 64] = [
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
        ];
        let latest_hash = Hash::from(known_hash.as_ref());
        let ts = TestSpace::new();
        let mut ts2 = ts.create_child();
        ts2.create_dir("snapshots");
        let working_path = ts.get_path();
        let repository_path = ts2.get_path();
        let known_hash = latest_hash.clone();
        {
            let mut initial =
                SnapshotsState::new(repository_path).expect("Failed to initialize state");
            let mut au = AtomicUpdate::new(repository_path, working_path)
                .expect("Failed to initialize atomic updater");
            initial
                .change_state(&mut au, |state| {
                    state.set_working_snapshot(Some(latest_hash));
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to update files atomically");
        }
        let result = SnapshotsState::load(repository_path).expect("Failed to load state");
        let result_hash = result.get_working_snapshot().unwrap();
        assert_eq!(result_hash, known_hash);
    }

    #[test]
    fn test_add_remove_root_snapshots() {
        let known_hash: [u8; 64] = [
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
        ];
        let latest_hash = Hash::from(known_hash.as_ref());
        let ts = TestSpace::new();
        let mut ts2 = ts.create_child();
        ts2.create_dir("snapshots");
        let working_path = ts.get_path();
        let repository_path = ts2.get_path();
        let known_hash = latest_hash.clone();
        // Add a root node and commit.
        {
            let mut initial =
                SnapshotsState::new(repository_path).expect("Failed to initialize state");
            let mut au = AtomicUpdate::new(repository_path, working_path)
                .expect("Failed to initialize atomic updater");
            initial
                .change_state(&mut au, |state| {
                    state.add_root_node(known_hash.clone());
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to complete atomic update");
        }
        // Reload from disk and verify the root node persisted.
        {
            let state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            assert_eq!(state.root_snapshots.len(), 1);
            assert_eq!(state.root_snapshots[0], latest_hash);
        }
        // Remove the root node and commit.
        // NOTE(review): `AtomicUpdate::load(working_path, repository_path)` takes its
        // arguments in the opposite order to `AtomicUpdate::new(repository_path,
        // working_path)` above — confirm both signatures; one of these calls may have
        // the paths swapped.
        {
            let mut state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            let mut au = AtomicUpdate::load(working_path, repository_path);
            state
                .change_state(&mut au, |state| {
                    state.remove_root_node(&known_hash);
                    Ok(())
                })
                .expect("Failed to change state and remove root snapshot");
            au.complete().expect("Failed to complete atomic update");
            assert_eq!(state.root_snapshots.len(), 0);
        }
        // Verify the removal persisted.
        {
            let state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            assert_eq!(state.root_snapshots.len(), 0);
        }
    }

    #[test]
    fn test_add_recent_hashes() {
        // Generate 15 random hashes
        let mut test_hashes = Vec::new();
        for _ in 0..15 {
            let hash = Hash::generate_random_hash();
            test_hashes.push(hash);
        }
        // NOTE(review): unlike the other tests, no `create_dir("snapshots")` is
        // called on ts2 here — confirm whether SnapshotsState::new creates it.
        let ts = TestSpace::new();
        let ts2 = ts.create_child();
        let working_path = ts.get_path();
        let repository_path = ts2.get_path();
        {
            let mut initial =
                SnapshotsState::new(repository_path).expect("Failed to initialize state");
            let mut au = AtomicUpdate::new(repository_path, working_path)
                .expect("Failed to initialize atomic updater");
            // Add the first 10 hashes to the recent list
            initial
                .change_state(&mut au, |state| {
                    for hash in test_hashes.as_slice()[..10].iter() {
                        state.add_recent_snapshot(hash.clone());
                    }
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to complete atomic update");
            assert_eq!(initial.recent_snapshots.len(), 10);
        }
        {
            let mut state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            assert_eq!(state.recent_snapshots.len(), 10);
            // Check for those 10 hashes
            for index in 0..state.recent_snapshots.len() {
                assert_eq!(state.recent_snapshots[index], test_hashes[index]);
            }
            let mut au = AtomicUpdate::load(working_path, repository_path);
            state
                .change_state(&mut au, |state_data| {
                    // Add the final 5 hashes
                    for hash in test_hashes.as_slice()[10..].iter() {
                        state_data.add_recent_snapshot(hash.clone());
                    }
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to complete atomic update");
        }
        {
            let state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            // Check state after removing oldest 5 - so fifth test hash should be first hash in recent
            // (implies the recent list is capped at 10, dropping oldest entries).
            for index in 5..state.recent_snapshots.len() {
                assert_eq!(state.recent_snapshots[index - 5], test_hashes[index]);
            }
        }
    }

    #[test]
    fn add_remove_end_node_test() {
        // Mirror of test_add_remove_root_snapshots, exercising end nodes instead.
        let known_hash: [u8; 64] = [
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11,
            0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
            0xAB, 0x11, 0x0A, 0xFF, 0xAB, 0x11, 0x0A, 0xFF,
        ];
        let latest_hash = Hash::from(known_hash.as_ref());
        let ts = TestSpace::new();
        let mut ts2 = ts.create_child();
        ts2.create_dir("snapshots");
        let working_path = ts.get_path();
        let repository_path = ts2.get_path();
        let known_hash = latest_hash.clone();
        {
            let mut initial =
                SnapshotsState::new(repository_path).expect("Failed to initialize state");
            let mut au = AtomicUpdate::new(repository_path, working_path)
                .expect("Failed to initialize atomic updater");
            initial
                .change_state(&mut au, |state| {
                    state.add_end_node(known_hash.clone());
                    Ok(())
                })
                .expect("Failed to change state");
            au.complete().expect("Failed to complete atomic update");
        }
        {
            let state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            assert_eq!(state.end_snapshots.len(), 1);
            assert_eq!(state.end_snapshots[0], latest_hash);
        }
        {
            let mut state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            let mut au = AtomicUpdate::load(working_path, repository_path);
            state
                .change_state(&mut au, |state| {
                    state.remove_end_node(&known_hash);
                    Ok(())
                })
                .expect("Failed to change state and remove root snapshot");
            au.complete().expect("Failed to complete atomic update");
            assert_eq!(state.end_snapshots.len(), 0);
        }
        {
            let state = SnapshotsState::parse_state_file(
                repository_path
                    .join(SnapshotsState::SNAPSHOTS_PATH)
                    .join(SnapshotsState::STATE_FILE_NAME),
            )
            .expect("Failed to load state");
            assert_eq!(state.end_snapshots.len(), 0);
        }
    }
}
// Raw lexer tokens produced by the `logos` derive; `to_token` converts them
// into the parser-facing `Token`, attaching payloads (number values,
// identifier names) where needed.  The keywords are French:
// "entier" = integer, "lire"/"ecrire" = read/write, "retour" = return,
// "si"/"alors"/"sinon" = if/then/else, "tantque"/"faire"/"pour" = while/do/for.
use crate::lexer::UndefinedBehavior;
use crate::token::Token;
use failure::Error;
use logos::Logos;

#[derive(Logos, Debug, PartialEq, Copy, Clone)]
pub enum LogosToken {
    // Emitted when the lexer reaches the end of the input.
    #[end]
    End,
    // Emitted when no other rule matches the input.
    #[error]
    Error,
    #[regex = "[0-9]+"]
    Number,
    #[regex = "[a-zA-Z_$][a-zA-Z_$0-9]*"]
    Id,
    // Line comments start with `#` and run to end of line.
    #[regex = "#.*"]
    Comment,
    #[token = ","]
    Comma,
    #[token = ";"]
    Semicolon,
    // Types
    #[token = "entier"]
    IntegerType,
    // Predefined functions
    #[token = "lire"]
    ReadFunction,
    #[token = "ecrire"]
    WriteFunction,
    // Instructions
    #[token = "retour"]
    Return,
    #[token = "si"]
    If,
    #[token = "alors"]
    Then,
    #[token = "sinon"]
    Else,
    #[token = "tantque"]
    While,
    #[token = "faire"]
    Do,
    #[token = "pour"]
    For,
    // Brackets
    #[token = "("]
    OpenParenthesis,
    #[token = ")"]
    CloseParenthesis,
    #[token = "{"]
    OpenCurlyBracket,
    #[token = "}"]
    CloseCurlyBracket,
    #[token = "["]
    OpenSquareBracket,
    #[token = "]"]
    CloseSquareBracket,
    // Operators
    #[token = "+"]
    Addition,
    #[token = "-"]
    Subtraction,
    #[token = "*"]
    Multiplication,
    #[token = "/"]
    Division,
    #[token = "<"]
    LessThan,
    #[token = "="]
    Equal,
    #[token = "&"]
    And,
    #[token = "|"]
    Or,
    #[token = "!"]
    Not,
}

impl LogosToken {
    /// Converts this lexer token into a parser `Token`, using `token` (the
    /// matched source slice) to build the payload of `Number` and `Id`.
    ///
    /// # Errors
    ///
    /// Returns an error for the `Error` variant (undefined input) or when a
    /// `Number` slice fails to parse as an integer.
    pub fn to_token(self, token: &str) -> Result<Token, Error> {
        use LogosToken::*;
        use Token as T;
        let token = match self {
            // `End` and `Comment` are treated as unreachable here: this
            // conversion is never invoked for them.
            End => unreachable!(),
            Error => return Err(UndefinedBehavior {}.into()),
            Number => T::Number(token.parse()?),
            Id => T::Id(token.to_string()),
            Comment => unreachable!(),
            Comma => T::Comma,
            Semicolon => T::Semicolon,
            IntegerType => T::IntegerType,
            ReadFunction => T::ReadFunction,
            WriteFunction => T::WriteFunction,
            Return => T::Return,
            If => T::If,
            Then => T::Then,
            Else => T::Else,
            While => T::While,
            Do => T::Do,
            For => T::For,
            OpenParenthesis => T::OpenParenthesis,
            CloseParenthesis => T::CloseParenthesis,
            OpenCurlyBracket => T::OpenCurlyBracket,
            CloseCurlyBracket => T::CloseCurlyBracket,
            OpenSquareBracket => T::OpenSquareBracket,
            CloseSquareBracket => T::CloseSquareBracket,
            Addition => T::Addition,
            Subtraction => T::Subtraction,
            Multiplication => T::Multiplication,
            Division => T::Division,
            LessThan => T::LessThan,
            Equal => T::Equal,
            And => T::And,
            Or => T::Or,
            Not => T::Not,
        };
        Ok(token)
    }
}
/*
 Copyright (c) 2023 Uber Technologies, Inc.

 <p>Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 except in compliance with the License. You may obtain a copy of the License at
 <p>http://www.apache.org/licenses/LICENSE-2.0

 <p>Unless required by applicable law or agreed to in writing, software distributed under the
 License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 express or implied. See the License for the specific language governing permissions and
 limitations under the License.
*/

//! Central collection of language-identifier constants and the default-value
//! helper functions referenced by the configuration types' serde
//! `#[serde(default = "...")]` attributes (arguments, rules, edges, filters).

use std::collections::{HashMap, HashSet};

use glob::Pattern;

use super::{
  capture_group_patterns::CGPattern, filter::Filter, language::PiranhaLanguage,
  outgoing_edges::OutgoingEdges, rule::Rule, rule_graph::RuleGraph,
};

// Language identifiers (extension-style strings).
pub const JAVA: &str = "java";
pub const JAVA_CS: &str = "java_cs";
pub const KOTLIN: &str = "kt";
pub const GO: &str = "go";
pub const PYTHON: &str = "py";
pub const SWIFT: &str = "swift";
pub const TYPESCRIPT: &str = "ts";
pub const TSX: &str = "tsx";
pub const THRIFT: &str = "thrift";
pub const STRINGS: &str = "strings";
pub const TS_SCHEME: &str = "scm"; // We support scheme files that contain tree-sitter query
pub const SCALA: &str = "scala";

// Prefixes selecting the matcher backend for a query string.
pub const REGEX_QUERY_PREFIX: &str = "rgx ";
pub const CONCRETE_SYNTAX_QUERY_PREFIX: &str = "cs ";

#[cfg(test)] //FIXME: Remove this hack by not passing PiranhaArguments to SourceCodeUnit
pub(crate) const UNUSED_CODE_PATH: &str = "/dev/null";

// --- Defaults for PiranhaArguments fields ---

pub fn default_number_of_ancestors_in_parent_scope() -> u8 {
  4
}

pub fn default_language() -> String {
  JAVA.to_string()
}

pub fn default_substitutions() -> Vec<(String, String)> {
  vec![]
}

pub fn default_delete_file_if_empty() -> bool {
  true
}

// Number of lines of comment above a deleted node to also clean up.
pub fn default_cleanup_comments_buffer() -> i32 {
  2
}

pub fn default_cleanup_comments() -> bool {
  false
}

pub fn default_global_tag_prefix() -> String {
  "GLOBAL_TAG.".to_string()
}

pub fn default_dry_run() -> bool {
  false
}

pub fn default_paths_to_codebase() -> Vec<String> {
  Vec::new()
}

pub fn default_code_snippet() -> String {
  String::new()
}

pub fn default_include() -> Vec<Pattern> {
  Vec::new()
}

pub fn default_exclude() -> Vec<Pattern> {
  Vec::new()
}

pub fn default_path_to_configurations() -> String {
  String::new()
}

pub fn default_path_to_output_summaries() -> Option<String> {
  None
}

pub fn default_piranha_language() -> PiranhaLanguage {
  PiranhaLanguage::default()
}

pub fn default_delete_consecutive_new_lines() -> bool {
  false
}

// --- Defaults for Rule fields ---

pub(crate) fn default_query() -> CGPattern {
  CGPattern::new(String::new())
}

pub fn default_replace_node() -> String {
  String::new()
}

// `u8::MAX` acts as the "unset" sentinel for the replace index.
pub fn default_replace_idx() -> u8 {
  u8::MAX
}

pub fn default_replace() -> String {
  String::new()
}

pub fn default_rule_graph_map() -> HashMap<String, Vec<(String, String)>> {
  HashMap::new()
}

pub(crate) fn default_holes() -> HashSet<String> {
  HashSet::new()
}

pub(crate) fn default_groups() -> HashSet<String> {
  HashSet::new()
}

pub(crate) fn default_filters() -> HashSet<Filter> {
  HashSet::new()
}

pub(crate) fn default_rules() -> Vec<Rule> {
  Vec::new()
}

pub(crate) fn default_edges() -> Vec<OutgoingEdges> {
  vec![]
}

// --- Defaults for Filter fields ---

pub(crate) fn default_not_contains_queries() -> Vec<CGPattern> {
  Vec::new()
}

pub(crate) fn default_contains_query() -> CGPattern {
  CGPattern::new(String::from(""))
}

pub(crate) fn default_contains_at_least() -> u32 {
  1
}

// `u32::MAX` sentinels mean "no upper bound" for the count-style filters.
pub(crate) fn default_contains_at_most() -> u32 {
  u32::MAX
}

pub(crate) fn default_child_count() -> u32 {
  u32::MAX
}

pub(crate) fn default_sibling_count() -> u32 {
  u32::MAX
}

pub(crate) fn default_enclosing_node() -> CGPattern {
  CGPattern::new(String::new())
}

pub(crate) fn default_not_enclosing_node() -> CGPattern {
  CGPattern::new(String::new())
}

pub(crate) fn default_rule_name() -> String {
  String::new()
}

pub(crate) fn default_rule_graph() -> RuleGraph {
  RuleGraph::default()
}

pub(crate) fn default_is_seed_rule() -> bool {
  true
}

pub(crate) fn default_allow_dirty_ast() -> bool {
  false
}

pub(crate) fn default_graph_validation() -> bool {
  true
}

pub(crate) fn default_experiment_dyn() -> bool {
  false
}
// Type inference
//
// NOTE(review): this file is written in a pre-1.0 Rust dialect (`uint`,
// `find_str`, `slice_to`, `move_iter`, old `std::io`); comments below
// document intent only — do not modernize without a compiler of that era.
use racer::{Match};
use racer::util::{to_refs};
use racer::nameres::{do_local_search_with_string};
use racer::ast;
use racer::codeiter;
use racer::scopes;
use racer;
use std::io::File;
use std::io::BufferedReader;
use std::str;
use racer::{ExactMatch};
use racer::util::txt_matches;

// Returns the byte offset of the first `{` in `src`, i.e. where the function
// body starts. Panics if there is no `{`.
fn find_start_of_function_body(src: &str) -> uint {
    // TODO: this should ignore anything inside parens so as to skip the arg list
    return src.find_str("{").unwrap();
}

// Removes the body of the statement (anything in the braces {...}), leaving just
// the header
// TODO: this should skip parens (e.g. function arguments)
pub fn generate_skeleton_for_parsing(src: &str) -> String {
    let mut s = String::new();
    let n = src.find_str("{").unwrap();
    // Keep everything up to and including the opening brace, then close it.
    s.push_str(src.slice_to(n+1));
    s.push_str("};");
    return s;
}

// True when the first parameter of the fn header in `blob` is `self`
// (i.e. the fn is a method). False when there is no `(` at all.
pub fn first_param_is_self(blob: &str) -> bool {
    return blob.find_str("(").map_or(false, |start| {
        let end = scopes::find_closing_paren(blob, start+1);
        debug!("PHIL searching fn args: |{}| {}",blob.slice(start+1,end), txt_matches(ExactMatch, "self", blob.slice(start+1,end)));
        return txt_matches(ExactMatch, "self", blob.slice(start+1,end));
    });
}

#[test]
fn generates_skeleton_for_mod() {
    let src = "mod foo { blah };";
    let out = generate_skeleton_for_parsing(src);
    assert_eq!("mod foo {};", out.as_slice());
}

// Resolves the declared type of a function argument `m` by re-parsing the
// enclosing fn header and searching the type namespace for the arg's type path.
fn get_type_of_fnarg(m: &Match, msrc: &str) -> Option<Match> {
    debug!("PHIL get type of fn arg {:?}",m);
    let point = scopes::find_stmt_start(msrc, m.point).unwrap();
    // Only the first statement (the one containing m.point) is examined:
    // the loop returns unconditionally at the end of its first iteration.
    for (start,end) in codeiter::iter_stmts(msrc.slice_from(point)) {
        let blob = msrc.slice(point+start,point+end);
        // wrap in "impl blah { }" so that methods get parsed correctly too
        let mut s = String::new();
        s.push_str("impl blah {");
        let impl_header_len = s.len();
        s.push_str(blob.slice_to(find_start_of_function_body(blob)+1));
        s.push_str("}}");
        let fn_ = ast::parse_fn(s);
        let mut result = None;
        for (_/*name*/, pos, ty_) in fn_.args.move_iter() {
            // Translate the arg position back from the wrapped snippet into
            // the original source coordinates before comparing with m.point.
            let globalpos = pos - impl_header_len + start + point;
            if globalpos == m.point && ty_.len() != 0 {
                let v = to_refs(&ty_);
                let fqn = v.as_slice();
                result = do_local_search_with_string(fqn,
                                           &m.filepath,
                                           globalpos,
                                           racer::ExactMatch,
                                           racer::TypeNamespace,  // just the type namespace
                                           ).nth(0);
            }
        }
        return result;
    }
    None
}

// Resolves the type of a `let` binding by parsing the let statement and
// returning the match for its initializer's type, if any.
fn get_type_of_let_expr(m: &Match, msrc: &str) -> Option<Match> {
    // ASSUMPTION: this is being called on a let decl
    let opoint = scopes::find_stmt_start(msrc, m.point);
    let point = opoint.unwrap();
    let src = msrc.slice_from(point);

    // As above, only the first statement is ever inspected.
    for (start,end) in codeiter::iter_stmts(src) {
        let blob = src.slice(start,end);
        return ast::parse_let(String::from_str(blob), m.filepath.clone(), m.point, true).map_or(None, |letres|{
            let inittype = letres.inittype;
            debug!("PHIL parse let result {:?}", inittype);
            inittype.as_ref().map(|m|{
                debug!("PHIL parse let type is {}",m.matchstr);
            });
            return inittype;
        });
    }
    return None;
}

// Dispatches on the kind of match: lets and fn args need inference; struct,
// enum, function and module matches are already their own "type".
pub fn get_type_of_match(m: Match, msrc: &str) -> Option<Match> {
    debug!("PHIL get_type_of match {:?} {} ",m, m.matchstr);

    return match m.mtype {
        racer::Let => get_type_of_let_expr(&m, msrc),
        racer::FnArg => get_type_of_fnarg(&m, msrc),
        racer::Struct => Some(m),
        racer::Enum => Some(m),
        racer::Function => Some(m),
        racer::Module => Some(m),
        _ => { debug!("!!! WARNING !!! Can't get type of {:?}",m.mtype); None }
    }
}

// Reads the file containing `fnmatch`, re-parses the fn header (wrapped in a
// dummy impl so methods parse) and returns the path of its return type.
pub fn get_return_type_of_function(fnmatch: &Match) -> Vec<String> {
    let filetxt = BufferedReader::new(File::open(&fnmatch.filepath)).read_to_end().unwrap();
    let src = str::from_utf8(filetxt.as_slice()).unwrap();
    let point = scopes::find_stmt_start(src, fnmatch.point).unwrap();

    //debug!("get_return_type_of_function |{}|",src.slice_from(point));
    let outputpath = src.slice_from(point).find_str("{").map(|n|{
        // wrap in "impl blah { }" so that methods get parsed correctly too
        let mut decl = String::new();
        decl.push_str("impl blah {");
        decl.push_str(src.slice(point, point+n+1));
        decl.push_str("}}");
        debug!("PHIL: passing in |{}|",decl);
        return ast::parse_fn_output(decl);
    }).unwrap_or(Vec::new());
    return outputpath;
}
extern crate async_std; pub mod ingestor; pub mod rotator; pub mod utils; use ingestor::Ingestor; use std::env; use std::time::Instant; #[async_std::main] async fn main() { let now = Instant::now(); Ingestor::start(env::args().skip(1).collect()).await; println!("time taken - {} secs", now.elapsed().as_secs()); }
// Dan Obermiller <dobermiller16@cmc.edu>
// Starter code for HMC's MemorySafe, week 1
//
// The implementation of SinglyLinkedList

use std::mem;
use Stack;

// One link of the list: the payload plus an owning pointer to the next node
// (`None` marks the end of the list).
struct Node<T> {
    pointer: Option<Box<Node<T>>>,
    data: T,
}

// A singly linked list with O(1) push/pop at the head and a cached length.
pub struct SinglyLinkedList<T> {
    head: Option<Box<Node<T>>>,
    size: usize,
}

impl<T: Eq> Stack<T> for SinglyLinkedList<T> {
    fn new() -> Self {
        SinglyLinkedList{head: None, size: 0}
    }

    // Prepends `item`: the old head becomes the new node's successor.
    fn push_front(&mut self, item: T) {
        let old = mem::replace(&mut self.head, None);
        self.head = Some(Box::new(Node { pointer: old, data: item }));
        self.size += 1;
    }

    // Detaches the head node and returns its payload, if any.
    fn pop_front(&mut self) -> Option<T> {
        self.head.take().map(|mut n| {
            assert!(self.size > 0);
            self.head = n.pointer.take();
            self.size -= 1;
            n.data
        })
    }

    // Borrows the payload of the head node without removing it.
    fn peek_front(&self) -> Option<&T> {
        match self.head {
            Some(ref node) => Some(&node.data),
            None => None
        }
    }

    fn len(&self) -> usize {
        self.size
    }

    // Removes and returns the first node equal to `item`, or None.
    //
    // An in-place unlink was attempted but fought the borrow checker, so this
    // takes the O(n) detour instead: pop nodes into `tmp` (which reverses
    // them) until the match is found, drain the remainder across, then
    // reverse `tmp` to restore the original order and swap it back in.
    fn remove_first(&mut self, item: &T) -> Option<T> {
        let mut tmp = SinglyLinkedList::new();
        let mut result : Option<T> = None;
        while let Some(node) = self.pop_front() {
            if node == *item {
                result = Some(node);
                break;
            }
            tmp.push_front(node);
        }
        while let Some(node) = self.pop_front() {
            tmp.push_front(node);
        }
        tmp.reverse();
        mem::swap(self, &mut tmp);
        result
    }

    // Reverses in place by popping everything into a fresh list (push_front
    // naturally reverses the order) and swapping the two lists.
    fn reverse(&mut self) {
        let mut reversed = SinglyLinkedList::<T>::new();
        while let Some(node) = self.pop_front() {
            reversed.push_front(node);
        }
        mem::swap(self, &mut reversed);
    }
}

// The default (compiler-generated) destructor was a recursive function, which
// overflows the stack for large lists; this iterative drop walks the chain
// instead, letting each node fall out of scope one at a time.
impl<T> Drop for SinglyLinkedList<T> {
    fn drop(&mut self) {
        let mut current = mem::replace(&mut self.head, None);
        while let Some(node) = current {
            current = node.pointer;
        }
    }
}
mod vector; mod vector3; mod constant; mod float; mod num; pub use self::vector::*; pub use self::vector3::*; pub use self::constant::*; pub use self::float::*; pub use self::num::*;
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A unique pointer type.
//!
//! NOTE(review): this appears to be a vendored copy of pre-1.0 std
//! `boxed.rs` (old stability attributes, `box` syntax, `#[cfg(stage0)]`);
//! the `use vga::*` import below is not part of the upstream file —
//! presumably added for this kernel's debug output; confirm it is needed.

#![stable]

use core::any::Any;
use core::clone::Clone;
use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering};
use core::default::Default;
use core::fmt;
use core::hash::{self, Hash};
use core::marker::Sized;
use core::mem;
use core::option::Option;
use core::ptr::Unique;
use core::raw::TraitObject;
use core::result::Result;
use core::result::Result::{Ok, Err};
use core::ops::{Deref, DerefMut};

use vga::*;

/// A value that represents the global exchange heap. This is the default
/// place that the `box` keyword allocates into when no place is supplied.
///
/// The following two examples are equivalent:
///
/// ```rust
/// #![feature(box_syntax)]
/// use std::boxed::HEAP;
///
/// fn main() {
/// # struct Bar;
/// # impl Bar { fn new(_a: int) { } }
///     let foo = box(HEAP) Bar::new(2);
///     let foo = box Bar::new(2);
/// }
/// ```
#[lang = "exchange_heap"]
#[unstable = "may be renamed; uncertain about custom allocator design"]
pub static HEAP: () = ();

/// A type that represents a uniquely-owned value.
#[lang = "owned_box"]
#[stable]
pub struct Box<T>(Unique<T>);

impl<T> Box<T> {
    /// Moves `x` into a freshly allocated box on the global exchange heap.
    #[stable]
    pub fn new(x: T) -> Box<T> {
        box x
    }
}

#[stable]
impl<T: Default> Default for Box<T> {
    #[stable]
    fn default() -> Box<T> { box Default::default() }
}

#[stable]
impl<T> Default for Box<[T]> {
    #[stable]
    fn default() -> Box<[T]> { box [] }
}

#[stable]
impl<T: Clone> Clone for Box<T> {
    /// Returns a copy of the owned box.
    #[inline]
    fn clone(&self) -> Box<T> { box {(**self).clone()} }

    /// Performs copy-assignment from `source` by reusing the existing allocation.
    #[inline]
    fn clone_from(&mut self, source: &Box<T>) {
        (**self).clone_from(&(**source));
    }
}

// Comparison traits all delegate to the boxed value via double-deref.
#[stable]
impl<T: ?Sized + PartialEq> PartialEq for Box<T> {
    #[inline]
    fn eq(&self, other: &Box<T>) -> bool { PartialEq::eq(&**self, &**other) }
    #[inline]
    fn ne(&self, other: &Box<T>) -> bool { PartialEq::ne(&**self, &**other) }
}

#[stable]
impl<T: ?Sized + PartialOrd> PartialOrd for Box<T> {
    #[inline]
    fn partial_cmp(&self, other: &Box<T>) -> Option<Ordering> {
        PartialOrd::partial_cmp(&**self, &**other)
    }
    #[inline]
    fn lt(&self, other: &Box<T>) -> bool { PartialOrd::lt(&**self, &**other) }
    #[inline]
    fn le(&self, other: &Box<T>) -> bool { PartialOrd::le(&**self, &**other) }
    #[inline]
    fn ge(&self, other: &Box<T>) -> bool { PartialOrd::ge(&**self, &**other) }
    #[inline]
    fn gt(&self, other: &Box<T>) -> bool { PartialOrd::gt(&**self, &**other) }
}

#[stable]
impl<T: ?Sized + Ord> Ord for Box<T> {
    #[inline]
    fn cmp(&self, other: &Box<T>) -> Ordering {
        Ord::cmp(&**self, &**other)
    }
}

#[stable]
impl<T: ?Sized + Eq> Eq for Box<T> {}

// Two Hash impls: the stage0 bootstrap compiler still used `hash::Writer`.
#[cfg(stage0)]
impl<S: hash::Writer, T: ?Sized + Hash<S>> Hash<S> for Box<T> {
    #[inline]
    fn hash(&self, state: &mut S) {
        (**self).hash(state);
    }
}
#[cfg(not(stage0))]
impl<S: hash::Hasher, T: ?Sized + Hash<S>> Hash<S> for Box<T> {
    #[inline]
    fn hash(&self, state: &mut S) {
        (**self).hash(state);
    }
}

/// Extension methods for an owning `Any` trait object.
#[unstable = "post-DST and coherence changes, this will not be a trait but \
              rather a direct `impl` on `Box<Any>`"]
pub trait BoxAny {
    /// Returns the boxed value if it is of type `T`, or
    /// `Err(Self)` if it isn't.
    #[stable]
    fn downcast<T: 'static>(self) -> Result<Box<T>, Self>;
}

impl BoxAny for Box<Any> {
    #[inline]
    #[unstable = "method may be renamed with respect to other downcasting \
                  methods"]
    fn downcast<T: 'static>(self) -> Result<Box<T>, Box<Any>> {
        if self.is::<T>() {
            unsafe {
                // Get the raw representation of the trait object
                let to: TraitObject =
                    mem::transmute::<Box<Any>, TraitObject>(self);

                // Extract the data pointer
                Ok(mem::transmute(to.data))
            }
        } else {
            Err(self)
        }
    }
}

impl<T: ?Sized + fmt::Show> fmt::Show for Box<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Box({:?})", &**self)
    }
}

#[stable]
impl<T: ?Sized + fmt::String> fmt::String for Box<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::String::fmt(&**self, f)
    }
}

// Boxed trait objects cannot show their contents; print a placeholder.
impl fmt::Show for Box<Any> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad("Box<Any>")
    }
}

#[stable]
impl<T: ?Sized> Deref for Box<T> {
    type Target = T;

    fn deref(&self) -> &T { &**self }
}

#[stable]
impl<T: ?Sized> DerefMut for Box<T> {
    fn deref_mut(&mut self) -> &mut T { &mut **self }
}

#[cfg(test)]
mod test {
    // NOTE(review): `Test` is referenced below but not defined in this file
    // as shown; upstream declares `#[derive(PartialEq, Show)] struct Test;`.
    // Confirm whether it is provided elsewhere in this tree.
    #[test]
    fn test_owned_clone() {
        let a = Box::new(5i);
        let b: Box<int> = a.clone();
        assert!(a == b);
    }

    #[test]
    fn any_move() {
        let a = Box::new(8u) as Box<Any>;
        let b = Box::new(Test) as Box<Any>;

        match a.downcast::<uint>() {
            Ok(a) => { assert!(a == Box::new(8u)); }
            Err(..) => panic!()
        }
        match b.downcast::<Test>() {
            Ok(a) => { assert!(a == Box::new(Test)); }
            Err(..) => panic!()
        }

        let a = Box::new(8u) as Box<Any>;
        let b = Box::new(Test) as Box<Any>;

        assert!(a.downcast::<Box<Test>>().is_err());
        assert!(b.downcast::<Box<uint>>().is_err());
    }

    #[test]
    fn test_show() {
        let a = Box::new(8u) as Box<Any>;
        let b = Box::new(Test) as Box<Any>;
        let a_str = a.to_str();
        let b_str = b.to_str();
        assert_eq!(a_str, "Box<Any>");
        assert_eq!(b_str, "Box<Any>");

        let a = &8u as &Any;
        let b = &Test as &Any;
        let s = format!("{}", a);
        assert_eq!(s, "&Any");
        let s = format!("{}", b);
        assert_eq!(s, "&Any");
    }

    #[test]
    fn deref() {
        fn homura<T: Deref<Target=i32>>(_: T) { }
        homura(Box::new(765i32));
    }
}
// Copyright 2020 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT use serde::{Deserialize, Deserializer, Serialize, Serializer}; use vm::{ExitCode, Serialized}; /// Result of a state transition from a message #[derive(PartialEq, Clone)] pub struct MessageReceipt { pub exit_code: ExitCode, pub return_data: Serialized, pub gas_used: u64, } impl Serialize for MessageReceipt { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { (&self.exit_code, &self.return_data, &self.gas_used).serialize(serializer) } } impl<'de> Deserialize<'de> for MessageReceipt { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { let (exit_code, return_data, gas_used) = Deserialize::deserialize(deserializer)?; Ok(Self { exit_code, return_data, gas_used, }) } }
use std::{ fs, io::{self, Write}, path::PathBuf, process, }; use structopt::StructOpt; #[derive(Debug, StructOpt)] #[structopt(name = "lrs")] struct Opt { #[structopt(parse(from_os_str))] input: Option<PathBuf>, } struct Lrs { had_error: bool, } impl Lrs { fn new() -> Lrs { Lrs { had_error: false } } fn run(&self, command: &str) { for i in command.chars() { println!("{}", i); } } fn error(&mut self, line: usize, message: &str) { self.report(line, "", message); println!("{} {}", line, message); } fn report(&mut self, line: usize, place: &str, message: &str) { println!("[line{}] Error{}: {}", line, place, message); self.had_error = true; } fn run_file(&self, path: PathBuf) { let error = format!("Failed to open {:?}", path); let content = fs::read_to_string(path).expect(&error); self.run(&content); } fn run_promt(&self) { loop { print!("> "); io::stdout().flush(); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(n) => { let trimmed = input.trim(); match trimmed { "quit" => break, _ => { self.run(trimmed); }, } }, Err(error) => println!("error: {}", error), } } } } fn main() { let scanner = Lrs::new(); let opt = Opt::from_args(); match opt.input { Some(f) => { scanner.run_file(f); }, None => { scanner.run_promt(); }, } }
#[doc = "Register `TZC_CID0` reader"] pub type R = crate::R<TZC_CID0_SPEC>; #[doc = "Field `COMP_ID_0` reader - COMP_ID_0"] pub type COMP_ID_0_R = crate::FieldReader; impl R { #[doc = "Bits 0:7 - COMP_ID_0"] #[inline(always)] pub fn comp_id_0(&self) -> COMP_ID_0_R { COMP_ID_0_R::new((self.bits & 0xff) as u8) } } #[doc = "Component ID 0.\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tzc_cid0::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct TZC_CID0_SPEC; impl crate::RegisterSpec for TZC_CID0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`tzc_cid0::R`](R) reader structure"] impl crate::Readable for TZC_CID0_SPEC {} #[doc = "`reset()` method sets TZC_CID0 to value 0x0d"] impl crate::Resettable for TZC_CID0_SPEC { const RESET_VALUE: Self::Ux = 0x0d; }
extern crate time; mod euler10; fn main() { let start_time = time::get_time().nsec; println!("start running"); euler10::execute(); println!("finished in - {} nano seconds", time::get_time().nsec - start_time); }
//! Prometheus target metrics fetcher //! //! This module scrapes Prometheus/OpenMetrics formatted metrics from the target //! software. //! use std::{collections::HashMap, str::FromStr, time::Duration}; use metrics::{counter, gauge}; use serde::Deserialize; use tracing::{error, info, trace, warn}; use crate::signals::Shutdown; #[derive(Debug, Clone, Copy, thiserror::Error)] /// Errors produced by [`Prometheus`] pub enum Error { /// Prometheus scraper shut down unexpectedly #[error("Unexpected shutdown")] EarlyShutdown, } #[derive(Debug, Deserialize, PartialEq, Eq)] #[serde(rename_all = "snake_case")] /// Configuration for collecting Prometheus based target metrics pub struct Config { /// URI to scrape uri: String, /// Metric names to scrape. Leave unset to scrape all metrics. metrics: Option<Vec<String>>, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum MetricType { Gauge, Counter, Histogram, Summary, } #[derive(Debug)] enum MetricTypeParseError { UnknownType, } impl FromStr for MetricType { type Err = MetricTypeParseError; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "counter" => Ok(Self::Counter), "gauge" => Ok(Self::Gauge), "histogram" => Ok(Self::Histogram), "summary" => Ok(Self::Summary), _ => Err(MetricTypeParseError::UnknownType), } } } /// The `Prometheus` target metrics implementation. #[derive(Debug)] pub struct Prometheus { config: Config, shutdown: Shutdown, } impl Prometheus { /// Create a new [`Prometheus`] instance /// /// This is responsible for scraping metrics from the target process in the /// Prometheus format. /// pub(crate) fn new(config: Config, shutdown: Shutdown) -> Self { Self { config, shutdown } } /// Run this [`Server`] to completion /// /// Scrape metrics from the target at 1Hz. /// /// # Errors /// /// None are known. /// /// # Panics /// /// None are known. 
#[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::too_many_lines)] pub(crate) async fn run(mut self) -> Result<(), Error> { info!("Prometheus target metrics scraper running"); let client = reqwest::Client::new(); let server = async move { loop { tokio::time::sleep(Duration::from_secs(1)).await; let Ok(resp) = client.get(&self.config.uri).timeout(Duration::from_secs(1)).send().await else { info!("failed to get Prometheus uri"); continue; }; let Ok(text) = resp.text().await else { info!("failed to read Prometheus response"); continue; }; // remember the type for each metric across lines let mut typemap = HashMap::new(); // this deserves a real parser, but this will do for now. // Format doc: https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md for line in text.lines() { if line.starts_with("# HELP") { continue; } if line.starts_with("# TYPE") { let mut parts = line.split_ascii_whitespace().skip(2); let name = parts.next().unwrap(); let metric_type = parts.next().unwrap(); let metric_type: MetricType = metric_type.parse().unwrap(); // summary and histogram metrics additionally report names suffixed with _sum, _count, _bucket if matches!(metric_type, MetricType::Histogram | MetricType::Summary) { typemap.insert(format!("{name}_sum"), metric_type); typemap.insert(format!("{name}_count"), metric_type); typemap.insert(format!("{name}_bucket"), metric_type); } typemap.insert(name.to_owned(), metric_type); continue; } let mut parts = line.split_ascii_whitespace(); let name_and_labels = parts.next().unwrap(); let value = parts.next().unwrap(); let (name, labels) = { if let Some((name, labels)) = name_and_labels.split_once('{') { let labels = labels.trim_end_matches('}'); let labels = labels.split(',').map(|label| { let (label_name, label_value) = label.split_once('=').unwrap(); let label_value = label_value.trim_matches('\"'); (label_name.to_owned(), label_value.to_owned()) }); let labels = 
labels.collect::<Vec<_>>(); (name, Some(labels)) } else { (name_and_labels, None) } }; let metric_type = typemap.get(name); let name = name.replace("__", "."); if let Some(metrics) = &self.config.metrics { if !metrics.contains(&name) { continue; } } match metric_type { Some(MetricType::Gauge) => { let Ok(value): Result<f64, _> = value.parse() else { let e = value.parse::<f64>().unwrap_err(); warn!("{e}: {name} = {value}"); continue; }; trace!("gauge: {name} = {value}"); gauge!(format!("target/{name}"), value, &labels.unwrap_or_default()); } Some(MetricType::Counter) => { let Ok(value): Result<f64, _> = value.parse() else { let e = value.parse::<f64>().unwrap_err(); warn!("{e}: {name} = {value}"); continue; }; let value = if value < 0.0 { warn!("Negative counter value unhandled"); continue; } else { // clippy shows "error: casting `f64` to `u64` may lose the sign of the value". This is // guarded by the sign check above. if value > u64::MAX as f64 { warn!("Counter value above maximum limit"); continue; } value as u64 }; trace!("counter: {name} = {value}"); counter!(format!("target/{name}"), value, &labels.unwrap_or_default()); } Some(_) | None => { trace!("unsupported metric type: {name} = {value}"); } } } } }; tokio::select! { _res = server => { error!("server shutdown unexpectedly"); Err(Error::EarlyShutdown) } _ = self.shutdown.recv() => { info!("shutdown signal received"); Ok(()) } } } }
mod gql; use async_std::sync::{Arc, RwLock}; use worker::*; use crate::{infra::persistent::counter_adapter::CounterGraphProxy, integration, port}; pub fn build_app(env: Env) -> tide::Server<()> { let wrapped = crate::infra::persistent::Shared(env); let shared = Arc::new(RwLock::new(wrapped)); let counter_repo: Arc<dyn port::CounterRepository> = Arc::new(CounterGraphProxy { shared }); let mut app = tide::new(); integration::app_with_gql( &mut app, String::from("/graphql"), gql::build_schema(counter_repo), ); app }
mod spawner; mod nodes; pub mod catcher; pub use spawner::SpawnerSystem; pub use nodes::NodesSystem; pub use catcher::{CatcherSystem, Catcher};
/// Demonstrates that a block is an expression: the final expression
/// (written without a trailing semicolon) becomes the block's value.
fn main() {
    let x = {
        // `3;` is a statement — its value is discarded.
        3;
        // No semicolon here, so `2` is the value of the whole block.
        2
    };
    println!("{}", x);
}
use pattern::{StepArgument, StepPattern};
use step::Step;

use std::collections::HashMap;

/// Signature of the function invoked when a step matches a pattern.
pub type StepHandler = fn(arguments: HashMap<String, StepArgument>);

/// Couples a step pattern with the handler to run when it matches.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct StepDefinition {
    pattern: StepPattern,
    handler: StepHandler,
}

impl StepDefinition {
    /// Creates a definition from a pattern and its handler.
    pub fn new(pattern: StepPattern, handler: StepHandler) -> Self {
        StepDefinition { pattern, handler }
    }

    /// Returns `true` when the step's description matches this pattern.
    pub fn is_match(&self, step: &Step) -> bool {
        self.pattern.is_match(&step.description)
    }

    /// Extracts the pattern's captures from `description` and feeds them
    /// to the handler.
    pub fn handle(&self, description: &String) {
        (self.handler)(self.pattern.captures(description));
    }
}

#[cfg(test)]
mod test {
    use super::StepDefinition;
    use pattern::{Pattern, StepPattern};
    use step::{Step, StepType};
    use testing::mock::{PatternMock, step_handler};

    const A_NON_MATCHING_DESCRIPTION: &str = "This description does not match";

    #[test]
    fn it_is_matching_a_step_given_the_step_description_is_matching_its_pattern() {
        let pattern = PatternMock {};
        // Capture the matching description before the pattern is moved.
        let description = pattern.to_string();
        let step_definition = StepDefinition::new(StepPattern::new(pattern), step_handler);
        let step = Step::new(StepType::Given, description);

        assert!(step_definition.is_match(&step));
    }

    #[test]
    fn it_is_not_matching_a_step_given_the_step_description_is_not_matching_its_pattern() {
        let pattern = PatternMock {};
        let step_definition = StepDefinition::new(StepPattern::new(pattern), step_handler);
        let step = Step::new(StepType::Given, A_NON_MATCHING_DESCRIPTION);

        assert!(!step_definition.is_match(&step));
    }
}
use std::borrow::Borrow; use std::collections::HashMap; use std::{fmt::Display, hash::Hash}; /// A mapping of Keys to Values #[derive(Debug, Default, serde::Deserialize)] pub struct Mapping<T: Hash + Eq + Sized, V = T>(HashMap<T, V>); impl<T: Hash + Eq> Mapping<T> { /// Tries to get the value for the key pub fn get<K: ?Sized>(&self, key: &K) -> Option<&T> where K: Hash + Eq + Display, T: Borrow<K>, { self.0.get(key) } }
use std::collections::HashMap;

use types;
use card::Card;
use calculator::utility;
use calculator::straight;

/// Tests a hand for a straight flush.
///
/// Returns `Some(Combination::StraightFlush(suit, ranks))` when at least
/// five cards share a suit and those suited cards contain a straight;
/// otherwise returns `None`.
pub fn test(mut cards: Vec<Card>) -> Option<types::Combination> {
    // A straight flush needs at least five cards.
    if cards.len() < 5 {
        return None;
    }

    utility::sort_cards(&mut cards);

    // Count how many cards of each suit are present.
    let suit_counts = cards.iter().fold(HashMap::new(), |mut acc, &card| {
        *acc.entry(card.suit).or_insert(0) += 1;
        acc
    });

    // Only a suit with five or more cards can yield a straight flush.
    // (Replaces the previous `== None` check + redundant `if let Some`.)
    let (&flush_suit, _) = suit_counts.iter().find(|&(_, count)| *count > 4)?;

    // Keep only that suit and look for a straight among those cards.
    // (The previous version cloned the filtered vec a second time via
    // `to_vec()`; the collected vec is passed directly now.)
    let suited_cards: Vec<Card> = cards
        .iter()
        .cloned()
        .filter(|card| card.suit == flush_suit)
        .collect();

    match straight::test(suited_cards) {
        Some(types::Combination::Straight(straight_ranks)) => Some(
            types::Combination::StraightFlush(flush_suit, straight_ranks),
        ),
        _ => None,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Convenience constructor used by the test cases below.
    fn card(rank: types::Rank, suit: types::Suit) -> Card {
        Card { rank, suit }
    }

    #[test]
    fn none_for_empty() {
        assert_eq!(None, test(vec![]));
    }

    #[test]
    fn none_for_four_cards() {
        assert_eq!(
            None,
            test(vec![
                card(types::Rank::King, types::Suit::Diamonds),
                card(types::Rank::Queen, types::Suit::Diamonds),
                card(types::Rank::Jack, types::Suit::Diamonds),
                card(types::Rank::Ten, types::Suit::Diamonds),
            ])
        );
    }

    #[test]
    fn option_for_king_starting_straight_flush() {
        assert_eq!(
            Some(types::Combination::StraightFlush(
                types::Suit::Diamonds,
                [
                    types::Rank::King,
                    types::Rank::Queen,
                    types::Rank::Jack,
                    types::Rank::Ten,
                    types::Rank::Nine
                ]
            )),
            test(vec![
                card(types::Rank::Three, types::Suit::Hearts),
                card(types::Rank::King, types::Suit::Diamonds),
                card(types::Rank::Jack, types::Suit::Diamonds),
                card(types::Rank::Six, types::Suit::Spades),
                card(types::Rank::Ten, types::Suit::Diamonds),
                card(types::Rank::Nine, types::Suit::Diamonds),
                card(types::Rank::Queen, types::Suit::Diamonds),
            ])
        );
    }

    #[test]
    fn option_for_ace_starting_straight_flush() {
        assert_eq!(
            Some(types::Combination::StraightFlush(
                types::Suit::Diamonds,
                [
                    types::Rank::Ace,
                    types::Rank::King,
                    types::Rank::Queen,
                    types::Rank::Jack,
                    types::Rank::Ten
                ]
            )),
            test(vec![
                card(types::Rank::Three, types::Suit::Hearts),
                card(types::Rank::King, types::Suit::Diamonds),
                card(types::Rank::Jack, types::Suit::Diamonds),
                card(types::Rank::Six, types::Suit::Spades),
                card(types::Rank::Ten, types::Suit::Diamonds),
                card(types::Rank::Ace, types::Suit::Diamonds),
                card(types::Rank::Nine, types::Suit::Diamonds),
                card(types::Rank::Queen, types::Suit::Diamonds),
            ])
        );
    }

    #[test]
    fn option_for_five_starting_straight_flush() {
        assert_eq!(
            Some(types::Combination::StraightFlush(
                types::Suit::Diamonds,
                [
                    types::Rank::Five,
                    types::Rank::Four,
                    types::Rank::Three,
                    types::Rank::Two,
                    types::Rank::Ace
                ]
            )),
            test(vec![
                card(types::Rank::Three, types::Suit::Diamonds),
                card(types::Rank::Five, types::Suit::Diamonds),
                card(types::Rank::King, types::Suit::Diamonds),
                card(types::Rank::Two, types::Suit::Diamonds),
                card(types::Rank::Ace, types::Suit::Diamonds),
                card(types::Rank::Seven, types::Suit::Spades),
                card(types::Rank::Four, types::Suit::Diamonds),
            ])
        );
    }

    #[test]
    fn option_for_king_starting_straight_flush_with_nine_other_rank() {
        assert_eq!(
            Some(types::Combination::StraightFlush(
                types::Suit::Diamonds,
                [
                    types::Rank::King,
                    types::Rank::Queen,
                    types::Rank::Jack,
                    types::Rank::Ten,
                    types::Rank::Nine
                ]
            )),
            test(vec![
                card(types::Rank::Three, types::Suit::Hearts),
                card(types::Rank::King, types::Suit::Diamonds),
                card(types::Rank::Jack, types::Suit::Diamonds),
                card(types::Rank::Six, types::Suit::Spades),
                card(types::Rank::Ten, types::Suit::Diamonds),
                card(types::Rank::Nine, types::Suit::Spades),
                card(types::Rank::Nine, types::Suit::Diamonds),
                card(types::Rank::Queen, types::Suit::Diamonds),
            ])
        );
    }
}
let x = 3; let y = { let x_squared = x * x; let x_cubed = x_squared * x; // This expression will be assigned to `y` x_cubed + x_squared + x };
use std::collections::HashSet;

use apllodb_shared_components::SchemaIndex;
use apllodb_storage_engine_interface::{RowProjectionQuery, TableName};
use serde::{Deserialize, Serialize};

use crate::{
    aliaser::Aliaser,
    condition::Condition,
    records::{record_schema::RecordSchema, Records},
    select::ordering::Ordering,
};

/// Leaf operations, which generates [RecordIterator](apllodb-shared-components::RecordIterator).
#[derive(Clone, PartialEq, Debug)]
pub(crate) enum LeafPlanOperation {
    /// Emits an in-memory set of records verbatim.
    Values {
        records: Records,
    },
    /// Sequentially scans `table_name`, restricting rows to `projection`
    /// and resolving column aliases through `aliaser`.
    SeqScan {
        table_name: TableName,
        projection: RowProjectionQuery,
        aliaser: Aliaser,
    },
    // TODO extend.
    // See PostgreSQL's plan nodes: <https://github.com/postgres/postgres/blob/master/src/include/nodes/nodes.h#L42-L95>
}

/// Unary operations, which inputs [RecordIterator](apllodb-shared-components::RecordIterator) and outputs [RecordIterator](apllodb-shared-components::RecordIterator).
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub(crate) enum UnaryPlanOperation {
    /// Keeps only the listed fields of each record.
    Projection {
        fields: HashSet<SchemaIndex>,
    },
    /// Keeps only the records satisfying `condition`.
    Selection {
        condition: Condition,
    },
    /// Orders records by the given (field, ordering) pairs.
    Sort {
        index_orderings: Vec<(SchemaIndex, Ordering)>,
    },
    // TODO extend.
    // See PostgreSQL's plan nodes: <https://github.com/postgres/postgres/blob/master/src/include/nodes/nodes.h#L42-L95>
}

/// Binary operations, which inputs two [RecordIterator](apllodb-shared-components::RecordIterator) and outputs one [RecordIterator](apllodb-shared-components::RecordIterator).
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub(crate) enum BinaryPlanOperation {
    /// Equi-join on `left_field` = `right_field`, producing records with
    /// schema `joined_schema`.
    HashJoin {
        joined_schema: RecordSchema,
        left_field: SchemaIndex,
        right_field: SchemaIndex,
    },
    // TODO extend.
    // See PostgreSQL's plan nodes: <https://github.com/postgres/postgres/blob/master/src/include/nodes/nodes.h#L42-L95>
}
use actix;
use actix_web::{error, Error, HttpResponse};
use futures;
use uuid;

/// Application error type, serialized to clients as
/// `{"error": <variant>, "msg": <content>}` per the serde tag/content
/// attributes below.
#[derive(Fail, Debug, Serialize)]
#[serde(tag = "error", content = "msg")]
pub enum IkError {
    #[fail(display = "internal error")]
    InternalError,
    #[fail(display = "bad request")]
    BadRequest(String),
    #[fail(display = "not found")]
    NotFound(String),
}

impl error::ResponseError for IkError {
    /// Maps each variant to an HTTP response.
    fn error_response(&self) -> HttpResponse {
        match *self {
            IkError::InternalError => {
                // A fresh UUID is logged and echoed in `X-Request-Id` so a
                // client-reported 500 can be correlated with server logs;
                // internal details are deliberately not sent to the client.
                let error_uid = uuid::Uuid::new_v4();
                error!("{:?} with id {}", self, error_uid);
                HttpResponse::InternalServerError()
                    .header(
                        "X-Request-Id",
                        error_uid.to_hyphenated().to_string().as_str(),
                    )
                    .finish()
            }
            // Client-facing variants serialize their message as JSON.
            IkError::BadRequest(_) => HttpResponse::BadRequest().json(self),
            IkError::NotFound(_) => HttpResponse::NotFound().json(self),
        }
    }
}

// JSON body failures become 400s; the deserialize case is special-cased so
// the client sees the serde message rather than the generic payload error.
impl From<error::JsonPayloadError> for IkError {
    fn from(err: error::JsonPayloadError) -> IkError {
        match err {
            error::JsonPayloadError::Deserialize(json_err) => {
                IkError::BadRequest(format!("{}", json_err))
            }
            _ => IkError::BadRequest(format!("{}", err)),
        }
    }
}

// The conversions below log the underlying failure and hide it behind
// `InternalError`, since none of them is the client's fault.
impl From<actix::MailboxError> for IkError {
    fn from(err: actix::MailboxError) -> IkError {
        error!("Got a {:?}", err);
        IkError::InternalError
    }
}

impl From<futures::Canceled> for IkError {
    fn from(err: futures::Canceled) -> IkError {
        error!("Got a {:?}", err);
        IkError::InternalError
    }
}

impl From<Error> for IkError {
    fn from(err: Error) -> IkError {
        error!("Got a {:?}", err);
        IkError::InternalError
    }
}
use actix_web::{Form, HttpResponse, State};

use models::requests::{ActionRequest, SlackAction, SlackActionRequest, SlackRequest};
use service_actor::messages::{Cancel, Message, QueryMovie};
use AppState;

pub const BASE_URL: &str = "http://cinemas.nos.pt";
pub const SERVICE_REQUEST_PATH: &str =
    "/_layouts/15/Cinemas/ApplicationPages/CinemasHelperService.aspx/GetAllMoviesPlaying";

/// Handles the slash command: queues a query for the first page of movies
/// and acknowledges Slack immediately (the actor answers asynchronously
/// via `response_url`).
pub fn list_movies_in_display(
    (request, state): (Form<SlackRequest>, State<AppState>),
) -> HttpResponse {
    let response_url = request.into_inner().response_url;
    let message = Message::QueryOp(QueryMovie {
        page: 1,
        response_url,
    });
    state.responder.do_send(message);
    HttpResponse::Ok().finish()
}

/// Handles an interactive button action posted by Slack.
///
/// The payload is user-/Slack-controlled input: malformed JSON, an empty
/// action list, or a non-numeric page value previously panicked the worker
/// via `unwrap()`; they now yield `400 Bad Request`.
pub fn handle_action((request, state): (Form<ActionRequest>, State<AppState>)) -> HttpResponse {
    let request_json = request.into_inner().payload;
    let request_struct: SlackActionRequest = match serde_json::from_str(request_json.as_str()) {
        Ok(parsed) => parsed,
        Err(_) => return HttpResponse::BadRequest().finish(),
    };
    let response_url = request_struct.response_url;
    let button_action = match request_struct.actions.first() {
        Some(action) => action,
        None => return HttpResponse::BadRequest().finish(),
    };
    match action_response(button_action, response_url) {
        Some(message) => {
            state.responder.do_send(message);
            HttpResponse::Ok().finish()
        }
        None => HttpResponse::BadRequest().finish(),
    }
}

/// Maps a button action to the actor message it should trigger.
/// Returns `None` when a pagination button carries a value that does not
/// parse as a page number.
fn action_response(action: &SlackAction, url: String) -> Option<Message> {
    let button_id = action.action_id.to_owned();
    match button_id.as_ref() {
        "cancel" => Some(Message::CancelOp(Cancel { response_url: url })),
        _ => {
            // The button value carries the page number to fetch.
            let value_string = action.value.to_owned();
            let page: u8 = value_string.parse().ok()?;
            Some(Message::QueryOp(QueryMovie {
                page,
                response_url: url,
            }))
        }
    }
}
use std::hash::{Hash, Hasher};
use std::collections::{HashSet, HashMap};
use std::usize;

use pasture_core::{
    containers::{PointBuffer, PointBufferExt},
    layout::attributes::POSITION_3D,
    nalgebra::Vector3,
};
use anyhow::{Result, anyhow};

/// A hull facet, storing the indices of its three vertices within the
/// input buffer plus its (outward-facing, not normalized) normal.
#[derive(Clone, Copy)]
struct Triangle {
    a: usize,
    b: usize,
    c: usize,
    normal: Vector3<f64>,
}

/// An undirected edge between two vertex indices.
#[derive(Eq, Clone, Copy)]
struct Edge {
    a: usize,
    b: usize,
}

impl PartialEq for Edge {
    // Orientation-insensitive: (a, b) equals (b, a).
    fn eq(&self, other: &Self) -> bool {
        self.a == other.a && self.b == other.b || self.a == other.b && self.b == other.a
    }
}

impl Hash for Edge {
    // Hash the product of the endpoints so that both orientations of the
    // same edge hash identically, consistent with the symmetric `eq` above.
    fn hash<H: Hasher>(&self, state: &mut H) {
        (self.a * self.b).hash(state);
    }
}

/// Convex Hull generation as triangle mesh
/// Returns the convex hull as a vector of tuples of size 3 that contains the indices of the triangle vertices within the input buffer that form a convex hull around all input points
/// or an error if less than 3 linearly independent points were given in the input buffer.
///
/// # Panics
///
/// If the PointBuffer doesn't contain a POSITION_3D attribute.
pub fn convex_hull_as_triangle_mesh<T: PointBuffer>(buffer: &T) -> Result<Vec<Vector3<usize>>> {
    let triangles = create_convex_hull(buffer);
    // Fewer than two triangles means the hull degenerated (points were
    // collinear or coincident) and no closed mesh exists.
    if triangles.len() < 2 {
        // NOTE(review): "cointains" is a typo in this runtime error string;
        // left untouched here since callers may match on the message.
        return Err(anyhow!("input buffer cointains too few linearly independent points"));
    }
    let mut triangle_indices = Vec::new();
    for tri in triangles {
        triangle_indices.push(Vector3::new(tri.a, tri.b, tri.c));
    }
    return Ok(triangle_indices);
}

/// Convex Hull generation as points
/// Returns the convex hull as an unsorted vector that contains the indices the points forming a convex hull around all input points.
///
/// # Panics
///
/// If the PointBuffer doesn't contain a POSITION_3D attribute.
pub fn convex_hull_as_points<T: PointBuffer>(buffer: &T) -> Vec<usize> {
    let triangles = create_convex_hull(buffer);
    // An empty buffer yields no triangles; previously this fell through to
    // `triangles[0]` below and panicked. An empty hull is returned instead.
    if triangles.is_empty() {
        return Vec::new();
    }
    let mut points = HashSet::new();
    if triangles.len() > 1 {
        // A full mesh: every triangle vertex is a hull point.
        for tri in triangles {
            points.insert(tri.a);
            points.insert(tri.b);
            points.insert(tri.c);
        }
    } else {
        // Degenerate hull (a single seed triangle): vertex indices may still
        // hold their initial 0 placeholder, so only report real vertices.
        let tri = triangles[0];
        points.insert(tri.a);
        points.insert(tri.b);
        if tri.c != 0 {
            points.insert(tri.c);
        }
    }
    let point_indices: Vec<usize> = points.into_iter().collect();
    point_indices
}

/// Builds the hull incrementally: feeds every buffered point, in order,
/// through `iteration`, which grows/repairs the triangle set.
///
/// # Panics
///
/// If the PointBuffer doesn't contain a POSITION_3D attribute.
fn create_convex_hull<T: PointBuffer>(buffer: &T) -> Vec<Triangle> {
    let mut triangles: Vec<Triangle> = Vec::new();
    let position_attribute = match buffer
        .point_layout()
        .get_attribute_by_name(POSITION_3D.name())
    {
        Some(a) => a,
        None => panic!("point buffer contains no position attribute"),
    };
    // Use the fast typed iterator when the stored datatype already matches
    // POSITION_3D; otherwise fall back to the converting iterator.
    if position_attribute.datatype() == POSITION_3D.datatype() {
        for (pointid, point) in buffer.iter_attribute::<Vector3<f64>>(&POSITION_3D).enumerate() {
            iteration(buffer, pointid, point, &mut triangles);
        }
    } else {
        for (pointid, point) in buffer
            .iter_attribute_as::<Vector3<f64>>(&POSITION_3D)
            .enumerate()
        {
            iteration(buffer, pointid, point, &mut triangles);
        }
    }
    triangles
}

/// Performs a single iteration of the convex hull algorithm.
/// `pointid`: current index within the buffer
/// `point`: current point to be checked against the convex hull, possibly extends the convex hull
/// `triangles`: the set of triangles forming the convex hull
/// Each iteration receives a convex hull and checks it against the current point within the buffer.
/// If the point lies within the current convex hull no changes have to be made.
/// If the point lies outside of the current convex hull the hull has to be extended to include the current point.
/// If 'triangles' contain only one entry: no full triangle has been found yet. In case of linearly dependent points no second triangle is added.
/// If all 'triangles' are in a plane with 'point' a special triangulation procedure is needed to prevent a degenerated triangle mesh.
fn iteration<T: PointBuffer>(buffer: &T, pointid: usize, point: Vector3<f64>, triangles: &mut Vec<Triangle>) {
    if pointid == 0 {
        // Seed: a degenerate "triangle" holding only the first vertex.
        // NOTE(review): the seed's `normal` field temporarily stores the
        // first point's position, not a normal.
        triangles.push(Triangle{ a: 0, b: 0, c: 0, normal: point })
    } else if triangles.len() == 1 {
        // Still building the first real triangle out of the seed.
        let mut first = &mut triangles[0];
        if pointid == 1 {
            first.b = pointid;
        } else {
            let first_a = buffer.get_attribute(&POSITION_3D, first.a);
            let first_b = buffer.get_attribute(&POSITION_3D, first.b);
            let ab: Vector3<f64> = first_b - first_a;
            let ab_mag_sqr = ab.magnitude_squared();
            if ab_mag_sqr == 0.0 {
                // a and b coincide: replace b with the new point.
                first.b = pointid;
            } else {
                let ac: Vector3<f64> = point - first_a;
                let ab_norm = ab.normalize();
                let ac_ab_projected_length = ac.dot(&ab_norm);
                if f64::abs(ac_ab_projected_length) == ac.magnitude() {
                    // The new point is collinear with a-b: only extend the
                    // segment's endpoints, no triangle can be formed yet.
                    if ac_ab_projected_length >= 0.0 {
                        if ac.magnitude_squared() > ab_mag_sqr {
                            first.b = pointid;
                        }
                    } else {
                        first.a = pointid;
                    }
                } else {
                    // Linearly independent point: close the first triangle
                    // and add its mirror so the hull is a closed two-sided
                    // surface.
                    first.c = pointid;
                    first.normal = calc_normal(first_a, first_b, point);
                    let first = triangles[0];
                    triangles.push(Triangle{ a: first.a, b: first.c, c: first.b, normal: -first.normal })
                }
            }
        }
    } else {
        // General case: remove every facet that faces the new point and
        // re-triangulate the resulting hole towards `point`.
        let mut outer_edges = HashSet::new();
        let mut inner_edges = HashSet::new();
        let mut planar_triangles = Vec::new();
        triangles.retain(|tri| {
            let tri_a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, tri.a);
            let pa: Vector3<f64> = tri_a - point;
            let dot = pa.dot(&tri.normal);
            if dot < 0.0 {
                // Facet faces the point: drop it, remembering its edges so
                // the hole boundary (outer) vs. interior (inner) is known.
                add_edge_to_outer_or_inner_edges(tri.a, tri.b, &mut outer_edges, &mut inner_edges);
                add_edge_to_outer_or_inner_edges(tri.b, tri.c, &mut outer_edges, &mut inner_edges);
                add_edge_to_outer_or_inner_edges(tri.c, tri.a, &mut outer_edges, &mut inner_edges);
                return false;
            } else if dot == 0.0 {
                // Facet is coplanar with the point: candidate for the
                // special planar triangulation below.
                planar_triangles.push(tri.clone());
            }
            return true;
        });
        if outer_edges.len() > 0 || inner_edges.len() > 0 {
            // Standard case: fan new facets from the hole boundary to the
            // new point.
            for edge in outer_edges.iter() {
                let edge_a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, edge.a);
                let edge_b: Vector3<f64> = buffer.get_attribute(&POSITION_3D, edge.b);
                triangles.push(Triangle{ a: edge.a, b: edge.b, c: pointid, normal: calc_normal(edge_a, edge_b, point)});
            }
        } else {
            // Planar case: the point is coplanar with existing facets.
            // Find all edges of the triangle of which the edge-normal is facing the point.
            let mut edges_facing_point = Vec::new();
            let mut edge_triangle_id = Vec::new();
            for (i, pt) in planar_triangles.iter().enumerate() {
                let planar_a = buffer.get_attribute(&POSITION_3D, pt.a);
                let planar_b = buffer.get_attribute(&POSITION_3D, pt.b);
                let planar_c = buffer.get_attribute(&POSITION_3D, pt.c);
                let dist_ab = dist_point_to_edge(point, planar_a, planar_b, pt.normal);
                if dist_ab >= 0.0 {
                    edges_facing_point.push(Edge{a: pt.a, b: pt.b});
                    edge_triangle_id.push(i);
                }
                let dist_bc = dist_point_to_edge(point, planar_b, planar_c, pt.normal);
                if dist_bc >= 0.0 {
                    edges_facing_point.push(Edge{a: pt.b, b: pt.c});
                    edge_triangle_id.push(i);
                }
                let dist_ca = dist_point_to_edge(point, planar_c, planar_a, pt.normal);
                if dist_ca >= 0.0 {
                    edges_facing_point.push(Edge{a: pt.c, b: pt.a});
                    edge_triangle_id.push(i);
                }
                if dist_ab < 0.0 && dist_bc < 0.0 && dist_ca < 0.0 {
                    // The point lies strictly inside this planar triangle:
                    // it cannot extend the hull.
                    edges_facing_point.clear();
                    break;
                }
            }
            // Remove all edges occluded by other edges.
            let mut edge_triangle_normals = Vec::new();
            for i in (0..edges_facing_point.len()).rev() {
                let edg = edges_facing_point[i];
                let edg_a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, edg.a);
                let edg_b: Vector3<f64> = buffer.get_attribute(&POSITION_3D, edg.b);
                let dist_edg_a_p = (edg_a - point).magnitude_squared();
                let dist_edg_b_p = (edg_b - point).magnitude_squared();
                let dist_edg_p = dist_point_to_line_segment(point, edg_a, edg_b);
                let edg_triangle_normal: Vector3<f64> = planar_triangles[edge_triangle_id[i]].normal;
                let mut remove = false;
                for other_edge_id in 0..edges_facing_point.len() {
                    if other_edge_id != i {
                        let other_edg_triangle_normal = planar_triangles[edge_triangle_id[other_edge_id]].normal;
                        // Only edges on the same side of the plane can
                        // occlude each other.
                        if edg_triangle_normal.dot(&other_edg_triangle_normal) > 0.0 {
                            let other_edg = edges_facing_point[other_edge_id];
                            let other_edg_a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, other_edg.a);
                            let other_edg_b: Vector3<f64> = buffer.get_attribute(&POSITION_3D, other_edg.b);
                            let point_other_edg_a = other_edg_a - point;
                            let point_other_edg_b = other_edg_b - point;
                            let point_other_edg_a_norm = other_edg_triangle_normal.cross(&point_other_edg_a);
                            let point_other_edg_b_norm = other_edg_triangle_normal.cross(&point_other_edg_b);
                            let edg_a_other_edg_a = edg_a - other_edg_a;
                            let edg_a_other_edg_b = edg_a - other_edg_b;
                            let edg_b_other_edg_a = edg_b - other_edg_a;
                            let edg_b_other_edg_b = edg_b - other_edg_b;
                            let ea_oea_dot_border_a = point_other_edg_a_norm.dot(&edg_a_other_edg_a);
                            let eb_oea_dot_border_a = point_other_edg_a_norm.dot(&edg_b_other_edg_a);
                            let ea_oeb_dot_border_b = point_other_edg_b_norm.dot(&edg_a_other_edg_b);
                            let eb_oeb_dot_border_b = point_other_edg_b_norm.dot(&edg_b_other_edg_b);
                            // Endpoint a of this edge lies inside the wedge
                            // spanned by the other edge as seen from `point`.
                            if ea_oea_dot_border_a < 0.0 && ea_oeb_dot_border_b > 0.0 {
                                let dist_other_edg_p = f64::min(point_other_edg_a.magnitude_squared(), point_other_edg_b.magnitude_squared());
                                if dist_edg_a_p > dist_other_edg_p {
                                    remove = true;
                                    break;
                                }
                            }
                            // Same test for endpoint b.
                            if eb_oea_dot_border_a < 0.0 && eb_oeb_dot_border_b > 0.0 {
                                let dist_other_edg_p = f64::min(point_other_edg_a.magnitude_squared(), point_other_edg_b.magnitude_squared());
                                if dist_edg_b_p > dist_other_edg_p {
                                    remove = true;
                                    break;
                                }
                            }
                            // The edges cross as seen from `point`: keep the
                            // nearer one.
                            if (ea_oea_dot_border_a < 0.0 && eb_oeb_dot_border_b > 0.0) || (eb_oea_dot_border_a < 0.0 && ea_oeb_dot_border_b > 0.0) {
                                let dist_other_edg_p = dist_point_to_line_segment(point, other_edg_a, other_edg_b);
                                if dist_edg_p > dist_other_edg_p {
                                    remove = true;
                                    break;
                                }
                            }
                        }
                    }
                }
                if remove {
                    edges_facing_point.remove(i);
                    edge_triangle_id.remove(i);
                } else {
                    // Reverse iteration + front insertion keeps the normals
                    // aligned with `edges_facing_point`'s forward order.
                    edge_triangle_normals.insert(0, -edg_triangle_normal);
                }
            }
            // Remove all triangles with vertices that are contained in two edges facing point
            let edgenum = edges_facing_point.len();
            if edgenum > 2 {
                let mut edges_to_remove = HashSet::new();
                let mut vertices_on_one_edge_start = HashMap::new();
                let mut vertices_on_one_edge_end = HashMap::new();
                let mut vertices_on_two_edges = HashMap::new();
                // Classify each vertex by how many facing edges start/end
                // at it; a vertex hit twice becomes fully occluded by the
                // new point.
                for facing_edge in edges_facing_point.iter() {
                    let res_a = vertices_on_one_edge_start.insert(facing_edge.a, (facing_edge.b, facing_edge.clone()));
                    if res_a.is_some() && res_a.unwrap().0 != facing_edge.b {
                        vertices_on_one_edge_start.remove(&facing_edge.a);
                        vertices_on_two_edges.insert(facing_edge.a, (res_a.unwrap().1, facing_edge.clone()));
                    }
                    let res_b = vertices_on_one_edge_end.insert(facing_edge.b, (facing_edge.a, facing_edge.clone()));
                    if res_b.is_some() && res_b.unwrap().0 != facing_edge.a {
                        vertices_on_one_edge_end.remove(&facing_edge.b);
                        vertices_on_two_edges.insert(facing_edge.b, (res_b.unwrap().1, facing_edge.clone()));
                    }
                }
                let mut triangles_to_remove = Vec::new();
                for t_id in 0..triangles.len() {
                    let tri = triangles.get(t_id).unwrap();
                    let res_a = vertices_on_two_edges.get(&tri.a);
                    let res_b = vertices_on_two_edges.get(&tri.b);
                    let res_c = vertices_on_two_edges.get(&tri.c);
                    let a_on_two_edges = res_a.is_some();
                    let b_on_two_edges = res_b.is_some();
                    let c_on_two_edges = res_c.is_some();
                    if a_on_two_edges || b_on_two_edges || c_on_two_edges {
                        // Drop the triangle; its edge opposite the occluded
                        // vertex joins the facing set, the two incident
                        // edges are scheduled for removal.
                        triangles_to_remove.push(t_id);
                        if c_on_two_edges {
                            edges_facing_point.push(Edge{a: tri.a, b: tri.b});
                            edges_to_remove.insert(Edge{a: tri.b, b: tri.c});
                            edges_to_remove.insert(Edge{a: tri.c, b: tri.a});
                            edge_triangle_normals.push(tri.normal);
                        }
                        if b_on_two_edges {
                            edges_facing_point.push(Edge{a: tri.c, b: tri.a});
                            edges_to_remove.insert(Edge{a: tri.a, b: tri.b});
                            edges_to_remove.insert(Edge{a: tri.b, b: tri.c});
                            edge_triangle_normals.push(tri.normal);
                        }
                        if a_on_two_edges {
                            edges_facing_point.push(Edge{a: tri.b, b: tri.c});
                            edges_to_remove.insert(Edge{a: tri.c, b: tri.a});
                            edges_to_remove.insert(Edge{a: tri.a, b: tri.b});
                            edge_triangle_normals.push(tri.normal);
                        }
                    }
                }
                for t_id_remove in triangles_to_remove.iter().rev() {
                    triangles.remove(*t_id_remove);
                }
                for efp in (0..edges_facing_point.len()).rev() {
                    if edges_to_remove.contains(edges_facing_point.get(efp).unwrap()) {
                        edges_facing_point.remove(efp);
                        edge_triangle_normals.remove(efp);
                    }
                }
            }
            // Close the hull: connect every surviving facing edge to the
            // new point, reusing the stored coplanar normals.
            for i in 0..edges_facing_point.len() {
                let edg = edges_facing_point[i];
                triangles.push(Triangle{ a: edg.a, b: edg.b, c: pointid, normal: edge_triangle_normals[i]});
            }
        }
    }
}

/// Calculates the distance of a point to an edge of a triangle. Assumes the point to be in the same plane as the triangle.
/// `point`: the point that lies in the same plane as the edge
/// `edge_a`: first vertex of the edge
/// `edge_b`: second vertex of the edge
/// 'triangle_normal': the normal of the triangle the edge belongs to
fn dist_point_to_edge(point: Vector3<f64>, edge_a: Vector3<f64>, edge_b: Vector3<f64>, triangle_normal: Vector3<f64>) -> f64 {
    let pa = edge_a - point;
    let edge_ab: Vector3<f64> = edge_b - edge_a;
    // In-plane edge normal; the sign of the dot product tells on which
    // side of the edge the point lies.
    let edge_ab_normal = triangle_normal.cross(&edge_ab);
    return edge_ab_normal.dot(&pa);
}

/// Calculates the distance of a point to a line segment.
/// `point`: the point whose distance is measured
/// `segment_a`: first vertex of the line segment
/// `segment_b`: second vertex of the line segment
fn dist_point_to_line_segment(point: Vector3<f64>, segment_a: Vector3<f64>, segment_b: Vector3<f64>) -> f64 {
    let ab: Vector3<f64> = segment_b - segment_a;
    let ap: Vector3<f64> = point - segment_a;
    // The point projects before `segment_a`: nearest feature is endpoint a.
    if ap.dot(&ab) <= 0.0 {
        return ap.magnitude();
    }
    let bp = point - segment_b;
    // The point projects past `segment_b`: nearest feature is endpoint b.
    if bp.dot(&ab) >= 0.0 {
        return bp.magnitude();
    }
    // Otherwise the perpendicular distance to the supporting line applies.
    ab.cross(&ap).magnitude() / ab.magnitude()
}

/// Adds the given edge to the set of outer edges. If the given edge is already contained in the set of outer edges
/// (in either orientation, per `Edge`'s symmetric equality) it is removed and added to the set of inner edges.
/// `a`: first vertex of the edge
/// `b`: second vertex of the edge
/// `outer_edges`: the set of outer edges
/// `inner_edges`: the set of inner edges
fn add_edge_to_outer_or_inner_edges(
    a: usize,
    b: usize,
    outer_edges: &mut HashSet<Edge>,
    inner_edges: &mut HashSet<Edge>,
) {
    let edge = Edge { a, b };
    // `insert` returning false means the edge was already present.
    if !outer_edges.insert(edge) {
        outer_edges.remove(&edge);
        inner_edges.insert(edge);
    }
}

/// Calculates the normal of a triangle formed by three points.
/// `a`: first vertex of the triangle /// `b`: second vertex of the triangle /// `c`: third vertex of the triangle fn calc_normal(a: Vector3<f64>, b: Vector3<f64>, c: Vector3<f64>) -> Vector3<f64> { let ab: Vector3<f64> = b - a; let ac: Vector3<f64> = c - a; return ab.cross(&ac); } #[cfg(test)] mod tests { use pasture_core::{containers::PerAttributeVecPointStorage, containers::{PointBuffer, PointBufferExt}, layout::PointType, layout::attributes::POSITION_3D, nalgebra::Vector3}; use crate::convexhull; use pasture_derive::PointType; use anyhow::Result; use rand::{distributions::Uniform, thread_rng, Rng}; #[derive(PointType, Default)] #[repr(C)] struct TestPointTypeSmall { #[pasture(BUILTIN_POSITION_3D)] pub position: Vector3<f64>, } // Internal Tests fn test_normals_for_triangles(triangles: &Vec<convexhull::Triangle>, normals: &Vec<Vector3<f64>>) { for n in normals { let mut found = false; for t in triangles.iter() { if f64::abs(t.normal.normalize().dot(&n) - 1.0) < 0.0001 { found = true; break; } } assert!(found); } } fn test_all_points_inside_hull<T: PointBuffer>(buffer: &T, triangles: &Vec<convexhull::Triangle>) { let position_attribute = buffer.point_layout().get_attribute_by_name(POSITION_3D.name()).unwrap(); if position_attribute.datatype() == POSITION_3D.datatype() { for point in buffer.iter_attribute::<Vector3<f64>>(&POSITION_3D) { for t in triangles.iter() { let a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, t.a); let pa = a - point; assert!(pa.dot(&t.normal) >= -0.0000001); } } } else { for point in buffer.iter_attribute_as::<Vector3<f64>>(&POSITION_3D) { for t in triangles.iter() { let a: Vector3<f64> = buffer.get_attribute(&POSITION_3D, t.a); let pa = a - point; assert!(pa.dot(&t.normal) >= -0.0000001); } } }; } #[test] fn test_convex_simple_triangle() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); 
buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 1.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 2); let normals = vec![Vector3::new(0.0, 1.0, 0.0), Vector3::new(0.0, -1.0, 0.0)]; test_normals_for_triangles(&result, &normals); Ok(()) } #[test] fn test_convex_simple_tet_4_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(4, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 1.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(-1.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), Vector3::new(0.0, 0.0, -1.0), Vector3::new(1.0, 1.0, 1.0).normalize()]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_simple_tet_5_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, -1.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(1.0, 1.0, 1.0).normalize(), Vector3::new(1.0, 1.0, -3.0).normalize(), Vector3::new(1.0, -3.0, 1.0).normalize(), 
Vector3::new(-3.0, 1.0, 1.0).normalize()]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_1_point() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(1, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 1); assert_eq!(result[0].a, 0); Ok(()) } #[test] fn test_convex_line_2_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(2, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 1); assert_eq!(result[0].a, 0); assert_eq!(result[0].b, 1); Ok(()) } #[test] fn test_convex_line_3_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 0.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 1); assert_eq!(result[0].a, 0); assert_eq!(result[0].b, 2); Ok(()) } #[test] fn test_convex_line_4_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(4, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 0.0, 0.0) }); let result = 
convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 1); assert_eq!(result[0].a, 3); assert_eq!(result[0].b, 2); Ok(()) } #[test] fn test_convex_plane_4_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(4, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 1.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 1.0, 0.0), Vector3::new(0.0, -1.0, 0.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_point_in_square() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_point_next_to_square_1() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: 
Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 0.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 6); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_point_next_to_square_2() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 2.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 6); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_point_next_to_square_3() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 2.0, 0.0) }); let result = 
convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_point_next_to_square_4() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(5, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-2.0, 2.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_random_1d_points_in_box_create_box_first() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(22, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); let mut rng = thread_rng(); for _ in 0..20 { buffer.push_point(TestPointTypeSmall { position: Vector3::new(rng.sample(Uniform::new(-0.9, 0.9)), 0.0, 0.0) }); } let result = convexhull::convex_hull_as_points(&buffer); assert_eq!(result.len(), 2); assert!(result.contains(&0)); assert!(result.contains(&1)); Ok(()) } #[test] fn test_convex_random_1d_points_in_box_create_box_last() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(22, TestPointTypeSmall::layout()); let mut rng = thread_rng(); for _ in 0..20 { 
buffer.push_point(TestPointTypeSmall { position: Vector3::new(rng.sample(Uniform::new(-0.9, 0.9)), 0.0, 0.0) }); } buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); let result = convexhull::convex_hull_as_points(&buffer); assert_eq!(result.len(), 2); assert!(result.contains(&20)); assert!(result.contains(&21)); Ok(()) } #[test] fn test_convex_random_2d_points_in_box_create_box_first() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(24, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); let mut rng = thread_rng(); for _ in 0..20 { buffer.push_point(TestPointTypeSmall { position: Vector3::new(rng.sample(Uniform::new(-0.9, 0.9)), rng.sample(Uniform::new(-0.9, 0.9)), 0.0) }); } let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_points_in_box_create_box_last_case_1() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(6, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.5, 0.2, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-0.5, -0.3, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); 
buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_points_in_box_create_box_last_case_2() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(6, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.2, 0.1, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-0.9, 0.3, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_2d_points_in_box_create_box_last_case_3() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(7, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-0.3, -0.3, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.9, -0.4, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.2, 0.1, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: 
Vector3::new(-1.0, 1.0, 0.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 4); let normals = vec![Vector3::new(0.0, 0.0, 1.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_random_points_in_box_create_box_first() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(28, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 1.0) }); let mut rng = thread_rng(); for _ in 0..20 { buffer.push_point(TestPointTypeSmall { position: Vector3::new(rng.sample(Uniform::new(-0.9, 0.9)), rng.sample(Uniform::new(-0.9, 0.9)), rng.sample(Uniform::new(-0.9, 0.9))) }); } let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 12); let normals = vec![Vector3::new(1.0, 0.0, 0.0), Vector3::new(0.0, 1.0, 0.0), Vector3::new(0.0, 0.0, 1.0), Vector3::new(-1.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_random_points_in_box_create_box_last() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(28, TestPointTypeSmall::layout()); let mut rng = thread_rng(); for _ in 0..20 { buffer.push_point(TestPointTypeSmall { 
position: Vector3::new(rng.sample(Uniform::new(-0.9, 0.9)), rng.sample(Uniform::new(-0.9, 0.9)), rng.sample(Uniform::new(-0.9, 0.9))) }); } buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, 1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, -1.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 1.0, 1.0) }); let result = convexhull::create_convex_hull(&buffer); assert_eq!(result.len(), 12); let normals = vec![Vector3::new(1.0, 0.0, 0.0), Vector3::new(0.0, 1.0, 0.0), Vector3::new(0.0, 0.0, 1.0), Vector3::new(-1.0, 0.0, 0.0), Vector3::new(0.0, -1.0, 0.0), Vector3::new(0.0, 0.0, -1.0)]; test_normals_for_triangles(&result, &normals); test_all_points_inside_hull(&buffer, &result); Ok(()) } #[test] fn test_convex_random_points() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(100, TestPointTypeSmall::layout()); let mut rng = thread_rng(); for _ in 0..100 { buffer.push_point(TestPointTypeSmall { position: Vector3::new(rng.sample(Uniform::new(-100.0, 100.0)), rng.sample(Uniform::new(-100.0, 100.0)), rng.sample(Uniform::new(-100.0, 100.0))) }); } let result = convexhull::create_convex_hull(&buffer); test_all_points_inside_hull(&buffer, &result); Ok(()) } // Interface Tests #[test] fn test_convex_0_point_output_mesh_error() -> Result<()> { let buffer = PerAttributeVecPointStorage::with_capacity(0, TestPointTypeSmall::layout()); let result = convexhull::convex_hull_as_triangle_mesh(&buffer); assert_eq!(result.unwrap_err().to_string(), "input buffer cointains too few 
linearly independent points"); Ok(()) } #[test] fn test_convex_1_point_output_mesh_error() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(1, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); let result = convexhull::convex_hull_as_triangle_mesh(&buffer); assert_eq!(result.unwrap_err().to_string(), "input buffer cointains too few linearly independent points"); Ok(()) } #[test] fn test_convex_2_point_output_mesh_error() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(2, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); let result = convexhull::convex_hull_as_triangle_mesh(&buffer); assert_eq!(result.unwrap_err().to_string(), "input buffer cointains too few linearly independent points"); Ok(()) } #[test] fn test_convex_3_point_output_mesh_error_same_point() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); let result = convexhull::convex_hull_as_triangle_mesh(&buffer); assert_eq!(result.unwrap_err().to_string(), "input buffer cointains too few linearly independent points"); Ok(()) } #[test] fn test_convex_3_point_output_mesh_error_line() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 0.0, 0.0) }); let result = 
convexhull::convex_hull_as_triangle_mesh(&buffer); assert_eq!(result.unwrap_err().to_string(), "input buffer cointains too few linearly independent points"); Ok(()) } #[test] fn test_convex_3_point_output_mesh_no_error() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 1.0, 0.0) }); let result = convexhull::convex_hull_as_triangle_mesh(&buffer); let result_unwrapped = result.unwrap(); assert_eq!(result_unwrapped.len(), 2); assert!(result_unwrapped.contains(&Vector3::new(0, 1, 2))); assert!(result_unwrapped.contains(&Vector3::new(0, 2, 1))); Ok(()) } #[test] fn test_convex_3_point_output_points_line() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(3, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(2.0, 0.0, 0.0) }); let result = convexhull::convex_hull_as_points(&buffer); assert_eq!(result.len(), 2); assert!(result.contains(&0)); assert!(result.contains(&2)); Ok(()) } #[test] fn test_convex_4_point_output_point_in_triangle() -> Result<()> { let mut buffer = PerAttributeVecPointStorage::with_capacity(4, TestPointTypeSmall::layout()); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 0.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(-1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(1.0, -1.0, 0.0) }); buffer.push_point(TestPointTypeSmall { position: Vector3::new(0.0, 1.0, 0.0) }); let result = convexhull::convex_hull_as_points(&buffer); assert_eq!(result.len(), 3); 
assert!(result.contains(&1)); assert!(result.contains(&2)); assert!(result.contains(&3)); Ok(()) } #[derive(PointType, Default)] #[repr(C)] struct TestPointTypeNoPositions { #[pasture(BUILTIN_INTENSITY)] pub intensity: u16, } #[test] #[should_panic(expected = "point buffer contains no position attribute")] fn test_convex_no_positions_panic() { let mut buffer = PerAttributeVecPointStorage::with_capacity(1, TestPointTypeNoPositions::layout()); buffer.push_point(TestPointTypeNoPositions { intensity: 1 }); let _result = convexhull::convex_hull_as_triangle_mesh(&buffer); } }
// RGB Rust Library // Written in 2019 by // Dr. Maxim Orlovsky <dr.orlovsky@gmail.com> // basing on ideas from the original RGB rust library by // Alekos Filini <alekos.filini@gmail.com> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the MIT License // along with this software. // If not, see <https://opensource.org/licenses/MIT>. use crate::*; /// Trait to be used by custom contract blueprint implementation to provide its own custom fields. pub trait ContractBody: Sized { /// Validates given proof to have a correct structure matching RGB contract blueprint. /// This is default implementation that checks nothing, all required functionality for specific /// blueprint type (like checking proof metadata/scripts) must be implemented by custom /// classes implementing `ContractBody` trait. fn validate_proof(&self, _: &Proof<Self>) -> Result<(), RgbError<Self>> { Ok(()) } }
impl Solution {
    /// Returns the element that occurs exactly once, given that every other
    /// element of `nums` occurs exactly twice.
    ///
    /// XOR-folds all values: identical pairs cancel out (x ^ x == 0), so the
    /// accumulator ends up holding the unique element. O(n) time, O(1) space.
    pub fn single_number(nums: Vec<i32>) -> i32 {
        nums.into_iter().fold(0, |acc, x| acc ^ x)
    }
}
#[doc = "Register `PUPDR` reader"] pub type R = crate::R<PUPDR_SPEC>; #[doc = "Register `PUPDR` writer"] pub type W = crate::W<PUPDR_SPEC>; #[doc = "Field `PUPD0` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD0_R = crate::FieldReader; #[doc = "Field `PUPD0` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD1` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD1_R = crate::FieldReader; #[doc = "Field `PUPD1` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD1_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD2` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD2_R = crate::FieldReader; #[doc = "Field `PUPD2` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or 
pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD2_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD3` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD3_R = crate::FieldReader; #[doc = "Field `PUPD3` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD3_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD4` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD4_R = crate::FieldReader; #[doc = "Field `PUPD4` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD4_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD5` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD5_R = crate::FieldReader; #[doc = "Field `PUPD5` writer - Port x configuration I/O pin y (y = 15 
to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD5_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD6` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD6_R = crate::FieldReader; #[doc = "Field `PUPD6` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD6_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD7` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD7_R = crate::FieldReader; #[doc = "Field `PUPD7` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD7_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD8` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD8_R = 
crate::FieldReader; #[doc = "Field `PUPD8` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD8_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD9` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD9_R = crate::FieldReader; #[doc = "Field `PUPD9` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD9_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD10` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD10_R = crate::FieldReader; #[doc = "Field `PUPD10` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD10_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD11` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the 
corresponding I/O is not available on the selected package."] pub type PUPD11_R = crate::FieldReader; #[doc = "Field `PUPD11` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD11_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD12` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD12_R = crate::FieldReader; #[doc = "Field `PUPD12` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD12_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD13` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD13_R = crate::FieldReader; #[doc = "Field `PUPD13` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD13_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD14` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up 
or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD14_R = crate::FieldReader; #[doc = "Field `PUPD14` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD14_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; #[doc = "Field `PUPD15` reader - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD15_R = crate::FieldReader; #[doc = "Field `PUPD15` writer - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] pub type PUPD15_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>; impl R { #[doc = "Bits 0:1 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd0(&self) -> PUPD0_R { PUPD0_R::new((self.bits & 3) as u8) } #[doc = "Bits 2:3 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd1(&self) -> PUPD1_R { PUPD1_R::new(((self.bits >> 2) & 3) as u8) } #[doc 
= "Bits 4:5 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd2(&self) -> PUPD2_R { PUPD2_R::new(((self.bits >> 4) & 3) as u8) } #[doc = "Bits 6:7 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd3(&self) -> PUPD3_R { PUPD3_R::new(((self.bits >> 6) & 3) as u8) } #[doc = "Bits 8:9 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd4(&self) -> PUPD4_R { PUPD4_R::new(((self.bits >> 8) & 3) as u8) } #[doc = "Bits 10:11 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd5(&self) -> PUPD5_R { PUPD5_R::new(((self.bits >> 10) & 3) as u8) } #[doc = "Bits 12:13 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd6(&self) -> PUPD6_R { PUPD6_R::new(((self.bits >> 12) & 3) as u8) } #[doc = "Bits 14:15 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or 
pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd7(&self) -> PUPD7_R { PUPD7_R::new(((self.bits >> 14) & 3) as u8) } #[doc = "Bits 16:17 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd8(&self) -> PUPD8_R { PUPD8_R::new(((self.bits >> 16) & 3) as u8) } #[doc = "Bits 18:19 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd9(&self) -> PUPD9_R { PUPD9_R::new(((self.bits >> 18) & 3) as u8) } #[doc = "Bits 20:21 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd10(&self) -> PUPD10_R { PUPD10_R::new(((self.bits >> 20) & 3) as u8) } #[doc = "Bits 22:23 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd11(&self) -> PUPD11_R { PUPD11_R::new(((self.bits >> 22) & 3) as u8) } #[doc = "Bits 24:25 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available 
on the selected package."] #[inline(always)] pub fn pupd12(&self) -> PUPD12_R { PUPD12_R::new(((self.bits >> 24) & 3) as u8) } #[doc = "Bits 26:27 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd13(&self) -> PUPD13_R { PUPD13_R::new(((self.bits >> 26) & 3) as u8) } #[doc = "Bits 28:29 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd14(&self) -> PUPD14_R { PUPD14_R::new(((self.bits >> 28) & 3) as u8) } #[doc = "Bits 30:31 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] pub fn pupd15(&self) -> PUPD15_R { PUPD15_R::new(((self.bits >> 30) & 3) as u8) } } impl W { #[doc = "Bits 0:1 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd0(&mut self) -> PUPD0_W<PUPDR_SPEC, 0> { PUPD0_W::new(self) } #[doc = "Bits 2:3 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd1(&mut self) -> 
PUPD1_W<PUPDR_SPEC, 2> { PUPD1_W::new(self) } #[doc = "Bits 4:5 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd2(&mut self) -> PUPD2_W<PUPDR_SPEC, 4> { PUPD2_W::new(self) } #[doc = "Bits 6:7 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd3(&mut self) -> PUPD3_W<PUPDR_SPEC, 6> { PUPD3_W::new(self) } #[doc = "Bits 8:9 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd4(&mut self) -> PUPD4_W<PUPDR_SPEC, 8> { PUPD4_W::new(self) } #[doc = "Bits 10:11 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd5(&mut self) -> PUPD5_W<PUPDR_SPEC, 10> { PUPD5_W::new(self) } #[doc = "Bits 12:13 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd6(&mut self) -> PUPD6_W<PUPDR_SPEC, 12> { PUPD6_W::new(self) } #[doc = "Bits 14:15 - Port x configuration I/O pin y 
(y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd7(&mut self) -> PUPD7_W<PUPDR_SPEC, 14> { PUPD7_W::new(self) } #[doc = "Bits 16:17 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd8(&mut self) -> PUPD8_W<PUPDR_SPEC, 16> { PUPD8_W::new(self) } #[doc = "Bits 18:19 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd9(&mut self) -> PUPD9_W<PUPDR_SPEC, 18> { PUPD9_W::new(self) } #[doc = "Bits 20:21 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd10(&mut self) -> PUPD10_W<PUPDR_SPEC, 20> { PUPD10_W::new(self) } #[doc = "Bits 22:23 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd11(&mut self) -> PUPD11_W<PUPDR_SPEC, 22> { PUPD11_W::new(self) } #[doc = "Bits 24:25 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or 
pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd12(&mut self) -> PUPD12_W<PUPDR_SPEC, 24> { PUPD12_W::new(self) } #[doc = "Bits 26:27 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd13(&mut self) -> PUPD13_W<PUPDR_SPEC, 26> { PUPD13_W::new(self) } #[doc = "Bits 28:29 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd14(&mut self) -> PUPD14_W<PUPDR_SPEC, 28> { PUPD14_W::new(self) } #[doc = "Bits 30:31 - Port x configuration I/O pin y (y = 15 to 0) These bits are written by software to configure the I/O pull-up or pull-down Note: The bitfield is reserved and must be kept to reset value when the corresponding I/O is not available on the selected package."] #[inline(always)] #[must_use] pub fn pupd15(&mut self) -> PUPD15_W<PUPDR_SPEC, 30> { PUPD15_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "GPIO port pull-up/pull-down register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pupdr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pupdr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. 
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct PUPDR_SPEC; impl crate::RegisterSpec for PUPDR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`pupdr::R`](R) reader structure"] impl crate::Readable for PUPDR_SPEC {} #[doc = "`write(|w| ..)` method takes [`pupdr::W`](W) writer structure"] impl crate::Writable for PUPDR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets PUPDR to value 0x6400_0000"] impl crate::Resettable for PUPDR_SPEC { const RESET_VALUE: Self::Ux = 0x6400_0000; }
pub mod dpkp_algorithm {
    use std::fmt;
    use std::fs::File;
    use std::io::prelude::*;
    use std::io::{BufReader, Error, ErrorKind};
    use std::time::Instant;

    /// Solver for the 0/1 knapsack problem using the classic bottom-up
    /// dynamic-programming table of size (n_items + 1) x (capacity + 1).
    pub struct KnapsackDP {
        /// Path of the instance file to load.
        pub filename: String,
        /// Directory where results should be written (not used inside this module).
        pub path_to_results: String,
        /// Number of items in the instance (-1 until an instance is loaded).
        pub n_items: i32,
        /// Knapsack capacity Q (-1 until an instance is loaded).
        pub capacity: i32,
        /// Profit of each item.
        pub profits: Vec<i32>,
        /// Weight of each item.
        pub weights: Vec<i32>,
        /// DP table: table[i][j] = best profit using the first i items with capacity j.
        pub table: Vec<Vec<i32>>,
        /// Wall-clock seconds spent in `run` (-1.0 until `run` is called).
        pub elapsed_time: f64,
    }

    impl Default for KnapsackDP {
        fn default() -> KnapsackDP {
            KnapsackDP {
                filename: "".to_string(),
                path_to_results: "".to_string(),
                n_items: -1,
                capacity: -1,
                profits: vec![],
                weights: vec![],
                table: vec![],
                elapsed_time: -1.0,
            }
        }
    }

    impl fmt::Display for KnapsackDP {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // Propagate formatting errors to the caller instead of panicking
            // inside `fmt` (the old `.expect(...)` aborted on any write error).
            write!(
                f,
                "n_items {}, Q = {}\nw = {:?}\np = {:?}",
                self.n_items, self.capacity, self.weights, self.profits
            )
        }
    }

    /// Parse an optional whitespace-split token as `i32`, mapping both a
    /// missing token and a malformed number to `io::Error` (InvalidData).
    fn parse_i32(token: Option<&str>) -> std::io::Result<i32> {
        token
            .ok_or_else(|| Error::new(ErrorKind::InvalidData, "missing integer field"))?
            .parse::<i32>()
            .map_err(|e| Error::new(ErrorKind::InvalidData, e))
    }

    impl KnapsackDP {
        /// Load an instance file into `self`.
        ///
        /// Format: the first line is `"<n_items> <capacity>"`; each following
        /// line is `"<weight> <profit>"`. Lines that do not contain exactly
        /// two numbers (e.g. blank lines) are skipped, as before.
        ///
        /// # Errors
        /// Returns an `io::Error` if the file cannot be opened or read, or if
        /// a numeric field is missing/malformed (previously these panicked).
        pub fn load_instance(&mut self) -> std::io::Result<()> {
            // `?` instead of `.expect(...)`: this function already returns
            // io::Result, so failures should be propagated, not panic.
            let file = File::open(&self.filename)?;
            let mut reader = BufReader::new(file);

            let mut header = String::new();
            reader.read_line(&mut header)?;
            let mut fields = header.split_whitespace();
            self.n_items = parse_i32(fields.next())?;
            self.capacity = parse_i32(fields.next())?;

            // NOTE(review): weights/profits are appended to, not cleared, so
            // calling `load_instance` twice accumulates items — confirm callers
            // only load once.
            self.profits.reserve(self.n_items as usize);
            self.weights.reserve(self.n_items as usize);
            self.table = vec![vec![0; (self.capacity + 1) as usize]; (self.n_items + 1) as usize];

            for line in reader.lines() {
                let line = line?;
                let tokens: Vec<i32> = line
                    .split_whitespace()
                    .map(|s| {
                        s.parse::<i32>()
                            .map_err(|e| Error::new(ErrorKind::InvalidData, e))
                    })
                    .collect::<Result<_, _>>()?;
                // Only well-formed "<weight> <profit>" lines are kept.
                if tokens.len() == 2 {
                    self.weights.push(tokens[0]);
                    self.profits.push(tokens[1]);
                }
            }
            Ok(())
        }

        /// Fill the DP table bottom-up and return `(best_profit, elapsed_seconds)`.
        /// Also records the elapsed time in `self.elapsed_time`.
        pub fn run(&mut self) -> (i32, f64) {
            let start_time = Instant::now();
            for i in 0..=self.n_items {
                for j in 0..=self.capacity {
                    let (iu, ju) = (i as usize, j as usize);
                    let value: i32 = if i == 0 || j == 0 {
                        // No items considered or zero capacity: profit is 0.
                        0
                    } else {
                        let w = self.weights[iu - 1];
                        let without_item = self.table[iu - 1][ju];
                        if w <= j {
                            // Either take item i (its profit plus the best with
                            // the remaining capacity) or leave it — keep the max.
                            std::cmp::max(
                                self.profits[iu - 1] + self.table[iu - 1][(j - w) as usize],
                                without_item,
                            )
                        } else {
                            without_item
                        }
                    };
                    self.table[iu][ju] = value;
                }
            }
            self.elapsed_time = start_time.elapsed().as_secs_f64();
            let best = self.table[self.n_items as usize][self.capacity as usize];
            (best, self.elapsed_time)
        }
    }
}
use crate::crypto::hash::{Hashable, H256}; use crate::transaction::{CoinId, Input, Transaction}; use std::collections::BTreeMap; use std::collections::HashMap; use std::collections::VecDeque; /// transactions storage #[derive(Debug)] pub struct MemoryPool { /// Number of transactions num_transactions: u64, /// Maximum number that the memory pool can hold max_transactions: u64, /// Counter for storage index counter: u64, /// By-hash storage by_hash: HashMap<H256, Entry>, /// Transactions by previous output, formatted as Input by_input: HashMap<Input, H256>, /// Storage for order by storage index, it is equivalent to FIFO by_storage_index: BTreeMap<u64, H256>, } #[derive(Debug, Clone)] pub struct Entry { /// Transaction pub transaction: Transaction, /// counter of the tx storage_index: u64, } impl MemoryPool { pub fn new(size_limit: u64) -> Self { Self { num_transactions: 0, max_transactions: size_limit, counter: 0, by_hash: HashMap::new(), by_input: HashMap::new(), by_storage_index: BTreeMap::new(), } } /// Insert a tx into memory pool. The input of it will also be recorded. pub fn insert(&mut self, tx: Transaction) { if self.num_transactions > self.max_transactions { return; } // assumes no duplicates nor double spends let hash = tx.hash(); let entry = Entry { transaction: tx, storage_index: self.counter, }; self.counter += 1; // associate all inputs with this transaction for input in &entry.transaction.input { self.by_input.insert(input.clone(), hash); } // add to btree self.by_storage_index.insert(entry.storage_index, hash); // add to hashmap self.by_hash.insert(hash, entry); self.num_transactions += 1; } pub fn get(&self, h: &H256) -> Option<&Entry> { let entry = self.by_hash.get(h)?; Some(entry) } /// Check whether a tx hash is in memory pool /// When adding tx into mempool, should check this. pub fn contains(&self, h: &H256) -> bool { self.by_hash.contains_key(h) } /// Check whether the input of a tx is already recorded. If so, this tx is a double spend. 
/// When adding tx into mempool, should check this. pub fn is_double_spend(&self, inputs: &[Input]) -> bool { inputs.iter().any(|input| self.by_input.contains_key(input)) } fn remove_and_get(&mut self, hash: &H256) -> Option<Entry> { let entry = self.by_hash.remove(hash)?; for input in &entry.transaction.input { self.by_input.remove(&input); } self.by_storage_index.remove(&entry.storage_index); self.num_transactions -= 1; Some(entry) } /// Remove a tx by its hash, also remove its recorded inputs pub fn remove_by_hash(&mut self, hash: &H256) { self.remove_and_get(hash); } /// Remove potential tx that use this input. /// This function runs recursively, so it may remove more transactions. pub fn remove_by_input(&mut self, prevout: &Input) { //use a deque to recursively remove, in case there are multi level dependency between txs. let mut queue: VecDeque<Input> = VecDeque::new(); queue.push_back(prevout.clone()); while let Some(prevout) = queue.pop_front() { if let Some(entry_hash) = self.by_input.get(&prevout) { let entry_hash = *entry_hash; let entry = self.remove_and_get(&entry_hash).unwrap(); for (index, output) in entry.transaction.output.iter().enumerate() { queue.push_back(Input { coin: CoinId { hash: entry_hash, index: index as u32, }, value: output.value, owner: output.recipient, }); } } } } /// get n transaction by fifo pub fn get_transactions(&self, n: u32) -> Vec<Transaction> { self.by_storage_index .values() .take(n as usize) .map(|hash| self.get(hash).unwrap().transaction.clone()) .collect() } /// get size/length pub fn len(&self) -> usize { self.by_hash.len() } } #[cfg(test)] pub mod tests {}
//! This example demonstrates using [`Color`] as a [`CellOption`] modifier to stylize //! the cells of a [`Table`]. //! //! * Note how the [`Color`] [setting](tabled::settings) is used to simplify creating //! reusable themes for backgrounds. use tabled::{ settings::{Color, Modify, Style}, Table, Tabled, }; #[derive(Tabled)] struct Bsd { distribution: &'static str, year_of_first_release: usize, is_active: bool, } impl Bsd { fn new(distribution: &'static str, year_of_first_release: usize, is_active: bool) -> Self { Self { distribution, year_of_first_release, is_active, } } } fn main() { let data = vec![ Bsd::new("BSD", 1978, false), Bsd::new("SunOS", 1982, false), Bsd::new("NetBSD", 1993, true), Bsd::new("FreeBSD", 1993, true), Bsd::new("OpenBSD", 1995, true), ]; let mut table = Table::new(data); table .with(Style::psql()) .with(Modify::new((0, 0)).with(Color::BG_BLUE)) .with(Modify::new((1, 1)).with(Color::BG_GREEN)) .with(Modify::new((2, 2)).with(Color::BG_RED)); println!("{table}"); }
#![feature(alloc_system)] extern crate alloc_system; extern crate relay_mono; extern crate tessel; use relay_mono::RelayArray; use tessel::Tessel; use std::thread::sleep; use std::time::Duration; fn main() { // Acquire port A. let (port_a, _) = Tessel::ports().unwrap(); // Create the relay array. let mut servos = RelayArray::new(port_a); servos.connect().expect("Could not connect to relay array."); println!("Toggling relays every 1s... (Press CTRL + C to stop)"); loop { println!("[0, 0]"); sleep(Duration::from_millis(3000)); servos.set_latch(1, true); println!("[1, 0]"); sleep(Duration::from_millis(3000)); servos.set_latch(2, true); println!("[1, 1]"); sleep(Duration::from_millis(3000)); servos.set_latch(1, false); println!("[0, 0]"); sleep(Duration::from_millis(3000)); servos.set_latch(2, false); } }
mod instruction; mod bus; mod opcodes; mod addressing_modes; mod registers; pub use bus::Bus; use opcodes::OPCODES; use registers::Registers; /// MOS 6502 Processor emulator pub struct Cpu { registers: Registers } impl Cpu { /// /// Constructs a Cpu struct. /// pub fn new() -> Cpu { Cpu { registers: Registers::new(), } } /// /// Resets the processor, fetching the reset handler and jumping to it. /// pub fn reset<T: Bus>(&mut self, bus: &T) { // Restore original state *self = Cpu::new(); // Jump to the reset handler. let low_byte : u16 = self.step_program_counter(bus).into(); let high_byte : u16 = self.step_program_counter(bus).into(); let reset_vector = low_byte | (high_byte << 8); self.registers.program_counter = reset_vector; } /// /// Runs a single instruction of the processor. /// /// # Example ///``` /// struct GndBus { } /// /// impl mos6502::Bus for GndBus { /// fn write(&mut self, _addr: u16, _value: u8) { /// // Can't write in the Gnd bus, everything is tied to 0. /// } /// fn read(&self, _addr: u16) -> u8 { /// 0u8 /// } /// } /// /// let mut bus = GndBus {}; /// let mut mos6502 = mos6502::Cpu::new(); /// mos6502.reset(&mut bus); /// mos6502.single_step(&mut bus); ///``` /// pub fn single_step<T>(&mut self, bus: &mut T) where T: Bus { // Fetch opcode let (instruction, addressing_mode) = OPCODES[self.step_program_counter(bus) as usize].unwrap(); let operand = addressing_mode.get_operand(bus, &mut self.registers); instruction.process(operand, bus, &mut self.registers); } /// Signals an interrupt (IRQB signal) to the core. pub fn signal_irq(&mut self) { self.registers.irq_active = true; } /// Signals a NMI Interrupt to the core. 
pub fn signal_nmi(&mut self) { self.registers.nmi_active = true; } /// /// Steps the program counter and returns the value at /// the current PC in the supplied Bus /// fn step_program_counter<T: Bus>(&mut self, bus: &T) -> u8 { let result = bus.read(self.registers.program_counter); self.registers.program_counter += 1; result } } #[cfg(test)] mod tests { use super::*; struct DummyBus { data: [u8; 0x10000], } impl DummyBus { fn new() -> DummyBus { DummyBus { data: [0u8; 0x10000] } } } impl<'a> Bus for DummyBus { fn write(&mut self, _addr: u16, _value: u8) { } fn read(&self, addr: u16) -> u8 { self.data[addr as usize] } } #[test] fn test_reset() { let mut cpu = Cpu::new(); let mut bus = DummyBus::new(); bus.data[0xFFFC] = 0x12; bus.data[0xFFFD] = 0x34; cpu.reset(&bus); assert_eq!(cpu.registers.program_counter, 0x3412); assert_eq!(cpu.registers.accumulator, 0); } }
/* Command-line handling for primenet-rs: the option structs produced by
   parsing, two helper macros that map clap matches onto work-type enums, and
   the full argument definition + parsing for the `p95` (Primenet) and `gpu72`
   (GPU to 72) subcommands. */
use super::gpu72_work::*;
use super::lists::*;
use super::p95_work::*;
use super::validators::*;
use clap::{App, Arg, ArgGroup};
use std::env::current_dir;
use std::fs::File;
use std::io::{BufReader, Read};

/* Options common to both services: the directory holding
   worktodo.txt/worktodo.ini and results.txt, the number of assignments to
   cache, and the seconds between network updates (0 = single update). */
#[derive(Clone, Debug)] pub struct GeneralOptions { pub work_directory: String, pub num_cache: usize, pub timeout: usize, }

/* Parsed options for the `p95` subcommand; `credentials` is
   (username, password). */
#[derive(Clone, Debug)] pub struct PrimenetOptions { pub credentials: (String, String), pub work_type: PrimenetWorkType, pub general_options: GeneralOptions, }

/* Parsed options for the `gpu72` subcommand. `primenet_credentials` is only
   populated when --p95-fallback is given; `max_exp` is the --max-exponent
   upper limit. */
#[derive(Clone, Debug)] pub struct Gpu72Options { pub primenet_credentials: Option<(String, String)>, pub gpu72_credentials: (String, String), pub work_type: Gpu72WorkType, pub max_exp: u8, pub general_options: GeneralOptions, }

/* Which service the user selected, with its fully-parsed options. */
#[derive(Clone, Debug)] pub enum Options { Primenet(PrimenetOptions), Gpu72(Gpu72Options), }

/* Expands to an if/else-if chain over `$matches.is_present(..)` selecting a
   work-type variant. Each `"flag" => Type { "opt" -> expr; ... _ -> expr; }`
   arm nests a second chain that picks the work *option* for that type, with a
   mandatory `_ ->` fallback; a bare `_ -> expr;` arm yields the expression
   directly without an option chain. */
macro_rules! map_matches { ( $matches:ident, $work_string_i:literal => $worktype_i:path { $($worktype_i_opt_string_i:literal -> $worktype_i_opt_i:expr; $( $worktype_i_opt_string_ei:literal -> $worktype_i_opt_ei:expr; )* _ -> $worktype_i_opt_e:expr;)? $(_ -> $worktype_i_other:expr;)? } $($work_string_ei:literal => $worktype_ei:path { $($worktype_ei_opt_string_i:literal -> $worktype_ei_opt_i:expr; $( $worktype_ei_opt_string_ei:literal -> $worktype_ei_opt_ei:expr; )* _ -> $worktype_ei_opt_e:expr;)? $(_ -> $worktype_ei_other:expr;)? })* _ => $worktype_e:path { $($worktype_e_opt_string_i:literal -> $worktype_e_opt_i:expr; $( $worktype_e_opt_string_ei:literal -> $worktype_e_opt_ei:expr; )* _ -> $worktype_e_opt_e:expr;)? $(_ -> $worktype_e_other:expr;)? } ) => {{ if $matches.is_present($work_string_i) { $($worktype_i(if $matches.is_present($worktype_i_opt_string_i) { $worktype_i_opt_i } $(else if $matches.is_present($worktype_i_opt_string_ei) { $worktype_i_opt_ei })* else { $worktype_i_opt_e }))? $($worktype_i_other)? } $(else if $matches.is_present($work_string_ei) { $($worktype_ei(if $matches.is_present($worktype_ei_opt_string_i) { $worktype_ei_opt_i } $(else if $matches.is_present($worktype_ei_opt_string_ei) { $worktype_ei_opt_ei })* else { $worktype_ei_opt_e }))? $($worktype_ei_other)? })* else { $($worktype_e(if $matches.is_present($worktype_e_opt_string_i) { $worktype_e_opt_i } $(else if $matches.is_present($worktype_e_opt_string_ei) { $worktype_e_opt_ei })* else { $worktype_e_opt_e }))? $($worktype_e_other)? } }} }

/* Flat variant of map_matches: maps presence of a flag straight to a path
   (no per-type option chain), with a required `_ =>` fallback. */
macro_rules! map_matches_simple { ( $matches:ident, $work_string_i:literal => $worktype_i_path:path; $($work_string_ei:literal => $worktype_ei_path:path;)* _ => $worktype_e_path:path; ) => {{ if $matches.is_present($work_string_i) { $worktype_i_path } $(else if $matches.is_present($work_string_ei) { $worktype_ei_path })* else { $worktype_e_path } }} }

/* Help epilogue (shown via after_help) describing GPU to 72 work types and
   the flags that select their options. Runtime text — do not edit casually. */
const GPU72_TYPES_AND_OPTS_HELP: &'static str = r"GPU to 72 work types and options: - Lucas-Lehmer trial factoring --gpu72-lucas-lehmer-trial-factor - What makes sense --gpu72-what-makes-sense - Lowest trial factor level --gpu72-lowest-trial-factor-level - Highest trial factor level --gpu72-highest-trial-factor-level - Lowest exponent --gpu72-lowest-exponent - Oldest exponent --gpu72-oldest-exponent - Lone Mersenne Hunters bit-first --gpu72-lone-mersenne-hunters-bit-first - Lone Mersenne Hunters depth-first --gpu72-lone-mersenne-hunters-depth-first - Let GPU to 72 decide --gpu72-let-gpu72-decide - Double-check trial factoring --gpu72-double-check-trial-factor - What makes sense --gpu72-what-makes-sense - Lowest trial factor level --gpu72-lowest-trial-factor-level - Highest trial factor level --gpu72-highest-trial-factor-level - Lowest exponent --gpu72-lowest-exponent - Oldest exponent --gpu72-oldest-exponent - Double-check already done --gpu72-double-check-already-done - Let GPU to 72 decide --gpu72-let-gpu72-decide - Lucas-Lehmer P-1 factoring --gpu72-lucas-lehmer-p1 - What makes sense --gpu72-what-makes-sense - Lowest exponent --gpu72-lowest-exponent - Oldest exponent --gpu72-oldest-exponent";

/* Builds the clap App for both subcommands, parses argv, resolves credentials
   (inline flags or credential files), and assembles the matching Options
   variant. Returns Err with a rendered clap error/usage string on bad input
   or a message when no subcommand was given. */
pub fn request_from_args() -> Result<Options, String> {
    /* Default for --work-directory: the process's current directory. */
    let current_dir = format!("{}", current_dir().unwrap().display());
    let matches = App::new("primenet-rs") .version("1.0.0") .about("Interface to request from and report to Primenet (GIMPS) and GPU to 72.") .author("Aurorans Solis")
        /* ---- `p95` (Primenet) subcommand ---- */
        .subcommand( App::new("p95") .author("Aurorans Solis") .version("1.0.0") .about("Interface to request from and report to Primenet (GIMPS)")
            .arg( Arg::with_name("work-directory") .short('w') .long("work-directory") .takes_value(true) .number_of_values(1) .value_name("WORKDIR") .default_value(&current_dir) .validator(directory_validator) .help("Working directory with worktodo.txt/worktodo.ini and results.txt") )
            .arg( Arg::with_name("num-cache") .short('n') .long("num-cache") .takes_value(true) .number_of_values(1) .value_name("NUM_CACHE") .default_value("1") .validator(numeric_validator) .help("Number of assignments to cache") )
            .arg( Arg::with_name("timeout") .short('t') .long("timeout") .number_of_values(1) .value_name("TIMEOUT") .default_value("0") .validator(numeric_validator) .help( "Seconds to wait between network updates. Use 0 for a single update \
without looping." ), )
            .group( ArgGroup::with_name("general options") .args(&["work-directory", "num-cache", "timeout"]) .multiple(true) )
            /* Credentials: username/password each accepted inline or from a file. */
            .arg( Arg::with_name("username") .long("p95-username") .takes_value(true) .number_of_values(1) .value_name("USERNAME") .validator(p95_username_validator) .help("Primenet username") .required_unless("username-file") )
            .arg( Arg::with_name("username-file") .long("p95-username-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing Primenet username") .required_unless("username") )
            .arg( Arg::with_name("password") .long("p95-password") .takes_value(true) .number_of_values(1) .value_name("PASSWORD") .help("Primenet password") .required_unless("password-file") )
            .arg( Arg::with_name("password-file") .long("p95-password-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing Primenet password") .required_unless("password") )
            /* One flag per Primenet work type; the "worktype" group below makes
               exactly one of them mandatory. */
            .arg( Arg::with_name("trial-factoring") .long("trial-factoring") .visible_alias("tf") .help("Request trial factoring work from Primenet") )
            .arg( Arg::with_name("p1-factoring") .long("p1-factoring") .visible_alias("p1f") .help("Request P-1 factoring work from Primenet") )
            .arg( Arg::with_name("ecm-factoring") .long("ecm-factoring") .visible_alias("ecmf") .help("Request Elliptic Curve Method factoring work from Primenet") )
            .arg( Arg::with_name("ecm-factoring-of-mersenne-cofactors") .long("ecm-factoring-of-mersenne-cofactors") .visible_alias("ecmfomc") .help( "Request Elliptic Curve Method factoring of Mersenne cofactors work \
from Primenet" ) )
            .arg( Arg::with_name("smallest-available-first-time-ll") .long("smallest-available-first-time-ll") .visible_alias("saftll") .help("Request smallest available first-time Lucas-Lehmer work from Primenet") )
            .arg( Arg::with_name("double-check-ll") .long("double-check-ll") .visible_alias("dcll") .help("Request Lucas-Lehmer double-check work from Primenet") )
            .arg( Arg::with_name("world-record-ll") .long("world-record-ll") .visible_alias("wrll") .help("Request world record-sized Lucas-Lehmer tests from Primenet") )
            .arg( Arg::with_name("100m-digits-ll") .long("100m-digits-ll") .visible_alias("100mdll") .help("Request 100M digits Lucas-Lehmer tests from Primenet") )
            .arg( Arg::with_name("smallest-available-first-time-prp") .long("smallest-available-first-time-prp") .visible_alias("saftprp") .help("Request smallest available probable prime work from Primenet") )
            .arg( Arg::with_name("double-check-prp") .long("double-check-prp") .visible_alias("dcprp") .help("Request double-check of probable prime work from Primenet") )
            .arg( Arg::with_name("world-record-prp") .long("world-record-prp") .visible_alias("wrprp") .help("Request world record-sized probable prime work from Primenet") )
            .arg( Arg::with_name("100m-digits-prp") .long("100m-digits-prp") .visible_alias("100mdprp") .help("Request 100M digits probable prime tests from Primenet") )
            .arg( Arg::with_name("first-prp-on-mersenne-cofactors") .long("first-prp-on-mersenne-cofactors") .visible_alias("fprpomc") .help("Request first PRP tests on Mersenne cofactors from Primenet") )
            .arg( Arg::with_name("double-check-prp-on-mersenne-cofactors") .long("double-check-prp-on-mersenne-cofactors") .visible_alias("dcprpomc") .help( "Request double-checks of PRP tests on Mersenne cofactors from Primenet" ) )
            .group( ArgGroup::with_name("worktype") .args(&[ "trial-factoring", "p1-factoring", "ecm-factoring", "ecm-factoring-of-mersenne-cofactors", "smallest-available-first-time-ll", "double-check-ll", "world-record-ll", "100m-digits-ll", "smallest-available-first-time-prp", "double-check-prp", "world-record-prp", "100m-digits-prp", "first-prp-on-mersenne-cofactors", "double-check-prp-on-mersenne-cofactors", ]) .required(true) .multiple(false) ) )
        /* ---- `gpu72` subcommand ---- */
        .subcommand( App::new("gpu72") .author("Aurorans Solis") .version("1.0.0") .about("Interface to request from and report to GPU to 72") .after_help(GPU72_TYPES_AND_OPTS_HELP)
            .arg( Arg::with_name("work-directory") .short('w') .long("work-directory") .takes_value(true) .number_of_values(1) .value_name("WORKDIR") .default_value(&current_dir) .validator(directory_validator) .help("Working directory with worktodo.txt/worktodo.ini and results.txt") )
            .arg( Arg::with_name("num-cache") .short('n') .long("num-cache") .takes_value(true) .number_of_values(1) .value_name("NUM_CACHE") .default_value("1") .validator(numeric_validator) .help("Number of assignments to cache") )
            .arg( Arg::with_name("timeout") .short('t') .long("timeout") .number_of_values(1) .value_name("TIMEOUT") .default_value("0") .validator(numeric_validator) .help( "Seconds to wait between network updates. Use 0 for a single update \
without looping." ) )
            .group( ArgGroup::with_name("general options") .args(&["work-directory", "num-cache", "timeout"]) .multiple(true) )
            /* GPU to 72 credentials — same inline-or-file scheme as p95, but
               here the inline and file variants explicitly conflict. */
            .arg( Arg::with_name("gpu72-username") .long("gpu72-username") .takes_value(true) .number_of_values(1) .value_name("USERNAME") .validator(gpu72_username_validator) .help("GPU to 72 username") .required_unless("gpu72-username-file") .conflicts_with("gpu72-username-file") )
            .arg( Arg::with_name("gpu72-username-file") .long("gpu72-username-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing GPU to 72 username") .required_unless("gpu72-username") .conflicts_with("gpu72-username") )
            .arg( Arg::with_name("gpu72-password") .long("gpu72-password") .takes_value(true) .number_of_values(1) .value_name("PASSWORD") .help("GPU to 72 password") .required_unless("gpu72-password-file") .conflicts_with("gpu72-password-file") )
            .arg( Arg::with_name("gpu72-password-file") .long("gpu72-password-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing GPU to 72 password") .required_unless("gpu72-password") .conflicts_with("gpu72-password") )
            /* Optional Primenet credentials, only meaningful with --p95-fallback. */
            .arg( Arg::with_name("p95-username") .long("p95-username") .takes_value(true) .number_of_values(1) .value_name("USERNAME") .validator(p95_username_validator) .help("Primenet username") )
            .arg( Arg::with_name("p95-username-file") .long("p95-username-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing Primenet username") )
            .group( ArgGroup::with_name("p95-user") .args(&["p95-username", "p95-username-file"]) .multiple(false) )
            .arg( Arg::with_name("p95-password") .long("p95-password") .takes_value(true) .number_of_values(1) .value_name("PASSWORD") .help("Primenet password") )
            .arg( Arg::with_name("p95-password-file") .long("p95-password-file") .takes_value(true) .number_of_values(1) .value_name("FILE_PATH") .validator(file_validator) .help("Path to file containing Primenet password") )
            .group( ArgGroup::with_name("p95-pass") .args(&["p95-password", "p95-password-file"]) .multiple(false) )
            .group( ArgGroup::with_name("p95-credentials") .args(&["p95-user", "p95-pass"]) .multiple(true) .requires_all(&["p95-user", "p95-pass"]) )
            /* NOTE(review): "p95-fallback-type" is referenced below but never
               defined as an arg or group anywhere in this builder — verify
               whether it was renamed or is missing. */
            .arg( Arg::with_name("p95-fallback") .long("p95-fallback") .help( "Fall back to Primenet if requests to GPU to 72 fail or it has no \
work. Always fetches trial factor work, regardless of GPU to 72 work \
type and options." ) .requires_all(&["p95-credentials", "p95-fallback-type"]) )
            .arg( Arg::with_name("max-exponent") .long("max-exponent") .visible_alias("max-exp") .takes_value(true) .number_of_values(1) .value_name("NUM") .validator(max_exp_validator) .default_value("72") .help("Upper limit of exponent") )
            /* Work-type flags; the *_LIST constants (from super::lists) make the
               flags mutually exclusive while requiring at least one. */
            .arg( Arg::with_name("lucas-lehmer-trial-factor") .visible_alias("lltf") .long("lucas-lehmer-trial-factor") .help("Request LL trial factoring work from GPU to 72") .required_unless_one(&GPU72LLTF_LIST) .conflicts_with_all(&GPU72LLTF_LIST) )
            .arg( Arg::with_name("double-check-trial-factor") .visible_alias("dctf") .long("double-check-trial-factor") .help("Request double-check trial factoring work from GPU to 72") .required_unless_one(&GPU72DCTF_LIST) .conflicts_with_all(&GPU72DCTF_LIST) )
            .arg( Arg::with_name("lucas-lehmer-p1") .visible_alias("llp1") .long("lucas-lehmer-p1") .help("Request LL P-1 work from GPU to 72") .required_unless_one(&GPU72LLP1_LIST) .conflicts_with_all(&GPU72LLP1_LIST) )
            .arg( Arg::with_name("what-makes-most-sense") .visible_alias("wmms") .long("what-makes-most-sense") .help("Ask GPU to 72 to assign whatever makes most sense.") .required_unless_one(&GPU72WMS_LIST) .conflicts_with_all(&GPU72WMS_LIST) )
            .arg( Arg::with_name("lowest-trial-factor-level") .visible_alias("ltfl") .long("lowest-trial-factor-level") .help("Request work of the lowest trial factoring level from GPU to 72") .required_unless_one(&GPU72LTFL_LIST) .conflicts_with_all(&GPU72LTFL_LIST) )
            .arg( Arg::with_name("highest-trial-factor-level") .visible_alias("htfl") .long("highest-trial-factor-level") .help("Request work of the highest trial factoring level from GPU to 72") .required_unless_one(&GPU72HTFL_LIST) .conflicts_with_all(&GPU72HTFL_LIST) )
            .arg( Arg::with_name("lowest-exponent") .visible_alias("le") .long("lowest-exponent") .help("Request the lowest exponent for the selected work type from GPU \
to 72") .required_unless_one(&GPU72LE_LIST) .conflicts_with_all(&GPU72LE_LIST) )
            .arg( Arg::with_name("oldest-exponent") .visible_alias("oe") .long("oldest-exponent") .help("Request the oldest exponent for the selected work type from GPU \
to 72") .required_unless_one(&GPU72OE_LIST) .conflicts_with_all(&GPU72OE_LIST) )
            .arg( Arg::with_name("double-check-already-done") .visible_alias("dcad") .long("double-check-already-done") .help( "Request double-check trial factoring work where a double check has \
already been done from GPU to 72" ) .required_unless_one(&GPU72DCAD_LIST) .conflicts_with_all(&GPU72DCAD_LIST) )
            .arg( Arg::with_name("lone-mersenne-hunters-bit-first") .visible_alias("lmh-bf") .long("lone-mersenne-hunters-bit-first") .help("Request LMH bit-first work from GPU to 72") .required_unless_one(&GPU72LMHBF_LIST) .conflicts_with_all(&GPU72LMHBF_LIST) )
            .arg( Arg::with_name("lone-mersenne-hunters-depth-first") .visible_alias("lmh-df") .long("lone-mersenne-hunters-depth-first") .help("Request LMH depth-first work from GPU to 72") .required_unless_one(&GPU72LMHDF_LIST) .conflicts_with_all(&GPU72LMHDF_LIST) )
            .arg( Arg::with_name("let-gpu72-decide") .visible_alias("lgpu72d") .long("let-gpu72-decide") .help("Let GPU to 72 decide what type of work to do.") .required_unless_one(&GPU72LGPU72D_LIST) .conflicts_with_all(&GPU72LGPU72D_LIST) )
        ).try_get_matches().map_err(|e| format!("{}", e))?;
    /* ---- gpu72 branch: resolve credentials, options, and work type ---- */
    if let Some(matches) = matches.subcommand_matches("gpu72") {
        /* NOTE(review): no arg or group named "gpu72-userpass" is defined
           above, so this is_present() looks like it is always false and the
           file-reading branch always runs — the unwrap on
           "gpu72-username-file" would then panic when only inline
           --gpu72-username/--gpu72-password were given. Probably meant
           is_present("gpu72-username"). TODO confirm. */
        let gpu72_credentials = if matches.is_present("gpu72-userpass") { ( matches.value_of("gpu72-username").unwrap().to_string(), matches.value_of("gpu72-password").unwrap().to_string(), ) } else {
            let username_path = matches.value_of("gpu72-username-file").unwrap();
            let mut username_file = BufReader::new(File::open(username_path).unwrap());
            let mut username = String::new();
            username_file .read_to_string(&mut username) .map_err(|e| format!("Error reading username file '{}': {}", username_path, e))?;
            /* Trim trailing newline/whitespace from the file contents. */
            let username = username.trim().to_string();
            let password_path = matches.value_of("gpu72-password-file").unwrap();
            let mut password_file = BufReader::new(File::open(password_path).unwrap());
            let mut password = String::new();
            password_file .read_to_string(&mut password) .map_err(|e| format!("Error reading password file '{}': {}", password_path, e))?;
            let password = password.trim().to_string();
            (username, password)
        };
        /* Primenet credentials only matter when falling back to Primenet. */
        let primenet_credentials = if matches.is_present("p95-fallback") {
            /* NOTE(review): "p95-userpass" is likewise never defined — same
               always-false concern as "gpu72-userpass" above. */
            if matches.is_present("p95-userpass") { Some(( matches.value_of("p95-username").unwrap().to_string(), matches.value_of("p95-password").unwrap().to_string(), )) } else {
                let username_path = matches.value_of("p95-username-file").unwrap();
                let mut username_file = BufReader::new(File::open(username_path).unwrap());
                let mut username = String::new();
                username_file.read_to_string(&mut username).map_err(|e| { format!("Error reading username file '{}': {}", username_path, e) })?;
                let username = username.trim().to_string();
                let password_path = matches.value_of("p95-password-file").unwrap();
                let mut password_file = BufReader::new(File::open(password_path).unwrap());
                let mut password = String::new();
                password_file.read_to_string(&mut password).map_err(|e| { format!("Error reading password file '{}': {}", password_path, e) })?;
                let password = password.trim().to_string();
                Some((username, password))
            }
        } else { None };
        /* These unwraps are safe per clap: the args have defaults and passed
           their numeric validators. */
        let max_exp = matches .value_of("max-exponent") .unwrap() .parse::<u8>() .unwrap();
        let work_directory = matches.value_of("work-directory").unwrap().to_string();
        let num_cache = matches .value_of("num-cache") .unwrap() .parse::<usize>() .unwrap();
        let timeout = matches .value_of("timeout") .unwrap() .parse::<usize>() .unwrap();
        let general_options = GeneralOptions { work_directory, num_cache, timeout, };
        /* Map the selected work-type flag + option flag onto Gpu72WorkType. */
        let work_type = map_matches!( matches, "lucas-lehmer-trial-factor" => Gpu72WorkType::LucasLehmerTrialFactor { "what-makes-sense" -> Gpu72LLTFWorkOption::WhatMakesSense; "lowest-trial-factor-level" -> Gpu72LLTFWorkOption::LowestTrialFactorLevel; "highest-trial-factor-level" -> Gpu72LLTFWorkOption::HighestTrialFactorLevel; "lowest-exponent" -> Gpu72LLTFWorkOption::LowestExponent; "oldest-exponent" -> Gpu72LLTFWorkOption::OldestExponent; "lone-mersenne-hunters-bit-first" -> Gpu72LLTFWorkOption::LmhBitFirst; "lone-mersenne-hunters-depth-first" -> Gpu72LLTFWorkOption::LmhDepthFirst; _ -> Gpu72LLTFWorkOption::LetGpu72Decide; } "double-check-trial-factor" => Gpu72WorkType::DoubleCheckTrialFactor { "what-makes-sense" -> Gpu72DCTFWorkOption::WhatMakesSense; "lowest-trial-factor-level" -> Gpu72DCTFWorkOption::LowestTrialFactorLevel; "highest-trial-factor-level" -> Gpu72DCTFWorkOption::HighestTrialFactorLevel; "lowest-exponent" -> Gpu72DCTFWorkOption::LowestExponent; "oldest-exponent" -> Gpu72DCTFWorkOption::OldestExponent; "double-check-already-done" -> Gpu72DCTFWorkOption::DoubleCheckAlreadyDone; _ -> Gpu72DCTFWorkOption::LetGpu72Decide; } _ => Gpu72WorkType::LucasLehmerP1 { "lowest-exponent" -> Gpu72LLP1WorkOption::LowestExponent; "oldest-exponent" -> Gpu72LLP1WorkOption::OldestExponent; _ -> Gpu72LLP1WorkOption::WhatMakesSense; } );
        Ok(Options::Gpu72(Gpu72Options { primenet_credentials, gpu72_credentials, work_type, max_exp, general_options, }))
    /* ---- p95 branch ---- */
    } else if let Some(matches) = matches.subcommand_matches("p95") {
        let username = if matches.is_present("username") { matches.value_of("username").unwrap().to_string() } else {
            let username_path = matches.value_of("username-file").unwrap();
            let mut username_file = BufReader::new(File::open(username_path).unwrap());
            let mut username = String::new();
            username_file .read_to_string(&mut username) .map_err(|e| format!("Error reading username file '{}': {}", username_path, e))?;
            username.trim().to_string()
        };
        let password = if matches.is_present("password") { matches.value_of("password").unwrap().to_string() } else {
            let password_path = matches.value_of("password-file").unwrap();
            let mut password_file = BufReader::new(File::open(password_path).unwrap());
            let mut password = String::new();
            password_file .read_to_string(&mut password) .map_err(|e| format!("Error reading password file '{}': {}", password_path, e))?;
            password.trim().to_string()
        };
        let credentials = (username, password);
        let work_directory = matches.value_of("work-directory").unwrap().to_string();
        let num_cache = matches .value_of("num-cache") .unwrap() .parse::<usize>() .unwrap();
        let timeout = matches .value_of("timeout") .unwrap() .parse::<usize>() .unwrap();
        let general_options = GeneralOptions { work_directory, num_cache, timeout, };
        /* Exactly one worktype flag is present (clap group enforces this). */
        let work_type = map_matches_simple!( matches, "trial-factoring" => PrimenetWorkType::TrialFactoring; "p1-factoring" => PrimenetWorkType::P1Factoring; "ecm-factoring" => PrimenetWorkType::EcmFactoring; "ecm-factoring-of-mersenne-cofactors" => PrimenetWorkType::EcmFactoringOfMersenneCofactors; "smallest-available-first-time-ll" => PrimenetWorkType::SmallestAvailableFirstTimeLlTests; "double-check-ll" => PrimenetWorkType::DoubleCheckLlTests; "world-record-ll" => PrimenetWorkType::WorldRecordLlTests; "100m-digits-ll" => PrimenetWorkType::HundredMillionDigitsLlTests; "smallest-available-first-time-prp" => PrimenetWorkType::SmallestAvailableFirstTimePrpTests; "double-check-prp" => PrimenetWorkType::DoubleCheckPrpTests; "world-record-prp" => PrimenetWorkType::WorldRecordPrpTests; "100m-digits-prp" => PrimenetWorkType::HundredMillionDigitsPrpTests; "first-prp-on-mersenne-cofactors" => PrimenetWorkType::FirstPrpTestsOnMersenneCofactors; _ => PrimenetWorkType::DoubleCheckPrpTestsOnMersenneCofactors; );
        Ok(Options::Primenet(PrimenetOptions { credentials, work_type, general_options, }))
    } else { Err("No subcommand specified.".to_string()) }
}
use crate::ctx::ClientContext; use crate::poll_result::ResultSummary; use maud::{html, Markup}; use rustimate_core::member::Member; use rustimate_core::poll::Poll; use uuid::Uuid; pub(crate) fn polls(ctx: &ClientContext, ps: Vec<&Poll>) -> Markup { html! { @if ps.is_empty() { li { "No polls" } } else { @for p in ps { (poll_summary(ctx, p)) } } } } pub(crate) fn poll_summary(ctx: &ClientContext, p: &Poll) -> Markup { html! { li { a.(ctx.user_profile().link_class()) onclick=(crate::html::onclick_event("poll-detail", &p.id().to_string(), "")) { (p.title()) } } } } pub(crate) fn vote_status(members: Vec<&Member>, votes: Vec<(Uuid, String)>) -> Markup { html! { div { @for member in members { div.vote-member { @if votes.iter().any(|x| &x.0 == member.user_id()) { div { (member.name()) div { div.icon data-uk-icon="icon: check; ratio: 2" {} } } } @else { div { (member.name()) div { div.icon data-uk-icon="icon: minus; ratio: 2" {} } } } } } } div.clear {} } } pub(crate) fn vote_choices(choices: &[String], current: Option<String>) -> Markup { html! { div { @for choice in choices { @if current.contains(choice) { div.vote-choice.active { (choice) } } @else { div.vote-choice onclick=(crate::html::onclick_event("select-choice", choice, "")) { (choice) } } } } div.clear {} } } pub(crate) fn vote_results(members: &[&Member], votes: &[(Uuid, String)]) -> Markup { html! { div { @for member in members { div.vote-member { div { (member.name()) div { div.large-text { (votes.iter().find_map(|v| if &v.0 == member.user_id() { Some(v.1.clone()) } else { None }).unwrap_or_else(|| "---".into())) } } } } } } div.clear {} } } pub(crate) fn vote_stats(summary: ResultSummary) -> Markup { html! { div { div { (format!("[{}/{}] votes counted", summary.valid_votes().len(), summary.valid_votes().len() + summary.invalid_votes().len())) } div { "Mean: " (summary.mean()) } div { "Median: " (summary.median()) } div { "Mode: " (summary.mode()) } } div.clear {} } }
use super::*; use {Error, CommandHandle}; extern { pub fn indy_collect_metrics(command_handle: CommandHandle, cb: Option<ResponseStringCB>) -> Error; }
extern crate cc; extern crate cmake; use std::env; fn main() { // RocksDB cmake script expect libz.a being under ${DEP_Z_ROOT}/lib, but libz-sys crate put it // under ${DEP_Z_ROOT}/build. Append the path to CMAKE_PREFIX_PATH to get around it. env::set_var("CMAKE_PREFIX_PATH", { let zlib_path = format!("{}/build", env::var("DEP_Z_ROOT").unwrap()); if let Ok(prefix_path) = env::var("CMAKE_PREFIX_PATH") { format!("{};{}", prefix_path, zlib_path) } else { zlib_path } }); let cur_dir = std::env::current_dir().unwrap(); let mut cfg = cmake::Config::new("titan"); if cfg!(feature = "portable") { cfg.define("PORTABLE", "ON"); } if cfg!(feature = "sse") { cfg.define("FORCE_SSE42", "ON"); } let dst = cfg .define("ROCKSDB_DIR", cur_dir.join("..").join("rocksdb")) .define("WITH_TITAN_TESTS", "OFF") .define("WITH_TITAN_TOOLS", "OFF") .register_dep("Z") .define("WITH_ZLIB", "ON") .register_dep("BZIP2") .define("WITH_BZ2", "ON") .register_dep("LZ4") .define("WITH_LZ4", "ON") .register_dep("ZSTD") .define("WITH_ZSTD", "ON") .register_dep("SNAPPY") .define("WITH_SNAPPY", "ON") .define("WITH_TITAN_TESTS", "OFF") .define("WITH_TITAN_TOOLS", "OFF") .build_target("titan") .very_verbose(true) .build(); println!("cargo:rustc-link-search=native={}/build", dst.display()); println!("cargo:rustc-link-lib=static=titan"); }
mod my; fn main() { my::function(); }
/*
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT license.
 */
#![warn(missing_debug_implementations, missing_docs)]

//! k-means clustering: Lloyd's iterations with random or k-means++ pivot
//! selection.

use rand::{distributions::Uniform, prelude::Distribution, thread_rng};
use rayon::prelude::*;
use std::cmp::min;

use crate::common::ANNResult;
use crate::utils::math_util::{calc_distance, compute_closest_centers, compute_vecs_l2sq};

/// Run Lloyds one iteration
/// Given data in row-major num_points * dim, and centers in row-major
/// num_centers * dim and squared lengths of ata points, output the closest
/// center to each data point, update centers, and also return inverted index.
/// If closest_centers == NULL, will allocate memory and return.
/// Similarly, if closest_docs == NULL, will allocate memory and return.
#[allow(clippy::too_many_arguments)]
fn lloyds_iter( data: &[f32], num_points: usize, dim: usize, centers: &mut [f32], num_centers: usize, docs_l2sq: &[f32], mut closest_docs: &mut Vec<Vec<usize>>, closest_center: &mut [u32], ) -> ANNResult<f32> {
    let compute_residual = true;
    /* Rebuild the inverted index from scratch each iteration. */
    closest_docs.iter_mut().for_each(|doc| doc.clear());
    compute_closest_centers( data, num_points, dim, centers, num_centers, 1, closest_center, Some(&mut closest_docs), Some(docs_l2sq), )?;
    /* Recompute each center as the mean of its assigned points, accumulating
       in f64 per cluster before casting back to f32. Empty clusters keep the
       zeroed center. */
    centers.fill(0.0);
    centers .par_chunks_mut(dim) .enumerate() .for_each(|(c, center)| { let mut cluster_sum = vec![0.0; dim]; for &doc_index in &closest_docs[c] { let current = &data[doc_index * dim..(doc_index + 1) * dim]; for (j, current_val) in current.iter().enumerate() { cluster_sum[j] += *current_val as f64; } } if !closest_docs[c].is_empty() { for (i, sum_val) in cluster_sum.iter().enumerate() { center[i] = (*sum_val / closest_docs[c].len() as f64) as f32; } } });
    /* Residual = sum over all points of the distance to their assigned
       (pre-update) center, accumulated per chunk of points in parallel. */
    let mut residual = 0.0;
    if compute_residual {
        /* buf_pad presumably spaces accumulator slots apart to avoid false
           sharing — TODO confirm intent. */
        let buf_pad: usize = 32;
        let chunk_size: usize = 2 * 8192;
        let nchunks = num_points / chunk_size + (if num_points % chunk_size == 0 { 0 } else { 1 } as usize);
        /* NOTE(review): `residuals` has nchunks * buf_pad slots, but
           par_iter_mut().enumerate() visits EVERY slot and treats each raw
           index as a chunk id, writing chunk i's sum into slot i — while the
           summation below reads only slots at multiples of buf_pad. For
           nchunks == 1 these coincide (slot 0), but for num_points >
           chunk_size the written slots (0..nchunks) and the read slots
           (0, 32, 64, ...) disagree; verify the residual for large inputs
           (par_chunks_mut(buf_pad) over chunk indices may be what was
           intended). */
        let mut residuals: Vec<f32> = vec![0.0; nchunks * buf_pad];
        residuals .par_iter_mut() .enumerate() .for_each(|(chunk, res)| { for d in (chunk * chunk_size)..min(num_points, (chunk + 1) * chunk_size) { *res += calc_distance( &data[d * dim..(d + 1) * dim], &centers[closest_center[d] as usize * dim..], dim, ); } });
        for chunk in 0..nchunks { residual += residuals[chunk * buf_pad]; }
    }
    Ok(residual)
}

/// Run Lloyds until max_reps or stopping criterion
/// If you pass NULL for closest_docs and closest_center, it will NOT return
/// the results, else it will assume appropriate allocation as closest_docs =
/// new vec<usize> [num_centers], and closest_center = new size_t[num_points]
/// Final centers are output in centers as row-major num_centers * dim.
fn run_lloyds( data: &[f32], num_points: usize, dim: usize, centers: &mut [f32], num_centers: usize, max_reps: usize, ) -> ANNResult<(Vec<Vec<usize>>, Vec<u32>, f32)> {
    let mut residual = f32::MAX;
    let mut closest_docs = vec![Vec::new(); num_centers];
    let mut closest_center = vec![0; num_points];
    /* Squared norms of all points, computed once and reused every iteration. */
    let mut docs_l2sq = vec![0.0; num_points];
    compute_vecs_l2sq(&mut docs_l2sq, data, num_points, dim);
    let mut old_residual;
    for i in 0..max_reps {
        old_residual = residual;
        residual = lloyds_iter( data, num_points, dim, centers, num_centers, &docs_l2sq, &mut closest_docs, &mut closest_center, )?;
        /* Stop early once the relative improvement drops below 1e-5 (skipping
           the first iteration, where old_residual is f32::MAX) or the
           residual is effectively zero. */
        if (i != 0 && (old_residual - residual) / residual < 0.00001) || (residual < f32::EPSILON) {
            println!( "Residuals unchanged: {} becomes {}. Early termination.", old_residual, residual );
            break;
        }
    }
    Ok((closest_docs, closest_center, residual))
}

/// Assume memory allocated for pivot_data as new float[num_centers * dim]
/// and select randomly num_centers points as pivots
fn selecting_pivots( data: &[f32], num_points: usize, dim: usize, pivot_data: &mut [f32], num_centers: usize, ) {
    /* Rejection-sample distinct point indices, then copy each chosen point's
       vector into the pivot buffer. */
    let mut picked = Vec::new();
    let mut rng = thread_rng();
    let distribution = Uniform::from(0..num_points);
    for j in 0..num_centers {
        let mut tmp_pivot = distribution.sample(&mut rng);
        while picked.contains(&tmp_pivot) { tmp_pivot = distribution.sample(&mut rng); }
        picked.push(tmp_pivot);
        let data_offset = tmp_pivot * dim;
        let pivot_offset = j * dim;
        pivot_data[pivot_offset..pivot_offset + dim] .copy_from_slice(&data[data_offset..data_offset + dim]);
    }
}

/// Select pivots in k-means++ algorithm
/// Points that are farther away from the already chosen centroids
/// have a higher probability of being selected as the next centroid.
/// The k-means++ algorithm helps avoid poor initial centroid
/// placement that can result in suboptimal clustering.
fn k_meanspp_selecting_pivots( data: &[f32], num_points: usize, dim: usize, pivot_data: &mut [f32], num_centers: usize, ) {
    /* Guard: above 2^23 points, fall back to plain random selection. */
    if num_points > (1 << 23) {
        println!("ERROR: n_pts {} currently not supported for k-means++, maximum is 8388608. Falling back to random pivot selection.", num_points);
        selecting_pivots(data, num_points, dim, pivot_data, num_centers);
        return;
    }
    let mut picked: Vec<usize> = Vec::new();
    let mut rng = thread_rng();
    let real_distribution = Uniform::from(0.0..1.0);
    let int_distribution = Uniform::from(0..num_points);
    /* First pivot is uniform at random. */
    let init_id = int_distribution.sample(&mut rng);
    let mut num_picked = 1;
    picked.push(init_id);
    let init_data_offset = init_id * dim;
    pivot_data[0..dim].copy_from_slice(&data[init_data_offset..init_data_offset + dim]);
    /* dist[i] tracks each point's distance to its nearest chosen pivot. */
    let mut dist = vec![0.0; num_points];
    dist.par_iter_mut().enumerate().for_each(|(i, dist_i)| { *dist_i = calc_distance( &data[i * dim..(i + 1) * dim], &data[init_id * dim..(init_id + 1) * dim], dim, ); });
    let mut dart_val: f64;
    let mut tmp_pivot = 0;
    let mut sum_flag = false;
    while num_picked < num_centers {
        /* Throw a dart proportional to the total remaining distance mass;
           the point whose prefix-sum interval contains the dart is selected
           (distance-weighted sampling). */
        dart_val = real_distribution.sample(&mut rng);
        let mut sum: f64 = 0.0;
        for item in dist.iter().take(num_points) { sum += *item as f64; }
        /* All distances zero: every point coincides with a pivot, so allow
           re-picking instead of looping forever. */
        if sum == 0.0 { sum_flag = true; }
        dart_val *= sum;
        let mut prefix_sum: f64 = 0.0;
        for (i, pivot) in dist.iter().enumerate().take(num_points) { tmp_pivot = i; if dart_val >= prefix_sum && dart_val < (prefix_sum + *pivot as f64) { break; } prefix_sum += *pivot as f64; }
        if picked.contains(&tmp_pivot) && !sum_flag { continue; }
        picked.push(tmp_pivot);
        let pivot_offset = num_picked * dim;
        let data_offset = tmp_pivot * dim;
        pivot_data[pivot_offset..pivot_offset + dim] .copy_from_slice(&data[data_offset..data_offset + dim]);
        /* Refresh nearest-pivot distances against the newly added pivot. */
        dist.par_iter_mut().enumerate().for_each(|(i, dist_i)| { *dist_i = (*dist_i).min(calc_distance( &data[i * dim..(i + 1) * dim], &data[tmp_pivot * dim..(tmp_pivot + 1) * dim], dim, )); });
        num_picked += 1;
    }
}

/// k-means algorithm interface
/// Seeds `centers` with k-means++ pivots, then refines them with Lloyd's
/// iterations; returns (inverted index, per-point assignments, residual).
pub fn k_means_clustering( data: &[f32], num_points: usize, dim: usize, centers: &mut [f32], num_centers: usize, max_reps: usize, ) -> ANNResult<(Vec<Vec<usize>>, Vec<u32>, f32)> {
    k_meanspp_selecting_pivots(data, num_points, dim, centers, num_centers);
    let (closest_docs, closest_center, residual) = run_lloyds(data, num_points, dim, centers, num_centers, max_reps)?;
    Ok((closest_docs, closest_center, residual))
}

#[cfg(test)]
mod kmeans_test {
    use super::*;
    use approx::assert_relative_eq;
    use rand::Rng;

    #[test]
    fn lloyds_iter_test() { let dim = 2; let num_points = 10; let num_centers = 3; let data: Vec<f32> = (1..=num_points * dim).map(|x| x as f32).collect(); let mut centers = [1.0, 2.0, 7.0, 8.0, 19.0, 20.0]; let mut closest_docs: Vec<Vec<usize>> = vec![vec![]; num_centers]; let mut closest_center: Vec<u32> = vec![0; num_points]; let docs_l2sq: Vec<f32> = data .chunks(dim) .map(|chunk| chunk.iter().map(|val| val.powi(2)).sum()) .collect(); let residual = lloyds_iter( &data, num_points, dim, &mut centers, num_centers, &docs_l2sq, &mut closest_docs, &mut closest_center, ) .unwrap(); let expected_centers: [f32; 6] = [2.0, 3.0, 9.0, 10.0, 17.0, 18.0]; let expected_closest_docs: Vec<Vec<usize>> = vec![vec![0, 1], vec![2, 3, 4, 5, 6], vec![7, 8, 9]]; let expected_closest_center: [u32; 10] = [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]; let expected_residual: f32 = 100.0; // sort data for assert
        centers.sort_by(|a, b| a.partial_cmp(b).unwrap()); for inner_vec in &mut closest_docs { inner_vec.sort(); } closest_center.sort_by(|a, b| a.partial_cmp(b).unwrap()); assert_eq!(centers, expected_centers); assert_eq!(closest_docs, expected_closest_docs); assert_eq!(closest_center, expected_closest_center); assert_relative_eq!(residual, expected_residual, epsilon = 1.0e-6_f32); }

    #[test]
    fn run_lloyds_test() { let dim = 2; let num_points = 10; let num_centers = 3; let max_reps = 5; let data: Vec<f32> = (1..=num_points * dim).map(|x| x as f32).collect(); let mut centers = [1.0, 2.0, 7.0, 8.0, 19.0, 20.0]; let (mut closest_docs, mut closest_center, residual) = run_lloyds(&data, num_points, dim, &mut centers, num_centers, max_reps).unwrap(); let expected_centers: [f32; 6] = [3.0, 4.0, 10.0, 11.0, 17.0, 18.0]; let expected_closest_docs: Vec<Vec<usize>>
= vec![vec![0, 1, 2], vec![3, 4, 5, 6], vec![7, 8, 9]]; let expected_closest_center: [u32; 10] = [0, 0, 0, 1, 1, 1, 1, 2, 2, 2]; let expected_residual: f32 = 72.0; // sort data for assert centers.sort_by(|a, b| a.partial_cmp(b).unwrap()); for inner_vec in &mut closest_docs { inner_vec.sort(); } closest_center.sort_by(|a, b| a.partial_cmp(b).unwrap()); assert_eq!(centers, expected_centers); assert_eq!(closest_docs, expected_closest_docs); assert_eq!(closest_center, expected_closest_center); assert_relative_eq!(residual, expected_residual, epsilon = 1.0e-6_f32); } #[test] fn selecting_pivots_test() { let dim = 2; let num_points = 10; let num_centers = 3; // Generate some random data points let mut rng = rand::thread_rng(); let data: Vec<f32> = (0..num_points * dim).map(|_| rng.gen()).collect(); let mut pivot_data = vec![0.0; num_centers * dim]; selecting_pivots(&data, num_points, dim, &mut pivot_data, num_centers); // Verify that each pivot point corresponds to a point in the data for i in 0..num_centers { let pivot_offset = i * dim; let pivot = &pivot_data[pivot_offset..(pivot_offset + dim)]; // Make sure the pivot is found in the data let mut found = false; for j in 0..num_points { let data_offset = j * dim; let point = &data[data_offset..(data_offset + dim)]; if pivot == point { found = true; break; } } assert!(found, "Pivot not found in data"); } } #[test] fn k_meanspp_selecting_pivots_test() { let dim = 2; let num_points = 10; let num_centers = 3; // Generate some random data points let mut rng = rand::thread_rng(); let data: Vec<f32> = (0..num_points * dim).map(|_| rng.gen()).collect(); let mut pivot_data = vec![0.0; num_centers * dim]; k_meanspp_selecting_pivots(&data, num_points, dim, &mut pivot_data, num_centers); // Verify that each pivot point corresponds to a point in the data for i in 0..num_centers { let pivot_offset = i * dim; let pivot = &pivot_data[pivot_offset..pivot_offset + dim]; // Make sure the pivot is found in the data let mut found = false; 
for j in 0..num_points { let data_offset = j * dim; let point = &data[data_offset..data_offset + dim]; if pivot == point { found = true; break; } } assert!(found, "Pivot not found in data"); } } }
/// Demo program for inspecting Rust panic backtraces.
///
/// Indexing past the end of the vector below panics with an
/// index-out-of-bounds error on purpose; run with RUST_BACKTRACE=1
/// to see the full backtrace.
fn main() {
    // Set for switch on:
    //$Env:RUST_BACKTRACE=1
    //panic!("Fire, fire, panic!");
    let vector = vec![1,2,3];
    // Deliberate out-of-bounds access: len is 3, index 99 → panic.
    vector[99];
}
use crate::core::compiler::unit_graph::UnitGraph;
use crate::core::compiler::{BuildConfig, CompileKind, Unit};
use crate::core::profiles::Profiles;
use crate::core::PackageSet;
use crate::core::Workspace;
use crate::util::config::Config;
use crate::util::errors::CargoResult;
use crate::util::interning::InternedString;
use crate::util::Rustc;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;

mod target_info;
pub use self::target_info::{
    FileFlavor, FileType, RustDocFingerprint, RustcTargetData, TargetInfo,
};

/// The build context, containing all information about a build task.
///
/// It is intended that this is mostly static information. Stuff that mutates
/// during the build can be found in the parent `Context`. (I say mostly,
/// because this has internal caching, but nothing that should be observable
/// or require &mut.)
pub struct BuildContext<'a, 'cfg> {
    /// The workspace the build is for.
    pub ws: &'a Workspace<'cfg>,
    /// The cargo configuration.
    pub config: &'cfg Config,
    /// Compilation profiles (e.g. dev/release settings) for this build.
    pub profiles: Profiles,
    /// The user's requested build configuration (jobs, requested kinds, ...).
    pub build_config: &'a BuildConfig,
    /// Extra compiler args for either `rustc` or `rustdoc`.
    pub extra_compiler_args: HashMap<Unit, Vec<String>>,
    /// Package downloader.
    ///
    /// This holds ownership of the `Package` objects.
    pub packages: PackageSet<'cfg>,
    /// Information about rustc and the target platform.
    pub target_data: RustcTargetData<'cfg>,
    /// The root units of `unit_graph` (units requested on the command-line).
    pub roots: Vec<Unit>,
    /// The dependency graph of units to compile.
    pub unit_graph: UnitGraph,
    /// Reverse-dependencies of documented units, used by the rustdoc --scrape-examples flag.
    pub scrape_units: Vec<Unit>,
    /// The list of all kinds that are involved in this build
    pub all_kinds: HashSet<CompileKind>,
}

impl<'a, 'cfg> BuildContext<'a, 'cfg> {
    /// Assembles a `BuildContext` from its parts, deriving `all_kinds`
    /// from the unit graph, the requested kinds, and the host kind.
    pub fn new(
        ws: &'a Workspace<'cfg>,
        packages: PackageSet<'cfg>,
        build_config: &'a BuildConfig,
        profiles: Profiles,
        extra_compiler_args: HashMap<Unit, Vec<String>>,
        target_data: RustcTargetData<'cfg>,
        roots: Vec<Unit>,
        unit_graph: UnitGraph,
        scrape_units: Vec<Unit>,
    ) -> CargoResult<BuildContext<'a, 'cfg>> {
        // Union of: kinds of every unit in the graph, kinds explicitly
        // requested on the command line, and the host (build scripts, etc.
        // always involve the host kind).
        let all_kinds = unit_graph
            .keys()
            .map(|u| u.kind)
            .chain(build_config.requested_kinds.iter().copied())
            .chain(std::iter::once(CompileKind::Host))
            .collect();
        Ok(BuildContext {
            ws,
            config: ws.config(),
            packages,
            build_config,
            profiles,
            extra_compiler_args,
            target_data,
            roots,
            unit_graph,
            scrape_units,
            all_kinds,
        })
    }

    /// The `rustc` instance detected for this build.
    pub fn rustc(&self) -> &Rustc {
        &self.target_data.rustc
    }

    /// Gets the user-specified linker for a particular host or target.
    pub fn linker(&self, kind: CompileKind) -> Option<PathBuf> {
        self.target_data
            .target_config(kind)
            .linker
            .as_ref()
            .map(|l| l.val.clone().resolve_program(self.config))
    }

    /// Gets the host architecture triple.
    ///
    /// For example, x86_64-unknown-linux-gnu, would be
    /// - machine: x86_64,
    /// - hardware-platform: unknown,
    /// - operating system: linux-gnu.
    pub fn host_triple(&self) -> InternedString {
        self.target_data.rustc.host
    }

    /// Gets the number of jobs specified for this build.
    pub fn jobs(&self) -> u32 {
        self.build_config.jobs
    }

    /// RUSTFLAGS that apply to units of the given unit's kind.
    pub fn rustflags_args(&self, unit: &Unit) -> &[String] {
        &self.target_data.info(unit.kind).rustflags
    }

    /// RUSTDOCFLAGS that apply to units of the given unit's kind.
    pub fn rustdocflags_args(&self, unit: &Unit) -> &[String] {
        &self.target_data.info(unit.kind).rustdocflags
    }

    /// Extra compiler args registered for this specific unit, if any.
    pub fn extra_args_for(&self, unit: &Unit) -> Option<&Vec<String>> {
        self.extra_compiler_args.get(unit)
    }
}
use super::super::{
    program::CubeProgram,
    webgl::{WebGlF32Vbo, WebGlI16Ibo, WebGlRenderingContext},
    ModelMatrix,
};
use crate::block::{self, BlockId};
use ndarray::Array2;

/// Renders a collection of box-shaped blocks ("boxblocks") as unit cubes
/// scaled/translated per block, using a shared cube shader program.
pub struct BoxblockCollectionRenderer {
    // Vertex positions of a unit cube centered at the origin (8 corners).
    vertexis_buffer: WebGlF32Vbo,
    // Per-vertex normals (normalized corner directions; see `n`).
    normals_buffer: WebGlF32Vbo,
    // Triangle index buffer: 6 faces × 2 triangles × 3 indices = 36.
    poly_index_buffer: WebGlI16Ibo,
    // Shader program used to draw each cube.
    cube_program: CubeProgram,
}

impl BoxblockCollectionRenderer {
    /// Uploads the static unit-cube geometry (vertices, normals, indices)
    /// to the GL context and compiles the cube shader program.
    pub fn new(gl: &WebGlRenderingContext) -> Self {
        // 8 corners of a unit cube centered at the origin.
        let vertexis_buffer = gl.create_vbo_with_f32array(
            &[
                [0.5, 0.5, 0.5],
                [-0.5, 0.5, 0.5],
                [0.5, -0.5, 0.5],
                [-0.5, -0.5, 0.5],
                [0.5, 0.5, -0.5],
                [-0.5, 0.5, -0.5],
                [0.5, -0.5, -0.5],
                [-0.5, -0.5, -0.5],
            ]
            .concat(),
        );
        // One normal per corner, pointing outward from the cube center.
        let normals_buffer = gl.create_vbo_with_f32array(
            &[
                Self::n(0.5, 0.5, 0.5),
                Self::n(-0.5, 0.5, 0.5),
                Self::n(0.5, -0.5, 0.5),
                Self::n(-0.5, -0.5, 0.5),
                Self::n(0.5, 0.5, -0.5),
                Self::n(-0.5, 0.5, -0.5),
                Self::n(0.5, -0.5, -0.5),
                Self::n(-0.5, -0.5, -0.5),
            ]
            .concat(),
        );
        // Two triangles per face, six faces.
        let poly_index_buffer = gl.create_ibo_with_i16array(
            &[
                [0, 1, 2, 3, 2, 1],
                [4, 1, 0, 1, 4, 5],
                [0, 2, 4, 6, 4, 2],
                [5, 3, 1, 3, 5, 7],
                [2, 3, 6, 7, 6, 3],
                [6, 5, 4, 5, 6, 7],
            ]
            .concat(),
        );
        let cube_program = CubeProgram::new(gl);
        Self {
            vertexis_buffer,
            poly_index_buffer,
            normals_buffer,
            cube_program,
        }
    }

    /// Draws each boxblock in `boxblocks` with the shared cube geometry.
    ///
    /// `vp_matrix` is the combined view-projection matrix; per block a model
    /// matrix (scale + translation) and its inverse are uploaded as uniforms.
    pub fn render<'a>(
        &self,
        gl: &WebGlRenderingContext,
        vp_matrix: &Array2<f32>,
        block_field: &block::Field,
        boxblocks: impl Iterator<Item = &'a BlockId>,
    ) {
        self.cube_program.use_program(gl);
        gl.set_attribute(
            &self.vertexis_buffer,
            &self.cube_program.a_vertex_location,
            3,
            0,
        );
        gl.set_attribute(
            &self.normals_buffer,
            &self.cube_program.a_normal_location,
            3,
            0,
        );
        gl.bind_buffer(
            web_sys::WebGlRenderingContext::ELEMENT_ARRAY_BUFFER,
            Some(&self.poly_index_buffer),
        );
        // Fixed directional light and shade strength shared by all cubes.
        gl.uniform3fv_with_f32_array(Some(&self.cube_program.u_light_location), &[0.5, -2.0, 1.0]);
        gl.uniform1f(Some(&self.cube_program.u_shade_location), 0.2);
        for (_, boxblock) in block_field.listed::<block::table_object::Boxblock>(boxblocks.collect())
        {
            let s = boxblock.size();
            let p = boxblock.position();
            // Model = scale by block size, then translate to block position.
            let model_matrix: Array2<f32> = ModelMatrix::new().with_scale(s).with_movement(p).into();
            // Inverse model matrix built by applying inverse ops in reverse
            // order. NOTE(review): the translation negates only x and z
            // (p[1] is not negated) — looks intentional for this coordinate
            // convention, but worth confirming against the shader.
            let inv_model_matrix: Array2<f32> = ModelMatrix::new()
                .with_movement(&[-p[0], p[1], -p[2]])
                .with_scale(&[1.0 / s[0], 1.0 / s[1], 1.0 / s[2]])
                .into();
            let mvp_matrix = vp_matrix.dot(&model_matrix);
            // Transposed before upload: GL expects column-major uniforms.
            let mvp_matrix = mvp_matrix.t();
            gl.uniform_matrix4fv_with_f32_array(
                Some(&self.cube_program.u_translate_location),
                false,
                &[
                    mvp_matrix.row(0).to_vec(),
                    mvp_matrix.row(1).to_vec(),
                    mvp_matrix.row(2).to_vec(),
                    mvp_matrix.row(3).to_vec(),
                ]
                .concat()
                .into_iter()
                .map(|a| a as f32)
                .collect::<Vec<f32>>(),
            );
            let inv_model_matrix = inv_model_matrix.t();
            gl.uniform_matrix4fv_with_f32_array(
                Some(&self.cube_program.u_inv_model_location),
                false,
                &[
                    inv_model_matrix.row(0).to_vec(),
                    inv_model_matrix.row(1).to_vec(),
                    inv_model_matrix.row(2).to_vec(),
                    inv_model_matrix.row(3).to_vec(),
                ]
                .concat()
                .into_iter()
                .map(|a| a as f32)
                .collect::<Vec<f32>>(),
            );
            gl.uniform4fv_with_f32_array(
                Some(&self.cube_program.u_mask_color_location),
                &boxblock.color().to_f32array(),
            );
            // 36 indices = 12 triangles = 6 cube faces.
            gl.draw_elements_with_i32(
                web_sys::WebGlRenderingContext::TRIANGLES,
                36,
                web_sys::WebGlRenderingContext::UNSIGNED_SHORT,
                0,
            );
        }
    }

    /// Normalizes the vector (x, y, z) to unit length; used to build
    /// per-corner normals for the cube.
    fn n(x: f32, y: f32, z: f32) -> [f32; 3] {
        let len = (x.powi(2) + y.powi(2) + z.powi(2)).sqrt();
        [x / len, y / len, z / len]
    }
}
use super::Register;

/// Faults that may occur in this module
pub enum Fault {
    /// Register is wrongly sized
    TooSmall(Register),
    /// Memory reference is void
    VoidRef,
    /// Register sizes in a Mem aren't the same size
    NotEqual(Register,Register)
}
use async_std::os::unix::net::{UnixListener, UnixStream};
use futures::stream::StreamExt;
use log::*;
use std::io::Result;
use std::path::Path;

/// Listens on a Unix domain socket at `path` and calls `onconnection`
/// with a clone of `state` for every accepted stream.
///
/// If binding fails (typically because a stale socket file exists from a
/// previous run), the file is removed and the bind is retried once.
/// The socket file is removed again when the accept loop ends normally.
pub async fn accept<P, S, Cb>(path: P, state: S, mut onconnection: Cb) -> Result<()>
where
    P: AsRef<Path>,
    S: Clone,
    Cb: FnMut(S, UnixStream),
{
    let path = path.as_ref().to_path_buf();
    // let sock_remover = SockRemover { path: path.clone() };
    let listener = match UnixListener::bind(&path).await {
        Ok(listener) => listener,
        Err(_) => {
            // Bind failed: assume a stale socket file, delete it and retry.
            std::fs::remove_file(&path)?;
            UnixListener::bind(&path).await?
        }
    };
    info!("Listening on {}", path.to_str().unwrap());
    let mut incoming = listener.incoming();
    // Hand each accepted connection to the callback; errors from accept
    // propagate out of the loop via `?`.
    while let Some(stream) = incoming.next().await {
        let stream = stream?;
        onconnection(state.clone(), stream);
    }
    // TODO: On ctrl-c this is not run I think.
    std::fs::remove_file(&path).unwrap();
    debug!("Deleted socket file {:?}", path);
    // let _ = sock_remover;
    Ok(())
}

// TODO: This also did not run always.
// struct SockRemover {
//     path: PathBuf,
// }
// impl Drop for SockRemover {
//     fn drop(&mut self) {
//         if std::fs::remove_file(&self.path).is_err() {
//             error!("Could not remove socket {}", self.path.to_str().unwrap());
//         } else {
//             debug!("Removed socket {}", self.path.to_str().unwrap());
//         }
//     }
// }
use crate::types::{TryReadFrom, WriteInto};

/// Marker trait for the direction a packet travels (client↔server).
pub trait Direction: Sized + 'static {}
/// Packets sent from client to server.
pub struct ServerBound;
impl Direction for ServerBound {}
/// Packets sent from server to client.
pub struct ClientBound;
impl Direction for ClientBound {}

/// Marker trait for the protocol stage a packet belongs to.
pub trait Stage: Sized + 'static {}
/// Initial handshake stage.
pub struct HandshakeStage;
impl Stage for HandshakeStage {}
/// Server status/ping stage.
pub struct StatusStage;
impl Stage for StatusStage {}
/// Authentication/login stage.
pub struct LoginStage;
impl Stage for LoginStage {}
/// In-game play stage.
pub struct PlayStage;
impl Stage for PlayStage {}

/// Represents a packet.
///
/// A packet is serializable (`WriteInto`), fallibly parseable
/// (`TryReadFrom`), and tagged at the type level with its direction
/// and protocol stage.
pub trait Packet: Send + Sync + Sized + TryReadFrom + WriteInto {
    type Direction: Direction;
    type Stage: Stage;

    /// The numeric packet id on the wire.
    fn id(&self) -> u32;
    /// Human-readable packet name (for logging/debugging).
    fn name(&self) -> &'static str;
}

/// A complete protocol version: one packet enum per (direction, stage) pair,
/// plus version metadata.
pub trait Protocol: Sized + 'static {
    type ServerBoundHandshakePacket: Packet<Direction = ServerBound, Stage = HandshakeStage>;
    type ClientBoundHandshakePacket: Packet<Direction = ClientBound, Stage = HandshakeStage>;
    type ServerBoundStatusPacket: Packet<Direction = ServerBound, Stage = StatusStage>;
    type ClientBoundStatusPacket: Packet<Direction = ClientBound, Stage = StatusStage>;
    type ServerBoundLoginPacket: Packet<Direction = ServerBound, Stage = LoginStage>;
    type ClientBoundLoginPacket: Packet<Direction = ClientBound, Stage = LoginStage>;
    type ServerBoundPlayPacket: Packet<Direction = ServerBound, Stage = PlayStage>;
    type ClientBoundPlayPacket: Packet<Direction = ClientBound, Stage = PlayStage>;

    /// The numeric protocol version.
    fn version() -> u64;
    /// The game version string this protocol targets.
    fn minecraft_version() -> &'static str;
    /// The major game version string.
    fn minecraft_major_version() -> &'static str;
}
extern crate glutin_window;
extern crate graphics;
extern crate opengl_graphics;
extern crate piston;

mod code;
use code::*;

use piston::AfterRenderEvent;
use piston::IdleEvent;
use piston::RenderEvent;
use piston::UpdateEvent;

/// Window title shown by the OS.
const TITLE: &str = "Calorie Run!";
/// Fixed window dimensions (width, height) in points.
const WINDOW_SIZE: [f64; 2] = [800.0, 600.0];
/// OpenGL version requested from the backend.
const OPENGL: opengl_graphics::OpenGL = opengl_graphics::OpenGL::V2_1;

/// Runs one complete play-through: creates the window, then drives the
/// piston event loop, forwarding input/render/update events to the current
/// scene and advancing through the scene list when a scene reports done.
/// Returns when the last scene finishes or the window closes.
fn run_game() {
    // NOTE(review): redundant — `use code::*;` is already in scope at
    // module level above.
    use code::*;
    let mut window: glutin_window::GlutinWindow = piston::WindowSettings::new(TITLE, WINDOW_SIZE)
        .graphics_api(OPENGL)
        .exit_on_esc(true)
        .resizable(false)
        .build()
        .unwrap();
    let mut gl = opengl_graphics::GlGraphics::new(OPENGL);
    // Scenes play in order; `scene_idx` tracks the active one.
    let mut scene_idx = 0;
    let mut scenes = [
        MainMenuScene::start(),
        DebugScene::start(),
        FinishScene::start(),
    ];
    let mut current_scene: &mut Box<dyn Scene> = &mut scenes[scene_idx];
    // Last known mouse position, fed to click handlers.
    let mut mouse_cursor: [f64; 2] = [0.0, 0.0];
    let mut events = piston::Events::new(piston::EventSettings::new());
    while let Some(e) = events.next(&mut window) {
        match e {
            // Input events: mouse buttons, keyboard, and mouse motion are
            // dispatched to the current scene's handlers.
            piston::Event::Input(i, _) => {
                match i {
                    piston::Input::Button(btn_args) => {
                        match btn_args.button {
                            piston::Button::Mouse(mb) => {
                                if btn_args.state == piston::ButtonState::Press {
                                    if mb == piston::mouse::MouseButton::Left {
                                        current_scene.on_mouse_left_click(mouse_cursor);
                                    } else if mb == piston::mouse::MouseButton::Right {
                                        current_scene.on_mouse_right_click(mouse_cursor);
                                    }
                                } else {
                                    current_scene.on_mouse_release(mouse_cursor);
                                }
                            }
                            piston::Button::Keyboard(key) => {
                                if btn_args.state == piston::ButtonState::Press {
                                    current_scene.on_key_press(key);
                                } else {
                                    current_scene.on_key_release(key);
                                }
                            }
                            _ => {}
                        };
                    }
                    piston::Input::Move(m) => {
                        match m {
                            piston::Motion::MouseScroll(scroll) => {
                                current_scene.on_mouse_scroll(scroll);
                            }
                            piston::Motion::MouseCursor(pos) => {
                                // Remember cursor position for later clicks.
                                mouse_cursor = pos;
                                current_scene.on_mouse_move(pos);
                            }
                            _ => {}
                        };
                    }
                    _ => {}
                };
            }
            // Loop events: render/update ticks, plus idle time used to
            // check for scene transitions.
            piston::Event::Loop(event) => {
                match event {
                    piston::Loop::Render(render_args) => {
                        gl.draw(render_args.viewport(), |mut c, mut gl| {
                            current_scene.render(&mut c, &mut gl, render_args);
                        });
                    }
                    piston::Loop::AfterRender(render_args) => {
                        current_scene.after_render(render_args);
                    }
                    piston::Loop::Update(update_args) => {
                        current_scene.update(update_args);
                    }
                    piston::Loop::Idle(idle_args) => {
                        current_scene.on_idle(idle_args);
                        // Advance to the next scene when the current one
                        // finishes; exit once all scenes are done.
                        if current_scene.done() {
                            scene_idx += 1;
                            if scene_idx == scenes.len() {
                                return ();
                            }
                            current_scene = &mut scenes[scene_idx];
                        }
                    }
                };
            }
            piston::Event::Custom(..) => {}
        };
    }
}

/// Entry point: restarts the game forever after each play-through.
// NOTE(review): the `Ok(())` after the infinite loop is unreachable; the
// program only ends when the window is closed (events.next returns None
// inside run_game, after which the loop restarts it).
fn main() -> Result<(), ()> {
    loop {
        run_game();
    }
    // run_game();
    Ok(())
}
use async_trait::async_trait;

use common::result::Result;

use crate::domain::catalogue::Catalogue;

/// Persistence port for the single `Catalogue` aggregate.
///
/// Implementations must be shareable across threads (`Sync + Send`).
#[async_trait]
pub trait CatalogueRepository: Sync + Send {
    /// Loads the catalogue from the backing store.
    async fn find(&self) -> Result<Catalogue>;
    /// Persists the catalogue; takes `&mut` so implementations may
    /// update the aggregate in place (e.g. assign ids/timestamps).
    async fn save(&self, catalogue: &mut Catalogue) -> Result<()>;
}
#[doc = "Register `CR1` reader"] pub type R = crate::R<CR1_SPEC>; #[doc = "Register `CR1` writer"] pub type W = crate::W<CR1_SPEC>; #[doc = "Field `AWDCH` reader - Analog watchdog channel select bits"] pub type AWDCH_R = crate::FieldReader; #[doc = "Field `AWDCH` writer - Analog watchdog channel select bits"] pub type AWDCH_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 5, O>; #[doc = "Field `EOCIE` reader - Interrupt enable for EOC"] pub type EOCIE_R = crate::BitReader<EOCIE_A>; #[doc = "Interrupt enable for EOC\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum EOCIE_A { #[doc = "0: EOC interrupt disabled"] Disabled = 0, #[doc = "1: EOC interrupt enabled"] Enabled = 1, } impl From<EOCIE_A> for bool { #[inline(always)] fn from(variant: EOCIE_A) -> Self { variant as u8 != 0 } } impl EOCIE_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> EOCIE_A { match self.bits { false => EOCIE_A::Disabled, true => EOCIE_A::Enabled, } } #[doc = "EOC interrupt disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == EOCIE_A::Disabled } #[doc = "EOC interrupt enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == EOCIE_A::Enabled } } #[doc = "Field `EOCIE` writer - Interrupt enable for EOC"] pub type EOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, EOCIE_A>; impl<'a, REG, const O: u8> EOCIE_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "EOC interrupt disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(EOCIE_A::Disabled) } #[doc = "EOC interrupt enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(EOCIE_A::Enabled) } } #[doc = "Field `AWDIE` reader - Analog watchdog interrupt enable"] pub type AWDIE_R = crate::BitReader<AWDIE_A>; #[doc = "Analog watchdog interrupt enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum AWDIE_A { #[doc = 
"0: Analogue watchdog interrupt disabled"] Disabled = 0, #[doc = "1: Analogue watchdog interrupt enabled"] Enabled = 1, } impl From<AWDIE_A> for bool { #[inline(always)] fn from(variant: AWDIE_A) -> Self { variant as u8 != 0 } } impl AWDIE_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> AWDIE_A { match self.bits { false => AWDIE_A::Disabled, true => AWDIE_A::Enabled, } } #[doc = "Analogue watchdog interrupt disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == AWDIE_A::Disabled } #[doc = "Analogue watchdog interrupt enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == AWDIE_A::Enabled } } #[doc = "Field `AWDIE` writer - Analog watchdog interrupt enable"] pub type AWDIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, AWDIE_A>; impl<'a, REG, const O: u8> AWDIE_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Analogue watchdog interrupt disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(AWDIE_A::Disabled) } #[doc = "Analogue watchdog interrupt enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(AWDIE_A::Enabled) } } #[doc = "Field `JEOCIE` reader - Interrupt enable for injected channels"] pub type JEOCIE_R = crate::BitReader<JEOCIE_A>; #[doc = "Interrupt enable for injected channels\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum JEOCIE_A { #[doc = "0: JEOC interrupt disabled"] Disabled = 0, #[doc = "1: JEOC interrupt enabled"] Enabled = 1, } impl From<JEOCIE_A> for bool { #[inline(always)] fn from(variant: JEOCIE_A) -> Self { variant as u8 != 0 } } impl JEOCIE_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> JEOCIE_A { match self.bits { false => JEOCIE_A::Disabled, true => JEOCIE_A::Enabled, } } #[doc = "JEOC interrupt disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == JEOCIE_A::Disabled } 
#[doc = "JEOC interrupt enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == JEOCIE_A::Enabled } } #[doc = "Field `JEOCIE` writer - Interrupt enable for injected channels"] pub type JEOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, JEOCIE_A>; impl<'a, REG, const O: u8> JEOCIE_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "JEOC interrupt disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(JEOCIE_A::Disabled) } #[doc = "JEOC interrupt enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(JEOCIE_A::Enabled) } } #[doc = "Field `SCAN` reader - Scan mode"] pub type SCAN_R = crate::BitReader<SCAN_A>; #[doc = "Scan mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum SCAN_A { #[doc = "0: Scan mode disabled"] Disabled = 0, #[doc = "1: Scan mode enabled"] Enabled = 1, } impl From<SCAN_A> for bool { #[inline(always)] fn from(variant: SCAN_A) -> Self { variant as u8 != 0 } } impl SCAN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SCAN_A { match self.bits { false => SCAN_A::Disabled, true => SCAN_A::Enabled, } } #[doc = "Scan mode disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == SCAN_A::Disabled } #[doc = "Scan mode enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == SCAN_A::Enabled } } #[doc = "Field `SCAN` writer - Scan mode"] pub type SCAN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, SCAN_A>; impl<'a, REG, const O: u8> SCAN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Scan mode disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(SCAN_A::Disabled) } #[doc = "Scan mode enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(SCAN_A::Enabled) } } #[doc = "Field `AWDSGL` reader - Enable the watchdog on a single channel in 
scan mode"] pub type AWDSGL_R = crate::BitReader<AWDSGL_A>; #[doc = "Enable the watchdog on a single channel in scan mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum AWDSGL_A { #[doc = "0: Analog watchdog enabled on all channels"] AllChannels = 0, #[doc = "1: Analog watchdog enabled on a single channel"] SingleChannel = 1, } impl From<AWDSGL_A> for bool { #[inline(always)] fn from(variant: AWDSGL_A) -> Self { variant as u8 != 0 } } impl AWDSGL_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> AWDSGL_A { match self.bits { false => AWDSGL_A::AllChannels, true => AWDSGL_A::SingleChannel, } } #[doc = "Analog watchdog enabled on all channels"] #[inline(always)] pub fn is_all_channels(&self) -> bool { *self == AWDSGL_A::AllChannels } #[doc = "Analog watchdog enabled on a single channel"] #[inline(always)] pub fn is_single_channel(&self) -> bool { *self == AWDSGL_A::SingleChannel } } #[doc = "Field `AWDSGL` writer - Enable the watchdog on a single channel in scan mode"] pub type AWDSGL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, AWDSGL_A>; impl<'a, REG, const O: u8> AWDSGL_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Analog watchdog enabled on all channels"] #[inline(always)] pub fn all_channels(self) -> &'a mut crate::W<REG> { self.variant(AWDSGL_A::AllChannels) } #[doc = "Analog watchdog enabled on a single channel"] #[inline(always)] pub fn single_channel(self) -> &'a mut crate::W<REG> { self.variant(AWDSGL_A::SingleChannel) } } #[doc = "Field `JAUTO` reader - Automatic injected group conversion"] pub type JAUTO_R = crate::BitReader<JAUTO_A>; #[doc = "Automatic injected group conversion\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum JAUTO_A { #[doc = "0: Automatic injected group conversion disabled"] Disabled = 0, #[doc = "1: Automatic injected group conversion enabled"] Enabled = 1, } impl From<JAUTO_A> for bool { 
#[inline(always)] fn from(variant: JAUTO_A) -> Self { variant as u8 != 0 } } impl JAUTO_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> JAUTO_A { match self.bits { false => JAUTO_A::Disabled, true => JAUTO_A::Enabled, } } #[doc = "Automatic injected group conversion disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == JAUTO_A::Disabled } #[doc = "Automatic injected group conversion enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == JAUTO_A::Enabled } } #[doc = "Field `JAUTO` writer - Automatic injected group conversion"] pub type JAUTO_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, JAUTO_A>; impl<'a, REG, const O: u8> JAUTO_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Automatic injected group conversion disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(JAUTO_A::Disabled) } #[doc = "Automatic injected group conversion enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(JAUTO_A::Enabled) } } #[doc = "Field `DISCEN` reader - Discontinuous mode on regular channels"] pub type DISCEN_R = crate::BitReader<DISCEN_A>; #[doc = "Discontinuous mode on regular channels\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum DISCEN_A { #[doc = "0: Discontinuous mode on regular channels disabled"] Disabled = 0, #[doc = "1: Discontinuous mode on regular channels enabled"] Enabled = 1, } impl From<DISCEN_A> for bool { #[inline(always)] fn from(variant: DISCEN_A) -> Self { variant as u8 != 0 } } impl DISCEN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DISCEN_A { match self.bits { false => DISCEN_A::Disabled, true => DISCEN_A::Enabled, } } #[doc = "Discontinuous mode on regular channels disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == DISCEN_A::Disabled } #[doc = "Discontinuous mode on regular channels 
enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == DISCEN_A::Enabled } } #[doc = "Field `DISCEN` writer - Discontinuous mode on regular channels"] pub type DISCEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DISCEN_A>; impl<'a, REG, const O: u8> DISCEN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Discontinuous mode on regular channels disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(DISCEN_A::Disabled) } #[doc = "Discontinuous mode on regular channels enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(DISCEN_A::Enabled) } } #[doc = "Field `JDISCEN` reader - Discontinuous mode on injected channels"] pub type JDISCEN_R = crate::BitReader<JDISCEN_A>; #[doc = "Discontinuous mode on injected channels\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum JDISCEN_A { #[doc = "0: Discontinuous mode on injected channels disabled"] Disabled = 0, #[doc = "1: Discontinuous mode on injected channels enabled"] Enabled = 1, } impl From<JDISCEN_A> for bool { #[inline(always)] fn from(variant: JDISCEN_A) -> Self { variant as u8 != 0 } } impl JDISCEN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> JDISCEN_A { match self.bits { false => JDISCEN_A::Disabled, true => JDISCEN_A::Enabled, } } #[doc = "Discontinuous mode on injected channels disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == JDISCEN_A::Disabled } #[doc = "Discontinuous mode on injected channels enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == JDISCEN_A::Enabled } } #[doc = "Field `JDISCEN` writer - Discontinuous mode on injected channels"] pub type JDISCEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, JDISCEN_A>; impl<'a, REG, const O: u8> JDISCEN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Discontinuous mode on injected channels 
disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(JDISCEN_A::Disabled) } #[doc = "Discontinuous mode on injected channels enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(JDISCEN_A::Enabled) } } #[doc = "Field `DISCNUM` reader - Discontinuous mode channel count"] pub type DISCNUM_R = crate::FieldReader; #[doc = "Field `DISCNUM` writer - Discontinuous mode channel count"] pub type DISCNUM_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 3, O>; #[doc = "Field `JAWDEN` reader - Analog watchdog enable on injected channels"] pub type JAWDEN_R = crate::BitReader<JAWDEN_A>; #[doc = "Analog watchdog enable on injected channels\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum JAWDEN_A { #[doc = "0: Analog watchdog disabled on injected channels"] Disabled = 0, #[doc = "1: Analog watchdog enabled on injected channels"] Enabled = 1, } impl From<JAWDEN_A> for bool { #[inline(always)] fn from(variant: JAWDEN_A) -> Self { variant as u8 != 0 } } impl JAWDEN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> JAWDEN_A { match self.bits { false => JAWDEN_A::Disabled, true => JAWDEN_A::Enabled, } } #[doc = "Analog watchdog disabled on injected channels"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == JAWDEN_A::Disabled } #[doc = "Analog watchdog enabled on injected channels"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == JAWDEN_A::Enabled } } #[doc = "Field `JAWDEN` writer - Analog watchdog enable on injected channels"] pub type JAWDEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, JAWDEN_A>; impl<'a, REG, const O: u8> JAWDEN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Analog watchdog disabled on injected channels"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(JAWDEN_A::Disabled) } #[doc = "Analog watchdog enabled on injected 
channels"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(JAWDEN_A::Enabled) } } #[doc = "Field `AWDEN` reader - Analog watchdog enable on regular channels"] pub type AWDEN_R = crate::BitReader<AWDEN_A>; #[doc = "Analog watchdog enable on regular channels\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum AWDEN_A { #[doc = "0: Analog watchdog disabled on regular channels"] Disabled = 0, #[doc = "1: Analog watchdog enabled on regular channels"] Enabled = 1, } impl From<AWDEN_A> for bool { #[inline(always)] fn from(variant: AWDEN_A) -> Self { variant as u8 != 0 } } impl AWDEN_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> AWDEN_A { match self.bits { false => AWDEN_A::Disabled, true => AWDEN_A::Enabled, } } #[doc = "Analog watchdog disabled on regular channels"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == AWDEN_A::Disabled } #[doc = "Analog watchdog enabled on regular channels"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == AWDEN_A::Enabled } } #[doc = "Field `AWDEN` writer - Analog watchdog enable on regular channels"] pub type AWDEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, AWDEN_A>; impl<'a, REG, const O: u8> AWDEN_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Analog watchdog disabled on regular channels"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(AWDEN_A::Disabled) } #[doc = "Analog watchdog enabled on regular channels"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(AWDEN_A::Enabled) } } #[doc = "Field `RES` reader - Resolution"] pub type RES_R = crate::FieldReader<RES_A>; #[doc = "Resolution\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[repr(u8)] pub enum RES_A { #[doc = "0: 12-bit (15 ADCCLK cycles)"] TwelveBit = 0, #[doc = "1: 10-bit (13 ADCCLK cycles)"] TenBit = 1, #[doc = "2: 8-bit (11 ADCCLK 
cycles)"] EightBit = 2, #[doc = "3: 6-bit (9 ADCCLK cycles)"] SixBit = 3, } impl From<RES_A> for u8 { #[inline(always)] fn from(variant: RES_A) -> Self { variant as _ } } impl crate::FieldSpec for RES_A { type Ux = u8; } impl RES_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RES_A { match self.bits { 0 => RES_A::TwelveBit, 1 => RES_A::TenBit, 2 => RES_A::EightBit, 3 => RES_A::SixBit, _ => unreachable!(), } } #[doc = "12-bit (15 ADCCLK cycles)"] #[inline(always)] pub fn is_twelve_bit(&self) -> bool { *self == RES_A::TwelveBit } #[doc = "10-bit (13 ADCCLK cycles)"] #[inline(always)] pub fn is_ten_bit(&self) -> bool { *self == RES_A::TenBit } #[doc = "8-bit (11 ADCCLK cycles)"] #[inline(always)] pub fn is_eight_bit(&self) -> bool { *self == RES_A::EightBit } #[doc = "6-bit (9 ADCCLK cycles)"] #[inline(always)] pub fn is_six_bit(&self) -> bool { *self == RES_A::SixBit } } #[doc = "Field `RES` writer - Resolution"] pub type RES_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, RES_A>; impl<'a, REG, const O: u8> RES_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, REG::Ux: From<u8>, { #[doc = "12-bit (15 ADCCLK cycles)"] #[inline(always)] pub fn twelve_bit(self) -> &'a mut crate::W<REG> { self.variant(RES_A::TwelveBit) } #[doc = "10-bit (13 ADCCLK cycles)"] #[inline(always)] pub fn ten_bit(self) -> &'a mut crate::W<REG> { self.variant(RES_A::TenBit) } #[doc = "8-bit (11 ADCCLK cycles)"] #[inline(always)] pub fn eight_bit(self) -> &'a mut crate::W<REG> { self.variant(RES_A::EightBit) } #[doc = "6-bit (9 ADCCLK cycles)"] #[inline(always)] pub fn six_bit(self) -> &'a mut crate::W<REG> { self.variant(RES_A::SixBit) } } #[doc = "Field `OVRIE` reader - Overrun interrupt enable"] pub type OVRIE_R = crate::BitReader<OVRIE_A>; #[doc = "Overrun interrupt enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum OVRIE_A { #[doc = "0: Overrun interrupt disabled"] Disabled = 0, #[doc = 
"1: Overrun interrupt enabled"] Enabled = 1, } impl From<OVRIE_A> for bool { #[inline(always)] fn from(variant: OVRIE_A) -> Self { variant as u8 != 0 } } impl OVRIE_R { #[doc = "Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> OVRIE_A { match self.bits { false => OVRIE_A::Disabled, true => OVRIE_A::Enabled, } } #[doc = "Overrun interrupt disabled"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == OVRIE_A::Disabled } #[doc = "Overrun interrupt enabled"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == OVRIE_A::Enabled } } #[doc = "Field `OVRIE` writer - Overrun interrupt enable"] pub type OVRIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, OVRIE_A>; impl<'a, REG, const O: u8> OVRIE_W<'a, REG, O> where REG: crate::Writable + crate::RegisterSpec, { #[doc = "Overrun interrupt disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut crate::W<REG> { self.variant(OVRIE_A::Disabled) } #[doc = "Overrun interrupt enabled"] #[inline(always)] pub fn enabled(self) -> &'a mut crate::W<REG> { self.variant(OVRIE_A::Enabled) } } impl R { #[doc = "Bits 0:4 - Analog watchdog channel select bits"] #[inline(always)] pub fn awdch(&self) -> AWDCH_R { AWDCH_R::new((self.bits & 0x1f) as u8) } #[doc = "Bit 5 - Interrupt enable for EOC"] #[inline(always)] pub fn eocie(&self) -> EOCIE_R { EOCIE_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bit 6 - Analog watchdog interrupt enable"] #[inline(always)] pub fn awdie(&self) -> AWDIE_R { AWDIE_R::new(((self.bits >> 6) & 1) != 0) } #[doc = "Bit 7 - Interrupt enable for injected channels"] #[inline(always)] pub fn jeocie(&self) -> JEOCIE_R { JEOCIE_R::new(((self.bits >> 7) & 1) != 0) } #[doc = "Bit 8 - Scan mode"] #[inline(always)] pub fn scan(&self) -> SCAN_R { SCAN_R::new(((self.bits >> 8) & 1) != 0) } #[doc = "Bit 9 - Enable the watchdog on a single channel in scan mode"] #[inline(always)] pub fn awdsgl(&self) -> AWDSGL_R { AWDSGL_R::new(((self.bits >> 9) & 1) != 0) } #[doc = "Bit 
10 - Automatic injected group conversion"] #[inline(always)] pub fn jauto(&self) -> JAUTO_R { JAUTO_R::new(((self.bits >> 10) & 1) != 0) } #[doc = "Bit 11 - Discontinuous mode on regular channels"] #[inline(always)] pub fn discen(&self) -> DISCEN_R { DISCEN_R::new(((self.bits >> 11) & 1) != 0) } #[doc = "Bit 12 - Discontinuous mode on injected channels"] #[inline(always)] pub fn jdiscen(&self) -> JDISCEN_R { JDISCEN_R::new(((self.bits >> 12) & 1) != 0) } #[doc = "Bits 13:15 - Discontinuous mode channel count"] #[inline(always)] pub fn discnum(&self) -> DISCNUM_R { DISCNUM_R::new(((self.bits >> 13) & 7) as u8) } #[doc = "Bit 22 - Analog watchdog enable on injected channels"] #[inline(always)] pub fn jawden(&self) -> JAWDEN_R { JAWDEN_R::new(((self.bits >> 22) & 1) != 0) } #[doc = "Bit 23 - Analog watchdog enable on regular channels"] #[inline(always)] pub fn awden(&self) -> AWDEN_R { AWDEN_R::new(((self.bits >> 23) & 1) != 0) } #[doc = "Bits 24:25 - Resolution"] #[inline(always)] pub fn res(&self) -> RES_R { RES_R::new(((self.bits >> 24) & 3) as u8) } #[doc = "Bit 26 - Overrun interrupt enable"] #[inline(always)] pub fn ovrie(&self) -> OVRIE_R { OVRIE_R::new(((self.bits >> 26) & 1) != 0) } } impl W { #[doc = "Bits 0:4 - Analog watchdog channel select bits"] #[inline(always)] #[must_use] pub fn awdch(&mut self) -> AWDCH_W<CR1_SPEC, 0> { AWDCH_W::new(self) } #[doc = "Bit 5 - Interrupt enable for EOC"] #[inline(always)] #[must_use] pub fn eocie(&mut self) -> EOCIE_W<CR1_SPEC, 5> { EOCIE_W::new(self) } #[doc = "Bit 6 - Analog watchdog interrupt enable"] #[inline(always)] #[must_use] pub fn awdie(&mut self) -> AWDIE_W<CR1_SPEC, 6> { AWDIE_W::new(self) } #[doc = "Bit 7 - Interrupt enable for injected channels"] #[inline(always)] #[must_use] pub fn jeocie(&mut self) -> JEOCIE_W<CR1_SPEC, 7> { JEOCIE_W::new(self) } #[doc = "Bit 8 - Scan mode"] #[inline(always)] #[must_use] pub fn scan(&mut self) -> SCAN_W<CR1_SPEC, 8> { SCAN_W::new(self) } #[doc = "Bit 9 - Enable the 
watchdog on a single channel in scan mode"] #[inline(always)] #[must_use] pub fn awdsgl(&mut self) -> AWDSGL_W<CR1_SPEC, 9> { AWDSGL_W::new(self) } #[doc = "Bit 10 - Automatic injected group conversion"] #[inline(always)] #[must_use] pub fn jauto(&mut self) -> JAUTO_W<CR1_SPEC, 10> { JAUTO_W::new(self) } #[doc = "Bit 11 - Discontinuous mode on regular channels"] #[inline(always)] #[must_use] pub fn discen(&mut self) -> DISCEN_W<CR1_SPEC, 11> { DISCEN_W::new(self) } #[doc = "Bit 12 - Discontinuous mode on injected channels"] #[inline(always)] #[must_use] pub fn jdiscen(&mut self) -> JDISCEN_W<CR1_SPEC, 12> { JDISCEN_W::new(self) } #[doc = "Bits 13:15 - Discontinuous mode channel count"] #[inline(always)] #[must_use] pub fn discnum(&mut self) -> DISCNUM_W<CR1_SPEC, 13> { DISCNUM_W::new(self) } #[doc = "Bit 22 - Analog watchdog enable on injected channels"] #[inline(always)] #[must_use] pub fn jawden(&mut self) -> JAWDEN_W<CR1_SPEC, 22> { JAWDEN_W::new(self) } #[doc = "Bit 23 - Analog watchdog enable on regular channels"] #[inline(always)] #[must_use] pub fn awden(&mut self) -> AWDEN_W<CR1_SPEC, 23> { AWDEN_W::new(self) } #[doc = "Bits 24:25 - Resolution"] #[inline(always)] #[must_use] pub fn res(&mut self) -> RES_W<CR1_SPEC, 24> { RES_W::new(self) } #[doc = "Bit 26 - Overrun interrupt enable"] #[inline(always)] #[must_use] pub fn ovrie(&mut self) -> OVRIE_W<CR1_SPEC, 26> { OVRIE_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "control register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. 
See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct CR1_SPEC; impl crate::RegisterSpec for CR1_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`cr1::R`](R) reader structure"] impl crate::Readable for CR1_SPEC {} #[doc = "`write(|w| ..)` method takes [`cr1::W`](W) writer structure"] impl crate::Writable for CR1_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets CR1 to value 0"] impl crate::Resettable for CR1_SPEC { const RESET_VALUE: Self::Ux = 0; }
#[macro_use] extern crate lazy_static; #[macro_use] extern crate if_chain; #[macro_use] extern crate custom_derive; #[macro_use] extern crate enum_derive; extern crate clap; extern crate regex; extern crate toml; extern crate unicode_segmentation; extern crate ordinal; extern crate anyhow; extern crate boolinator; extern crate thiserror; //extern crate nazonazo_macros; use regex::Regex; use serenity::{ client::Client, framework::standard::StandardFramework, model::gateway::Ready, prelude::*, }; use std::env; pub mod bot; pub mod commands; pub mod dictionary; pub mod error; pub mod settings; pub mod sort; use sort::Sorted; use commands::{executors, facade}; use serenity::model::id::{ChannelId, UserId}; #[macro_export] macro_rules! try_say { ($ctx: expr, $msg: expr, $response: expr) => { if let Err(why) = ($msg).channel_id.say(&($ctx), $response) { println!("{}", why); } }; } struct Handler; impl EventHandler for Handler { fn ready(&self, ctx: Context, ready: Ready) { ChannelId::from(621544952299782144_u64) .say(&ctx, "botがDiscordとの接続を完了しました。") .expect("fail to send"); println!("{} is connected!", ready.user.name); } } fn main() { println!("hello!"); // Login with a bot token from the environment let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler) .expect("Error creating client"); client.with_framework( StandardFramework::new() .configure(|c| { c.dynamic_prefix(|_, msg| { Some( settings::SETTINGS .lock() .unwrap() .prefix .dynamic .get(&msg.channel_id.as_u64().to_string()) .cloned() .unwrap_or_else(|| "~".to_string()), ) }) .on_mention(Some(UserId::from(621402474527588352))) .allow_dm(true) .no_dm_prefix(true) }) // set the bot's prefix to "~" .bucket("basic", |b| b.delay(1).time_span(0).limit(1)) .bucket("long", |b| b.delay(1).time_span(2).limit(1)) .before(|ctx, msg, command_name| { if command_name == "enable" { return true; } if !settings::SETTINGS .lock() .unwrap() .channel .enabled .contains(msg.channel_id.as_u64()) { return false; 
} if facade::QUIZ_COMMANDS_REGEX.is_match(&command_name.to_string()) { match &*bot::QUIZ.lock().unwrap() { bot::Status::Holding(ref ans, ..) => { try_say!( ctx, msg, format!("前回の出題が解かれていません\n問題: `{}`", ans.sorted()) ); false } bot::Status::Contesting(ref ans, ..) => { try_say!( ctx, msg, format!("現在コンテスト中です\n問題: `{}`", ans.sorted()) ); false } bot::Status::StandingBy => true, } } else { true } }) .normal_message(|ctx, msg| { println!("{}", msg.author.id); if !msg.author.bot { let re = Regex::new(r"^kick\(.*\);$").unwrap(); if re.is_match(&msg.content) { println!("{:?}", executors::kick(ctx, msg)); return; } executors::answer_check(ctx, msg); } }) .group(&commands::facade::QUIZ_GROUP) .group(&commands::facade::CONTEST_GROUP) .group(&commands::facade::SETTINGS_GROUP) .group(&commands::facade::EXTRA_GROUP) .help(&commands::facade::NAZONAZO_HELP), ); // start listening for events by starting a single shard if let Err(why) = client.start() { println!("An error occurred while running the client: {:?}", why); } }
/*
 * Datadog API V1 Collection
 *
 * Collection of all Datadog Public endpoints.
 *
 * The version of the OpenAPI document: 1.0
 * Contact: support@datadoghq.com
 * Generated by: https://openapi-generator.tech
 */

/// SyntheticsStepDetail : Object describing a step for a Synthetic test.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SyntheticsStepDetail {
    /// Array of errors collected for a browser test.
    #[serde(rename = "browserErrors", skip_serializing_if = "Option::is_none")]
    pub browser_errors: Option<Vec<crate::models::SyntheticsBrowserError>>,
    #[serde(rename = "checkType", skip_serializing_if = "Option::is_none")]
    pub check_type: Option<crate::models::SyntheticsCheckType>,
    /// Description of the test.
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Total duration in millisecond of the test.
    #[serde(rename = "duration", skip_serializing_if = "Option::is_none")]
    pub duration: Option<f64>,
    /// Error returned by the test.
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    #[serde(rename = "playingTab", skip_serializing_if = "Option::is_none")]
    pub playing_tab: Option<crate::models::SyntheticsPlayingTab>,
    /// Whether or not screenshots were collected by the test.
    #[serde(rename = "screenshotBucketKey", skip_serializing_if = "Option::is_none")]
    pub screenshot_bucket_key: Option<bool>,
    /// Whether or not to skip this step.
    #[serde(rename = "skipped", skip_serializing_if = "Option::is_none")]
    pub skipped: Option<bool>,
    /// Whether or not snapshots were collected by the test.
    #[serde(rename = "snapshotBucketKey", skip_serializing_if = "Option::is_none")]
    pub snapshot_bucket_key: Option<bool>,
    /// The step ID.
    #[serde(rename = "stepId", skip_serializing_if = "Option::is_none")]
    pub step_id: Option<i64>,
    /// If this step includes a sub-test. [Subtests documentation](https://docs.datadoghq.com/synthetics/browser_tests/advanced_options/#subtests).
    #[serde(rename = "subTestStepDetails", skip_serializing_if = "Option::is_none")]
    pub sub_test_step_details: Option<Vec<crate::models::SyntheticsStepDetail>>,
    /// Time before starting the step.
    #[serde(rename = "timeToInteractive", skip_serializing_if = "Option::is_none")]
    pub time_to_interactive: Option<f64>,
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub _type: Option<crate::models::SyntheticsStepType>,
    /// URL to perform the step against.
    #[serde(rename = "url", skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    /// Value for the step.
    #[serde(rename = "value", skip_serializing_if = "Option::is_none")]
    pub value: Option<serde_json::Value>,
    /// Array of Core Web Vitals metrics for the step.
    #[serde(rename = "vitalsMetrics", skip_serializing_if = "Option::is_none")]
    pub vitals_metrics: Option<Vec<crate::models::SyntheticsCoreWebVitals>>,
    /// Warnings collected that didn't fail the step.
    #[serde(rename = "warnings", skip_serializing_if = "Option::is_none")]
    pub warnings: Option<Vec<crate::models::SyntheticsStepDetailWarning>>,
}

impl SyntheticsStepDetail {
    /// Object describing a step for a Synthetic test.
    /// All fields start as `None`; set only the ones you need.
    pub fn new() -> SyntheticsStepDetail {
        SyntheticsStepDetail {
            browser_errors: None,
            check_type: None,
            description: None,
            duration: None,
            error: None,
            playing_tab: None,
            screenshot_bucket_key: None,
            skipped: None,
            snapshot_bucket_key: None,
            step_id: None,
            sub_test_step_details: None,
            time_to_interactive: None,
            _type: None,
            url: None,
            value: None,
            vitals_metrics: None,
            warnings: None,
        }
    }
}
use crate::grid::color::StaticColor;
use crate::grid::config::{
    AlignmentHorizontal, AlignmentVertical, Borders, CompactConfig, Indent, Line, Sides,
};

/// A [`CompactConfig`] configuration plus vertical alignment.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct CompactMultilineConfig {
    config: CompactConfig,
    alignment_vertical: AlignmentVertical,
    formatting: Formatting,
}

impl CompactMultilineConfig {
    /// Create a new compact multiline config from a [`CompactConfig`].
    pub fn new(config: CompactConfig) -> Self {
        Self::from(config)
    }

    /// Set a vertical alignment.
    pub const fn set_alignment_vertical(mut self, alignment: AlignmentVertical) -> Self {
        self.alignment_vertical = alignment;
        self
    }

    /// Get a vertical alignment.
    pub const fn get_alignment_vertical(&self) -> AlignmentVertical {
        self.alignment_vertical
    }

    /// Set grid margin.
    pub const fn set_margin(mut self, margin: Sides<Indent>) -> Self {
        self.config = self.config.set_margin(margin);
        self
    }

    /// Returns a grid margin.
    pub const fn get_margin(&self) -> &Sides<Indent> {
        self.config.get_margin()
    }

    /// Set the [`Borders`] value as correct one.
    pub const fn set_borders(mut self, borders: Borders<char>) -> Self {
        self.config = self.config.set_borders(borders);
        self
    }

    /// Set the first horizontal line.
    ///
    /// It ignores the [`Borders`] horizontal value if set for 1st row.
    pub const fn set_first_horizontal_line(mut self, line: Line<char>) -> Self {
        self.config = self.config.set_first_horizontal_line(line);
        self
    }

    /// Get the first horizontal line, if one was set.
    pub const fn get_first_horizontal_line(&self) -> Option<Line<char>> {
        self.config.get_first_horizontal_line()
    }

    /// Returns a current [`Borders`] structure.
    pub const fn get_borders(&self) -> &Borders<char> {
        self.config.get_borders()
    }

    /// Returns the colors of the current [`Borders`] structure.
    pub const fn get_borders_color(&self) -> &Borders<StaticColor> {
        self.config.get_borders_color()
    }

    /// Set a padding to a given cells.
    pub const fn set_padding(mut self, padding: Sides<Indent>) -> Self {
        self.config = self.config.set_padding(padding);
        self
    }

    /// Get a padding of cells.
    pub const fn get_padding(&self) -> &Sides<Indent> {
        self.config.get_padding()
    }

    /// Set a horizontal alignment.
    pub const fn set_alignment_horizontal(mut self, alignment: AlignmentHorizontal) -> Self {
        self.config = self.config.set_alignment_horizontal(alignment);
        self
    }

    /// Get a alignment horizontal.
    pub const fn get_alignment_horizontal(&self) -> AlignmentHorizontal {
        self.config.get_alignment_horizontal()
    }

    /// Sets colors of border carcass on the grid.
    pub const fn set_borders_color(mut self, borders: Borders<StaticColor>) -> Self {
        self.config = self.config.set_borders_color(borders);
        self
    }

    /// Set colors for a margin.
    pub const fn set_margin_color(mut self, color: Sides<StaticColor>) -> Self {
        self.config = self.config.set_margin_color(color);
        self
    }

    /// Returns a margin color.
    pub const fn get_margin_color(&self) -> Sides<StaticColor> {
        self.config.get_margin_color()
    }

    /// Set a padding color to all cells.
    pub const fn set_padding_color(mut self, color: Sides<StaticColor>) -> Self {
        self.config = self.config.set_padding_color(color);
        self
    }

    /// Get a padding color.
    pub const fn get_padding_color(&self) -> Sides<StaticColor> {
        self.config.get_padding_color()
    }

    /// Set formatting.
    pub const fn set_formatting(mut self, formatting: Formatting) -> Self {
        self.formatting = formatting;
        self
    }

    /// Get formatting.
    pub const fn get_formatting(&self) -> Formatting {
        self.formatting
    }
}

impl Default for CompactMultilineConfig {
    fn default() -> Self {
        Self {
            config: Default::default(),
            alignment_vertical: AlignmentVertical::Top,
            formatting: Formatting::default(),
        }
    }
}

impl From<CompactConfig> for CompactMultilineConfig {
    fn from(config: CompactConfig) -> Self {
        Self {
            config,
            alignment_vertical: AlignmentVertical::Top,
            formatting: Formatting::default(),
        }
    }
}

impl AsRef<CompactConfig> for CompactMultilineConfig {
    fn as_ref(&self) -> &CompactConfig {
        &self.config
    }
}

impl AsMut<CompactConfig> for CompactMultilineConfig {
    fn as_mut(&mut self) -> &mut CompactConfig {
        &mut self.config
    }
}

#[cfg(feature = "std")]
impl From<CompactMultilineConfig> for crate::grid::config::SpannedConfig {
    fn from(compact: CompactMultilineConfig) -> Self {
        use crate::grid::config::Entity;

        // Start from the compact config, then apply the two extra settings
        // (vertical alignment and formatting) globally.
        let mut cfg = crate::grid::config::SpannedConfig::from(compact.config);
        cfg.set_alignment_vertical(Entity::Global, compact.alignment_vertical);
        cfg.set_formatting(Entity::Global, compact.formatting.into());

        cfg
    }
}

/// Formatting represent a logic of formatting of a cell.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Formatting {
    /// A setting to allow horizontal trim.
    pub horizontal_trim: bool,
    /// A setting to allow vertical trim.
    pub vertical_trim: bool,
    /// A setting to allow alignment per line.
    pub allow_lines_alignment: bool,
}

impl Formatting {
    /// Creates a new [`Formatting`] structure.
    pub fn new(horizontal_trim: bool, vertical_trim: bool, allow_lines_alignment: bool) -> Self {
        Self {
            horizontal_trim,
            vertical_trim,
            allow_lines_alignment,
        }
    }
}

#[cfg(feature = "std")]
impl From<Formatting> for crate::grid::config::Formatting {
    fn from(val: Formatting) -> Self {
        crate::grid::config::Formatting {
            allow_lines_alignment: val.allow_lines_alignment,
            horizontal_trim: val.horizontal_trim,
            vertical_trim: val.vertical_trim,
        }
    }
}
// Copyright 2019, 2020 Wingchain
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use node_chain::DBTransaction;
use node_consensus_base::support::ConsensusSupport;
use primitives::errors::CommonResult;
use primitives::{codec, Address};

use crate::proof::Proof;
use crate::protocol::{Entry, EntryData, Proposal};
use parking_lot::RwLock;
use std::collections::btree_map::Range;
use std::collections::BTreeMap;
use std::ops::RangeBounds;
use std::sync::Arc;

// Keys under which the consensus state is persisted via the support backend.
const DB_KEY_CURRENT_TERM: &[u8] = b"current_term";
const DB_KEY_CURRENT_VOTED_FOR: &[u8] = b"current_voted_for";
const DB_KEY_COMMIT_LOG_INDEX: &[u8] = b"commit_log_index";
const DB_KEY_LOGS: &[u8] = b"logs";
const DB_KEY_PROPOSAL: &[u8] = b"proposal";

/// Persistent consensus state (term, vote, log entries, pending proposal).
///
/// Keeps an in-memory copy of the persisted state behind `RwLock`s. Every
/// `update_*` method persists through `commit_consensus_data` FIRST and only
/// then updates the in-memory view, so the backend is never behind memory.
pub struct Storage<S>
where
	S: ConsensusSupport,
{
	// Log index/term covered by the latest proof; entries at or below
	// `base_log_index` are pruned from `logs` on refresh.
	base_log_index: RwLock<u64>,
	base_log_term: RwLock<u64>,
	current_term: RwLock<u64>,
	current_voted_for: RwLock<Option<Address>>,
	commit_log_index: RwLock<u64>,
	// In-memory log entries keyed by log index.
	logs: RwLock<BTreeMap<u64, Entry>>,
	proposal: RwLock<Option<Proposal>>,
	support: Arc<S>,
}

impl<S> Storage<S>
where
	S: ConsensusSupport,
{
	/// Build a storage instance with zeroed state, then load/repair the
	/// persisted state via `refresh`.
	pub fn new(support: Arc<S>) -> CommonResult<Self> {
		let this = Self {
			base_log_index: RwLock::new(0),
			base_log_term: RwLock::new(0),
			current_term: RwLock::new(0),
			current_voted_for: RwLock::new(None),
			commit_log_index: RwLock::new(0),
			logs: RwLock::new(Default::default()),
			proposal: RwLock::new(None),
			support,
		};
		this.refresh()?;
		Ok(this)
	}

	/// Reload all consensus state from the backend and fix up any values
	/// that are stale relative to the latest proof: term/vote are bumped
	/// to at least the proof's term, commit index to at least the proof's
	/// index, covered log entries are pruned, and a proposal no longer
	/// present in the logs is dropped. Each fix is persisted before the
	/// in-memory view is published at the end.
	pub fn refresh(&self) -> CommonResult<()> {
		// init base_log_index, base_log_term
		let proof = self.get_proof()?;
		let (base_log_index, base_log_term) = match proof {
			Some(proof) => (proof.log_index, proof.log_term),
			None => (0, 0),
		};

		// init current_term, current_voted_for
		// and fix if needed
		let mut current_term: u64 = self
			.support
			.get_consensus_data(DB_KEY_CURRENT_TERM)?
			.unwrap_or_default();
		let mut current_voted_for: Option<Address> = self
			.support
			.get_consensus_data(DB_KEY_CURRENT_VOTED_FOR)?
			.unwrap_or_default();
		if current_term < base_log_term {
			current_term = base_log_term;
			current_voted_for = None;
			self.commit_consensus_data(|transaction| {
				self.support
					.put_consensus_data(transaction, DB_KEY_CURRENT_TERM, current_term)?;
				self.support.put_consensus_data(
					transaction,
					DB_KEY_CURRENT_VOTED_FOR,
					&current_voted_for,
				)?;
				Ok(())
			})?;
		}

		// init commit_log_index
		// and fix if needed
		let mut commit_log_index: u64 = self
			.support
			.get_consensus_data(DB_KEY_COMMIT_LOG_INDEX)?
			.unwrap_or_default();
		if commit_log_index < base_log_index {
			commit_log_index = base_log_index;
			self.commit_consensus_data(|transaction| {
				self.support.put_consensus_data(
					transaction,
					DB_KEY_COMMIT_LOG_INDEX,
					commit_log_index,
				)?;
				Ok(())
			})?;
		}

		// init logs
		// and fix if needed
		let mut logs = {
			let logs: Vec<(u64, Entry)> = self
				.support
				.get_consensus_data(DB_KEY_LOGS)?
				.unwrap_or_default();
			logs.into_iter().collect::<BTreeMap<_, _>>()
		};
		// Prune every entry already covered by the proof base.
		let to_remove_key = logs
			.range(..=base_log_index)
			.map(|(k, _)| *k)
			.collect::<Vec<_>>();
		for k in &to_remove_key {
			logs.remove(k);
		}
		if !to_remove_key.is_empty() {
			let logs_vec = logs.iter().collect::<Vec<_>>();
			self.commit_consensus_data(|transaction| {
				self.support
					.put_consensus_data(transaction, DB_KEY_LOGS, &logs_vec)?;
				Ok(())
			})?;
		}

		// init proposal
		// and fix if needed
		let mut proposal: Option<Proposal> = self
			.support
			.get_consensus_data(DB_KEY_PROPOSAL)?
			.unwrap_or_default();
		// A proposal is only kept if some log entry still references its block hash.
		let contained_in_logs = logs.iter().any(|(_, v)| match &v.data {
			EntryData::Proposal { block_hash, .. } => {
				Some(block_hash) == proposal.as_ref().map(|p| &p.block_hash)
			}
			_ => false,
		});
		if !contained_in_logs {
			proposal = None;
			self.commit_consensus_data(|transaction| {
				self.support
					.put_consensus_data(transaction, DB_KEY_PROPOSAL, &proposal)?;
				Ok(())
			})?;
		}

		// Publish the repaired state to the in-memory view.
		(*self.base_log_index.write()) = base_log_index;
		(*self.base_log_term.write()) = base_log_term;
		(*self.current_term.write()) = current_term;
		(*self.current_voted_for.write()) = current_voted_for;
		(*self.commit_log_index.write()) = commit_log_index;
		(*self.logs.write()) = logs;
		(*self.proposal.write()) = proposal;

		Ok(())
	}

	pub fn get_base_log_index_term(&self) -> (u64, u64) {
		(*self.base_log_index.read(), *self.base_log_term.read())
	}

	/// Index/term of the last in-memory log entry, falling back to the
	/// base index/term when the log is empty.
	pub fn get_last_log_index_term(&self) -> (u64, u64) {
		match self.logs.read().iter().last() {
			Some((_k, v)) => (v.index, v.term),
			None => (*self.base_log_index.read(), *self.base_log_term.read()),
		}
	}

	pub fn get_current_term(&self) -> u64 {
		*self.current_term.read()
	}

	/// Persist the new term, then update the in-memory value.
	pub fn update_current_term(&self, current_term: u64) -> CommonResult<()> {
		self.commit_consensus_data(|transaction| {
			self.support
				.put_consensus_data(transaction, DB_KEY_CURRENT_TERM, current_term)?;
			Ok(())
		})?;
		*self.current_term.write() = current_term;
		Ok(())
	}

	pub fn get_current_voted_for(&self) -> Option<Address> {
		(*self.current_voted_for.read()).clone()
	}

	/// Persist the new vote, then update the in-memory value.
	pub fn update_current_voted_for(&self, current_voted_for: Option<Address>) -> CommonResult<()> {
		self.commit_consensus_data(|transaction| {
			self.support.put_consensus_data(
				transaction,
				DB_KEY_CURRENT_VOTED_FOR,
				&current_voted_for,
			)?;
			Ok(())
		})?;
		*self.current_voted_for.write() = current_voted_for;
		Ok(())
	}

	pub fn get_commit_log_index(&self) -> u64 {
		*self.commit_log_index.read()
	}

	/// Persist the new commit index, then update the in-memory value.
	pub fn update_commit_log_index(&self, commit_log_index: u64) -> CommonResult<()> {
		self.commit_consensus_data(|transaction| {
			self.support.put_consensus_data(
				transaction,
				DB_KEY_COMMIT_LOG_INDEX,
				&commit_log_index,
			)?;
			Ok(())
		})?;
		*self.commit_log_index.write() = commit_log_index;
		Ok(())
	}

	/// Clone the log entries whose indices fall in `range`.
	pub fn get_log_entries<R>(&self, range: R) -> Vec<Entry>
	where
		R: RangeBounds<u64>,
	{
		self.get_log_entries_using(range, |x| x.map(|(_, v)| v.clone()).collect())
	}

	/// Run `using` over the borrowed range iterator, avoiding clones when
	/// the caller only needs to inspect entries.
	pub fn get_log_entries_using<R, T, F>(&self, range: R, using: F) -> T
	where
		F: Fn(Range<u64, Entry>) -> T,
		R: RangeBounds<u64>,
	{
		using(self.logs.read().range(range))
	}

	/// Insert entries (keyed by their own `index`) and persist the whole log.
	pub fn append_log_entries(&self, entry: Vec<Entry>) -> CommonResult<()> {
		(*self.logs.write()).extend(entry.into_iter().map(|x| (x.index, x)));
		let logs_vec = self
			.logs
			.read()
			.iter()
			.map(|(k, v)| (*k, v.clone()))
			.collect::<Vec<_>>();
		self.commit_consensus_data(|transaction| {
			self.support
				.put_consensus_data(transaction, DB_KEY_LOGS, &logs_vec)?;
			Ok(())
		})?;
		Ok(())
	}

	/// Remove entries whose indices fall in `range` and persist the whole log.
	pub fn delete_log_entries<R>(&self, range: R) -> CommonResult<()>
	where
		R: RangeBounds<u64>,
	{
		let to_remove_key = self
			.logs
			.read()
			.range(range)
			.map(|(k, _)| *k)
			.collect::<Vec<_>>();
		let mut guard = self.logs.write();
		for key in &to_remove_key {
			guard.remove(key);
		}
		// Release the write lock before re-reading for persistence below.
		drop(guard);
		let logs_vec = self
			.logs
			.read()
			.iter()
			.map(|(k, v)| (*k, v.clone()))
			.collect::<Vec<_>>();
		self.commit_consensus_data(|transaction| {
			self.support
				.put_consensus_data(transaction, DB_KEY_LOGS, &logs_vec)?;
			Ok(())
		})?;
		Ok(())
	}

	pub fn get_proposal(&self) -> Option<Proposal> {
		self.get_proposal_using(|x| x.clone())
	}

	/// Run `using` over the borrowed proposal, avoiding a clone.
	pub fn get_proposal_using<T, F: Fn(&Option<Proposal>) -> T>(&self, using: F) -> T {
		using(&*self.proposal.read())
	}

	/// Persist the new proposal, then update the in-memory value.
	pub fn update_proposal(&self, proposal: Option<Proposal>) -> CommonResult<()> {
		self.commit_consensus_data(|transaction| {
			self.support
				.put_consensus_data(transaction, DB_KEY_PROPOSAL, &proposal)?;
			Ok(())
		})?;
		*self.proposal.write() = proposal;
		Ok(())
	}

	/// Fetch and decode the proof of the latest confirmed block.
	/// Returns `None` when nothing is confirmed yet (`confirmed_number == 0`).
	fn get_proof(&self) -> CommonResult<Option<Proof>> {
		let current_state = self.support.get_current_state();
		let confirmed_number = current_state.confirmed_number;
		let proof = match confirmed_number {
			0 => None,
			_ => {
				let confirmed_block_hash = &current_state.confirmed_block_hash;
				let proof = self
					.support
					.get_proof(confirmed_block_hash)?
					.ok_or_else(|| {
						node_consensus_base::errors::ErrorKind::Data(format!(
							"Missing proof: block_hash: {}",
							confirmed_block_hash
						))
					})?;
				let data = proof.data;
				let proof: Proof = codec::decode(&mut &data[..]).map_err(|_| {
					node_consensus_base::errors::ErrorKind::Data("Decode proof error".to_string())
				})?;
				Some(proof)
			}
		};
		Ok(proof)
	}

	/// Run `op` against a fresh transaction and commit it atomically.
	fn commit_consensus_data<OP: Fn(&mut DBTransaction) -> CommonResult<()>>(
		&self,
		op: OP,
	) -> CommonResult<()> {
		let mut transaction = DBTransaction::new();
		op(&mut transaction)?;
		self.support.commit_consensus_data(transaction)
	}
}
use std::io::{self, BufRead}; use std::str::FromStr; struct BoardingPass { row: u32, col: u32, } impl BoardingPass { fn uid(&self) -> u32 { // Just panic if overflow for now. self.row * 8 + self.col } } impl FromStr for BoardingPass { type Err = io::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { if s.len() != 10 { return Err(io::Error::new( io::ErrorKind::InvalidData, "Wrong input data length", )); } let row = s.chars().take(7).try_fold(0u32, |sum, c| match c { 'B' => Ok((sum << 1) | 1), 'F' => Ok((sum << 1) | 0), _ => Err(io::Error::new( io::ErrorKind::Other, "Need B/F in row specifier", )), })?; let col = s.chars().skip(7).take(3).try_fold(0u32, |sum, c| match c { 'R' => Ok((sum << 1) | 1), 'L' => Ok((sum << 1) | 0), _ => Err(io::Error::new( io::ErrorKind::Other, "Need B/F in col specifier", )), })?; Ok(BoardingPass { row, col }) } } fn challenge1() -> io::Result<u32> { let mut max = 0u32; for l in aoc20::input_bufreader("day05").lines() { let uid = l?.parse::<BoardingPass>()?.uid(); if uid > max { max = uid; } } Ok(max) } fn challenge2() -> io::Result<u32> { let bp_uids = { let mut v = aoc20::input_bufreader("day05") .lines() .map(|l| Ok(l?.parse::<BoardingPass>()?.uid())) .collect::<io::Result<Vec<_>>>()?; v.sort(); v }; Ok(bp_uids .windows(2) // From challenge description: // Your seat wasn't at the very front or back, though; the // seats with IDs +1 and -1 from yours will be in your list. .find(|w| w[1] - w[0] == 2) .ok_or(io::Error::new( io::ErrorKind::Other, "No unused boarding pass uid found", ))?[0] + 1) } fn main() -> io::Result<()> { println!("{}", challenge1()?); println!("{}", challenge2()?); Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn check_challenge1() -> io::Result<()> { assert_eq!(challenge1()?, 871); Ok(()) } #[test] fn check_challenge2() -> io::Result<()> { assert_eq!(challenge2()?, 640); Ok(()) } }
// Simulated-annealing solver over an N×N character grid read from stdin.
// Symbols seen in the grid: 'o'/'x' (fixed cells scored for connectivity) and
// '.', '+', '-' (cells the search is free to rewrite).
// NOTE(review): looks like a programming-contest submission; the exact game
// rules (the "vanish" gravity step, '-' consuming a slid cell, the 'D'
// filler) are inferred from the code below — confirm against the statement.
use std::io;
use std::fmt;
use std::cmp;
use std::time::{SystemTime};
use std::ops::{Index, IndexMut};

// Grid side length.
const N: usize = 50;

fn main() {
    // `table` is the candidate solution; `vanished_table` caches the result
    // of applying the vanish step so scoring doesn't redo it every time.
    let mut table = Table::read();
    let mut vanished_table = table.clone();
    vanished_table.vanish();
    let mut rng = Rng::from_seed(urand());
    // ~9.5 s time budget for the annealing loop.
    let mut sa = Anealing::new(SystemTime::now(), 9500);
    let mut current_score: u32 = 0;
    let mut count = 0;
    while sa.remainig() > 0.0 {
        count += 1;
        // Mutate one random row, re-vanish it, and accept/reject the move by
        // the annealing criterion.
        let r = rng.next_idx(N);
        let mut row = *table.get_row(r);
        randomize(&mut row, &mut rng);
        let v_row_crr = *vanished_table.get_row(r);
        let mut v_row_nxt = row.clone();
        vanish_row(&mut v_row_nxt);
        vanished_table.set_row(r, v_row_nxt);
        let next_score = vanished_table.score();
        // if current_score < next_score && sa.remainig() < 0.3 {
        //     row.update_prob();
        // }
        // p(current_score);
        if sa.transition(current_score as i32, next_score as i32) {
            table.set_row(r, row);
            current_score = next_score;
        } else {
            // Rejected: restore the cached vanished row.
            vanished_table.set_row(r, v_row_crr);
        }
    }
    p(count);
    // vanished_table.show();
    // table.show();
    p(vanished_table.score());
    // p(Table::rand())
}

// Rewrites the mutable cells of `row` (everything except 'o'/'x'; column 0 is
// never touched) with '.', '+' or '-' drawn from the row's probabilities.
fn randomize(row: &mut Row, rng: &mut Rng) {
    for c in 1..N {
        if row[c] == 'o' || row[c] == 'x' {
            continue;
        }
        const SYM: [char; 3] = ['.', '+', '-'];
        row[c] = match rng.next_f() {
            v if v < row.prob.0 => SYM[0],
            v if v < row.prob.0 + row.prob.1 => SYM[1],
            _ => SYM[2]
        };
    }
}

// Simulated-annealing schedule and acceptance test.
// (Spellings `Anealing`/`remainig`/`temperture` kept as-is: renames are out
// of scope for this comment-only pass.)
#[allow(dead_code)]
struct Anealing {
    start_time: SystemTime,
    limit_ms: f32, // total time budget, milliseconds
    p1: f32,       // temperature decay factor
    p2: f32,       // initial temperature scale
    rng: Rng
}

impl Anealing {
    fn new(start_time: SystemTime, limit_ms: u32) -> Anealing {
        let rng = Rng::from_seed(urand());
        const FACTOR: f32 = 4.8;
        let p1 = FACTOR;
        let p2 = N as f32 / 2.0;
        Anealing {
            start_time: start_time,
            limit_ms: limit_ms as f32,
            p1: p1,
            p2: p2,
            rng: rng
        }
    }

    // Metropolis rule: always accept improvements; otherwise accept with
    // probability exp(delta / T), delta <= 0.
    fn transition(&mut self, current_score: i32, next_score: i32) -> bool {
        if current_score < next_score {
            return true;
        }
        let probability = ((next_score - current_score) as f32 / self.temperture()).exp();
        self.rng.next_f() < probability
    }

    // Exponentially decaying temperature: p2 at the start of the budget,
    // p2 / e^p1 at the deadline.
    fn temperture(&self) -> f32 {
        self.p2 * (self.remainig() * self.p1).exp() / self.p1.exp()
    }

    // Fraction of the time budget still left (starts near 1.0, may go
    // negative once the deadline has passed — that ends the main loop).
    fn remainig(&self) -> f32 {
        let elapsed = self.start_time.elapsed().unwrap();
        let elapsed_ms = (elapsed.as_secs() * 1000) as f32 + elapsed.subsec_nanos() as f32 / 1000000.0;
        (self.limit_ms - elapsed_ms) / self.limit_ms
    }
}

// One grid row plus the ('.', '+', '-') sampling probabilities used by
// `randomize`. The manual `Clone` below keeps `#[derive(Copy)]` valid.
#[derive(Copy)]
struct Row {
    row: [char; N],
    prob: (f32, f32, f32)
}

impl Row {
    fn new() -> Row {
        Row {
            row: ['.'; N],
            prob: (45.0 / 50.0, 4.0 / 50.0, 1.0 / 50.0)
        }
    }

    // Nudges the sampling probabilities toward the row's current symbol
    // distribution, then renormalizes. Currently only reachable from
    // commented-out code in `main`.
    fn update_prob(&mut self) {
        let mut t = [0.0, 0.0, 0.0];
        for c in 1..N {
            match self.row[c] {
                '.' => t[0] += 1.0,
                '+' => t[1] += 1.0,
                '-' => t[2] += 1.0,
                _ => ()
            }
        }
        let f = 1.5;
        let sum = t[0] + t[1] + t[2];
        self.prob.0 += t[0] / sum * f;
        self.prob.1 += t[1] / sum * f;
        self.prob.2 += t[2] / sum * f;
        let psum = self.prob.0 + self.prob.1 + self.prob.2;
        self.prob.0 /= psum;
        self.prob.1 /= psum;
        self.prob.2 /= psum;
    }
}

impl Clone for Row {
    fn clone(&self) -> Row {
        let mut row = ['.'; N];
        for c in 0..N {
            row[c] = self.row[c]
        }
        Row { row: row, prob: self.prob }
    }
}

impl Index<usize> for Row {
    type Output = char;
    fn index(&self, id: usize) -> &char {
        &self.row[id]
    }
}

impl IndexMut<usize> for Row {
    fn index_mut<'a>(&'a mut self, id: usize) -> &mut char {
        &mut self.row[id]
    }
}

// N×N grid stored as an array of `Row`s.
#[derive(Copy)]
struct Table {
    rows: [Row; N]
}

impl Clone for Table {
    fn clone(&self) -> Table {
        let mut rows = [Row::new(); N];
        for r in 0..N {
            rows[r] = self.rows[r].clone()
        }
        Table { rows: rows }
    }
}

impl Table {
    // Builds a table from raw input lines, transposing/flipping so that
    // input row i, column j lands at rows[j][N - 1 - i].
    fn new(rows: Vec<String>) -> Table {
        let mut table_rows = [Row::new(); N];
        for (i, row) in rows.iter().enumerate() {
            for (j, c) in row.as_bytes().iter().enumerate() {
                table_rows[j][N - 1 - i] = *c as char;
            }
        }
        Table { rows: table_rows }
    }

    // Reads N lines from stdin.
    fn read() -> Table {
        let rows = (0..N).map(|_| read_line()).collect::<Vec<_>>();
        Table::new(rows)
    }

    // Debug
    #[allow(dead_code)]
    fn rand() -> Table {
        let val = ['.', '.', 'o', 'x'];
        let mut rng = Rng::from_seed(urand());
        let mut table = Table { rows: [Row::new(); N] };
        for r in 0..N {
            for c in 0..N {
                table.set(r, c, val[(rng.next() % 4) as usize]);
            }
        }
        table
    }

    // Prints the table in the original input orientation (inverse of `new`).
    fn show(&self) {
        for r in 0..N {
            for c in 0..N {
                print!("{}", self.rows[c][N - 1 - r]);
            }
            println!("");
        }
    }

    fn get(&self, r: usize, c: usize) -> char {
        self.rows[r][c]
    }

    fn get_row(&self, r: usize) -> &Row {
        &self.rows[r]
    }

    fn set(&mut self, r: usize, c: usize, v: char) {
        self.rows[r][c] = v;
    }

    fn set_row(&mut self, r: usize, row: Row) {
        self.rows[r] = row
    }

    // Applies the gravity/consume step to every row.
    fn vanish(&mut self) {
        for r in 0..N {
            vanish_row(&mut self.rows[r])
        }
    }

    // Score = size of the largest 4-connected 'o' region plus the largest
    // 'x' region, found by recursive flood fill with a per-symbol visited
    // set (the visited set also prevents re-counting across start cells).
    fn score(&self) -> u32 {
        const SYM: [char; 2] = ['o', 'x'];
        fn score_rec(table: &Table, r: usize, c: usize, s: usize, visited: &mut [[[bool; N]; N]; 2]) -> u32 {
            if r >= N || c >= N || visited[s][r][c] || table.get(r, c) != SYM[s] {
                return 0;
            }
            visited[s][r][c] = true;
            1 + score_rec(table, r, c + 1, s, visited)
                + score_rec(table, r + 1, c, s, visited)
                + score_rec(table, r, if c == 0 { 0 } else { c - 1 }, s, visited)
                + score_rec(table, if r == 0 { 0 } else { r - 1 }, c, s, visited)
        };
        let mut visited = [[[false; N]; N]; 2];
        let mut score_0 = 0;
        let mut score_1 = 0;
        for r in 0..N {
            for c in 0..N {
                score_0 = cmp::max(score_0, score_rec(self, r, c, 0, &mut visited));
                score_1 = cmp::max(score_1, score_rec(self, r, c, 1, &mut visited));
            }
        }
        score_0 + score_1
    }
}

// Compacts a row in place: '.' cells are skipped (later cells slide left),
// a '-' consumes one slid cell instead of surviving, and positions past the
// remaining content are filled with 'D'.
fn vanish_row(row: &mut Row) {
    let mut slide: usize = 0;
    for c in 0..N {
        while c + slide < N && row[c + slide] == '.' {
            slide += 1
        };
        let v = if c + slide < N { row[c + slide] } else { 'D' };
        row[c] = match v {
            '-' => {
                if slide == 0 {
                    '-'
                } else {
                    slide -= 1;
                    'D'
                }
            }
            _ => v,
        }
    }
}

impl fmt::Display for Table {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for r in 0..N {
            for c in 0..N {
                write!(f, "{}", self.rows[r][c]).unwrap();
            }
            writeln!(f, "").unwrap();
        }
        write!(f, "")
    }
}

// Reads 16 bytes of OS entropy and folds them into four u32 seed words.
// NOTE(review): both inner loops index `buf[i]` with i in 0..4, so only the
// first 4 of the 16 bytes contribute and all four seed words come out equal
// (probably `buf[j * 4 + i]` was intended). Left as-is in this comment-only
// pass — confirm before changing, since it alters the RNG stream.
#[allow(dead_code)]
fn urand() -> [u32; 4] {
    use std::io::Read;
    let mut buf = [0; 16];
    let mut f = std::fs::File::open("/dev/urandom").unwrap();
    f.read(&mut buf).unwrap();
    let mut rnd = [0; 4];
    for j in 0..4 {
        for i in 0..4 {
            rnd[j] <<= 8;
            rnd[j] ^= buf[i] as u32;
        }
    }
    rnd
}

// XorShift-style 128-bit pseudo-random generator (shifts 11/19/8).
struct Rng {
    x: u32,
    y: u32,
    z: u32,
    w: u32,
}

#[allow(dead_code)]
impl Rng {
    // Fixed default seed (deterministic stream).
    fn new() -> Rng {
        return Rng {
            x: 0x193a6754,
            y: 0xa8a7d469,
            z: 0x97830e05,
            w: 0x113ba7bb,
        }
    }

    fn from_seed(seed: [u32; 4]) -> Rng {
        return Rng { x: seed[0], y: seed[1], z: seed[2], w: seed[3] }
    }

    fn next(&mut self) -> u32 {
        let t = self.x ^ (self.x << 11);
        self.x = self.y;
        self.y = self.z;
        self.z = self.w;
        self.w = (self.w ^ (self.w >> 19)) ^ (t ^ (t >> 8));
        return self.w
    }

    // Float in [0, 1].
    fn next_f(&mut self) -> f32 {
        self.next() as f32 / std::u32::MAX as f32
    }

    // Index in [0, n) (slight modulo bias is acceptable here).
    fn next_idx(&mut self, n: usize) -> usize {
        (self.next() % n as u32) as usize
    }
}

// Contest-style helpers: print a value, read an int / a trimmed line.
#[allow(dead_code)]
fn p<T: std::fmt::Display>(v: T) {
    println!("{}", v)
}

#[allow(dead_code)]
fn read_int() -> i32 {
    read_line().parse().unwrap()
}

#[allow(dead_code)]
fn read_line() -> String {
    let mut line = String::new();
    io::stdin().read_line(&mut line).unwrap();
    line.trim().to_string()
}
// q0119_pascals_triangle_ii
struct Solution;

impl Solution {
    /// Returns row `row_index` (0-based) of Pascal's triangle.
    ///
    /// Computed in place with O(k) extra memory instead of materializing all
    /// preceding rows via `generate` and discarding them (O(k²)).
    ///
    /// Precondition: `row_index >= 0` (matches the original, which panicked
    /// on negative input).
    pub fn get_row(row_index: i32) -> Vec<i32> {
        let k = row_index as usize;
        let mut row = Vec::with_capacity(k + 1);
        row.push(1);
        for _ in 0..k {
            // Update right-to-left so each cell still reads last row's value.
            for j in (1..row.len()).rev() {
                row[j] += row[j - 1];
            }
            row.push(1);
        }
        row
    }

    /// Returns the first `num_rows` rows of Pascal's triangle
    /// (empty for `num_rows == 0`).
    pub fn generate(num_rows: i32) -> Vec<Vec<i32>> {
        if num_rows == 0 {
            return vec![];
        }
        let mut ret = Vec::with_capacity(num_rows as usize);
        ret.push(vec![1]);
        for line in 2..=num_rows {
            let line = line as usize;
            let last_line = ret.last().unwrap();
            let mut tv = Vec::with_capacity(line);
            // Edges are always 1; interior cells are sums of the two parents.
            tv.push(last_line[0]);
            for i in 1..line - 1 {
                let t = last_line[i - 1] + last_line[i];
                tv.push(t);
            }
            tv.push(last_line[line - 2]);
            ret.push(tv);
        }
        ret
    }
}

#[cfg(test)]
mod tests {
    use super::Solution;

    #[test]
    fn it_works() {
        assert_eq!(Solution::get_row(3), vec![1, 3, 3, 1]);
    }
}
/// Returns the element at index `n` (0-based) of the merged, ascending-sorted
/// contents of `a` and `b`.
///
/// # Panics
/// Panics if `n` is negative (cast wraps to a huge `usize`) or out of bounds
/// of the combined length — same behavior callers already rely on.
#[allow(non_snake_case)] // public name kept for existing callers
fn twoArraysNthElement(mut a: Vec<i32>, b: Vec<i32>, n: i32) -> i32 {
    a.extend(b);
    // Order is all that matters for indexing, so the faster,
    // non-allocating unstable sort is sufficient.
    a.sort_unstable();
    a[n as usize]
}
// 92
//! This crate defines specification-friendly natural integers with an upper bound. Operations on
//! these integers can be defined as modular (modulo the upper bound) or regular (with a panic
//! on underflow or overflow).
//!
//! As each integer gets its own Rust type, the compiler detects and prevents any mixing between
//! all the different integers you would have defined.
//!
//! # Defining a new integer type
//!
//! Here is the macro used to define the `SizeNatExample` type of this crate:
//!
//! ```ignore
//! define_abstract_integer_checked!(SizeNatExample, 64);
//! ```
//!
//! `SizeNatExample` is the name of the newly-created type. `64` is the number of bits of the machine
//! representation of the type. From the number of bits is derived an upper bound for the integer
//! for which all operations are checked for overflow.
//!
//! The resulting integer type is copyable, and supports addition, subtraction, multiplication,
//! integer division, remainder, comparison and equality. The `from_literal` method allows you to
//! convert integer literals into your new type.
//!
//! # Refining an integer type for modular arithmetic
//!
//! On top of a previously defined abstract integer, you can define another type that lets you
//! implement modular arithmetic. For instance, this crate defines the arithmetic field over the
//! 9th Mersenne prime with:
//!
//! ```ignore
//! define_refined_modular_integer!(
//!     SizeNatFieldExample,
//!     SizeNatExample,
//!     SizeNatExample::pow2(61) - SizeNatExample::from_literal(1)
//! );
//! ```
//!
//! The first argument of this new macro is the name of the newly defined refined type. The second
//! argument is the name of the base abstract integer that will act as the representation. The
//! third argument is the modulo for all operations, defined as a value of the base type.
//!
//!
//! # Example
//!
//! ```
//! use abstract_integers::*;
//!
//! abstract_public_nat_mod!(SizeNatFieldExample, SizeNatExample, 64, "1fffffffffffffff");
//!
//! let x1 = SizeNatExample::from_literal(687165654266415);
//! let x2 = SizeNatExample::from_literal(4298832000156);
//! let x3 = x1 + x2;
//! assert_eq!(SizeNatExample::from_literal(691464486266571), x3);
//! let x4 = SizeNatExample::from_literal(8151084996540);
//! let x5 = x3 - x4;
//! assert_eq!(SizeNatExample::from_literal(683313401270031), x5.into());
//! let x6 = x5 / SizeNatExample::from_literal(1541654268);
//! assert_eq!(SizeNatExample::from_literal(443233), x6.into());
//! let x7 : SizeNatFieldExample = SizeNatFieldExample::from_literal(2305843009213693951) + x6.into();
//! assert_eq!(x7, x6.into());
//! ```
//!

#![cfg_attr(not(feature = "std"), no_std)]

// Re-exports
pub use core::cmp::Ordering;
pub use core::num::ParseIntError;
pub use core::ops::*;
pub use num::{traits, traits::identities::*, CheckedSub, Zero};
pub use num_bigint::{BigInt, BigUint, Sign};

pub mod abstract_int;
pub mod nat_mod;
static FILENAME: &str = "input/data";
static PREEMBLE: usize = 25;

fn main() {
    let data = std::fs::read_to_string(FILENAME).expect("could not read file");
    let nums = parse(&data).expect("failed to parse data");
    let num = part_one(&nums, PREEMBLE).unwrap();
    println!("part one: {}", num);
    println!("part two: {}", part_two(&nums, num).unwrap());
}

/// Returns the first number that is not the sum of two distinct entries among
/// the `size` numbers immediately before it.
///
/// Uses sliding windows of `size + 1` elements, so it can no longer index
/// past the end of `nums` (the original `0..nums.len()` loop read
/// `nums[x + size]` out of bounds whenever no invalid number existed).
fn part_one(nums: &[usize], size: usize) -> Option<usize> {
    nums.windows(size + 1)
        .find(|w| !is_num_ok(w[size], &w[..size]))
        .map(|w| w[size])
}

/// Finds a contiguous run of at least two numbers summing to `num` and
/// returns min + max of that run.
fn part_two(nums: &[usize], num: usize) -> Option<usize> {
    (0..nums.len()).find_map(|x| find_match(num, x, nums))
}

/// True when `num` equals the sum of two distinct elements of `nums`.
fn is_num_ok(num: usize, nums: &[usize]) -> bool {
    for x in 0..nums.len() {
        for y in 0..nums.len() {
            if x == y {
                continue;
            }
            if nums[x] + nums[y] == num {
                return true;
            }
        }
    }
    false
}

/// Accumulates numbers starting at `from_ix`; when the running total hits
/// `num`, returns min + max over the contributing range.
fn find_match(num: usize, from_ix: usize, nums: &[usize]) -> Option<usize> {
    let mut count = 0;
    let mut x = from_ix;
    while count < num && x < nums.len() {
        count += nums[x];
        // `x > from_ix` enforces the puzzle's "range of at least two
        // numbers" rule. It also fixes two defects in the original slice
        // `&nums[from_ix..x]`: that slice excluded the final contributing
        // element, and was empty (→ `min().unwrap()` panic) for
        // single-element hits.
        if count == num && x > from_ix {
            let slice = &nums[from_ix..=x];
            let sum = slice.iter().min().unwrap() + slice.iter().max().unwrap();
            return Some(sum);
        }
        x += 1;
    }
    None
}

/// Parses one number per line. `lines()` also tolerates `\r\n` endings,
/// which `split("\n")` did not (the stray `\r` made `parse` fail).
fn parse(data: &str) -> Option<Vec<usize>> {
    data.trim().lines().map(|s| s.parse().ok()).collect()
}

// Gated so the file-reading tests only compile in test builds.
#[cfg(test)]
mod tests {
    #[test]
    fn test_part_one() {
        let data = std::fs::read_to_string(super::FILENAME).expect("could not read file");
        let nums = super::parse(&data).expect("failed to parse data");
        assert_eq!(
            530627549,
            super::part_one(&nums, super::PREEMBLE).expect("failed part one")
        );
    }

    #[test]
    fn test_part_two() {
        let data = std::fs::read_to_string(super::FILENAME).expect("could not read file");
        let nums = super::parse(&data).expect("failed to parse data");
        assert_eq!(
            77730285,
            super::part_two(&nums, 530627549).expect("failed part two")
        );
    }
}
// Lists the available `uvm` sub-commands discovered on the system, printing
// their names and/or paths in several layouts selected by CLI flags.
use anyhow::Result;
use console::Style;
use console::Term;
use uvm_cli;
use structopt::{
    clap::crate_authors, clap::crate_description, clap::crate_version, clap::AppSettings, StructOpt,
};
use uvm_cli::{options::ColorOption, set_colors_enabled, set_loglevel};

// Shared clap settings applied to the derived parser below.
const SETTINGS: &'static [AppSettings] = &[
    AppSettings::ColoredHelp,
    AppSettings::DontCollapseArgsInUsage,
];

// CLI options. The `///` field docs double as `--help` text via structopt,
// so they are left byte-identical here.
#[derive(StructOpt, Debug)]
#[structopt(version = crate_version!(), author = crate_authors!(), about = crate_description!(), settings = SETTINGS)]
struct Opts {
    /// print a list with commands
    #[structopt(short, long)]
    list: bool,

    /// print single column list
    #[structopt(short = "1")]
    single_column: bool,

    /// print only the path to the commands
    #[structopt(short, long = "path")]
    path_only: bool,

    /// print debug output
    #[structopt(short, long)]
    debug: bool,

    /// print more output
    #[structopt(short, long, parse(from_occurrences))]
    verbose: i32,

    /// Color:.
    #[structopt(short, long, possible_values = &ColorOption::variants(), case_insensitive = true, default_value)]
    color: ColorOption,
}

fn main() -> Result<()> {
    let opt = Opts::from_args();
    set_colors_enabled(&opt.color);
    // --debug forces log level 2; otherwise the -v occurrence count is used.
    set_loglevel(opt.debug.then(|| 2).unwrap_or(opt.verbose));

    let commands = uvm_cli::find_sub_commands()?;

    let out_style = Style::new().cyan();
    let path_style = Style::new().italic().green();

    // Any of --list, -1 or -v switches to one-entry-per-line output.
    let list = opt.list || opt.single_column || opt.verbose > 0;
    let path_only = opt.path_only;
    let single_column = opt.single_column;
    // Newline-separated when listing or when stdout is piped; otherwise a
    // single space-separated line.
    let seperator = if list || !Term::stdout().is_term() {
        "\n"
    } else {
        " "
    };

    // Accumulate the whole report into one string:
    //   default            -> names only
    //   --list (not -1)    -> "name - path" per line
    //   --path             -> paths only
    //   -1                 -> one name (or path with --path) per line
    let output = commands.fold(String::new(), |out, command| {
        let mut new_line = out;
        if !path_only || (list && !single_column) {
            new_line += &format!("{}", out_style.apply_to(command.command_name().to_string()));
        }
        if list && !single_column {
            new_line += " - ";
        }
        if path_only || (list && !single_column) {
            new_line += &format!("{}", path_style.apply_to(command.path().display()));
        }
        new_line += seperator;
        new_line
    });

    // NOTE(review): the report is written to stderr even though the
    // separator logic probes `Term::stdout()` — confirm whether `println!`
    // (stdout) was intended before changing.
    eprintln!("{}", &output);
    Ok(())
}
#![allow(dead_code)] mod sexpr; mod scanner; mod parser; mod interpreter; fn main() { // let vec = vec!["abc".to_string(), "def".to_string()]; // let r = & A { vec }; // r.vec[0]; }
//! This crate implements an algorithm that performs zero-cost permutations and shuffling on a //! range of numbers. //! //! This method, discovered by Andrew Kensler in 2013, uses bit-twiddling to permute a range of //! numbers, from `[0..n)` without needing to mutate state or store the whole range of numbers. It //! is extremely efficient, with no memory overhead (i.e. you don't have to store the whole range //! of numbers). //! //! This is effectively the same as taking some vector of numbers from `[0..n)`, randomly shuffling //! each element, and then calling the nth index of that vector. Kensler's algorithm offers a way //! to achieve the same effect, except we don't need to store a whole vector for that range of //! numbers. //! //! # Example Usage //! //! Using this library is fairly simple: //! //! ```rust //! # use hashed_permutation::HashedPermutation; //! use std::num::NonZeroU32; //! //! let perm = HashedPermutation { //! seed: 1234, //! length: NonZeroU32::new(10).unwrap(), //! }; //! //! // Let's pick a randomly permuted number //! let permuted_number = perm.shuffle(0).unwrap(); //! ``` //! //! ## Iterators //! //! You can also use this structure as an iterator to iterate through a permuted set from `(0..n)`. //! //! ```rust //! # use hashed_permutation::HashedIter; //! use std::num::NonZeroU32; //! //! // Loop from (0..10) in a shuffled set //! let mut iterator = HashedIter::new_with_seed(NonZeroU32::new(10).unwrap(), 100); //! //! for i in iterator { //! println!("{}", i); //! } //! ``` mod error; mod iterator; mod kensler; pub use error::{PermutationError, PermutationResult}; pub use iterator::HashedIter; pub use kensler::HashedPermutation;
//! Parser for the OpenType `MATH` table: layout constants, per-glyph info
//! (italics correction, top-accent attachment, kerning) and size variants /
//! assemblies for stretchable glyphs.
//!
//! NOTE(review): `table!`, `parser!` and `slice!` are crate-local macros
//! defined elsewhere in this crate — the structure of the generated types
//! (and the meaning of the `@uint16` / `?uint16` field prefixes, which look
//! like required vs. optional offsets) should be confirmed against the
//! macro definitions.
#![allow(non_snake_case)]

use std::collections::{HashMap, HashSet};
use crate::{R, FontError, ParseResult};
use crate::opentype::coverage_table;
use crate::parsers::{Parser, NomParser, FixedSize, array_iter};
use itertools::Itertools;
use std::iter::FromIterator;
use nom::number::complete::{be_i16, be_u16};

pub mod assembly;

// Named big-endian integer parsers used by the `table!` invocations below.
parser!(int16 : be_i16 -> i16);
parser!(uint16 : be_u16 -> u16);

/// Parses a complete `MATH` table from the start of `data`.
pub fn parse_math(data: &[u8]) -> Result<MathHeader, FontError> {
    MathHeader::parse(data)
}

/// A math value in design units. The second i16 of the on-disk record (an
/// offset, presumably to a device-correction table — confirm against the
/// spec) is read and discarded.
#[derive(Default, Clone, Debug)]
pub struct MathValueRecord {
    pub value: i16
}

impl NomParser for MathValueRecord {
    type Output = Self;
    fn parse2(i: &[u8]) -> ParseResult<Self::Output> {
        let (i, value) = be_i16(i)?;
        let (i, _offset) = be_i16(i)?;
        Ok((i, MathValueRecord { value }))
    }
}

impl FixedSize for MathValueRecord {
    // Two i16 fields on disk.
    const SIZE: usize = 4;
}

table!(MathHeader {
    /// Major version of the MATH table, = 1.
    uint16 majorVersion,
    /// Minor version of the MATH table, = 0.
    uint16 minorVersion,
    /// Offset to MathConstants table - from the beginning of MATH table.
    @uint16 MathConstants constants,
    /// Offset to MathGlyphInfo table - from the beginning of MATH table.
    @uint16 MathGlyphInfo glyph_info,
    /// Offset to MathVariants table - from the beginning of MATH table.
    @uint16 MathVariants variants,
});

table!(MathConstants {
    /// Percentage of scaling down for level 1 superscripts and subscripts. Suggested value: 80%.
    int16 script_percent_scale_down,
    /// Percentage of scaling down for level 2 (scriptScript) superscripts and subscripts. Suggested value: 60%.
    int16 script_script_percent_scale_down,
    /// Minimum height required for a delimited expression (contained within parentheses, etc.) to be treated as a sub-formula. Suggested value: normal line height × 1.5.
    uint16 delimited_sub_formula_min_height,
    /// Minimum height of n-ary operators (such as integral and summation) for formulas in display mode (that is, appearing as standalone page elements, not embedded inline within text).
    uint16 display_operator_min_height,
    /// White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.
    MathValueRecord math_leading,
    /// Axis height of the font. In math typesetting, the term axis refers to a horizontal reference line used for positioning elements in a formula. The math axis is similar to but distinct from the baseline for regular text layout. For example, in a simple equation, a minus symbol or fraction rule would be on the axis, but a string for a variable name would be set on a baseline that is offset from the axis. The axisHeight value determines the amount of that offset.
    MathValueRecord axis_height,
    /// Maximum (ink) height of accent base that does not require raising the accents. Suggested: x‑height of the font (os2.sxHeight) plus any possible overshots
    MathValueRecord accent_base_height,
    /// Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).
    MathValueRecord flattened_accent_base_height,
    /// The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.
    MathValueRecord subscript_shift_down,
    /// Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x- height.
    MathValueRecord subscript_top_max,
    /// Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.
    MathValueRecord subscript_baseline_drop_min,
    /// Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.
    MathValueRecord superscript_shift_up,
    /// Standard shift of superscripts relative to the base, in cramped style.
    MathValueRecord superscript_shift_up_cramped,
    /// Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: ¼ x-height.
    MathValueRecord superscript_bottom_min,
    /// Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.
    MathValueRecord superscript_baseline_drop_max,
    /// Minimum gap between the superscript and subscript ink. Suggested: 4 × default rule thickness.
    MathValueRecord sub_superscript_gap_min,
    /// The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.
    MathValueRecord superscript_bottom_max_with_subscript,
    /// Extra white space to be added after each subscript and superscript. Suggested: 0.5 pt for a 12 pt font.
    MathValueRecord space_after_script,
    /// Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.
    MathValueRecord upper_limit_gap_min,
    /// Minimum distance between baseline of upper limit and (ink) top of the base operator.
    MathValueRecord upper_limit_baseline_rise_min,
    /// Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.
    MathValueRecord lower_limit_gap_min,
    /// Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.
    MathValueRecord lower_limit_baseline_drop_min,
    /// Standard shift up applied to the top element of a stack.
    MathValueRecord stack_top_shift_up,
    /// Standard shift up applied to the top element of a stack in display style.
    MathValueRecord stack_top_display_style_shift_up,
    /// Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.
    MathValueRecord stack_bottom_shift_down,
    /// Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.
    MathValueRecord stack_bottom_display_style_shift_down,
    /// Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3 × default rule thickness.
    MathValueRecord stack_gap_min,
    /// Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7 × default rule thickness.
    MathValueRecord stack_display_style_gap_min,
    /// Standard shift up applied to the top element of the stretch stack.
    MathValueRecord stretch_stack_top_shift_up,
    /// Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.
    MathValueRecord stretch_stack_bottom_shift_down,
    /// Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: same value as upperLimitGapMin.
    MathValueRecord stretch_stack_gap_above_min,
    /// Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: same value as lowerLimitGapMin.
    MathValueRecord stretch_stack_gap_below_min,
    /// Standard shift up applied to the numerator.
    MathValueRecord fraction_numerator_shift_up,
    /// Standard shift up applied to the numerator in display style. Suggested: same value as stackTopDisplayStyleShiftUp.
    MathValueRecord fraction_numerator_display_style_shift_up,
    /// Standard shift down applied to the denominator. Positive for moving in the downward direction.
    MathValueRecord fraction_denominator_shift_down,
    /// Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: same value as stackBottomDisplayStyleShiftDown.
    MathValueRecord fraction_denominator_display_style_shift_down,
    /// Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness.
    MathValueRecord fraction_numerator_gap_min,
    /// Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3 × default rule thickness.
    MathValueRecord fraction_num_display_style_gap_min,
    /// Thickness of the fraction bar. Suggested: default rule thickness.
    MathValueRecord fraction_rule_thickness,
    /// Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness.
    MathValueRecord fraction_denominator_gap_min,
    /// Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3 × default rule thickness.
    MathValueRecord fraction_denom_display_style_gap_min,
    /// Horizontal distance between the top and bottom elements of a skewed fraction.
    MathValueRecord skewed_fraction_horizontal_gap,
    /// Vertical distance between the ink of the top and bottom elements of a skewed fraction.
    MathValueRecord skewed_fraction_vertical_gap,
    /// Distance between the overbar and the (ink) top of the base. Suggested: 3 × default rule thickness.
    MathValueRecord overbar_vertical_gap,
    /// Thickness of overbar. Suggested: default rule thickness.
    MathValueRecord overbar_rule_thickness,
    /// Extra white space reserved above the overbar. Suggested: default rule thickness.
    MathValueRecord overbar_extra_ascender,
    /// Distance between underbar and (ink) bottom of the base. Suggested: 3 × default rule thickness.
    MathValueRecord underbar_vertical_gap,
    /// Thickness of underbar. Suggested: default rule thickness.
    MathValueRecord underbar_rule_thickness,
    /// Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.
    MathValueRecord underbar_extra_descender,
    /// Space between the (ink) top of the expression and the bar over it. Suggested: 1¼ default rule thickness.
    MathValueRecord radical_vertical_gap,
    /// Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + ¼ x-height.
    MathValueRecord radical_display_style_vertical_gap,
    /// Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.
    MathValueRecord radical_rule_thickness,
    /// Extra white space reserved above the radical. Suggested: same value as radicalRuleThickness.
    MathValueRecord radical_extra_ascender,
    /// Extra horizontal kern before the degree of a radical, if such is present.
    MathValueRecord radical_kern_before_degree,
    /// Negative kern after the degree of a radical, if such is present. Suggested: −10/18 of em.
    MathValueRecord radical_kern_after_degree,
    /// Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.
    int16 radical_degree_bottom_raise_percent,
});

table!(MathGlyphInfo {
    /// Offset to MathItalicsCorrectionInfo table, from the beginning of the MathGlyphInfo table.
    ?uint16 MathItalicsCorrectionInfo italics_correction_info,
    /// Offset to MathTopAccentAttachment table, from the beginning of the MathGlyphInfo table.
    @uint16 MathTopAccentAttachment top_accent_attachment,
    /// Offset to ExtendedShapes coverage table, from the beginning of the MathGlyphInfo table. When the glyph to the left or right of a box is an extended shape variant, the (ink) box should be used for vertical positioning purposes, not the default position defined by values in MathConstants table. May be NULL.
    ?uint16 ExtendedShapes extended_shape_coverage,
    /// Offset to MathKernInfo table, from the beginning of the MathGlyphInfo table.
    ?uint16 MathKernInfo kern_info,
});

/// Zips the (infallible) keys of `a` with the fallible values of `b`,
/// short-circuiting on the first error.
fn merge2<A, B, T, C>(a: A, b: B) -> Result<C, FontError>
    where A: Iterator, B: Iterator<Item=Result<T, FontError>>, C: FromIterator<(A::Item, T)>
{
    a.zip(b).map(|(a, b)| match b {
        Ok(b) => Ok((a, b)),
        Err(e) => Err(e)
    }).try_collect()
}

/// Zips two fallible iterators into pairs, short-circuiting on the first
/// error from either side.
fn merge2rr<A, B, T, U, C>(a: A, b: B) -> Result<C, FontError>
    where A: Iterator<Item=Result<T, FontError>>, B: Iterator<Item=Result<U, FontError>>, C: FromIterator<(T, U)>
{
    a.zip(b).map(|(a, b)| match (a, b) {
        (Ok(a), Ok(b)) => Ok((a, b)),
        (Err(e), _) | (_, Err(e)) => Err(e)
    }).try_collect()
}

/// Italics-correction values keyed by glyph id (keys come from the
/// associated coverage table).
#[derive(Clone, Debug, Default)]
pub struct MathItalicsCorrectionInfo {
    map: HashMap<u16, MathValueRecord>
}

impl MathItalicsCorrectionInfo {
    /// Italics correction for `gid`, if the glyph is covered.
    pub fn get(&self, gid: u16) -> Option<&MathValueRecord> {
        self.map.get(&gid)
    }
}

impl NomParser for MathItalicsCorrectionInfo {
    type Output = MathItalicsCorrectionInfo;
    fn parse2(data: &[u8]) -> ParseResult<Self> {
        // Coverage offset is relative to the start of this subtable.
        let (i, italics_correction_coverage_offset) = be_u16(data)?;
        let italics_correction_coverage = coverage_table(slice!(data, italics_correction_coverage_offset as usize ..))?;
        let (i, italics_correction_count) = be_u16(i)?;
        let (i, italics_correction) = array_iter::<MathValueRecord>(i, italics_correction_count as usize)?;
        // Pair each covered glyph id with its value record.
        let map = merge2(italics_correction_coverage, italics_correction)?;
        Ok((i, MathItalicsCorrectionInfo { map }))
    }
}

/// Top-accent attachment positions keyed by glyph id.
#[derive(Clone, Debug)]
pub struct MathTopAccentAttachment {
    map: HashMap<u16, MathValueRecord>
}

impl MathTopAccentAttachment {
    /// Attachment value for `gid`, if the glyph is covered.
    pub fn get(&self, gid: u16) -> Option<&MathValueRecord> {
        self.map.get(&gid)
    }
}

impl NomParser for MathTopAccentAttachment {
    type Output = MathTopAccentAttachment;
    fn parse2(data: &[u8]) -> ParseResult<Self> {
        let (i, top_accent_coverage_offset) = be_u16(data)?;
        let top_accent_coverage = coverage_table(slice!(data, top_accent_coverage_offset as usize ..))?;
        let (i, top_accent_attachment_count) = be_u16(i)?;
        let (i, top_accent_attachment) = array_iter::<MathValueRecord>(i, top_accent_attachment_count as usize)?;
        let map = top_accent_coverage.zip(top_accent_attachment).map(|(a, b)| b.map(|b| (a, b))).try_collect()?;
        Ok((i, MathTopAccentAttachment { map }))
    }
}

table!(MathGlyphVariantRecord {
    /// Glyph ID for the variant.
    uint16 variant_glyph,
    /// Advance width/height, in design units, of the variant, in the direction of requested glyph extension.
    uint16 advance_measurement,
});

table!(GlyphPartRecord {
    /// Glyph ID for the part.
    uint16 glyph_id,
    /// Advance width/ height, in design units, of the straight bar connector material at the start of the glyph in the direction of the extension (the left end for horizontal extension, the bottom end for vertical extension).
    uint16 start_connector_length,
    /// Advance width/ height, in design units, of the straight bar connector material at the end of the glyph in the direction of the extension (the right end for horizontal extension, the top end for vertical extension).
    uint16 end_connector_length,
    /// Full advance width/height for this part in the direction of the extension, in design units.
    uint16 full_advance,
    /// Part qualifiers. PartFlags enumeration currently uses only one bit:
    /// 0x0001 fExtender If set, the part can be skipped or repeated.
    /// 0xFFFE Reserved.
    uint16 part_flags,
});

impl GlyphPartRecord {
    /// True when the part must appear (fExtender bit clear).
    #[inline]
    pub fn required(&self) -> bool {
        self.part_flags & 1 == 0
    }
    /// True when the part is an extender that may be skipped or repeated.
    #[inline]
    pub fn optional(&self) -> bool {
        self.part_flags & 1 != 0
    }
}

/// The parts used to assemble one stretched glyph, plus the italics
/// correction of the assembled result.
#[derive(Clone, Debug)]
pub struct GlyphAssembly {
    pub italics_correction: MathValueRecord,
    pub parts: Vec<GlyphPartRecord>
}

impl NomParser for GlyphAssembly {
    type Output = Self;
    fn parse2(data: &[u8]) -> ParseResult<Self> {
        let (i, italics_correction) = MathValueRecord::parse2(data)?;
        let (i, part_count) = be_u16(i)?;
        let (i, parts) = array_iter::<GlyphPartRecord>(i, part_count as usize)?;
        Ok((i, GlyphAssembly {
            italics_correction,
            parts: parts.try_collect()?
        }))
    }
}

/// How one glyph can grow: an optional part-based assembly and a list of
/// pre-designed size variants.
#[derive(Clone, Debug)]
pub struct MathGlyphConstruction {
    pub glyph_assembly: Option<GlyphAssembly>,
    pub variants: Vec<MathGlyphVariantRecord>,
}

impl Parser for MathGlyphConstruction {
    type Output = Self;
    fn parse(data: &[u8]) -> Result<Self, FontError> {
        let (i, glyph_assembly_offset) = be_u16(data)?;
        // Offset 0 means "no assembly table" per OpenType NULL-offset rules.
        let glyph_assembly = match glyph_assembly_offset {
            0 => None,
            off => Some(GlyphAssembly::parse(slice!(data, off as usize ..))?)
        };
        let (i, variant_count) = be_u16(i)?;
        let (i, variants) = array_iter::<MathGlyphVariantRecord>(i, variant_count as usize)?;
        Ok(MathGlyphConstruction {
            glyph_assembly,
            variants: variants.try_collect()?
        })
    }
}

/// Vertical and horizontal glyph constructions, keyed by glyph id. A zero
/// coverage offset yields an empty map for that direction.
#[derive(Clone, Debug)]
pub struct MathVariants {
    pub min_connector_overlap: u16,
    pub vert_glyph_construction: HashMap<u16, MathGlyphConstruction>,
    pub horiz_glyph_construction: HashMap<u16, MathGlyphConstruction>,
}

impl Parser for MathVariants {
    type Output = MathVariants;
    fn parse(data: &[u8]) -> Result<Self, FontError> {
        let (i, min_connector_overlap) = be_u16(data)?;
        let (i, vert_glyph_coverage_offset) = be_u16(i)?;
        let (i, horiz_glyph_coverage_offset) = be_u16(i)?;
        let (i, vert_glyph_count) = be_u16(i)?;
        let (i, horiz_glyph_count) = be_u16(i)?;
        let (i, vert_glyph_construction_offsets) = array_iter::<uint16>(i, vert_glyph_count as usize)?;
        let (i, horiz_glyph_construction_offsets) = array_iter::<uint16>(i, horiz_glyph_count as usize)?;
        let vert_glyph_construction = if vert_glyph_coverage_offset != 0 {
            let vert_glyph_coverage = coverage_table(slice!(data, vert_glyph_coverage_offset as usize ..))?;
            let vert_glyph_construction = vert_glyph_construction_offsets.map(|off| MathGlyphConstruction::parse(slice!(data, off? as usize ..)));
            merge2(vert_glyph_coverage, vert_glyph_construction)?
        } else {
            HashMap::new()
        };
        let horiz_glyph_construction = if horiz_glyph_coverage_offset != 0 {
            let horiz_glyph_coverage = coverage_table(slice!(data, horiz_glyph_coverage_offset as usize ..))?;
            let horiz_glyph_construction = horiz_glyph_construction_offsets.map(|off| MathGlyphConstruction::parse(slice!(data, off? as usize ..)));
            merge2(horiz_glyph_coverage, horiz_glyph_construction)?
        } else {
            HashMap::new()
        };
        Ok(MathVariants { min_connector_overlap, vert_glyph_construction, horiz_glyph_construction })
    }
}

/// A piecewise-constant kern function: (correction height, kern value)
/// pairs plus the value used above the last height.
#[derive(Default, Debug, Clone)]
pub struct MathKern {
    pub pairs: Vec<(MathValueRecord, MathValueRecord)>,
    pub last: MathValueRecord
}

impl Parser for MathKern {
    type Output = Self;
    fn parse(i: &[u8]) -> Result<Self, FontError> {
        // On disk: heightCount, heights[heightCount], kerns[heightCount + 1];
        // the final kern is stored separately in `last`.
        let (i, height_count) = be_u16(i)?;
        let (i, heights) = array_iter::<MathValueRecord>(i, height_count as usize)?;
        let (i, kerns) = array_iter::<MathValueRecord>(i, height_count as usize)?;
        let last = MathValueRecord::parse(i)?;
        let pairs = merge2rr(heights, kerns)?;
        Ok(MathKern { pairs, last })
    }
}

impl MathKern {
    /// Kern value for the first band whose correction height exceeds
    /// `height`; falls back to `last` above all recorded heights.
    pub fn kern_for_height(&self, height: i16) -> i16 {
        for (h, k) in self.pairs.iter() {
            if height < h.value {
                return k.value;
            }
        }
        self.last.value
    }
}

/// The four per-corner kern functions of one glyph.
#[derive(Debug, Clone)]
pub struct MathKernInfoRecord {
    pub top_right: MathKern,
    pub top_left: MathKern,
    pub bottom_right: MathKern,
    pub bottom_left: MathKern,
}

/// Per-glyph corner kerning, keyed by glyph id.
#[derive(Clone, Debug, Default)]
pub struct MathKernInfo {
    pub entries: HashMap<u16, MathKernInfoRecord>
}

impl Parser for MathKernInfo {
    type Output = Self;
    fn parse(data: &[u8]) -> Result<Self, FontError> {
        let (i, coverage_offset) = be_u16(data)?;
        let coverage = coverage_table(slice!(data, coverage_offset as usize ..))?;
        let (i, kern_count) = be_u16(i)?;
        // Four offsets (one per corner) per covered glyph.
        let (i, records) = array_iter::<uint16>(i, 4 * kern_count as usize)?;
        // A zero offset means "no kern table for that corner" — use the
        // empty default.
        let parse_kern = |off| if off > 0 {
            MathKern::parse(slice!(data, off as usize ..))
        } else {
            Ok(MathKern::default())
        };
        let records = records.tuples().map(|(a, b, c, d)| {
            Ok(MathKernInfoRecord {
                top_right: parse_kern(a?)?,
                top_left: parse_kern(b?)?,
                bottom_right: parse_kern(c?)?,
                bottom_left: parse_kern(d?)?,
            })
        });
        let entries = merge2(coverage, records)?;
        Ok(MathKernInfo { entries })
    }
}

/// The set of glyph ids flagged as extended shapes (a bare coverage table).
#[derive(Clone, Debug, Default)]
pub struct ExtendedShapes {
    pub glyphs: HashSet<u16>
}

impl Parser for ExtendedShapes {
    type Output = Self;
    fn parse(data: &[u8]) -> Result<Self, FontError> {
        let glyphs = coverage_table(data)?;
        Ok(ExtendedShapes { glyphs: glyphs.collect() })
    }
}
//! (Unimplemented) terminal implementation for the Redox operating system. use std::io; use termion::screen; pub fn enter_alternate_dimension(stdout: &mut io::Stdout) { write!(stdout, screen::ToAlternateScreen); } pub fn exit_alternate_dimension(stdout: &mut io::Stdout) { write!(stdout, screen::ToMainScreen); }
use crate::ast;
use crate::{Spanned, ToTokens};

/// An index set operation `<target>[<index>] = <value>`.
#[derive(Debug, Clone, ToTokens, Spanned)]
pub struct ExprIndexSet {
    /// The target of the index set.
    pub target: Box<ast::Expr>,
    /// The opening bracket.
    pub open: ast::OpenBracket,
    /// The indexing expression.
    pub index: Box<ast::Expr>,
    /// The closing bracket.
    pub close: ast::CloseBracket,
    /// The equals sign.
    pub eq: ast::Eq,
    /// The value expression we are assigning.
    pub value: Box<ast::Expr>,
}
extern crate num; /// The default epsilon value used for floating point comparisons. pub static EPSILON: f64 = 1.0E-8; pub trait Float: num::Float{ /// Tells if the two floating-point values `self` and `y` are considered equal /// within the specified `absolute == relative` tolerence value. /// /// The method of comparison used is described at: /// /// http://realtimecollisiondetection.net/blog/?p=89 /// /// Also consider using the `equal` method. /// /// # Examples /// /// ``` /// use fiz_math::Float; /// /// assert!(0.9.almost_equal(1.0, 0.1000001)); /// assert!(0.9.almost_equal(1.0, 0.1)); /// ``` fn almost_equal<T: Float>(self, y: Self, abs_tol: T) -> bool; /// equal is short-hand for `self.almost_equal(y, fiz_math::EPSILON)`. /// /// # Examples /// /// ``` /// use fiz_math::Float; /// /// assert!(1.00000001.equal(1.0)); /// assert!(1.0.equal(1.0)); /// assert!(!0.9.equal(1.0)); /// ``` fn equal(self, y: Self) -> bool; /// lerp performs a linear interpolation between `self` and `b`. The `t` /// parameter is a number in the range 0.0 - 1.0. /// /// The interpolation method is precise, as such it is guaranteed that /// `a.lerp(b, 1.0) == a`. /// /// # Examples /// /// ``` /// use fiz_math::Float; /// /// assert_eq!(0.0.lerp(10.0, 0.0), 0.0); /// assert_eq!(0.0.lerp(10.0, 0.5), 5.0); /// assert_eq!(0.0.lerp(10.0, 1.0), 10.0); /// ``` fn lerp(self, b: Self, t: Self) -> Self; } impl<T: num::Float> Float for T { fn almost_equal<N: num::Float>(self, y: T, abs_tol: N) -> bool { let r = T::from(1.0).unwrap().max(self.abs().max(y.abs())); self == y || ((self - y).abs() <= T::from(abs_tol).unwrap() * r) } fn equal(self, y: T) -> bool { self.almost_equal(y, T::from(EPSILON).unwrap()) } fn lerp(self, b: Self, t: Self) -> Self { (T::one() - t) * self + t * b } }
/* * Binomial heap (Rust) * * Copyright (c) 2022 Project Nayuki. (MIT License) * https://www.nayuki.io/page/binomial-heap * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * - The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * - The Software is provided "as is", without warranty of any kind, express or * implied, including but not limited to the warranties of merchantability, * fitness for a particular purpose and noninfringement. In no event shall the * authors or copyright holders be liable for any claim, damages or other * liability, whether in an action of contract, tort or otherwise, arising from, * out of or in connection with the Software or the use or other dealings in the * Software. 
*/


use std;


/*---- Binomial heap: a min-heap supporting O(log n) push/pop/merge ----*/

/*-- Fields --*/

// The heap is a list of binomial trees (linked via `next`) in strictly
// increasing rank order; `head` is the lowest-rank tree, or None when empty.
#[derive(Clone)]
pub struct BinomialHeap<E> {
	head: MaybeNode<E>,
}


impl<E: Ord> BinomialHeap<E> {

	/*-- Constructors --*/

	// Creates an empty heap.
	pub fn new() -> Self {
		Self { head: None }
	}


	/*-- Methods --*/

	pub fn is_empty(&self) -> bool {
		self.head.is_none()
	}


	// Number of stored values. A tree of rank r holds exactly 2^r values and
	// ranks are distinct, so OR-ing the powers of two equals their sum; the
	// checked_shl unwrap panics only if a rank exceeds the word size, which
	// would mean the heap holds more values than `usize` can count.
	pub fn len(&self) -> usize {
		let mut result: usize = 0;
		let mut node: &MaybeNode<E> = &self.head;
		while let Some(ref nd) = *node {
			result |= 1usize.checked_shl(u32::from(nd.rank)).unwrap();
			node = &nd.next;
		}
		result
	}


	pub fn clear(&mut self) {
		self.head = None;
	}


	// Inserts a value by merging in a single rank-0 tree.
	pub fn push(&mut self, val: E) {
		let other = Some(Box::new(Node::new(val)));
		self.merge_nodes(other);
	}


	// Returns a reference to the minimum value, if any.
	pub fn peek(&self) -> Option<&E> {
		self.find_min().map(|x| x.0)
	}


	// Removes and returns the minimum value, if any.
	pub fn pop(&mut self) -> Option<E> {
		let mut minnode: Node<E>;
		{
			// Walk to the root holding the minimum, unlink it from the main
			// chain (take + swap splices its `next` into its predecessor),
			// then re-merge its children back into the heap.
			let minnodeindex: u32 = self.find_min()?.1;
			let mut node: &mut MaybeNode<E> = &mut self.head;
			for _ in 0 .. minnodeindex {
				// `{node}` moves the reference so it can be reborrowed
				// through `as_mut` without tripping the borrow checker.
				node = &mut {node}.as_mut().unwrap().as_mut().next;
			}
			minnode = *node.take().unwrap();
			std::mem::swap(node, &mut minnode.next);
		}
		self.merge_nodes(minnode.remove_root());
		Some(minnode.value)
	}


	// Finds the minimum root value and its index along the main chain.
	fn find_min(&self) -> Option<(&E, u32)> {
		let mut node: &Node<E> = self.head.as_ref()?;
		let mut minvalue: &E = &node.value;
		let mut minindex: u32 = 0;
		let mut index: u32 = 1;
		while let Some(ref next) = node.next {
			node = next.as_ref();
			if node.value < *minvalue {
				minvalue = &node.value;
				minindex = index;
			}
			index += 1;
		}
		Some((minvalue, minindex))
	}


	// Moves all the values in the given heap into this heap
	pub fn merge(&mut self, mut other: Self) {
		self.merge_nodes(other.head.take());
	}


	// Merges the main chain `other` into this heap's main chain, combining
	// equal-rank trees like binary addition with carries. `merged` is built
	// in DECREASING rank order and reversed at the end.
	fn merge_nodes(&mut self, mut other: MaybeNode<E>) {
		let mut this: MaybeNode<E> = self.head.take();
		let mut merged: MaybeNode<E> = None;
		while this.is_some() || other.is_some() {
			// Detach the lower-ranked head of the two input chains.
			let mut node: Box<Node<E>>;
			if other.is_none() || this.is_some() && this.as_ref().unwrap().rank <= other.as_ref().unwrap().rank {
				node = this.unwrap();
				this = node.next.take();
			} else {
				node = other.unwrap();
				other = node.next.take();
			}

			if merged.is_none() || merged.as_ref().unwrap().rank < node.rank {
				// No carry pending: prepend the node.
				node.next = merged;
				merged = Some(node);
			} else {
				let mut mrgd = merged.unwrap();
				if mrgd.rank == node.rank + 1 {
					// Carry already settled one rank above: slot the node in
					// beneath it.
					node.next = mrgd.next.take();
					mrgd.next = Some(node);
				} else {
					// Merge nodes
					// Equal ranks: link the larger-rooted tree under the
					// smaller-rooted one, producing a rank+1 carry.
					assert_eq!(mrgd.rank, node.rank);
					if node.value < mrgd.value {
						std::mem::swap(&mut node.value, &mut mrgd.value);
						std::mem::swap(&mut node.down, &mut mrgd.down);
					}
					node.next = mrgd.down.take();
					mrgd.down = Some(node);
					mrgd.rank += 1;
				}
				merged = Some(mrgd);
			}
		}
		self.head = reverse_nodes(merged);
	}


	// For unit tests
	pub fn check_structure(&self) {
		if let Some(ref node) = self.head {
			node.check_structure(true, None);
		}
	}

}



/*---- Helper struct: Binomial heap node ----*/

type MaybeNode<E> = Option<Box<Node<E>>>;


/*-- Fields --*/

// One binomial-tree node. `down` points to the highest-rank child; children
// are chained through their `next` pointers in decreasing rank.
#[derive(Clone)]
struct Node<E> {
	value: E,
	rank: u8,
	down: MaybeNode<E>,
	next: MaybeNode<E>,
}


impl<E: Ord> Node<E> {

	/*-- Constructor --*/

	// A fresh rank-0 tree holding a single value.
	fn new(val: E) -> Self {
		Self {
			value: val,
			rank: 0,
			down: None,
			next: None,
		}
	}


	/*-- Methods --*/

	// Detaches this (already unlinked) root's children and returns them
	// reversed into increasing rank order, ready to be re-merged.
	fn remove_root(&mut self) -> MaybeNode<E> {
		assert!(self.next.is_none());
		reverse_nodes(self.down.take())
	}


	// For unit tests
	// Recursively asserts the rank and min-heap invariants. `ismain` is true
	// on the main chain (where `lowerbound` must be None); child chains carry
	// their parent's value as the lower bound.
	fn check_structure(&self, ismain: bool, lowerbound: Option<&E>) {
		// Basic checks
		assert_eq!(ismain, lowerbound.is_none(), "Invalid arguments");
		assert!(ismain || self.value >= *lowerbound.unwrap(), "Min-heap property violated");

		// Check children and non-main chains
		if self.rank > 0 {
			let down = self.down.as_ref().expect("Down node absent");
			assert_eq!(down.rank, self.rank - 1, "Down node has invalid rank");
			down.check_structure(false, Some(&self.value));
			if !ismain {
				let next = self.next.as_ref().expect("Next node absent");
				assert_eq!(next.rank, self.rank - 1, "Next node has invalid rank");
				next.check_structure(false, lowerbound);
			}
		} else {
			assert!(self.down.is_none(), "Down node must be absent");
		}

		// Check main chain
		if ismain {
			if let Some(ref next) = self.next {
				assert!(next.rank > self.rank);
				next.check_structure(true, None);
			}
		}
	}

}


// Reverses a `next`-linked chain of nodes in place, returning the new head.
fn reverse_nodes<E>(mut nodes: MaybeNode<E>) -> MaybeNode<E> {
	let mut result: MaybeNode<E> = None;
	while let Some(mut node) = nodes {
		nodes = std::mem::replace(&mut node.next, result);
		result = Some(node);
	}
	result
}



/*---- Helper struct: Binomial heap iterator ----*/

impl<E: Ord> IntoIterator for BinomialHeap<E> {
	type Item = E;
	type IntoIter = MoveIter<E>;

	fn into_iter(self) -> Self::IntoIter {
		MoveIter::<E>::new(self)
	}
}


// Consuming iterator: repeatedly pops the heap, so values come out in
// ascending order. `count` tracks the remaining length for size_hint.
pub struct MoveIter<E> {
	heap: BinomialHeap<E>,
	count: usize,
}


impl<E: Ord> MoveIter<E> {
	fn new(heap: BinomialHeap<E>) -> Self {
		Self {
			count: heap.len(),
			heap: heap,
		}
	}
}


impl<E: Ord> Iterator for MoveIter<E> {
	type Item = E;

	fn next(&mut self) -> Option<Self::Item> {
		let result = self.heap.pop();
		if result.is_some() {
			self.count -= 1;
		}
		result
	}

	// The remaining length is known exactly.
	fn size_hint(&self) -> (usize,Option<usize>) {
		(self.count, Some(self.count))
	}

	fn count(self) -> usize {
		self.count
	}
}