text
stringlengths
8
4.13M
#[doc = "Register `SR` reader"] pub type R = crate::R<SR_SPEC>; #[doc = "Register `SR` writer"] pub type W = crate::W<SR_SPEC>; #[doc = "Field `DINIS` reader - Data input interrupt status This bit is set by hardware when the FIFO is ready to get a new block (16 locations are free). It is cleared by writing it to 0 or by writing the HASH_DIN register. When DINIS = 0, HASH_CSRx registers reads as zero."] pub type DINIS_R = crate::BitReader; #[doc = "Field `DINIS` writer - Data input interrupt status This bit is set by hardware when the FIFO is ready to get a new block (16 locations are free). It is cleared by writing it to 0 or by writing the HASH_DIN register. When DINIS = 0, HASH_CSRx registers reads as zero."] pub type DINIS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `DCIS` reader - Digest calculation completion interrupt status This bit is set by hardware when a digest becomes ready (the whole message has been processed). It is cleared by writing it to 0 or by writing the INIT bit to 1 in the HASH_CR register."] pub type DCIS_R = crate::BitReader; #[doc = "Field `DCIS` writer - Digest calculation completion interrupt status This bit is set by hardware when a digest becomes ready (the whole message has been processed). It is cleared by writing it to 0 or by writing the INIT bit to 1 in the HASH_CR register."] pub type DCIS_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>; #[doc = "Field `DMAS` reader - DMA Status This bit provides information on the DMA interface activity. It is set with DMAE and cleared when DMAE = 0 and no DMA transfer is ongoing. No interrupt is associated with this bit."] pub type DMAS_R = crate::BitReader; #[doc = "Field `BUSY` reader - Busy bit"] pub type BUSY_R = crate::BitReader; #[doc = "Field `NBWP` reader - Number of words already pushed This bitfield is the exact number of words in the message that have already been pushed into the FIFO. 
NBWP is incremented by 1 when a write access is performed to the HASH_DIN register. When a digest calculation starts, NBWP is updated to NBWP- block size (in words), and NBWP goes to zero when the INIT bit is written to 1."] pub type NBWP_R = crate::FieldReader; #[doc = "Field `DINNE` reader - DIN not empty This bit is set when the HASH_DIN register holds valid data (that is after being written at least once). It is cleared when either the INIT bit (initialization) or the DCAL bit (completion of the previous message processing) is written to 1."] pub type DINNE_R = crate::BitReader; #[doc = "Field `NBWE` reader - Number of words expected This bitfield reflects the number of words in the message that must be pushed into the FIFO to trigger a partial computation. NBWE is decremented by 1 when a write access is performed to the HASH_DIN register. NBWE is set to the expected block size +1 in words (0x11) when INIT bit is set in HASH_CR. It is set to the expected block size (0x10) when the partial digest calculation ends."] pub type NBWE_R = crate::FieldReader; impl R { #[doc = "Bit 0 - Data input interrupt status This bit is set by hardware when the FIFO is ready to get a new block (16 locations are free). It is cleared by writing it to 0 or by writing the HASH_DIN register. When DINIS = 0, HASH_CSRx registers reads as zero."] #[inline(always)] pub fn dinis(&self) -> DINIS_R { DINIS_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - Digest calculation completion interrupt status This bit is set by hardware when a digest becomes ready (the whole message has been processed). It is cleared by writing it to 0 or by writing the INIT bit to 1 in the HASH_CR register."] #[inline(always)] pub fn dcis(&self) -> DCIS_R { DCIS_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - DMA Status This bit provides information on the DMA interface activity. It is set with DMAE and cleared when DMAE = 0 and no DMA transfer is ongoing. 
No interrupt is associated with this bit."] #[inline(always)] pub fn dmas(&self) -> DMAS_R { DMAS_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - Busy bit"] #[inline(always)] pub fn busy(&self) -> BUSY_R { BUSY_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bits 9:13 - Number of words already pushed This bitfield is the exact number of words in the message that have already been pushed into the FIFO. NBWP is incremented by 1 when a write access is performed to the HASH_DIN register. When a digest calculation starts, NBWP is updated to NBWP- block size (in words), and NBWP goes to zero when the INIT bit is written to 1."] #[inline(always)] pub fn nbwp(&self) -> NBWP_R { NBWP_R::new(((self.bits >> 9) & 0x1f) as u8) } #[doc = "Bit 15 - DIN not empty This bit is set when the HASH_DIN register holds valid data (that is after being written at least once). It is cleared when either the INIT bit (initialization) or the DCAL bit (completion of the previous message processing) is written to 1."] #[inline(always)] pub fn dinne(&self) -> DINNE_R { DINNE_R::new(((self.bits >> 15) & 1) != 0) } #[doc = "Bits 16:20 - Number of words expected This bitfield reflects the number of words in the message that must be pushed into the FIFO to trigger a partial computation. NBWE is decremented by 1 when a write access is performed to the HASH_DIN register. NBWE is set to the expected block size +1 in words (0x11) when INIT bit is set in HASH_CR. It is set to the expected block size (0x10) when the partial digest calculation ends."] #[inline(always)] pub fn nbwe(&self) -> NBWE_R { NBWE_R::new(((self.bits >> 16) & 0x1f) as u8) } } impl W { #[doc = "Bit 0 - Data input interrupt status This bit is set by hardware when the FIFO is ready to get a new block (16 locations are free). It is cleared by writing it to 0 or by writing the HASH_DIN register. 
When DINIS = 0, HASH_CSRx registers reads as zero."] #[inline(always)] #[must_use] pub fn dinis(&mut self) -> DINIS_W<SR_SPEC, 0> { DINIS_W::new(self) } #[doc = "Bit 1 - Digest calculation completion interrupt status This bit is set by hardware when a digest becomes ready (the whole message has been processed). It is cleared by writing it to 0 or by writing the INIT bit to 1 in the HASH_CR register."] #[inline(always)] #[must_use] pub fn dcis(&mut self) -> DCIS_W<SR_SPEC, 1> { DCIS_W::new(self) } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } } #[doc = "HASH status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`sr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."] pub struct SR_SPEC; impl crate::RegisterSpec for SR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [`sr::R`](R) reader structure"] impl crate::Readable for SR_SPEC {} #[doc = "`write(|w| ..)` method takes [`sr::W`](W) writer structure"] impl crate::Writable for SR_SPEC { const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0; } #[doc = "`reset()` method sets SR to value 0x0011_0001"] impl crate::Resettable for SR_SPEC { const RESET_VALUE: Self::Ux = 0x0011_0001; }
use crate::dsp::types::*;
extern crate chrono;
extern crate fern;
extern crate hyper;
extern crate log;
extern crate mime;
#[macro_use]
extern crate serde_derive;
extern crate gotham;
extern crate shib_gotham;

use gotham::http::response::create_response;
use gotham::middleware::session::{NewSessionMiddleware, SessionData};
use gotham::pipeline::new_pipeline;
use gotham::pipeline::set::*;
use gotham::router::builder::*;
use gotham::router::Router;
use gotham::state::{FromState, State};
use hyper::{Response, StatusCode};
use log::LevelFilter;
use shib_gotham::{AuthenticatedSession, ReceiverFailed, Shibbleware};

/// Entry point: install logging, then serve the router on a fixed address.
fn main() {
    set_logging();
    let addr = "127.0.0.1:7878";
    println!("Listening for requests at http://{}", addr);
    gotham::start(addr, router())
}

/// Configure a `fern` logger to stdout: errors only by default, with
/// verbose tracing for the `gotham` crate itself.
fn set_logging() {
    fern::Dispatch::new()
        .level(LevelFilter::Error)
        .level_for("gotham", log::LevelFilter::Trace)
        .level_for("gotham::state", log::LevelFilter::Error)
        .level_for("todo_session", log::LevelFilter::Error)
        .chain(std::io::stdout())
        .format(|out, message, record| {
            out.finish(format_args!(
                "{}[{}][{}]{}",
                chrono::Utc::now().format("[%Y-%m-%d %H:%M:%S%.9f]"),
                record.target(),
                record.level(),
                message
            ))
        })
        .apply()
        .unwrap();
}

/// Per-client session; a session counts as authenticated once user
/// attributes have been received from the identity provider.
#[derive(Default, Serialize, Deserialize)]
struct Session {
    user: Option<UserAttributes>,
}

impl AuthenticatedSession for Session {
    fn is_authenticated(&self) -> bool {
        self.user.is_some()
    }
}

/// Attributes received for an authenticated subject.
#[derive(Serialize, Deserialize, Debug)]
struct UserAttributes {
    #[serde(rename = "User-Agent")]
    user_agent: String,
    #[serde(rename = "Accept")]
    accept: String,
}

mod controller {
    use super::*;

    /// Landing page with a login link.
    pub fn welcome(state: State) -> (State, Response) {
        let body = br#" <html> <head> <title>shib-gotham - Attribute Reflector Example</title> </head> <body> <h2>Welcome</h2> <p><a href="/auth/login">Login</a></p> </body> </html> "#;
        let response = create_response(
            &state,
            StatusCode::Ok,
            Some((body.to_vec(), mime::TEXT_HTML)),
        );
        (state, response)
    }

    /// Reflects the attributes stored in the session back to the client.
    pub fn attributes(state: State) -> (State, Response) {
        let user = SessionData::<Session>::borrow_from(&state)
            .user
            .as_ref()
            .unwrap();
        let body = format!(
            " <html> <head> <title>shib-gotham - Attribute Reflector Example</title> </head> <body> <h2>Attributes</h2> <pre><code>{:?}</code></pre> </body> </html> ",
            user
        );
        let response = create_response(
            &state,
            StatusCode::Ok,
            Some((body.into_bytes(), mime::TEXT_HTML)),
        );
        (state, response)
    }
}

/// Callback invoked by shib-gotham when a subject's attributes arrive;
/// stores them in the session, marking it authenticated.
fn receive_subject(state: &mut State, attributes: UserAttributes) -> Result<(), ReceiverFailed> {
    println!("received attributes: {:?}", attributes);
    SessionData::<Session>::borrow_mut_from(state).user = Some(attributes);
    Ok(())
}

/// Build the application router: public routes on the session pipeline,
/// plus a nested router whose routes additionally require authentication.
fn router() -> Router {
    let pipelines = new_pipeline_set();
    let (pipelines, default) = pipelines.add(
        new_pipeline()
            .add(
                NewSessionMiddleware::default()
                    .with_session_type::<Session>()
                    .insecure(),
            )
            .build(),
    );
    let (pipelines, protected) = pipelines.add(
        new_pipeline()
            .add(Shibbleware::<Session>::new("/auth/login"))
            .build(),
    );
    let pipelines = finalize_pipeline_set(pipelines);

    let default_chain = (default, ());
    let protected_chain = (protected, (default, ()));

    let protected_router = build_router(protected_chain, pipelines.clone(), |route| {
        route.get("/attributes").to(controller::attributes);
    });

    build_router(default_chain, pipelines, |route| {
        route.get("/").to(controller::welcome);
        route
            .delegate_without_pipelines("/protected")
            .to_router(protected_router);
        route
            .delegate("/auth")
            .to_router(shib_gotham::auth_router(receive_subject));
    })
}
use core::ptr; const XDMA_BASE: u32 = 0x1000C000u32; macro_rules! xdmainst_size { (GO) => (6); (END) => (1); (KILL) => (1); (FLUSHP) => (2); (WFP) => (2); (WFE) => (); (LD) => (1); (LDPS) => (2); (LDPB) => (2); (ST) => (1); (STP) => (2); (STZ) => (1); (LP) => (2); (LPEND) => (2); (MOV) => (6); } macro_rules! xdmainst { (END) => ([0x00]); (KILL) => ([0x01]); (FLUSHP $which:expr) => ([0x35, $which << 3]); (WFP $which:expr, periph) => ([0x31, $which << 3]); (WFE) => (); (LD) => ([0x04]); (LDPS $which:expr) => ([0x25, $which << 3]); (LDPB $which:expr) => ([0x27, $which << 3]); (ST) => ([0x08]); (STP) => (); (STZ) => ([0x0C]); (LP $ctr:expr, $iters:expr) => ([0x20 | ($ctr << 1), $iters - 1]); (LPEND $ctr:expr) => ([0x38 | ($ctr << 2), 0]); (GO $chan:expr, $where:expr) => ({ let b = ($where as u32).to_le_bytes(); [0xa2, $chan, b[0], b[1], b[2], b[3]] }); (MOV $where:ident, $what:expr) => {{ #[allow(dead_code)] enum Reg { SAR = 0, CCR = 1, DAR = 2 } let b = ($what as u32).to_le_bytes(); [0xbc, Reg::$where as u8, b[0], b[1], b[2], b[3]] }}; } macro_rules! handle_lp { ($loop_rel:expr; LP $ctr:tt $($rest:tt)* ) => ({ assert!($loop_rel[$ctr].is_none()); $loop_rel[$ctr] = Some(0); }); ($loop_rel:expr; $($other:tt)*) => {} } macro_rules! handle_lpend { ($loop_rel:expr; $inst_buf:expr; LPEND $ctr:tt $($rest:tt)* ) => ({ assert!($loop_rel[$ctr].is_some()); let rel = $loop_rel[$ctr].take(); $inst_buf[1] = rel.unwrap() - xdmainst_size!(LPEND); }); ($loop_rel:expr; $inst_buf:expr; $($other:tt)*) => {} } macro_rules! 
xdma_compile_ { ( $( [ $inst_name:ident $($inst_param:tt),* ] )+ ) => {{ const LEN: usize = 0 $(+ xdmainst_size!($inst_name))+; let mut arr = [0u8; LEN]; let mut loop_rel: [Option<u8>; 2] = [None; 2]; { let arr_sl = &mut arr[..]; $( let inst_dat = { const INST_LEN: usize = xdmainst_size!( $inst_name ); let inst_dat: [u8; INST_LEN] = xdmainst!( $inst_name $($inst_param),* ); inst_dat }; arr_sl[..inst_dat.len()].copy_from_slice(&inst_dat); loop_rel[0].as_mut().map(|x| *x += xdmainst_size!( $inst_name )); loop_rel[1].as_mut().map(|x| *x += xdmainst_size!( $inst_name )); handle_lpend!( &mut loop_rel; arr_sl; $inst_name $($inst_param),* ); handle_lp!( &mut loop_rel; $inst_name $($inst_param),* ); let arr_sl = &mut arr_sl[inst_dat.len()..]; )+ drop(arr_sl); } arr }} } #[macro_export] macro_rules! xdma_compile { ( $( $inst_name:ident $(( $($params:tt),* ))* );+ ) => { xdma_compile_!( $( [ $inst_name $($($params),*)* ] )* ) } } pub enum XdmaSrc { // FillData(u32), // FixedAddr(*const u32), LinearBuf(*const u8, usize), } pub enum XdmaDst { // FixedAddr(*mut u32), LinearBuf(*mut u8, usize) } #[derive(Clone, Copy)] #[allow(non_camel_case_types)] #[allow(dead_code)] enum Reg { MANAGER_FTYPE = 0x038, CHANNEL_FTYPE0 = 0x040, CHANNEL_STAT0 = 0x100, CHANNEL_PC0 = 0x104, DEBUG_STAT = 0xD00, DEBUG_CMD = 0xD04, DEBUG_INST0 = 0xD08, DEBUG_INST1 = 0xD0C, } #[inline(never)] fn read_reg<T: Copy>(reg: Reg) -> T { unsafe { ptr::read_volatile((XDMA_BASE + reg as u32) as *const T) } } fn write_reg<T: Copy>(reg: Reg, val: T) { unsafe { ptr::write_volatile((XDMA_BASE + reg as u32) as *mut T, val); } } bf!(ChannelCtrl[u32] { src_inc: 0:0, src_burst_size: 1:3, src_burst_len: 4:7, src_prot: 8:10, src_cache: 11:13, dst_inc: 14:14, dst_burst_size: 15:17, dst_burst_len: 18:21, dst_prot: 22:24, dst_cache: 25:27 }); bf!(DmaInst[u64] { use_channel: 0:0, channel: 8:10, inst_b0: 16:23, inst_b1: 24:31, inst_b2: 32:39, inst_b3: 40:47, inst_b4: 48:55, inst_b5: 56:63 }); pub fn run_program(program: &[u8]) { 
let mut dmainst = DmaInst::new(0); let go = xdma_compile! { GO(0, (program.as_ptr() as u32)) }; dmainst.inst_b0.set(go[0] as u64); dmainst.inst_b1.set(go[1] as u64); dmainst.inst_b2.set(go[2] as u64); dmainst.inst_b3.set(go[3] as u64); dmainst.inst_b4.set(go[4] as u64); dmainst.inst_b5.set(go[5] as u64); write_reg(Reg::DEBUG_INST0, dmainst.val as u32); write_reg(Reg::DEBUG_INST1, (dmainst.val >> 32) as u32); write_reg(Reg::DEBUG_CMD, 0u32); let mut counter1 = 0x10000; while read_reg::<u32>(Reg::DEBUG_STAT) & 1 != 0 {} while counter1 != 0 && read_reg::<u32>(Reg::CHANNEL_STAT0) & 0xF != 0 { counter1 -= 1 } let ftype = read_reg::<u32>(Reg::CHANNEL_FTYPE0); if ftype != 0 { let pc = read_reg::<u32>(Reg::CHANNEL_PC0); let failed_inst = { let offset = (pc - (program.as_ptr() as u32)) as usize; let buf_size = (program.len() - offset).min(6); &program[offset..offset + buf_size]; }; panic!( "XDMA channel faulted!\n\ Final channel PC: {:08X}\n\ Data at channel PC: {:X?}\n\ Final manager fault type: {:08X}\n\ Final channel fault type: {:08X}\n\ Final channel state: {:08X}", pc, failed_inst, ftype, read_reg::<u32>(Reg::CHANNEL_FTYPE0), read_reg::<u32>(Reg::CHANNEL_STAT0) ); } } pub fn mem_transfer(src: XdmaSrc, dst: XdmaDst) { let XdmaSrc::LinearBuf(src, len) = src; let XdmaDst::LinearBuf(dst, dst_len) = dst; assert_eq!(len, dst_len); const LINE_SIZE: usize = 8; const BURST_LINES: usize = 16; let lines = len / LINE_SIZE; let chunks = lines / BURST_LINES; assert_eq!(src as usize % LINE_SIZE, 0, "XDMA source unaligned!"); assert_eq!(dst as usize % LINE_SIZE, 0, "XDMA dest unaligned!"); assert_eq!(len % (LINE_SIZE * BURST_LINES), 0, "XDMA xfer len is not a multiple of the transfer width!"); let mut ctrl_big = ChannelCtrl::new(0); ctrl_big.src_inc.set(1); ctrl_big.src_burst_size.set((LINE_SIZE.trailing_zeros()) as u32); ctrl_big.src_burst_len.set((BURST_LINES - 1) as u32); ctrl_big.src_prot.set(0b011); ctrl_big.src_cache.set(0b010); ctrl_big.dst_inc.set(1); 
ctrl_big.dst_burst_size.set((LINE_SIZE.trailing_zeros()) as u32); ctrl_big.dst_burst_len.set((BURST_LINES - 1) as u32); ctrl_big.dst_prot.set(0b011); ctrl_big.dst_cache.set(0b010); let program = xdma_compile! { MOV(SAR, (src as u32)); MOV(CCR, (ctrl_big.val)); MOV(DAR, (dst as u32)); LP(0, (chunks as u8)); LD; ST; LPEND(0); END }; run_program(&program); }
use sdl2::render::{TextureQuery, TextureCreator, Texture, Canvas}; use sdl2::video::{Window, WindowContext}; use sdl2::surface::Surface; use std::rc::Rc; use std::cell::RefCell; use engine::utils::rectangle::Rectangle; use engine::components::{Component, Components}; pub struct Sprite<'window> { sprite: Rc<RefCell<Texture<'window>>>, source: Rectangle, } impl<'window> Component for Sprite<'window> { fn get_type(&self) -> Components { Components::Sprite } fn render(&self, canvas: &mut Canvas<Window>, dest: Rectangle) -> Result<(), String> { canvas.copy(&mut self.sprite.borrow(), self.source.to_sdl(), dest.to_sdl()) } } impl<'window> Sprite<'window> { pub fn new(texture: Texture) -> Sprite { let texture_query = texture.query(); Sprite { sprite: Rc::new(RefCell::new(texture)), source: Rectangle { x: 0f64, y: 0f64, width: texture_query.width as f64, height: texture_query.height as f64, }, } } pub fn texture_query(&self) -> TextureQuery { self.sprite.borrow().query() } pub fn load(path: &str, texture_creator: &'window TextureCreator<WindowContext>) -> Option<Sprite<'window>> { let surface = Surface::load_bmp(path).unwrap(); match texture_creator.create_texture_from_surface(surface) { Ok(tex) => return Option::Some(Sprite::new(tex)), Err(_) => return Option::None, }; } // pub fn render(&self, canvas: &mut Canvas<Window>, dest: Rectangle) -> Result<(), String> { // canvas.copy(&mut self.sprite.borrow(), // self.source.to_sdl(), // dest.to_sdl()) // } }
use std::fmt;
use std::fmt::{Display, Formatter};

use crate::changelog::{Scope, Section};
use crate::ChangeLog;

impl ChangeLog {
    /// Renders this changelog as Markdown: breaking changes, then features,
    /// then bug fixes, each grouped by (sorted) scope.
    pub fn markdown(&self) -> MarkdownChangelog {
        let mut sorted_scopes: Vec<&Option<Scope>> = self.scopes().collect();
        sorted_scopes.sort();
        MarkdownChangelog {
            breaking_changes: MarkdownChangelogSection {
                title: "Breaking changes",
                scopes: sorted_scopes.clone(),
                section: self.breaking_changes(),
            },
            features: MarkdownChangelogSection {
                title: "Features",
                scopes: sorted_scopes.clone(),
                section: self.features(),
            },
            fixes: MarkdownChangelogSection {
                title: "Bug fixes",
                scopes: sorted_scopes,
                section: self.fixes(),
            },
        }
    }
}

/// Markdown view over a `ChangeLog`; render it with `Display`.
#[derive(Debug, Clone)]
pub struct MarkdownChangelog<'a> {
    breaking_changes: MarkdownChangelogSection<'a>,
    features: MarkdownChangelogSection<'a>,
    fixes: MarkdownChangelogSection<'a>,
}

/// One titled section of the Markdown output, grouped by scope.
#[derive(Debug, Clone)]
struct MarkdownChangelogSection<'a> {
    title: &'a str,
    scopes: Vec<&'a Option<Scope>>,
    section: &'a Section,
}

impl Display for MarkdownChangelog<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{}{}{}", self.breaking_changes, self.features, self.fixes)
    }
}

impl Display for MarkdownChangelogSection<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // Empty sections render nothing at all (not even a heading).
        if self.section.is_empty() {
            return Ok(());
        }
        write!(f, "### {}\n\n", self.title)?;
        let entries = self
            .scopes
            .iter()
            .filter_map(|scope| self.section.get(scope).map(|changes| (scope, changes)));
        for (scope, changes) in entries {
            if let Some(title) = scope {
                write!(f, "#### {}\n\n", title)?;
            }
            for change in changes {
                writeln!(f, "* {}", change)?;
            }
            writeln!(f)?;
        }
        writeln!(f)
    }
}
#[doc = "Reader of register RX_CTRL"] pub type R = crate::R<u32, super::RX_CTRL>; #[doc = "Writer for register RX_CTRL"] pub type W = crate::W<u32, super::RX_CTRL>; #[doc = "Register RX_CTRL `reset()`'s with value 0x0107"] impl crate::ResetValue for super::RX_CTRL { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x0107 } } #[doc = "Reader of field `DATA_WIDTH`"] pub type DATA_WIDTH_R = crate::R<u8, u8>; #[doc = "Write proxy for field `DATA_WIDTH`"] pub struct DATA_WIDTH_W<'a> { w: &'a mut W, } impl<'a> DATA_WIDTH_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f); self.w } } #[doc = "Reader of field `MSB_FIRST`"] pub type MSB_FIRST_R = crate::R<bool, bool>; #[doc = "Write proxy for field `MSB_FIRST`"] pub struct MSB_FIRST_W<'a> { w: &'a mut W, } impl<'a> MSB_FIRST_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8); self.w } } #[doc = "Reader of field `MEDIAN`"] pub type MEDIAN_R = crate::R<bool, bool>; #[doc = "Write proxy for field `MEDIAN`"] pub struct MEDIAN_W<'a> { w: &'a mut W, } impl<'a> MEDIAN_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9); self.w } } impl R { #[doc = "Bits 0:3 - Dataframe width. 
DATA_WIDTH + 1 is the expected amount of bits in received data frame. This number does not include start, parity and stop bits. For UART mode, the valid range is \\[3, 8\\]. For SPI, the valid range is \\[3, 15\\]. For I2C the only valid value is 7. In EZ mode (for both SPI and I2C), the only valid value is 7."] #[inline(always)] pub fn data_width(&self) -> DATA_WIDTH_R { DATA_WIDTH_R::new((self.bits & 0x0f) as u8) } #[doc = "Bit 8 - Least significant bit first ('0') or most significant bit first ('1'). For I2C, this field should be '1'."] #[inline(always)] pub fn msb_first(&self) -> MSB_FIRST_R { MSB_FIRST_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Median filter. When '1', a digital 3 taps median filter is performed on input interface lines. This filter should reduce the susceptability to errors. However, its requires higher oversampling values. For UART IrDA submode, this field should always be '1'."] #[inline(always)] pub fn median(&self) -> MEDIAN_R { MEDIAN_R::new(((self.bits >> 9) & 0x01) != 0) } } impl W { #[doc = "Bits 0:3 - Dataframe width. DATA_WIDTH + 1 is the expected amount of bits in received data frame. This number does not include start, parity and stop bits. For UART mode, the valid range is \\[3, 8\\]. For SPI, the valid range is \\[3, 15\\]. For I2C the only valid value is 7. In EZ mode (for both SPI and I2C), the only valid value is 7."] #[inline(always)] pub fn data_width(&mut self) -> DATA_WIDTH_W { DATA_WIDTH_W { w: self } } #[doc = "Bit 8 - Least significant bit first ('0') or most significant bit first ('1'). For I2C, this field should be '1'."] #[inline(always)] pub fn msb_first(&mut self) -> MSB_FIRST_W { MSB_FIRST_W { w: self } } #[doc = "Bit 9 - Median filter. When '1', a digital 3 taps median filter is performed on input interface lines. This filter should reduce the susceptability to errors. However, its requires higher oversampling values. 
For UART IrDA submode, this field should always be '1'."] #[inline(always)] pub fn median(&mut self) -> MEDIAN_W { MEDIAN_W { w: self } } }
//! Indexed collection of values.
//!
//! # Remarks
//!
//! With the ``prelude`` module, we can easily convert a tuple of ``IntoIterator``s
//! into ``Process`` for ease of use. The same can be achieved with the
//! ``new`` method.
//!
//! # Examples
//!
//! Quick plot.
//! ```no_run
//! use preexplorer::prelude::*;
//! ((0..10), (0..10)).preexplore().plot("my_identifier").unwrap();
//! ```
//!
//! Compare ``Process``es.
//! ```no_run
//! use preexplorer::prelude::*;
//! pre::Processes::new(vec![
//!     ((0..10), (0..10)).preexplore(),
//!     ((0..10), (0..10)).preexplore(),
//! ])
//! .plot("my_identifier").unwrap();
//! ```

// Traits
pub use crate::traits::{Configurable, Plotable, Saveable};
use core::fmt::Display;
use core::ops::Add;

/// Process of histograms.
pub mod bin;
/// Compare various ``Process``es.
pub mod comparison;
/// Process of values with an associated error.
pub mod error;
/// Process of violin plots.
pub mod violin;

pub use bin::{ProcessBin, ProcessBins};
pub use comparison::Processes;
pub use error::{ProcessError, ProcessErrors};
pub use violin::ProcessViolin;

/// Indexed sequence of values.
#[derive(Debug, PartialEq, Clone)]
pub struct Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    domain: Vec<T>,
    image: Vec<S>,
    config: crate::configuration::Configuration,
}

impl<T, S> Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    /// Create a new ``Process``.
    ///
    /// # Examples
    ///
    /// From a complicated computation.
    /// ```
    /// use preexplorer::prelude::*;
    /// let data = (0..10).map(|i| i * i + 1);
    /// let seq = pre::Process::new((0..10), data);
    /// ```
    pub fn new<I, J>(domain: I, image: J) -> Process<T, S>
    where
        I: IntoIterator<Item = T>,
        J: IntoIterator<Item = S>,
    {
        let domain: Vec<T> = domain.into_iter().collect();
        let image: Vec<S> = image.into_iter().collect();
        let config = crate::configuration::Configuration::default();
        Process {
            domain,
            image,
            config,
        }
    }
}

impl<T, S> Add for Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    type Output = crate::Processes<T, S>;

    /// `process + process` collects both into a comparison `Processes`.
    fn add(self, other: crate::Process<T, S>) -> crate::Processes<T, S> {
        let mut cmp = self.into();
        cmp += other;
        cmp
    }
}

impl<T, S> Configurable for Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    fn configuration_mut(&mut self) -> &mut crate::configuration::Configuration {
        &mut self.config
    }
    fn configuration(&self) -> &crate::configuration::Configuration {
        &self.config
    }
}

impl<T, S> Saveable for Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    /// Serializes the data as tab-separated `domain\timage` lines.
    fn plotable_data(&self) -> String {
        // IMPROVEMENT: previously both vectors were fully cloned
        // (`self.domain.clone().into_iter().zip(self.image.clone())`)
        // just to read them; iterate by reference instead.
        let mut plotable_data = String::new();
        for (time, value) in self.domain.iter().zip(self.image.iter()) {
            plotable_data.push_str(&format!("{}\t{}\n", time, value));
        }
        plotable_data
    }
}

impl<T, S> Plotable for Process<T, S>
where
    T: Display + Clone,
    S: Display + Clone,
{
    /// Builds the gnuplot script that plots the saved data file.
    fn plot_script(&self) -> String {
        let mut gnuplot_script = self.opening_plot_script();
        // Fall back to dashtype 1 when none is configured.
        let dashtype = self.dashtype().unwrap_or(1);
        gnuplot_script += &format!(
            "plot {:?} using 1:2 with {} dashtype {}\n",
            self.data_path(),
            self.style(),
            dashtype,
        );
        gnuplot_script += &self.ending_plot_script();
        gnuplot_script
    }
}
use std::collections::HashMap; use crates_io_api::SyncClient; use failure::{Error, format_err}; use semver::Version; pub fn check_crates_io( name: Option<&str>, current_version: Option<Version>, ) -> Result<Option<Version>, Error> { let name = match name { Some(n) => n, None => env!("CARGO_PKG_NAME"), }; let current_version = match current_version { Some(n) => n, None => env!("CARGO_PKG_VERSION").parse()?, }; let c = SyncClient::new(); let latest_version = Version::parse(&c.get_crate(name)?.crate_data.max_version)?; if latest_version > current_version { Ok(Some(latest_version)) } else { Ok(None) } } pub async fn check_upgrenade( current_version: Option<Version>, url: &str, ) -> Result<Option<(Version, String)>, Error> { let current_version = match current_version { Some(n) => n, None => env!("CARGO_PKG_VERSION").parse()?, }; let resp = reqwest::get(&format!("{}/versions/latest", url)) .await? .json::<HashMap<String, String>>() .await?; let (v, link) = resp.iter().next().ok_or(format_err!("Invalid Upgrenade server response"))?; let latest_version = v.parse::<Version>()?; if latest_version > current_version { Ok(Some(("0.1.0".parse()?, link.clone()))) } else { Ok(None) } }
// BLE scan interval register (`SCAN_INTERVAL`): 16-bit interval between two
// consecutive scanning events, in 0.625 ms units.

#[doc = "Reader of register SCAN_INTERVAL"]
pub type R = crate::R<u32, super::SCAN_INTERVAL>;
#[doc = "Writer for register SCAN_INTERVAL"]
pub type W = crate::W<u32, super::SCAN_INTERVAL>;
#[doc = "Register SCAN_INTERVAL `reset()`'s with value 0x10"]
impl crate::ResetValue for super::SCAN_INTERVAL {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x10
    }
}

#[doc = "Reader of field `SCAN_INTERVAL`"]
pub type SCAN_INTERVAL_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `SCAN_INTERVAL`"]
pub struct SCAN_INTERVAL_W<'a> {
    w: &'a mut W,
}
impl<'a> SCAN_INTERVAL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
        self.w
    }
}

impl R {
    #[doc = "Bits 0:15 - Scan interval. Time = N * 0.625 ms; valid range 0x0004 to 0x4000 (2.5 ms to 10.24 s), default 0x0010 (10 ms). Firmware sets this before issuing a start-scan command. In MMMS mode this register is used as SCAN_NI_TIMER when SCAN_NI_VALID is set by firmware."]
    #[inline(always)]
    pub fn scan_interval(&self) -> SCAN_INTERVAL_R {
        SCAN_INTERVAL_R::new((self.bits & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - Scan interval. Time = N * 0.625 ms; valid range 0x0004 to 0x4000 (2.5 ms to 10.24 s), default 0x0010 (10 ms). Firmware sets this before issuing a start-scan command. In MMMS mode this register is used as SCAN_NI_TIMER when SCAN_NI_VALID is set by firmware."]
    #[inline(always)]
    pub fn scan_interval(&mut self) -> SCAN_INTERVAL_W {
        SCAN_INTERVAL_W { w: self }
    }
}
use std::os::raw::c_char;
use std::ffi::CStr;

use photic::pipeline::shader::{Shader, ShaderSource};

/// FFI-visible wrapper around a compiled photic `Shader`.
pub struct X3DShader {
    pub shader: Shader,
}

impl X3DShader {
    /// Compiles a shader program from the given GLSL sources; geometry and
    /// tesselation stages are optional.
    pub fn new(vertex_shader: String, geometry_shader: Option<String>, tesselation_shader: Option<String>, fragment_shader: String) -> Self {
        Self {
            shader: Shader::from_source(ShaderSource {
                vertex_shader,
                geometry_shader,
                tesselation_shader,
                fragment_shader,
            }),
        }
    }
}

/// Converts a required C string into an owned `String`.
///
/// # Safety
/// `ptr` must be null or point to a valid, NUL-terminated UTF-8 string.
/// Panics on null or invalid UTF-8 (matching the original FFI contract).
unsafe fn required_string(ptr: *const c_char) -> String {
    assert!(!ptr.is_null());
    CStr::from_ptr(ptr).to_str().unwrap().to_string()
}

/// Converts an optional C string into `Option<String>` (null → `None`).
///
/// # Safety
/// `ptr` must be null or point to a valid, NUL-terminated UTF-8 string.
unsafe fn optional_string(ptr: *const c_char) -> Option<String> {
    if ptr.is_null() {
        None
    } else {
        Some(CStr::from_ptr(ptr).to_str().unwrap().to_string())
    }
}

//TODO: Do error checking lol
#[no_mangle]
pub extern "C" fn x3d_new_shader(vs: *const c_char, gs: *const c_char, ts: *const c_char, fs: *const c_char) -> *mut X3DShader {
    // Vertex and fragment stages are mandatory; geometry/tesselation may be null.
    let shader = unsafe {
        X3DShader::new(
            required_string(vs),
            optional_string(gs),
            optional_string(ts),
            required_string(fs),
        )
    };
    Box::into_raw(Box::new(shader))
}

/// Frees a shader previously returned by `x3d_new_shader`. Null is a no-op.
#[no_mangle]
pub extern "C" fn x3d_drop_shader(ptr: *mut X3DShader) {
    if ptr.is_null() {
        return; //Invalid pointer, scary stuff
    }
    // SAFETY: the pointer came from Box::into_raw in x3d_new_shader.
    unsafe {
        drop(Box::from_raw(ptr));
    }
}
// Copyright (C) 2021 Subspace Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Core primitives for Subspace Network.

#![cfg_attr(not(feature = "std"), no_std)]
#![warn(rust_2018_idioms, missing_docs)]
#![cfg_attr(feature = "std", warn(missing_debug_implementations))]
// Nightly-only features; this crate requires a nightly toolchain.
#![feature(
    array_chunks,
    const_option,
    const_trait_impl,
    const_try,
    new_uninit,
    portable_simd,
    slice_flatten,
    step_trait
)]

pub mod checksum;
pub mod crypto;
pub mod objects;
mod pieces;
mod segments;
#[cfg(feature = "serde")]
mod serde;
#[cfg(test)]
mod tests;

extern crate alloc;

use crate::crypto::kzg::{Commitment, Witness};
use crate::crypto::{blake2b_256_hash, blake2b_256_hash_list, blake2b_256_hash_with_key, Scalar};
#[cfg(feature = "serde")]
use ::serde::{Deserialize, Serialize};
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::convert::AsRef;
use core::fmt;
use core::iter::Iterator;
use core::num::NonZeroU64;
use core::simd::Simd;
use derive_more::{Add, AsMut, AsRef, Deref, DerefMut, Display, Div, From, Into, Mul, Rem, Sub};
use num_traits::{WrappingAdd, WrappingSub};
use parity_scale_codec::{Decode, Encode, MaxEncodedLen};
pub use pieces::{
    FlatPieces, Piece, PieceArray, PieceIndex, PieceOffset, RawRecord, Record, RecordCommitment,
    RecordWitness, SBucket,
};
use scale_info::TypeInfo;
pub use segments::{ArchivedHistorySegment, HistorySize, RecordedHistorySegment, SegmentIndex};
use uint::static_assertions::const_assert;

// Refuse to compile on lower than 32-bit platforms
const_assert!(core::mem::size_of::<usize>() >= core::mem::size_of::<u32>());

/// Byte length of a randomness type.
pub const RANDOMNESS_LENGTH: usize = 32;
/// Size of BLAKE2b-256 hash output (in bytes).
pub const BLAKE2B_256_HASH_SIZE: usize = 32;
/// BLAKE2b-256 hash output
pub type Blake2b256Hash = [u8; BLAKE2B_256_HASH_SIZE];
/// Size of BLAKE3 hash output (in bytes).
pub const BLAKE3_HASH_SIZE: usize = 32;
/// BLAKE3 hash output
pub type Blake3Hash = [u8; BLAKE3_HASH_SIZE];
/// 128 bits for the proof of time data types.
pub type PotBytes = [u8; 16];

/// Type of randomness.
#[derive(
    Debug,
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    From,
    Into,
    Deref,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Randomness(
    #[cfg_attr(feature = "serde", serde(with = "hex::serde"))] [u8; RANDOMNESS_LENGTH],
);

impl AsRef<[u8]> for Randomness {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl AsMut<[u8]> for Randomness {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        &mut self.0
    }
}

impl Randomness {
    /// Derive global slot challenge from global randomness.
    // TODO: Separate type for global challenge
    pub fn derive_global_challenge(&self, slot: SlotNumber) -> Blake2b256Hash {
        // Challenge = BLAKE2b-256 over (randomness || little-endian slot).
        blake2b_256_hash_list(&[&self.0, &slot.to_le_bytes()])
    }
}

/// Block number in Subspace network.
pub type BlockNumber = u32;

/// Slot number in Subspace network.
pub type SlotNumber = u64;

/// Type of solution range.
pub type SolutionRange = u64;

/// BlockWeight type for fork choice rules.
///
/// The closer solution's tag is to the target, the heavier it is.
pub type BlockWeight = u128;

/// Block hash (the bytes from H256)
pub type BlockHash = [u8; 32];

// TODO: New type
/// Segment commitment type.
pub type SegmentCommitment = Commitment;

/// Length of public key in bytes.
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// Length of signature in bytes
pub const REWARD_SIGNATURE_LENGTH: usize = 64;

/// Proof of space seed.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Deref)]
pub struct PosSeed([u8; Self::SIZE]);

impl From<[u8; PosSeed::SIZE]> for PosSeed {
    #[inline]
    fn from(value: [u8; Self::SIZE]) -> Self {
        Self(value)
    }
}

impl From<PosSeed> for [u8; PosSeed::SIZE] {
    #[inline]
    fn from(value: PosSeed) -> Self {
        value.0
    }
}

impl PosSeed {
    /// Size of proof of space seed in bytes.
    pub const SIZE: usize = 32;
}

/// Proof of space quality.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Deref)]
pub struct PosQualityBytes([u8; Self::SIZE]);

impl From<[u8; PosQualityBytes::SIZE]> for PosQualityBytes {
    #[inline]
    fn from(value: [u8; Self::SIZE]) -> Self {
        Self(value)
    }
}

impl From<PosQualityBytes> for [u8; PosQualityBytes::SIZE] {
    #[inline]
    fn from(value: PosQualityBytes) -> Self {
        value.0
    }
}

impl PosQualityBytes {
    /// Size of proof of space quality in bytes.
    pub const SIZE: usize = 32;
}

/// Proof of space proof bytes.
// Unlike seed/quality this type is SCALE-encodable since it is stored on
// chain as part of a solution.
#[derive(
    Debug, Copy, Clone, Eq, PartialEq, Deref, DerefMut, Encode, Decode, TypeInfo, MaxEncodedLen,
)]
pub struct PosProof([u8; Self::SIZE]);

impl From<[u8; PosProof::SIZE]> for PosProof {
    #[inline]
    fn from(value: [u8; Self::SIZE]) -> Self {
        Self(value)
    }
}

impl From<PosProof> for [u8; PosProof::SIZE] {
    #[inline]
    fn from(value: PosProof) -> Self {
        value.0
    }
}

impl Default for PosProof {
    #[inline]
    fn default() -> Self {
        Self([0; Self::SIZE])
    }
}

impl PosProof {
    /// Size of proof of space proof in bytes.
    pub const SIZE: usize = 17 * 8;

    /// Proof hash.
    pub fn hash(&self) -> Blake2b256Hash {
        blake2b_256_hash(&self.0)
    }
}

/// Proof of time key(input to the encryption).
#[derive(
    Debug,
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    From,
    Into,
    AsRef,
    AsMut,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
pub struct PotKey(PotBytes);

/// Proof of time seed (input to the encryption).
#[derive(
    Debug,
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    From,
    Into,
    AsRef,
    AsMut,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
pub struct PotSeed(PotBytes);

impl PotSeed {
    /// Builds the seed from a block hash, e.g. used to create the initial seed
    /// from the genesis block hash.
    #[inline]
    pub fn from_block_hash(block_hash: BlockHash) -> Self {
        // Only the first 16 bytes of the 32-byte block hash are used.
        Self(truncate_32_bytes(block_hash))
    }
}

/// Proof of time ciphertext (output from the encryption).
#[derive(
    Debug,
    Default,
    Copy,
    Clone,
    Eq,
    PartialEq,
    From,
    Into,
    AsRef,
    AsMut,
    Encode,
    Decode,
    TypeInfo,
    MaxEncodedLen,
)]
pub struct PotCheckpoint(PotBytes);

/// Proof of time.
/// TODO: versioning.
#[derive(Debug, Clone, Encode, Decode, Eq, PartialEq)]
pub struct PotProof {
    /// Slot the proof was evaluated for.
    pub slot_number: SlotNumber,
    /// The seed used for evaluation.
    pub seed: PotSeed,
    /// The key used for evaluation.
    pub key: PotKey,
    /// The encrypted outputs from each stage.
    pub checkpoints: NonEmptyVec<PotCheckpoint>,
    /// Hash of last block at injection point.
    pub injected_block_hash: BlockHash,
}

impl PotProof {
    /// Create the proof.
    pub fn new(
        slot_number: SlotNumber,
        seed: PotSeed,
        key: PotKey,
        checkpoints: NonEmptyVec<PotCheckpoint>,
        injected_block_hash: BlockHash,
    ) -> Self {
        Self {
            slot_number,
            seed,
            key,
            checkpoints,
            injected_block_hash,
        }
    }

    /// Returns the last check point.
    pub fn output(&self) -> PotCheckpoint {
        // `checkpoints` is a NonEmptyVec, so `last()` always exists.
        self.checkpoints.last()
    }

    /// Derives the global randomness from the output.
    pub fn derive_global_randomness(&self) -> Blake2b256Hash {
        blake2b_256_hash(&PotBytes::from(self.output()))
    }

    /// Derives the next seed based on the injected randomness.
    pub fn next_seed(&self, injected_hash: Option<BlockHash>) -> PotSeed {
        match injected_hash {
            Some(injected_hash) => {
                // Next seed = Hash(last checkpoint + injected hash).
                let hash = blake2b_256_hash_list(&[&self.output().0, &injected_hash]);
                PotSeed::from(truncate_32_bytes(hash))
            }
            None => {
                // No injected randomness, next seed = last checkpoint.
                PotSeed::from(self.output().0)
            }
        }
    }

    /// Derives the next key from the hash of the current seed.
    pub fn next_key(&self) -> PotKey {
        PotKey::from(truncate_32_bytes(blake2b_256_hash(&self.seed.0)))
    }
}

impl fmt::Display for PotProof {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "PotProof: [slot={}, seed={}, key={}, injected={}, checkpoints={}]",
            self.slot_number,
            hex::encode(self.seed.0),
            hex::encode(self.key.0),
            hex::encode(self.injected_block_hash),
            self.checkpoints.len()
        )
    }
}

/// Helper to truncate the 32 bytes to 16 bytes.
fn truncate_32_bytes(bytes: [u8; 32]) -> PotBytes {
    // Keeps the leading `size_of::<PotBytes>()` (16) bytes.
    bytes[..core::mem::size_of::<PotBytes>()]
        .try_into()
        .expect("Hash is longer than seed; qed")
}

/// A Ristretto Schnorr public key as bytes produced by `schnorrkel` crate.
#[derive(
    Debug,
    Default,
    Copy,
    Clone,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    Hash,
    Encode,
    Decode,
    TypeInfo,
    Deref,
    From,
    Into,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PublicKey(
    #[cfg_attr(feature = "serde", serde(with = "hex::serde"))] [u8; PUBLIC_KEY_LENGTH],
);

impl fmt::Display for PublicKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", hex::encode(self.0))
    }
}

impl AsRef<[u8]> for PublicKey {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl PublicKey {
    /// Public key hash.
    pub fn hash(&self) -> Blake2b256Hash {
        blake2b_256_hash(&self.0)
    }
}

/// A Ristretto Schnorr signature as bytes produced by `schnorrkel` crate.
#[derive(
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    Hash,
    Encode,
    Decode,
    TypeInfo,
    Deref,
    From,
    Into,
)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct RewardSignature(
    #[cfg_attr(feature = "serde", serde(with = "serde_arrays"))] [u8; REWARD_SIGNATURE_LENGTH],
);

impl AsRef<[u8]> for RewardSignature {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

/// Progress of an archived block.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum ArchivedBlockProgress {
    /// The block has been fully archived.
    Complete,
    /// Number of partially archived bytes of a block.
    Partial(u32),
}

impl Default for ArchivedBlockProgress {
    /// We assume a block can always fit into the segment initially, but it may be transitioned
    /// into the partial state after some overflow checks.
    #[inline]
    fn default() -> Self {
        Self::Complete
    }
}

impl ArchivedBlockProgress {
    /// Return the number of partially archived bytes if the progress is not complete.
    pub fn partial(&self) -> Option<u32> {
        match self {
            Self::Complete => None,
            Self::Partial(number) => Some(*number),
        }
    }

    /// Sets new number of partially archived bytes.
    pub fn set_partial(&mut self, new_partial: u32) {
        *self = Self::Partial(new_partial);
    }
}

/// Last archived block
#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct LastArchivedBlock {
    /// Block number
    pub number: BlockNumber,
    /// Progress of an archived block.
    pub archived_progress: ArchivedBlockProgress,
}

impl LastArchivedBlock {
    /// Returns the number of partially archived bytes for a block.
    pub fn partial_archived(&self) -> Option<u32> {
        self.archived_progress.partial()
    }

    /// Sets new number of partially archived bytes.
    // NOTE(review): the parameter is typed `BlockNumber` but semantically it
    // is a byte count (both are u32, so behavior is unaffected) — consider
    // changing the annotation to plain `u32`.
    pub fn set_partial_archived(&mut self, new_partial: BlockNumber) {
        self.archived_progress.set_partial(new_partial);
    }

    /// Sets the archived state of this block to [`ArchivedBlockProgress::Complete`].
    pub fn set_complete(&mut self) {
        self.archived_progress = ArchivedBlockProgress::Complete;
    }
}

/// Segment header for a specific segment.
///
/// Each segment will have corresponding [`SegmentHeader`] included as the first item in the next
/// segment. Each `SegmentHeader` includes hash of the previous one and all together form a chain of
/// segment headers that is used for quick and efficient verification that some [`Piece`]
/// corresponds to the actual archival history of the blockchain.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Hash)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub enum SegmentHeader {
    /// V0 of the segment header data structure
    #[codec(index = 0)]
    #[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
    V0 {
        /// Segment index
        segment_index: SegmentIndex,
        /// Root of commitments of all records in a segment.
        segment_commitment: SegmentCommitment,
        /// Hash of the segment header of the previous segment
        prev_segment_header_hash: Blake2b256Hash,
        /// Last archived block
        last_archived_block: LastArchivedBlock,
    },
}

impl SegmentHeader {
    /// Hash of the whole segment header
    pub fn hash(&self) -> Blake2b256Hash {
        // Hash is computed over the SCALE encoding of the enum (including the
        // variant index), so it is stable across field accessors.
        blake2b_256_hash(&self.encode())
    }

    /// Segment index
    pub fn segment_index(&self) -> SegmentIndex {
        match self {
            Self::V0 { segment_index, .. } => *segment_index,
        }
    }

    /// Segment commitment of the records in a segment.
    pub fn segment_commitment(&self) -> SegmentCommitment {
        match self {
            Self::V0 {
                segment_commitment, ..
            } => *segment_commitment,
        }
    }

    /// Hash of the segment header of the previous segment
    pub fn prev_segment_header_hash(&self) -> Blake2b256Hash {
        match self {
            Self::V0 {
                prev_segment_header_hash,
                ..
            } => *prev_segment_header_hash,
        }
    }

    /// Last archived block
    pub fn last_archived_block(&self) -> LastArchivedBlock {
        match self {
            Self::V0 {
                last_archived_block,
                ..
            } => *last_archived_block,
        }
    }
}

/// Sector index in consensus
pub type SectorIndex = u16;

// TODO: Versioned solution enum
/// Farmer solution for slot challenge.
#[derive(Clone, Debug, Eq, PartialEq, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "camelCase"))]
pub struct Solution<PublicKey, RewardAddress> {
    /// Public key of the farmer that created the solution
    pub public_key: PublicKey,
    /// Address for receiving block reward
    pub reward_address: RewardAddress,
    /// Index of the sector where solution was found
    pub sector_index: SectorIndex,
    /// Size of the blockchain history at time of sector creation
    pub history_size: HistorySize,
    /// Pieces offset within sector
    pub piece_offset: PieceOffset,
    /// Record commitment that can be used to verify that piece was included in blockchain history
    pub record_commitment: Commitment,
    /// Witness for above record commitment
    pub record_witness: Witness,
    /// Chunk at above offset
    pub chunk: Scalar,
    /// Witness for above chunk
    pub chunk_witness: Witness,
    /// Audit chunk offset within above chunk
    pub audit_chunk_offset: u8,
    /// Proof of space for piece offset
    pub proof_of_space: PosProof,
}

impl<PublicKey, RewardAddressA> Solution<PublicKey, RewardAddressA> {
    /// Transform solution with one reward address type into solution with another compatible
    /// reward address type.
    pub fn into_reward_address_format<T, RewardAddressB>(
        self,
    ) -> Solution<PublicKey, RewardAddressB>
    where
        RewardAddressA: Into<T>,
        T: Into<RewardAddressB>,
    {
        // Full destructuring: the compiler will flag any field added later
        // that is not carried over to the converted solution.
        let Solution {
            public_key,
            reward_address,
            sector_index,
            history_size,
            piece_offset,
            record_commitment,
            record_witness,
            chunk,
            chunk_witness,
            audit_chunk_offset,
            proof_of_space,
        } = self;
        Solution {
            public_key,
            // Conversion goes through the intermediate type `T`.
            reward_address: Into::<T>::into(reward_address).into(),
            sector_index,
            history_size,
            piece_offset,
            record_commitment,
            record_witness,
            chunk,
            chunk_witness,
            audit_chunk_offset,
            proof_of_space,
        }
    }
}

impl<PublicKey, RewardAddress> Solution<PublicKey, RewardAddress> {
    /// Dummy solution for the genesis block
    pub fn genesis_solution(public_key: PublicKey, reward_address: RewardAddress) -> Self {
        Self {
            public_key,
            reward_address,
            sector_index: 0,
            history_size: HistorySize::from(SegmentIndex::ZERO),
            piece_offset: PieceOffset::default(),
            record_commitment: Commitment::default(),
            record_witness: Witness::default(),
            chunk: Scalar::default(),
            chunk_witness: Witness::default(),
            audit_chunk_offset: 0,
            proof_of_space: PosProof::default(),
        }
    }
}

/// Bidirectional distance metric implemented on top of subtraction
pub fn bidirectional_distance<T: WrappingSub + Ord>(a: &T, b: &T) -> T {
    let diff = a.wrapping_sub(b);
    let diff2 = b.wrapping_sub(a);
    // Find smaller diff between 2 directions.
    diff.min(diff2)
}

#[allow(clippy::assign_op_pattern, clippy::ptr_offset_with_cast)]
mod private_u256 {
    //! This module is needed to scope clippy allows

    use parity_scale_codec::{Decode, Encode};
    use scale_info::TypeInfo;

    uint::construct_uint! {
        #[derive(Encode, Decode, TypeInfo)]
        pub struct U256(4);
    }
}

/// 256-bit unsigned integer
#[derive(
    Debug,
    Display,
    Add,
    Sub,
    Mul,
    Div,
    Rem,
    Copy,
    Clone,
    Ord,
    PartialOrd,
    Eq,
    PartialEq,
    Hash,
    Encode,
    Decode,
    TypeInfo,
)]
pub struct U256(private_u256::U256);

impl U256 {
    /// Zero (additive identity) of this type.
    #[inline]
    pub const fn zero() -> Self {
        Self(private_u256::U256::zero())
    }

    /// One (multiplicative identity) of this type.
    #[inline]
    pub fn one() -> Self {
        Self(private_u256::U256::one())
    }

    /// Create from big endian bytes
    pub fn from_be_bytes(bytes: [u8; 32]) -> Self {
        Self(private_u256::U256::from_big_endian(&bytes))
    }

    /// Convert to big endian bytes
    pub fn to_be_bytes(self) -> [u8; 32] {
        let mut arr = [0u8; 32];
        self.0.to_big_endian(&mut arr);
        arr
    }

    /// Create from little endian bytes
    pub fn from_le_bytes(bytes: [u8; 32]) -> Self {
        Self(private_u256::U256::from_little_endian(&bytes))
    }

    /// Convert to little endian bytes
    pub fn to_le_bytes(self) -> [u8; 32] {
        let mut arr = [0u8; 32];
        self.0.to_little_endian(&mut arr);
        arr
    }

    /// Adds two numbers, checking for overflow. If overflow happens, `None` is returned.
    pub fn checked_add(&self, v: &Self) -> Option<Self> {
        self.0.checked_add(v.0).map(Self)
    }

    /// Subtracts two numbers, checking for underflow. If underflow happens, `None` is returned.
    pub fn checked_sub(&self, v: &Self) -> Option<Self> {
        self.0.checked_sub(v.0).map(Self)
    }

    /// Multiplies two numbers, checking for underflow or overflow. If underflow or overflow
    /// happens, `None` is returned.
    pub fn checked_mul(&self, v: &Self) -> Option<Self> {
        self.0.checked_mul(v.0).map(Self)
    }

    /// Divides two numbers, checking for underflow, overflow and division by zero. If any of that
    /// happens, `None` is returned.
    pub fn checked_div(&self, v: &Self) -> Option<Self> {
        self.0.checked_div(v.0).map(Self)
    }

    /// Saturating addition. Computes `self + other`, saturating at the relevant high or low
    /// boundary of the type.
    pub fn saturating_add(&self, v: &Self) -> Self {
        Self(self.0.saturating_add(v.0))
    }

    /// Saturating subtraction. Computes `self - other`, saturating at the relevant high or low
    /// boundary of the type.
    pub fn saturating_sub(&self, v: &Self) -> Self {
        Self(self.0.saturating_sub(v.0))
    }

    /// Saturating multiplication. Computes `self * other`, saturating at the relevant high or low
    /// boundary of the type.
    pub fn saturating_mul(&self, v: &Self) -> Self {
        Self(self.0.saturating_mul(v.0))
    }

    /// The middle of the piece distance field.
    /// The analogue of `0b1000_0000` for `u8`.
    pub const MIDDLE: Self = {
        // TODO: This assumes that numbers are stored little endian,
        //  should be replaced with just `Self::MAX / 2`, but it is not `const fn` in Rust yet.
        Self(private_u256::U256([
            u64::MAX,
            u64::MAX,
            u64::MAX,
            u64::MAX / 2,
        ]))
    };

    /// Maximum value.
    pub const MAX: Self = Self(private_u256::U256::MAX);
}

// Necessary for division derive
impl From<U256> for private_u256::U256 {
    #[inline]
    fn from(number: U256) -> Self {
        number.0
    }
}

impl WrappingAdd for U256 {
    #[inline]
    fn wrapping_add(&self, other: &Self) -> Self {
        // Discards the overflow flag from `overflowing_add` to get wrapping
        // semantics (the inner uint type has no `wrapping_add`).
        Self(self.0.overflowing_add(other.0).0)
    }
}

impl WrappingSub for U256 {
    #[inline]
    fn wrapping_sub(&self, other: &Self) -> Self {
        Self(self.0.overflowing_sub(other.0).0)
    }
}

impl From<u8> for U256 {
    #[inline]
    fn from(number: u8) -> Self {
        Self(number.into())
    }
}

impl From<u16> for U256 {
    #[inline]
    fn from(number: u16) -> Self {
        Self(number.into())
    }
}

impl From<u32> for U256 {
    #[inline]
    fn from(number: u32) -> Self {
        Self(number.into())
    }
}

impl From<u64> for U256 {
    #[inline]
    fn from(number: u64) -> Self {
        Self(number.into())
    }
}

impl From<u128> for U256 {
    #[inline]
    fn from(number: u128) -> Self {
        Self(number.into())
    }
}

impl TryFrom<U256> for u8 {
    type Error = &'static str;

    #[inline]
    fn try_from(value: U256) -> Result<Self, Self::Error> {
        Self::try_from(value.0)
    }
}

impl TryFrom<U256> for u16 {
    type Error = &'static str;

    #[inline]
    fn try_from(value: U256) -> Result<Self, Self::Error> {
        Self::try_from(value.0)
    }
}

impl TryFrom<U256> for u32 {
    type Error = &'static str;

    #[inline]
    fn try_from(value: U256) -> Result<Self, Self::Error> {
        Self::try_from(value.0)
    }
}

impl TryFrom<U256> for u64 {
    type Error = &'static str;

    #[inline]
    fn try_from(value: U256) -> Result<Self, Self::Error>
    {
        Self::try_from(value.0)
    }
}

impl Default for U256 {
    fn default() -> Self {
        Self::zero()
    }
}

/// Challenge used for a particular sector for particular slot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Deref)]
pub struct SectorSlotChallenge(Blake2b256Hash);

impl SectorSlotChallenge {
    /// Index of s-bucket within sector to be audited
    #[inline]
    pub fn s_bucket_audit_index(&self) -> SBucket {
        // Treats the challenge as a little-endian 256-bit number and reduces
        // it modulo the number of s-buckets.
        SBucket::from(
            u16::try_from(U256::from_le_bytes(self.0) % U256::from(Record::NUM_S_BUCKETS as u32))
                .expect(
                    "Remainder of division by Record::NUM_S_BUCKETS is statically guaranteed \
                    to fit into SBucket; qed",
                ),
        )
    }
}

/// Data structure representing sector ID in farmer's plot
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Encode, Decode, TypeInfo)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct SectorId(#[cfg_attr(feature = "serde", serde(with = "hex::serde"))] Blake2b256Hash);

impl AsRef<[u8]> for SectorId {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl SectorId {
    /// Create new sector ID by deriving it from public key and sector index
    pub fn new(public_key_hash: Blake2b256Hash, sector_index: SectorIndex) -> Self {
        // Keyed BLAKE2b: public key hash as the key, LE sector index as data.
        Self(blake2b_256_hash_with_key(
            &public_key_hash,
            &sector_index.to_le_bytes(),
        ))
    }

    /// Derive piece index that should be stored in sector at `piece_offset` for specified size of
    /// blockchain history
    pub fn derive_piece_index(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
        max_pieces_in_sector: u16,
        recent_segments: HistorySize,
        recent_history_fraction: (HistorySize, HistorySize),
    ) -> PieceIndex {
        let recent_segments_in_pieces = recent_segments.in_pieces().get();
        // Recent history must be at most `recent_history_fraction` of all history to use separate
        // policy for recent pieces
        let min_history_size_in_pieces = recent_segments_in_pieces
            * recent_history_fraction.1.in_pieces().get()
            / recent_history_fraction.0.in_pieces().get();
        let input_hash =
            U256::from_le_bytes(blake2b_256_hash_with_key(&piece_offset.to_bytes(), &self.0));
        let history_size_in_pieces = history_size.in_pieces().get();
        let num_interleaved_pieces = 1.max(
            u64::from(max_pieces_in_sector) * recent_history_fraction.0.in_pieces().get()
                / recent_history_fraction.1.in_pieces().get()
                * 2,
        );

        let piece_index = if history_size_in_pieces > min_history_size_in_pieces
            && u64::from(piece_offset) < num_interleaved_pieces
            && u16::from(piece_offset) % 2 == 1
        {
            // For odd piece offsets at the beginning of the sector pick pieces at random from
            // recent history only
            input_hash % U256::from(recent_segments_in_pieces)
                + U256::from(history_size_in_pieces - recent_segments_in_pieces)
        } else {
            input_hash % U256::from(history_size_in_pieces)
        };

        PieceIndex::from(u64::try_from(piece_index).expect(
            "Remainder of division by PieceIndex is guaranteed to fit into PieceIndex; qed",
        ))
    }

    /// Derive sector slot challenge for this sector from provided global challenge
    pub fn derive_sector_slot_challenge(
        &self,
        global_challenge: &Blake2b256Hash,
    ) -> SectorSlotChallenge {
        // Byte-wise XOR of sector ID and global challenge, done via SIMD.
        let sector_slot_challenge = Simd::from(self.0) ^ Simd::from(*global_challenge);
        SectorSlotChallenge(sector_slot_challenge.to_array())
    }

    /// Derive evaluation seed
    pub fn derive_evaluation_seed(
        &self,
        piece_offset: PieceOffset,
        history_size: HistorySize,
    ) -> PosSeed {
        let evaluation_seed = blake2b_256_hash_list(&[
            &self.0,
            &piece_offset.to_bytes(),
            &history_size.get().to_le_bytes(),
        ]);

        PosSeed::from(evaluation_seed)
    }

    /// Derive history size when sector created at `history_size` expires.
    ///
    /// Returns `None` on overflow.
    pub fn derive_expiration_history_size(
        &self,
        history_size: HistorySize,
        sector_expiration_check_segment_commitment: &SegmentCommitment,
        min_sector_lifetime: HistorySize,
    ) -> Option<HistorySize> {
        let sector_expiration_check_history_size =
            history_size.sector_expiration_check(min_sector_lifetime)?;

        let input_hash = U256::from_le_bytes(blake2b_256_hash_list(&[
            &self.0,
            &sector_expiration_check_segment_commitment.to_bytes(),
        ]));

        // Expiration can happen at most 4x the creation-time history size
        // past the minimum lifetime; all arithmetic is checked (`None` on
        // overflow/underflow).
        let last_possible_expiration =
            min_sector_lifetime.checked_add(history_size.get().checked_mul(4u64)?)?;
        let expires_in = input_hash
            % U256::from(
                last_possible_expiration
                    .get()
                    .checked_sub(sector_expiration_check_history_size.get())?,
            );

        let expires_in = u64::try_from(expires_in).expect("Number modulo u64 fits into u64; qed");

        let expiration_history_size = sector_expiration_check_history_size.get() + expires_in;
        let expiration_history_size = NonZeroU64::try_from(expiration_history_size).expect(
            "History size is not zero, so result is not zero even if expires immediately; qed",
        );
        Some(HistorySize::from(expiration_history_size))
    }
}

/// A Vec<> that enforces the invariant that it cannot be empty.
#[derive(Debug, Clone, Encode, Decode, Eq, PartialEq)]
pub struct NonEmptyVec<T>(Vec<T>);

/// Error codes for `NonEmptyVec`.
#[derive(Debug)]
pub enum NonEmptyVecErr {
    /// Tried to create with an empty Vec
    EmptyVec,
}

#[allow(clippy::len_without_is_empty)]
impl<T: Clone> NonEmptyVec<T> {
    /// Creates the Vec.
    pub fn new(vec: Vec<T>) -> Result<Self, NonEmptyVecErr> {
        // Sole constructor taking a Vec — establishes the non-empty invariant
        // the accessors below rely on.
        if vec.is_empty() {
            return Err(NonEmptyVecErr::EmptyVec);
        }

        Ok(Self(vec))
    }

    /// Creates the Vec with the entry.
    pub fn new_with_entry(entry: T) -> Self {
        Self(alloc::vec![entry])
    }

    /// Returns the number of entries.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns the slice of the entries.
    pub fn as_slice(&self) -> &[T] {
        self.0.as_slice()
    }

    /// Returns an iterator for the entries.
    pub fn iter(&self) -> Box<dyn Iterator<Item = &T> + '_> {
        Box::new(self.0.iter())
    }

    /// Returns a mutable iterator for the entries.
    pub fn iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut T> + '_> {
        Box::new(self.0.iter_mut())
    }

    /// Returns the first entry.
    // Non-emptiness invariant guarantees the `expect` calls below cannot fire.
    pub fn first(&self) -> T {
        self.0
            .first()
            .expect("NonEmptyVec::first(): collection cannot be empty")
            .clone()
    }

    /// Returns the last entry.
    pub fn last(&self) -> T {
        self.0
            .last()
            .expect("NonEmptyVec::last(): collection cannot be empty")
            .clone()
    }

    /// Adds an entry to the end.
    pub fn push(&mut self, entry: T) {
        self.0.push(entry);
    }

    /// Returns the entries in the collection.
    // Consumes `self`, handing back the inner Vec (invariant no longer holds).
    pub fn to_vec(self) -> Vec<T> {
        self.0
    }
}
/*!
```rudra-poc
[target]
crate = "bam"
version = "0.1.2"
indexed_version = "0.1.0"

[test]
cargo_flags = ["--release"]

[report]
issue_url = "https://gitlab.com/tprodanov/bam/-/issues/4"
issue_date = 2021-01-07
rustsec_url = "https://github.com/RustSec/advisory-db/pull/782"
rustsec_id = "RUSTSEC-2021-0027"

[[bugs]]
analyzer = "UnsafeDataflow"
bug_class = "UninitExposure"
rudra_report_locations = ["src/bgzip/mod.rs:296:5: 332:6"]

[[bugs]]
analyzer = "Manual"
guide = "UnsafeDataflow"
bug_class = "Other"
rudra_report_locations = []
```
!*/
// Proof-of-concept for RUSTSEC-2021-0027 (see metadata above). The PoC itself
// contains no unsafe code; the defect is inside the `bam` crate.
#![forbid(unsafe_code)]
use bam::bgzip::Block;

// BGZF block size limits used to bound a well-formed block.
pub const MAX_BLOCK_SIZE: usize = 65536;
const HEADER_SIZE: usize = 12;
const MIN_EXTRA_SIZE: usize = 6;
const FOOTER_SIZE: usize = 8;
// WRAPPER_SIZE = 26
const WRAPPER_SIZE: usize = HEADER_SIZE + MIN_EXTRA_SIZE + FOOTER_SIZE;
pub const MAX_COMPRESSED_SIZE: usize = MAX_BLOCK_SIZE - WRAPPER_SIZE;

fn main() {
    let mut block = Block::new();
    // Hand-crafted gzip/BGZF prefix with a declared block size of zero.
    let mut buf: Vec<u8> = Vec::new();
    buf.extend(&[31, 139, 8, 4, 5, 6, 7, 8, 9, 10, 0, 0]); // 12 bytes header
    buf.extend(&[66, 67, 2, 0, 0, 0]); // `block_size` is 0
    block.load(None, &mut buf.as_slice()).ok();
    let compressed_size = block.compressed_size();
    println!("{}", compressed_size);
    // NOTE(review): the assertion documents the expected bound; presumably the
    // vulnerable version reports a size exceeding it (triggering the panic) —
    // confirm against the linked advisory.
    assert!(compressed_size as usize <= MAX_COMPRESSED_SIZE + FOOTER_SIZE);
}
#![cfg_attr(feature = "unstable", feature(test))] // Launch program : cargo run --release < input/input.txt // Launch benchmark : cargo +nightly bench --features "unstable" /* Benchmark results: running 5 tests test tests::test_part_1 ... ignored test tests::test_part_2 ... ignored test bench::bench_parse_input ... bench: 6,387,662 ns/iter (+/- 142,930) test bench::bench_part_1 ... bench: 116,764 ns/iter (+/- 15,101) test bench::bench_part_2 ... bench: 27,432 ns/iter (+/- 2,558) */ #[macro_use] extern crate lazy_static; extern crate regex; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::io::{self, Read, Write}; type Result<T> = ::std::result::Result<T, Box<dyn Error>>; macro_rules! err { ($($tt:tt)*) => { return Err(Box::<dyn Error>::from(format!($($tt)*))) } } #[derive(Debug, Clone)] struct Bag { color: String, contains: HashMap<String, usize>, contained_by: HashMap<String, usize>, } fn main() -> Result<()> { let mut input = String::new(); io::stdin().read_to_string(&mut input)?; let bags = parse_input(&input)?; writeln!(io::stdout(), "Part 1 : {}", part_1(&bags)?)?; writeln!(io::stdout(), "Part 2 : {}", part_2(&bags)?)?; Ok(()) } fn parse_input(input: &str) -> Result<HashMap<String, Bag>> { use regex::Regex; let mut bags: HashMap<String, Bag> = HashMap::new(); lazy_static! 
{ static ref DAY_07_CONTAINER_BAG_REGEX: Regex = Regex::new(r"^(?P<container_bag_color>[a-z]+ [a-z]+) bags contain (?P<contained_bags>(?:no other bags)|(\d+ [a-z]+ [a-z]+ bag(s)?)(, \d+ [a-z]+ [a-z]+ bag(s)?)*)\.$") .expect("Invalid DAY_07_CONTAINER_BAG_REGEX!"); static ref DAY_07_CONTAINED_BAG_REGEX: Regex = Regex::new(r"(?P<contained_bag_number>\d+) (?P<contained_bag_color>[a-z]+ [a-z]+)") .expect("Invalid DAY_07_CONTAINED_BAG_REGEX!"); } for line in input.lines() { if let Some(cap) = DAY_07_CONTAINER_BAG_REGEX.captures(line) { let current_bag = match bags.entry(cap["container_bag_color"].into()) { Entry::Occupied(o) => o.into_mut(), Entry::Vacant(v) => v.insert(Bag { color: cap["container_bag_color"].to_string(), contains: HashMap::new(), contained_by: HashMap::new(), }), }; if &cap["contained_bags"] != "no other bags" { for subcap in DAY_07_CONTAINED_BAG_REGEX.captures_iter(&cap["contained_bags"]) { if current_bag .contains .insert( subcap["contained_bag_color"].to_string(), subcap["contained_bag_number"].parse::<usize>()?, ) .is_some() { err!( "Current bag already contains this bag color : {} => {}", &cap["container_bag_color"], &subcap["contained_bag_color"] ) } } } } else { err!("Couldn't parse input line : {}", line) } } for (bag_color, bag) in &bags.clone() { // Cannot borrow as a mutable more than once, but can clone this one to update the real one for (contained_bag_color, contained_bag_number) in &bag.contains { let contained_bag = match bags.entry(contained_bag_color.into()) { Entry::Occupied(o) => o.into_mut(), Entry::Vacant(v) => v.insert(Bag { color: contained_bag_color.clone(), contains: HashMap::new(), contained_by: HashMap::new(), }), }; if contained_bag .contained_by .insert(bag_color.clone(), *contained_bag_number) .is_some() { err!( "Current bag already is already contained by this bag color : {} => {}", &contained_bag_color, &bag_color ) } } } Ok(bags) } fn part_1(bags: &HashMap<String, Bag>) -> Result<usize> { let mut bags_checked: 
HashSet<String> = HashSet::new(); let mut bags_to_check: Vec<String> = vec![]; if let Some(shiny_gold_bag) = bags.get("shiny gold") { bags_to_check = shiny_gold_bag .contained_by .iter() .map(|(k, _)| k.clone()) .collect(); } else { err!("Couldn't find the shiny gold bag !") } while let Some(current_bag) = bags_to_check.pop() { if bags_checked.insert(current_bag.clone()) { if let Some(container_bag) = bags.get(&current_bag) { bags_to_check.extend( container_bag .contained_by .iter() .map(|(k, _)| k.clone()) .collect::<Vec<String>>(), ); } else { err!("Couldn't find a container bag : {}", &current_bag) } } } Ok(bags_checked.len()) } fn part_2(bags: &HashMap<String, Bag>) -> Result<usize> { let mut number_of_bags = 0; let mut bags_to_check: Vec<(String, usize)> = vec![]; if let Some(shiny_gold_bag) = bags.get("shiny gold") { bags_to_check = shiny_gold_bag .contains .iter() .map(|(k, v)| (k.clone(), *v)) .collect(); } else { err!("Couldn't find the shiny gold bag !") } while let Some((current_bag_color, current_bag_number)) = bags_to_check.pop() { if let Some(contained_bag) = bags.get(&current_bag_color) { number_of_bags += current_bag_number; bags_to_check.extend( contained_bag .contains .iter() .map(|(k, v)| (k.clone(), v * current_bag_number)) .collect::<Vec<(String, usize)>>(), ); } else { err!("Couldn't find a container bag : {}", &current_bag_color) } } Ok(number_of_bags) } #[cfg(test)] mod tests { use super::*; use std::fs::File; fn read_test_file() -> Result<String> { let mut input = String::new(); File::open("input/test.txt")?.read_to_string(&mut input)?; Ok(input) } fn read_test_file_2() -> Result<String> { let mut input = String::new(); File::open("input/test2.txt")?.read_to_string(&mut input)?; Ok(input) } #[test] fn test_part_1() -> Result<()> { let bags = parse_input(&read_test_file()?)?; assert_eq!(part_1(&bags)?, 4); Ok(()) } #[test] fn test_part_2() -> Result<()> { let bags = parse_input(&read_test_file()?)?; assert_eq!(part_2(&bags)?, 32); let bags = 
parse_input(&read_test_file_2()?)?; assert_eq!(part_2(&bags)?, 126); Ok(()) } } #[cfg(all(feature = "unstable", test))] mod bench { extern crate test; use super::*; use std::fs::File; use test::Bencher; fn read_input_file() -> Result<String> { let mut input = String::new(); File::open("input/input.txt")?.read_to_string(&mut input)?; Ok(input) } #[bench] fn bench_parse_input(b: &mut Bencher) -> Result<()> { let input = read_input_file()?; b.iter(|| test::black_box(parse_input(&input))); Ok(()) } #[bench] fn bench_part_1(b: &mut Bencher) -> Result<()> { let bags = parse_input(&read_input_file()?)?; b.iter(|| test::black_box(part_1(&bags))); Ok(()) } #[bench] fn bench_part_2(b: &mut Bencher) -> Result<()> { let bags = parse_input(&read_input_file()?)?; b.iter(|| test::black_box(part_2(&bags))); Ok(()) } }
// Crate root: all functionality lives in the re-exported `error` and `types`
// modules; the `dll` feature additionally pulls in libc/libloading for
// runtime library loading.
#[cfg(feature = "dll")]
pub extern crate libc;
#[cfg(feature = "dll")]
pub extern crate libloading;
pub extern crate uuid;

#[macro_use]
extern crate error_chain;

mod error;
mod macros;
mod types;

pub use error::*;
pub use types::*;

#[cfg(test)]
mod tests {
    // Smoke test: verifies the crate compiles and the test harness runs.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
use serde::{Deserialize, Serialize}; use uuid::Uuid; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub enum PollStatus { Pending, Active, Complete } impl std::str::FromStr for PollStatus { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<Self> { match s { "Pending" => Ok(Self::Pending), "Active" => Ok(Self::Active), "Complete" => Ok(Self::Complete), _ => Err(anyhow::anyhow!("Invalid theme [{}]", s)) } } } impl std::fmt::Display for PollStatus { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { Self::Pending => "Pending", Self::Active => "Active", Self::Complete => "Complete" }; write!(f, "{}", s) } } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Poll { id: Uuid, idx: u32, author_id: Uuid, title: String, status: PollStatus, final_vote: Option<String> } impl Poll { pub const fn new(id: Uuid, idx: u32, author_id: Uuid, title: String, status: PollStatus) -> Self { Self { id, idx, author_id, title, status, final_vote: None } } pub const fn id(&self) -> &Uuid { &self.id } pub const fn idx(&self) -> u32 { self.idx } pub const fn title(&self) -> &String { &self.title } pub fn set_title(&mut self, t: String) { self.title = t; } pub const fn status(&self) -> &PollStatus { &self.status } pub fn set_status(&mut self, s: PollStatus) { self.status = s; } } #[derive(Clone, Debug, Deserialize, Serialize)] pub enum PollActionType { UpdateTitle, StatusChange, CastVote } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct PollAction { id: Uuid, poll_id: Uuid, user_id: Uuid, t: PollActionType, ctx: std::collections::HashMap<String, String>, message: String } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Vote { poll_id: Uuid, user_id: Uuid, choice: String } impl Vote { pub const fn new(poll_id: Uuid, user_id: Uuid, choice: String) -> Self { Self { poll_id, user_id, choice } } pub const fn poll_id(&self) -> &Uuid { &self.poll_id } pub const fn user_id(&self) -> &Uuid { &self.user_id } 
pub const fn choice(&self) -> &String { &self.choice } pub fn set_choice(&mut self, c: String) { self.choice = c; } }
use crate::SnailfishNumber::*;
use std::fs;

/// A snailfish number (AoC 2021 day 18): either a plain digit or a pair of
/// nested snailfish numbers.
#[derive(PartialEq, Eq, Debug, Hash, Clone)]
enum SnailfishNumber {
    Literal(u8),
    Pair(Box<SnailfishNumber>, Box<SnailfishNumber>),
}

impl SnailfishNumber {
    // Convenience constructor that boxes both halves of a pair.
    fn new_pair(left: SnailfishNumber, right: SnailfishNumber) -> Self {
        Pair(Box::new(left), Box::new(right))
    }

    /// Recursive-descent parse of a snailfish literal such as "[[1,2],3]".
    /// Returns the parsed number and how many characters were consumed.
    /// Panics on malformed input (single-digit literals only, by construction
    /// of the puzzle input).
    fn parse(s: &str) -> (Self, usize) {
        if s.starts_with('[') {
            // Parse "[", left, ",", right, "]".
            let (left, left_num_chars_read) = SnailfishNumber::parse(&s[1..]);
            let (right, right_num_chars_read) =
                SnailfishNumber::parse(&s[(left_num_chars_read + 2)..]); // + 2 for left bracket and comma
            (
                SnailfishNumber::new_pair(left, right),
                left_num_chars_read + right_num_chars_read + 3, // + 3 for brackets and comma
            )
        } else {
            (Literal(s[0..1].parse().unwrap()), 1)
        }
    }

    /// Puzzle-defined magnitude: 3 * left + 2 * right, literals are themselves.
    fn magnitude(&self) -> usize {
        match self {
            Literal(v) => (*v).into(),
            Pair(left, right) => 3 * left.magnitude() + 2 * right.magnitude(),
        }
    }

    /// Snailfish addition: pair the operands and fully reduce the result.
    fn add(self, other: SnailfishNumber) -> Self {
        SnailfishNumber::new_pair(self, other).reduce()
    }

    /// Repeatedly applies the reduction rules: explode first, and only split
    /// when no explosion happened, until the number is stable.
    fn reduce(self) -> Self {
        let mut curr = self;
        let mut action_occurred = true;
        while action_occurred {
            action_occurred = false;
            let (exploded, maybe_exploded) = curr.explode(0);
            curr = exploded;
            if maybe_exploded.is_some() {
                action_occurred = true;
            } else {
                let (split, did_split) = curr.split();
                curr = split;
                if did_split {
                    action_occurred = true;
                }
            }
        }
        curr
    }

    /// Explodes the leftmost pair nested at depth 4. The second return value
    /// carries the (left, right) literals still looking for a neighbor to be
    /// added to; a side is `None` once it has been absorbed.
    fn explode(self, depth: usize) -> (Self, Option<(Option<u8>, Option<u8>)>) {
        match self {
            Pair(left, right) => {
                if let (Literal(lv), Literal(rv), 4) = (left.as_ref(), right.as_ref(), depth) {
                    // Explode this pair; propagate both values up the tree to
                    // be added elsewhere.
                    return (Literal(0), Some((Some(*lv), Some(*rv))));
                }
                // Explode left then right pair, shortcutting if left explodes first.
                // If left explodes, attempt to send its right value into the right
                // subtree; if right explodes, send its left value into the left subtree.
                let exploded_left = if let Pair(_, _) = left.as_ref() {
                    let (exploded_left, exploded_vals) = left.explode(depth + 1);
                    if let Some((l_opt, r_opt)) = exploded_vals {
                        // The left subtree exploded: absorb the right value into
                        // the leftmost literal of the right subtree if present,
                        // otherwise keep propagating it upward.
                        let (r_opt, right) = match r_opt {
                            None => (r_opt, right),
                            Some(rv) => (None, Box::new(right.add_to_leftmost_literal(rv))),
                        };
                        return (Pair(Box::new(exploded_left), right), Some((l_opt, r_opt)));
                    }
                    Box::new(exploded_left)
                } else {
                    left
                };
                let exploded_right = if let Pair(_, _) = right.as_ref() {
                    let (exploded_right, exploded_vals) = right.explode(depth + 1);
                    if let Some((l_opt, r_opt)) = exploded_vals {
                        // The right subtree exploded: absorb the left value into
                        // the rightmost literal of the left subtree if present.
                        let (l_opt, left) = match l_opt {
                            None => (l_opt, exploded_left),
                            Some(lv) => {
                                (None, Box::new(exploded_left.add_to_rightmost_literal(lv)))
                            }
                        };
                        return (Pair(left, Box::new(exploded_right)), Some((l_opt, r_opt)));
                    }
                    Box::new(exploded_right)
                } else {
                    right
                };
                (Pair(exploded_left, exploded_right), None)
            }
            // Recursion stops at pairs; a literal is never asked to explode.
            _ => panic!("wtf we exploded a literal"),
        }
    }

    // Adds `val` to the leftmost literal in this subtree.
    fn add_to_leftmost_literal(self, val: u8) -> Self {
        match self {
            Literal(n) => Literal(n + val),
            Pair(left, right) => Pair(Box::new(left.add_to_leftmost_literal(val)), right),
        }
    }

    // Adds `val` to the rightmost literal in this subtree.
    fn add_to_rightmost_literal(self, val: u8) -> Self {
        match self {
            Literal(n) => Literal(n + val),
            Pair(left, right) => Pair(left, Box::new(right.add_to_rightmost_literal(val))),
        }
    }

    /// Splits the leftmost literal >= 10 into a pair (floor, ceil of half).
    /// Returns the (possibly unchanged) number and whether a split happened.
    fn split(self) -> (Self, bool) {
        match self {
            Literal(v) => {
                if v >= 10 {
                    let (div, rem) = (v / 2, v % 2);
                    (
                        SnailfishNumber::new_pair(Literal(div), Literal(div + rem)),
                        true,
                    )
                } else {
                    (self, false)
                }
            }
            Pair(left, right) => {
                let (split_left, did_split) = left.split();
                let left = Box::new(split_left);
                if did_split {
                    // Only one split per reduction step: stop here.
                    return (Pair(left, right), true);
                }
                let (split_right, did_split) = right.split();
                let right = Box::new(split_right);
                (Pair(left, right), did_split)
            }
        }
    }
}

fn main() {
    let filename = "input/input.txt";
    let num_strs = parse_input_file(filename);
    println!("num_strs: {:?}", num_strs);
    println!();
    let snailfish_nums: Vec<_> = num_strs
        .into_iter()
        .map(|s| SnailfishNumber::parse(&s).0)
        .collect();
    println!("snailfish_nums: {:?}", snailfish_nums);

    // Part 1: magnitude of the running sum of all numbers.
    let sum = snailfish_nums
        .iter()
        .cloned()
        .reduce(|acc, num| acc.add(num))
        .unwrap();
    println!("sum: {:?}", sum);
    println!("magnitude of sum: {}", sum.magnitude());

    // Part 2: largest magnitude from adding any two distinct numbers.
    // (Plain nested loops replace the former itertools::cartesian_product,
    // dropping the crate's only third-party dependency.)
    let mut max_mag = 0;
    for num_a in &snailfish_nums {
        for num_b in &snailfish_nums {
            if num_a != num_b {
                max_mag = max_mag.max(num_a.clone().add(num_b.clone()).magnitude());
            }
        }
    }
    println!(
        "max magnitude of any sum of 2 distinct snailfish nums: {:?}",
        max_mag
    );
}

/// Reads one snailfish number per line. Uses `lines()` (and skips blanks) so a
/// trailing newline no longer produces an empty string that would make
/// `SnailfishNumber::parse` panic.
fn parse_input_file(filename: &str) -> Vec<String> {
    let file_contents = fs::read_to_string(filename).unwrap();
    file_contents
        .lines()
        .filter(|l| !l.is_empty())
        .map(|l| l.to_string())
        .collect()
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    #[test]
    fn parse_nums() {
        let tests = HashMap::from([
            ("[1,1]", SnailfishNumber::new_pair(Literal(1), Literal(1))),
            (
                "[[1,2],3]",
                SnailfishNumber::new_pair(
                    SnailfishNumber::new_pair(Literal(1), Literal(2)),
                    Literal(3),
                ),
            ),
            (
                "[9,[8,7]]",
                SnailfishNumber::new_pair(
                    Literal(9),
                    SnailfishNumber::new_pair(Literal(8), Literal(7)),
                ),
            ),
            (
                "[[1,9],[8,5]]",
                SnailfishNumber::new_pair(
                    SnailfishNumber::new_pair(Literal(1), Literal(9)),
                    SnailfishNumber::new_pair(Literal(8), Literal(5)),
                ),
            ),
            (
                "[[[[1,2],[3,4]],[[5,6],[7,8]]],9]",
                SnailfishNumber::new_pair(
                    SnailfishNumber::new_pair(
                        SnailfishNumber::new_pair(
                            SnailfishNumber::new_pair(Literal(1), Literal(2)),
                            SnailfishNumber::new_pair(Literal(3), Literal(4)),
                        ),
                        SnailfishNumber::new_pair(
                            SnailfishNumber::new_pair(Literal(5), Literal(6)),
                            SnailfishNumber::new_pair(Literal(7), Literal(8)),
                        ),
                    ),
                    Literal(9),
                ),
            ),
        ]);
        for (s, expected_sf_num) in tests {
            let (sf_num, _) = SnailfishNumber::parse(s);
            assert_eq!(sf_num, expected_sf_num);
        }
    }

    #[test]
    fn explode() {
        let tests = HashMap::from([
            ("[[[[[9,8],1],2],3],4]", "[[[[0,9],2],3],4]"),
            ("[7,[6,[5,[4,[3,2]]]]]", "[7,[6,[5,[7,0]]]]"),
            ("[[6,[5,[4,[3,2]]]],1]", "[[6,[5,[7,0]]],3]"),
            (
                "[[3,[2,[1,[7,3]]]],[6,[5,[4,[3,2]]]]]",
                "[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
            ),
            (
                "[[3,[2,[8,0]]],[9,[5,[4,[3,2]]]]]",
                "[[3,[2,[8,0]]],[9,[5,[7,0]]]]",
            ),
        ]);
        for (s, after_exploded_str) in tests {
            let (sf_num, _) = SnailfishNumber::parse(s);
            let (exploded_sf_num, _) = sf_num.explode(0);
            let (expected_exploded_num, _) = SnailfishNumber::parse(after_exploded_str);
            assert_eq!(exploded_sf_num, expected_exploded_num);
        }
    }

    #[test]
    fn split() {
        let tests = HashMap::from([
            (
                SnailfishNumber::new_pair(Literal(1), Literal(2)),
                SnailfishNumber::new_pair(Literal(1), Literal(2)),
            ),
            (
                SnailfishNumber::new_pair(Literal(1), Literal(10)),
                SnailfishNumber::new_pair(
                    Literal(1),
                    SnailfishNumber::new_pair(Literal(5), Literal(5)),
                ),
            ),
            (
                SnailfishNumber::new_pair(Literal(1), Literal(11)),
                SnailfishNumber::new_pair(
                    Literal(1),
                    SnailfishNumber::new_pair(Literal(5), Literal(6)),
                ),
            ),
            (
                SnailfishNumber::new_pair(Literal(13), Literal(11)),
                SnailfishNumber::new_pair(
                    SnailfishNumber::new_pair(Literal(6), Literal(7)),
                    Literal(11),
                ),
            ),
        ]);
        for (sf_num, after_split_num) in tests {
            let (split_sf_num, _) = sf_num.split();
            assert_eq!(split_sf_num, after_split_num);
        }
    }

    #[test]
    fn reduce() {
        let tests = HashMap::from([(
            "[[[[[4,3],4],4],[7,[[8,4],9]]],[1,1]]",
            "[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
        )]);
        for (s, after_reduce_str) in tests {
            let (sf_num, _) = SnailfishNumber::parse(s);
            let reduced_sf_num = sf_num.reduce();
            let (expected_reduced_num, _) = SnailfishNumber::parse(after_reduce_str);
            assert_eq!(reduced_sf_num, expected_reduced_num);
        }
    }

    #[test]
    fn add() {
        let tests = HashMap::from([
            (
                vec!["[1,1]", "[2,2]", "[3,3]", "[4,4]"],
                "[[[[1,1],[2,2]],[3,3]],[4,4]]",
            ),
            (
                vec!["[1,1]", "[2,2]", "[3,3]", "[4,4]", "[5,5]"],
                "[[[[3,0],[5,3]],[4,4]],[5,5]]",
            ),
            (
                vec!["[1,1]", "[2,2]", "[3,3]", "[4,4]", "[5,5]", "[6,6]"],
                "[[[[5,0],[7,4]],[5,5]],[6,6]]",
            ),
            (
                vec![
                    "[[[0,[4,5]],[0,0]],[[[4,5],[2,6]],[9,5]]]",
                    "[7,[[[3,7],[4,3]],[[6,3],[8,8]]]]",
                    "[[2,[[0,8],[3,4]]],[[[6,7],1],[7,[1,6]]]]",
                    "[[[[2,4],7],[6,[0,5]]],[[[6,8],[2,8]],[[2,1],[4,5]]]]",
                    "[7,[5,[[3,8],[1,4]]]]",
                    "[[2,[2,2]],[8,[8,1]]]",
                    "[2,9]",
                    "[1,[[[9,3],9],[[9,0],[0,7]]]]",
                    "[[[5,[7,4]],7],1]",
                    "[[[[4,2],2],6],[8,7]]",
                ],
                "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]",
            ),
        ]);
        for (num_strs, after_sum_str) in tests {
            let sum = num_strs
                .into_iter()
                .map(|s| SnailfishNumber::parse(s).0)
                .reduce(|acc, num| acc.add(num))
                .unwrap();
            let (expected_sum, _) = SnailfishNumber::parse(after_sum_str);
            assert_eq!(sum, expected_sum);
        }
    }

    #[test]
    fn magnitude() {
        let tests = HashMap::from([
            ("[9,1]", 29),
            ("[[9,1],[1,9]]", 129),
            ("[[1,2],[[3,4],5]]", 143),
            ("[[[[0,7],4],[[7,8],[6,0]]],[8,1]]", 1384),
            ("[[[[1,1],[2,2]],[3,3]],[4,4]]", 445),
            ("[[[[3,0],[5,3]],[4,4]],[5,5]]", 791),
            ("[[[[5,0],[7,4]],[5,5]],[6,6]]", 1137),
            (
                "[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]",
                3488,
            ),
        ]);
        for (s, expected_mag) in tests {
            let (sf_num, _) = SnailfishNumber::parse(s);
            let mag = sf_num.magnitude();
            assert_eq!(mag, expected_mag);
        }
    }
}
// ===============================================================================
// Authors: AFRL/RQQA
// Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
//
// Copyright (c) 2017 Government of the United State of America, as represented by
// the Secretary of the Air Force. No copyright is claimed in the United States under
// Title 17, U.S. Code. All Other Rights Reserved.
// ===============================================================================
// This file was auto-created by LmcpGen. Modifications will be overwritten.

use avtas::lmcp::{Error, ErrorType, Lmcp, LmcpSubscription, SrcLoc, Struct, StructInfo};
use std::fmt::Debug;

// LMCP message struct for "uxas.messages.task.TaskImplementationRequest".
// NOTE(review): generated code — change the LMCP definition and re-run LmcpGen
// rather than editing this file by hand.
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct TaskImplementationRequest {
    pub request_id: i64,
    pub corresponding_automation_request_id: i64,
    pub starting_waypoint_id: i64,
    pub vehicle_id: i64,
    pub start_position: Box<::afrl::cmasi::location3d::Location3DT>,
    pub start_heading: f32,
    pub start_time: i64,
    pub region_id: i64,
    pub task_id: i64,
    pub option_id: i64,
    pub time_threshold: i64,
    pub neighbor_locations: Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>>,
}

// Field-by-field equality; the leading `true &&` keeps the generated chain uniform.
impl PartialEq for TaskImplementationRequest {
    fn eq(&self, _other: &TaskImplementationRequest) -> bool {
        true && &self.request_id == &_other.request_id
            && &self.corresponding_automation_request_id == &_other.corresponding_automation_request_id
            && &self.starting_waypoint_id == &_other.starting_waypoint_id
            && &self.vehicle_id == &_other.vehicle_id
            && &self.start_position == &_other.start_position
            && &self.start_heading == &_other.start_heading
            && &self.start_time == &_other.start_time
            && &self.region_id == &_other.region_id
            && &self.task_id == &_other.task_id
            && &self.option_id == &_other.option_id
            && &self.time_threshold == &_other.time_threshold
            && &self.neighbor_locations == &_other.neighbor_locations
    }
}

impl LmcpSubscription for TaskImplementationRequest {
    // Topic string used when subscribing on the UxAS message bus.
    fn subscription() -> &'static str {
        "uxas.messages.task.TaskImplementationRequest"
    }
}

impl Struct for TaskImplementationRequest {
    // Series/version/type identifiers baked in by LmcpGen for this message.
    fn struct_info() -> StructInfo {
        StructInfo {
            exist: 1,
            series: 6149757930721443840u64,
            version: 7,
            struct_ty: 14,
        }
    }
}

impl Lmcp for TaskImplementationRequest {
    // Serializes the struct header followed by every field, in declaration order.
    fn ser(&self, buf: &mut [u8]) -> Result<usize, Error> {
        let mut pos = 0;
        {
            let x = Self::struct_info().ser(buf)?;
            pos += x;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.request_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.corresponding_automation_request_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.starting_waypoint_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.vehicle_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.start_position.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.start_heading.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.start_time.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.region_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.task_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.option_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.time_threshold.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos..));
            let writeb: usize = self.neighbor_locations.ser(r)?;
            pos += writeb;
        }
        Ok(pos)
    }

    // Mirror of `ser`: validates the struct header, then reads each field in order.
    fn deser(buf: &[u8]) -> Result<(TaskImplementationRequest, usize), Error> {
        let mut pos = 0;
        let (si, u) = StructInfo::deser(buf)?;
        pos += u;
        if si == TaskImplementationRequest::struct_info() {
            let mut out: TaskImplementationRequest = Default::default();
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.request_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.corresponding_automation_request_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.starting_waypoint_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.vehicle_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (Box<::afrl::cmasi::location3d::Location3DT>, usize) = Lmcp::deser(r)?;
                out.start_position = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.start_heading = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.start_time = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.region_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.task_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.option_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.time_threshold = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos..));
                let (x, readb): (Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>>, usize) = Lmcp::deser(r)?;
                out.neighbor_locations = x;
                pos += readb;
            }
            Ok((out, pos))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }

    // Total wire size: 15 bytes of generated header overhead plus every field.
    fn size(&self) -> usize {
        let mut size = 15;
        size += self.request_id.size();
        size += self.corresponding_automation_request_id.size();
        size += self.starting_waypoint_id.size();
        size += self.vehicle_id.size();
        size += self.start_position.size();
        size += self.start_heading.size();
        size += self.start_time.size();
        size += self.region_id.size();
        size += self.task_id.size();
        size += self.option_id.size();
        size += self.time_threshold.size();
        size += self.neighbor_locations.size();
        size
    }
}

// Object-safe accessor trait; the default `as_*` downcasts return `None` and are
// overridden by the concrete message type below.
pub trait TaskImplementationRequestT: Debug + Send {
    fn as_uxas_messages_task_task_implementation_request(&self) -> Option<&TaskImplementationRequest> { None }
    fn as_mut_uxas_messages_task_task_implementation_request(&mut self) -> Option<&mut TaskImplementationRequest> { None }
    fn request_id(&self) -> i64;
    fn request_id_mut(&mut self) -> &mut i64;
    fn corresponding_automation_request_id(&self) -> i64;
    fn corresponding_automation_request_id_mut(&mut self) -> &mut i64;
    fn starting_waypoint_id(&self) -> i64;
    fn starting_waypoint_id_mut(&mut self) -> &mut i64;
    fn vehicle_id(&self) -> i64;
    fn vehicle_id_mut(&mut self) -> &mut i64;
    fn start_position(&self) -> &Box<::afrl::cmasi::location3d::Location3DT>;
    fn start_position_mut(&mut self) -> &mut Box<::afrl::cmasi::location3d::Location3DT>;
    fn start_heading(&self) -> f32;
    fn start_heading_mut(&mut self) -> &mut f32;
    fn start_time(&self) -> i64;
    fn start_time_mut(&mut self) -> &mut i64;
    fn region_id(&self) -> i64;
    fn region_id_mut(&mut self) -> &mut i64;
    fn task_id(&self) -> i64;
    fn task_id_mut(&mut self) -> &mut i64;
    fn option_id(&self) -> i64;
    fn option_id_mut(&mut self) -> &mut i64;
    fn time_threshold(&self) -> i64;
    fn time_threshold_mut(&mut self) -> &mut i64;
    fn neighbor_locations(&self) -> &Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>>;
    fn neighbor_locations_mut(&mut self) -> &mut Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>>;
}

// Trait-object helpers: each delegates through the `as_*` downcast; the
// `unreachable!()` arms rely on the concrete impl always overriding it.
impl Clone for Box<TaskImplementationRequestT> {
    fn clone(&self) -> Box<TaskImplementationRequestT> {
        if let Some(x) = TaskImplementationRequestT::as_uxas_messages_task_task_implementation_request(self.as_ref()) {
            Box::new(x.clone())
        } else {
            unreachable!()
        }
    }
}

impl Default for Box<TaskImplementationRequestT> {
    fn default() -> Box<TaskImplementationRequestT> {
        Box::new(TaskImplementationRequest::default())
    }
}

impl PartialEq for Box<TaskImplementationRequestT> {
    fn eq(&self, other: &Box<TaskImplementationRequestT>) -> bool {
        if let (Some(x), Some(y)) = (
            TaskImplementationRequestT::as_uxas_messages_task_task_implementation_request(self.as_ref()),
            TaskImplementationRequestT::as_uxas_messages_task_task_implementation_request(other.as_ref()),
        ) {
            x == y
        } else {
            false
        }
    }
}

impl Lmcp for Box<TaskImplementationRequestT> {
    fn ser(&self, buf: &mut [u8]) -> Result<usize, Error> {
        if let Some(x) = TaskImplementationRequestT::as_uxas_messages_task_task_implementation_request(self.as_ref()) {
            x.ser(buf)
        } else {
            unreachable!()
        }
    }

    fn deser(buf: &[u8]) -> Result<(Box<TaskImplementationRequestT>, usize), Error> {
        let (si, _) = StructInfo::deser(buf)?;
        if si == TaskImplementationRequest::struct_info() {
            let (x, readb) = TaskImplementationRequest::deser(buf)?;
            Ok((Box::new(x), readb))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }

    fn size(&self) -> usize {
        if let Some(x) = TaskImplementationRequestT::as_uxas_messages_task_task_implementation_request(self.as_ref()) {
            x.size()
        } else {
            unreachable!()
        }
    }
}

// Concrete accessor implementation: plain field reads and `&mut` projections.
impl TaskImplementationRequestT for TaskImplementationRequest {
    fn as_uxas_messages_task_task_implementation_request(&self) -> Option<&TaskImplementationRequest> { Some(self) }
    fn as_mut_uxas_messages_task_task_implementation_request(&mut self) -> Option<&mut TaskImplementationRequest> { Some(self) }
    fn request_id(&self) -> i64 { self.request_id }
    fn request_id_mut(&mut self) -> &mut i64 { &mut self.request_id }
    fn corresponding_automation_request_id(&self) -> i64 { self.corresponding_automation_request_id }
    fn corresponding_automation_request_id_mut(&mut self) -> &mut i64 { &mut self.corresponding_automation_request_id }
    fn starting_waypoint_id(&self) -> i64 { self.starting_waypoint_id }
    fn starting_waypoint_id_mut(&mut self) -> &mut i64 { &mut self.starting_waypoint_id }
    fn vehicle_id(&self) -> i64 { self.vehicle_id }
    fn vehicle_id_mut(&mut self) -> &mut i64 { &mut self.vehicle_id }
    fn start_position(&self) -> &Box<::afrl::cmasi::location3d::Location3DT> { &self.start_position }
    fn start_position_mut(&mut self) -> &mut Box<::afrl::cmasi::location3d::Location3DT> { &mut self.start_position }
    fn start_heading(&self) -> f32 { self.start_heading }
    fn start_heading_mut(&mut self) -> &mut f32 { &mut self.start_heading }
    fn start_time(&self) -> i64 { self.start_time }
    fn start_time_mut(&mut self) -> &mut i64 { &mut self.start_time }
    fn region_id(&self) -> i64 { self.region_id }
    fn region_id_mut(&mut self) -> &mut i64 { &mut self.region_id }
    fn task_id(&self) -> i64 { self.task_id }
    fn task_id_mut(&mut self) -> &mut i64 { &mut self.task_id }
    fn option_id(&self) -> i64 { self.option_id }
    fn option_id_mut(&mut self) -> &mut i64 { &mut self.option_id }
    fn time_threshold(&self) -> i64 { self.time_threshold }
    fn time_threshold_mut(&mut self) -> &mut i64 { &mut self.time_threshold }
    fn neighbor_locations(&self) -> &Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>> { &self.neighbor_locations }
    fn neighbor_locations_mut(&mut self) -> &mut Vec<Box<::uxas::messages::task::planning_state::PlanningStateT>> { &mut self.neighbor_locations }
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use quickcheck::*;

    // Random instances for the quickcheck properties below.
    impl Arbitrary for TaskImplementationRequest {
        fn arbitrary<G: Gen>(_g: &mut G) -> TaskImplementationRequest {
            TaskImplementationRequest {
                request_id: Arbitrary::arbitrary(_g),
                corresponding_automation_request_id: Arbitrary::arbitrary(_g),
                starting_waypoint_id: Arbitrary::arbitrary(_g),
                vehicle_id: Arbitrary::arbitrary(_g),
                start_position: Box::new(::afrl::cmasi::location3d::Location3D::arbitrary(_g)),
                start_heading: Arbitrary::arbitrary(_g),
                start_time: Arbitrary::arbitrary(_g),
                region_id: Arbitrary::arbitrary(_g),
                task_id: Arbitrary::arbitrary(_g),
                option_id: Arbitrary::arbitrary(_g),
                time_threshold: Arbitrary::arbitrary(_g),
                neighbor_locations: Vec::<::uxas::messages::task::planning_state::PlanningState>::arbitrary(_g).into_iter().map(|x| Box::new(x) as Box<::uxas::messages::task::planning_state::PlanningStateT>).collect(),
            }
        }
    }

    quickcheck! {
        // `ser` must fill exactly `size()` bytes.
        fn serializes(x: TaskImplementationRequest) -> Result<TestResult, Error> {
            use std::u16;
            // Vector lengths are serialized as u16; longer inputs can't round-trip.
            if x.neighbor_locations.len() > (u16::MAX as usize) {
                return Ok(TestResult::discard());
            }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            Ok(TestResult::from_bool(sx == x.size()))
        }

        // `deser(ser(x))` must reproduce `x` and consume the same byte count.
        fn roundtrips(x: TaskImplementationRequest) -> Result<TestResult, Error> {
            use std::u16;
            if x.neighbor_locations.len() > (u16::MAX as usize) {
                return Ok(TestResult::discard());
            }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            let (y, sy) = TaskImplementationRequest::deser(&buf)?;
            Ok(TestResult::from_bool(sx == sy && x == y))
        }
    }
}
use std::fs::File;
use std::io::{self, BufWriter, Stdout, Write};
use std::path::{Path, PathBuf};

/// Destination for generated output: stdout, a file, or an in-memory buffer.
pub enum Output {
    Console(BufWriter<Stdout>),
    File(BufWriter<File>),
    Mem(Vec<u8>),
}

impl Output {
    /// Buffered writer over the process' stdout.
    pub fn console() -> Output {
        Output::Console(io::BufWriter::new(std::io::stdout()))
    }

    /// Creates (or truncates) the file at `path` behind a large write buffer.
    ///
    /// Takes `&Path` rather than `&PathBuf`; existing `&PathBuf` callers still
    /// work through deref coercion.
    ///
    /// # Errors
    /// Propagates any error from `File::create`.
    pub fn file(path: &Path) -> io::Result<Output> {
        Ok(Output::File(
            // Instead of typical 8kb buffer, we'll make it 800kb to speed up writing
            io::BufWriter::with_capacity(819200, std::fs::File::create(path)?),
        ))
    }

    /// In-memory sink, mainly useful for tests.
    pub fn mem() -> Output {
        Output::Mem(vec![])
    }
}

// Note: the impl previously declared an unused `'a` lifetime parameter.
impl Write for Output {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match self {
            Output::Console(w) => w.write(buf),
            Output::File(w) => w.write(buf),
            Output::Mem(v) => {
                // Append directly into the backing vector (replaces the old
                // `Vec::from` + `append` round trip); this cannot fail.
                v.extend_from_slice(buf);
                Ok(buf.len())
            }
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match self {
            Output::Console(w) => w.flush(),
            Output::File(w) => w.flush(),
            // Nothing buffered for the in-memory variant.
            Output::Mem(_) => Ok(()),
        }
    }
}
use std::marker::PhantomData;

use log::{error, warn};
use rodio::OutputStream;
#[cfg(feature = "profiler")]
use thread_profiler::profile_scope;

use amethyst_assets::AssetStorage;
use amethyst_core::ecs::{
    DispatcherBuilder, ParallelRunnable, Resources, System, SystemBuilder, SystemBundle, World,
};
use amethyst_error::Error;

use crate::{
    output::{init_output, Output},
    source::{Source, SourceHandle},
};

/// Bundle for [`DjSystem`]; initializes output and loads necessary resources.
///
/// This will initialize audio output by loading an [`OutputStream`] and an [`Output`]
/// to the world's resources. If no audio output devices are available in the system,
/// it will not load any of these resources.
#[derive(Debug)]
pub struct DjSystemBundle<F, R>
where
    F: FnMut(&mut R) -> Option<SourceHandle> + Send + Sync + 'static,
    R: Send + Sync + 'static,
{
    // Closure that picks the next track to play.
    f: F,
    // Marks the resource type `R` the closure reads; nothing stored at runtime.
    _marker: PhantomData<R>,
}

impl<F, R> DjSystemBundle<F, R>
where
    F: FnMut(&mut R) -> Option<SourceHandle> + Send + Sync + 'static,
    R: Send + Sync + 'static,
{
    /// Creates a new [`DjSystemBundle`] where `f` is a function which produces music [`SourceHandle`].
    pub fn new(f: F) -> Self {
        Self {
            f,
            _marker: PhantomData,
        }
    }
}

impl<F, R> SystemBundle for DjSystemBundle<F, R>
where
    F: FnMut(&mut R) -> Option<SourceHandle> + Send + Sync + 'static + Copy,
    R: Send + Sync + 'static,
{
    fn load(
        &mut self,
        _world: &mut World,
        resources: &mut Resources,
        builder: &mut DispatcherBuilder,
    ) -> Result<(), Error> {
        // Try to initialize output using the system's default audio device.
        if let Ok((stream, output)) = init_output() {
            // NOTE(review): `get_or_insert` keeps any output resource already
            // registered by another bundle — presumably intentional; confirm.
            resources.get_or_insert::<OutputStream>(stream);
            resources.get_or_insert::<Output>(output);
        } else {
            warn!("The default audio device is not available, sound will not work!");
        }
        builder.add_system(DjSystem {
            f: self.f,
            _phantom: PhantomData,
        });
        Ok(())
    }
}

/// Calls a closure if the `AudioSink` is empty.
#[derive(Debug, Clone)]
pub struct DjSystem<F, R>
where
    F: FnMut(&mut R) -> Option<SourceHandle> + Send + Sync,
    R: Send + Sync,
{
    // Produces the handle of the next source to queue.
    f: F,
    _phantom: std::marker::PhantomData<R>,
}

impl<F, R> System for DjSystem<F, R>
where
    F: FnMut(&mut R) -> Option<SourceHandle> + Send + Sync + 'static,
    R: Send + Sync + 'static,
{
    fn build(mut self) -> Box<dyn ParallelRunnable + 'static> {
        Box::new(
            SystemBuilder::new("DjSystem")
                .read_resource::<AssetStorage<Source>>()
                .read_resource::<Output>()
                .write_resource::<R>()
                .build(move |_commands, _world, (storage, output, res), _queries| {
                    #[cfg(feature = "profiler")]
                    profile_scope!("dj_system");
                    // NOTE(review): unwrap assumes a sink can always be spawned
                    // once an `Output` resource exists — TODO confirm against
                    // `Output::try_spawn_sink`'s failure modes.
                    let sink = output.try_spawn_sink().unwrap();
                    if sink.empty() {
                        // Only queue the next track once the current one finished.
                        if let Some(source) = (self.f)(res).and_then(|h| storage.get(&h)) {
                            if let Err(e) = sink.append(source, 1.0) {
                                error!("DJ cannot append source to sink. {}", e);
                            }
                        }
                    }
                }),
        )
    }
}
//! Tests auto-converted from "sass-spec/spec/core_functions/color" #[allow(unused)] use super::rsass; mod adjust_color; // From "sass-spec/spec/core_functions/color/adjust_hue.hrx" mod adjust_hue { #[allow(unused)] use super::rsass; #[test] fn above_max() { assert_eq!( rsass( "a {b: adjust-hue(red, 540)}\ \n" ) .unwrap(), "a {\ \n b: aqua;\ \n}\ \n" ); } #[test] fn alpha() { assert_eq!( rsass( "a {b: adjust-hue(rgba(red, 0.1), 359)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 4, 0.1);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. // Ignoring "hue", error tests are not supported yet. } } #[test] fn fraction() { assert_eq!( rsass( "a {b: adjust-hue(red, 0.5)}\ \n" ) .unwrap(), "a {\ \n b: #ff0200;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: adjust-hue(red, 359)}\ \n" ) .unwrap(), "a {\ \n b: #ff0004;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: adjust-hue(red, 123)}\ \n" ) .unwrap(), "a {\ \n b: #00ff0d;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: adjust-hue(blue, 0)}\ \n" ) .unwrap(), "a {\ \n b: blue;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: adjust-hue($color: red, $degrees: 123)}\ \n" ) .unwrap(), "a {\ \n b: #00ff0d;\ \n}\ \n" ); } #[test] fn negative() { assert_eq!( rsass( "a {b: adjust-hue(red, -180)}\ \n" ) .unwrap(), "a {\ \n b: aqua;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/alpha.hrx" mod alpha { #[allow(unused)] use super::rsass; mod color { #[allow(unused)] use super::rsass; #[test] fn max() { assert_eq!( rsass( "a {b: alpha(red)}\ \n" ) .unwrap(), "a {\ \n b: 1;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: alpha(rgba(red, 0.42))}\ \n" ) .unwrap(), "a {\ \n b: 0.42;\ \n}\ \n" ); } #[test] fn min() { 
assert_eq!( rsass( "a {b: alpha(rgba(red, 0))}\ \n" ) .unwrap(), "a {\ \n b: 0;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: alpha($color: rgba(red, 0.73))}\ \n" ) .unwrap(), "a {\ \n b: 0.73;\ \n}\ \n" ); } } mod error { #[allow(unused)] use super::rsass; // Ignoring "quoted_string", error tests are not supported yet. // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. mod unquoted_string { #[allow(unused)] use super::rsass; // Ignoring "no_equals", error tests are not supported yet. // Ignoring "non_identifier_before_equals", error tests are not supported yet. } } mod filter { #[allow(unused)] use super::rsass; #[test] fn multi_args() { assert_eq!( rsass( "a {b: alpha(c=d, e=f, g=h)}\ \n" ) .unwrap(), "a {\ \n b: alpha(c=d, e=f, g=h);\ \n}\ \n" ); } #[test] fn one_arg() { assert_eq!( rsass( "a {b: alpha(c=d)}\ \n" ) .unwrap(), "a {\ \n b: alpha(c=d);\ \n}\ \n" ); } #[test] fn space_before_equals() { assert_eq!( rsass( "a {b: alpha(unquote(\"c = d\"))}\ \n" ) .unwrap(), "a {\ \n b: alpha(c = d);\ \n}\ \n" ); } } mod opacity { #[allow(unused)] use super::rsass; #[test] fn filter() { assert_eq!( rsass( "a {b: opacity(10%)}\ \n" ) .unwrap(), "a {\ \n b: opacity(10%);\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: opacity($color: rgba(red, 0.2))}\ \n" ) .unwrap(), "a {\ \n b: 0.2;\ \n}\ \n" ); } #[test] fn positional() { assert_eq!( rsass( "a {b: opacity(rgba(red, 0.2))}\ \n" ) .unwrap(), "a {\ \n b: 0.2;\ \n}\ \n" ); } } } // From "sass-spec/spec/core_functions/color/blackness.hrx" mod blackness { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] #[ignore] // wrong result fn fraction() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(color.hwb(0, 0%, 0.5%))}\ \n" ) .unwrap(), "a {\ \n b: 0.3921568627%;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(black)}\ \n" ) .unwrap(), "a {\ \n b: 100%;\ \n}\ \n" ); } mod middle { #[allow(unused)] use super::rsass; #[test] #[ignore] // wrong result fn half_whiteness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(color.hwb(0, 50%, 50%))}\ \n" ) .unwrap(), "a {\ \n b: 49.8039215686%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn high_whiteness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(color.hwb(0, 70%, 70%))}\ \n" ) .unwrap(), "a {\ \n b: 49.8039215686%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn zero_whiteness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(color.hwb(0, 0%, 50%))}\ \n" ) .unwrap(), "a {\ \n b: 49.8039215686%;\ \n}\ \n" ); } } #[test] fn min() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness(white)}\ \n" ) .unwrap(), "a {\ \n b: 0%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn named() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.blackness($color: color.hwb(0, 0%, 42%))}\ \n" ) .unwrap(), "a {\ \n b: 41.9607843137%;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/blue.hrx" mod blue { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] fn max() { assert_eq!( rsass( "a {b: blue(rgb(0, 0, 255))}\ \n" ) .unwrap(), "a {\ \n b: 255;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: blue(rgb(0, 0, 123))}\ \n" ) .unwrap(), "a {\ \n b: 123;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: blue(rgb(0, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 0;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: blue($color: rgb(0, 0, 234))}\ \n" ) .unwrap(), "a {\ \n b: 234;\ \n}\ \n" ); } } mod change_color; // From "sass-spec/spec/core_functions/color/complement.hrx" mod complement { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: complement(rgba(turquoise, 0.7))}\ \n" ) .unwrap(), "a {\ \n b: rgba(224, 64, 80, 0.7);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. } mod grayscale { #[allow(unused)] use super::rsass; #[test] fn black() { assert_eq!( rsass( "a {b: complement(black)}\ \n" ) .unwrap(), "a {\ \n b: black;\ \n}\ \n" ); } #[test] fn gray() { assert_eq!( rsass( "a {b: complement(gray)}\ \n" ) .unwrap(), "a {\ \n b: gray;\ \n}\ \n" ); } #[test] fn white() { assert_eq!( rsass( "a {b: complement(white)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } } #[test] fn named() { assert_eq!( rsass( "a {b: complement($color: red)}\ \n" ) .unwrap(), "a {\ \n b: aqua;\ \n}\ \n" ); } #[test] fn red() { assert_eq!( rsass( "a {b: complement(red)}\ \n" ) .unwrap(), "a {\ \n b: aqua;\ \n}\ \n" ); } #[test] fn turquoise() { assert_eq!( rsass( "a {b: complement(turquoise)}\ \n" ) .unwrap(), "a {\ \n b: #e04050;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/darken.hrx" mod darken { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: darken(rgba(red, 0.2), 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(0, 0, 0, 
0.2);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. // Ignoring "lightness", error tests are not supported yet. } } #[test] fn fraction() { assert_eq!( rsass( "a {b: darken(red, 0.5%)}\ \n" ) .unwrap(), "a {\ \n b: #fc0000;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: darken(red, 100%)}\ \n" ) .unwrap(), "a {\ \n b: black;\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: darken(red, 50%)}\ \n" ) .unwrap(), "a {\ \n b: black;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: darken(red, 14%)}\ \n" ) .unwrap(), "a {\ \n b: #b80000;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: darken(red, 0%)}\ \n" ) .unwrap(), "a {\ \n b: red;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: darken($color: red, $amount: 14%)}\ \n" ) .unwrap(), "a {\ \n b: #b80000;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/desaturate.hrx" mod desaturate { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: desaturate(rgba(plum, 0.3), 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(191, 191, 191, 0.3);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } mod one_arg { #[allow(unused)] use super::rsass; // Ignoring "test_type", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. 
mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. // Ignoring "lightness", error tests are not supported yet. } } #[test] fn max() { assert_eq!( rsass( "a {b: desaturate(plum, 100%)}\ \n" ) .unwrap(), "a {\ \n b: #bfbfbf;\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: desaturate(plum, 48%)}\ \n" ) .unwrap(), "a {\ \n b: #bfbfbf;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: desaturate(plum, 14%)}\ \n" ) .unwrap(), "a {\ \n b: #d4a9d4;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: desaturate(plum, 0%)}\ \n" ) .unwrap(), "a {\ \n b: plum;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: desaturate($color: plum, $amount: 14%)}\ \n" ) .unwrap(), "a {\ \n b: #d4a9d4;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/fade_in.hrx" mod fade_in { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "alpha", error tests are not supported yet. // Ignoring "color", error tests are not supported yet. 
} } #[test] fn max() { assert_eq!( rsass( "a {b: fade-in(rgba(red, 0.5), 1)}\ \n" ) .unwrap(), "a {\ \n b: red;\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: fade-in(rgba(red, 0.5), 0.5)}\ \n" ) .unwrap(), "a {\ \n b: red;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: fade-in(rgba(red, 0.5), 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.64);\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: fade-in(rgba(red, 0.5), 0)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.5);\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: fade-in($color: rgba(red, 0.5), $amount: 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.64);\ \n}\ \n" ); } #[test] fn opacify() { assert_eq!( rsass( "a {b: opacify($color: rgba(red, 0.5), $amount: 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.64);\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/fade_out.hrx" mod fade_out { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "alpha", error tests are not supported yet. // Ignoring "color", error tests are not supported yet. 
} } #[test] fn max() { assert_eq!( rsass( "a {b: fade-out(rgba(red, 0.5), 1)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0);\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: fade-out(rgba(red, 0.5), 0.5)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0);\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: fade-out(rgba(red, 0.5), 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.36);\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: fade-out(rgba(red, 0.5), 0)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.5);\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: fade-out($color: rgba(red, 0.5), $amount: 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.36);\ \n}\ \n" ); } #[test] fn transparentize() { assert_eq!( rsass( "a {b: transparentize($color: rgba(red, 0.5), $amount: 0.14)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 0, 0, 0.36);\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/grayscale.hrx" mod grayscale { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: grayscale(rgba(#633736, 0.3))}\ \n" ) .unwrap(), "a {\ \n b: rgba(77, 77, 77, 0.3);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] fn max_saturation() { assert_eq!( rsass( "a {b: grayscale(red)}\ \n" ) .unwrap(), "a {\ \n b: gray;\ \n}\ \n" ); } #[test] fn mid_saturation() { assert_eq!( rsass( "a {b: grayscale(#633736)}\ \n" ) .unwrap(), "a {\ \n b: #4d4d4d;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: grayscale($color: white)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } mod no_saturation { #[allow(unused)] use super::rsass; #[test] fn black() { assert_eq!( rsass( "a {b: grayscale(black)}\ \n" ) .unwrap(), "a {\ \n b: black;\ \n}\ \n" ); } #[test] fn gray() { assert_eq!( rsass( "a {b: grayscale(#494949)}\ \n" ) .unwrap(), "a {\ \n b: #494949;\ \n}\ \n" ); } #[test] fn white() { assert_eq!( rsass( "a {b: grayscale(white)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } } #[test] fn number() { assert_eq!( rsass( "// A number should produce a plain function string, for CSS filter functions.\ \na {b: grayscale(15%)}\ \n" ) .unwrap(), "a {\ \n b: grayscale(15%);\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/green.hrx" mod green { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] fn max() { assert_eq!( rsass( "a {b: green(rgb(0, 255, 0))}\ \n" ) .unwrap(), "a {\ \n b: 255;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: green(rgb(0, 123, 0))}\ \n" ) .unwrap(), "a {\ \n b: 123;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: green(rgb(0, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 0;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: green($color: rgb(0, 234, 0))}\ \n" ) .unwrap(), "a {\ \n b: 234;\ \n}\ \n" ); } } mod hsl; mod hsla; // From "sass-spec/spec/core_functions/color/hue.hrx" mod hue { #[allow(unused)] use super::rsass; #[test] #[ignore] // wrong result fn above_max() { assert_eq!( rsass( "a {b: hue(hsl(540, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 180deg;\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. } #[test] #[ignore] // wrong result fn fraction() { assert_eq!( rsass( "a {b: hue(hsl(0.5, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 0.5deg;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn max() { assert_eq!( rsass( "a {b: hue(hsl(359, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 359deg;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn middle() { assert_eq!( rsass( "a {b: hue(hsl(123, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 123deg;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: hue(hsl(0, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 0deg;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn named() { assert_eq!( rsass( "a {b: hue($color: hsl(234, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 234deg;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn negative() { assert_eq!( rsass( "a {b: hue(hsl(-180, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 180deg;\ \n}\ \n" ); } } mod hwb; // From "sass-spec/spec/core_functions/color/ie_hex_str.hrx" mod ie_hex_str { 
#[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. } #[test] fn leading_zero() { assert_eq!( rsass( "a {b: ie-hex-str(rgba(#020304, 0.003))}\ \n" ) .unwrap(), "a {\ \n b: #01020304;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: ie-hex-str($color: #daddee)}\ \n" ) .unwrap(), "a {\ \n b: #FFDADDEE;\ \n}\ \n" ); } #[test] fn opaque() { assert_eq!( rsass( "a {b: ie-hex-str(#daddee)}\ \n" ) .unwrap(), "a {\ \n b: #FFDADDEE;\ \n}\ \n" ); } #[test] fn translucent() { assert_eq!( rsass( "a {b: ie-hex-str(rgba(#daddee, 0.3))}\ \n" ) .unwrap(), "a {\ \n b: #4DDADDEE;\ \n}\ \n" ); } #[test] fn transparent() { assert_eq!( rsass( "a {b: ie-hex-str(rgba(turquoise, 0))}\ \n" ) .unwrap(), "a {\ \n b: #0040E0D0;\ \n}\ \n" ); } #[test] fn test_type() { assert_eq!( rsass( "a {b: type-of(ie-hex-str(#daddee))}\ \n" ) .unwrap(), "a {\ \n b: string;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/invert.hrx" mod invert { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: invert(rgba(turquoise, 0.4))}\ \n" ) .unwrap(), "a {\ \n b: rgba(191, 31, 47, 0.4);\ \n}\ \n" ); } #[test] fn black() { assert_eq!( rsass( "a {b: invert(black)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "number_with_weight", error tests are not supported yet. // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. 
// Ignoring "weight", error tests are not supported yet. } } #[test] fn gray() { assert_eq!( rsass( "a {b: invert(gray)}\ \n" ) .unwrap(), "a {\ \n b: #7f7f7f;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: invert($color: turquoise, $weight: 0%)}\ \n" ) .unwrap(), "a {\ \n b: turquoise;\ \n}\ \n" ); } #[test] fn number() { assert_eq!( rsass( "a {b: invert(10%)}\ \n" ) .unwrap(), "a {\ \n b: invert(10%);\ \n}\ \n" ); } #[test] fn turquoise() { assert_eq!( rsass( "a {b: invert(turquoise)}\ \n" ) .unwrap(), "a {\ \n b: #bf1f2f;\ \n}\ \n" ); } mod weighted { #[allow(unused)] use super::rsass; #[test] fn high() { assert_eq!( rsass( "a {b: invert(turquoise, 92%)}\ \n" ) .unwrap(), "a {\ \n b: #b52e3c;\ \n}\ \n" ); } #[test] fn low() { assert_eq!( rsass( "a {b: invert(turquoise, 23%)}\ \n" ) .unwrap(), "a {\ \n b: #5db4ab;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: invert(turquoise, 100%)}\ \n" ) .unwrap(), "a {\ \n b: #bf1f2f;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: invert(turquoise, 50%)}\ \n" ) .unwrap(), "a {\ \n b: gray;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: invert(turquoise, 0%)}\ \n" ) .unwrap(), "a {\ \n b: turquoise;\ \n}\ \n" ); } } #[test] fn white() { assert_eq!( rsass( "a {b: invert(white)}\ \n" ) .unwrap(), "a {\ \n b: black;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/lighten.hrx" mod lighten { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: lighten(rgba(red, 0.4), 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 255, 255, 0.4);\ \n}\ \n" ); } mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. 
mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. // Ignoring "lightness", error tests are not supported yet. } } #[test] fn fraction() { assert_eq!( rsass( "a {b: lighten(red, 0.5%)}\ \n" ) .unwrap(), "a {\ \n b: #ff0303;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: lighten(red, 100%)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: lighten(red, 50%)}\ \n" ) .unwrap(), "a {\ \n b: white;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: lighten(red, 14%)}\ \n" ) .unwrap(), "a {\ \n b: #ff4747;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: lighten(red, 0%)}\ \n" ) .unwrap(), "a {\ \n b: red;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: lighten($color: red, $amount: 14%)}\ \n" ) .unwrap(), "a {\ \n b: #ff4747;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/lightness.hrx" mod lightness { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] fn fraction() { assert_eq!( rsass( "a {b: lightness(hsl(0, 100%, 0.5%))}\ \n" ) .unwrap(), "a {\ \n b: 0.5%;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: lightness(hsl(0, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 100%;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: lightness(hsl(0, 100%, 50%))}\ \n" ) .unwrap(), "a {\ \n b: 50%;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: lightness(hsl(0, 100%, 0%))}\ \n" ) .unwrap(), "a {\ \n b: 0%;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: lightness($color: hsl(0, 100%, 42%))}\ \n" ) .unwrap(), "a {\ \n b: 42%;\ \n}\ \n" ); } } // From "sass-spec/spec/core_functions/color/mix.hrx" mod mix { #[allow(unused)] use super::rsass; mod alpha { #[allow(unused)] use super::rsass; #[test] fn even() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.3), rgba(#0144bf, 0.3))}\ \n" ) .unwrap(), "a {\ \n b: rgba(73, 147, 151, 0.3);\ \n}\ \n" ); } #[test] fn first() { assert_eq!( rsass( "a {b: mix(#91e16f, transparent)}\ \n" ) .unwrap(), "a {\ \n b: rgba(145, 225, 111, 0.5);\ \n}\ \n" ); } #[test] fn firstwards() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.8), rgba(#0144bf, 0.3))}\ \n" ) .unwrap(), "a {\ \n b: rgba(109, 186, 131, 0.55);\ \n}\ \n" ); } #[test] fn last() { assert_eq!( rsass( "a {b: mix(transparent, #0144bf)}\ \n" ) .unwrap(), "a {\ \n b: rgba(1, 68, 191, 0.5);\ \n}\ \n" ); } #[test] fn lastwards() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.4), rgba(#0144bf, 0.9))}\ \n" ) .unwrap(), "a {\ \n b: rgba(37, 107, 171, 0.65);\ \n}\ \n" ); } } mod both_weights { #[allow(unused)] use super::rsass; #[test] fn contradiction() { assert_eq!( rsass( "// When we weight entirely towards a transparent color, the formula for\ \n// computing the combined alpha would divide by zero, so we just return\ \n// transparent as a special case.\ \na {b: mix(transparent, #0144bf, 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(0, 0, 0, 0);\ \n}\ \n" ); } mod mixed { 
#[allow(unused)] use super::rsass; #[test] fn firstwards() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.8), rgba(#0144bf, 0.3), 63%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(121, 199, 124, 0.615);\ \n}\ \n" ); } #[test] fn lastwards() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.2), rgba(#0144bf, 0.7), 42%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(29, 99, 175, 0.49);\ \n}\ \n" ); } } mod transparent { #[allow(unused)] use super::rsass; #[test] fn first() { assert_eq!( rsass( "a {b: mix(transparent, #0144bf, 70%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(1, 68, 191, 0.3);\ \n}\ \n" ); } #[test] fn last() { assert_eq!( rsass( "a {b: mix(#91e16f, transparent, 70%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(145, 225, 111, 0.7);\ \n}\ \n" ); } } mod weighted { #[allow(unused)] use super::rsass; #[test] fn first() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.2), rgba(#0144bf, 0.7), 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(145, 225, 111, 0.2);\ \n}\ \n" ); } #[test] fn last() { assert_eq!( rsass( "a {b: mix(rgba(#91e16f, 0.2), rgba(#0144bf, 0.7), 0%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(1, 68, 191, 0.7);\ \n}\ \n" ); } } } mod error { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color1", error tests are not supported yet. // Ignoring "color2", error tests are not supported yet. // Ignoring "weight", error tests are not supported yet. 
} } mod explicit_weight { #[allow(unused)] use super::rsass; #[test] fn even() { assert_eq!( rsass( "a {b: mix(#91e16f, #0144bf, 50%)}\ \n" ) .unwrap(), "a {\ \n b: #499397;\ \n}\ \n" ); } #[test] fn first() { assert_eq!( rsass( "a {b: mix(#91e16f, #0144bf, 100%)}\ \n" ) .unwrap(), "a {\ \n b: #91e16f;\ \n}\ \n" ); } #[test] fn firstwards() { assert_eq!( rsass( "a {b: mix(#91e16f, #0144bf, 92%)}\ \n" ) .unwrap(), "a {\ \n b: #85d475;\ \n}\ \n" ); } #[test] fn last() { assert_eq!( rsass( "a {b: mix(#91e16f, #0144bf, 0%)}\ \n" ) .unwrap(), "a {\ \n b: #0144bf;\ \n}\ \n" ); } #[test] fn lastwards() { assert_eq!( rsass( "a {b: mix(#91e16f, #0144bf, 43%)}\ \n" ) .unwrap(), "a {\ \n b: #3f889d;\ \n}\ \n" ); } } #[test] fn named() { assert_eq!( rsass( "a {b: mix($color1: #91e16f, $color2: #0144bf, $weight: 92%)}\ \n" ) .unwrap(), "a {\ \n b: #85d475;\ \n}\ \n" ); } mod unweighted { #[allow(unused)] use super::rsass; #[test] fn average() { assert_eq!( rsass( "// All channels should be averaged across the two colors.\ \na {b: mix(#91e16f, #0144bf)}\ \n" ) .unwrap(), "a {\ \n b: #499397;\ \n}\ \n" ); } #[test] fn identical() { assert_eq!( rsass( "// If two channels have the same values, they should be the same in the output.\ \na {b: mix(#123456, #123456)}\ \n" ) .unwrap(), "a {\ \n b: #123456;\ \n}\ \n" ); } #[test] fn min_and_max() { assert_eq!( rsass( "// Each channel becomes the average of 255 and 0, which is 128 = 0xAA.\ \na {b: mix(#ff00ff, #00ff00)}\ \n" ) .unwrap(), "a {\ \n b: gray;\ \n}\ \n" ); } } } // From "sass-spec/spec/core_functions/color/red.hrx" mod red { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] fn max() { assert_eq!( rsass( "a {b: red(rgb(255, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 255;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: red(rgb(123, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 123;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: red(rgb(0, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 0;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: red($color: rgb(234, 0, 0))}\ \n" ) .unwrap(), "a {\ \n b: 234;\ \n}\ \n" ); } } mod rgb; mod rgba; // From "sass-spec/spec/core_functions/color/saturate.hrx" mod saturate { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; mod one_arg { #[allow(unused)] use super::rsass; // Ignoring "test_type", error tests are not supported yet. } // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. mod two_args { #[allow(unused)] use super::rsass; mod bounds { #[allow(unused)] use super::rsass; // Ignoring "too_high", error tests are not supported yet. // Ignoring "too_low", error tests are not supported yet. } mod test_type { #[allow(unused)] use super::rsass; // Ignoring "color", error tests are not supported yet. // Ignoring "lightness", error tests are not supported yet. 
} } } mod one_arg { #[allow(unused)] use super::rsass; #[test] fn named() { assert_eq!( rsass( "a {b: saturate($amount: 50%)}\ \n" ) .unwrap(), "a {\ \n b: saturate(50%);\ \n}\ \n" ); } #[test] fn unit() { assert_eq!( rsass( "a {b: saturate(50%)}\ \n" ) .unwrap(), "a {\ \n b: saturate(50%);\ \n}\ \n" ); } #[test] fn unitless() { assert_eq!( rsass( "a {b: saturate(1)}\ \n" ) .unwrap(), "a {\ \n b: saturate(1);\ \n}\ \n" ); } } mod two_args { #[allow(unused)] use super::rsass; #[test] fn alpha() { assert_eq!( rsass( "a {b: saturate(rgba(plum, 0.5), 100%)}\ \n" ) .unwrap(), "a {\ \n b: rgba(255, 126, 255, 0.5);\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "a {b: saturate(plum, 100%)}\ \n" ) .unwrap(), "a {\ \n b: #ff7eff;\ \n}\ \n" ); } #[test] fn max_remaining() { assert_eq!( rsass( "a {b: saturate(plum, 53%)}\ \n" ) .unwrap(), "a {\ \n b: #ff7eff;\ \n}\ \n" ); } #[test] fn middle() { assert_eq!( rsass( "a {b: saturate(plum, 14%)}\ \n" ) .unwrap(), "a {\ \n b: #e697e6;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: saturate(plum, 0%)}\ \n" ) .unwrap(), "a {\ \n b: plum;\ \n}\ \n" ); } #[test] fn named() { assert_eq!( rsass( "a {b: saturate($color: plum, $amount: 14%)}\ \n" ) .unwrap(), "a {\ \n b: #e697e6;\ \n}\ \n" ); } } } // From "sass-spec/spec/core_functions/color/saturation.hrx" mod saturation { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] #[ignore] // wrong result fn fraction() { assert_eq!( rsass( "a {b: saturation(hsl(0, 0.5%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 0.5%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn max() { assert_eq!( rsass( "a {b: saturation(hsl(0, 100%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 100%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn middle() { assert_eq!( rsass( "a {b: saturation(hsl(0, 50%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 50%;\ \n}\ \n" ); } #[test] fn min() { assert_eq!( rsass( "a {b: saturation(hsl(0, 0%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 0%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn named() { assert_eq!( rsass( "a {b: saturation($color: hsl(0, 42%, 100%))}\ \n" ) .unwrap(), "a {\ \n b: 42%;\ \n}\ \n" ); } } mod scale_color; // From "sass-spec/spec/core_functions/color/whiteness.hrx" mod whiteness { #[allow(unused)] use super::rsass; mod error { #[allow(unused)] use super::rsass; // Ignoring "too_few_args", error tests are not supported yet. // Ignoring "too_many_args", error tests are not supported yet. // Ignoring "test_type", error tests are not supported yet. 
} #[test] #[ignore] // wrong result fn fraction() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(color.hwb(0, 0.5%, 0%))}\ \n" ) .unwrap(), "a {\ \n b: 0.3921568627%;\ \n}\ \n" ); } #[test] fn max() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(white)}\ \n" ) .unwrap(), "a {\ \n b: 100%;\ \n}\ \n" ); } mod middle { #[allow(unused)] use super::rsass; #[test] #[ignore] // wrong result fn half_blackness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(color.hwb(0, 50%, 50%))}\ \n" ) .unwrap(), "a {\ \n b: 50.1960784314%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn high_blackness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(color.hwb(0, 70%, 70%))}\ \n" ) .unwrap(), "a {\ \n b: 50.1960784314%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn zero_blackness() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(color.hwb(0, 50%, 0%))}\ \n" ) .unwrap(), "a {\ \n b: 50.1960784314%;\ \n}\ \n" ); } } #[test] fn min() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness(black)}\ \n" ) .unwrap(), "a {\ \n b: 0%;\ \n}\ \n" ); } #[test] #[ignore] // wrong result fn named() { assert_eq!( rsass( "@use \'sass:color\';\ \na {b: color.whiteness($color: color.hwb(0, 42%, 0%))}\ \n" ) .unwrap(), "a {\ \n b: 41.9607843137%;\ \n}\ \n" ); } }
//! This is a generic module to work with side metadata (vs. in-object metadata)
//!
//! This module enables the implementation of a wide range of GC algorithms for VMs which do not provide (any/sufficient) in-object space for GC-specific metadata (e.g. marking bits, logging bit, etc.).
//!
//!
//! # Design
//!
//! MMTk side metadata is designed to be **generic**, and **space-** and **time-** efficient.
//!
//! It aims to support two categories of side metadata:
//!
//! 1. **Global** metadata bits which are plan-specific but common to all policies, and
//! 2. **Policy-specific** bits which are only used exclusively by certain policies.
//!
//! To support these categories, MMTk side metadata provides the following features:
//!
//! 1. The granularity of the source data (minimum data size) is configurable to $2^n$ bytes, where $n >= 0$.
//! 2. The number of metadata bits per source data unit is configurable to $2^m$ bits, where $m >= 0$.
//! 3. The total number of metadata bit-sets is constrained by the worst-case ratio of global and policy-specific metadata.
//! 4. Metadata space is only allocated on demand.
//! 5. Bulk-zeroing of metadata bits should be possible. For this, the memory space for each metadata bit-set is contiguous per chunk.
//!
//! MMTk side metadata is organized per chunk of data (each chunk is managed exclusively by one policy).
//! This means, when a new chunk is mapped, the side metadata for the whole chunk, which includes the global and policy-specific metadata, is also mapped.
//!
//!
//! # How to Use
//!
//! For each global side metadata bit-set, a constant object of the `SideMetadataSpec` struct should be created.
//!
//! For the first global side metadata bit-set:
//!
//! ```
//! const GLOBAL_META_1: SideMetadataSpec = SideMetadataSpec {
//!     scope: SideMetadataScope::Global,
//!     offset: 0,
//!     log_num_of_bits: b1,
//!     log_min_obj_size: s1,
//! };
//! ```
//!
//! Here, the number of bits per data is $2^b1$, and the minimum object size is $2^s1$.
//! The `offset` is a constant which shows the offset of the beginning of this metadata bit-set from the beginning of the metadata chunk.
//! For the first bit-set, `offset` is zero.
//!
//! Now, to add a second side metadata bit-set, offset needs to be calculated based on the first global bit-set:
//!
//! ```
//! const GLOBAL_META_2: SideMetadataSpec = SideMetadataSpec {
//!     scope: SideMetadataScope::Global,
//!     offset: meta_bytes_per_chunk(s1, b1),
//!     log_num_of_bits: b2,
//!     log_min_obj_size: s2,
//! };
//! ```
//!
//! where `meta_bytes_per_chunk` is a const function which calculates the offset based on `s` and `b` from the first global bit-set.
//!
//! A schematic of a sample metadata chunk looks like:
//!  _______________________________ <= offset-g1 = 0x0
//! |                               |
//! |           Global-1            |
//! |_____________________________| <= offset-g2 = meta_bytes_per_chunk(s1, b1)
//! |                               |
//! |           Global-2            |
//! |                               |
//! |_____________________________| <= offset-g3 = offset-g2 + meta_bytes_per_chunk(s2, b2)
//! |                               |
//! |          Not Mapped           |
//! |                               |
//! |_____________________________| <= offset-l1 = 4MB * Global_worst_case_ratio
//! |                               |
//! |       PolicySpecific-1        |
//! |                               |
//! |_____________________________| <= offset-l2 = offset-l1 + meta_bytes_per_chunk(s3, b3)
//! |                               |
//! |       PolicySpecific-2        |
//! |                               |
//! |_____________________________| <= offset-l3 = offset-l2 + meta_bytes_per_chunk(s4, b4)
//! |                               |
//! |          Not Mapped           |
//! |                               |
//! |                               |
//! |_____________________________| <= 4MB * (Global_WCR + PolicySpecific_WCR)
//!
//! So far, no metadata space is allocated.
//!
//! For this purpose, each plan should override `fn global_side_metadata_per_chunk(&self) -> usize;` to return the size of the global side metadata it needs per chunk. This can be calculated using the `meta_bytes_per_chunk` function.
//!
//! For the local metadata bit-sets, each policy needs to follow the same pattern as the global metadata, with two differences:
//!
//! 1. scope should be `SideMetadataScope::PolicySpecific`,
//! 2. each policy needs to override `fn local_side_metadata_per_chunk(&self) -> usize;`
//!
//! After mapping the metadata space, the following operations can be performed on the metadata:
//!
//! 1. atomic load
//! 2. atomic store
//! 3. atomic compare-and-exchange
//! 4. atomic fetch-and-add
//! 5. atomic fetch-and-sub
//! 6. load (non-atomic)
//! 7. store (non-atomic)
//! 8. bulk zeroing
//!

// Implementation modules; the public surface is re-exported from `global`.
mod constants;
mod global;
mod helpers;

pub use global::*;
pub(crate) use helpers::*;
use alloc::vec::Vec;

use crate::{buffer::Buffer, Renderer, VertexFormat};

/// GPU mesh data: one buffer per vertex attribute stream, plus an index
/// buffer and the layout metadata (strides and vertex formats) the renderer
/// needs to bind them.
pub struct Mesh {
    pub(crate) vertex_buffers: Vec<Buffer>,
    pub(crate) strides: Vec<usize>,
    pub(crate) index_buffer: Buffer,
    pub(crate) vertex_formats: Vec<VertexFormat>,
}

impl Mesh {
    /// Allocates GPU buffers from the renderer's pool and uploads the given
    /// vertex streams and index data into them.
    ///
    /// `vertex_data`, `strides` and `vertex_formats` are expected to describe
    /// the same set of attribute streams. Panics (via `unwrap`) if a buffer
    /// write fails.
    pub async fn new(
        renderer: &Renderer,
        vertex_data: &[&[u8]],
        strides: &[usize],
        index_data: &[u8],
        vertex_formats: Vec<VertexFormat>,
    ) -> Self {
        // One pooled GPU buffer per vertex stream, filled with its bytes.
        let mut vertex_buffers = Vec::with_capacity(vertex_data.len());
        for bytes in vertex_data {
            let vertex_buffer = renderer.buffer_pool.alloc(bytes.len());
            vertex_buffer.write(bytes).await.unwrap();
            vertex_buffers.push(vertex_buffer);
        }

        // The index buffer is uploaded the same way.
        let index_buffer = renderer.buffer_pool.alloc(index_data.len());
        index_buffer.write(index_data).await.unwrap();

        Self {
            vertex_buffers,
            strides: strides.to_vec(),
            index_buffer,
            vertex_formats,
        }
    }
}
use crate::endpoints::params::Preconditions;
use drogue_cloud_database_common::{
    error::ServiceError,
    models::{Constraints, Resource},
};
use uuid::Uuid;

/// check if an expected UUID is equal to the actual one.
///
/// Returns `None` if the UUIDs don't match, otherwise `Some` containing the UUID.
///
/// An empty `expected` string means "no precondition" and always succeeds,
/// yielding the actual UUID. A non-empty but unparsable `expected` is treated
/// as a mismatch (returns `None`).
fn is_ok_and_equal(expected: &str, actual: Uuid) -> Option<Uuid> {
    if expected.is_empty() {
        // No expectation provided -> accept the current value.
        return Some(actual);
    }

    match uuid::Uuid::parse_str(expected) {
        Ok(expected) if expected == actual => Some(expected),
        // Parse failure or unequal UUIDs both count as "not matching".
        _ => None,
    }
}

/// Check if the provided preconditions match the provided current state.
///
/// This function relies on [`check_versions`] for the actual check.
///
/// The function will also return a set of `Constraints`, which may be used further on for
/// optimistic locking.
pub fn check_preconditions(
    preconditions: &Option<Preconditions>,
    current: &dyn Resource,
) -> Result<Constraints, ServiceError> {
    // Missing preconditions are mapped to empty strings, which `check_versions`
    // / `is_ok_and_equal` interpret as "no check requested".
    check_versions(
        preconditions.as_ref().map(|p| p.uid.as_str()).unwrap_or(""),
        preconditions
            .as_ref()
            .map(|p| p.resource_version.as_str())
            .unwrap_or(""),
        current,
    )
}

/// Check if the expected UID and version match the provided current state.
///
/// The function will also return a set of `Constraints`, which may be used further on for
/// optimistic locking.
///
/// Empty `expected_uid` / `expected_resource_version` strings skip the
/// respective check and fall back to the current values.
pub fn check_versions<S1, S2>(
    expected_uid: S1,
    expected_resource_version: S2,
    current: &dyn Resource,
) -> Result<Constraints, ServiceError>
where
    S1: AsRef<str>,
    S2: AsRef<str>,
{
    let expected_uid = expected_uid.as_ref();
    let expected_resource_version = expected_resource_version.as_ref();

    // check the uid
    // NOTE(review): the `is_empty` guard here duplicates the early return
    // inside `is_ok_and_equal`; both branches yield `current.uid()` for an
    // empty expectation.
    let uid = if !expected_uid.is_empty() {
        if let Some(expected_uid) = is_ok_and_equal(expected_uid, current.uid()) {
            expected_uid
        } else {
            return Err(ServiceError::Conflict(format!(
                "Update request for non-existent ID - current: {}, requested: {}",
                current.uid(),
                expected_uid
            )));
        }
    } else {
        current.uid()
    };

    // check the resource version
    let resource_version = if let Some(expected_resource_version) =
        is_ok_and_equal(expected_resource_version, current.resource_version())
    {
        expected_resource_version
    } else {
        return Err(ServiceError::Conflict(format!(
            "Update request for modified object - current: {}, requested: {}",
            current.resource_version(),
            expected_resource_version
        )));
    };

    // return result
    Ok(Constraints {
        uid,
        resource_version,
    })
}
extern crate crypto;
use crypto::sha2::Sha256;
use crypto::digest::Digest;
use super::primitive::hash::{H256};

/// Computes the SHA-256 digest of `input` and returns it as an `H256`.
pub fn hash(input: &[u8]) -> H256 {
    // Feed the entire input into a fresh SHA-256 hasher.
    let mut hasher = Sha256::new();
    hasher.input(input);

    // Write the 32-byte digest directly into the output buffer.
    let mut digest = H256::default();
    hasher.result(&mut digest.0[..]);
    digest
}
// Compile-fail fixture for the `ShortHand` derive: both attribute forms below
// are intentionally malformed and should be rejected by the macro.
// NOTE(review): presumably consumed by a trybuild-style harness — if so, the
// paired `.stderr` expectations are line/column sensitive; confirm before
// reformatting this file.
use shorthand::ShortHand;

#[derive(ShortHand)]
#[shorthand] // bare attribute without an argument list
struct Example {
    value: usize,
}

#[derive(ShortHand)]
#[shorthand = ""] // name-value form instead of a list
struct Example2 {
    value: usize,
}

fn main() {}
use bismit::Cortex;
use bismit::map::{self, LayerTags, LayerMapKind, LayerMapScheme, LayerMapSchemeList,
    AreaSchemeList, CellScheme, InputScheme, AxonKind, LayerKind, AreaScheme};

// use bismit::proto::{ProtolayerMap, ProtolayerMaps, ProtoareaMaps, Axonal, Spatial, Horizontal,
//     Cortical, Thalamic, Protocell, Protofilter, Protoinput};

/* Eventually move defines to a config file or some such */

/// Builds the layer-map schemes: one cortical map ("visual") and one
/// subcortical input map ("v0b_lm") whose four vector layers feed the
/// "aff_in_0".."aff_in_3" afferent layers via matching `LayerTags::uid`s.
pub fn define_lm_schemes() -> LayerMapSchemeList {
    const MOTOR_UID: u32 = 543;
    // const OLFAC_UID: u32 = 654;

    LayerMapSchemeList::new()
        .lmap(LayerMapScheme::new("visual", LayerMapKind::Cortical)
            //.layer("test_noise", 1, map::DEFAULT, LayerKind::Axonal(Spatial))
            .axn_layer("motor_ctx", map::NS_IN | LayerTags::uid(MOTOR_UID), AxonKind::Horizontal)
            // .axn_layer("olfac", map::NS_IN | LayerTags::with_uid(OLFAC_UID), Horizontal)
            .axn_layer("eff_in", map::FB_IN, AxonKind::Spatial)
            .axn_layer("aff_in", map::FF_IN, AxonKind::Spatial)
            // .axn_layer("vector_in", map::FF_IN | LayerTags::uid(1000), AxonKind::Spatial)
            // Four tagged afferent input layers; uids 1000..=1003 pair them
            // with the "vector_0".."vector_3" output layers below.
            .axn_layer("aff_in_0", map::FF_IN | LayerTags::uid(1000), AxonKind::Spatial)
            .axn_layer("aff_in_1", map::FF_IN | LayerTags::uid(1001), AxonKind::Spatial)
            .axn_layer("aff_in_2", map::FF_IN | LayerTags::uid(1002), AxonKind::Spatial)
            .axn_layer("aff_in_3", map::FF_IN | LayerTags::uid(1003), AxonKind::Spatial)
            // .axn_layer("out", map::FF_FB_OUT, Spatial)
            .axn_layer("unused", map::UNUSED_TESTING, AxonKind::Spatial)
            .layer("mcols", 1, map::FF_FB_OUT, CellScheme::minicolumn("iv", "iii"))
            .layer("iv_inhib", 0, map::DEFAULT, CellScheme::inhibitory(4, "iv"))
            .layer("iv", 1, map::PSAL,
                CellScheme::spiny_stellate(4,
                    vec!["aff_in_0", "aff_in_1", "aff_in_2", "aff_in_3"], 300, 8))
            .layer("iii", 2, map::PTAL,
                CellScheme::pyramidal(1, 4, vec!["iii"], 700, 8)
                    .apical(vec!["eff_in"/*, "olfac"*/], 12))
        )
        // .lmap(LayerMapScheme::new("v0_lm", LayerMapKind::Subcortical)
        //     .layer("spatial", 1, map::FF_OUT, LayerKind::Axonal(AxonKind::Spatial))
        //     .layer("horiz_ns", 1, map::NS_OUT | LayerTags::uid(MOTOR_UID),
        //         LayerKind::Axonal(AxonKind::Horizontal))
        // )
        .lmap(LayerMapScheme::new("v0b_lm", LayerMapKind::Subcortical)
            // .layer("vector", 4, map::FF_OUT | LayerTags::uid(1000),
            //     LayerKind::Axonal(AxonKind::Spatial))
            .layer("vector_0", 1, map::FF_OUT | LayerTags::uid(1000),
                LayerKind::Axonal(AxonKind::Spatial))
            .layer("vector_1", 1, map::FF_OUT | LayerTags::uid(1001),
                LayerKind::Axonal(AxonKind::Spatial))
            .layer("vector_2", 1, map::FF_OUT | LayerTags::uid(1002),
                LayerKind::Axonal(AxonKind::Spatial))
            .layer("vector_3", 1, map::FF_OUT | LayerTags::uid(1003),
                LayerKind::Axonal(AxonKind::Spatial))
        )
}

/// Builds the area schemes: a vector-encoder input area "v0b" feeding the
/// cortical area "v1". Most alternative configurations are kept commented out
/// for experimentation.
pub fn define_a_schemes() -> AreaSchemeList {
    // const CYCLES_PER_FRAME: usize = 1;
    // const HZS: u32 = 16;

    // const ENCODE_SIZE: u32 = 48; // had been used for GlyphSequences
    const ENCODE_SIZE: u32 = 24; // for SensoryTract
    const AREA_SIDE: u32 = 32;

    AreaSchemeList::new()
        // .area_ext("v0", "v0_lm", ENCODE_SIZE,
        //     // InputScheme::GlyphSequences { seq_lens: (5, 5), seq_count: 10,
        //     //     scale: 1.4, hrz_dims: (16, 16) },
        //     InputScheme::SensoryTract,
        //     None,
        //     None,
        // )
        // .area_ext("v0b", "v0b_lm", ENCODE_SIZE,
        //     InputScheme::VectorEncoder { ranges: vec![
        //         (-0.0, 0.0), (-0.0, 0.0), (-0.0, 0.0), (-0.0, 0.0)
        //     ] },
        //     None,
        //     None,
        // )
        // .area("v1", "visual", AREA_SIDE,
        //     // Some(vec![FilterScheme::new("retina", None)]),
        //     None,
        //     // Some(vec!["v0"]),
        //     Some(vec!["v0b"]),
        // )
        .area(AreaScheme::new("v0b", "v0b_lm", ENCODE_SIZE)
            // Four degenerate (-0.0, 0.0) ranges — effectively zero-valued input.
            .input(InputScheme::VectorEncoder { ranges: vec![
                (-0.0, 0.0), (-0.0, 0.0), (-0.0, 0.0), (-0.0, 0.0)] })
        )
        .area(AreaScheme::new("v1", "visual", AREA_SIDE)
            .eff_areas(vec!["v0b"])
        )
        // .area("b1", "visual", AREA_SIDE,
        //     None,
        //     Some(vec!["v1"]),
        // )

        // .area("a1", "visual", AREA_SIDE, None, Some(vec!["b1"]))
        // .area("a2", "visual", AREA_SIDE, None, Some(vec!["a1"]))
        // .area("a3", "visual", AREA_SIDE, None, Some(vec!["a2"]))
        // .area("a4", "visual", AREA_SIDE, None, Some(vec!["a3"]))
        // .area("a5", "visual", AREA_SIDE, None, Some(vec!["a4"]))
        // .area("a6", "visual", AREA_SIDE, None, Some(vec!["a5"]))
        // .area("a7", "visual", AREA_SIDE, None, Some(vec!["a6"]))
        // .area("a8", "visual", AREA_SIDE, None, Some(vec!["a7"]))
        // .area("a9", "visual", AREA_SIDE, None, Some(vec!["a8"]))
        // .area("aA", "visual", AREA_SIDE, None, Some(vec!["a9"]))
        // .area("aB", "visual", AREA_SIDE, None, Some(vec!["aA"]))
        // .area("aC", "visual", AREA_SIDE, None, Some(vec!["aB"]))
        // .area("aD", "visual", AREA_SIDE, None, Some(vec!["aC"]))
        // .area("aE", "visual", AREA_SIDE, None, Some(vec!["aD"]))
        // .area("aF", "visual", AREA_SIDE, None, Some(vec!["aE"]))

        //let mut ir_labels = IdxStreamer::new(LayerMapKind::CorticalDims::new(1, 1, 1, 0, None),
        //    "data/train-labels-idx1-ubyte", 1);
        // .area_ext("u0", "external", AREA_SIDE, AREA_SIDE,
        //     InputScheme::IdxStreamer {
        //         file_name: "data/train-labels-idx1-ubyte",
        //         cyc_per: CYCLES_PER_FRAME,
        //     },
        //     None,
        //     Some(vec!["u1"]),
        // )
        // .area("u1", "visual", AREA_SIDE, AREA_SIDE, None,
        //     //None,
        //     Some(vec!["b1"]),
        // )
        // .area_ext("o0sp", "v0_layer_map", AREA_SIDE,
        //     InputScheme::IdxStreamerLoop {
        //         file_name: "data/train-images-idx3-ubyte",
        //         cyc_per: CYCLES_PER_FRAME,
        //         scale: 1.3,
        //         loop_frames: 31,
        //     },
        //     None,
        //     None,
        // )
        // .area_ext("o0", "o0_lm", 24, InputScheme::Zeros, None, None)
        // .area("o1", "visual", AREA_SIDE,
        //     None,
        //     Some(vec!["o0sp", "o0nsp"]),
        // )
}

/// Hook for selectively disabling cortex subsystems while debugging; all
/// toggles are currently commented out, so this is a no-op.
#[allow(unused_variables)]
pub fn disable_stuff(cortex: &mut Cortex) {
    /* ######################### */
    /* ##### DISABLE STUFF ##### */
    /* ######################### */

    // for (_, area) in &mut cortex.areas {
    //     // area.psal_mut().dens_mut().syns_mut().set_offs_to_zero_temp();
    //     // area.bypass_inhib = true;
    //     // area.bypass_filters = true;
    //     // area.disable_pyrs = true;
    //     // area.disable_ssts = true;
    //     // area.disable_mcols = true;
    //     // area.disable_learning = true;
    //     // area.disable_regrowth = true;
    // }
}
/// Demonstrates `piggify` on the four interesting cases: consonant start,
/// vowel start, a word that is all suffix ("hay"), and a vowel-start word
/// containing other consonants.
fn main() {
    let first = String::from("first");
    assert_eq!("irst-fay", piggify(&first));

    let apple = String::from("apple");
    assert_eq!("apple-hay", piggify(&apple));

    let hay = String::from("hay");
    assert_eq!("ay-hay", piggify(&hay));

    let iter = String::from("iter");
    assert_eq!("iter-hay", piggify(&iter));
}

/// Converts a word to pig latin.
///
/// Vowel-initial words get a "-hay" suffix; consonant-initial words move the
/// first letter to the end followed by "ay". The empty string maps to itself.
///
/// Takes `&str` instead of the original `&String` (the idiomatic parameter
/// type); existing `piggify(&some_string)` call sites still compile via deref
/// coercion.
fn piggify(word: &str) -> String {
    let mut chars = word.chars();
    match chars.next() {
        Some(c) if is_vowel(c) => format!("{}-hay", word),
        // `chars.as_str()` borrows the remainder directly, avoiding the
        // original `collect::<String>()` allocation.
        Some(c) => format!("{}-{}ay", chars.as_str(), c),
        None => String::new(),
    }
}

/// Returns true for lowercase ASCII vowels. (Matches the original behavior:
/// uppercase vowels are treated as consonants.)
fn is_vowel(c: char) -> bool {
    matches!(c, 'a' | 'e' | 'i' | 'o' | 'u')
}
extern crate exact_cover;

use exact_cover::instances::sudoku::{SudokuSolver, sudoku_solver, solution_as_matrix};
use std::io::{stdin};

/// Reads one line from stdin and tries to parse it as an 81-cell sudoku
/// ('.' or '0' = empty cell). Returns the raw line together with a solver,
/// or `None` when the line is unreadable or does not contain 81 digits.
fn read_sudoku() -> (String, Option<SudokuSolver>) {
    let mut line = String::new();
    if stdin().read_line(&mut line).is_err() {
        return (line, None)
    }

    // Map '.' to '0' so both empty-cell notations parse; any non-digit
    // characters (e.g. the trailing newline) are dropped by `filter_map`.
    let v: Vec<usize> = line.chars().map(|x| if x == '.' { '0' } else { x })
        .filter_map(|x| x.to_digit(10))
        .map(|x| x as usize).collect();

    if v.len() != 81 {
        return (line, None)
    }

    // This crate also contains implementations for specific problems.
    // For instance, `sudoku_solver()` takes a 1-d slice of usizes
    // representing a sudoku problem and returns a `Solver` object
    // that will generate solutions.
    (line, sudoku_solver(&v).ok())
}

/// Read a sudoku from the command line and solve.
///
/// Loops over stdin lines until one fails to parse, printing the first
/// solution for each puzzle as a 9x9 grid with box-drawing separators.
fn main() {
    loop {
        let (s, solver) = read_sudoku();
        if solver.is_none() {
            break;
        }

        let sol = solver.unwrap().first_solution();

        match sol {
            Some(a) => {
                print!("{}", s);
                let x = solution_as_matrix(9, &a);
                for (i, v) in x.iter().enumerate() {
                    // Horizontal separator before rows 3 and 6.
                    if i == 3 || i == 6 {
                        println!("───┼───┼───");
                    }
                    for (j, c) in v.iter().enumerate() {
                        // Vertical separator before columns 3 and 6.
                        if j == 3 || j == 6 {
                            print!("│");
                        }
                        print!("{}", c);
                    }
                    println!("");
                }
            },
            None => {
                println!("No solution found.");
            }
        }
    }
}
use cosmwasm_std::{Binary, Decimal, Uint128};
use cw721::Cw721ReceiveMsg;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};

/// Instantiation parameters for the contract.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct InstantiateMsg {
    /// Human-readable contract name.
    pub name: String,
}

/// Executable messages accepted by the contract.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum ExecuteMsg {
    /// Withdraw a previously listed NFT from sale.
    WithdrawNft {
        offering_id: u64,
    },
    /// Purchase the NFT listed under `offering_id`.
    BuyNft {
        offering_id: u64,
    },
    /// cw721 receive hook; entry point for listing an NFT
    /// (the embedded msg is expected to decode to `SellNft`).
    ReceiveNft(Cw721ReceiveMsg),
    /// Mint a new NFT, can only be called by the contract minter
    MintNft {
        contract: String,
        msg: Binary,
    },
    /// Configure an accepted payment denomination and its price ratio.
    SetPayment {
        denom: String,
        ratio: Decimal,
    },
}

/// Payload carried inside `ReceiveNft` when listing an NFT for sale.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct SellNft {
    /// Asking price for the listing.
    pub price: Uint128,
}

/// Standalone buy payload (mirrors `ExecuteMsg::BuyNft`).
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub struct BuyNft {
    pub offering_id: u64,
}

/// Read-only queries supported by the contract.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub enum QueryMsg {
    // GetOfferings returns a list of all offerings
    GetOfferings {
        /// Pagination offset.
        offset: Option<u64>,
        /// Maximum number of results.
        limit: Option<u8>,
        /// Sort order flag.
        order: Option<u8>,
    },
    /// Look up the payment configuration for a denomination.
    GetPayment {
        denom: String,
    },
}
// Copyright ยฉ 2015-2017 Peter Atashian // Licensed under the MIT License <LICENSE.md> //! A simple interface to the Google URL Shortener API. extern crate hyper; extern crate hyper_native_tls; extern crate rustc_serialize; extern crate url; use hyper::Client; use hyper::net::HttpsConnector; use hyper_native_tls::NativeTlsClient; use hyper::Error as HttpError; use hyper::header::{ContentType}; use hyper::mime::{Mime, SubLevel, TopLevel}; use hyper::status::{StatusCode}; use rustc_serialize::json::{BuilderError, Json}; use std::borrow::{ToOwned}; use std::io::Read; use std::io::Error as IoError; use url::form_urlencoded::{Serializer}; const BASEURL: &'static str = "https://www.googleapis.com/urlshortener/v1/url"; /// Contains all possible errors you might get while shortening a URL #[derive(Debug)] pub enum Error { BadStatus(StatusCode, String), Http(HttpError), Io(IoError), Json(BuilderError), MissingId(Json), } impl From<HttpError> for Error { fn from(err: HttpError) -> Error { Error::Http(err) } } impl From<IoError> for Error { fn from(err: IoError) -> Error { Error::Io(err) } } impl From<BuilderError> for Error { fn from(err: BuilderError) -> Error { Error::Json(err) } } /// Shortens a URL using the Google URL Shortener API pub fn shorten(key: &str, longurl: &str) -> Result<String, Error> { let ssl = NativeTlsClient::new().unwrap(); let connector = HttpsConnector::new(ssl); let client = Client::with_connector(connector); let args = &[("key", key)]; let query = Serializer::new(String::new()).extend_pairs(args).finish(); let url = format!("{}?{}", BASEURL, query); let body = vec![("longUrl".to_owned(), Json::String(longurl.to_owned()))]; let body = Json::Object(body.into_iter().collect()).to_string(); let mut response = try!(client.post(&url) .header(ContentType(Mime(TopLevel::Application, SubLevel::Json, vec![]))) .body(&body) .send()); let mut body = String::new(); try!(response.read_to_string(&mut body)); if response.status != StatusCode::Ok { return 
Err(Error::BadStatus(response.status, body)) } let json = try!(Json::from_str(&*body)); let id = json.find("id").and_then(|x| x.as_string()); match id { Some(id) => Ok(id.to_owned()), None => Err(Error::MissingId(json.clone())), //FIXME - nonlexical borrows } }
/// Binary entry point for `su`: all logic lives in the `sudo_rs` library.
fn main() {
    sudo_rs::su_main();
}
use {
    http::{header, StatusCode},
    tsukuyomi::{
        endpoint,
        test::{self, loc, TestServer},
        App,
    },
};

/// End-to-end check of `#[derive(Responder)]` with `#[response(preset = ...)]`:
/// a custom `Preset` renders any `T: Display` as `text/plain`, and the derive
/// is exercised both on a concrete type (`Foo`) and on a generic type (`Bar`)
/// via the `bound` attribute.
#[test]
fn test_into_response_preset() -> test::Result {
    use {
        std::fmt,
        tsukuyomi::{
            future::{Poll, TryFuture},
            input::Input,
            output::{Preset, Response},
            upgrade::NeverUpgrade,
        },
    };

    // Preset that turns any `T: Display` into a text/plain response.
    struct Display;

    impl<T> Preset<T> for Display
    where
        T: fmt::Display,
    {
        type Upgrade = NeverUpgrade;
        type Error = tsukuyomi::Error;
        type Respond = DisplayRespond<T>;

        fn respond(this: T) -> Self::Respond {
            DisplayRespond(this)
        }
    }

    // Immediately-ready future that formats the value into the body.
    struct DisplayRespond<T>(T);

    impl<T> TryFuture for DisplayRespond<T>
    where
        T: fmt::Display,
    {
        type Ok = Response;
        type Error = tsukuyomi::Error;

        fn poll_ready(&mut self, _: &mut Input<'_>) -> Poll<Self::Ok, Self::Error> {
            Ok(http::Response::builder()
                .header("content-type", "text/plain; charset=utf-8")
                .body(self.0.to_string().into())
                .unwrap()
                .into())
        }
    }

    // Concrete responder delegating to the preset.
    #[derive(tsukuyomi::output::Responder)]
    #[response(preset = "Display")]
    struct Foo(String);

    impl fmt::Display for Foo {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            self.0.fmt(f)
        }
    }

    // Generic responder: `bound` supplies the extra where-clause the derive needs.
    #[derive(tsukuyomi::output::Responder)]
    #[response(preset = "Display", bound = "T: fmt::Display")]
    struct Bar<T>(T);

    impl<T: fmt::Display> fmt::Display for Bar<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt::Display::fmt(&self.0, f)
        }
    }

    let app = App::build(|mut s| {
        s.at("/foo")?.to(endpoint::call(|| Foo("Foo".into())))?;
        s.at("/bar")?.to(endpoint::call(|| Bar("Bar")))
    })?;
    let mut server = TestServer::new(app)?;
    let mut client = server.connect();

    // The preset must set both the content type and the formatted body.
    client
        .get("/foo")
        .assert(loc!(), StatusCode::OK)?
        .assert(
            loc!(),
            test::header::eq(header::CONTENT_TYPE, "text/plain; charset=utf-8"),
        )?
        .assert(loc!(), test::body::eq("Foo"))?;
    client.get("/bar").assert(loc!(), test::body::eq("Bar"))?;

    Ok(())
}
use std::sync::Arc;

use apllodb_shared_components::{
    ApllodbError, ApllodbResult, ApllodbSessionError, ApllodbSessionResult, Session, SessionWithTx,
};
use apllodb_sql_parser::apllodb_ast::{
    AlterTableCommand, Command, CreateTableCommand, TableElement,
};
use apllodb_storage_engine_interface::{
    AlterTableAction, ColumnDefinition, StorageEngine, TableConstraintKind, TableConstraints,
    TableName, WithTxMethods,
};

use crate::ast_translator::AstTranslator;

use super::sql_processor_context::SqlProcessorContext;

/// Processes DDL command.
#[derive(Clone, Debug, new)]
pub(crate) struct DdlProcessor<Engine: StorageEngine> {
    context: Arc<SqlProcessorContext<Engine>>,
}

impl<Engine: StorageEngine> DdlProcessor<Engine> {
    /// Executes DDL command.
    ///
    /// Translates the parsed AST into storage-engine arguments and forwards
    /// to the engine's transactional API. Translation errors are wrapped with
    /// the session so the caller gets it back; only CREATE TABLE and
    /// ALTER TABLE are currently supported.
    pub async fn run(
        &self,
        session: SessionWithTx,
        command: Command,
    ) -> ApllodbSessionResult<SessionWithTx> {
        match command {
            Command::CreateTableCommandVariant(cc) => match self.run_helper_create_table(cc) {
                Ok((table_name, table_constraints, column_definitions)) => {
                    self.context
                        .engine
                        .with_tx()
                        .create_table(session, table_name, table_constraints, column_definitions)
                        .await
                }
                Err(e) => Err(ApllodbSessionError::new(e, Session::from(session))),
            },
            Command::AlterTableCommandVariant(ac) => match self.run_helper_alter_table(ac) {
                Ok((table_name, action)) => {
                    self.context
                        .engine
                        .with_tx()
                        .alter_table(session, table_name, action)
                        .await
                }
                Err(e) => Err(ApllodbSessionError::new(e, Session::from(session))),
            },
            _ => Err(ApllodbSessionError::new(
                ApllodbError::feature_not_supported(
                    "only CREATE TABLE / ALTER TABLE are supported for DDL currently",
                ),
                Session::from(session),
            )),
        }
    }

    /// Translates a CREATE TABLE AST into the (name, constraints, columns)
    /// triple expected by the storage engine. Column definitions and table
    /// constraints are extracted in two passes over the same element list.
    fn run_helper_create_table(
        &self,
        command: CreateTableCommand,
    ) -> ApllodbResult<(TableName, TableConstraints, Vec<ColumnDefinition>)> {
        let table_name = AstTranslator::table_name(command.table_name)?;

        let column_definitions: Vec<ColumnDefinition> = command
            .table_elements
            .as_vec()
            .iter()
            .filter_map(|table_element| {
                if let TableElement::ColumnDefinitionVariant(cd) = table_element {
                    Some(cd)
                } else {
                    None
                }
            })
            .map(|cd| AstTranslator::column_definition(cd.clone()))
            .collect::<ApllodbResult<_>>()?;

        let table_constraints: Vec<TableConstraintKind> = command
            .table_elements
            .as_vec()
            .iter()
            .filter_map(|table_element| {
                if let TableElement::TableConstraintVariant(tc) = table_element {
                    Some(tc)
                } else {
                    None
                }
            })
            .map(|tc| AstTranslator::table_constraint(tc.clone()))
            .collect::<ApllodbResult<_>>()?;

        Ok((
            table_name,
            TableConstraints::new(table_constraints)?,
            column_definitions,
        ))
    }

    /// Translates an ALTER TABLE AST into a (name, action) pair.
    /// Exactly one action is accepted; multiple actions are rejected as
    /// unsupported.
    fn run_helper_alter_table(
        &self,
        command: AlterTableCommand,
    ) -> ApllodbResult<(TableName, AlterTableAction)> {
        let table_name = AstTranslator::table_name(command.table_name)?;

        let ast_actions = command.actions.into_vec();
        let ast_action = if ast_actions.len() > 1 {
            Err(ApllodbError::feature_not_supported(
                "ALTER TABLE does not support multiple actions currently",
            ))
        } else {
            Ok(ast_actions
                .first()
                .expect("NonEmptyVec assures first element")
                .clone())
        }?;
        let action = AstTranslator::alter_table_action(ast_action)?;

        Ok((table_name, action))
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use super::DdlProcessor;
    use crate::sql_processor::sql_processor_context::SqlProcessorContext;
    use apllodb_shared_components::{ApllodbResult, SqlType};
    use apllodb_sql_parser::ApllodbSqlParser;
    use apllodb_storage_engine_interface::{
        test_support::{default_mock_engine, test_models::People, MockWithTxMethods},
        ColumnConstraints, ColumnDataType, ColumnDefinition, TableConstraintKind,
        TableConstraints, TableName,
    };
    use futures::FutureExt;
    use mockall::predicate::{always, eq};

    // One table-driven case: the SQL to parse plus the exact arguments the
    // mocked storage engine should receive.
    #[derive(Clone, PartialEq, Debug, new)]
    struct TestDatum {
        in_create_table_sql: String,
        expected_table_name: TableName,
        expected_table_constraints: Vec<TableConstraintKind>,
        expected_column_definitions: Vec<ColumnDefinition>,
    }

    #[async_std::test]
    #[allow(clippy::redundant_clone)]
    async fn test_ddl_processor_with_sql() -> ApllodbResult<()> {
        let parser = ApllodbSqlParser::default();

        fn test_data() -> Vec<TestDatum> {
            vec![TestDatum::new(
                "
            CREATE TABLE people (
                id INTEGER,
                age INTEGER,
                PRIMARY KEY (id)
            )"
                .to_string(),
                People::table_name(),
                vec![TableConstraintKind::PrimaryKey {
                    column_names: vec![People::tc_id().as_column_name().clone()],
                }],
                vec![
                    ColumnDefinition::new(
                        ColumnDataType::new(
                            People::tc_id().as_column_name().clone(),
                            SqlType::integer(),
                            true,
                        ),
                        ColumnConstraints::default(),
                    ),
                    ColumnDefinition::new(
                        ColumnDataType::new(
                            People::tc_age().as_column_name().clone(),
                            SqlType::integer(),
                            true,
                        ),
                        ColumnConstraints::default(),
                    ),
                ],
            )]
        }

        for test_datum in test_data().into_iter() {
            let sql = test_datum.in_create_table_sql.clone();
            log::debug!("testing with SQL: {}", &sql);

            // mocking create_table()
            let mut engine = default_mock_engine();
            engine.expect_with_tx().returning(move || {
                let test_datum = test_datum.clone();

                let mut with_tx = MockWithTxMethods::new();
                // The mock asserts on the translated arguments, which is the
                // actual behavior under test.
                with_tx
                    .expect_create_table()
                    .with(
                        always(),
                        eq(test_datum.expected_table_name),
                        eq(TableConstraints::new(test_datum.expected_table_constraints).unwrap()),
                        eq(test_datum.expected_column_definitions),
                    )
                    .returning(|session, _, _, _| async { Ok(session) }.boxed_local());
                with_tx
            });
            let context = Arc::new(SqlProcessorContext::new(engine));

            let ast = parser.parse(&sql).unwrap();
            // NOTE(review): `run_directly` is not defined in this file —
            // presumably provided elsewhere (helper on DdlProcessor); confirm.
            DdlProcessor::run_directly(context.clone(), ast.0).await?;
        }
        Ok(())
    }
}
// Trap/interrupt handling: trap numbers, the IDT, and the shared assembly
// entry/exit paths (`alltraps` / `trapret`).

// These are arbitrarily chosen, but with care not to overlap
// processor defined exceptions or interrupt vectors.
pub const T_SYSCALL: u32 = 64; // system call
pub const T_DEFAULT: u32 = 500; // catchall

pub const T_IRQ0: u32 = 32; // IRQ 0 corresponds to int T_IRQ

// Hardware IRQ numbers (offsets from T_IRQ0).
pub const IRQ_TIMER: u32 = 0;
pub const IRQ_KBD: u32 = 1;
pub const IRQ_COM1: u32 = 4;
pub const IRQ_IDE: u32 = 14;
pub const IRQ_ERROR: u32 = 19;
pub const IRQ_SPURIOUS: u32 = 31;

use super::memory::gate;
use super::memory::seg;
use utils::x86;

/// Interrupt descriptor table (shared by all CPUs).
static mut IDT: [gate::GateDesc; 256] = [gate::GateDesc::new(); 256];

// build.rs generates vector.S
global_asm!(include_str!(concat!(env!("OUT_DIR"), "/vectors.S")));

/*
vector.S looks like:

    .globl alltraps
    .globl vector0
    vector0:
      pushl $0
      pushl $0
      jmp alltraps
    .globl vector1
    vector1:
      pushl $0
      pushl $1
      jmp alltraps
    ...

    .data
    .globl VECTORS
    VECTORS:
      .long vector0
      .long vector1
      .long vector2
    ...
*/

extern "C" {
    /// in vectors.S: array of 256 entry pointers
    static VECTORS: [u32; 256];
}

use super::lock::spin::SpinMutex;

// Tick counter, guarded by a spinlock ("time").
static TICKS: SpinMutex<u32> = SpinMutex::new("time", 0);

/// Fills the IDT: every vector becomes a kernel-mode interrupt gate, except
/// T_SYSCALL which is a user-invokable (DPL=USER) trap gate.
pub fn init() {
    unsafe {
        for i in 0..256 {
            IDT[i].set(
                false,
                (seg::SEG_KCODE << 3) as u16,
                // NOTE(review): `VECTORS[i] as *const u32 as u32` is a
                // round-trip cast of the u32 entry value — presumably the
                // handler address stored by vectors.S; confirm intent.
                VECTORS[i] as *const u32 as u32,
                0,
            );
        }
        IDT[T_SYSCALL as usize].set(
            true,
            (seg::SEG_KCODE << 3) as u16,
            VECTORS[T_SYSCALL as usize] as *const u32 as u32,
            seg::dpl::USER,
        );
    }
}

/// Layout of the trap frame built on the stack by the
/// hardware and by alltraps, and passed to trap().
#[repr(C)]
pub struct TrapFrame {
    // registers as pushed by pushal
    pub edi: u32,
    pub esi: u32,
    pub ebp: u32,
    orig_esp: u32, // useless & ignored
    pub ebx: u32,
    pub edx: u32,
    pub ecx: u32,
    pub eax: u32,

    // rest of trap frame
    pub gs: u16,
    _padding1: u16,
    pub fs: u16,
    _padding2: u16,
    pub es: u16,
    _padding3: u16,
    pub ds: u16,
    _padding4: u16,
    pub trap_no: u32,

    // below here defined by x86 hardware
    pub err: u32,
    pub eip: usize,
    pub cs: u16,
    _padding5: u16,
    pub eflags: u32,

    // below here only when crossing rings, such as from user to kernel
    pub esp: usize,
    pub ss: u16,
    _padding6: u16,
}

/// Loads the (already initialized) IDT on the current CPU.
pub fn idt_init() {
    const IDT_SZ: usize = core::mem::size_of::<[gate::GateDesc; 256]>();
    x86::lidt(unsafe { IDT.as_ptr() as *const u8 }, IDT_SZ as u16);
}

/// Rust-side trap handler, called from `alltraps` with a pointer to the
/// trap frame. Currently only acknowledges the local APIC.
#[no_mangle]
pub extern "C" fn trap(trap_frame: *const TrapFrame) {
    // use super::proc::my_cpu_id;
    // log!("[cpu:{}] trap", my_cpu_id());
    super::lapic::eoi();
}

extern "C" {
    pub fn trapret();
}

global_asm! {r#"
  # vectors.S sends all traps here.
.globl alltraps
alltraps:
  # Build trap frame.
  pushl %ds
  pushl %es
  pushl %fs
  pushl %gs
  pushal

  # Set up data segments.
  movw $(2<<3), %ax    # SEG_KDATA<<3
  movw %ax, %ds
  movw %ax, %es

  # Call trap(tf), where tf=%esp
  pushl %esp
  call trap
  addl $4, %esp

  # Return falls through to trapret...
.globl trapret
trapret:
  popal
  popl %gs
  popl %fs
  popl %es
  popl %ds
  addl $0x8, %esp  # trap_no and err
  iret             # pop %eip, %cs, %eflags
                   # (and also %esp, %ss when crossing rings)
                   # then return
"#}
extern crate sdl2;
extern crate chiprs;

use sdl2::pixels::Color;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::render::WindowCanvas;
use sdl2::rect::Rect;

use std::time::{Duration, Instant};

use chiprs::Chip;
use chiprs::display::{Display, DISPLAY_W, DISPLAY_H};

// On-screen size of one CHIP-8 pixel.
const PIXEL_W: usize = 10;
const PIXEL_H: usize = 10;

/// Runs the SDL2 front end: window + event loop driving `chip`.
///
/// Per iteration: drain input events (Escape or window close quits), redraw,
/// then catch up the 60 Hz timer ticks and the 5000 Hz instruction steps
/// against wall-clock time.
pub fn run_sdl_interface(chip: &mut Chip) {
    let sdl_context = sdl2::init().unwrap();
    let video_subsystem = sdl_context.video().unwrap();

    let window = video_subsystem.window("chiprs",
                                        (DISPLAY_W*PIXEL_W) as u32,
                                        (DISPLAY_H*PIXEL_H) as u32)
        .position_centered()
        .opengl()
        .build()
        .unwrap();

    let mut canvas = window.into_canvas().build().unwrap();
    let mut events = sdl_context.event_pump().unwrap();

    // Deadlines for the next timer tick and the next CPU step.
    let mut next_tick = Instant::now();
    let mut next_step = Instant::now();
    'running: loop {
        for event in events.poll_iter() {
            match event {
                Event::Quit {..} => {
                    break 'running;
                }
                Event::KeyDown { keycode: Some(key), .. } => {
                    if key == Keycode::Escape {
                        break 'running;
                    }
                    if let Some(k) = get_chip8_key(key) {
                        chip.key_down(k);
                    }
                }
                Event::KeyUp { keycode: Some(key), .. } => {
                    if let Some(k) = get_chip8_key(key) {
                        chip.key_up(k);
                    }
                }
                _ => {}
            }
        }

        draw_display(&mut canvas, &chip.display);
        canvas.present();

        let now = Instant::now();
        // 60 Hz delay/sound timer ticks; loop catches up if we fell behind.
        while next_tick < now {
            chip.tick();
            next_tick += Duration::new(0, 1_000_000_000u32 / 60);
        }
        // 5000 Hz instruction stepping.
        while next_step < now {
            chip.step().unwrap();
            next_step += Duration::new(0, 1_000_000_000u32 / 5000);
        }
    }
}

/// Paints the CHIP-8 framebuffer: light background, one filled dark rect per
/// lit pixel, scaled by PIXEL_W/PIXEL_H.
fn draw_display(canvas: &mut WindowCanvas, display: &Display) {
    canvas.set_draw_color(Color::RGB(200, 200, 200));
    canvas.clear();

    canvas.set_draw_color(Color::RGB(100, 100, 100));
    for i in 0..DISPLAY_W {
        for j in 0..DISPLAY_H {
            if display.at(i, j) {
                canvas.fill_rect(Rect::new(
                        (i * PIXEL_W) as i32,
                        (j * PIXEL_H) as i32,
                        PIXEL_W as u32,
                        PIXEL_H as u32)).unwrap();
            }
        }
    }
}

/// Maps a host key to a CHIP-8 hex keypad code (0x0..=0xF); arrow keys double
/// as 2/4/6/8. Returns `None` for unmapped keys.
fn get_chip8_key(key: Keycode) -> Option<u8> {
    use sdl2::keyboard::Keycode::*;
    match key {
        Num0 | Kp0 => Some(0x0),
        Num1 | Kp1 => Some(0x1),
        Num2 | Kp2 | Up => Some(0x2),
        Num3 | Kp3 => Some(0x3),
        Num4 | Kp4 | Left => Some(0x4),
        Num5 | Kp5 => Some(0x5),
        Num6 | Kp6 | Right => Some(0x6),
        Num7 | Kp7 => Some(0x7),
        Num8 | Kp8 | Down => Some(0x8),
        Num9 | Kp9 => Some(0x9),
        A => Some(0xA),
        B => Some(0xB),
        C => Some(0xC),
        D => Some(0xD),
        E => Some(0xE),
        F => Some(0xF),
        _ => None,
    }
}
use super::*; use std::ffi::CString; use std::fs::File; use std::io::Read; use std::error::Error; pub enum ShaderType { Vertex , Fragment, Compute , Geometry } impl ShaderType{ fn value(&self) -> GLenum{ match *self { ShaderType::Vertex => gl::VERTEX_SHADER, ShaderType::Fragment => gl::FRAGMENT_SHADER, ShaderType::Compute => gl::COMPUTE_SHADER, ShaderType::Geometry => gl::GEOMETRY_SHADER } } } /// A wrapper around opengl shader objects. pub struct Shader { id: u32, _shader_type: ShaderType } impl Drop for Shader { fn drop(&mut self) { unsafe { println!("Dropping shader {}", self.id); gl::DeleteShader(self.id); } } } impl Shader { pub fn get_id(&self) -> GLuint { self.id } // Creates a new empty shader object pub fn new(shader_type: ShaderType) -> Shader { unsafe { Shader { id: gl::CreateShader(shader_type.value()), _shader_type: shader_type } } } /// Loads and compiles a shader from memory. pub fn load_from_memory(&mut self, data: &str) -> Result<(), Box<dyn Error>> { unsafe { let shader_id = self.id; let c_string = CString::new(data)?; gl::ShaderSource(shader_id, 1, &c_string.as_ptr(), ::std::ptr::null()); gl::CompileShader(shader_id); let mut success: GLint = 0; gl::GetShaderiv(shader_id, gl::COMPILE_STATUS, &mut success); match success { 0 => { let mut log_size: GLint = 0; gl::GetShaderiv(shader_id, gl::INFO_LOG_LENGTH, &mut log_size); let mut msg: Vec<u8> = Vec::new(); msg.resize(log_size as usize, 0); let mut new_length = 0; gl::GetShaderInfoLog( shader_id, log_size, &mut new_length, msg.as_ptr() as *mut GLchar, ); let msg = format!( "Failed to compile shader : {}", String::from_utf8(msg).unwrap() ); Err(From::from(msg)) } _ => Ok(()), // Return empty OK } } } /// Loads and compiles a shader from a file on disk. 
pub fn load_from_file(&mut self, path: &str) -> Result<(), Box<dyn Error> > { let mut file = File::open(&path)?; let mut content = String::new(); file.read_to_string(&mut content) .expect("Failed to read from file"); let c_content: CString = CString::new(content.as_bytes()).unwrap(); match c_content.to_str() { Ok(v) => { self.load_from_memory(v) }, Err(err) => { Err(From::from(format!("Failed to load shader for path \"{}\": \n{}", path, err))) } } } } /// Enum that carries data for specific Uniforms in GLSL. /// It describes what data to bind for the uniform retrieved with glGetUniformLocation #[allow(dead_code)] pub enum Uniform { Float(f32), Int(i32), Vec2(f32,f32), Sampler2D(GLuint), }
use std::cmp::max;
use std::cmp::min;

/// Reads one line from stdin with trailing whitespace/newline removed.
fn read_line() -> String {
    let mut line = String::new();
    std::io::stdin().read_line(&mut line).unwrap();
    line.trim_end().to_owned()
}

fn main() {
    let _n: i64 = read_line().parse().unwrap();
    let aa = read_line()
        .split_whitespace()
        .map(|v| v.parse().unwrap())
        .collect();
    let solver = Solver::new(aa);
    let stdout = solver.solve();
    stdout.iter().for_each(|s| {
        println!("{}", s);
    })
}

struct Solver {
    aa: Vec<u64>,
}

impl Solver {
    fn new(aa: Vec<u64>) -> Solver {
        Solver { aa }
    }

    /// Answer: the maximum over all subarrays of
    /// (subarray length) * (subarray minimum), printed as one line.
    ///
    /// For each left endpoint `l` the minimum of `aa[l..=r]` is maintained
    /// incrementally while `r` advances. Same O(n^2) time as before, but the
    /// original allocated an O(n^2) `dp` table of which only the previous
    /// column was ever read — that table is gone (O(1) extra memory).
    fn solve(&self) -> Vec<String> {
        let size = self.aa.len();
        let mut ans: u64 = 0;
        for l in 0..size {
            // Running minimum of aa[l..=r]; MAX is the identity for `min`.
            let mut lo = std::u64::MAX;
            for r in l..size {
                lo = min(lo, self.aa[r]);
                ans = max(ans, ((r - l + 1) as u64) * lo);
            }
        }
        // Empty input yields 0, matching the original behavior.
        vec![format!("{}", ans)]
    }
}

#[test]
fn test_solve_1() {
    let solver = Solver::new(vec![2, 4, 4, 9, 4, 9]);
    assert_eq!(solver.solve(), vec!("20"));
}
use crate::shared::tree_node::TreeNode;
use std::cell::RefCell;
use std::rc::Rc;

struct Solution;

/// https://leetcode.com/problems/kth-smallest-element-in-a-bst/
impl Solution {
    /// 0 ms 3.1 MB
    ///
    /// In-order traversal of a BST visits values in ascending order, so the
    /// k-th visited node holds the answer. `helper` threads the running visit
    /// counter through the recursion using `Result` as control flow:
    /// - `Ok(val)` => the k-th smallest was found; unwind immediately.
    /// - `Err((k, index))` => subtree exhausted; `index` is the 1-based rank
    ///   of the next node to be visited.
    pub fn kth_smallest(root: Option<Rc<RefCell<TreeNode>>>, k: i32) -> i32 {
        fn helper(node: &Rc<RefCell<TreeNode>>, k: i32, index: i32) -> Result<i32, (i32, i32)> {
            let node = node.borrow();
            // Visit the left subtree first (all values smaller than this node).
            let left_result = if let Some(left) = node.left.as_ref() {
                helper(left, k, index)
            } else {
                Err((k, index))
            };
            if left_result.is_ok() {
                // Answer already found deeper in the left subtree.
                return left_result;
            }
            // Recover the counter the left subtree finished with.
            let (k, mut index) = left_result.unwrap_err();
            // This node is the `index`-th smallest; stop if it is the target.
            if k == index {
                return Ok(node.val);
            }
            index += 1;
            // Continue with the right subtree (all values larger).
            if let Some(right) = node.right.as_ref() {
                helper(right, k, index)
            } else {
                Err((k, index))
            }
        }
        let root = root.as_ref().unwrap();
        match helper(root, k, 1) {
            // NOTE(review): `index` here is actually the found *value*.
            Ok(index) => index,
            // The problem guarantees 1 <= k <= node count, so a miss is a bug.
            Err(_) => panic!(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test() {
        assert_eq!(
            Solution::kth_smallest(TreeNode::from_str_and_wrap("[3,1,4,null,2]"), 1),
            1
        );
        assert_eq!(
            Solution::kth_smallest(TreeNode::from_str_and_wrap("[5,3,6,2,4,null,null,1]"), 3),
            3
        );
        assert_eq!(
            Solution::kth_smallest(TreeNode::from_str_and_wrap("[4,2,5,null,3]"), 1),
            2
        );
    }
}
use std::cell::Cell;
use std::collections::HashMap;
use std::{time::Instant, fmt::Formatter, error::Error};
use std::fmt::Debug;

/// Runtime type tag of a `Value`; the key used by the inline cache.
#[derive(Debug, Copy, Clone, PartialEq)]
enum ValueKind {
    Int,
    Float,
    Bool,
    Object,
}

/// A VM value. Objects live in `Vm::memory` and are referenced by index.
#[derive(Debug, Copy, Clone, PartialEq)]
enum Value {
    Int(i64),
    Float(f64),
    True,
    False,
    Object(usize),
}

impl Value {
    fn kind(&self) -> ValueKind {
        match self {
            Value::Int(_) => ValueKind::Int,
            Value::Float(_) => ValueKind::Float,
            Value::True => ValueKind::Bool,
            Value::False => ValueKind::Bool,
            Value::Object(_) => ValueKind::Object,
        }
    }

    /// Coerces to an integer (floats truncate, bools map to 0/1).
    /// Panics on objects.
    fn as_int(&self) -> i64 {
        match self {
            Value::Int(i) => *i,
            Value::Float(f) => *f as i64,
            Value::True => 1,
            Value::False => 0,
            Value::Object(i) => panic!("Object value {} is not an integer", i),
        }
    }

    /// Coerces to a float. Panics on objects.
    fn as_float(&self) -> f64 {
        match self {
            Value::Int(i) => *i as f64,
            Value::Float(f) => *f,
            Value::True => 1.0,
            Value::False => 0.0,
            Value::Object(i) => panic!("Object value {} is not a float", i),
        }
    }

    fn add_float(lhs: Value, rhs: Value) -> Value {
        Value::Float(lhs.as_float() + rhs.as_float())
    }

    fn add_int(lhs: Value, rhs: Value) -> Value {
        Value::Int(lhs.as_int() + rhs.as_int())
    }

    // Intentionally overcomplicated so I could see when inline caching would help.
    // NOTE(review): the dispatch is asymmetric on purpose? (Int, Float) adds
    // as ints (truncating) while (Float, Int) adds as floats; `self` is
    // whichever operand was popped first (the top of the stack). Left as-is.
    fn get_add_method(&self, b: Value) -> Box<dyn Fn(Value, Value) -> Value> {
        match (self, b) {
            (Value::Int(_), Value::Int(_)) => Box::new(Value::add_int),
            (Value::Float(_), Value::Float(_)) => Box::new(Value::add_float),
            (Value::Int(_), Value::Float(_)) => Box::new(Value::add_int),
            (Value::Float(_), Value::Int(_)) => Box::new(Value::add_float),
            _ => panic!("Invalid types for addition"),
        }
    }

    /// Heap index of an object value. Panics for non-objects.
    fn get_id(&self) -> usize {
        match self {
            Value::Object(i) => *i,
            _ => panic!("Value is not an object"),
        }
    }
}

// Things we could add
// Objects with properties
// Functions
// Add being polymorphic
// Adding an inline cache
// Adding object shapes

/// Monomorphic inline-cache entry: the operand kind last seen at this call
/// site and the add function specialized for it.
#[derive(Copy, Clone)]
struct CacheValue {
    kind: ValueKind,
    value: fn(Value, Value) -> Value,
}

impl Debug for CacheValue {
    fn fmt(&self, f: &mut Formatter) -> Result<(), std::fmt::Error> {
        // The function pointer isn't meaningfully printable; show the kind.
        write!(f, "{:?}", self.kind)
    }
}

#[derive(Debug, Clone)]
enum Instruction {
    Push(Value),
    Pop,
    Add,
    /// `Add` with a per-site inline cache keyed on the first popped
    /// operand's kind.
    AddWithCache(Cell<Option<CacheValue>>),
    Mul,
    Sub,
    Div,
    Ret,
    Eq,
    Jump(usize),
    JumpT(usize),
    JumpF(usize),
    CreateObject,
    GetField(String),
    SetField(String),
}

/// Expression AST compiled to stack-machine code. `Plus` is the same as
/// `Add` but compiles to the inline-cached instruction.
enum Expr {
    Int(i64),
    Float(f64),
    True,
    False,
    Add(Box<Expr>, Box<Expr>),
    Plus(Box<Expr>, Box<Expr>),
    Mul(Box<Expr>, Box<Expr>),
    Sub(Box<Expr>, Box<Expr>),
    Div(Box<Expr>, Box<Expr>),
    Eq(Box<Expr>, Box<Expr>),
    If(Box<Expr>, Box<Expr>, Box<Expr>),
    Object(Vec<(String, Expr)>),
    GetField(Box<Expr>, String),
    SetField(Box<Expr>, String, Box<Expr>),
    Ret,
}

impl Expr {
    /// Compiles this expression to instructions. Binary operators emit
    /// lhs-code, rhs-code, op — so at run time the rhs is on top of the stack.
    fn compile(&self) -> Vec<Instruction> {
        use Instruction::*;
        let mut instructions = Vec::new();
        match self {
            Expr::Int(i) => instructions.push(Push(Value::Int(*i))),
            Expr::Float(f) => instructions.push(Push(Value::Float(*f))),
            Expr::True => instructions.push(Push(Value::True)),
            Expr::False => instructions.push(Push(Value::False)),
            Expr::Add(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(Add);
            }
            Expr::Plus(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(AddWithCache(Cell::new(None)));
            }
            Expr::Mul(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(Mul);
            }
            Expr::Sub(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(Sub);
            }
            Expr::Div(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(Div);
            }
            Expr::Ret => instructions.push(Ret),
            Expr::Eq(lhs, rhs) => {
                instructions.extend(lhs.compile());
                instructions.extend(rhs.compile());
                instructions.push(Eq);
            }
            Expr::If(cond, then, else_) => {
                // I need to jump to a specific point in the program
                // Or I could make my jumps relative.
                // For right now I'm going assume these are all the instructions there are
                // If that wasn't true, I'd need to patch up the jump instructions
                let cond = cond.compile();
                let then = then.compile();
                let else_ = else_.compile();
                let else_location = cond.len() + then.len() + 2;
                let exit_location = else_location + else_.len();
                instructions.extend(cond);
                instructions.push(JumpF(else_location));
                instructions.extend(then);
                instructions.push(Jump(exit_location));
                instructions.extend(else_);
            }
            Expr::Object(fields) => {
                // The object stays on the stack; SetField re-pushes it after
                // each field so chained initialization works.
                instructions.push(CreateObject);
                for (name, expr) in fields {
                    instructions.extend(expr.compile());
                    instructions.push(SetField(name.clone()));
                }
            }
            Expr::GetField(obj, name) => {
                instructions.extend(obj.compile());
                instructions.push(GetField(name.clone()));
            }
            Expr::SetField(obj, name, value) => {
                instructions.extend(obj.compile());
                instructions.extend(value.compile());
                instructions.push(SetField(name.clone()));
            }
        }
        instructions
    }
}

/// Heap representation of an object: a bag of named fields.
struct ObjectInfo {
    fields: HashMap<String, Value>,
}

struct Vm {
    pc: usize,
    /// Object heap; index i backs `Value::Object(i)`.
    memory: Vec<Option<ObjectInfo>>,
    next_object_id: usize,
    stack: Vec<Value>,
    code: Vec<Instruction>,
}

impl Vm {
    fn new() -> Vm {
        Vm {
            pc: 0,
            memory: Vec::new(),
            stack: Vec::new(),
            code: Vec::new(),
            next_object_id: 0,
        }
    }

    /// Rewinds for another run of the same code. The object heap is kept
    /// (ids stay in sync with `memory` since both only grow in CreateObject).
    fn reset(&mut self) {
        self.pc = 0;
        self.stack.clear();
    }

    /// The value left on top of the stack — the program's "result".
    fn get_ret(&self) -> Value {
        *self.stack.last().unwrap()
    }

    fn run(&mut self) {
        while self.pc < self.code.len() {
            // println!("{:?}, {} {}", self.stack, self.pc, self.code.len());
            let instruction = &self.code[self.pc];
            match instruction {
                Instruction::Push(value) => {
                    self.stack.push(value.clone());
                    self.pc += 1;
                }
                Instruction::Pop => {
                    self.stack.pop();
                    self.pc += 1;
                }
                Instruction::AddWithCache(cache) => {
                    let cache_value = cache.get();
                    let lhs = self.stack.pop().unwrap();
                    // Fast path: the cached kind matches the first operand.
                    if let Some(cache) = cache_value {
                        if lhs.kind() == cache.kind {
                            let rhs = self.stack.pop().unwrap();
                            self.stack.push((cache.value)(lhs, rhs));
                            self.pc += 1;
                            continue;
                        }
                    }
                    // Slow path: (re)fill the cache from the operand kind.
                    let left_kind = lhs.kind();
                    cache.set(
                        match left_kind {
                            ValueKind::Int => Some(CacheValue {
                                kind: ValueKind::Int,
                                value: Value::add_int,
                            }),
                            ValueKind::Float => Some(CacheValue {
                                kind: ValueKind::Float,
                                value: Value::add_float,
                            }),
                            _ => panic!("Can't add {:?}", left_kind),
                        }
                    );
                    let cache = cache.get().unwrap();
                    let rhs = self.stack.pop().unwrap();
                    self.stack.push((cache.value)(lhs, rhs));
                    self.pc += 1;
                }
                Instruction::Add => {
                    // NOTE(review): `a` is the top of stack, i.e. the *right*
                    // operand; get_add_method's asymmetric mixed-type dispatch
                    // therefore keys on the rhs. Preserved as-is.
                    let a = self.stack.pop().unwrap();
                    let b = self.stack.pop().unwrap();
                    let method = a.get_add_method(b);
                    self.stack.push(method(a, b));
                    self.pc += 1;
                }
                Instruction::Mul => {
                    // Commutative, so pop order doesn't matter here.
                    let rhs = self.stack.pop().unwrap();
                    let lhs = self.stack.pop().unwrap();
                    self.stack.push(match (lhs, rhs) {
                        (Value::Int(lhs), Value::Int(rhs)) => Value::Int(lhs * rhs),
                        _ => panic!("invalid operands for mul"),
                    });
                    self.pc += 1;
                }
                Instruction::Sub => {
                    // Operands were pushed lhs-then-rhs, so the first pop is
                    // the rhs. Fix: the original computed (top - below), i.e.
                    // rhs - lhs, so `(- 5 3)` evaluated to -2.
                    let rhs = self.stack.pop().unwrap();
                    let lhs = self.stack.pop().unwrap();
                    self.stack.push(match (lhs, rhs) {
                        (Value::Int(lhs), Value::Int(rhs)) => Value::Int(lhs - rhs),
                        _ => panic!("invalid operands for sub"),
                    });
                    self.pc += 1;
                }
                Instruction::Div => {
                    // Fix: same operand-order bug as Sub — `(/ 10 2)`
                    // previously evaluated to 2 / 10 == 0.
                    let rhs = self.stack.pop().unwrap();
                    let lhs = self.stack.pop().unwrap();
                    self.stack.push(match (lhs, rhs) {
                        (Value::Int(lhs), Value::Int(rhs)) => Value::Int(lhs / rhs),
                        _ => panic!("invalid operands for div"),
                    });
                    self.pc += 1;
                }
                Instruction::Ret => {
                    // No call stack yet: Ret just advances past itself.
                    self.pc += 1;
                }
                Instruction::Jump(position) => {
                    self.pc = *position;
                }
                Instruction::JumpT(position) => {
                    // Jumps only on the exact value True; anything else falls
                    // through (no truthiness coercion).
                    let a = self.stack.pop().unwrap();
                    if a == Value::True {
                        self.pc = *position;
                    } else {
                        self.pc += 1;
                    }
                }
                Instruction::JumpF(position) => {
                    let a = self.stack.pop().unwrap();
                    if a == Value::False {
                        self.pc = *position;
                    } else {
                        self.pc += 1;
                    }
                }
                Instruction::Eq => {
                    let a = self.stack.pop().unwrap();
                    let b = self.stack.pop().unwrap();
                    self.stack.push(match (a, b) {
                        (Value::Int(a), Value::Int(b)) => {
                            if a == b { Value::True } else { Value::False }
                        }
                        (Value::True, Value::True) => Value::True,
                        (Value::False, Value::False) => Value::True,
                        // Mixed kinds (and floats/objects) compare unequal.
                        _ => Value::False,
                    });
                    self.pc += 1;
                }
                Instruction::CreateObject => {
                    // next_object_id stays equal to memory.len(): both are
                    // only advanced here, together.
                    let object_id = self.next_object_id;
                    self.next_object_id += 1;
                    self.memory.push(Some(ObjectInfo {
                        fields: HashMap::new(),
                    }));
                    self.stack.push(Value::Object(object_id));
                    self.pc += 1;
                }
                Instruction::GetField(s) => {
                    let object_id = self.stack.pop().unwrap();
                    let object = self.memory[object_id.get_id() as usize].as_ref().unwrap();
                    let field = object.fields.get(s).unwrap();
                    self.stack.push(field.clone());
                    self.pc += 1;
                }
                Instruction::SetField(s) => {
                    // Stack: [... object value] -> [... object]; the object is
                    // pushed back so field writes can be chained.
                    let value = self.stack.pop().unwrap();
                    let object = self.stack.pop().unwrap();
                    let object_info = self.memory[object.get_id() as usize].as_mut().unwrap();
                    object_info.fields.insert(s.clone(), value);
                    self.stack.push(object);
                    self.pc += 1;
                }
            }
        }
    }
}

/// S-expression-ish front-end for `Expr`, e.g. `lang!((+ 1 (f 2.0)))`.
/// `+` compiles to plain Add, `++` to the inline-cached AddWithCache.
/// NOTE(review): the if-arm binds its head as `$if:tt` without checking it is
/// literally `if`, and a one-field object literal appears to require a
/// trailing comma — confirm both are acceptable.
macro_rules! lang {
    (true) => { Expr::True };
    (false) => { Expr::False };
    ($int:literal) => { Expr::Int($int) };
    ((f $float:tt)) => { Expr::Float($float) };
    ((+ $arg1:tt $arg2:tt)) => {
        Expr::Add(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    ((++ $arg1:tt $arg2:tt)) => {
        Expr::Plus(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    ((* $arg1:tt $arg2:tt)) => {
        Expr::Mul(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    ((- $arg1:tt $arg2:tt)) => {
        Expr::Sub(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    ((/ $arg1:tt $arg2:tt)) => {
        Expr::Div(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    (($if:tt $cond:tt $then:tt $else:tt)) => {
        Expr::If(Box::new(lang!($cond)), Box::new(lang!($then)), Box::new(lang!($else)))
    };
    ((= $arg1:tt $arg2:tt)) => {
        Expr::Eq(Box::new(lang!($arg1)), Box::new(lang!($arg2)))
    };
    ({ $attr:tt : $value:tt, $($attr2:tt : $value2:tt),* }) => {
        Expr::Object(vec![
            ($attr.to_string(), lang!($value)),
            $(($attr2.to_string(), lang!($value2))),*
        ])
    };
    ((. $obj:tt $attr:tt)) => {
        Expr::GetField(Box::new(lang!($obj)), $attr.to_string())
    };
    ((.set $obj:tt $attr:tt $value:tt)) => {
        Expr::SetField(Box::new(lang!($obj)), $attr.to_string(), Box::new(lang!($value)))
    };
    // How should I handle return?
    (ret) => { Expr::Ret };
}

fn main() {
    let my_expr = lang!(
        (+ (+ (f 2.0) 2) (+ 3 (f 4.0)))
    );
    let my_expr2 = lang!(
        (++ (++ (f 2.0) 2) (++ 3 (f 4.0)))
    );
    let my_expr3 = lang!(
        (. (.set { "a": 1, "b": 2, "c": 3 } "a" 3) "a")
    );
    let mut vm = Vm::new();
    vm.code = my_expr.compile();
    println!("{:?}", vm.code);
    let mut vm = Vm::new();
    vm.code = my_expr3.compile();
    println!("{:?}", vm.code);
    vm.run();
    println!("{:?}", vm.get_ret());
    // Baseline: the same arithmetic as native Rust, for comparison.
    let mut result = 0;
    let now = Instant::now();
    for _ in 0..10000 {
        result = 2 + 2 + 3 + 4;
    }
    println!("{:?} {}", now.elapsed(), result);
    let now = Instant::now();
    for _ in 0..10000 {
        vm.run();
        // println!("{:?}", vm.get_ret());
        vm.reset();
    }
    vm.run();
    println!("{:?} {:?}", now.elapsed(), vm.get_ret());
    // NOTE(review): the code is swapped without a reset(), so `pc` still
    // points past the end of the previous program and the first benchmark
    // iteration below is a no-op run.
    vm.code = my_expr2.compile();
    let now = Instant::now();
    for _ in 0..10000 {
        vm.run();
        // println!("{:?}", vm.get_ret());
        vm.reset();
    }
    vm.run();
    println!("{:?} {:?}", now.elapsed(), vm.get_ret());
}
use regex::Regex; use std::{ net::IpAddr, str::FromStr }; pub fn is_ipv4(value: &str) -> bool { //! Check to see if a given value corresponds to IPv4 Address. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ipv4; //! //! fn main() { //! assert!(is_ipv4("128.10.133.22")); //! } //! ``` let ip = if let Ok(ipaddr) = IpAddr::from_str(value){ ipaddr } else { return false }; ip.is_ipv4() } pub fn is_ipv4_cidr(value: &str) -> bool { //! Check to see if a given IPv4 Address with CIDR is valid. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ipv4_cidr; //! //! fn main() { //! assert!(is_ipv4_cidr("10.0.0.0/24")); //! } //! ``` let splitted_groups: Vec<&str> = value.splitn(2, '/').collect(); let prefix = splitted_groups[0]; let suffix = splitted_groups[1]; let nsuffix: u32 = match suffix.parse() { Ok(x) => x, Err(_) => { return false; } }; if nsuffix > 32 { return false; } if !is_ipv4(prefix) { return false; } true } pub fn is_ipv6(value: &str) -> bool { //! Check to see if a given value corresponds to IPv6 Address. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ipv6; //! //! fn main() { //! assert!(is_ipv6("::ffff:127.0.0.1")); //! } //! ``` let ip = if let Ok(ipaddr) = IpAddr::from_str(value){ ipaddr } else { return false }; ip.is_ipv6() } pub fn is_ipv6_cidr(value: &str) -> bool { //! Check to see if a given IPv6 Address with CIDR is valid. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ipv6_cidr; //! //! fn main() { //! assert!(is_ipv6_cidr("2001:0DB8:1234::/48")); //! } //! ``` let splitted_groups: Vec<&str> = value.splitn(2, '/').collect(); let prefix = splitted_groups[0]; let suffix = splitted_groups[1]; let nsuffix: u32 = match suffix.parse() { Ok(x) => x, Err(_) => { return false; } }; if nsuffix > 128 { return false; } if !is_ipv6(prefix) { return false; } true } pub fn is_ip_loopback(value: &str) -> bool { //! 
Check to see if a given value corresponds to Local/loopback IP Address. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ip_loopback; //! //! fn main() { //! assert!(is_ip_loopback("::1")); //! } //! ``` let ip = if let Ok(ipaddr) = IpAddr::from_str(value){ ipaddr } else { return false }; ip.is_loopback() } pub fn is_ipv_any(value: &str) -> bool { //! Check to see if a given value corresponds to IP Address. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_ipv_any; //! //! fn main() { //! assert!(is_ipv_any("::ffff:127.0.0.1")); //! } //! ``` if is_ipv4(value) { return true; } else if is_ipv6(value) { return true; } false } pub fn which_ipv(value: &str) -> Option<&str> { //! Check to see if a given value corresponds to IP Address & return its IP version. //! //! ## Example Usage //! ```rust //! use validaten::networks::which_ipv; //! //! fn main() { //! assert_eq!(which_ipv("::1"), Some("IPv6")); //! } //! ``` if is_ipv4(value) { return Some("IPv4") } else if is_ipv6(value) { return Some("IPv6") } None } pub fn is_mac_address(value: &str) -> bool { //! Check to see if a given value corresponds to MAC Address. //! //! ## Example Usage //! ```rust //! use validaten::networks::is_mac_address; //! //! fn main() { //! assert!(is_mac_address("F6-7C-9E-36-C9-E3")); //! } //! 
``` let pattern = match Regex::new(r#"^(?:[0-9a-fA-F]{2}[:.-]){5}[0-9a-fA-F]{2}$"#) { Ok(x) => x, Err(_) => { return false; } }; if pattern.is_match(value) { return true; } false } #[cfg(test)] mod tests { use super::*; #[test] fn test_is_ipv4() { assert!(is_ipv4("10.10.10.1")); assert!(is_ipv4("100.128.10.132")); assert!(!is_ipv4("12.110.105.256")); assert!(!is_ipv4("10.2.13")); assert!(!is_ipv4("256.10.10.1000")); assert!(is_ipv4("100.17.5.119")); assert!(is_ipv4("127.0.0.1")); } #[test] fn test_is_ipv4_cidr() { assert!(is_ipv4_cidr("10.0.0.0/8")); assert!(is_ipv4_cidr("10.0.0.0/32")); assert!(!is_ipv4_cidr("10.0.0.0/33")); assert!(!is_ipv4_cidr("270.0.0.1000/24")); } #[test] fn test_is_ipv6() { assert!(is_ipv6("2041:0000:140F::875B:131B")); assert!(is_ipv6("::ffff:127.0.0.1")); assert!(is_ipv6("::ffff:7f00:1")); assert!(is_ipv6("::1")); assert!(is_ipv6("2041:0:140F::875B:131B")); assert!(is_ipv6("2041:0000:140F::875B:131B")); assert!(is_ipv6("fcb7:360a:242a:2d0d:392e:bc22:a45:3573")); assert!(!is_ipv6("2002:::1234::")); assert!(is_ipv6("3b8f:473b:d1a7:ba09:d28c:3cd:7f46:c95e")); assert!(is_ipv6("0000:0000:0000:0000:0000:FFFF:2BE0:9E74")); assert!(is_ipv6("::ffff:43.224.158.116")); } #[test] fn test_is_ipv6_cidr() { assert!(is_ipv6_cidr("2001:0DB8:1234::/48")); assert!(is_ipv6_cidr("2001:0DB8:12a4::/128")); assert!(!is_ipv6_cidr("2005:0DB8:1234::/130")); assert!(!is_ipv6_cidr("2002:::1234::/48")); } #[test] fn test_is_ip_loopback() { assert!(is_ip_loopback("127.0.0.1")); assert!(is_ip_loopback("::1")); assert!(!is_ip_loopback("10.122.1.130")); assert!(!is_ip_loopback("::ffff:7f00:1")); } #[test] fn test_is_ipv_any() { assert!(is_ipv_any("127.0.0.1")); assert!(is_ipv_any("::1")); } #[test] fn test_which_ipv() { assert_eq!(which_ipv("::1"), Some("IPv6")); assert_eq!(which_ipv("127.0.0.1"), Some("IPv4")); assert_eq!(which_ipv("2002:::1234::"), None); } #[test] fn test_is_mac_address() { assert!(is_mac_address("F6-7C-9E-36-C9-E3")); 
assert!(is_mac_address("F6:7C:9E:36:C9:E3")); assert!(is_mac_address("F6.7C.9E.36.C9.E3")); } }
// Platform backend selection: both cfg branches compile to the same module
// name `sys`, so the rest of the crate stays platform-agnostic.
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[path = "darwin.rs"]
pub mod sys;

#[cfg(target_os = "linux")]
#[path = "linux.rs"]
pub mod sys;

// Re-export the interface every backend file must provide.
pub use sys::{setup_ip, DNSSetup};
// implements the surface volume structure pub struct Surface { } impl Surface { }
use crossbeam::channel;
use std::thread;
use std::time::Duration;

/// Simulates an expensive computation: sum of squares of the even elements.
/// Sleeps 500 ms first so the main thread visibly runs concurrently.
fn expensive_sum(v: Vec<i32>) -> i32 {
    pause_ms(500);
    println!("Child thread: just about finished");
    v.iter()
        .filter(|x| { *x%2 == 0})
        .map(|&x| { x * x})
        .sum()
}

/// Sleeps the current thread for `ms` milliseconds.
fn pause_ms(ms: u64) {
    thread::sleep(Duration::from_millis(ms));
}

pub fn main() {
    // Demo 1 — spawn/join: compute in a child thread while main keeps working.
    let my_vector = vec![2, 5, 1, 0, 4, 3];
    let handle = thread::spawn(move || {expensive_sum(my_vector) });
    for letter in vec!["a", "b", "c", "d", "e", "f"] {
        println!("Main thread: Letter {}", letter);
        pause_ms(200);
    }
    let sum = handle.join().unwrap();
    println!("The child thread's expensive sum is {}", sum);

    // Demo 2 — multi-producer, single-consumer: two sender clones feed one
    // receiver. The `for msg in rx` loop ends once every sender has been
    // dropped (both producer threads finish), which closes the channel.
    let (tx, rx) = channel::unbounded();
    let tx2 = tx.clone();
    let handle_a = thread::spawn(move || {
        pause_ms(0);
        tx2.send("Thread A: 1").unwrap();
        pause_ms(200);
        tx2.send("Thread A: 2").unwrap();
    });
    pause_ms(100); // Make sure Thread A has time to get going before we spawn Thread B
    let handle_b = thread::spawn(move || {
        pause_ms(0);
        tx.send("Thread B: 1").unwrap();
        pause_ms(200);
        tx.send("Thread B: 2").unwrap();
    });
    for msg in rx {
        println!("Main thread: Received {}", msg);
    }
    handle_a.join().unwrap();
    handle_b.join().unwrap();

    // Demo 3 — single-producer, multi-consumer: two receiver clones compete
    // for items; each message is delivered to exactly one of them. Dropping
    // `tx` closes the channel so both receive loops terminate.
    let (tx, rx) = channel::unbounded();
    let rx1 = rx.clone();
    let handle_a = thread::spawn(move || {
        for msg in rx {
            println!("thread1: Received {}", msg);
        }
    });
    let handle_b = thread::spawn(move || {
        for msg in rx1 {
            println!("thread2: Received {}", msg);
        }
    });
    for i in 1..=15{
        tx.send(i).unwrap();
    }
    drop(tx);
    handle_a.join().unwrap();
    handle_b.join().unwrap();
    println!("Main thread: Exiting.")
}
use crate::types::{Array, Str};
use crate::value::{FromValue, ToValue, Value};

// Implements To/FromValue for integer-like primitives by widening through
// i64 (the integer representation used by `Value`).
// NOTE(review): `u64`/`usize` round-trip through `as i64`, so values above
// i64::MAX wrap — confirm this is acceptable for the FFI boundary.
macro_rules! value_i {
    ($t:ty) => {
        impl ToValue for $t {
            fn to_value(&self) -> $crate::Value {
                $crate::Value::i64(self.clone() as i64)
            }
        }

        impl FromValue for $t {
            fn from_value(v: $crate::Value) -> $t {
                v.i64_val() as $t
            }
        }
    };
    ($($t:ty),*) => {
        $(value_i!($t);)*
    }
}

// Same scheme for floating-point types, through f64.
macro_rules! value_f {
    ($t:ty) => {
        impl ToValue for $t {
            fn to_value(&self) -> $crate::Value {
                $crate::Value::f64(self.clone().into())
            }
        }

        impl FromValue for $t {
            fn from_value(v: $crate::Value) -> $t {
                v.f64_val() as $t
            }
        }
    };
    ($($t:ty),*) => {
        $(value_f!($t);)*
    }
}

value_i!(i8, u8, i16, u16, i32, u32, i64, u64, isize, usize);
value_f!(f32, f64);

// Booleans are encoded as the integers 0/1; any non-zero reads back as true.
impl ToValue for bool {
    fn to_value(&self) -> Value {
        Value::i64(*self as i64)
    }
}

impl FromValue for bool {
    fn from_value(v: Value) -> bool {
        v.i64_val() != 0
    }
}

// Strings copy through the crate's `Str` wrapper in both directions.
impl ToValue for String {
    fn to_value(&self) -> Value {
        let s = Str::from(self.as_str());
        Value::from(s)
    }
}

impl FromValue for String {
    fn from_value(v: Value) -> String {
        let s = Str::from(v);
        String::from(s.as_str())
    }
}

impl ToValue for () {
    fn to_value(&self) -> Value {
        Value::unit()
    }
}

// Borrows the bytes of the runtime string in place, without copying.
// NOTE(review): unsafe on several axes — assumes the value really is a
// string containing valid UTF-8, and the returned borrow's lifetime is
// unchecked against the value (a moving GC could invalidate it). It also
// creates a `&mut` slice from a value we don't exclusively own. Confirm
// the callers uphold these invariants.
impl FromValue for &str {
    fn from_value(value: Value) -> Self {
        let len = unsafe { crate::core::mlvalues::caml_string_length(value.0) };
        let ptr = string_val!(value.0) as *mut u8;
        unsafe {
            let slice = ::std::slice::from_raw_parts_mut(ptr, len);
            ::std::str::from_utf8_unchecked_mut(slice)
        }
    }
}

impl ToValue for &str {
    fn to_value(&self) -> Value {
        let s = Str::from(*self);
        Value::from(s)
    }
}

// Vec<V> converts element-wise to/from the runtime's array type.
impl<V: ToValue> ToValue for Vec<V> {
    fn to_value(&self) -> Value {
        let tmp: Vec<Value> = self.iter().map(|x| x.to_value()).collect();
        crate::array!(_ tmp).into()
    }
}

impl<V: FromValue> FromValue for Vec<V> {
    fn from_value(v: Value) -> Vec<V> {
        let arr = Array::from(v);
        let mut dst = Vec::with_capacity(arr.len());
        for i in 0..arr.len() {
            dst.push(V::from_value(arr.get(i).unwrap()))
        }
        dst
    }
}
// AoC 2021 day 23 (amphipod organization), solved as a shortest-path search
// over burrow states with Dijkstra's algorithm.
mod dijk;

use crate::BurrowLocation::{Hallway, Room};
use lazy_static::lazy_static;
use std::cmp::{max, min};
use std::collections::HashMap;
use std::fs;

lazy_static! {
    // Goal state: each room filled with its own amphipod kind (part-2 depth 4).
    static ref ORGANIZED_SIDE_ROOMS: [Vec<Amphipod>; 4] = [
        vec![Amphipod::A, Amphipod::A, Amphipod::A, Amphipod::A],
        vec![Amphipod::B, Amphipod::B, Amphipod::B, Amphipod::B],
        vec![Amphipod::C, Amphipod::C, Amphipod::C, Amphipod::C],
        vec![Amphipod::D, Amphipod::D, Amphipod::D, Amphipod::D],
    ];
    // Sample starting rooms from the puzzle text (part-1 depth 2).
    static ref STARTING_TEST_SIDE_ROOMS: [Vec<Amphipod>; 4] = [
        vec![Amphipod::A, Amphipod::B],
        vec![Amphipod::D, Amphipod::C],
        vec![Amphipod::C, Amphipod::B],
        vec![Amphipod::A, Amphipod::D],
    ];
    // Energy per single step of movement, by amphipod kind.
    static ref AMPHIPOD_ENERGY_COSTS: HashMap<Amphipod, usize> = HashMap::from([
        (Amphipod::A, 1),
        (Amphipod::B, 10),
        (Amphipod::C, 100),
        (Amphipod::D, 1000)
    ]);
}

// Which amphipod kind belongs in room 0..=3.
const ORGANIZED_AMPHIPOD_ROOMS: [Amphipod; 4] = [Amphipod::A, Amphipod::B, Amphipod::C, Amphipod::D];
// Hallway cells where an amphipod may stop; cells 2/4/6/8 are room entrances.
const VALID_HALLWAY_IDXS: [usize; 7] = [0, 1, 3, 5, 7, 9, 10];

// (x, y): x is the hallway column, y the depth below the hallway (0 = hallway).
type Position = (isize, isize);

// Every move routes through the hallway (y == 0), so the vertical component
// is ay + by (down one side, up the other), not |ay - by|.
fn manhattan_distance((ax, ay): &Position, (bx, by): &Position) -> usize {
    ((ax - bx).abs() + ay + by) as usize
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
enum Amphipod {
    A,
    B,
    C,
    D,
}

/// Full search state: hallway occupants plus the four side rooms.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Debug)]
struct Burrow {
    hallways: [Option<Amphipod>; 11],
    // room is stack: index 0 is the bottom slot, `last()` is nearest the hallway
    rooms: [Vec<Amphipod>; 4],
    room_size: usize,
}

impl Burrow {
    /// Builds a burrow with an empty hallway; room depth is taken from the
    /// first room, so all four must have equal length.
    fn new(rooms: [Vec<Amphipod>; 4]) -> Self {
        let room_size = rooms[0].len();
        Burrow {
            hallways: Default::default(),
            rooms,
            room_size,
        }
    }

    /// Returns the state that results from `amphipod_move`, leaving `self`
    /// untouched (used to generate Dijkstra neighbors).
    fn apply_move(&self, amphipod_move: &Move) -> Self {
        let mut burrow_clone = self.clone();
        amphipod_move.apply(&mut burrow_clone);
        burrow_clone
    }

    /// Whether the burrow matches the goal state.
    /// NOTE(review): compares against the hard-coded depth-4 goal, so this
    /// only works for room_size == 4 (part 2); `room_is_organized` is the
    /// size-generic per-room check.
    fn is_organized(&self) -> bool {
        self.rooms[..] == ORGANIZED_SIDE_ROOMS[..]
    }

    /// Hallway cells an amphipod exiting `room_idx` can stop in, given the
    /// amphipods already standing in the hallway.
    fn reachable_hallways(&self, room_idx: usize) -> Vec<usize> {
        // Rooms open onto hallway cells 2, 4, 6, 8 — never stoppable cells.
        let room_hallway_idx = 2 + 2 * room_idx;
        assert!(!VALID_HALLWAY_IDXS.contains(&room_hallway_idx));
        // Nearest occupied hallway cell on each side of the room entrance.
        let blocked_left: Option<usize> = (0..room_hallway_idx)
            .rev()
            .find(|i| self.hallways[*i as usize].is_some());
        let blocked_right: Option<usize> =
            ((room_hallway_idx + 1)..11).find(|i| self.hallways[*i as usize].is_some());
        // Convert each blocker into the nearest stoppable cell strictly
        // inside it. The +2/-2 relies on VALID_HALLWAY_IDXS spacing:
        // stoppable cells are two apart except at the edges (0,1 and 9,10),
        // which get special-cased.
        let valid_left = match blocked_left {
            None => 0, // will never be blocked passed 7 to the left
            Some(blocked_hallway_idx) => {
                if blocked_hallway_idx == 0 {
                    1
                } else {
                    blocked_hallway_idx + 2
                }
            }
        };
        let valid_right = match blocked_right {
            None => 10, // will never be blocked passed 3 to the right
            Some(blocked_hallway_idx) => {
                if blocked_hallway_idx == 10 {
                    9
                } else {
                    blocked_hallway_idx - 2
                }
            }
        };
        // Keep the stoppable cells inside [valid_left, valid_right],
        // excluding the entrance itself.
        VALID_HALLWAY_IDXS
            .iter()
            .filter(|&&i| {
                (i >= valid_left && i < room_hallway_idx) || (i > room_hallway_idx && i <= valid_right)
            })
            .copied()
            .collect()
    }

    /// Whether `room_idx` is completely filled with its own amphipod kind
    /// (works for any room_size).
    fn room_is_organized(&self, room_idx: usize) -> bool {
        let expected_amphipod = ORGANIZED_AMPHIPOD_ROOMS[room_idx];
        let room = self.rooms.get(room_idx).unwrap();
        room.len() == self.room_size && room.iter().all(|a| a == &expected_amphipod)
    }

    /// All legal single moves from this state: the top amphipod of each
    /// unorganized room may move to any reachable hallway cell or directly
    /// into its home room, and each hallway amphipod may move into its home
    /// room. Hallway-to-hallway moves are never generated (puzzle rule).
    fn moves(&self) -> Vec<Move> {
        let mut valid_next_moves: Vec<Move> = vec![];
        for unorganized_room_idx in (0..4).filter(|&i| !self.room_is_organized(i)) {
            let from_location = Room(unorganized_room_idx);
            let unorganized_room = self.rooms.get(unorganized_room_idx).unwrap();
            if let Some(moving_amphipod) = unorganized_room.last() {
                // room to hallway
                let reachable_hallway_idxs = self.reachable_hallways(unorganized_room_idx);
                let hallway_moves: Vec<Move> = reachable_hallway_idxs
                    .iter()
                    .map(|hidx| Move {
                        from: from_location,
                        to: Hallway(*hidx),
                    })
                    .collect();
                valid_next_moves.extend(hallway_moves);
                // room to room
                let destination_room_idx = ORGANIZED_AMPHIPOD_ROOMS
                    .iter()
                    .position(|organized_amph| organized_amph == moving_amphipod)
                    .unwrap();
                // if you're in the right room, no need to move
                if destination_room_idx == unorganized_room_idx {
                    continue;
                }
                let destination_room = self.rooms.get(destination_room_idx).unwrap();
                // path must not be blocked and destination room must have space and not contain other types of amphipods
                let unorganized_room_hallway_idx = 2 + 2 * unorganized_room_idx;
                let destination_room_hallway_idx = 2 + 2 * destination_room_idx;
                let min_hidx = min(unorganized_room_hallway_idx, destination_room_hallway_idx);
                let max_hidx = max(unorganized_room_hallway_idx, destination_room_hallway_idx);
                let path_to_dest_room_blocked = self
                    .hallways
                    .iter()
                    .enumerate()
                    .filter_map(|(hallway_idx, amph)| amph.map(|_| (hallway_idx)))
                    .any(|hallway_idx| hallway_idx > min_hidx && hallway_idx < max_hidx);
                if !path_to_dest_room_blocked
                    && destination_room.len() < self.room_size
                    && destination_room.iter().all(|a| a == moving_amphipod)
                {
                    valid_next_moves.push(Move {
                        from: from_location,
                        to: Room(destination_room_idx),
                    })
                }
            }
        }
        // hallway to room
        for (current_hallway_idx, moving_amphipod) in self
            .hallways
            .iter()
            .enumerate()
            .filter_map(|(hidx, amph)| amph.map(|a| (hidx, a)))
        {
            let destination_room_idx = ORGANIZED_AMPHIPOD_ROOMS
                .iter()
                .position(|organized_amph| organized_amph == &moving_amphipod)
                .unwrap();
            let destination_room = self.rooms.get(destination_room_idx).unwrap();
            // path must not be blocked and destination room must have space and not contain other types of amphipods
            let destination_room_hallway_idx = 2 + 2 * destination_room_idx;
            let min_hidx = min(current_hallway_idx, destination_room_hallway_idx);
            let max_hidx = max(current_hallway_idx, destination_room_hallway_idx);
            let path_to_dest_room_blocked = self
                .hallways
                .iter()
                .enumerate()
                .filter_map(|(hidx, amph)| amph.map(|_| hidx))
                .any(|hallway_idx| hallway_idx > min_hidx && hallway_idx < max_hidx);
            if !path_to_dest_room_blocked
                && destination_room.len() < self.room_size
                && destination_room.iter().all(|a| a == &moving_amphipod)
            {
                valid_next_moves.push(Move {
                    from: Hallway(current_hallway_idx),
                    to: Room(destination_room_idx),
                })
            }
        }
        valid_next_moves
    }

    /// ASCII rendering of the burrow in the puzzle's diagram style.
    /// NOTE(review): hard-coded for room_size == 4 (the `r.len() < 4` checks
    /// and four printed rows), and the middle rows use "###...###" framing
    /// where the puzzle art uses "  #...#" — cosmetic only.
    fn print(&self) {
        let amph_to_char = |a: &Amphipod| match a {
            Amphipod::A => 'A',
            Amphipod::B => 'B',
            Amphipod::C => 'C',
            Amphipod::D => 'D',
        };
        println!("#############");
        let hallway_str: String = self
            .hallways
            .iter()
            .map(|amph| match amph {
                None => '.',
                Some(a) => amph_to_char(a),
            })
            .collect();
        println!("#{}#", hallway_str);
        // Rooms are stacks (index 0 = bottom), so slot k of each row is read
        // with `r.get(k)` guarded by the current fill level.
        let top_slots: Vec<char> = self
            .rooms
            .iter()
            .map(|r| {
                if r.len() < 4 {
                    '.'
                } else {
                    amph_to_char(r.get(3).unwrap())
                }
            })
            .collect();
        let third_slots: Vec<char> = self
            .rooms
            .iter()
            .map(|r| {
                if r.len() < 3 {
                    '.'
                } else {
                    amph_to_char(r.get(2).unwrap())
                }
            })
            .collect();
        let second_slots: Vec<char> = self
            .rooms
            .iter()
            .map(|r| {
                if r.len() < 2 {
                    '.'
                } else {
                    amph_to_char(r.get(1).unwrap())
                }
            })
            .collect();
        let bottom_slots: Vec<char> = self
            .rooms
            .iter()
            .map(|r| {
                if r.is_empty() {
                    '.'
                } else {
                    amph_to_char(r.get(0).unwrap())
                }
            })
            .collect();
        println!(
            "###{}#{}#{}#{}###",
            top_slots[0], top_slots[1], top_slots[2], top_slots[3]
        );
        println!(
            "###{}#{}#{}#{}###",
            third_slots[0], third_slots[1], third_slots[2], third_slots[3]
        );
        println!(
            "###{}#{}#{}#{}###",
            second_slots[0], second_slots[1], second_slots[2], second_slots[3]
        );
        println!(
            "  #{}#{}#{}#{}#",
            bottom_slots[0], bottom_slots[1], bottom_slots[2], bottom_slots[3]
        );
        println!("  #########");
    }
}

#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Debug)]
enum BurrowLocation {
    Hallway(usize),
    Room(usize),
}

impl BurrowLocation {
    /// A location an amphipod may legally occupy: any hallway cell except
    /// the four room entrances (2/4/6/8), or a room index below 4.
    fn valid(&self) -> bool {
        match self {
            BurrowLocation::Hallway(i) => ![2, 4, 6, 8].contains(i),
            BurrowLocation::Room(i) => i < &4,
        }
    }

    /// Whether the location can accept another amphipod.
    fn is_open(&self, burrow: &Burrow) -> bool {
        match self {
            BurrowLocation::Hallway(i) => burrow.hallways[*i].is_none(),
            BurrowLocation::Room(i) => burrow.rooms[*i].len() < burrow.room_size,
        }
    }

    /// The amphipod that would move out of this location next: the hallway
    /// occupant, or the top of the room's stack.
    fn get_amphipod(&self, burrow: &Burrow) -> Option<Amphipod> {
        match self {
            BurrowLocation::Hallway(i) => burrow.hallways[*i],
            BurrowLocation::Room(i) => burrow.rooms[*i].last().copied(),
        }
    }

    /// Coordinates of the movable amphipod at this location. For a room
    /// holding `len` amphipods the top one sits at depth
    /// room_size + 1 - len below the hallway.
    fn position(&self, burrow: &Burrow) -> Position {
        match self {
            BurrowLocation::Hallway(i) => (*i as isize, 0),
            // 0 => 2, 1 => 4, 2 => 6, 3 => 8
            BurrowLocation::Room(i) => (
                (2 + 2 * i) as isize,
                ((burrow.room_size + 1) - burrow.rooms[*i].len()) as isize,
            ),
        }
    }
}

// Note that movement from and to the same Position variant is disallowed.
// E.g. moving from one hallway location to another, since amphipods will only move out of their rooms once before spending their other move going into a room.
#[derive(PartialOrd, Ord, PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Move {
    from: BurrowLocation,
    to: BurrowLocation,
}

impl Move {
    /// Structural legality: both endpoints valid, destination open, and not
    /// a hallway-to-hallway move.
    fn is_valid(&self, burrow: &Burrow) -> bool {
        let hallway_to_hallway = matches!((self.from, self.to), (Hallway(_), Hallway(_)));
        self.from.valid() && self.to.valid() && self.to.is_open(burrow) && !hallway_to_hallway
    }

    /// Energy cost of the move: the amphipod's per-step cost times walking
    /// distance. `to_pos` is computed inline rather than via `position()`
    /// because the destination depth is that of the first *empty* slot
    /// (room_size - len), one shallower than where `position()` places the
    /// current top occupant.
    fn cost(&self, burrow: &Burrow) -> usize {
        let amphipod = self.from.get_amphipod(burrow).unwrap();
        let from_pos = self.from.position(burrow);
        let to_pos = match self.to {
            BurrowLocation::Hallway(i) => (i as isize, 0),
            // 0 => 2, 1 => 4, 2 => 6, 3 => 8
            BurrowLocation::Room(i) => (
                (2 + 2 * i) as isize,
                (burrow.room_size - burrow.rooms[i].len()) as isize,
            ),
        };
        AMPHIPOD_ENERGY_COSTS[&amphipod] * manhattan_distance(&from_pos, &to_pos)
    }

    /// Mutates `burrow` in place: pops/clears the amphipod at `from` and
    /// deposits it at `to`. Panics if the move is not valid.
    fn apply(&self, burrow: &mut Burrow) {
        if !self.is_valid(burrow) {
            panic!("AAAAAAAAAA");
        }
        let amphipod = match self.from {
            Hallway(i) => {
                let amph = burrow.hallways[i];
                burrow.hallways[i] = None;
                amph
            }
            Room(i) => burrow.rooms.get_mut(i).unwrap().pop(),
        }
        .unwrap();
        match self.to {
            Hallway(i) => {
                assert!(burrow.hallways[i].is_none());
                burrow.hallways[i] = Some(amphipod);
            }
            Room(i) => burrow.rooms.get_mut(i).unwrap().push(amphipod),
        };
    }
}

fn main() {
    let filename = "input/part2_input.txt";
    let rooms = parse_input_file(filename);
    let burrow = Burrow::new(rooms);
    println!("burrow: {:?}", burrow);
    println!();
    // Dijkstra over burrow states: neighbors are all legal single moves,
    // edge weight is each move's energy cost.
    let get_neighbors = |burrow: &Burrow| {
        burrow
            .moves()
            .iter()
            .map(|mv| {
                (burrow.apply_move(mv), mv.cost(burrow))
            })
            .collect()
    };
    let is_finished = |burrow: &Burrow| burrow.is_organized();
    let dijkstra_solver = dijk::Dijkstra::new(Box::new(get_neighbors), Box::new(is_finished));
    if let Some((path, cost)) = dijkstra_solver.shortest_path(burrow) {
        println!("cost: {:?}", cost);
        for (b, bc) in path {
            b.print();
            println!("cost to move: {}", bc);
            println!();
        }
    } else {
        println!("not solvable");
    }
}

/// Parses the puzzle diagram: skips the two header lines, reads the four
/// amphipod rows top-to-bottom at columns 3/5/7/9, and stacks each room
/// bottom-first (row 3 becomes index 0, the bottom slot).
/// NOTE(review): hard-coded to 4 rows (part-2 input), and `ts`/`bs` below
/// are computed but never used.
fn parse_input_file(filename: &str) -> [Vec<Amphipod>; 4] {
    let file_contents = fs::read_to_string(filename).unwrap();
    let amphipod_strs: Vec<_> = file_contents
        .split('\n')
        .skip(2)
        .take(4)
        .map(|l| {
            (3..=9)
                .step_by(2)
                .map(|n| match &l[n..=n] {
                    "A" => Amphipod::A,
                    "B" => Amphipod::B,
                    "C" => Amphipod::C,
                    "D" => Amphipod::D,
                    x => panic!("unknown amphipod: {}", x),
                })
                .collect::<Vec<_>>()
        })
        .collect();
    assert_eq!(amphipod_strs.len(), 4);
    assert_eq!(amphipod_strs.get(0).unwrap().len(), 4);
    assert_eq!(amphipod_strs.get(1).unwrap().len(), 4);
    let ts = &amphipod_strs[0];
    let bs = &amphipod_strs[1];
    [
        vec![
            amphipod_strs[3][0],
            amphipod_strs[2][0],
            amphipod_strs[1][0],
            amphipod_strs[0][0],
        ],
        vec![
            amphipod_strs[3][1],
            amphipod_strs[2][1],
            amphipod_strs[1][1],
            amphipod_strs[0][1],
        ],
        vec![
            amphipod_strs[3][2],
            amphipod_strs[2][2],
            amphipod_strs[1][2],
            amphipod_strs[0][2],
        ],
        vec![
            amphipod_strs[3][3],
            amphipod_strs[2][3],
            amphipod_strs[1][3],
            amphipod_strs[0][3],
        ],
    ]
}

#[cfg(test)]
mod tests {
    use super::*;

    const EMPTY_HALLWAY: [Option<Amphipod>; 11] = [None; 11];

    #[test]
    fn test_burrow_is_organized() {
        let tests: HashMap<[Vec<Amphipod>; 4], bool> = HashMap::from([
            (STARTING_TEST_SIDE_ROOMS.clone(), false),
            (ORGANIZED_SIDE_ROOMS.clone(), true),
        ]);
        for (rooms, expected_result) in tests {
            let burrow = Burrow::new(rooms);
            assert_eq!(burrow.is_organized(), expected_result);
        }
    }

    #[test]
    fn
test_reachable_hallways() { // open hallway for i in 0..=3 { assert_eq!( Burrow::new(STARTING_TEST_SIDE_ROOMS.clone()).reachable_hallways(i), VALID_HALLWAY_IDXS.to_vec() ); } // one hallway occupied in middle let mut occupied_middle_hallway = EMPTY_HALLWAY; occupied_middle_hallway[5] = Some(Amphipod::B); let burrow_with_occupied_middle_hallway = Burrow { hallways: occupied_middle_hallway, rooms: STARTING_TEST_SIDE_ROOMS.clone(), room_size: 2, }; for i in 0..=1 { assert_eq!( burrow_with_occupied_middle_hallway.reachable_hallways(i), [0, 1, 3] ); } for i in 2..=3 { assert_eq!( burrow_with_occupied_middle_hallway.reachable_hallways(i), [7, 9, 10] ); } // room has no valid hallways let mut occupied_hallway_around_first_room = EMPTY_HALLWAY; occupied_hallway_around_first_room[1] = Some(Amphipod::A); occupied_hallway_around_first_room[3] = Some(Amphipod::A); let burrow_with_no_first_room_moves = Burrow { hallways: occupied_hallway_around_first_room, rooms: STARTING_TEST_SIDE_ROOMS.clone(), room_size: 2, }; assert_eq!(burrow_with_no_first_room_moves.reachable_hallways(0), []); } #[test] fn test_generate_moves() { let from_rooms_to_hallways: Vec<Move> = (0..4) .flat_map(|ri| { VALID_HALLWAY_IDXS .iter() .map(|hi| Move { from: Room(ri), to: Hallway(*hi), }) .collect::<Vec<_>>() }) .collect(); let tests = HashMap::from([(STARTING_TEST_SIDE_ROOMS.clone(), from_rooms_to_hallways)]); for (rooms, expected_moves) in tests { let burrow = Burrow::new(rooms); assert_eq!(burrow.moves(), expected_moves); } } }
pub use executor::Executor;
pub use translate_and_validate::TranslateAndValidate;

mod executor;
mod translate_and_validate;

// Notes on Query Optimization
// -- See the optimization-skeleton branch for an example
// implementation of an optimization flow based on SimpleDb
//
// Query optimization would likely be done using a module whose
// functions are run in-between calls to TranslateAndValidate and
// Executor.
//
// In server.rs, calling the optimization functions might look something like this:
// server::DBServer::run_query() {
//     ...
//     Some(Some(db)) => {
//         let lp = TranslateAndValidate::from_sql(query, db)?;
//         let annotated_lp = Optimizer::new(lp, db);
//         Ok(Executor::new(db, &annotated_lp)?.execute())
//     ...
// }
//
// Putting optimization there would give optimization functions access to
// both the catalog and logical plan (currently a graph of logical relations).
//
// Actually writing the optimizer would likely involve processing a
// common-old::logical_plan::LogicalPlan. The function
// executor::Executor::logical_plan_to_op_iterator gives an example of
// recursively processing a logical plan graph.
//
// Alternatively, SimpleDb does query optimization by processing lists of logical
// operators.
//
// If the SimpleDb approach ends up being easier, the branch
// *** optimization-skeleton *** refactors the current flow so that
// TranslateAndValidate produces lists of logical operators, rather
// than a graph. Then, a separate function on the branch (inside
// optimizer.rs) turns the lists of operators into a graph, similar
// to the way SimpleDb's LogicalPlan.physicalPlan() function works.
//
// Note: the Executor expects a logical plan as input, so the
// optimizer would have to output a graph-based annotated structure
// similar to a logical plan in order to process the plan using the
// current version of the executor. Otherwise, some of the executor
// may need to be rewritten to accommodate whatever new intermediate
// representation is chosen. Hopefully, though, it will be easy to
// just create an annotated version of the LogicalPlan struct that the
// executor can process almost exactly like it currently processes
// LogicalPlans.
//! # extended-collections-rs
//! [![extended-collections](http://meritbadge.herokuapp.com/extended-collections)](https://crates.io/crates/extended-collections)
//! [![Documentation](https://docs.rs/extended-collections/badge.svg)](https://docs.rs/extended-collections)
//! [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
//! [![Build Status](https://travis-ci.org/jeffrey-xiao/extended-collections-rs.svg?branch=master)](https://travis-ci.org/jeffrey-xiao/extended-collections-rs)
//! [![codecov](https://codecov.io/gh/jeffrey-xiao/extended-collections-rs/branch/master/graph/badge.svg)](https://codecov.io/gh/jeffrey-xiao/extended-collections-rs)
//!
//! `extended-collections` contains various implementations of collections that are not found in the standard library.
//!
//! ## Usage
//! Add this to your `Cargo.toml`:
//! ```toml
//! [dependencies]
//! extended-collections = "*"
//! ```
//! and this to your crate root:
//! ```rust
//! extern crate extended_collections;
//! ```
//!
//! ## References
//! - [Fast set operations using treaps](https://dl.acm.org/citation.cfm?id=277660)
//! > Blelloch, Guy E., and Margaret Reid-Miller. 1998. "Fast Set Operations Using Treaps." In *Proceedings of the Tenth Annual ACM Symposium on Parallel Algorithms and Architectures*, 16-26. SPAA '98. New York, NY, USA: ACM. doi:[10.1145/277651.277660](https://doi.org/10.1145/277651.277660).
//! - [A Skip List Cookbook.](https://dl.acm.org/citation.cfm?id=93711)
//! > Pugh, William. 1990a. "A Skip List Cookbook." College Park, MD, USA: University of Maryland at College Park.
//! - [Skip Lists: A Probabilistic Alternative to Balanced Trees](https://dl.acm.org/citation.cfm?id=78977)
//! > Pugh, William. 1990b. "Skip Lists: A Probabilistic Alternative to Balanced Trees." *Commun. ACM* 33 (6). New York, NY, USA: ACM: 668-76. doi:[10.1145/78973.78977](https://doi.org/10.1145/78973.78977).

#![warn(missing_docs)]

extern crate bincode;
extern crate byteorder;
extern crate crossbeam_epoch as epoch;
extern crate probabilistic_collections;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;

// Private module; not part of the public API.
mod entry;

pub mod arena;
pub mod avl_tree;
pub mod bp_tree;
pub mod lsm_tree;
pub mod radix;
pub mod skiplist;
pub mod sync;
pub mod treap;
#[cfg(feature = "client")] mod client; mod server; mod common;
//! This project is used for explaining the DTFSE (Discrete-Time Fourier
//! Series Expansion) operation. Here, we have a periodic triangle signal
//! (note: despite earlier revisions mentioning a square wave, the code
//! below generates a triangle wave). The signal is mapped into complex
//! form, DTFSE coefficients are calculated with a 16-point complex FFT,
//! and then the signal is approximated with the DTFSE function using an
//! increasing number of coefficients. The function returns its output in
//! real form, because the original signal has only real parts in this
//! example.
//!
//! Runs entirely locally without hardware. Rounding might be different
//! than on device. Except for when printing, you must be vigilant not to
//! become reliant on any std tools that can't otherwise port over to
//! no_std without alloc.
//!
//! `cargo run --example 4_9`

use textplots::{Chart, Plot, Shape};

use core::f32::consts::PI;
use itertools::Itertools;
use microfft::{complex::cfft_16, Complex32};
use typenum::Unsigned;

type N = heapless::consts::U16;

const TRIANGLE_AMPLITUDE: f32 = 1.5;
const TRIANGLE_PERIOD: usize = 16;

fn main() {
    // Build one period of a triangle wave: a rising ramp over the first
    // half-period, a falling ramp over the second. Collecting turns the
    // Cycle into a clean iterator for our naive display fn.
    let triangle = (0..TRIANGLE_PERIOD)
        .map(|n| {
            let period = TRIANGLE_PERIOD as f32;
            if n < (TRIANGLE_PERIOD / 2) {
                (2.0 * TRIANGLE_AMPLITUDE / (period / 2.0)) * n as f32 - TRIANGLE_AMPLITUDE
            } else {
                -(2.0 * TRIANGLE_AMPLITUDE / (period / 2.0)) * (n as f32 - period / 2.0)
                    + TRIANGLE_AMPLITUDE
            }
        })
        .cycle()
        .take(N::to_usize())
        .collect::<heapless::Vec<f32, N>>();
    display::<N, _>("triangle signal", triangle.iter().cloned());

    // Map the real samples to complex form, leaving the imaginary parts
    // blank; the in-place FFT below fills them in.
    let mut dtfsecoef = triangle
        .iter()
        .cloned()
        .map(|f| Complex32 { re: f, im: 0.0 })
        .collect::<heapless::Vec<Complex32, N>>();

    // Coefficient calculation with a CFFT function.
    // arm_cfft_f32 uses a forward transform with bit reversal of the
    // output enabled; here we use microfft's in-place radix-2 FFT
    // instead. For some reason it returns (a reference to) itself,
    // which we don't need.
    let _ = cfft_16(&mut dtfsecoef[..]);
    println!("dtfsecoef: {:?}", &dtfsecoef[..]);

    // DTFSE to reclaim our original signal. With only 2 coefficients
    // this is a coarse approximation of the triangle wave.
    let y_real = dtfse::<N, _>(dtfsecoef.iter().cloned(), 2).collect::<heapless::Vec<f32, N>>();
    display::<N, _>("y_real 2", y_real.iter().cloned());

    // A bit better with 5.
    let y_real = dtfse::<N, _>(dtfsecoef.iter().cloned(), 5).collect::<heapless::Vec<f32, N>>();
    display::<N, _>("y_real 5", y_real.iter().cloned());

    // Good with 8.
    let y_real = dtfse::<N, _>(dtfsecoef.iter().cloned(), 8).collect::<heapless::Vec<f32, N>>();
    display::<N, _>("y_real 8", y_real.iter().cloned());

    // Good with all 16 coefficients.
    let y_real = dtfse::<N, _>(dtfsecoef.iter().cloned(), 15).collect::<heapless::Vec<f32, N>>();
    display::<N, _>("y_real 15", y_real.iter().cloned());
}

/// Discrete-Time Fourier Series Expansion: reconstructs `N` real samples
/// from the first `k_var + 1` complex coefficients of `coeff`. Each
/// coefficient contributes a cosine with its magnitude and phase; the
/// contributions are summed and normalized by the transform size.
fn dtfse<N: Unsigned, I: Iterator<Item = Complex32> + Clone>(
    coeff: I,
    k_var: usize,
) -> impl Iterator<Item = f32> {
    let size = N::to_usize() as f32;
    (0..N::to_usize()).map(move |n| {
        coeff
            .clone()
            .take(k_var + 1)
            .enumerate()
            .map(|(k, complex)| {
                // Magnitude and phase of the k-th coefficient.
                let a = (complex.re * complex.re + complex.im * complex.im).sqrt();
                let p = complex.im.atan2(complex.re);
                a * ((2.0 * PI * k as f32 * n as f32 / size) + p).cos() / size
            })
            .sum::<f32>()
    })
}

// Points isn't a great representation as you can lose the line in the
// graph; however, while Lines occasionally looks good, it also can be
// terrible. Continuous requires a fn-pointer closure which can't capture
// any external data, so it's not useful without lots of code duplication.
fn display<N, I>(name: &str, input: I)
where
    N: Unsigned,
    I: Iterator<Item = f32> + core::clone::Clone + std::fmt::Debug,
{
    println!("{:?}: {:.4?}", name, input.clone().format(", "));
    let display = input
        .enumerate()
        .map(|(idx, y)| (idx as f32, y))
        .collect::<Vec<(f32, f32)>>();
    Chart::new(120, 60, 0.0, N::to_usize() as f32)
        .lineplot(Shape::Points(&display[..]))
        .display();
}
use specs::Join;
use std::f32::consts::PI;

/// System that refreshes the world transform of every dynamic drawable
/// each frame, orients the hook chain toward its anchor, and applies
/// reducer-driven shrinking to drawables that have a `Reducer` component.
pub struct UpdateDynamicDrawEraserSystem;

impl<'a> ::specs::System<'a> for UpdateDynamicDrawEraserSystem {
    type SystemData = (
        ::specs::ReadStorage<'a, ::component::Aim>,
        ::specs::ReadStorage<'a, ::component::PhysicBody>,
        ::specs::ReadStorage<'a, ::component::Hook>,
        ::specs::WriteStorage<'a, ::component::DynamicGraphicsAssets>,
        ::specs::WriteStorage<'a, ::component::DynamicDraw>,
        ::specs::WriteStorage<'a, ::component::Reducer>,
        ::specs::Fetch<'a, ::resource::PhysicWorld>,
    );

    fn run(
        &mut self,
        (
            aims,
            bodies,
            hooks,
            mut dynamic_graphics_assets,
            mut dynamic_draws,
            mut reducers,
            physic_world,
        ): Self::SystemData,
    ) {
        // Propagate each physic body's current position into its graphics
        // assets' world transform.
        for (assets, body) in (&mut dynamic_graphics_assets, &bodies).join() {
            let mut trans = body.get(&physic_world).position() * assets.primitive_trans;
            assets.world_trans = ::graphics::shader::draw1_vs::ty::World {
                world: trans.unwrap().into(),
            }
        }

        // Orient each anchored hook chain from its anchor toward the body.
        for (hook, body, aim) in (&hooks, &bodies, &aims).join() {
            if let Some(ref anchor) = hook.anchor {
                // Hook attachment point in body-local space, rotated into
                // world space by the aim rotation and offset from the
                // body's translation.
                let body_hook_local_pos = ::na::Vector3::new(0.0, 0.2, -0.2);
                let hook_body_pos = body.get(&physic_world).position().translation.vector
                    + aim.rotation * body_hook_local_pos;
                // Vector from the anchor to the attachment point.
                let aimto = hook_body_pos - anchor.pos;
                let assets = dynamic_graphics_assets.get_mut(hook.draw).unwrap();
                // Place the chain at the anchor, rotated so its local +Y
                // axis points along `aimto`.
                let trans = ::na::Isometry3::from_parts(
                    ::na::Translation::from_vector(anchor.pos),
                    ::na::UnitQuaternion::rotation_between(&::na::Vector3::new(0.0, 1.0, 0.0), &aimto).unwrap(),
                ) * assets.primitive_trans;
                assets.world_trans = ::graphics::shader::draw1_vs::ty::World {
                    world: trans.unwrap().into(),
                };
                // Because we don't want to see the end of the chain, we
                // don't draw it when it would be viewable: the chain is
                // only drawn when the angle between the aim's local +X
                // direction and `aimto` exceeds PI/3.
                let angle = ::na::UnitQuaternion::rotation_between(&(aim.rotation * ::na::Vector3::new(1.0, 0.0, 0.0)), &aimto).unwrap().angle();
                if angle > PI / 3.0 {
                    dynamic_draws.insert(hook.draw, ::component::DynamicDraw);
                } else {
                    dynamic_draws.remove(hook.draw);
                }
            } else {
                // Unanchored hooks are never drawn.
                dynamic_draws.remove(hook.draw);
            }
        }

        // Shrink reducer-controlled drawables: the radius scales down
        // linearly as the reducer's timer approaches its duration, on
        // each axis the reducer flags for reduction.
        for (assets, reducer) in (&mut dynamic_graphics_assets, &mut reducers).join() {
            let radius = 1.0 - (reducer.timer / reducer.duration);
            let x_radius = if reducer.reduce_x { radius } else { 1.0 };
            let y_radius = if reducer.reduce_y { radius } else { 1.0 };
            let z_radius = if reducer.reduce_z { radius } else { 1.0 };
            let trans = assets.primitive_trans * ::graphics::resizer(x_radius, y_radius, z_radius);
            assets.world_trans = ::graphics::shader::draw1_vs::ty::World {
                world: trans.unwrap().into(),
            };
        }
    }
}
//! The module contains a list of helpers for [`IntoRecords`] //! //! [`IntoRecords`]: crate::grid::records::IntoRecords pub mod limit_column_records; pub mod limit_row_records; #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub mod buf_records; #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub mod either_string; #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub mod truncate_records; #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub use buf_records::{BufColumns, BufRows}; #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] pub use truncate_records::TruncateContent; pub use limit_column_records::LimitColumns; pub use limit_row_records::LimitRows;
extern crate bulletrs;
extern crate cgmath;

use cgmath::{Vector3, Vector4};

use bulletrs::*;

/// Raycasts against two static spheres: the closest-hit query must stop
/// at the first sphere, the all-hits query must report both, and removing
/// a body must flip its `removed` flag.
#[test()]
fn ray_test() {
    // Zero-gravity discrete world so the spheres stay exactly where placed.
    let configuration = CollisionConfiguration::new_default();
    let mut dynamics_world = DynamicsWorld::new_discrete_world(
        CollisionDispatcher::new(&configuration),
        Broadphase::new(BroadphaseInterface::DbvtBroadphase),
        ConstraintSolver::new(),
        configuration,
    );
    dynamics_world.set_gravity(Vector3::new(0.0, 0.0, 0.0));

    // Two unit spheres of mass 0.1, placed symmetrically on the x axis.
    for x in &[-4.0, 4.0] {
        let shape = Shape::new_sphere(1.0);
        let mass = 0.1;
        let inertia = shape.calculate_local_inertia(mass);
        let body = RigidBody::new(
            mass,
            inertia,
            shape,
            Vector3::new(*x, 0.0, 0.0),
            Vector4::new(0.0, 0.0, 0.0, 1.0),
        );
        dynamics_world.add_rigid_body(body);
    }

    // Step the simulation; nothing should move with gravity disabled.
    for _ in 0..500 {
        dynamics_world.step_simulation(0.1, 5, 1.0 / 60.0);
    }

    // Closest-hit ray across the x axis: it enters the first sphere at
    // x = -5, a quarter of the ray's 20-unit length.
    let closest = dynamics_world.raytest(ClosestRayResultCallback::new(
        Vector3::new(-10.0, 0.0, 0.0),
        Vector3::new(10.0, 0.0, 0.0),
    ));
    assert_eq!(closest.closest_hit_fraction(), 0.25);
    assert_eq!(closest.intersections().len(), 1);

    // All-hits ray along the same segment must see both spheres.
    let all = dynamics_world.raytest(AllRayResultCallback::new(
        Vector3::new(-10.0, 0.0, 0.0),
        Vector3::new(10.0, 0.0, 0.0),
    ));
    assert_eq!(all.intersections().len(), 2);

    // Removing each hit body flips its `removed` flag from false to true.
    for hit in all.intersections() {
        assert_eq!(hit.rigidbody().as_ref().unwrap().removed(), false);
        dynamics_world.remove_body(hit.rigidbody().as_ref().unwrap());
        assert_eq!(hit.rigidbody().as_ref().unwrap().removed(), true);
    }
}
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // // Portions Copyright 2017 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. use epoll; use libc::EAGAIN; use libc::EFD_NONBLOCK; use std::cmp; use std::io::Read; use std::io::{self, Write}; use std::mem; use std::net::Ipv4Addr; use std::os::unix::io::{AsRawFd, RawFd}; use std::result; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, RwLock}; use std::thread; use std::vec::Vec; use net_gen; use super::Error as DeviceError; use super::{ ActivateError, ActivateResult, DeviceEventT, Queue, VirtioDevice, VirtioDeviceType, VirtioInterruptType, }; use crate::VirtioInterrupt; use net_util::{MacAddr, Tap, TapError, MAC_ADDR_LEN}; use virtio_bindings::bindings::virtio_net::*; use vm_device::{Migratable, MigratableError, Pausable, Snapshotable}; use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; use vmm_sys_util::eventfd::EventFd; /// The maximum buffer size when segmentation offload is enabled. This /// includes the 12-byte virtio net header. /// http://docs.oasis-open.org/virtio/virtio/v1.0/virtio-v1.0.html#x1-1740003 const MAX_BUFFER_SIZE: usize = 65562; const QUEUE_SIZE: u16 = 256; const NUM_QUEUES: usize = 2; const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES]; // A frame is available for reading from the tap device to receive in the guest. const RX_TAP_EVENT: DeviceEventT = 0; // The guest has made a buffer available to receive a frame into. const RX_QUEUE_EVENT: DeviceEventT = 1; // The transmit queue has a frame that is ready to send from the guest. const TX_QUEUE_EVENT: DeviceEventT = 2; // The device has been dropped. pub const KILL_EVENT: DeviceEventT = 3; // Number of DeviceEventT events supported by this implementation. pub const NET_EVENTS_COUNT: usize = 4; // The device should be paused. 
const PAUSE_EVENT: DeviceEventT = 5; #[derive(Debug)] pub enum Error { /// Open tap device failed. TapOpen(TapError), /// Setting tap IP failed. TapSetIp(TapError), /// Setting tap netmask failed. TapSetNetmask(TapError), /// Setting tap interface offload flags failed. TapSetOffload(TapError), /// Setting vnet header size failed. TapSetVnetHdrSize(TapError), /// Enabling tap interface failed. TapEnable(TapError), } pub type Result<T> = result::Result<T, Error>; struct TxVirtio { queue_evt: EventFd, queue: Queue, iovec: Vec<(GuestAddress, usize)>, frame_buf: [u8; MAX_BUFFER_SIZE], } impl TxVirtio { fn new(queue: Queue, queue_evt: EventFd) -> Self { let tx_queue_max_size = queue.get_max_size() as usize; TxVirtio { queue_evt, queue, iovec: Vec::with_capacity(tx_queue_max_size), frame_buf: [0u8; MAX_BUFFER_SIZE], } } } struct RxVirtio { queue_evt: EventFd, deferred_frame: bool, deferred_irqs: bool, queue: Queue, bytes_read: usize, frame_buf: [u8; MAX_BUFFER_SIZE], } impl RxVirtio { fn new(queue: Queue, queue_evt: EventFd) -> Self { RxVirtio { queue_evt, deferred_frame: false, deferred_irqs: false, queue, bytes_read: 0, frame_buf: [0u8; MAX_BUFFER_SIZE], } } } fn vnet_hdr_len() -> usize { mem::size_of::<virtio_net_hdr_v1>() } struct NetEpollHandler { mem: Arc<RwLock<GuestMemoryMmap>>, tap: Tap, rx: RxVirtio, tx: TxVirtio, interrupt_cb: Arc<VirtioInterrupt>, kill_evt: EventFd, pause_evt: EventFd, epoll_fd: RawFd, rx_tap_listening: bool, } impl NetEpollHandler { fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> { (self.interrupt_cb)(&VirtioInterruptType::Queue, Some(queue)).map_err(|e| { error!("Failed to signal used queue: {:?}", e); DeviceError::FailedSignalingUsedQueue(e) }) } // Copies a single frame from `self.rx.frame_buf` into the guest. Returns true // if a buffer was used, and false if the frame must be deferred until a buffer // is made available by the driver. 
fn rx_single_frame(&mut self) -> bool { let mem = self.mem.read().unwrap(); let mut next_desc = self.rx.queue.iter(&mem).next(); if next_desc.is_none() { // Queue has no available descriptors if self.rx_tap_listening { self.unregister_tap_rx_listener().unwrap(); self.rx_tap_listening = false; } return false; } // We just checked that the head descriptor exists. let head_index = next_desc.as_ref().unwrap().index; let mut write_count = 0; // Copy from frame into buffer, which may span multiple descriptors. loop { match next_desc { Some(desc) => { if !desc.is_write_only() { break; } let limit = cmp::min(write_count + desc.len as usize, self.rx.bytes_read); let source_slice = &self.rx.frame_buf[write_count..limit]; let write_result = mem.write_slice(source_slice, desc.addr); match write_result { Ok(_) => { write_count = limit; } Err(e) => { error!("Failed to write slice: {:?}", e); break; } }; if write_count >= self.rx.bytes_read { break; } next_desc = desc.next_descriptor(); } None => { warn!("Receiving buffer is too small to hold frame of current size"); break; } } } self.rx.queue.add_used(&mem, head_index, write_count as u32); // Mark that we have at least one pending packet and we need to interrupt the guest. self.rx.deferred_irqs = true; write_count >= self.rx.bytes_read } fn process_rx(&mut self) -> result::Result<(), DeviceError> { // Read as many frames as possible. loop { match self.read_tap() { Ok(count) => { self.rx.bytes_read = count; if !self.rx_single_frame() { self.rx.deferred_frame = true; break; } } Err(e) => { // The tap device is non-blocking, so any error aside from EAGAIN is // unexpected. 
match e.raw_os_error() { Some(err) if err == EAGAIN => (), _ => { error!("Failed to read tap: {:?}", e); return Err(DeviceError::FailedReadTap); } }; break; } } } if self.rx.deferred_irqs { self.rx.deferred_irqs = false; self.signal_used_queue(&self.rx.queue) } else { Ok(()) } } fn resume_rx(&mut self) -> result::Result<(), DeviceError> { if self.rx.deferred_frame { if self.rx_single_frame() { self.rx.deferred_frame = false; // process_rx() was interrupted possibly before consuming all // packets in the tap; try continuing now. self.process_rx() } else if self.rx.deferred_irqs { self.rx.deferred_irqs = false; self.signal_used_queue(&self.rx.queue) } else { Ok(()) } } else { Ok(()) } } fn process_tx(&mut self) -> result::Result<(), DeviceError> { let mem = self.mem.read().unwrap(); while let Some(avail_desc) = self.tx.queue.iter(&mem).next() { let head_index = avail_desc.index; let mut read_count = 0; let mut next_desc = Some(avail_desc); self.tx.iovec.clear(); while let Some(desc) = next_desc { if desc.is_write_only() { break; } self.tx.iovec.push((desc.addr, desc.len as usize)); read_count += desc.len as usize; next_desc = desc.next_descriptor(); } read_count = 0; // Copy buffer from across multiple descriptors. // TODO(performance - Issue #420): change this to use `writev()` instead of `write()` // and get rid of the intermediate buffer. for (desc_addr, desc_len) in self.tx.iovec.drain(..) 
{ let limit = cmp::min((read_count + desc_len) as usize, self.tx.frame_buf.len()); let read_result = mem.read_slice( &mut self.tx.frame_buf[read_count..limit as usize], desc_addr, ); match read_result { Ok(_) => { // Increment by number of bytes actually read read_count += limit - read_count; } Err(e) => { error!("Failed to read slice: {:?}", e); break; } } } let write_result = self.tap.write(&self.tx.frame_buf[..read_count as usize]); match write_result { Ok(_) => {} Err(e) => { warn!("net: tx: error failed to write to tap: {}", e); } }; self.tx.queue.add_used(&mem, head_index, 0); } Ok(()) } fn read_tap(&mut self) -> io::Result<usize> { self.tap.read(&mut self.rx.frame_buf) } fn register_tap_rx_listener(&self) -> std::result::Result<(), std::io::Error> { epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_ADD, self.tap.as_raw_fd(), epoll::Event::new(epoll::Events::EPOLLIN, u64::from(RX_TAP_EVENT)), )?; Ok(()) } fn unregister_tap_rx_listener(&self) -> std::result::Result<(), std::io::Error> { epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_DEL, self.tap.as_raw_fd(), epoll::Event::new(epoll::Events::EPOLLIN, u64::from(RX_TAP_EVENT)), )?; Ok(()) } fn run(&mut self, paused: Arc<AtomicBool>) -> result::Result<(), DeviceError> { // Create the epoll file descriptor self.epoll_fd = epoll::create(true).map_err(DeviceError::EpollCreateFd)?; // Add events epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_ADD, self.rx.queue_evt.as_raw_fd(), epoll::Event::new(epoll::Events::EPOLLIN, u64::from(RX_QUEUE_EVENT)), ) .map_err(DeviceError::EpollCtl)?; epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_ADD, self.tx.queue_evt.as_raw_fd(), epoll::Event::new(epoll::Events::EPOLLIN, u64::from(TX_QUEUE_EVENT)), ) .map_err(DeviceError::EpollCtl)?; self.register_tap_rx_listener() .map_err(DeviceError::EpollCtl)?; self.rx_tap_listening = true; epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_ADD, self.kill_evt.as_raw_fd(), 
epoll::Event::new(epoll::Events::EPOLLIN, u64::from(KILL_EVENT)), ) .map_err(DeviceError::EpollCtl)?; epoll::ctl( self.epoll_fd, epoll::ControlOptions::EPOLL_CTL_ADD, self.pause_evt.as_raw_fd(), epoll::Event::new(epoll::Events::EPOLLIN, u64::from(PAUSE_EVENT)), ) .map_err(DeviceError::EpollCtl)?; const EPOLL_EVENTS_LEN: usize = 100; let mut events = vec![epoll::Event::new(epoll::Events::empty(), 0); EPOLL_EVENTS_LEN]; 'epoll: loop { let num_events = match epoll::wait(self.epoll_fd, -1, &mut events[..]) { Ok(res) => res, Err(e) => { if e.kind() == io::ErrorKind::Interrupted { // It's well defined from the epoll_wait() syscall // documentation that the epoll loop can be interrupted // before any of the requested events occurred or the // timeout expired. In both those cases, epoll_wait() // returns an error of type EINTR, but this should not // be considered as a regular error. Instead it is more // appropriate to retry, by calling into epoll_wait(). continue; } return Err(DeviceError::EpollWait(e)); } }; for event in events.iter().take(num_events) { let ev_type = event.data as u16; match ev_type { RX_QUEUE_EVENT => { debug!("RX_QUEUE_EVENT received"); if let Err(e) = self.rx.queue_evt.read() { error!("Failed to get rx queue event: {:?}", e); break 'epoll; } self.resume_rx().unwrap(); if !self.rx_tap_listening { self.register_tap_rx_listener().unwrap(); self.rx_tap_listening = true; } } TX_QUEUE_EVENT => { debug!("TX_QUEUE_EVENT received"); if let Err(e) = self.tx.queue_evt.read() { error!("Failed to get tx queue event: {:?}", e); break 'epoll; } self.process_tx().unwrap(); } RX_TAP_EVENT => { debug!("RX_TAP_EVENT received"); if self.rx.deferred_frame // Process a deferred frame first if available. Don't read from tap again // until we manage to receive this deferred frame. 
{ if self.rx_single_frame() { self.rx.deferred_frame = false; self.process_rx().unwrap(); } else if self.rx.deferred_irqs { self.rx.deferred_irqs = false; self.signal_used_queue(&self.rx.queue).unwrap(); } } else { self.process_rx().unwrap(); } } KILL_EVENT => { debug!("KILL_EVENT received, stopping epoll loop"); break 'epoll; } PAUSE_EVENT => { debug!("PAUSE_EVENT received, pausing virtio-net epoll loop"); // We loop here to handle spurious park() returns. // Until we have not resumed, the paused boolean will // be true. while paused.load(Ordering::SeqCst) { thread::park(); } } _ => { error!("Unknown event for virtio-net"); } } } } Ok(()) } } pub struct Net { kill_evt: Option<EventFd>, pause_evt: Option<EventFd>, tap: Option<Tap>, avail_features: u64, acked_features: u64, // The config space will only consist of the MAC address specified by the user, // or nothing, if no such address if provided. config_space: Vec<u8>, queue_evts: Option<Vec<EventFd>>, interrupt_cb: Option<Arc<VirtioInterrupt>>, epoll_thread: Option<thread::JoinHandle<result::Result<(), DeviceError>>>, paused: Arc<AtomicBool>, } impl Net { /// Create a new virtio network device with the given TAP interface. pub fn new_with_tap(tap: Tap, guest_mac: Option<&MacAddr>, iommu: bool) -> Result<Self> { // Set offload flags to match the virtio features below. 
tap.set_offload( net_gen::TUN_F_CSUM | net_gen::TUN_F_UFO | net_gen::TUN_F_TSO4 | net_gen::TUN_F_TSO6, ) .map_err(Error::TapSetOffload)?; let vnet_hdr_size = vnet_hdr_len() as i32; tap.set_vnet_hdr_size(vnet_hdr_size) .map_err(Error::TapSetVnetHdrSize)?; let mut avail_features = 1 << VIRTIO_NET_F_GUEST_CSUM | 1 << VIRTIO_NET_F_CSUM | 1 << VIRTIO_NET_F_GUEST_TSO4 | 1 << VIRTIO_NET_F_GUEST_UFO | 1 << VIRTIO_NET_F_HOST_TSO4 | 1 << VIRTIO_NET_F_HOST_UFO | 1 << VIRTIO_F_VERSION_1; if iommu { avail_features |= 1u64 << VIRTIO_F_IOMMU_PLATFORM; } let mut config_space; if let Some(mac) = guest_mac { config_space = Vec::with_capacity(MAC_ADDR_LEN); // This is safe, because we know the capacity is large enough. unsafe { config_space.set_len(MAC_ADDR_LEN) } config_space[..].copy_from_slice(mac.get_bytes()); // When this feature isn't available, the driver generates a random MAC address. // Otherwise, it should attempt to read the device MAC address from the config space. avail_features |= 1 << VIRTIO_NET_F_MAC; } else { config_space = Vec::new(); } Ok(Net { kill_evt: None, pause_evt: None, tap: Some(tap), avail_features, acked_features: 0u64, config_space, queue_evts: None, interrupt_cb: None, epoll_thread: None, paused: Arc::new(AtomicBool::new(false)), }) } /// Create a new virtio network device with the given IP address and /// netmask. pub fn new( ip_addr: Ipv4Addr, netmask: Ipv4Addr, guest_mac: Option<&MacAddr>, iommu: bool, ) -> Result<Self> { let tap = Tap::new().map_err(Error::TapOpen)?; tap.set_ip_addr(ip_addr).map_err(Error::TapSetIp)?; tap.set_netmask(netmask).map_err(Error::TapSetNetmask)?; tap.enable().map_err(Error::TapEnable)?; Self::new_with_tap(tap, guest_mac, iommu) } } impl Drop for Net { fn drop(&mut self) { if let Some(kill_evt) = self.kill_evt.take() { // Ignore the result because there is nothing we can do about it. 
            // Signal the epoll loop to terminate; failure is non-actionable here.
            let _ = kill_evt.write(1);
        }
    }
}

impl VirtioDevice for Net {
    /// Virtio device type id for a network device.
    fn device_type(&self) -> u32 {
        VirtioDeviceType::TYPE_NET as u32
    }

    /// Maximum size of each virtqueue (RX and TX).
    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    /// Return one 32-bit page of the 64-bit feature bitfield.
    fn features(&self, page: u32) -> u32 {
        match page {
            // Get the lower 32-bits of the features bitfield.
            0 => self.avail_features as u32,
            // Get the upper 32-bits of the features bitfield.
            1 => (self.avail_features >> 32) as u32,
            _ => {
                warn!("Received request for unknown features page: {}", page);
                0u32
            }
        }
    }

    /// Record the feature bits acknowledged by the guest driver, masking out
    /// anything we never offered.
    fn ack_features(&mut self, page: u32, value: u32) {
        let mut v = match page {
            0 => u64::from(value),
            1 => u64::from(value) << 32,
            _ => {
                warn!("Cannot acknowledge unknown features page: {}", page);
                0u64
            }
        };

        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("Received acknowledge request for unknown feature: {:x}", v);

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;
    }

    /// Copy bytes from the device config space (the MAC address, when one was
    /// configured) into `data`, truncating reads past the end of the space.
    fn read_config(&self, offset: u64, mut data: &mut [u8]) {
        let config_len = self.config_space.len() as u64;
        if offset >= config_len {
            error!("Failed to read config space");
            return;
        }
        if let Some(end) = offset.checked_add(data.len() as u64) {
            // This write can't fail, offset and end are checked against config_len.
            data.write_all(&self.config_space[offset as usize..cmp::min(end, config_len) as usize])
                .unwrap();
        }
    }

    /// Copy `data` into the device config space starting at `offset`.
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let data_len = data.len() as u64;
        let config_len = self.config_space.len() as u64;
        // NOTE(review): `offset + data_len` can overflow u64 for hostile
        // offsets — prefer `offset.checked_add(data_len)` as read_config does.
        if offset + data_len > config_len {
            error!("Failed to write config space");
            return;
        }
        // NOTE(review): `right` spans from `offset` to the END of the config
        // space, and `copy_from_slice` panics unless both slices have equal
        // length — so any write that does not reach the end of the config
        // space panics. Confirm callers always write through the end, or
        // slice `right` down to `data.len()` first.
        let (_, right) = self.config_space.split_at_mut(offset as usize);
        right.copy_from_slice(&data[..]);
    }

    /// Start the device: validate the queues, create the kill/pause event-fd
    /// pairs, and spawn the epoll thread that services the TAP device and
    /// both virtqueues.
    fn activate(
        &mut self,
        mem: Arc<RwLock<GuestMemoryMmap>>,
        interrupt_cb: Arc<VirtioInterrupt>,
        mut queues: Vec<Queue>,
        mut queue_evts: Vec<EventFd>,
    ) -> ActivateResult {
        if queues.len() != NUM_QUEUES || queue_evts.len() != NUM_QUEUES {
            error!(
                "Cannot perform activate. Expected {} queue(s), got {}",
                NUM_QUEUES,
                queues.len()
            );
            return Err(ActivateError::BadActivate);
        }

        // Paired event fds: the device keeps one end, the worker thread the other.
        let (self_kill_evt, kill_evt) = EventFd::new(EFD_NONBLOCK)
            .and_then(|e| Ok((e.try_clone()?, e)))
            .map_err(|e| {
                error!("failed creating kill EventFd pair: {}", e);
                ActivateError::BadActivate
            })?;
        self.kill_evt = Some(self_kill_evt);

        let (self_pause_evt, pause_evt) = EventFd::new(EFD_NONBLOCK)
            .and_then(|e| Ok((e.try_clone()?, e)))
            .map_err(|e| {
                error!("failed creating pause EventFd pair: {}", e);
                ActivateError::BadActivate
            })?;
        self.pause_evt = Some(self_pause_evt);

        if let Some(tap) = self.tap.clone() {
            // Save the interrupt EventFD as we need to return it on reset
            // but clone it to pass into the thread.
            self.interrupt_cb = Some(interrupt_cb.clone());

            let mut tmp_queue_evts: Vec<EventFd> = Vec::new();
            for queue_evt in queue_evts.iter() {
                // Save the queue EventFD as we need to return it on reset
                // but clone it to pass into the thread.
tmp_queue_evts.push(queue_evt.try_clone().map_err(|e| { error!("failed to clone queue EventFd: {}", e); ActivateError::BadActivate })?); } self.queue_evts = Some(tmp_queue_evts); let rx_queue = queues.remove(0); let tx_queue = queues.remove(0); let rx_queue_evt = queue_evts.remove(0); let tx_queue_evt = queue_evts.remove(0); let mut handler = NetEpollHandler { mem, tap, rx: RxVirtio::new(rx_queue, rx_queue_evt), tx: TxVirtio::new(tx_queue, tx_queue_evt), interrupt_cb, kill_evt, pause_evt, epoll_fd: 0, rx_tap_listening: false, }; let paused = self.paused.clone(); thread::Builder::new() .name("virtio_net".to_string()) .spawn(move || handler.run(paused)) .map(|thread| self.epoll_thread = Some(thread)) .map_err(|e| { error!("failed to clone the virtio-net epoll thread: {}", e); ActivateError::BadActivate })?; return Ok(()); } Err(ActivateError::BadActivate) } fn reset(&mut self) -> Option<(Arc<VirtioInterrupt>, Vec<EventFd>)> { // We first must resume the virtio thread if it was paused. if self.pause_evt.take().is_some() { self.resume().ok()?; } if let Some(kill_evt) = self.kill_evt.take() { // Ignore the result because there is nothing we can do about it. let _ = kill_evt.write(1); } // Return the interrupt and queue EventFDs Some(( self.interrupt_cb.take().unwrap(), self.queue_evts.take().unwrap(), )) } } virtio_pausable!(Net); impl Snapshotable for Net {} impl Migratable for Net {}
/*! ```rudra-poc [target] crate = "pulse-simple" version = "1.0.1" [report] issue_url = "https://github.com/astro/rust-pulse-simple/issues/5" issue_date = 2021-02-05 [[bugs]] analyzer = "UnsafeDataflow" bug_class = "HigherOrderInvariant" bug_count = 2 rudra_report_locations = [ "src/lib.rs:144:5: 150:6", "src/lib.rs:180:5: 186:6", ] ``` !*/ #![forbid(unsafe_code)] fn main() { panic!("This issue was reported without PoC"); }
use super::*;
use stun::message::BINDING_REQUEST;

/// `UseCandidateAttr` must round-trip: absent on a fresh message, present
/// after `build`, and still present after the raw bytes are decoded into a
/// second message.
#[test]
fn test_use_candidate_attr_add_to() -> Result<(), Error> {
    let mut built = Message::new();
    assert!(!UseCandidateAttr::is_set(&built), "should not be set");

    built.build(&[Box::new(BINDING_REQUEST), Box::new(UseCandidateAttr::new())])?;

    // Decode the wire bytes into a fresh message and re-check the flag.
    let mut decoded = Message::new();
    decoded.write(&built.raw)?;
    assert!(UseCandidateAttr::is_set(&decoded), "should be set");

    Ok(())
}
use super::prelude::*;

/// Load the application stylesheet and install it on the window's screen.
pub fn setup(window: &gtk::Window) {
    // Without a screen there is nowhere to install the style provider.
    let screen: gdk::Screen = if let Some(screen) = window.get_screen() {
        screen
    } else {
        println!("Failed to get the screen for window.");
        return;
    };

    let provider = gtk::CssProvider::new();
    let stylesheet = include_str!("../../style/style.css");

    if let Err(err) = provider.load_from_data(stylesheet.as_bytes()) {
        println!("Failed to load css provider from data: {}", err);
    } else {
        gtk::StyleContext::add_provider_for_screen(
            &screen,
            &provider,
            gtk::STYLE_PROVIDER_PRIORITY_APPLICATION,
        );
    }
}
/// A STUN attribute.
///
/// The attribute type space is split in two so future revisions can add new
/// attributes: types 0x0000-0x7FFF are comprehension-required (an agent that
/// does not understand one cannot successfully process the message), while
/// types 0x8000-0xFFFF are comprehension-optional (an agent may ignore them).
#[derive(Eq, PartialEq, Debug)]
pub enum Attribute {
    MappedAddress(Address),
    // same as MappedAddress, but bits are xored with the magic cookie
    XorMappedAddress(Address),
    // user credentials
    UserName(String),
    // hmac-sha1 of the message
    MessageIntegrity([u8; 20]),
    // crc-32 of the message
    FingerPrint(u32),
    ErrorCode { code: u32, reason: String },
    Realm(String),
    Nonce(String),
    // a list of unknown attribute kinds
    UnknownAttributes(Vec<u16>),
    Software(String),
    AlternateServer(Address),
    // unrecognized attributes
    UnRecognized { kind: u16 },
}

/// A transport address: raw IP bytes, a port, and an address-family tag.
#[derive(Debug, Eq, PartialEq)]
pub struct Address {
    pub address: Vec<u8>,
    pub port: u16,
    pub ip_kind: IPKind,
}

impl Address {
    /// Build an IPv4 address from its four octets.
    pub fn ipv4(address: [u8; 4], port: u16) -> Address {
        Address {
            address: Vec::from(address),
            port,
            ip_kind: IPKind::IPv4,
        }
    }

    /// Build an IPv6 address from its sixteen octets.
    pub fn ipv6(address: [u8; 16], port: u16) -> Address {
        Address {
            address: Vec::from(address),
            port,
            ip_kind: IPKind::IPv6,
        }
    }
}

/// IP address family.
#[derive(Debug, Eq, PartialEq)]
pub enum IPKind {
    IPv4,
    IPv6,
}
/// Cardinal direction a wall faces within the grid.
///
/// Adds `Debug`/`PartialEq`/`Eq` to the original `Copy, Clone` so the type
/// can be printed in diagnostics and compared in tests — a backward-compatible
/// widening of the derives.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Direction {
    Up,
    Down,
    Left,
    Right,
}

/// A wall located at grid cell `(x, y)`, oriented along `dir`.
///
/// All fields are plain `Copy` data, so the struct itself derives `Copy`
/// alongside `Debug`/`PartialEq`/`Eq` for ergonomic use in collections and
/// assertions.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Wall {
    pub x: usize,
    pub y: usize,
    pub dir: Direction,
}
#![recursion_limit = "1024"]
#![feature(match_default_bindings)]
#![feature(nll)]
#![feature(try_trait)]
#![feature(use_nested_groups)]

// Crates whose macros are used throughout this crate.
#[macro_use]
extern crate failure;
#[macro_use]
extern crate indoc;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[macro_use]
extern crate maplit;
#[macro_use]
extern crate matches;
#[macro_use]
extern crate serenity;

extern crate chrono;
extern crate ddate;
extern crate env_logger;
extern crate fnorder;
extern crate log_panics;
extern crate parking_lot;
extern crate rand;
extern crate regex;
extern crate rink;
extern crate typemap;

mod commands;
mod data;
mod eris;
mod ext;
mod utils;

/// Entry point: install the panic logger and env logger, run the bot
/// (`eris::run`), and on failure log the whole `failure` error chain plus
/// backtrace before exiting with a non-zero status.
fn main() {
    log_panics::init();
    env_logger::init();

    if let Err(err) = eris::run() {
        let mut causes = err.causes();
        // Causes always contains at least one Fail.
        error!("Error: {}", causes.next().unwrap());

        for cause in causes {
            error!("Caused by: {}", cause);
        }

        error!("{}", err.backtrace());
        std::process::exit(1);
    }
}
use crate::interfaces::*; use crate::introspect::*; use crate::*; use rustbus::wire::unmarshal::Error as UnmarshalError; use std::cell::Cell; use std::fmt::Write; use std::path::{Path, PathBuf}; /// `LocalDescBase` is used to create GATT descriptors to be added to `LocalServiceBase` pub struct LocalDescBase { pub(crate) path: PathBuf, pub(crate) index: u16, pub(crate) uuid: UUID, pub(crate) serv_uuid: UUID, pub(crate) char_uuid: UUID, handle: u16, pub vf: ValOrFn, pub flags: DescFlags, pub write_callback: Option<Box<dyn FnMut(&[u8]) -> Result<Option<ValOrFn>, (String, Option<String>)>>>, } impl LocalDescBase { pub fn new<T: ToUUID>(uuid: T, flags: DescFlags) -> Self { let uuid = uuid.to_uuid(); LocalDescBase { uuid, flags, vf: ValOrFn::default(), path: PathBuf::new(), serv_uuid: Rc::from(""), char_uuid: Rc::from(""), write_callback: None, index: 0, handle: 0, } } pub(super) fn update_path(&mut self, base: &Path) { self.path = base.to_owned(); let mut name = String::with_capacity(8); write!(&mut name, "desc{:04x}", self.index).unwrap(); self.path.push(name); } } impl Debug for LocalDescBase { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { let wc_str = if let Some(_) = self.write_callback { "Some(FnMut)" } else { "None" }; // TODO: change to use the formatter helper functions write!(f, "LocalDescBase{{vf: {:?}, index: {:?}, handle: {:?}, uuid: {:?}, char_uuid: {:?}, serv_uuid: {:?}, path: {:?}, flags: {:?}, write_callback: {}}}", self.vf, self.index, self.handle, self.uuid, self.char_uuid, self.serv_uuid, self.path, self.flags, wc_str) } } impl AttObject for LocalDescBase { fn path(&self) -> &Path { &self.path } fn uuid(&self) -> &UUID { &self.uuid } } /// Represents a descriptor hosted by local instance of `Bluetooth`. 
pub struct LocalDesc<'a, 'b, 'c> { uuid: UUID, character: &'a mut LocalChar<'b, 'c>, } impl<'a, 'b, 'c> LocalDesc<'a, 'b, 'c> { pub(crate) fn new<T: ToUUID>(character: &'a mut LocalChar<'b, 'c>, uuid: T) -> Self { let uuid = uuid.to_uuid(); LocalDesc { character, uuid } } fn get_desc_base(&self) -> &LocalDescBase { self.character .get_char_base() .descs .get(&self.uuid) .unwrap() } fn get_desc_base_mut(&mut self) -> &mut LocalDescBase { self.character .get_char_base_mut() .descs .get_mut(&self.uuid) .unwrap() } pub(crate) fn desc_call(&mut self, call: MarshalledMessage) -> MarshalledMessage { let base = self.get_desc_base_mut(); match &call.dynheader.member.as_ref().unwrap()[..] { "ReadValue" => { if base.flags.read || base.flags.encrypt_read || base.flags.encrypt_auth_read || base.flags.secure_read { let dict: HashMap<String, Variant> = match call.body.parser().get() { Ok(d) => d, Err(e) => match e { UnmarshalError::EndOfMessage => HashMap::new(), _ => { return call.dynheader.make_error_response( BLUEZ_FAILED.to_string(), Some("Unexpected type for uint 16.".to_string()), ) } }, }; let offset = match dict.get("offset") { Some(v) => match v.get::<u16>() { Ok(offset) => offset, Err(_) => { return call.dynheader.make_error_response( BLUEZ_FAILED.to_string(), Some("Expected type for 'offset' to be uint16.".to_string()), ) } }, None => 0, } as usize; let mut reply = call.dynheader.make_response(); let val = base.vf.to_value(); if offset >= val.len() { // TODO: should this return an error instead of an empty array reply.body.push_param::<&[u8]>(&[]).unwrap(); } else { reply.body.push_param(&val[offset..]).unwrap(); } reply } else { call.dynheader.make_error_response( BLUEZ_NOT_PERM.to_string(), Some("This is not a readable descriptor.".to_string()), ) } } "WriteValue" => { if base.flags.write || base.flags.encrypt_write || base.flags.encrypt_auth_write || base.flags.secure_write { let mut parser = call.body.parser(); let bytes = match parser.get() { Ok(bytes) => bytes, 
Err(_) => { return call.dynheader.make_error_response( BLUEZ_FAILED.to_string(), Some("Expected byte array as first parameter.".to_string()), ) } }; let dict: HashMap<String, Variant> = match parser.get() { Ok(d) => d, Err(e) => match e { UnmarshalError::EndOfMessage => HashMap::new(), _ => { return call.dynheader.make_error_response( BLUEZ_FAILED.to_string(), Some("Expected dict as second parameter.".to_string()), ) } }, }; let offset = match dict.get("offset") { Some(var) => match var.get::<u16>() { Ok(val) => val, Err(_) => { return call.dynheader.make_error_response( BLUEZ_FAILED.to_string(), Some("Expected type for 'offset' to be uint16.".to_string()), ) } }, None => 0, } as usize; let mut cur_val = base.vf.to_value(); let l = cur_val.len() + offset; if l > 512 { return call .dynheader .make_error_response(BLUEZ_INVALID_LEN.to_string(), None); } cur_val.update(bytes, offset); match &mut base.write_callback { Some(cb) => match cb(&cur_val[..]) { Ok(vf) => { if let Some(vf) = vf { base.vf = vf; } } Err((s1, s2)) => return call.dynheader.make_error_response(s1, s2), }, None => base.vf = ValOrFn::Value(cur_val), } call.dynheader.make_response() } else { call.dynheader.make_error_response( BLUEZ_NOT_PERM.to_string(), Some("This is not a writable descriptor.".to_string()), ) } } _ => call .dynheader .make_error_response(UNKNOWN_METHOD.to_string(), None), } } } /// Flags for GATT descriptors. 
/// /// What each flags does is detailed on /// page 1552 (Table 3.5) and page 1554 (Table 3.8) of the [Core Specification (5.2)] /// /// [Core Specification (5.2)]: https://www.bluetooth.com/specifications/bluetooth-core-specification/ #[derive(Clone, Copy, Default, Debug)] pub struct DescFlags { pub read: bool, pub write: bool, pub encrypt_read: bool, pub encrypt_write: bool, pub encrypt_auth_read: bool, pub encrypt_auth_write: bool, pub secure_read: bool, pub secure_write: bool, pub authorize: bool, } impl DescFlags { pub fn to_strings(&self) -> Vec<String> { let mut ret = Vec::new(); if self.read { ret.push("read".to_string()); } if self.write { ret.push("write".to_string()) } if self.encrypt_read { ret.push("encrypt-read".to_string()); } if self.encrypt_write { ret.push("encrypt-write".to_string()); } if self.encrypt_auth_read { ret.push("encrypt-authenticated-read".to_string()); } if self.encrypt_auth_write { ret.push("encrypt-authenticated-write".to_string()); } if self.secure_write { ret.push("secure-write".to_string()); } if self.secure_read { ret.push("secure-read".to_string()); } if self.authorize { unimplemented!(); ret.push("authorize".to_string()); } ret } } impl Properties for LocalDescBase { const INTERFACES: &'static [(&'static str, &'static [&'static str])] = &[DESC_IF, PROP_IF]; fn get_inner<'a, 'b>(&mut self, interface: &str, prop: &str) -> Option<Param<'a, 'b>> { match interface { DESC_IF_STR => match prop { UUID_PROP => Some(base_param_to_variant(self.uuid.to_string().into())), CHAR_PROP => Some(base_param_to_variant(Base::ObjectPath( self.path.parent().unwrap().to_str().unwrap().to_string(), ))), VALUE_PROP => { let bytes: Vec<Param<'a, 'b>> = self .vf .to_value() .into_iter() .map(|b| Param::Base(Base::Byte(*b))) .collect(); Some(container_param_to_variant(Container::Array( params::Array { element_sig: signature::Type::Base(signature::Base::Byte), values: bytes, }, ))) } FLAGS_PROP => { let flags: Vec<Param<'a, 'b>> = self .flags 
.to_strings() .into_iter() .map(|s| Param::Base(Base::String(s))) .collect(); Some(container_param_to_variant(Container::Array( params::Array { element_sig: signature::Type::Base(signature::Base::String), values: flags, }, ))) } HANDLE_PROP => Some(base_param_to_variant(self.index.into())), _ => None, }, PROP_IF_STR => None, _ => None, } } fn set_inner(&mut self, interface: &str, prop: &str, val: Variant) -> Option<String> { match interface { DESC_IF_STR => match prop { HANDLE_PROP => match val.get() { Ok(handle) => { self.handle = handle; None } Err(_) => Some("UnexpectedType".to_string()), }, _ => unimplemented!(), }, PROP_IF_STR => Some("UnknownProperty".to_string()), _ => Some("UnknownInterface".to_string()), } } } impl Introspectable for LocalDescBase { fn introspectable_str(&self) -> String { let mut ret = String::new(); ret.push_str(INTROSPECT_FMT_P1); ret.push_str(self.path.to_str().unwrap()); ret.push_str(INTROSPECT_FMT_P2); ret.push_str(PROP_STR); ret.push_str(DESC_STR); ret.push_str(INTROSPECT_FMT_P3); ret } } impl AttObject for LocalDesc<'_, '_, '_> { fn path(&self) -> &Path { self.get_desc_base().path() } fn uuid(&self) -> &UUID { self.get_desc_base().uuid() } } impl FlaggedAtt for LocalDesc<'_, '_, '_> { type Flags = DescFlags; fn flags(&self) -> Self::Flags { self.get_desc_base().flags } } pub(crate) struct RemoteDescBase { uuid: UUID, value: Rc<Cell<AttValue>>, path: PathBuf, } impl RemoteDescBase { pub(crate) fn from_props( mut props: HashMap<String, Variant>, path: PathBuf, ) -> Result<Self, Error> { let uuid = match props.remove("UUID") { Some(addr) => match addr.get::<String>() { Ok(addr) => addr.to_uuid(), Err(_) => { return Err(Error::DbusReqErr( "Invalid descriptor returned; UUID field is invalid type".to_string(), )) } }, None => { return Err(Error::DbusReqErr( "Invalid descriptor returned; missing UUID field".to_string(), )) } }; let value = match props.remove("Value") { Some(var) => match var.get() { Ok(cv) => Rc::new(Cell::new(cv)), 
Err(_) => { return Err(Error::DbusReqErr( "Invalid descriptor returned; Value field is invalid type".to_string(), )) } }, None => { return Err(Error::DbusReqErr( "Invalid descriptor returned; missing Value field".to_string(), )) } }; Ok(RemoteDescBase { uuid, value, path }) } } impl AttObject for RemoteDescBase { fn path(&self) -> &Path { &self.path } fn uuid(&self) -> &UUID { &self.uuid } } /// Represents a descriptor present on a remote device. pub struct RemoteDesc<'a, 'b, 'c, 'd> { pub(super) character: &'a mut RemoteChar<'b, 'c, 'd>, pub(super) uuid: UUID, } impl RemoteDesc<'_, '_, '_, '_> { fn get_desc_base(&self) -> &RemoteDescBase { self.character .get_char_base() .descs .get(&self.uuid) .unwrap() } } impl AttObject for RemoteDesc<'_, '_, '_, '_> { fn path(&self) -> &Path { let base = self.get_desc_base(); base.path() } fn uuid(&self) -> &UUID { &self.uuid } }
use crate::background::BgEvent; use crate::utils::spawn; use crate::{Event, TaskEntity}; use crate::widgets::Task; //because TASK IS NOT FOUND anywhere use async_channel::Sender; use glib::{clone, SourceId}; // use glib::SourceId; ARE CHANGES REQUIRED HERE TOO BECAUSEE I IMPORTED IT ABOVE use gtk::prelude::*; use slotmap::SlotMap; use std::time::Duration; pub struct App { pub container: gtk::Grid, pub delete_button: gtk::Button, pub tasks: SlotMap<TaskEntity, Task>, pub scheduled_write: Option<SourceId>, pub tx: Sender<Event>, pub btx: Sender<BgEvent>, } impl App { pub fn new(app: &gtk::Application, tx: Sender<Event>, btx: Sender<BgEvent>) -> Self { let container = cascade! { gtk::Grid::new(); ..set_column_spacing(4); ..set_row_spacing(4); ..set_border_width(4); ..show(); }; let scrolled = cascade! { //code changed here so make the changes in the gitbook and the example src code gtk::ScrolledWindow::new(gtk::NONE_ADJUSTMENT,gtk::NONE_ADJUSTMENT); // ..hscrollbar_policy(gtk::PolicyType::Never); ..set_policy(gtk::PolicyType::Never, gtk::PolicyType::Automatic); }; // let scrolled = gtk::ScrolledWindowBuilder::new() // .hscrollbar_policy(gtk::PolicyType::Never) // .build(); scrolled.add(&container); let delete_button = cascade!{ gtk::Button::from_icon_name(Some("edit-delete-symbolic"), gtk::IconSize::Button); ..set_label("Delete"); ..set_always_show_image(true); ..set_no_show_all(true); ..style_context().add_class(&gtk::STYLE_CLASS_DESTRUCTIVE_ACTION); //ALSO CHANGE HERE IN 2X09 ..connect_clicked(clone!(@strong tx => move |_|{ let tx = tx.clone(); spawn(async move { let _ = tx.send(Event::Delete).await; }); })); }; let headerbar = cascade!{ gtk::HeaderBar::new(); ..pack_end(&delete_button); ..set_title(Some("gtk-todo")); ..set_show_close_button(true); }; let _window = cascade! 
{ gtk::ApplicationWindow::new(app); ..set_titlebar(Some(&headerbar)); ..add(&scrolled); ..connect_delete_event(clone!(@strong tx, @strong scrolled => move |win, _| { //detach the window preserving the entry widgets which contain the text win.remove(&scrolled); let tx = tx.clone(); spawn(async move { let _ = tx.send(Event::Closed).await; }); gtk::Inhibit(false) })); ..show_all(); }; gtk::Window::set_default_icon_name("not_yet_designed_icon_name_here"); let mut app = Self { delete_button, container, tasks: SlotMap::with_key(), scheduled_write: None, tx, btx, }; app.insert_row(0); app } pub fn clear (&mut self){ while let Some(entity) = self.tasks.keys().next(){ self.remove_(entity) } } pub fn load(&mut self, data: String){ self.clear(); for (row, line) in data.lines().enumerate(){ let entity = self.insert_row(row as i32); self.tasks[entity].set_text(line); } } fn insert_row(&mut self, row: i32) -> TaskEntity { for task in self.tasks.values_mut() { if task.row >= row { task.row += 1; } } self.container.insert_row(row); //why are we calling a function inside itself let task = Task::new(row); self.container.attach(&task.check, 0, row, 1, 1); self.container.attach(&task.entry, 1, row, 1, 1); self.container.attach(&task.insert, 2, row, 1, 1); task.entry.grab_focus(); let entity = self.tasks.insert(task); self.tasks[entity].connect(self.tx.clone(), entity); //what does connect do here? 
return entity; } pub fn insert(&mut self, entity: TaskEntity) { let mut insert_at = 0; if let Some(task) = self.tasks.get(entity) { insert_at = task.row + 1; } //should semi-colon be here self.insert_row(insert_at); } pub fn modified(&mut self) { if let Some(id) = self.scheduled_write.take() { glib::source_remove(id); } let tx = self.tx.clone(); self.scheduled_write = Some(glib::timeout_add_local(Duration::from_secs(5), move || { //CORRECTIONS HERE REQUIRED 2x07 let tx = tx.clone(); spawn(async move { let _ = tx.send(Event::SyncToDisk).await; }); glib::Continue(false) })); } pub async fn sync_to_disk(&mut self) { self.scheduled_write = None; let contents = fomat_macros::fomat!( for node in self.tasks.values(){ if node.entry.to_string().len() != 0 { //CHANGE HERE //to_string().len() 2x07 (node.entry.to_string()) "\n" //CHANGE HERE to_string 2x07 } } ); let _ = self.btx.send(BgEvent::Save("Task".into(), contents)).await; } pub async fn closed(&mut self){ self.sync_to_disk().await; let _ = self.btx.send(BgEvent::Quit).await; } pub fn remove(&mut self, entity: TaskEntity) { if self.tasks.len() == 1 { return; } self.remove_(entity); } fn remove_(&mut self, entity: TaskEntity) { if let Some(removed) = self.tasks.remove(entity) { self.container.remove_row(removed.row); //decrement the row by one for task in self.tasks.values_mut() { if task.row > removed.row { task.row -= 1; } } } //should semi-colon be here } }
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(unused, clippy::needless_pass_by_value)] #![warn(clippy::map_entry)] use std::collections::{BTreeMap, HashMap}; use std::hash::Hash; fn foo() {} fn insert_if_absent0<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if !m.contains_key(&k) { m.insert(k, v); } } fn insert_if_absent1<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if !m.contains_key(&k) { foo(); m.insert(k, v); } } fn insert_if_absent2<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if !m.contains_key(&k) { m.insert(k, v) } else { None }; } fn insert_if_present2<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if m.contains_key(&k) { None } else { m.insert(k, v) }; } fn insert_if_absent3<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if !m.contains_key(&k) { foo(); m.insert(k, v) } else { None }; } fn insert_if_present3<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, v: V) { if m.contains_key(&k) { None } else { foo(); m.insert(k, v) }; } fn insert_in_btreemap<K: Ord, V>(m: &mut BTreeMap<K, V>, k: K, v: V) { if !m.contains_key(&k) { foo(); m.insert(k, v) } else { None }; } fn insert_other_if_absent<K: Eq + Hash, V>(m: &mut HashMap<K, V>, k: K, o: K, v: V) { if !m.contains_key(&k) { m.insert(o, v); } } fn main() {}
use crate::config; use amethyst::input::{InputHandler, StringBindings}; use serde::{Deserialize, Serialize}; pub fn input_movement(input: &InputHandler<StringBindings>) -> (f32, f32) { let raw_movement_y = match input.axis_value("player_vertical") { Some(mov) => mov, None => 0.0, }; let raw_movement_x = match input.axis_value("player_horizontal") { Some(mov) => mov, None => 0.0, }; (raw_movement_x, raw_movement_y) } #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize, PartialOrd, Ord)] pub enum Side { UpperSide, LowerSide, } /// Return the opposite side (Upper if Lower, and viceversa) pub fn opposite_side(side: Side) -> Side { match side { Side::UpperSide => Side::LowerSide, Side::LowerSide => Side::UpperSide, } } /// Return the default position for player number i. pub fn player_position(i: usize, side: Side) -> [f32; 2] { let mut position = config::RESET_PLAYER_POSITIONS[i]; if side == Side::UpperSide { position[1] = config::SCREEN_HEIGHT - position[1]; } position }
use anyhow::Result;
use kira::instance::InstanceSettings;
use kira::manager::{AudioManager, AudioManagerSettings};
use kira::sound::{self, handle::SoundHandle, SoundSettings};
use std::io::Cursor;

/// Audio playback wrapper: owns the kira `AudioManager` and the handles for
/// every preloaded sound.
pub(crate) struct Player {
    _manager: AudioManager,
    sounds: Sounds,
}

/// Handles for the three bundled sounds.
struct Sounds {
    music: SoundHandle,
    jump: SoundHandle,
    splat: SoundHandle,
}

impl Player {
    /// Decode the bundled OGG assets and register them with a fresh manager.
    pub(crate) fn new() -> Result<Self> {
        let mut manager = AudioManager::new(AudioManagerSettings::default())?;

        // The music track loops from the very beginning.
        let looping = SoundSettings {
            default_loop_start: Some(0.0),
            ..SoundSettings::default()
        };
        let music_bytes = Cursor::new(include_bytes!(
            "../cc0/01_-_A.T.M.O.M._-_Nochnoe_Dykhanie_Taigi.ogg"
        ));
        let music = manager.add_sound(sound::Sound::from_ogg_reader(music_bytes, looping)?)?;

        let jump_bytes = Cursor::new(include_bytes!("../assets/jump.ogg"));
        let jump = manager
            .add_sound(sound::Sound::from_ogg_reader(jump_bytes, SoundSettings::default())?)?;

        let splat_bytes = Cursor::new(include_bytes!("../assets/splat.ogg"));
        let splat = manager
            .add_sound(sound::Sound::from_ogg_reader(splat_bytes, SoundSettings::default())?)?;

        Ok(Self {
            _manager: manager,
            sounds: Sounds { music, jump, splat },
        })
    }

    /// Start the looping background music; playback errors are propagated.
    pub(crate) fn music(&mut self) -> Result<()> {
        self.sounds.music.play(InstanceSettings::default())?;
        Ok(())
    }

    /// Fire-and-forget jump effect; playback errors are deliberately ignored.
    pub(crate) fn jump(&mut self) {
        self.sounds.jump.play(InstanceSettings::default()).ok();
    }

    /// Fire-and-forget splat effect; playback errors are deliberately ignored.
    pub(crate) fn splat(&mut self) {
        self.sounds.splat.play(InstanceSettings::default()).ok();
    }
}
// Copyright 2018-2019 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. use bincode::serialize; use serde::Serialize; use crate::error::DataError; pub trait EncodableKey { fn to_bytes(&self) -> Result<Vec<u8>, DataError>; } impl<T> EncodableKey for T where T: Serialize, { fn to_bytes(&self) -> Result<Vec<u8>, DataError> { serialize(self).map_err(|e| e.into()) } }
// Supress warnings generated by bindgen: https://github.com/rust-lang/rust-bindgen/issues/1651 #![allow(deref_nullptr)] #![allow(unknown_lints)] //! Setup and control loop devices. //! //! Provides rust interface with similar functionality to the Linux utility `losetup`. //! //! # Examples //! //! Default options: //! //! ```no_run //! use loopdev::LoopControl; //! let lc = LoopControl::open().unwrap(); //! let ld = lc.next_free().unwrap(); //! //! println!("{}", ld.path().unwrap().display()); //! //! ld.attach_file("disk.img").unwrap(); //! // ... //! ld.detach().unwrap(); //! ``` //! //! Custom options: //! //! ```no_run //! # use loopdev::LoopControl; //! # let lc = LoopControl::open().unwrap(); //! # let ld = lc.next_free().unwrap(); //! # //! ld.with() //! .part_scan(true) //! .offset(512 * 1024 * 1024) // 512 MiB //! .size_limit(1024 * 1024 * 1024) // 1GiB //! .attach("disk.img").unwrap(); //! // ... //! ld.detach().unwrap(); //! ``` use crate::bindings::{ loop_info64, LOOP_CLR_FD, LOOP_CTL_ADD, LOOP_CTL_GET_FREE, LOOP_SET_CAPACITY, LOOP_SET_FD, LOOP_SET_STATUS64, LO_FLAGS_AUTOCLEAR, LO_FLAGS_PARTSCAN, LO_FLAGS_READ_ONLY, }; #[cfg(feature = "direct_io")] use bindings::LOOP_SET_DIRECT_IO; use libc::{c_int, ioctl}; use std::{ default::Default, fs::{File, OpenOptions}, io, os::unix::prelude::*, path::{Path, PathBuf}, }; #[allow(non_camel_case_types)] #[allow(dead_code)] #[allow(non_snake_case)] mod bindings { include!(concat!(env!("OUT_DIR"), "/bindings.rs")); } #[cfg(all(not(target_os = "android"), not(target_env = "musl")))] type IoctlRequest = libc::c_ulong; #[cfg(any(target_os = "android", target_env = "musl"))] type IoctlRequest = libc::c_int; const LOOP_CONTROL: &str = "/dev/loop-control"; #[cfg(not(target_os = "android"))] const LOOP_PREFIX: &str = "/dev/loop"; #[cfg(target_os = "android")] const LOOP_PREFIX: &str = "/dev/block/loop"; /// Interface to the loop control device: `/dev/loop-control`. 
#[derive(Debug)]
pub struct LoopControl {
    // Open handle on /dev/loop-control; all control ioctls go through it.
    dev_file: File,
}

impl LoopControl {
    /// Opens the loop control device.
    ///
    /// # Errors
    ///
    /// This function will return an error for various reasons when opening
    /// the loop control file `/dev/loop-control`. See
    /// [`OpenOptions::open`](https://doc.rust-lang.org/std/fs/struct.OpenOptions.html)
    /// for further details.
    pub fn open() -> io::Result<Self> {
        Ok(Self {
            dev_file: OpenOptions::new()
                .read(true)
                .write(true)
                .open(LOOP_CONTROL)?,
        })
    }

    /// Finds and opens the next available loop device.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use loopdev::LoopControl;
    /// let lc = LoopControl::open().unwrap();
    /// let ld = lc.next_free().unwrap();
    /// println!("{}", ld.path().unwrap().display());
    /// ```
    ///
    /// # Errors
    ///
    /// This function will return an error for various reasons when opening
    /// the loop device file `/dev/loopX`. See
    /// [`OpenOptions::open`](https://doc.rust-lang.org/std/fs/struct.OpenOptions.html)
    /// for further details.
    pub fn next_free(&self) -> io::Result<LoopDevice> {
        // LOOP_CTL_GET_FREE returns the number of the first unused loop
        // device, allocating one if none is free.
        let dev_num = ioctl_to_error(unsafe {
            ioctl(
                self.dev_file.as_raw_fd() as c_int,
                LOOP_CTL_GET_FREE as IoctlRequest,
            )
        })?;
        LoopDevice::open(format!("{}{}", LOOP_PREFIX, dev_num))
    }

    /// Add and opens a new loop device.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use loopdev::LoopControl;
    /// let lc = LoopControl::open().unwrap();
    /// let ld = lc.add(1).unwrap();
    /// println!("{}", ld.path().unwrap().display());
    /// ```
    ///
    /// # Errors
    ///
    /// This function will return an error when a loop device with the passed
    /// number exists or opening the newly created device fails.
pub fn add(&self, n: u32) -> io::Result<LoopDevice> { let dev_num = ioctl_to_error(unsafe { ioctl( self.dev_file.as_raw_fd() as c_int, LOOP_CTL_ADD as IoctlRequest, n as c_int, ) })?; LoopDevice::open(format!("{}{}", LOOP_PREFIX, dev_num)) } } impl AsRawFd for LoopControl { fn as_raw_fd(&self) -> RawFd { self.dev_file.as_raw_fd() } } impl IntoRawFd for LoopControl { fn into_raw_fd(self) -> RawFd { self.dev_file.into_raw_fd() } } /// Interface to a loop device ie `/dev/loop0`. #[derive(Debug)] pub struct LoopDevice { device: File, } impl AsRawFd for LoopDevice { fn as_raw_fd(&self) -> RawFd { self.device.as_raw_fd() } } impl IntoRawFd for LoopDevice { fn into_raw_fd(self) -> RawFd { self.device.into_raw_fd() } } impl LoopDevice { /// Opens a loop device. /// /// # Errors /// /// This function will return an error for various reasons when opening /// the given loop device file. See /// [`OpenOptions::open`](https://doc.rust-lang.org/std/fs/struct.OpenOptions.html) /// for further details. pub fn open<P: AsRef<Path>>(dev: P) -> io::Result<Self> { // TODO create dev if it does not exist and begins with LOOP_PREFIX Ok(Self { device: OpenOptions::new().read(true).write(true).open(dev)?, }) } /// Attach the loop device to a file with given options. /// /// # Examples /// /// Attach the device to a file. /// /// ```no_run /// use loopdev::LoopDevice; /// let mut ld = LoopDevice::open("/dev/loop0").unwrap(); /// ld.with().part_scan(true).attach("disk.img").unwrap(); /// # ld.detach().unwrap(); /// ``` pub fn with(&self) -> AttachOptions<'_> { AttachOptions { device: self, info: bindings::loop_info64::default(), #[cfg(feature = "direct_io")] direct_io: false, } } /// Attach the loop device to a file that maps to the whole file. /// /// # Examples /// /// Attach the device to a file. 
/// /// ```no_run /// use loopdev::LoopDevice; /// let ld = LoopDevice::open("/dev/loop0").unwrap(); /// ld.attach_file("disk.img").unwrap(); /// # ld.detach().unwrap(); /// ``` /// /// # Errors /// /// This function will return an error for various reasons. Either when /// opening the backing file (see /// [`OpenOptions::open`](https://doc.rust-lang.org/std/fs/struct.OpenOptions.html) /// for further details) or when calling the ioctl to attach the backing /// file to the device. pub fn attach_file<P: AsRef<Path>>(&self, backing_file: P) -> io::Result<()> { let info = loop_info64 { ..Default::default() }; Self::attach_with_loop_info(self, backing_file, info) } /// Attach the loop device to a file with `loop_info64`. fn attach_with_loop_info( &self, // TODO should be mut? - but changing it is a breaking change backing_file: impl AsRef<Path>, info: loop_info64, ) -> io::Result<()> { let write_access = (info.lo_flags & LO_FLAGS_READ_ONLY) == 0; let bf = OpenOptions::new() .read(true) .write(write_access) .open(backing_file)?; self.attach_fd_with_loop_info(bf, info) } /// Attach the loop device to a fd with `loop_info`. fn attach_fd_with_loop_info(&self, bf: impl AsRawFd, info: loop_info64) -> io::Result<()> { // Attach the file ioctl_to_error(unsafe { ioctl( self.device.as_raw_fd() as c_int, LOOP_SET_FD as IoctlRequest, bf.as_raw_fd() as c_int, ) })?; let result = unsafe { ioctl( self.device.as_raw_fd() as c_int, LOOP_SET_STATUS64 as IoctlRequest, &info, ) }; match ioctl_to_error(result) { Err(err) => { // Ignore the error to preserve the original error let _detach_err = self.detach(); Err(err) } Ok(_) => Ok(()), } } /// Get the path of the loop device. pub fn path(&self) -> Option<PathBuf> { let mut p = PathBuf::from("/proc/self/fd"); p.push(self.device.as_raw_fd().to_string()); std::fs::read_link(&p).ok() } /// Get the device major number /// /// # Errors /// /// This function needs to stat the backing file and can fail if there is /// an IO error. 
#[allow(clippy::unnecessary_cast)] pub fn major(&self) -> io::Result<u32> { self.device .metadata() .map(|m| unsafe { libc::major(m.rdev()) }) .map(|m| m as u32) } /// Get the device major number /// /// # Errors /// /// This function needs to stat the backing file and can fail if there is /// an IO error. #[allow(clippy::unnecessary_cast)] pub fn minor(&self) -> io::Result<u32> { self.device .metadata() .map(|m| unsafe { libc::minor(m.rdev()) }) .map(|m| m as u32) } /// Detach a loop device from its backing file. /// /// Note that the device won't fully detach until a short delay after the underling device file /// gets closed. This happens when `LoopDev` goes out of scope so you should ensure the `LoopDev` /// lives for a short a time as possible. /// /// # Examples /// /// ```no_run /// use loopdev::LoopDevice; /// let ld = LoopDevice::open("/dev/loop0").unwrap(); /// # ld.attach_file("disk.img").unwrap(); /// ld.detach().unwrap(); /// ``` /// /// # Errors /// /// This function will return an error for various reasons when calling the /// ioctl to detach the backing file from the device. pub fn detach(&self) -> io::Result<()> { ioctl_to_error(unsafe { ioctl( self.device.as_raw_fd() as c_int, LOOP_CLR_FD as IoctlRequest, 0, ) })?; Ok(()) } /// Resize a live loop device. If the size of the backing file changes this can be called to /// inform the loop driver about the new size. /// /// # Errors /// /// This function will return an error for various reasons when calling the /// ioctl to set the capacity of the device. pub fn set_capacity(&self) -> io::Result<()> { ioctl_to_error(unsafe { ioctl( self.device.as_raw_fd() as c_int, LOOP_SET_CAPACITY as IoctlRequest, 0, ) })?; Ok(()) } /// Enable or disable direct I/O for the backing file. /// /// # Errors /// /// This function will return an error for various reasons when calling the /// ioctl to set the direct io flag for the device. 
#[cfg(feature = "direct_io")] pub fn set_direct_io(&self, direct_io: bool) -> io::Result<()> { ioctl_to_error(unsafe { ioctl( self.device.as_raw_fd() as c_int, LOOP_SET_DIRECT_IO as IoctlRequest, if direct_io { 1 } else { 0 }, ) })?; Ok(()) } } /// Used to set options when attaching a device. Created with [`LoopDevice::with`()]. /// /// # Examples /// /// Enable partition scanning on attach: /// /// ```no_run /// use loopdev::LoopDevice; /// let mut ld = LoopDevice::open("/dev/loop0").unwrap(); /// ld.with() /// .part_scan(true) /// .attach("disk.img") /// .unwrap(); /// # ld.detach().unwrap(); /// ``` /// /// A 1MiB slice of the file located at 1KiB into the file. /// /// ```no_run /// use loopdev::LoopDevice; /// let mut ld = LoopDevice::open("/dev/loop0").unwrap(); /// ld.with() /// .offset(1024*1024) /// .size_limit(1024*1024*1024) /// .attach("disk.img") /// .unwrap(); /// # ld.detach().unwrap(); /// ``` #[must_use] pub struct AttachOptions<'d> { device: &'d LoopDevice, info: loop_info64, #[cfg(feature = "direct_io")] direct_io: bool, } impl AttachOptions<'_> { /// Offset in bytes from the start of the backing file the data will start at. pub fn offset(mut self, offset: u64) -> Self { self.info.lo_offset = offset; self } /// Maximum size of the data in bytes. pub fn size_limit(mut self, size_limit: u64) -> Self { self.info.lo_sizelimit = size_limit; self } /// Set read only flag pub fn read_only(mut self, read_only: bool) -> Self { if read_only { self.info.lo_flags |= LO_FLAGS_READ_ONLY; } else { self.info.lo_flags &= !LO_FLAGS_READ_ONLY; } self } /// Set autoclear flag pub fn autoclear(mut self, autoclear: bool) -> Self { if autoclear { self.info.lo_flags |= LO_FLAGS_AUTOCLEAR; } else { self.info.lo_flags &= !LO_FLAGS_AUTOCLEAR; } self } // Enable or disable direct I/O for the backing file. 
#[cfg(feature = "direct_io")] pub fn set_direct_io(mut self, direct_io: bool) -> Self { self.direct_io = direct_io; self } /// Force the kernel to scan the partition table on a newly created loop device. Note that the /// partition table parsing depends on sector sizes. The default is sector size is 512 bytes pub fn part_scan(mut self, enable: bool) -> Self { if enable { self.info.lo_flags |= LO_FLAGS_PARTSCAN; } else { self.info.lo_flags &= !LO_FLAGS_PARTSCAN; } self } /// Attach the loop device to a file with the set options. /// /// # Errors /// /// This function will return an error for various reasons. Either when /// opening the backing file (see /// [`OpenOptions::open`](https://doc.rust-lang.org/std/fs/struct.OpenOptions.html) /// for further details) or when calling the ioctl to attach the backing /// file to the device. pub fn attach(self, backing_file: impl AsRef<Path>) -> io::Result<()> { self.device.attach_with_loop_info(backing_file, self.info)?; #[cfg(feature = "direct_io")] if self.direct_io { self.device.set_direct_io(self.direct_io)?; } Ok(()) } /// Attach the loop device to an fd /// /// # Errors /// /// This function will return an error for various reasons when calling the /// ioctl to attach the backing file to the device. pub fn attach_fd(self, backing_file_fd: impl AsRawFd) -> io::Result<()> { self.device .attach_fd_with_loop_info(backing_file_fd, self.info)?; #[cfg(feature = "direct_io")] if self.direct_io { self.device.set_direct_io(self.direct_io)?; } Ok(()) } } fn ioctl_to_error(ret: i32) -> io::Result<i32> { if ret < 0 { Err(io::Error::last_os_error()) } else { Ok(ret) } }
use crate::info::fetch_ws_crates;
use crate::util::{can_publish, get_published_version};
use anyhow::bail;
use anyhow::{Context, Result};
use cargo_metadata::Package;
use clap::ArgMatches;
use futures_util::future::{BoxFuture, FutureExt};
use semver::Version;
use std::collections::HashMap;
use std::fs::{read_to_string, write};
use std::sync::Arc;
use tokio::task::spawn_blocking;
use toml_edit::{Item, Value};

/// Entry point for the `bump` subcommand: computes new versions for the
/// requested crate (and, with `--breaking`, its workspace dependants) and
/// rewrites the affected `Cargo.toml` files.
pub async fn run<'a>(matches: &ArgMatches<'a>) -> Result<()> {
    let ws_packages = fetch_ws_crates().await?;

    let crate_to_bump = matches
        .value_of_lossy("crate")
        .expect("crate name is required argument");

    // The crate being bumped must be a workspace member.
    let main = match ws_packages.iter().find(|p| p.name == crate_to_bump) {
        None => bail!("Package {} is not a member of workspace", crate_to_bump),
        Some(v) => v.clone(),
    };

    let breaking = matches.is_present("breaking");

    // Get list of crates to bump
    let mut dependants = Default::default();
    public_dependants(&mut dependants, &ws_packages, &crate_to_bump, breaking).await?;
    let dependants = Arc::new(dependants);

    patch(main.clone(), dependants.clone())
        .await
        .with_context(|| format!("failed to patch {}", crate_to_bump))?;

    if breaking {
        // For a breaking bump every transitive dependant also gets patched.
        for dep in dependants.keys() {
            match ws_packages.iter().find(|p| p.name == &**dep) {
                // NOTE(review): this error message reports `crate_to_bump`,
                // but the package that was not found here is `dep` — confirm
                // and fix the message.
                None => bail!("Package {} is not a member of workspace", crate_to_bump),
                Some(v) => {
                    patch(v.clone(), dependants.clone())
                        .await
                        .with_context(|| format!("failed to patch {}", v.name))?;
                }
            };
        }
    }

    Ok(())
}

/// Rewrites `package`'s `Cargo.toml`: sets the package's own version and the
/// version of every dependency listed in `deps_to_bump`.
///
/// NOTE(review): indexes `deps_to_bump[&package.name]`, so this panics if
/// `package` is not itself in the map — confirm callers uphold that.
async fn patch(package: Package, deps_to_bump: Arc<HashMap<String, Version>>) -> Result<()> {
    eprintln!(
        "Package({}) -> {}",
        package.name, deps_to_bump[&package.name]
    );
    // toml_edit is synchronous file editing; run it off the async runtime.
    spawn_blocking(move || -> Result<_> {
        let toml = read_to_string(&package.manifest_path).context("failed to read error")?;
        let mut doc = toml
            .parse::<toml_edit::Document>()
            .context("toml file is invalid")?;

        {
            // Bump version of package itself
            let v = deps_to_bump[&package.name].to_string();
            doc["package"]["version"] = toml_edit::value(&*v);
        }

        // Bump version of dependencies
        for
        &dep_type in &["dependencies", "dev-dependencies", "build-dependencies"] {
            let deps_section = &mut doc[dep_type];
            if !deps_section.is_none() {
                let table = deps_section.as_table_mut();
                if let Some(table) = table {
                    for (dep_to_bump, new_version) in deps_to_bump.iter() {
                        if table.contains_key(&dep_to_bump) {
                            let prev: &mut toml_edit::Item = &mut table[dep_to_bump];
                            let new_version = toml_edit::value(new_version.to_string());

                            // We should handle object like
                            //
                            //  { version = "0.1", path = "./macros" }
                            match prev {
                                Item::None => {
                                    unreachable!("{}.{} cannot be none", dep_type, dep_to_bump,)
                                }
                                Item::Value(v) => match v {
                                    // Plain string: `foo = "0.1"`
                                    Value::String(_) => {
                                        *v = new_version.as_value().unwrap().clone()
                                    }
                                    // Inline table: `foo = { version = "0.1", ... }` —
                                    // only the `version` key is replaced.
                                    Value::InlineTable(v) => {
                                        *v.get_mut("version").expect("should have version") =
                                            new_version.as_value().unwrap().clone();
                                    }
                                    _ => unreachable!(
                                        "{}.{}: cannot be unknown type {:?}",
                                        dep_type, dep_to_bump, prev
                                    ),
                                },
                                // Full `[dependencies.foo]` tables are left untouched.
                                Item::Table(_) => {}
                                Item::ArrayOfTables(_) => unreachable!(
                                    "{}.{} cannot be array of table",
                                    dep_type, dep_to_bump
                                ),
                            }
                        }
                    }
                }
            }
        }

        write(&package.manifest_path, doc.to_string())
            .context("failed to save modified Cargo.toml")?;

        Ok(())
    })
    .await
    // Only a panicked/cancelled blocking task reaches this expect; the
    // inner Result is returned to the caller.
    .expect("failed to edit toml file")
}

/// This is recursive and returned value does not contain original crate itself.
///
/// Fills `dependants` with `crate name -> bumped version` for `crate_to_bump`
/// and (when `breaking`) every publishable workspace crate that depends on it,
/// transitively. Boxed because async recursion needs an indirected future.
fn public_dependants<'a>(
    dependants: &'a mut HashMap<String, Version>,
    packages: &'a [Package],
    crate_to_bump: &'a str,
    breaking: bool,
) -> BoxFuture<'a, Result<()>> {
    eprintln!("Calculating dependants of `{}`", crate_to_bump);

    async move {
        for p in packages {
            // Skip crates that cannot be published (e.g. publish = false).
            if !can_publish(&p) {
                continue;
            }

            // Already processed — avoids infinite recursion on cycles.
            if dependants.contains_key(&p.name) {
                continue;
            }

            if p.name == crate_to_bump {
                // Base the bump on the version currently on crates.io.
                let previous = get_published_version(&crate_to_bump)
                    .await
                    .context("failed to get published version from crates.io")?;

                let new_version = calc_bumped_version(previous.clone(), breaking)?;

                dependants.insert(p.name.clone(), new_version);
                continue;
            }

            if breaking {
                // A breaking bump propagates to direct dependants, recursively.
                for dep in &p.dependencies {
                    if dep.name == crate_to_bump {
                        eprintln!("{} depends on {}", p.name, dep.name);

                        public_dependants(dependants, packages, &p.name, breaking).await?;
                    }
                }
            }
        }

        Ok(())
    }
    .boxed()
}

/// Computes the next version. Semver treats `0.x` specially: a breaking
/// change bumps the minor component instead of the major one.
fn calc_bumped_version(mut v: Version, breaking: bool) -> Result<Version> {
    // Semver treats 0.x specially
    if v.major == 0 {
        if breaking {
            v.increment_minor();
        } else {
            v.increment_patch();
        }
    } else {
        if breaking {
            v.increment_major()
        } else {
            v.increment_patch();
        }
    }

    Ok(v)
}
use crate::Result;

/// Abstraction over a pool of worker threads: construction (`new`) and job
/// submission (`spawn`).
pub trait ThreadPool {
    /// Creates a new thread pool, immediately spawning the specified number of threads.
    /// Returns an error if any thread fails to spawn. All previously-spawned threads are terminated.
    fn new(threads: u32) -> Result<Self>
    where
        Self: Sized;

    /// Spawn a function into the threadpool.
    /// Spawning always succeeds, but if the function panics the threadpool continues to operate
    /// with the same number of threads — the thread count is not reduced nor is the thread pool
    /// destroyed, corrupted or invalidated.
    fn spawn<F>(&self, job: F)
    where
        F: FnOnce() + Send + 'static;
}

mod naive;
mod rayon;
mod shared_queue;

pub use self::rayon::RayonThreadPool;
pub use naive::NaiveThreadPool;
pub use shared_queue::SharedQueueThreadPool;

/// Thread Pool Kind — a runtime-selectable wrapper over the concrete pool
/// implementations, so callers can hold any of them behind one type.
#[allow(non_camel_case_types)]
#[derive(Debug)]
pub enum ThreadPoolKind {
    /// NaiveThreadPool
    Naive(NaiveThreadPool),
    /// SharedQueueThreadPool
    SharedQueue(SharedQueueThreadPool),
    /// RayonThreadPool
    Rayon(RayonThreadPool),
}

impl ThreadPoolKind {
    /// Forwards `job` to the `spawn` of whichever pool variant this holds.
    pub fn spawn<F>(&self, job: F)
    where
        F: FnOnce() + Send + 'static,
    {
        match self {
            ThreadPoolKind::Naive(inner) => inner.spawn(job),
            ThreadPoolKind::SharedQueue(inner) => inner.spawn(job),
            ThreadPoolKind::Rayon(inner) => inner.spawn(job),
        }
    }
}
use std::thread;
use std::time::Duration;

/// Prints "hello", sleeps one second on the async-std timer, prints
/// "world!", then signals completion via `complete::mark_complete` so
/// `main` can exit.
async fn run() {
    println!("hello");
    async_std::task::sleep(Duration::from_secs(1)).await;
    println!("world!");
    complete::mark_complete();
}

/// Drives `run` on a background thread's executor while the main thread
/// blocks until the future signals completion.
///
/// Note: the explicit `-> ()` return type was removed — an implicit unit
/// return is the idiomatic form (clippy's `unused_unit`).
fn main() {
    // The future runs on a separate OS thread; `main` only waits for the
    // completion signal.
    thread::spawn(move || {
        executor::spawn(run());
    });
    // Blocks until `run` calls `complete::mark_complete()`.
    complete::block_until_complete();
}
#![recursion_limit = "1024"]
mod audio_service;
mod audio_store;
mod encoder;
mod recent_cache;
mod silence_gate;

use async_std::prelude::StreamExt;
use regex::Regex;

use async_std::future::timeout;
use std::time::Duration;

// How long the /stream SSE endpoint waits for new metadata before sending
// a keep-alive "ping" event so proxies don't close the connection.
static EVENTSTREAM_PING_TIMEOUT: Duration = Duration::from_secs(15);

lazy_static::lazy_static! {
    // Captures the basename of a "<name>.mp3" request path in group 1.
    static ref MP3_FILENAME: Regex = Regex::new("(.*)\\.mp3$").unwrap();
}

/// Handles `GET /audio/:audio_id`: parses `<id>.mp3` from the URL, looks up
/// the stream in the `AudioService`, and returns it as an `audio/mpeg`
/// attachment body. Any parse/lookup failure maps to 404.
async fn serve_audio(
    srv: audio_service::AudioService,
    req: tide::Request<()>,
) -> Result<tide::Response, tide::Error> {
    let bad_file = |msg| tide::Error::from_str(tide::StatusCode::NotFound, msg);
    let text: String = req
        .param("audio_id")
        .map_err(|_| bad_file("unknown parameter"))?;
    // Strip the ".mp3" suffix; group 1 is the numeric id.
    let audio_text = &MP3_FILENAME
        .captures(&text)
        .ok_or(bad_file("not valid mp3"))?[1];
    let audio_id = audio_text
        .parse()
        .map_err(|_| bad_file("not valid number"))?;
    let stream = srv
        .stream_audio(audio_store::AudioId(audio_id))
        .await
        .ok_or(bad_file("couldn't find that stream id"))?;
    let mut resp = tide::Response::new(tide::StatusCode::Ok);
    resp.set_content_type("audio/mpeg".parse::<tide::http::Mime>().unwrap());
    resp.insert_header("content-disposition", "attachment");
    // Unknown length: the body is streamed straight from the audio source.
    resp.set_body(tide::Body::from_reader(
        async_std::io::BufReader::new(stream),
        None,
    ));
    Ok(resp)
}

/// Wires up the tide app: two hard-coded audio sources, a `/replay` page,
/// the `/audio/:audio_id` download route, a `/stream` SSE feed of recent
/// clip metadata, and static file serving; then listens on localhost:8080.
#[async_std::main]
async fn main() -> Result<(), std::io::Error> {
    let audio_service = audio_service::AudioService::new();
    audio_service.add_source(
        "your channel name 1".to_string(),
        "http://example.com/my_audio_stream".to_string(),
    );
    audio_service.add_source(
        "your channel name 2".to_string(),
        "http://example.com/my_audio_stream_2".to_string(),
    );

    let mut app = tide::new();
    let srv = audio_service.clone();
    app.at("/replay").get(|_| async move {
        let mut resp = tide::Response::new(200);
        resp.set_body(tide::Body::from_file("../client/build/index.html").await?);
        resp.set_content_type("text/html".parse::<tide::http::Mime>().unwrap());
        Ok(resp)
    });
    app.at("/audio/:audio_id")
        .get(move |req| serve_audio(srv.clone(), req));
    let srv = audio_service.clone();
    app.at("/stream")
        .get(tide::sse::endpoint(move |_req, sender| {
            let srv = srv.clone();
            async move {
                let mut stream = srv.recent_stream();
                loop {
                    match timeout(EVENTSTREAM_PING_TIMEOUT, stream.next()).await {
                        Ok(Some(metadata)) => {
                            // NOTE(review): JSON is hand-assembled; `channel`
                            // is not escaped, so a quote in a channel name
                            // would corrupt the payload — confirm inputs or
                            // switch to a JSON serializer.
                            let json = format!(
                                "{{\"timestamp\":{},\"channel\":\"{}\",\"url\":\"/audio/{}.mp3\"}}",
                                metadata.timestamp, metadata.channel, metadata.id.0
                            );
                            // NOTE(review): the result of `send` (if fallible
                            // in this tide version) is discarded — confirm.
                            sender.send("audio", json, None).await;
                        }
                        Ok(None) => break, // end event stream
                        Err(_) => {
                            // timeout, send update so eventstream doesn't close
                            sender.send("ping", "", None).await;
                        }
                    }
                }
                Ok(())
            }
        }));
    app.at("/").serve_dir("../client/build/")?;
    app.listen("localhost:8080").await?;
    Ok(())
}
//! Rasterizer for Outlines with Anti-Aliasing
//!
//! # Example
//!
//! ```ignore
//! use agg::{Pixfmt,Rgb8,Rgba8,DrawOutline};
//! use agg::{RendererOutlineAA,RasterizerOutlineAA};
//!
//! // Create Image and Rendering Base
//! let pix = Pixfmt::<Rgb8>::new(100,100);
//! let mut ren_base = agg::RenderingBase::new(pix);
//! ren_base.clear( Rgba8::new(255, 255, 255, 255) );
//!
//! // Create Outline Rendering, set color and width
//! let mut ren = RendererOutlineAA::with_base(&mut ren_base);
//! ren.color(agg::Rgba8::new(0,0,0,255));
//! ren.width(20.0);
//!
//! // Create a Path
//! let mut path = agg::Path::new();
//! path.move_to(10.0, 10.0);
//! path.line_to(50.0, 90.0);
//! path.line_to(90.0, 10.0);
//!
//! // Create Outline Rasterizer and add path
//! let mut ras = RasterizerOutlineAA::with_renderer(&mut ren);
//! ras.round_cap(true);
//! ras.add_path(&path);
//! ren_base.to_file("outline_aa.png").unwrap();
//! ```
//!
//! The above code will produce:
//!
//! ![Output](https://raw.githubusercontent.com/savage13/agg/master/images/outline_aa.png)
//!
use crate::stroke::LineJoin; use crate::paths::PathCommand; use crate::paths::Vertex; use crate::line_interp::LineParameters; use crate::line_interp::DrawVars; use crate::line_interp::DistanceInterpolator00; use crate::line_interp::DistanceInterpolator0; use crate::base::RenderingBase; use crate::color::Rgba8; use crate::clip::Rectangle; use crate::render::clip_line_segment; use crate::raster::len_i64_xy; use crate::Pixel; use crate::Color; use crate::RenderOutline; use crate::render::LINE_MAX_LENGTH; use crate::MAX_HALF_WIDTH; use crate::POLY_SUBPIXEL_SHIFT; use crate::POLY_SUBPIXEL_MASK; use crate::DrawOutline; use crate::VertexSource; use crate::raster::len_i64; use crate::POLY_SUBPIXEL_SCALE; /// Outline Rasterizer with Anti-Aliasing pub struct RasterizerOutlineAA<'a,T> where T: DrawOutline { ren: &'a mut T, start_x: i64, start_y: i64, vertices: Vec<Vertex<i64>>, round_cap: bool, line_join: LineJoin, } impl<'a,T> RasterizerOutlineAA<'a, T> where T: DrawOutline { /// Create and connect an Outline Rasterizer to a Renderer pub fn with_renderer(ren: &'a mut T) -> Self { let line_join = if ren.accurate_join_only() { LineJoin::MiterAccurate } else { LineJoin::Round }; Self { ren, start_x: 0, start_y: 0, vertices: vec![], round_cap: false, line_join } } /// Set Rounded End Caps pub fn round_cap(&mut self, on: bool) { self.round_cap = on; } /// Add and Render a path pub fn add_path<VS: VertexSource>(&mut self, path: &VS) { for v in path.xconvert().iter() { match v.cmd { PathCommand::MoveTo => self.move_to_d(v.x, v.y), PathCommand::LineTo => self.line_to_d(v.x, v.y), PathCommand::Close => self.close_path(), PathCommand::Stop => unimplemented!("stop encountered"), } } self.render(false); } fn conv(&self, v: f64) -> i64 { (v * POLY_SUBPIXEL_SCALE as f64).round() as i64 } /// Move the current point to (`x`,`y`) pub fn move_to_d(&mut self, x: f64, y: f64) { let x = self.conv(x); let y = self.conv(y); self.move_to( x, y ); } /// Draw a line from the current point to 
(`x`,`y`) pub fn line_to_d(&mut self, x: f64, y: f64) { let x = self.conv(x); let y = self.conv(y); self.line_to( x, y ); } fn move_to(&mut self, x: i64, y: i64) { self.start_x = x; self.start_y = y; self.vertices.push( Vertex::move_to(x, y) ); } fn line_to(&mut self, x: i64, y: i64) { let n = self.vertices.len(); if n > 1 { let v0 = self.vertices[n-1]; let v1 = self.vertices[n-2]; let len = len_i64(&v0,&v1); if len < POLY_SUBPIXEL_SCALE + POLY_SUBPIXEL_SCALE / 2 { self.vertices.pop(); } } self.vertices.push( Vertex::line_to(x, y) ); } /// Close the current path pub fn close_path(&mut self) { self.line_to(self.start_x, self.start_y); } fn cmp_dist_start(d: i64) -> bool { d > 0 } fn cmp_dist_end (d: i64) -> bool { d <= 0 } fn draw_two_points(&mut self) { debug_assert!(self.vertices.len() == 2); let p1 = self.vertices.first().unwrap(); let p2 = self.vertices.last().unwrap(); let (x1,y1) = (p1.x, p1.y); let (x2,y2) = (p2.x, p2.y); let lprev = len_i64(p1,p2); let lp = LineParameters::new(x1,y1, x2,y2, lprev); if self.round_cap { self.ren.semidot(Self::cmp_dist_start, x1, y1, x1 + (y2-y1), y1 - (x2-x1)); } self.ren.line3(&lp, x1 + (y2-y1), y1 - (x2-x1), x2 + (y2-y1), y2 - (x2-x1)); if self.round_cap { self.ren.semidot(Self::cmp_dist_end, x2, y2, x2 + (y2-y1), y2 - (x2-x1)); } } fn draw_three_points(&mut self) { debug_assert!(self.vertices.len() == 3); let mut v = self.vertices.iter(); let p1 = v.next().unwrap(); let p2 = v.next().unwrap(); let p3 = v.next().unwrap(); let (x1,y1) = (p1.x, p1.y); let (x2,y2) = (p2.x, p2.y); let (x3,y3) = (p3.x, p3.y); let lprev = len_i64(p1,p2); let lnext = len_i64(p2,p3); let lp1 = LineParameters::new(x1, y1, x2, y2, lprev); let lp2 = LineParameters::new(x2, y2, x3, y3, lnext); if self.round_cap { self.ren.semidot(Self::cmp_dist_start, x1, y1, x1 + (y2-y1), y1 - (x2-x1)); } if self.line_join == LineJoin::Round { self.ren.line3(&lp1, x1 + (y2-y1), y1 - (x2-x1), x2 + (y2-y1), y2 - (x2-x1)); self.ren.pie(x2, y2, x2 + (y2-y1), y2 - (x2-x1), 
x2 + (y3-y2), y2 - (x3-x2)); self.ren.line3(&lp2, x2 + (y3-y2), y2 - (x3-x2), x3 + (y3-y2), y3 - (x3-x2)); } else { let (xb1, yb1) = Self::bisectrix(&lp1, &lp2); self.ren.line3(&lp1, x1 + (y2-y1), y1 - (x2-x1), xb1, yb1); self.ren.line3(&lp2, xb1, yb1, x3 + (y3-y2), y3 - (x3-x2)); } if self.round_cap { self.ren.semidot(Self::cmp_dist_end, x3, y3, x3 + (y3-y2), y3 - (x3-x2)); } } fn draw_many_points(&mut self) { debug_assert!(self.vertices.len() > 3); let v1 = self.vertices[0]; let x1 = v1.x; let y1 = v1.y; let v2 = self.vertices[1]; let x2 = v2.x; let y2 = v2.y; let v3 = self.vertices[2]; let v4 = self.vertices[3]; let mut dv = DrawVars::new(); dv.idx = 3; let lprev = len_i64(&v1,&v2); dv.lcurr = len_i64(&v2,&v3); dv.lnext = len_i64(&v3,&v4); let prev = LineParameters::new(x1,y1, x2, y2, lprev); // pt1 -> pt2 dv.x1 = v3.x; dv.y1 = v3.y; dv.curr = LineParameters::new(x2,y2, dv.x1, dv.y1, dv.lcurr); // pt2 -> pt3 dv.x2 = v4.x; dv.y2 = v4.y; dv.next = LineParameters::new(dv.x1,dv.y1, dv.x2, dv.y2, dv.lnext); // pt3 -> pt4 dv.xb1 = 0; dv.xb2 = 0; dv.yb1 = 0; dv.yb2 = 0; dv.flags = match self.line_join { LineJoin::MiterRevert | LineJoin::Bevel | LineJoin::MiterRound => { 3 }, LineJoin::None => 3, LineJoin::MiterAccurate => 0, LineJoin::Miter | LineJoin::Round => { let mut v = 0; if prev.diagonal_quadrant() == dv.curr.diagonal_quadrant() { v |= 1; } if dv.curr.diagonal_quadrant() == dv.next.diagonal_quadrant() { v |= 2; } v } }; if self.round_cap { self.ren.semidot(Self::cmp_dist_start, x1,y1, x1 + (y2-y1), y1 - (x2-x1)); } if (dv.flags & 1) == 0 { if self.line_join == LineJoin::Round { self.ren.line3(&prev, x1 + (y2-y1), y1 - (x2-x1), x2 + (y2-y1), y2 - (x2-x1)); self.ren.pie(prev.x2, prev.y2, x2 + (y2-y1), y2 - (x2-x1), dv.curr.x1 + (dv.curr.y2-dv.curr.y1), dv.curr.y1 + (dv.curr.x2-dv.curr.x1)); } else { let(xb1, yb1) = Self::bisectrix(&prev, &dv.curr); self.ren.line3(&prev, x1 + (y2-y1), y1 - (x2-x1), xb1, yb1); dv.xb1 = xb1; dv.yb1 = yb1; } } else { 
self.ren.line1(&prev, x1 + (y2-y1), y1-(x2-x1)); } if (dv.flags & 2) == 0 && self.line_join != LineJoin::Round { let (xb2, yb2) = Self::bisectrix(&dv.curr, &dv.next); dv.xb2 = xb2; dv.yb2 = yb2; } self.draw(&mut dv, 1, self.vertices.len()-2); if (dv.flags & 1) == 0 { if self.line_join == LineJoin::Round { self.ren.line3(&dv.curr, dv.curr.x1 + (dv.curr.y2-dv.curr.y1), dv.curr.y1 - (dv.curr.x2 - dv.curr.x1), dv.curr.x2 + (dv.curr.y2 - dv.curr.y1), dv.curr.y2 - (dv.curr.x2 - dv.curr.x1)); } else { self.ren.line3(&dv.curr, dv.xb1, dv.yb1, dv.curr.x2 + (dv.curr.y2 - dv.curr.y1), dv.curr.y2 - (dv.curr.x2 - dv.curr.x1)); } } else { self.ren.line2(&dv.curr, dv.curr.x2 + (dv.curr.y2 - dv.curr.y1), dv.curr.y2 - (dv.curr.x2 - dv.curr.x1)); } if self.round_cap { self.ren.semidot(Self::cmp_dist_end, dv.curr.x2, dv.curr.y2, dv.curr.x2 + (dv.curr.y2 - dv.curr.y1), dv.curr.y2 - (dv.curr.x2 - dv.curr.x1)); } } /// Render the current path /// /// Use only if drawing a path with [`move_to_d`](#method.move_to_d) and /// [`line_to_d`](#method.line_to_d). Paths drawn with [`add_path`](#method.add_path) /// are automatically rendered /// pub fn render(&mut self, close_polygon: bool) { if close_polygon { unimplemented!("no closed polygons yet"); } else { match self.vertices.len() { 0 | 1 => return, 2 => self.draw_two_points(), 3 => self.draw_three_points(), _ => self.draw_many_points(), } } self.vertices.clear(); } fn draw(&mut self, dv: &mut DrawVars, start: usize, end: usize) { for _i in start .. 
end { if self.line_join == LineJoin::Round { dv.xb1 = dv.curr.x1 + (dv.curr.y2 - dv.curr.y1); dv.yb1 = dv.curr.y1 - (dv.curr.x2 - dv.curr.x1); dv.xb2 = dv.curr.x2 + (dv.curr.y2 - dv.curr.y1); dv.yb2 = dv.curr.y2 - (dv.curr.x2 - dv.curr.x1); } match dv.flags { 0 => self.ren.line3(&dv.curr, dv.xb1, dv.yb1, dv.xb2, dv.yb2), 1 => self.ren.line2(&dv.curr, dv.xb2, dv.yb2), 2 => self.ren.line1(&dv.curr, dv.xb1, dv.yb1), 3 => self.ren.line0(&dv.curr), _ => unreachable!("flag value not covered") } if self.line_join == LineJoin::Round && (dv.flags & 2) == 0 { self.ren.pie(dv.curr.x2, dv.curr.y2, dv.curr.x2 + (dv.curr.y2 - dv.curr.y1), dv.curr.y2 - (dv.curr.x2 - dv.curr.x1), dv.curr.x2 + (dv.next.y2 - dv.next.y1), dv.curr.y2 - (dv.next.x2 - dv.next.x1)); } // Increment to next segment dv.x1 = dv.x2; dv.y1 = dv.y2; dv.lcurr = dv.lnext; //dv.lnext = self.vertices[dv.idx].len; let v0 = self.vertices[dv.idx]; dv.idx += 1; if dv.idx >= self.vertices.len() { dv.idx = 0; } let v = self.vertices[dv.idx]; dv.x2 = v.x; dv.y2 = v.y; dv.lnext = len_i64(&v0,&v); dv.curr = dv.next; dv.next = LineParameters::new(dv.x1, dv.y1, dv.x2, dv.y2, dv.lnext); dv.xb1 = dv.xb2; dv.yb1 = dv.yb2; match self.line_join { LineJoin::Bevel | LineJoin::MiterRevert | LineJoin::MiterRound => dv.flags = 3, LineJoin::None => dv.flags = 3, LineJoin::Miter => { dv.flags >>= 1; if dv.curr.diagonal_quadrant() == dv.next.diagonal_quadrant() { dv.flags |= 1 << 1; } if (dv.flags & 2) == 0 { let (xb2,yb2) = Self::bisectrix(&dv.curr, &dv.next); dv.xb2 = xb2; dv.yb2 = yb2; } }, LineJoin::Round => { dv.flags >>= 1; if dv.curr.diagonal_quadrant() == dv.next.diagonal_quadrant() { dv.flags |= 1 << 1; } }, LineJoin::MiterAccurate => { dv.flags = 0; let (xb2,yb2) = Self::bisectrix(&dv.curr, &dv.next); dv.xb2 = xb2; dv.yb2 = yb2; } } } } fn bisectrix(l1: &LineParameters, l2: &LineParameters) -> (i64, i64) { let k = l2.len as f64 / l1.len as f64; let mut tx = l2.x2 as f64 - (l2.x1 - l1.x1) as f64 * k; let mut ty = l2.y2 as f64 - 
(l2.y1 - l1.y1) as f64 * k; //All bisectrices must be on the right of the line //If the next point is on the left (l1 => l2.2) //then the bisectix should be rotated by 180 degrees. if ((l2.x2 - l2.x1) as f64 * (l2.y1 - l1.y1) as f64) < ((l2.y2 - l2.y1) as f64 * (l2.x1 - l1.x1) as f64 + 100.0) { tx -= (tx - l2.x1 as f64) * 2.0; ty -= (ty - l2.y1 as f64) * 2.0; } // Check if the bisectrix is too short let dx = tx - l2.x1 as f64; let dy = ty - l2.y1 as f64; if ((dx * dx + dy * dy).sqrt() as i64) < POLY_SUBPIXEL_SCALE { let x = (l2.x1 + l2.x1 + (l2.y1 - l1.y1) + (l2.y2 - l2.y1)) >> 1; let y = (l2.y1 + l2.y1 - (l2.x1 - l1.x1) - (l2.x2 - l2.x1)) >> 1; (x,y) } else { (tx.round() as i64,ty.round() as i64) } } } #[derive(Debug)] /// Outline Renderer with Anti-Aliasing pub struct RendererOutlineAA<'a,T> { ren: &'a mut RenderingBase<T>, color: Rgba8, clip_box: Option<Rectangle<i64>>, profile: LineProfileAA, } impl<'a,T> RendererOutlineAA<'a,T> where T: Pixel { /// Create Outline Renderer with a [`RenderingBase`](../base/struct.RenderingBase.html) pub fn with_base(ren: &'a mut RenderingBase<T>) -> Self { let profile = LineProfileAA::new(); Self { ren, color: Rgba8::black(), clip_box: None, profile } } /// Set width of the line pub fn width(&mut self, width: f64) { self.profile.width(width); } /// Set minimum with of the line /// /// Use [`width`](#method.width) for this to take effect pub fn min_width(&mut self, width: f64) { self.profile.min_width(width); } /// Set smoother width of the line /// /// Use [`width`](#method.width) for this to take effect pub fn smoother_width(&mut self, width: f64) { self.profile.smoother_width(width); } fn subpixel_width(&self) -> i64 { self.profile.subpixel_width } /// Draw a Line Segment /// /// If line to "too long", divide it by two and draw both segments /// otherwise, interpolate along the line to draw /// fn line0_no_clip(&mut self, lp: &LineParameters) { if lp.len > LINE_MAX_LENGTH { let (lp1, lp2) = lp.divide(); 
self.line0_no_clip(&lp1); self.line0_no_clip(&lp2); return; } let mut li = lp.interp0(self.subpixel_width()); if li.count() > 0 { if li.vertical() { while li.step_ver(self) { } } else { while li.step_hor(self) { } } } } fn line1_no_clip(&mut self, lp: &LineParameters, sx: i64, sy: i64) { if lp.len > LINE_MAX_LENGTH { let (lp1, lp2) = lp.divide(); self.line1_no_clip(&lp1, (lp.x1 + sx)>>1, (lp.y1+sy)>>1); self.line1_no_clip(&lp2, lp.x2 + (lp.y1 + lp1.y1), lp1.y2 - (lp1.x2-lp1.x1)); return; } let (sx, sy) = lp.fix_degenerate_bisectrix_start(sx, sy); let mut li = lp.interp1(sx, sy, self.subpixel_width()); if li.vertical() { while li.step_ver(self) { } } else { while li.step_hor(self) { } } } fn line2_no_clip(&mut self, lp: &LineParameters, ex: i64, ey: i64) { if lp.len > LINE_MAX_LENGTH { let (lp1,lp2) = lp.divide(); self.line2_no_clip(&lp1, lp1.x2 + (lp1.y2 - lp1.y1), lp1.y2 - (lp1.x2 - lp1.x1)); self.line2_no_clip(&lp2, (lp.x2 + ex) >> 1, (lp.y2 + ey) >> 1); return; } let (ex, ey) = lp.fix_degenerate_bisectrix_end(ex, ey); let mut li = lp.interp2(ex, ey, self.subpixel_width()); if li.vertical() { while li.step_ver(self) { } } else { while li.step_hor(self) { } } } fn line3_no_clip(&mut self, lp: &LineParameters, sx: i64, sy: i64, ex: i64, ey: i64) { if lp.len > LINE_MAX_LENGTH { let (lp1, lp2) = lp.divide(); let mx = lp1.x2 + (lp1.y2 - lp1.y1); let my = lp1.y2 - (lp1.x2 - lp1.x1); self.line3_no_clip(&lp1, (lp.x1 + sx) >> 1, (lp.y1 + sy) >> 1, mx, my); self.line3_no_clip(&lp2, mx, my, (lp.x2 + ex) >> 1, (lp.y2 + ey) >> 1); return; } let (sx, sy) = lp.fix_degenerate_bisectrix_start(sx, sy); let (ex, ey) = lp.fix_degenerate_bisectrix_end(ex, ey); let mut li = lp.interp3(sx, sy, ex, ey, self.subpixel_width()); if li.vertical() { while li.step_ver(self) { } } else { while li.step_hor(self) { } } } fn semidot_hline<F>(&mut self, cmp: F, xc1: i64, yc1: i64, xc2: i64, yc2: i64, x1: i64, y1: i64, x2: i64) where F: Fn(i64) -> bool { let mut x1 = x1; let mut covers = [0u64; 
MAX_HALF_WIDTH * 2 + 4]; let p0 = 0; let mut p1 = 0; let mut x = x1 << POLY_SUBPIXEL_SHIFT; let mut y = y1 << POLY_SUBPIXEL_SHIFT; let w = self.subpixel_width(); let mut di = DistanceInterpolator0::new(xc1, yc1, xc2, yc2, x, y); x += POLY_SUBPIXEL_SCALE/2; y += POLY_SUBPIXEL_SCALE/2; let x0 = x1; let mut dx = x - xc1; let dy = y - yc1; loop { let d = ((dx*dx + dy*dy) as f64).sqrt() as i64; covers[p1] = 0; if cmp(di.dist) && d <= w { covers[p1] = self.cover(d); } p1 += 1; dx += POLY_SUBPIXEL_SCALE; di.inc_x(); x1 += 1; if x1 > x2 { break; } } self.ren.blend_solid_hspan(x0, y1, (p1 - p0) as i64, self.color, &covers); } fn pie_hline(&mut self, xc: i64, yc: i64, xp1: i64, yp1: i64, xp2: i64, yp2: i64, xh1: i64, yh1: i64, xh2: i64) { if let Some(clip_box) = self.clip_box { if clip_box.clip_flags(xc, yc) != 0 { return; } } let mut xh1 = xh1; let mut covers = [0u64; MAX_HALF_WIDTH * 2 + 4]; let p0 = 0; let mut p1 = 0; let mut x = xh1 << POLY_SUBPIXEL_SHIFT; let mut y = yh1 << POLY_SUBPIXEL_SHIFT; let w = self.subpixel_width(); let mut di = DistanceInterpolator00::new(xc, yc, xp1, yp1, xp2, yp2, x, y); x += POLY_SUBPIXEL_SCALE/2; y += POLY_SUBPIXEL_SCALE/2; let xh0 = xh1; let mut dx = x - xc; let dy = y - yc; loop { let d = ((dx*dx + dy*dy) as f64).sqrt() as i64; covers[p1] = 0; if di.dist1 <= 0 && di.dist2 > 0 && d <= w { covers[p1] = self.cover(d); } p1 += 1; dx += POLY_SUBPIXEL_SCALE; di.inc_x(); xh1 += 1; if xh1 > xh2 { break; } } self.ren.blend_solid_hspan(xh0, yh1, (p1 - p0) as i64, self.color, &covers); } } impl<T> RenderOutline for RendererOutlineAA<'_, T> where T: Pixel { fn cover(&self, d: i64) -> u64 { let subpixel_shift = POLY_SUBPIXEL_SHIFT; let subpixel_scale = 1 << subpixel_shift; let index = d + i64::from(subpixel_scale) * 2; assert!(index >= 0); u64::from( self.profile.profile[index as usize] ) } fn blend_solid_hspan(&mut self, x: i64, y: i64, len: i64, covers: &[u64]) { self.ren.blend_solid_hspan(x, y, len, self.color, covers); } fn blend_solid_vspan(&mut 
self, x: i64, y: i64, len: i64, covers: &[u64]) {
        // Blend a vertical span of covers with the current color.
        self.ren.blend_solid_vspan(x, y, len, self.color, covers);
    }
}

impl<T> DrawOutline for RendererOutlineAA<'_, T> where T: Pixel {
    /// Draw a line segment with explicit start (sx,sy) and end (ex,ey)
    /// direction points, clipping to the clip box when one is set.
    ///
    /// When an endpoint is clipped (flag bit set) a replacement direction
    /// point is synthesized perpendicular to the clipped segment; otherwise
    /// the original direction point is pulled in until it lies within the
    /// clipped segment's length.
    fn line3(&mut self, lp: &LineParameters, sx: i64, sy: i64, ex: i64, ey: i64) {
        if let Some(clip_box) = self.clip_box {
            let (x1, y1, x2, y2, flags) = clip_line_segment(lp.x1, lp.y1, lp.x2, lp.y2, clip_box);
            // Bit 2 (value 4): segment fully outside — nothing to draw.
            if (flags & 4) == 0 {
                let (mut sx, mut sy, mut ex, mut ey) = (sx, sy, ex, ey);
                if flags != 0 {
                    let lp2 = LineParameters::new(x1, y1, x2, y2, len_i64_xy(x1, y1, x2, y2));
                    if flags & 1 != 0 {
                        // Start was clipped: fabricate a direction point
                        // perpendicular to the clipped segment.
                        sx = x1 + (y2 - y1);
                        sy = y1 - (x2 - x1);
                    } else {
                        // Pull the original start direction point toward the
                        // segment until it is within the clipped length.
                        while (sx - lp.x1).abs() + (sy - lp.y1).abs() > lp2.len {
                            sx = (lp.x1 + sx) >> 1;
                            sy = (lp.y1 + sy) >> 1;
                        }
                    }
                    if flags & 2 != 0 {
                        // End was clipped: same perpendicular construction.
                        ex = x2 + (y2 - y1);
                        ey = y2 - (x2 - x1);
                    } else {
                        while (ex - lp.x2).abs() + (ey - lp.y2).abs() > lp2.len {
                            ex = (lp.x2 + ex) >> 1;
                            ey = (lp.y2 + ey) >> 1;
                        }
                    }
                    self.line3_no_clip(&lp2, sx, sy, ex, ey);
                } else {
                    self.line3_no_clip(&lp, sx, sy, ex, ey);
                }
            }
        } else {
            self.line3_no_clip(&lp, sx, sy, ex, ey);
        }
    }

    /// Draw a semidot (half round cap) centered at (xc1,yc1); `cmp` selects
    /// which half-plane of the dot is filled.
    fn semidot<F>(&mut self, cmp: F, xc1: i64, yc1: i64, xc2: i64, yc2: i64) where F: Fn(i64) -> bool {
        if let Some(clip_box) = self.clip_box {
            if clip_box.clip_flags(xc1, yc1) != 0 {
                return;
            }
        }
        // Radius in whole pixels, at least 1.
        let mut r = (self.subpixel_width() + POLY_SUBPIXEL_MASK) >> POLY_SUBPIXEL_SHIFT;
        if r < 1 { r = 1; }
        let mut ei = EllipseInterpolator::new(r, r);
        let mut dx = 0;
        let mut dy = -r;
        let mut dy0 = dy;
        let mut dx0 = dx;
        let x = xc1 >> POLY_SUBPIXEL_SHIFT;
        let y = yc1 >> POLY_SUBPIXEL_SHIFT;
        // Walk the circle outline; emit a pair of mirrored hlines each time
        // the y step advances.
        loop {
            dx += ei.dx;
            dy += ei.dy;
            if dy != dy0 {
                self.semidot_hline(&cmp, xc1, yc1, xc2, yc2, x-dx0, y+dy0, x+dx0);
                self.semidot_hline(&cmp, xc1, yc1, xc2, yc2, x-dx0, y-dy0, x+dx0);
            }
            dx0 = dx;
            dy0 = dy;
            ei.inc();
            if dy >= 0 { break; }
        }
        // Center scanline.
        self.semidot_hline(&cmp, xc1, yc1, xc2, yc2, x-dx0, y+dy0, x+dx0);
    }

    /// Draw a pie (filled wedge) at (xc,yc) bounded by directions toward
    /// (x1,y1) and (x2,y2) — used for rounded joins.
    fn pie(&mut self, xc: i64, yc: i64, x1: i64, y1: i64, x2: i64, y2: i64) {
        let mut r = (self.subpixel_width() + POLY_SUBPIXEL_MASK) >> POLY_SUBPIXEL_SHIFT;
        if r < 1 { r = 1; }
        let mut ei = EllipseInterpolator::new(r, r);
        let mut dx = 0;
        let mut dy = -r;
        let mut dy0 = dy;
        let mut dx0 = dx;
        let x = xc >> POLY_SUBPIXEL_SHIFT;
        let y = yc >> POLY_SUBPIXEL_SHIFT;
        loop {
            dx += ei.dx;
            dy += ei.dy;
            if dy != dy0 {
                self.pie_hline(xc, yc, x1, y1, x2, y2, x-dx0, y+dy0, x+dx0);
                self.pie_hline(xc, yc, x1, y1, x2, y2, x-dx0, y-dy0, x+dx0);
            }
            dx0 = dx;
            dy0 = dy;
            ei.inc();
            if dy >= 0 { break; }
        }
        self.pie_hline(xc, yc, x1, y1, x2, y2, x-dx0, y+dy0, x+dx0);
    }

    /// Draw a Line Segment, clipping if necessary
    ///
    fn line0(&mut self, lp: &LineParameters) {
        if let Some(clip_box) = self.clip_box {
            let (x1,y1,x2,y2,flags) = clip_line_segment(lp.x1,lp.y1,lp.x2,lp.y2,clip_box);
            if flags & 4 == 0 { // Line is Visible
                if flags != 0 { // Line is Clipped
                    // Create new Line from clipped lines and draw
                    let lp2 = LineParameters::new(x1, y1, x2, y2, len_i64_xy(x1,y1,x2,y2));
                    self.line0_no_clip(&lp2);
                } else {
                    // Line is not Clipped
                    self.line0_no_clip(&lp)
                }
            }
        } else {
            // No clip box defined
            self.line0_no_clip(&lp);
        }
    }

    /// Draw a line segment with a start direction point (sx,sy), clipping if
    /// necessary.
    fn line1(&mut self, lp: &LineParameters, sx: i64, sy: i64) {
        if let Some(clip_box) = self.clip_box {
            let (x1,y1,x2,y2,flags) = clip_line_segment(lp.x1,lp.y1,lp.x2,lp.y2, clip_box);
            if flags & 4 == 0 {
                if flags != 0 {
                    let (mut sx, mut sy) = (sx,sy);
                    let lp2 = LineParameters::new(x1,y1,x2,y2, len_i64_xy(x1,y1,x2,y2));
                    // BUG FIX: this test was `flags & 1 == 0`, i.e. inverted.
                    // Bit 0 set means the START point was clipped, and only
                    // then must a perpendicular direction point be
                    // synthesized — exactly as `line3` above does.  With the
                    // inverted test an unclipped start got a bogus direction
                    // point and a clipped start reused a stale one.
                    if flags & 1 != 0 {
                        sx = x1 + (y2-y1);
                        sy = y1 - (x2-x1);
                    } else {
                        while (sx - lp.x1).abs() + (sy-lp.y1).abs() > lp2.len {
                            sx = (lp.x1 + sx) >> 1;
                            sy = (lp.y1 + sy) >> 1;
                        }
                    }
                    self.line1_no_clip(&lp2, sx, sy);
                } else {
                    self.line1_no_clip(&lp, sx, sy);
                }
            }
        } else {
            self.line1_no_clip(&lp, sx, sy);
        }
    }

    /// Draw a line segment with an end direction point (ex,ey), clipping if
    /// necessary.
    fn line2(&mut self, lp: &LineParameters, ex: i64, ey: i64) {
        if let Some(clip_box) = self.clip_box {
            let (x1,y1,x2,y2,flags) = clip_line_segment(lp.x1,lp.y1,lp.x2,lp.y2, clip_box);
            if flags & 4 == 0 {
                if flags != 0 {
                    let (mut ex,mut ey) = (ex,ey);
                    let lp2 = LineParameters::new(x1,y1,x2,y2, len_i64_xy(x1,y1,x2,y2));
                    if flags & 2 != 0 {
                        ex =
x2 + (y2-y1);
                        // BUG FIX: was `ey = y2 + (x2-x1)`.  The clipped end
                        // point must be offset PERPENDICULAR to the segment
                        // direction (dx,dy) -> (dy,-dx), exactly as `line3`
                        // in this file computes it: ex = x2 + dy, ey = y2 - dx.
                        // The `+` put the direction point on the wrong side
                        // of the line.
                        ey = y2 - (x2-x1);
                    } else {
                        while (ex - lp.x2).abs() + (ey - lp.y2).abs() > lp2.len {
                            ex = (lp.x2 + ex) >> 1;
                            ey = (lp.y2 + ey) >> 1;
                        }
                    }
                    self.line2_no_clip(&lp2, ex, ey);
                } else {
                    self.line2_no_clip(&lp, ex, ey);
                }
            }
        } else {
            self.line2_no_clip(&lp, ex, ey);
        }
    }

    /// Set the current stroke color.
    fn color<C: Color>(&mut self, color: C) {
        self.color = Rgba8::from_trait(color);
    }

    /// This renderer tolerates approximate joins.
    fn accurate_join_only(&self) -> bool {
        false
    }
}

#[derive(Debug,Default)]
/// Profile of a Line
///
/// Precomputed coverage lookup table mapping subpixel distance from the line
/// center to an anti-aliasing cover value.
struct LineProfileAA {
    min_width: f64,      // minimum effective line width, in pixels
    smoother_width: f64, // width of the linear falloff region, in pixels
    subpixel_width: i64, // half-width in subpixel units
    gamma: Vec<u8>,      // gamma lookup applied to computed cover values
    profile: Vec<u8>,    // the distance -> cover table
}

impl LineProfileAA {
    /// Create new LineProfile
    ///
    /// Width is initialized to 0.0
    pub fn new() -> Self {
        // Identity gamma table by default.
        let gamma : Vec<_> = (0..POLY_SUBPIXEL_SCALE).map(|x| x as u8).collect();
        let mut s = Self { min_width: 1.0, smoother_width: 1.0, subpixel_width: 0, profile: vec![], gamma };
        s.width(0.0);
        s
    }
    /// Set minimum width
    ///
    /// For this to take effect, the width needs to be set
    pub fn min_width(&mut self, width: f64) {
        self.min_width = width;
    }
    /// Set smoother width
    ///
    /// For this to take effect, the width needs to be set
    pub fn smoother_width(&mut self, width: f64) {
        self.smoother_width = width;
    }
    /// Set width
    ///
    /// Negative widths are set to 0.0
    ///
    /// Width less than smoother width are doubled, otherwise the smoother width is added
    /// to the width
    /// Widths are then divided by 2 and the smoother width is removed.
/// /// The line profile is then constructed and saved to `profile` pub fn width(&mut self, w: f64) { let mut w = w; if w < 0.0 { w = 0.0; } if w < self.smoother_width { w += w; } else { w += self.smoother_width; } w *= 0.5; w -= self.smoother_width; let mut s = self.smoother_width; if w < 0.0 { s += w; w = 0.0; } self.set(w, s); } fn profile(&mut self, w: f64) { let subpixel_shift = POLY_SUBPIXEL_SHIFT; let subpixel_scale = 1 << subpixel_shift; self.subpixel_width = (w * subpixel_scale as f64).round() as i64; let size = (self.subpixel_width + subpixel_scale * 6) as usize; if size > self.profile.capacity() { self.profile.resize(size, 0); } } /// Create the Line Profile /// /// fn set(&mut self, center_width: f64, smoother_width: f64) { let subpixel_shift = POLY_SUBPIXEL_SHIFT; let subpixel_scale = 1 << subpixel_shift; let aa_shift = POLY_SUBPIXEL_SHIFT; let aa_scale = 1 << aa_shift; let aa_mask = aa_scale - 1; let mut base_val = 1.0; let mut center_width = center_width; let mut smoother_width = smoother_width; // Set minimum values for the center and smoother widths if center_width == 0.0 { center_width = 1.0 / subpixel_scale as f64; } if smoother_width == 0.0 { smoother_width = 1.0 / subpixel_scale as f64; } // Full width let width = center_width + smoother_width; // Scale widths so they equal the minimum width if width < self.min_width { let k = width / self.min_width; base_val *= k; center_width /= k; smoother_width /= k; } // Allocate space for the line profile self.profile(center_width + smoother_width); // Width in Subpixel scales let subpixel_center_width : usize = (center_width * subpixel_scale as f64) as usize; let subpixel_smoother_width : usize = (smoother_width * subpixel_scale as f64) as usize; // let n_smoother = self.profile.len() - subpixel_smoother_width - subpixel_center_width - subpixel_scale*2; // Center and Smoother Width Offsets let ch_center = subpixel_scale*2; let ch_smoother = ch_center + subpixel_center_width; // Fill center portion of the 
profile (on one side) base_val let val = self.gamma[(base_val * f64::from(aa_mask)) as usize]; for i in 0 .. subpixel_center_width { self.profile[ch_center + i] = val; } // Fill smoother portion of the profile with value decreasing linearly for i in 0 .. subpixel_smoother_width { let k = ((base_val - base_val * (i as f64 / subpixel_smoother_width as f64)) * f64::from(aa_mask)) as usize; self.profile[ch_smoother + i] = self.gamma[k]; } // Remainder is essentially 0.0 let val = self.gamma[0]; for i in 0 .. n_smoother { self.profile[ch_smoother + subpixel_smoother_width + i] = val; } // Copy to other side for i in 0 .. subpixel_scale*2 { self.profile[ch_center - 1 - i] = self.profile[ch_center + i] } } } /// Ellipse Interpolator #[derive(Debug)] struct EllipseInterpolator { rx2: i64, ry2: i64, two_rx2: i64, two_ry2: i64, dx: i64, dy: i64, inc_x: i64, inc_y: i64, cur_f: i64, } impl EllipseInterpolator { /// Create new Ellipse Interpolator with axes lenghts `rx` and `ry` pub fn new(rx: i64, ry: i64) -> Self { let rx2 = rx * rx; let ry2 = ry * ry; let two_rx2 = rx2 * 2; let two_ry2 = ry2 * 2; let dx = 0; let dy = 0; let inc_x = 0; let inc_y = -ry * two_rx2; let cur_f = 0; Self { rx2, ry2, two_rx2, two_ry2, dx, dy, inc_x, inc_y, cur_f } } /// Increment the Interpolator fn inc(&mut self) { // let mut mx = self.cur_f + self.inc_x + self.ry2; let fx = mx; if mx < 0 { mx = -mx; } let mut my = self.cur_f + self.inc_y + self.rx2; let fy = my; if my < 0 { my = -my; } let mut mxy = self.cur_f + self.inc_x + self.ry2 + self.inc_y + self.rx2; let fxy = mxy; if mxy < 0 { mxy = -mxy; } let mut min_m = mx; let flag = if min_m > my { min_m = my; false } else { true }; self.dx = 0; self.dy = 0; if min_m > mxy { self.inc_x += self.two_ry2; self.inc_y += self.two_rx2; self.cur_f = fxy; self.dx = 1; self.dy = 1; return; } if flag { self.inc_x += self.two_ry2; self.cur_f = fx; self.dx = 1; return; } self.inc_y += self.two_rx2; self.cur_f = fy; self.dy = 1; } }
#![allow(dead_code)] use std::mem; use winapi::*; /// A wrapper around `OVERLAPPED` to provide "rustic" accessors and /// initializers. #[derive(Debug)] pub struct Overlapped(OVERLAPPED); unsafe impl Send for Overlapped {} unsafe impl Sync for Overlapped {} impl Overlapped { /// Creates a new zeroed out instance of an overlapped I/O tracking state. /// /// This is suitable for passing to methods which will then later get /// notified via an I/O Completion Port. pub fn zero() -> Overlapped { Overlapped(unsafe { mem::zeroed() }) } /// Creates a new `Overlapped` function pointer from the underlying /// `OVERLAPPED`, wrapping in the "rusty" wrapper for working with /// accessors. /// /// # Unsafety /// /// This function doesn't validate `ptr` nor the lifetime of the returned /// pointer at all, it's recommended to use this method with extreme /// caution. pub unsafe fn from_raw<'a>(ptr: *mut OVERLAPPED) -> &'a mut Overlapped { &mut *(ptr as *mut Overlapped) } /// Gain access to the raw underlying data pub fn raw(&self) -> *mut OVERLAPPED { &self.0 as *const _ as *mut _ } /// Sets the offset inside this overlapped structure. /// /// Note that for I/O operations in general this only has meaning for I/O /// handles that are on a seeking device that supports the concept of an /// offset. pub fn set_offset(&mut self, offset: u64) { self.0.Offset = offset as u32; self.0.OffsetHigh = (offset >> 32) as u32; } /// Reads the offset inside this overlapped structure. pub fn offset(&self) -> u64 { (self.0.Offset as u64) | ((self.0.OffsetHigh as u64) << 32) } /// Sets the `hEvent` field of this structure. /// /// The event specified can be null. pub fn set_event(&mut self, event: HANDLE) { self.0.hEvent = event; } /// Reads the `hEvent` field of this structure, may return null. pub fn event(&self) -> HANDLE { self.0.hEvent } }
// NOTE(review): AutoRust-generated Azure Data Factory client bindings.
// Hand edits are lost on regeneration — prefer fixing the generator/spec.
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
// Aggregate error type: one transparent variant per generated operation,
// each forwarding to the operation's own error enum via #[from].
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
    #[error(transparent)] Operations_List(#[from] operations::list::Error),
    #[error(transparent)] Factories_List(#[from] factories::list::Error),
    #[error(transparent)] Factories_ConfigureFactoryRepo(#[from] factories::configure_factory_repo::Error),
    #[error(transparent)] Factories_ListByResourceGroup(#[from] factories::list_by_resource_group::Error),
    #[error(transparent)] Factories_Get(#[from] factories::get::Error),
    #[error(transparent)] Factories_CreateOrUpdate(#[from] factories::create_or_update::Error),
    #[error(transparent)] Factories_Update(#[from] factories::update::Error),
    #[error(transparent)] Factories_Delete(#[from] factories::delete::Error),
    #[error(transparent)] IntegrationRuntimes_ListByFactory(#[from] integration_runtimes::list_by_factory::Error),
    #[error(transparent)] IntegrationRuntimes_Get(#[from] integration_runtimes::get::Error),
    #[error(transparent)] IntegrationRuntimes_CreateOrUpdate(#[from] integration_runtimes::create_or_update::Error),
    #[error(transparent)] IntegrationRuntimes_Update(#[from] integration_runtimes::update::Error),
    #[error(transparent)] IntegrationRuntimes_Delete(#[from] integration_runtimes::delete::Error),
    #[error(transparent)] IntegrationRuntimes_GetStatus(#[from] integration_runtimes::get_status::Error),
    #[error(transparent)] IntegrationRuntimes_GetConnectionInfo(#[from] integration_runtimes::get_connection_info::Error),
    #[error(transparent)] IntegrationRuntimes_RegenerateAuthKey(#[from] integration_runtimes::regenerate_auth_key::Error),
    #[error(transparent)] IntegrationRuntimes_ListAuthKeys(#[from] integration_runtimes::list_auth_keys::Error),
    #[error(transparent)] IntegrationRuntimes_Start(#[from] integration_runtimes::start::Error),
    #[error(transparent)] IntegrationRuntimes_Stop(#[from] integration_runtimes::stop::Error),
    #[error(transparent)] IntegrationRuntimes_RemoveNode(#[from] integration_runtimes::remove_node::Error),
    #[error(transparent)] IntegrationRuntimes_SyncCredentials(#[from] integration_runtimes::sync_credentials::Error),
    #[error(transparent)] IntegrationRuntimes_GetMonitoringData(#[from] integration_runtimes::get_monitoring_data::Error),
    #[error(transparent)] IntegrationRuntimes_Upgrade(#[from] integration_runtimes::upgrade::Error),
    #[error(transparent)] IntegrationRuntimeNodes_Update(#[from] integration_runtime_nodes::update::Error),
    #[error(transparent)] IntegrationRuntimeNodes_Delete(#[from] integration_runtime_nodes::delete::Error),
    #[error(transparent)] IntegrationRuntimeNodes_GetIpAddress(#[from] integration_runtime_nodes::get_ip_address::Error),
    #[error(transparent)] LinkedServices_ListByFactory(#[from] linked_services::list_by_factory::Error),
    #[error(transparent)] LinkedServices_Get(#[from] linked_services::get::Error),
    #[error(transparent)] LinkedServices_CreateOrUpdate(#[from] linked_services::create_or_update::Error),
    #[error(transparent)] LinkedServices_Delete(#[from] linked_services::delete::Error),
    #[error(transparent)] Datasets_ListByFactory(#[from] datasets::list_by_factory::Error),
    #[error(transparent)] Datasets_Get(#[from] datasets::get::Error),
    #[error(transparent)] Datasets_CreateOrUpdate(#[from] datasets::create_or_update::Error),
    #[error(transparent)] Datasets_Delete(#[from] datasets::delete::Error),
    #[error(transparent)] Pipelines_ListByFactory(#[from] pipelines::list_by_factory::Error),
    #[error(transparent)] Pipelines_Get(#[from] pipelines::get::Error),
    #[error(transparent)] Pipelines_CreateOrUpdate(#[from] pipelines::create_or_update::Error),
    #[error(transparent)] Pipelines_Delete(#[from] pipelines::delete::Error),
    #[error(transparent)] Pipelines_CreateRun(#[from] pipelines::create_run::Error),
    #[error(transparent)] PipelineRuns_QueryByFactory(#[from] pipeline_runs::query_by_factory::Error),
    #[error(transparent)] PipelineRuns_Get(#[from] pipeline_runs::get::Error),
    #[error(transparent)] ActivityRuns_ListByPipelineRun(#[from] activity_runs::list_by_pipeline_run::Error),
    #[error(transparent)] Factories_CancelPipelineRun(#[from] factories::cancel_pipeline_run::Error),
    #[error(transparent)] Triggers_ListByFactory(#[from] triggers::list_by_factory::Error),
    #[error(transparent)] Triggers_Get(#[from] triggers::get::Error),
    #[error(transparent)] Triggers_CreateOrUpdate(#[from] triggers::create_or_update::Error),
    #[error(transparent)] Triggers_Delete(#[from] triggers::delete::Error),
    #[error(transparent)] Triggers_Start(#[from] triggers::start::Error),
    #[error(transparent)] Triggers_Stop(#[from] triggers::stop::Error),
    #[error(transparent)] Triggers_ListRuns(#[from] triggers::list_runs::Error),
}
pub mod operations {
    use super::{models, API_VERSION};
    // GET {base_path}/providers/Microsoft.DataFactory/operations
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationListResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/providers/Microsoft.DataFactory/operations", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        // Any non-200 status is decoded as an ErrorResponse body.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::OperationListResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")] BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error),
        }
    }
}
pub mod factories {
    use super::{models, API_VERSION};
    // GET {base_path}/subscriptions/{sub}/providers/Microsoft.DataFactory/factories
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<models::FactoryListResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.DataFactory/factories",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let
token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::FactoryListResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")] BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error),
        }
    }
    // POST {base_path}/subscriptions/{sub}/providers/Microsoft.DataFactory/locations/{loc}/configureFactoryRepo
    pub async fn configure_factory_repo(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location_id: &str,
        factory_repo_update: &models::FactoryRepoUpdate,
    ) -> std::result::Result<models::Factory, configure_factory_repo::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.DataFactory/locations/{}/configureFactoryRepo",
            operation_config.base_path(),
            subscription_id,
            location_id
        );
        let mut url = url::Url::parse(url_str).map_err(configure_factory_repo::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(configure_factory_repo::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(factory_repo_update).map_err(configure_factory_repo::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(configure_factory_repo::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(configure_factory_repo::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::Factory = serde_json::from_slice(rsp_body)
                    .map_err(|source| configure_factory_repo::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| configure_factory_repo::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(configure_factory_repo::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod configure_factory_repo {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")] BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error),
        }
    }
    // GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.DataFactory/factories
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<models::FactoryListResponse, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::FactoryListResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, })
            }
        }
    }
    pub mod list_by_resource_group {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, },
            #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")] BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error),
        }
    }
    // GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.DataFactory/factories/{factory}
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        factory_name: &str,
    ) -> std::result::Result<models::Factory, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            factory_name
        );
        let mut url =
url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Factory = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, 
bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, factory: &models::Factory, ) -> std::result::Result<models::Factory, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(factory).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::Factory = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| 
create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
pub mod create_or_update {
    use super::{models, API_VERSION};
    /// Errors produced by the `factories::create_or_update` operation.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// Sends a PATCH request for the named factory and deserializes the
/// 200 response into a [`models::Factory`]; any other status is
/// decoded as [`models::ErrorResponse`] and returned via
/// [`update::Error::DefaultResponse`].
pub async fn update(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    factory_update_parameters: &models::FactoryUpdateParameters,
) -> std::result::Result<models::Factory, update::Error> {
    let http_client = operation_config.http_client();
    // Resource URL for the target factory.
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name
    );
    let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
    let mut builder = http::request::Builder::new().method(http::Method::PATCH);
    if let Some(token_credential) = operation_config.token_credential() {
        // Attach a bearer token only when a credential is configured.
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(update::Error::GetTokenError)?;
        builder = builder.header(
            http::header::AUTHORIZATION,
            format!("Bearer {}", token_response.token.secret()),
        );
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    builder = builder.header("content-type", "application/json");
    // JSON-encode the PATCH payload.
    let body = azure_core::to_json(factory_update_parameters).map_err(update::Error::SerializeError)?;
    let request = builder
        .uri(url.as_str())
        .body(body)
        .map_err(update::Error::BuildRequestError)?;
    let response = http_client
        .execute_request(request)
        .await
        .map_err(update::Error::ExecuteRequestError)?;
    match response.status() {
        http::StatusCode::OK => {
            let payload = response.body();
            serde_json::from_slice::<models::Factory>(payload)
                .map_err(|source| update::Error::DeserializeError(source, payload.clone()))
        }
        status_code => {
            // Non-200: surface the service error body alongside the status.
            let payload = response.body();
            let value: models::ErrorResponse = serde_json::from_slice(payload)
                .map_err(|source| update::Error::DeserializeError(source, payload.clone()))?;
            Err(update::Error::DefaultResponse { status_code, value })
        }
    }
}
pub mod update {
    use super::{models, API_VERSION};
    /// Errors produced by the `factories::update` operation.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
pub async fn delete(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), 
#[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn cancel_pipeline_run( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, run_id: &str, ) -> std::result::Result<(), cancel_pipeline_run::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/cancelpipelinerun/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, run_id ); let mut url = url::Url::parse(url_str).map_err(cancel_pipeline_run::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(cancel_pipeline_run::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(cancel_pipeline_run::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(cancel_pipeline_run::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| 
cancel_pipeline_run::Error::DeserializeError(source, rsp_body.clone()))?; Err(cancel_pipeline_run::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod cancel_pipeline_run { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod integration_runtimes { use super::{models, API_VERSION}; pub async fn list_by_factory( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, ) -> std::result::Result<models::IntegrationRuntimeListResponse, list_by_factory::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(list_by_factory::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_factory::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_factory::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_factory::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeListResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<models::IntegrationRuntimeResource, get::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] 
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, if_match: Option<&str>, integration_runtime: &models::IntegrationRuntimeResource, ) -> std::result::Result<models::IntegrationRuntimeResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(if_match) = if_match { req_builder = req_builder.header("If-Match", if_match); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(integration_runtime).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, update_integration_runtime_request: &models::UpdateIntegrationRuntimeRequest, ) -> std::result::Result<models::IntegrationRuntimeStatusResponse, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_integration_runtime_request).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeStatusResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse 
request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => 
Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_status( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<models::IntegrationRuntimeStatusResponse, get_status::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/getStatus", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(get_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential 
.get_token(operation_config.token_credential_resource()) .await .map_err(get_status::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeStatusResponse = serde_json::from_slice(rsp_body).map_err(|source| get_status::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get_status::Error::DeserializeError(source, rsp_body.clone()))?; Err(get_status::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get_status { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_connection_info( 
operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<models::IntegrationRuntimeConnectionInfo, get_connection_info::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/getConnectionInfo", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(get_connection_info::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_connection_info::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_connection_info::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_connection_info::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeConnectionInfo = serde_json::from_slice(rsp_body) .map_err(|source| get_connection_info::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| 
get_connection_info::Error::DeserializeError(source, rsp_body.clone()))?; Err(get_connection_info::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get_connection_info { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn regenerate_auth_key( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, regenerate_key_parameters: &models::IntegrationRuntimeRegenerateKeyParameters, ) -> std::result::Result<models::IntegrationRuntimeAuthKeys, regenerate_auth_key::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/regenerateAuthKey", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(regenerate_auth_key::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await 
.map_err(regenerate_auth_key::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(regenerate_key_parameters).map_err(regenerate_auth_key::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(regenerate_auth_key::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(regenerate_auth_key::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeAuthKeys = serde_json::from_slice(rsp_body) .map_err(|source| regenerate_auth_key::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| regenerate_auth_key::Error::DeserializeError(source, rsp_body.clone()))?; Err(regenerate_auth_key::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod regenerate_auth_key { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), 
} } pub async fn list_auth_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<models::IntegrationRuntimeAuthKeys, list_auth_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/listAuthKeys", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(list_auth_keys::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_auth_keys::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_auth_keys::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_auth_keys::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::IntegrationRuntimeAuthKeys = serde_json::from_slice(rsp_body).map_err(|source| list_auth_keys::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list_auth_keys::Error::DeserializeError(source, 
rsp_body.clone()))?; Err(list_auth_keys::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_auth_keys { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn start( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, integration_runtime_name: &str, ) -> std::result::Result<start::Response, start::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/start", operation_config.base_path(), subscription_id, resource_group_name, factory_name, integration_runtime_name ); let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(start::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = 
bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
    match rsp.status() {
        // 200: the runtime status is returned in the body.
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::IntegrationRuntimeStatusResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(start::Response::Ok200(rsp_value))
        }
        // 202: accepted, no payload to parse.
        http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(start::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod start {
    use super::{models, API_VERSION};
    /// Success responses observed for `start`.
    #[derive(Debug)]
    pub enum Response {
        Ok200(models::IntegrationRuntimeStatusResponse),
        Accepted202,
    }
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// POSTs to the integration runtime's `stop` endpoint; both 200 and 202 are
/// treated as success and carry no parsed payload.
pub async fn stop(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    integration_runtime_name: &str,
) -> std::result::Result<stop::Response, stop::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/stop",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name,
        integration_runtime_name
    );
    let mut url = url::Url::parse(url_str).map_err(stop::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(stop::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(stop::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(stop::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(stop::Response::Ok200),
        http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| stop::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(stop::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod stop {
    use super::{models, API_VERSION};
    /// Success responses observed for `stop`.
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        Accepted202,
    }
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// POSTs the JSON-serialized `remove_node_parameters` to the integration
/// runtime's `removeNode` endpoint; both 200 and 204 count as success.
pub async fn remove_node(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    integration_runtime_name: &str,
    remove_node_parameters: &models::IntegrationRuntimeRemoveNodeRequest,
) -> std::result::Result<remove_node::Response, remove_node::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/removeNode",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name,
        integration_runtime_name
    );
    let mut url = url::Url::parse(url_str).map_err(remove_node::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(remove_node::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(remove_node_parameters).map_err(remove_node::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req =
req_builder.body(req_body).map_err(remove_node::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(remove_node::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(remove_node::Response::Ok200),
        http::StatusCode::NO_CONTENT => Ok(remove_node::Response::NoContent204),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| remove_node::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(remove_node::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod remove_node {
    use super::{models, API_VERSION};
    /// Success responses observed for `remove_node`.
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        NoContent204,
    }
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// POSTs to the integration runtime's `syncCredentials` endpoint; a 200
/// response carries no payload, so success is the unit value.
pub async fn sync_credentials(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    integration_runtime_name: &str,
) -> std::result::Result<(), sync_credentials::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/syncCredentials",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name,
        integration_runtime_name
    );
    let mut url = url::Url::parse(url_str).map_err(sync_credentials::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(sync_credentials::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(sync_credentials::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(sync_credentials::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(()),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| sync_credentials::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(sync_credentials::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod sync_credentials {
    use super::{models, API_VERSION};
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// POSTs to the integration runtime's `monitoringData` endpoint and
/// deserializes a 200 response into `models::IntegrationRuntimeMonitoringData`.
pub async fn get_monitoring_data(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    integration_runtime_name: &str,
) -> std::result::Result<models::IntegrationRuntimeMonitoringData, get_monitoring_data::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/monitoringData",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name,
        integration_runtime_name
    );
    let mut url = url::Url::parse(url_str).map_err(get_monitoring_data::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(get_monitoring_data::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(get_monitoring_data::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(get_monitoring_data::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: models::IntegrationRuntimeMonitoringData = serde_json::from_slice(rsp_body)
                .map_err(|source| get_monitoring_data::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| get_monitoring_data::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(get_monitoring_data::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod get_monitoring_data {
    use super::{models, API_VERSION};
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// POSTs to the integration runtime's `upgrade` endpoint; a 200 response
/// carries no payload, so success is the unit value.
pub async fn upgrade(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    factory_name: &str,
    integration_runtime_name: &str,
) -> std::result::Result<(), upgrade::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/upgrade",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        factory_name,
        integration_runtime_name
    );
    let mut url = url::Url::parse(url_str).map_err(upgrade::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(upgrade::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}",
token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(upgrade::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(upgrade::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(()),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| upgrade::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(upgrade::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod upgrade {
    use super::{models, API_VERSION};
    /// Operation-scoped error type; one variant per failure stage of the call.
    #[derive(Debug, thiserror::Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
pub mod integration_runtime_nodes {
    use super::{models, API_VERSION};
    /// PATCHes the named node under `integrationRuntimes/{}/nodes/{}` with the
    /// JSON-serialized request and returns the updated node on 200.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        factory_name: &str,
        integration_runtime_name: &str,
        node_name: &str,
        update_integration_runtime_node_request: &models::UpdateIntegrationRuntimeNodeRequest,
    ) -> std::result::Result<models::SelfHostedIntegrationRuntimeNode, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/nodes/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            factory_name,
            integration_runtime_name,
            node_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(update_integration_runtime_node_request).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::SelfHostedIntegrationRuntimeNode = serde_json::from_slice(rsp_body)
                    .map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod update {
        use super::{models, API_VERSION};
        /// Operation-scoped error type; one variant per failure stage of the call.
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Sends a DELETE for the named node; both 200 and 204 count as success.
    /// Note: no content-length header is set here, matching the generator's
    /// convention for DELETE requests in this file.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        factory_name: &str,
        integration_runtime_name: &str,
        node_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/nodes/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            factory_name,
            integration_runtime_name,
            node_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp =
http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod delete {
        use super::{models, API_VERSION};
        /// Success responses observed for `delete`.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        /// Operation-scoped error type; one variant per failure stage of the call.
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// POSTs to `nodes/{}/ipAddress` and deserializes a 200 response into
    /// `models::IntegrationRuntimeNodeIpAddress`.
    pub async fn get_ip_address(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        factory_name: &str,
        integration_runtime_name: &str,
        node_name: &str,
    ) -> std::result::Result<models::IntegrationRuntimeNodeIpAddress, get_ip_address::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/integrationRuntimes/{}/nodes/{}/ipAddress",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            factory_name,
            integration_runtime_name,
            node_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_ip_address::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_ip_address::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get_ip_address::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_ip_address::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::IntegrationRuntimeNodeIpAddress = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_ip_address::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_ip_address::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get_ip_address::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod get_ip_address {
        use super::{models, API_VERSION};
        /// Operation-scoped error type; one variant per failure stage of the call.
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod linked_services {
    use super::{models, API_VERSION};
    /// GETs the factory's `linkedservices` collection and deserializes a 200
    /// response into `models::LinkedServiceListResponse`.
    pub async fn list_by_factory(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        factory_name: &str,
    ) -> std::result::Result<models::LinkedServiceListResponse, list_by_factory::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/linkedservices",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            factory_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_factory::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_factory::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_factory::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_factory::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::LinkedServiceListResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
// linked_services (continued, generated code left byte-identical — comments only):
// error-path tail of `list_by_factory` plus its Error enum, then `get` (GET one linked
// service by name → models::LinkedServiceResource), `create_or_update` (PUT with optional
// If-Match header and JSON body), and the start of `delete`. Each operation has a sibling
// `pub mod` holding a thiserror-derived Error enum of identical generated shape.
let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, linked_service_name: &str, ) -> std::result::Result<models::LinkedServiceResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/linkedservices/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, linked_service_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::LinkedServiceResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, linked_service_name: &str, if_match: Option<&str>, linked_service: &models::LinkedServiceResource, ) -> std::result::Result<models::LinkedServiceResource, create_or_update::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/linkedservices/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, linked_service_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(if_match) = if_match { req_builder = req_builder.header("If-Match", if_match); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(linked_service).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::LinkedServiceResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, API_VERSION}; 
// Error enum for linked_services::create_or_update (same generated shape as the others),
// then `delete`: DELETE on the linked service URL; 200 → Response::Ok200,
// 204 → Response::NoContent204, anything else → Error::DefaultResponse.
#[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, linked_service_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/linkedservices/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, linked_service_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = 
http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod datasets { use super::{models, API_VERSION}; pub async fn list_by_factory( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, ) -> std::result::Result<models::DatasetListResponse, list_by_factory::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/datasets", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(list_by_factory::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = 
// datasets module (generated code left byte-identical — comments only): `list_by_factory`
// (GET .../factories/{factory_name}/datasets → models::DatasetListResponse), `get`
// (GET one dataset → models::DatasetResource), `create_or_update` (PUT with optional
// If-Match header and JSON body), and `delete` (DELETE → Ok200 / NoContent204).
// NOTE(review): the next two original lines split a string literal at the line break and
// must stay adjacent exactly as emitted by the generator.
req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_factory::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_factory::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_factory::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatasetListResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to 
get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, dataset_name: &str, ) -> std::result::Result<models::DatasetResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/datasets/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, dataset_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatasetResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] 
// datasets::get Error enum follows, then `create_or_update` (PUT, optional If-Match,
// JSON-serialized dataset body, 200 OK → models::DatasetResource).
pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, dataset_name: &str, if_match: Option<&str>, dataset: &models::DatasetResource, ) -> std::result::Result<models::DatasetResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/datasets/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, dataset_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(if_match) = if_match { req_builder = req_builder.header("If-Match", if_match); } req_builder = req_builder.header("content-type", "application/json"); 
let req_body = azure_core::to_json(dataset).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatasetResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, dataset_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/datasets/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, dataset_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
// pipelines module (generated code left byte-identical — comments only): tail of the
// datasets::delete Error enum above, then `pipelines::list_by_factory`
// (GET .../factories/{factory_name}/pipelines → models::PipelineListResponse), `get`,
// `create_or_update` (PUT, optional If-Match, JSON body), `delete`, and the start of
// `create_run` (POST .../pipelines/{pipeline_name}/createRun).
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod pipelines { use super::{models, API_VERSION}; pub async fn list_by_factory( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, ) -> std::result::Result<models::PipelineListResponse, list_by_factory::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelines", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(list_by_factory::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_factory::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_factory::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_factory::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PipelineListResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, 
rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, pipeline_name: &str, ) -> std::result::Result<models::PipelineResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelines/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, pipeline_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PipelineResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, pipeline_name: &str, if_match: Option<&str>, pipeline: &models::PipelineResource, ) -> std::result::Result<models::PipelineResource, 
create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelines/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, pipeline_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(if_match) = if_match { req_builder = req_builder.header("If-Match", if_match); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(pipeline).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PipelineResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use 
super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, pipeline_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelines/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, pipeline_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let 
rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_run( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, pipeline_name: &str, parameters: Option<&models::ParameterValueSpecification>, ) -> std::result::Result<models::CreateRunResponse, create_run::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelines/{}/createRun", operation_config.base_path(), subscription_id, resource_group_name, factory_name, pipeline_name ); let mut url = url::Url::parse(url_str).map_err(create_run::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_run::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(create_run::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_run::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_run::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: models::CreateRunResponse = serde_json::from_slice(rsp_body).map_err(|source| create_run::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create_run::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_run::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_run { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), 
#[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod pipeline_runs { use super::{models, API_VERSION}; pub async fn query_by_factory( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, filter_parameters: &models::PipelineRunFilterParameters, ) -> std::result::Result<models::PipelineRunQueryResponse, query_by_factory::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelineruns", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(query_by_factory::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(query_by_factory::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(filter_parameters).map_err(query_by_factory::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(query_by_factory::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(query_by_factory::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: 
// query_by_factory (cont.): a 200 body deserializes as PipelineRunQueryResponse;
// any other status parses the body as ErrorResponse and becomes Error::DefaultResponse.
models::PipelineRunQueryResponse = serde_json::from_slice(rsp_body) .map_err(|source| query_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| query_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(query_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod query_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, run_id: &str, ) -> std::result::Result<models::PipelineRun, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelineruns/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, run_id ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential
// pipeline_runs::get (cont.): GET .../pipelineruns/{run_id} with an empty body;
// 200 => PipelineRun, otherwise ErrorResponse => Error::DefaultResponse.
.get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PipelineRun = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod activity_runs { use super::{models, API_VERSION}; pub async fn list_by_pipeline_run( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name:
// activity_runs::list_by_pipeline_run (cont.): GET .../pipelineruns/{run_id}/activityruns.
// startTime/endTime are required query pairs; status, activityName and
// linkedServiceName are appended only when Some.
&str, factory_name: &str, run_id: &str, start_time: &str, end_time: &str, status: Option<&str>, activity_name: Option<&str>, linked_service_name: Option<&str>, ) -> std::result::Result<models::ActivityRunsListResponse, list_by_pipeline_run::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/pipelineruns/{}/activityruns", operation_config.base_path(), subscription_id, resource_group_name, factory_name, run_id ); let mut url = url::Url::parse(url_str).map_err(list_by_pipeline_run::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_pipeline_run::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("startTime", start_time); url.query_pairs_mut().append_pair("endTime", end_time); if let Some(status) = status { url.query_pairs_mut().append_pair("status", status); } if let Some(activity_name) = activity_name { url.query_pairs_mut().append_pair("activityName", activity_name); } if let Some(linked_service_name) = linked_service_name { url.query_pairs_mut().append_pair("linkedServiceName", linked_service_name); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_pipeline_run::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_pipeline_run::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let
// 200 => ActivityRunsListResponse, otherwise ErrorResponse => Error::DefaultResponse;
// then the generated error enum and the start of the `triggers` module.
rsp_value: models::ActivityRunsListResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_pipeline_run::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_pipeline_run::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_pipeline_run::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_pipeline_run { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod triggers { use super::{models, API_VERSION}; pub async fn list_by_factory( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, ) -> std::result::Result<models::TriggerListResponse, list_by_factory::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers", operation_config.base_path(), subscription_id, resource_group_name, factory_name ); let mut url = url::Url::parse(url_str).map_err(list_by_factory::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let
// triggers::list_by_factory (cont.): GET .../factories/{factory_name}/triggers with an
// empty body; 200 => TriggerListResponse, otherwise ErrorResponse => DefaultResponse.
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_factory::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_factory::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_factory::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TriggerListResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_factory::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_factory::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_factory { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")]
// triggers::get: GET .../triggers/{trigger_name}; 200 => TriggerResource,
// otherwise ErrorResponse => Error::DefaultResponse.
GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, ) -> std::result::Result<models::TriggerResource, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TriggerResource = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error {
// triggers::get error enum (cont.), then create_or_update: PUT .../triggers/{trigger_name}
// with an optional If-Match header and a JSON-serialized TriggerResource body.
#[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, if_match: Option<&str>, trigger: &models::TriggerResource, ) -> std::result::Result<models::TriggerResource, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(if_match) = if_match { req_builder = req_builder.header("If-Match", if_match); } req_builder = req_builder.header("content-type", "application/json"); let req_body =
// create_or_update (cont.): 200 => TriggerResource, otherwise ErrorResponse =>
// Error::DefaultResponse; then its error enum and the start of triggers::delete.
azure_core::to_json(trigger).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TriggerResource = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!(
// triggers::delete (cont.): DELETE .../triggers/{trigger_name}; both 200 and 204
// count as success (Response::Ok200 / Response::NoContent204).
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")]
// triggers::start: POST .../triggers/{trigger_name}/start with an empty body and an
// explicit Content-Length: 0 header; 200 => Ok(()), any other status =>
// ErrorResponse => Error::DefaultResponse.
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn start( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, ) -> std::result::Result<(), start::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}/start", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(start::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
// start (cont.): surface the parsed ErrorResponse; then the start error enum and
// triggers::stop, which POSTs .../triggers/{trigger_name}/stop (empty body,
// Content-Length: 0) and maps 200 => Ok(()).
Err(start::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod start { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn stop( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, ) -> std::result::Result<(), stop::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}/stop", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(stop::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(stop::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder =
// stop (cont.): 200 => Ok(()), otherwise ErrorResponse => Error::DefaultResponse;
// then the stop error enum and list_runs, which adds startTime/endTime query pairs.
req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(stop::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(stop::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| stop::Error::DeserializeError(source, rsp_body.clone()))?; Err(stop::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod stop { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_runs( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, factory_name: &str, trigger_name: &str, start_time: &str, end_time: &str, ) -> std::result::Result<models::TriggerRunListResponse, list_runs::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataFactory/factories/{}/triggers/{}/triggerruns", operation_config.base_path(), subscription_id, resource_group_name, factory_name, trigger_name ); let mut url = url::Url::parse(url_str).map_err(list_runs::Error::ParseUrlError)?; let mut req_builder =
// triggers::list_runs (cont.): GET .../triggers/{trigger_name}/triggerruns with
// startTime/endTime query pairs; 200 => TriggerRunListResponse, otherwise
// ErrorResponse => Error::DefaultResponse.
http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_runs::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("startTime", start_time); url.query_pairs_mut().append_pair("endTime", end_time); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_runs::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_runs::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TriggerRunListResponse = serde_json::from_slice(rsp_body).map_err(|source| list_runs::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list_runs::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_runs::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_runs { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error),
// list_runs error enum (cont.); the trailing braces close the error module,
// the list_runs module, and the `triggers` module.
#[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// This file was generated

/// Private module holding the sealing trait, so that [`IsntStringExt`]
/// cannot be implemented outside this crate.
mod string_private {
    pub trait Sealed {}
}

/// Extension for [`String`](std::string::String)
pub trait IsntStringExt: string_private::Sealed {
    /// The negation of [`is_empty`](std::string::String::is_empty)
    #[must_use]
    fn is_not_empty(&self) -> bool;
}

impl string_private::Sealed for std::string::String {}

impl IsntStringExt for std::string::String {
    #[inline]
    fn is_not_empty(&self) -> bool {
        // Non-empty iff the byte length is non-zero.
        self.len() != 0
    }
}
extern crate actix_web;
extern crate serde_json;
extern crate uuid;

extern crate ikrelln;

mod helpers;

use std::collections::HashMap;
use std::{thread, time};

use actix_web::*;

use ikrelln::api::span::IngestResponse;
use ikrelln::opentracing::span::Kind;
use ikrelln::opentracing::Span;

/// End-to-end ingestion check: POST one root span and two children for a
/// single trace, verify the ingest response counts 3 events, then fetch the
/// trace back and verify all 3 spans are returned.
#[test]
fn can_receive_spans() {
    helpers::setup_logger();
    let mut srv = helpers::setup_server();

    let trace_id = uuid::Uuid::new_v4().to_string();

    // Builds a CLIENT-kind span for the trace; the root span reuses the
    // trace id as its own id/name, children get fresh random ids/names.
    let build_span = |root: bool| Span {
        trace_id: trace_id.clone(),
        id: if root {
            trace_id.clone()
        } else {
            uuid::Uuid::new_v4().to_string()
        },
        parent_id: if root { None } else { Some(trace_id.clone()) },
        name: Some(if root {
            trace_id.clone()
        } else {
            uuid::Uuid::new_v4().to_string()
        }),
        kind: Some(Kind::CLIENT),
        duration: Some(25),
        timestamp: Some(50),
        debug: false,
        shared: false,
        local_endpoint: None,
        remote_endpoint: None,
        annotations: vec![],
        tags: HashMap::new(),
        binary_annotations: vec![],
    };

    // Ingest one root span plus two children.
    let req = srv
        .client(http::Method::POST, "/api/v1/spans")
        .json(vec![build_span(true), build_span(false), build_span(false)])
        .unwrap();
    let response = srv.execute(req.send()).unwrap();
    assert!(response.status().is_success());

    let data: Result<IngestResponse, _> =
        serde_json::from_slice(&*srv.execute(response.body()).unwrap());
    assert!(data.is_ok());
    assert_eq!(data.unwrap().nb_events, 3);

    // Give the server time to persist the spans before querying them back.
    thread::sleep(time::Duration::from_millis(
        helpers::DELAY_SPAN_SAVED_MILLISECONDS,
    ));

    let req_trace = srv
        .client(http::Method::GET, &format!("/api/v1/trace/{}", &trace_id))
        .finish()
        .unwrap();
    let response_trace = srv.execute(req_trace.send()).unwrap();
    assert!(response_trace.status().is_success());

    let data_trace: Result<Vec<Span>, _> =
        serde_json::from_slice(&*srv.execute(response_trace.body()).unwrap());
    assert!(data_trace.is_ok());
    assert_eq!(data_trace.unwrap().len(), 3);

    thread::sleep(time::Duration::from_millis(helpers::DELAY_FINISH));
}
use crate::enums::{Color, Font}; use crate::prelude::*; use crate::utils::FlString; use fltk_sys::dialog::*; use std::{ ffi::{CStr, CString}, mem, os::raw, }; /// Color modes to be used with the color chooser #[repr(u8)] #[derive(Debug, Copy, Clone, PartialEq)] pub enum ColorMode { /// Rgb color mode Rgb = 0, /// Byte color mode Byte = 1, /// Hex color mode Hex = 2, /// Hsv color mode Hsv = 3, } /// Creates a file button #[derive(Debug)] pub struct FileDialog { inner: *mut Fl_Native_File_Chooser, } /// Re-alias `FileDialog` to `NativeFileChooser` (`Fl_Native_File_Chooser`) pub type NativeFileChooser = FileDialog; /// Defines the type of dialog, which can be changed dynamically using the `set_type()` method #[repr(i32)] #[derive(WidgetType, Debug, Copy, Clone, PartialEq)] pub enum FileDialogType { /// Browse file BrowseFile = 0, /// Browse dir BrowseDir, /// Browse multiple files BrowseMultiFile, /// Browse multiple dirs BrowseMultiDir, /// Browse save file BrowseSaveFile, /// Browse save directory BrowseSaveDir, } /// Alias for `NativeFileChooserType` pub type NativeFileChooserType = FileDialogType; /// Defines the File dialog options, which can be set using the `set_option()` method. 
#[repr(i32)]
#[derive(WidgetType, Debug, Copy, Clone, PartialEq)]
pub enum FileDialogOptions {
    /// No options
    NoOptions = 0,
    /// Confirm on save as
    SaveAsConfirm = 1,
    /// New folder option
    NewFolder = 2,
    /// Enable preview
    Preview = 4,
    /// Use extension filter
    UseFilterExt = 8,
}

/// Alias to `NativeFileChooserOptions`
pub type NativeFileChooserOptions = FileDialogOptions;

impl std::ops::BitOr<FileDialogOptions> for FileDialogOptions {
    type Output = FileDialogOptions;
    /// Combines two option flags bitwise.
    // NOTE(review): transmuting the OR'ed discriminants back into the enum
    // produces values with no corresponding variant (e.g. SaveAsConfirm |
    // NewFolder == 3), which is undefined behavior for a fieldless enum.
    // A bitflags-style type would be the sound fix — TODO confirm with crate
    // maintainers before changing the public type.
    fn bitor(self, other: FileDialogOptions) -> Self::Output {
        unsafe { std::mem::transmute(self as i32 | other as i32) }
    }
}

impl FileDialog {
    /// Creates an new file dialog of the given type.
    /// Panics (via `assert!`) if the underlying chooser cannot be allocated.
    pub fn new(op: FileDialogType) -> FileDialog {
        unsafe {
            let file_dialog = Fl_Native_File_Chooser_new(mem::transmute(op));
            assert!(!file_dialog.is_null());
            FileDialog { inner: file_dialog }
        }
    }

    /// Returns the chosen file name.
    /// Returns an empty path when nothing was selected.
    pub fn filename(&self) -> std::path::PathBuf {
        assert!(!self.inner.is_null());
        unsafe {
            let cnt = Fl_Native_File_Chooser_count(self.inner);
            if cnt == 0 {
                return std::path::PathBuf::from("");
            }
            // Only the first selection (index 0) is returned here.
            let x = Fl_Native_File_Chooser_filenames(self.inner, 0);
            std::path::PathBuf::from(
                CStr::from_ptr(x as *mut raw::c_char)
                    .to_string_lossy()
                    .to_string(),
            )
        }
    }

    /// Returns the chosen file names (empty vector when nothing was selected).
    pub fn filenames(&self) -> Vec<std::path::PathBuf> {
        assert!(!self.inner.is_null());
        unsafe {
            let cnt = Fl_Native_File_Chooser_count(self.inner);
            let mut names: Vec<std::path::PathBuf> = vec![];
            if cnt == 0 {
                names
            } else {
                for i in 0..cnt {
                    let x = Fl_Native_File_Chooser_filenames(self.inner, i);
                    names.push(std::path::PathBuf::from(
                        CStr::from_ptr(x as *mut raw::c_char)
                            .to_string_lossy()
                            .to_string(),
                    ))
                }
                names
            }
        }
    }

    /// Returns the preset directory (empty path when none is set).
    pub fn directory(&self) -> std::path::PathBuf {
        assert!(!self.inner.is_null());
        unsafe {
            let x = Fl_Native_File_Chooser_directory(self.inner);
            if x.is_null() {
                std::path::PathBuf::from("")
            } else {
                std::path::PathBuf::from(
                    CStr::from_ptr(x as *mut raw::c_char)
                        .to_string_lossy()
                        .to_string(),
                )
            }
        }
    }

    /// Sets the starting directory
    /// # Errors
    /// Errors on non-existent path
    pub fn set_directory<P: AsRef<std::path::Path>>(&mut self, dir: P) -> Result<(), FltkError> {
        assert!(!self.inner.is_null());
        self.set_directory_(dir.as_ref())
    }

    // Monomorphization helper for `set_directory`; keeps the generic shim small.
    fn set_directory_(&mut self, dir: &std::path::Path) -> Result<(), FltkError> {
        assert!(!self.inner.is_null());
        // Fails if the path is not valid UTF-8 or contains an interior NUL.
        let dir = CString::new(dir.to_str().ok_or_else(|| {
            FltkError::Unknown(String::from("Failed to convert path to string"))
        })?)?;
        unsafe { Fl_Native_File_Chooser_set_directory(self.inner, dir.as_ptr()) }
        Ok(())
    }

    /// Shows the file dialog
    pub fn show(&mut self) {
        assert!(!self.inner.is_null());
        unsafe {
            Fl_Native_File_Chooser_show(self.inner);
        }
    }

    /// Sets the option for the dialog
    pub fn set_option(&mut self, opt: FileDialogOptions) {
        assert!(!self.inner.is_null());
        unsafe { Fl_Native_File_Chooser_set_option(self.inner, opt as i32) }
    }

    /// Sets the type for the dialog
    pub fn set_type(&mut self, op: FileDialogType) {
        assert!(!self.inner.is_null());
        unsafe { Fl_Native_File_Chooser_set_type(self.inner, op as i32) }
    }

    /// Sets the title for the dialog
    pub fn set_title(&mut self, title: &str) {
        assert!(!self.inner.is_null());
        let title = CString::safe_new(title);
        unsafe { Fl_Native_File_Chooser_set_title(self.inner, title.as_ptr()) }
    }

    /// Sets the filter for the dialog, can be:
    /// A single wildcard (eg. `"*.txt"`).
    /// Multiple wildcards (eg. `"*.{cxx,h,H}"`).
    /// A descriptive name followed by a `\t` and a wildcard (eg. `"Text Files\t*.txt"`).
    /// A list of separate wildcards with a `\n` between each (eg. `"*.{cxx,H}\n*.txt"`).
    /// A list of descriptive names and wildcards (eg. `"C++ Files\t*.{cxx,H}\nTxt Files\t*.txt"`)
    pub fn set_filter(&mut self, f: &str) {
        assert!(!self.inner.is_null());
        let f = CString::safe_new(f);
        unsafe { Fl_Native_File_Chooser_set_filter(self.inner, f.as_ptr()) }
    }

    /// Sets the preset filter for the dialog
    pub fn set_preset_file(&mut self, f: &str) {
        assert!(!self.inner.is_null());
        let f = CString::safe_new(f);
        unsafe { Fl_Native_File_Chooser_set_preset_file(self.inner, f.as_ptr()) }
    }

    /// returns the error message from the file dialog
    pub fn error_message(&self) -> Option<String> {
        assert!(!self.inner.is_null());
        unsafe {
            let err_msg = Fl_Native_File_Chooser_errmsg(self.inner);
            if err_msg.is_null() {
                None
            } else {
                Some(
                    CStr::from_ptr(err_msg as *mut raw::c_char)
                        .to_string_lossy()
                        .to_string(),
                )
            }
        }
    }
}

impl Drop for FileDialog {
    fn drop(&mut self) {
        // Null the pointer after deletion to make double-drop harmless.
        if !self.inner.is_null() {
            unsafe { Fl_Native_File_Chooser_delete(self.inner) }
            self.inner = std::ptr::null_mut();
        }
    }
}

/// Displays a message box
pub fn message(x: i32, y: i32, txt: &str) {
    unsafe {
        let txt = CString::safe_new(txt);
        Fl_message(x, y, txt.as_ptr())
    }
}

/// Displays an alert box
pub fn alert(x: i32, y: i32, txt: &str) {
    unsafe {
        let txt = CString::safe_new(txt);
        Fl_alert(x, y, txt.as_ptr())
    }
}

/// Displays a choice box with upto three choices.
/// An empty choice will not be shown.
/// Returns the index of the pressed button.
pub fn choice(x: i32, y: i32, txt: &str, b0: &str, b1: &str, b2: &str) -> i32 {
    unsafe {
        let txt = CString::safe_new(txt);
        let b0 = CString::safe_new(b0);
        let b1 = CString::safe_new(b1);
        let b2 = CString::safe_new(b2);
        Fl_choice(x, y, txt.as_ptr(), b0.as_ptr(), b1.as_ptr(), b2.as_ptr()) as i32
    }
}

/// Displays an input box, which returns the inputted string.
/// Can be used for gui io pub fn input(x: i32, y: i32, txt: &str, deflt: &str) -> Option<String> { unsafe { let temp = CString::safe_new(deflt); let txt = CString::safe_new(txt); let x = Fl_input(x, y, txt.as_ptr(), temp.as_ptr()); if x.is_null() { None } else { Some( CStr::from_ptr(x as *const raw::c_char) .to_string_lossy() .to_string(), ) } } } /// Shows an input box, but with hidden string pub fn password(x: i32, y: i32, txt: &str, deflt: &str) -> Option<String> { unsafe { let temp = CString::safe_new(deflt); let txt = CString::safe_new(txt); let x = Fl_password(x, y, txt.as_ptr(), temp.as_ptr()); if x.is_null() { None } else { Some( CStr::from_ptr(x as *const raw::c_char) .to_string_lossy() .to_string(), ) } } } /// Displays a message box pub fn message_default(txt: &str) { unsafe { let txt = CString::safe_new(txt); Fl_message2(txt.as_ptr()) } } /// Displays an alert box pub fn alert_default(txt: &str) { unsafe { let txt = CString::safe_new(txt); Fl_alert2(txt.as_ptr()) } } /// Displays a choice box with upto three choices. /// An empty choice will not be shown pub fn choice_default(txt: &str, b0: &str, b1: &str, b2: &str) -> i32 { unsafe { let txt = CString::safe_new(txt); let b0 = CString::safe_new(b0); let b1 = CString::safe_new(b1); let b2 = CString::safe_new(b2); Fl_choice2(txt.as_ptr(), b0.as_ptr(), b1.as_ptr(), b2.as_ptr()) as i32 } } /// Displays an input box, which returns the inputted string. 
/// Can be used for gui io
pub fn input_default(txt: &str, deflt: &str) -> Option<String> {
    unsafe {
        let temp = CString::safe_new(deflt);
        let txt = CString::safe_new(txt);
        let x = Fl_input2(txt.as_ptr(), temp.as_ptr());
        if x.is_null() {
            None
        } else {
            Some(
                CStr::from_ptr(x as *const raw::c_char)
                    .to_string_lossy()
                    .to_string(),
            )
        }
    }
}

/// Shows an input box, but with hidden string
pub fn password_default(txt: &str, deflt: &str) -> Option<String> {
    unsafe {
        let temp = CString::safe_new(deflt);
        let txt = CString::safe_new(txt);
        let x = Fl_password2(txt.as_ptr(), temp.as_ptr());
        if x.is_null() {
            None
        } else {
            Some(
                CStr::from_ptr(x as *const raw::c_char)
                    .to_string_lossy()
                    .to_string(),
            )
        }
    }
}

/// Creates a help dialog.
/// Owning wrapper around the raw `Fl_Help_Dialog` pointer; freed in `Drop`.
#[derive(Debug)]
pub struct HelpDialog {
    inner: *mut Fl_Help_Dialog,
}

impl HelpDialog {
    /// Creates a default (size and location) help dialog.
    /// Panics (via `assert!`) if the underlying dialog cannot be allocated.
    pub fn default() -> HelpDialog {
        unsafe {
            let help_dialog = Fl_Help_Dialog_new();
            assert!(!help_dialog.is_null());
            HelpDialog { inner: help_dialog }
        }
    }

    /// Creates a new Help dialog with position(x, y) and size(w, h)
    pub fn new(x: i32, y: i32, w: i32, h: i32) -> HelpDialog {
        let mut temp = HelpDialog::default();
        temp.resize(x, y, w, h);
        temp
    }

    /// Hides the help dialog
    pub fn hide(&mut self) {
        unsafe { Fl_Help_Dialog_hide(self.inner) }
    }

    /// Loads a file for the help dialog
    /// # Errors
    /// Errors on non-existent path
    pub fn load<P: AsRef<std::path::Path>>(&mut self, file: P) -> Result<(), FltkError> {
        self.load_(file.as_ref())
    }

    // Monomorphization helper for `load`.
    fn load_(&mut self, file: &std::path::Path) -> Result<(), FltkError> {
        let f = file
            .to_str()
            .ok_or_else(|| FltkError::Unknown(String::from("Failed to convert path to string")))?;
        let f = CString::new(f)?;
        unsafe {
            // The C side returns 0 on success.
            match Fl_Help_Dialog_load(self.inner, f.as_ptr()) {
                0 => Ok(()),
                _ => Err(FltkError::Internal(FltkErrorKind::ResourceNotFound)),
            }
        }
    }

    /// Sets the position of the help dialog
    pub fn position(&mut self, x: i32, y: i32) {
        unsafe { Fl_Help_Dialog_position(self.inner, x, y) }
    }

    /// Resizes the help dialog
    pub fn resize(&mut self, x: i32, y: i32, w: i32, h: i32) {
        unsafe { Fl_Help_Dialog_resize(self.inner, x, y, w, h) }
    }

    /// Shows the help dialog
    pub fn show(&mut self) {
        unsafe { Fl_Help_Dialog_show(self.inner) }
    }

    /// Sets the text size
    pub fn set_text_size(&mut self, s: i32) {
        unsafe { Fl_Help_Dialog_set_text_size(self.inner, s as i32) }
    }

    /// Returns the text size
    pub fn text_size(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_text_size(self.inner) as i32 }
    }

    /// Sets the value (HTML content) of the help dialog
    pub fn set_value(&mut self, f: &str) {
        let f = CString::safe_new(f);
        unsafe { Fl_Help_Dialog_set_value(self.inner, f.as_ptr()) }
    }

    /// Returns the value of the help dialog, `None` when unset
    pub fn value(&self) -> Option<String> {
        unsafe {
            let val = Fl_Help_Dialog_value(self.inner);
            if val.is_null() {
                None
            } else {
                Some(CStr::from_ptr(val).to_string_lossy().to_string())
            }
        }
    }

    /// Returns whether the help dialog is visible
    pub fn visible(&mut self) -> bool {
        unsafe { Fl_Help_Dialog_visible(self.inner) != 0 }
    }

    /// Returns the width of the help dialog
    pub fn width(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_w(self.inner) }
    }

    /// Returns the height of the help dialog
    pub fn height(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_h(self.inner) }
    }

    /// Returns the width of the help dialog (short alias of `width`)
    pub fn w(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_w(self.inner) }
    }

    /// Returns the height of the help dialog (short alias of `height`)
    pub fn h(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_h(self.inner) }
    }

    /// Returns the x position of the help dialog
    pub fn x(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_x(self.inner) }
    }

    /// Returns the y position of the help dialog
    pub fn y(&mut self) -> i32 {
        unsafe { Fl_Help_Dialog_y(self.inner) }
    }
}

impl Drop for HelpDialog {
    fn drop(&mut self) {
        unsafe { Fl_Help_Dialog_delete(self.inner) }
    }
}

/// Defines the type of beep to be passed to the beep function
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum BeepType {
    /// Default beep
    Default = 0,
    /// Message beep
    Message,
    /// Error beep
    Error,
    /// Question beep
    Question,
    /// Password sound
    Password,
    /// Notification sound
    Notification,
}

/// Emits a beep
pub fn beep(tp: BeepType) {
    unsafe { Fl_beep(tp as i32) }
}

/**
    FLTK's own `FileChooser`. Which differs for the Native `FileDialog`
    Example:
    ```rust,no_run
    use fltk::{prelude::*, *};
    let mut chooser = dialog::FileChooser::new(
        ".",                            // directory
        "*",                            // filter or pattern
        dialog::FileChooserType::Multi, // chooser type
        "Title Of Chooser",             // title
    );
    chooser.show();
    chooser.window().set_pos(300, 300);
    // Block until user picks something.
    // (The other way to do this is to use a callback())
    while chooser.shown() {
        app::wait();
    }
    // User hit cancel?
    if chooser.value(1).is_none() {
        println!("(User hit 'Cancel')");
        return;
    }
    // Print what the user picked
    println!("--------------------");
    println!("DIRECTORY: '{}'", chooser.directory().unwrap());
    println!("    VALUE: '{}'", chooser.value(1).unwrap()); // value starts at 1!
    println!("    COUNT: {} files selected", chooser.count());
    // Multiple files? Show all of them
    if chooser.count() > 1 {
        for t in 1..=chooser.count() {
            println!(" VALUE[{}]: '{}'", t, chooser.value(t).unwrap());
        }
    }
    ```
*/
pub struct FileChooser {
    inner: *mut Fl_File_Chooser,
}

bitflags!
{
    /// The types of FileChooser
    pub struct FileChooserType: i32 {
        /// Single file
        const Single = 0;
        /// Multiple files
        const Multi = 1;
        /// Allow creation of file/dir
        const Create = 2;
        /// Directory
        const Directory = 4;
    }
}

impl FileChooser {
    /// Instantiates a new `FileChooser`.
    /// Panics (via `assert!`) if the underlying chooser cannot be allocated.
    // NOTE(review): `title.into_raw()` hands ownership of the CString to the C
    // side and it is never reclaimed — presumably intentional because FLTK
    // keeps the pointer alive; confirm against the C API docs.
    pub fn new(dir: &str, pattern: &str, typ: FileChooserType, title: &str) -> FileChooser {
        let dir = CString::safe_new(dir);
        let pattern = CString::safe_new(pattern);
        let title = CString::safe_new(title);
        unsafe {
            let ptr = Fl_File_Chooser_new(
                dir.as_ptr(),
                pattern.as_ptr(),
                typ.bits as i32,
                title.into_raw(),
            );
            assert!(!ptr.is_null());
            FileChooser { inner: ptr }
        }
    }

    /// Deletes a `FileChooser`
    /// # Safety
    /// Can invalidate the underlying pointer
    pub unsafe fn delete(dlg: Self) {
        Fl_File_Chooser_delete(dlg.inner)
    }

    /// Gets the new button of the `FileChooser`
    pub fn new_button(&mut self) -> Option<impl ButtonExt> {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_newButton(self.inner);
            if ptr.is_null() {
                None
            } else {
                Some(crate::button::Button::from_widget_ptr(ptr as *mut _))
            }
        }
    }

    /// Gets the preview button of the `FileChooser`
    pub fn preview_button(&mut self) -> Option<impl ButtonExt> {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_previewButton(self.inner);
            if ptr.is_null() {
                None
            } else {
                Some(crate::button::CheckButton::from_widget_ptr(
                    ptr as *mut fltk_sys::widget::Fl_Widget,
                ))
            }
        }
    }

    /// Gets the show hidden button of the `FileChooser`
    pub fn show_hidden_button(&mut self) -> Option<impl ButtonExt> {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_showHiddenButton(self.inner);
            if ptr.is_null() {
                None
            } else {
                Some(crate::button::CheckButton::from_widget_ptr(
                    ptr as *mut fltk_sys::widget::Fl_Widget,
                ))
            }
        }
    }

    /// Sets the callback of the `FileChooser`.
    /// The closure is boxed twice and leaked to the C side as user data;
    /// `shim` unboxes it on every invocation and shields C from panics.
    pub fn set_callback<F: FnMut(&mut Self) + 'static>(&mut self, cb: F) {
        assert!(!self.inner.is_null());
        unsafe {
            // C-compatible trampoline: recovers the closure from `data` and
            // calls it, catching unwinds so panics never cross the FFI boundary.
            unsafe extern "C" fn shim(arg1: *mut Fl_File_Chooser, data: *mut raw::c_void) {
                let mut wid = FileChooser { inner: arg1 };
                let a: *mut Box<dyn FnMut(&mut FileChooser)> =
                    data as *mut Box<dyn FnMut(&mut FileChooser)>;
                let f: &mut (dyn FnMut(&mut FileChooser)) = &mut **a;
                let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| f(&mut wid)));
            }
            // Reclaim (and drop) any previously installed closure.
            let _old_data = self.user_data();
            let a: *mut Box<dyn FnMut(&mut Self)> = Box::into_raw(Box::new(Box::new(cb)));
            let data: *mut raw::c_void = a as *mut raw::c_void;
            let callback: Option<
                unsafe extern "C" fn(arg1: *mut Fl_File_Chooser, data: *mut raw::c_void),
            > = Some(shim);
            Fl_File_Chooser_set_callback(self.inner, callback, data)
        }
    }

    /// Sets the color of the `FileChooser`
    pub fn set_color(&mut self, c: Color) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_color(self.inner, c.bits() as u32) }
    }

    /// Gets the color of the `FileChooser`
    pub fn color(&mut self) -> Color {
        assert!(!self.inner.is_null());
        unsafe { mem::transmute(Fl_File_Chooser_color(self.inner)) }
    }

    /// Gets the count of chosen items
    pub fn count(&mut self) -> i32 {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_count(self.inner) as i32 }
    }

    /// Sets the directory of the `FileChooser`
    pub fn set_directory(&mut self, dir: &str) {
        assert!(!self.inner.is_null());
        let dir = CString::safe_new(dir);
        unsafe { Fl_File_Chooser_set_directory(self.inner, dir.as_ptr()) }
    }

    /// Gets the directory of the `FileChooser`
    pub fn directory(&mut self) -> Option<String> {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_directory(self.inner);
            if ptr.is_null() {
                None
            } else {
                Some(
                    CStr::from_ptr(ptr as *mut raw::c_char)
                        .to_string_lossy()
                        .to_string(),
                )
            }
        }
    }

    /// Sets the filter for the dialog, can be:
    /// A single wildcard (eg. `"*.txt"`).
    /// Multiple wildcards (eg. `"*.{cxx,h,H}"`).
    /// A descriptive name followed by a `\t` and a wildcard (eg. `"Text Files\t*.txt"`).
    /// A list of separate wildcards with a `\n` between each (eg. `"*.{cxx,H}\n*.txt"`).
    /// A list of descriptive names and wildcards (eg. `"C++ Files\t*.{cxx,H}\nTxt Files\t*.txt"`)
    pub fn set_filter(&mut self, pattern: &str) {
        assert!(!self.inner.is_null());
        let pattern = CString::safe_new(pattern);
        unsafe { Fl_File_Chooser_set_filter(self.inner, pattern.as_ptr()) }
    }

    /// Gets the filter of the `FileChooser`
    pub fn filter(&mut self) -> Option<String> {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_filter(self.inner);
            if ptr.is_null() {
                None
            } else {
                Some(
                    CStr::from_ptr(ptr as *mut raw::c_char)
                        .to_string_lossy()
                        .to_string(),
                )
            }
        }
    }

    /// Gets the current filename filter selection
    pub fn filter_value(&mut self) -> i32 {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_filter_value(self.inner) as i32 }
    }

    /// Selects the filename filter by index
    pub fn set_filter_value(&mut self, f: i32) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_filter_value(self.inner, f as i32) }
    }

    /// Hides the file chooser
    pub fn hide(&mut self) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_hide(self.inner) }
    }

    /// Sets the icon size of the `FileChooser`
    pub fn set_icon_size(&mut self, s: u8) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_iconsize(self.inner, s) }
    }

    /// Gets the icon size of the `FileChooser`
    pub fn icon_size(&mut self) -> u8 {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_iconsize(self.inner) }
    }

    /// Sets the label of the `FileChooser`
    pub fn set_label(&mut self, l: &str) {
        assert!(!self.inner.is_null());
        let l = CString::safe_new(l);
        unsafe { Fl_File_Chooser_set_label(self.inner, l.as_ptr()) }
    }

    /// Gets the label of the `FileChooser` (empty string when unset)
    pub fn label(&mut self) -> String {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_label(self.inner);
            if ptr.is_null() {
                String::from("")
            } else {
                CStr::from_ptr(ptr as *mut raw::c_char)
                    .to_string_lossy()
                    .to_string()
            }
        }
    }

    /// Sets the label of the Ok button
    pub fn set_ok_label(&mut self, l: &str) {
        assert!(!self.inner.is_null());
        let l = CString::safe_new(l);
        unsafe { Fl_File_Chooser_set_ok_label(self.inner, l.as_ptr()) }
    }

    /// Gets the label of the Ok button (empty string when unset)
    pub fn ok_label(&mut self) -> String {
        assert!(!self.inner.is_null());
        unsafe {
            let ptr = Fl_File_Chooser_ok_label(self.inner);
            if ptr.is_null() {
                String::from("")
            } else {
                CStr::from_ptr(ptr as *mut raw::c_char)
                    .to_string_lossy()
                    .to_string()
            }
        }
    }

    /// Add preview to the `FileChooser`
    pub fn set_preview(&mut self, e: bool) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_preview(self.inner, e as i32) }
    }

    /// Returns whether preview is enabled for the `FileChooser`
    pub fn preview(&self) -> bool {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_preview(self.inner) != 0 }
    }

    /// Rescan the directory
    pub fn rescan(&mut self) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_rescan(self.inner) }
    }

    /// Rescan the directory while keeping the file name
    pub fn rescan_keep_filename(&mut self) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_rescan_keep_filename(self.inner) }
    }

    /// Shows the File Chooser
    pub fn show(&mut self) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_show(self.inner) }
    }

    /// Returns whether the file chooser is shown
    pub fn shown(&self) -> bool {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_shown(self.inner) != 0 }
    }

    /// Sets the text color of the file chooser
    pub fn set_text_color(&mut self, c: Color) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_text_color(self.inner, c.bits() as u32) }
    }

    /// Gets the text color of the file chooser
    pub fn text_color(&mut self) -> Color {
        assert!(!self.inner.is_null());
        unsafe { mem::transmute(Fl_File_Chooser_text_color(self.inner)) }
    }

    /// Sets the text font of the file chooser
    pub fn set_text_font(&mut self, f: Font) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_text_font(self.inner, f.bits() as i32) }
    }

    /// Gets the text font of the file chooser
    pub fn text_font(&mut self) -> Font {
        assert!(!self.inner.is_null());
        unsafe { mem::transmute(Fl_File_Chooser_text_font(self.inner)) }
    }

    /// Sets the text size of the file chooser
    pub fn set_text_size(&mut self, s: i32) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_text_size(self.inner, s as i32) }
    }

    /// Gets the text size of the file chooser
    pub fn text_size(&mut self) -> i32 {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_text_size(self.inner) as i32 }
    }

    /// Sets the type of the `FileChooser`
    pub fn set_type(&mut self, t: FileChooserType) {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_set_type(self.inner, t.bits as i32) }
    }

    /// Gets the type of the `FileChooser`
    pub fn get_type(&mut self) -> FileChooserType {
        assert!(!self.inner.is_null());
        unsafe { mem::transmute(Fl_File_Chooser_type(self.inner)) }
    }

    /// Gets the user data of the `FileChooser` and detaches the callback.
    /// # Safety
    /// Can invalidate the user data while the `FileChooser` is in use
    pub unsafe fn user_data(&self) -> Option<Box<dyn FnMut()>> {
        let ptr = Fl_File_Chooser_user_data(self.inner);
        if ptr.is_null() {
            None
        } else {
            // Reclaim ownership of the boxed closure and clear the C-side
            // callback so it cannot be invoked with a dangling pointer.
            let x = ptr as *mut Box<dyn FnMut()>;
            let x = Box::from_raw(x);
            Fl_File_Chooser_set_callback(self.inner, None, std::ptr::null_mut());
            Some(*x)
        }
    }

    /// Gets the file or dir name chosen by the `FileChooser`.
    /// Indices start at 1; an index of 0 is coerced to 1.
    pub fn value(&mut self, f: i32) -> Option<String> {
        assert!(!self.inner.is_null());
        let f = if f == 0 { 1 } else { f };
        unsafe {
            let ptr = Fl_File_Chooser_value(self.inner, f as i32);
            if ptr.is_null() {
                None
            } else {
                Some(
                    CStr::from_ptr(ptr as *mut raw::c_char)
                        .to_string_lossy()
                        .to_string(),
                )
            }
        }
    }

    /// Sets the file or dir name chosen by the `FileChooser`
    pub fn set_value(&mut self, filename: &str) {
        assert!(!self.inner.is_null());
        let filename = CString::safe_new(filename);
        unsafe { Fl_File_Chooser_set_value(self.inner, filename.as_ptr()) }
    }

    /// Returns whether the `FileChooser` is visible or not
    pub fn visible(&mut self) -> bool {
        assert!(!self.inner.is_null());
        unsafe { Fl_File_Chooser_visible(self.inner) != 0 }
    }

    /// Return dialog window.
    // Walks two parents up from the "new" button to reach the dialog window.
    // Shouldn't fail for a live chooser.
    pub fn window(&mut self) -> impl WindowExt {
        unsafe {
            let win_ptr = self
                .new_button()
                .unwrap()
                .parent()
                .unwrap()
                .parent()
                .unwrap()
                .as_widget_ptr();
            crate::window::Window::from_widget_ptr(win_ptr)
        }
    }

    // NOTE(review): all the static label setters below pass `into_raw()`,
    // leaking each CString — presumably intentional since FLTK stores the
    // pointer in a global for the process lifetime; confirm.

    /// Set "Add favorites" label
    pub fn set_add_favorites_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_add_favorites_label(msg.into_raw()) }
    }

    /// Set "All Files" label
    pub fn set_all_files_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_all_files_label(msg.into_raw()) }
    }

    /// Set "Custom Filter" label
    pub fn set_custom_filter_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_custom_filter_label(msg.into_raw()) }
    }

    /// Set "Existing file" label
    pub fn set_existing_file_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_existing_file_label(msg.into_raw()) }
    }

    /// Set "Favorites" label
    pub fn set_favorites_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_favorites_label(msg.into_raw()) }
    }

    /// Set "Filename" label
    pub fn set_filename_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_filename_label(msg.into_raw()) }
    }

    /// Set "Filesystems" label
    pub fn set_filesystems_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_filesystems_label(msg.into_raw()) }
    }

    /// Set "Manage favorites" label
    pub fn set_manage_favorites_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_manage_favorites_label(msg.into_raw()) }
    }

    /// Set "New directory" label
    pub fn set_new_directory_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_new_directory_label(msg.into_raw()) }
    }

    /// Set "New directory" tooltip
    pub fn set_new_directory_tooltip(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_new_directory_tooltip(msg.into_raw()) }
    }

    /// Set "Preview" label
    pub fn set_preview_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_preview_label(msg.into_raw()) }
    }

    /// Set "Save" label
    pub fn set_save_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_save_label(msg.into_raw()) }
    }

    /// Set "Show" label
    pub fn set_show_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_show_label(msg.into_raw()) }
    }

    /// Set "Hidden" label
    pub fn set_hidden_label(msg: &'static str) {
        let msg = CString::safe_new(msg);
        unsafe { Fl_File_Chooser_set_hidden_label(msg.into_raw()) }
    }
}

impl Drop for FileChooser {
    fn drop(&mut self) {
        unsafe { Fl_File_Chooser_delete(self.inner) }
    }
}

/// Shows a directory chooser returning a String
pub fn dir_chooser(message: &str, fname: &str, relative: bool) -> Option<String> {
    unsafe {
        let message = CString::safe_new(message);
        let fname = CString::safe_new(fname);
        let ptr = Fl_dir_chooser(message.as_ptr(), fname.as_ptr(), relative as i32);
        if ptr.is_null() {
            None
        } else {
            Some(
                CStr::from_ptr(ptr as *mut raw::c_char)
                    .to_string_lossy()
                    .to_string(),
            )
        }
    }
}

/**
    Shows a file chooser returning a String.
    Example:
    ```rust,no_run
    use fltk::{prelude::*, *};
    let file = dialog::file_chooser("Choose File", "*.rs", ".", true).unwrap();
    println!("{}", file);
    ```
*/
pub fn file_chooser(message: &str, pattern: &str, dir: &str, relative: bool) -> Option<String> {
    let message = CString::safe_new(message);
    let pattern = CString::safe_new(pattern);
    let dir = CString::safe_new(dir);
    unsafe {
        let ptr = Fl_file_chooser(
            message.as_ptr(),
            pattern.as_ptr(),
            dir.as_ptr(),
            relative as i32,
        );
        if ptr.is_null() {
            None
        } else {
            Some(
                CStr::from_ptr(ptr as *mut raw::c_char)
                    .to_string_lossy()
                    .to_string(),
            )
        }
    }
}

/// Spawns a color chooser dialog.
/// Returns `None` when the user cancels (the C call returns 0).
pub fn color_chooser(name: &str, cmode: ColorMode) -> Option<(u8, u8, u8)> {
    unsafe {
        let name = CString::safe_new(name);
        // The dialog starts out showing white.
        let mut r = 255;
        let mut g = 255;
        let mut b = 255;
        let ret = Fl_color_chooser(name.as_ptr(), &mut r, &mut g, &mut b, cmode as i32);
        if ret == 0 {
            None
        } else {
            Some((r, g, b))
        }
    }
}

/// Spawns a color chooser dialog preset with `col`;
/// returns `col` unchanged when the user cancels.
pub fn color_chooser_with_default(name: &str, cmode: ColorMode, col: (u8, u8, u8)) -> (u8, u8, u8) {
    unsafe {
        let name = CString::safe_new(name);
        let mut r = col.0;
        let mut g = col.1;
        let mut b = col.2;
        let ret = Fl_color_chooser(name.as_ptr(), &mut r, &mut g, &mut b, cmode as i32);
        if ret == 0 {
            col
        } else {
            (r, g, b)
        }
    }
}
use std::sync::MutexGuard;

use nia_interpreter_core::Interpreter;
use nia_interpreter_core::NiaInterpreterCommand;
use nia_interpreter_core::NiaInterpreterCommandResult;
use nia_interpreter_core::{EventLoopHandle, NiaRemoveDeviceByPathCommandResult};

use crate::error::{NiaServerError, NiaServerResult};
use crate::protocol::{NiaRemoveDeviceByPathRequest, Serializable};
use crate::server::Server;

use nia_protocol_rust::RemoveDeviceByPathResponse;

/// Server-side response for a "remove device by path" request; wraps the
/// interpreter's command result for protobuf serialization.
#[derive(Debug, Clone)]
pub struct NiaRemoveDeviceByPathResponse {
    command_result: NiaRemoveDeviceByPathCommandResult,
}

impl NiaRemoveDeviceByPathResponse {
    // Sends the remove-device command to the interpreter event loop and
    // builds a response from its result. Errors when the command cannot be
    // sent/received or when an unexpected result variant comes back.
    fn try_from(
        server: &mut Server,
        nia_remove_keyboard_by_path_request: NiaRemoveDeviceByPathRequest,
        event_loop_handle: MutexGuard<EventLoopHandle>,
    ) -> Result<NiaRemoveDeviceByPathResponse, NiaServerError> {
        let device_path = nia_remove_keyboard_by_path_request.get_device_path();
        let interpreter_command =
            NiaInterpreterCommand::make_remove_device_by_path_command(device_path.clone());
        event_loop_handle
            .send_command(interpreter_command)
            .map_err(|_| {
                NiaServerError::interpreter_error(
                    "Error sending command to the interpreter.",
                )
            })?;
        // NOTE(review): receive_result presumably blocks until the event loop
        // answers — confirm; a slow interpreter would stall this handler.
        let execution_result = event_loop_handle.receive_result().map_err(|_| {
            NiaServerError::interpreter_error(
                "Error reading command from the interpreter.",
            )
        })?;
        let response = match execution_result {
            NiaInterpreterCommandResult::RemoveDeviceByPath(command_result) => {
                // Keep the server's device registry in sync with the interpreter.
                (*server).undefine_device_by_path(&device_path);
                NiaRemoveDeviceByPathResponse { command_result }
            }
            _ => {
                // Converts the error into the Err variant via Into.
                return NiaServerError::interpreter_error(
                    "Unexpected command result.",
                )
                .into()
            }
        };
        Ok(response)
    }

    /// Infallible wrapper over `try_from`: any failure is folded into a
    /// `Failure` command result so a response is always produced.
    pub fn from(
        server: &mut Server,
        nia_remove_keyboard_by_path_request: NiaRemoveDeviceByPathRequest,
        event_loop_handle: MutexGuard<EventLoopHandle>,
    ) -> NiaRemoveDeviceByPathResponse {
        // NOTE(review): debug print of the full request — likely a leftover;
        // consider a logging macro instead.
        println!("{:?}", nia_remove_keyboard_by_path_request);
        let try_result = NiaRemoveDeviceByPathResponse::try_from(
            server,
            nia_remove_keyboard_by_path_request,
            event_loop_handle,
        );
        match try_result {
            Ok(result) => result,
            Err(error) => {
                let message =
                    format!("Execution failure: {}", error.get_message());
                let command_result =
                    NiaRemoveDeviceByPathCommandResult::Failure(message);
                NiaRemoveDeviceByPathResponse { command_result }
            }
        }
    }
}

impl
    Serializable<
        NiaRemoveDeviceByPathResponse,
        nia_protocol_rust::RemoveDeviceByPathResponse,
    > for NiaRemoveDeviceByPathResponse
{
    // Maps the three command-result variants onto the corresponding protobuf
    // oneof results (success / error / failure).
    fn to_pb(&self) -> RemoveDeviceByPathResponse {
        let result = &self.command_result;
        let mut remove_keyboard_by_path_response =
            nia_protocol_rust::RemoveDeviceByPathResponse::new();
        match result {
            NiaRemoveDeviceByPathCommandResult::Success() => {
                let mut success_result =
                    nia_protocol_rust::RemoveDeviceByPathResponse_SuccessResult::new();
                success_result.set_message(protobuf::Chars::from(
                    String::from("Success."),
                ));
                remove_keyboard_by_path_response
                    .set_success_result(success_result);
            }
            NiaRemoveDeviceByPathCommandResult::Error(error_message) => {
                let mut error_result =
                    nia_protocol_rust::RemoveDeviceByPathResponse_ErrorResult::new(
                    );
                error_result
                    .set_message(protobuf::Chars::from(error_message.clone()));
                remove_keyboard_by_path_response.set_error_result(error_result);
            }
            NiaRemoveDeviceByPathCommandResult::Failure(failure_message) => {
                let mut failure_result =
                    nia_protocol_rust::RemoveDeviceByPathResponse_FailureResult::new();
                failure_result.set_message(protobuf::Chars::from(
                    failure_message.clone(),
                ));
                remove_keyboard_by_path_response
                    .set_failure_result(failure_result);
            }
        }
        remove_keyboard_by_path_response
    }

    // Deserialization is never needed server-side for this message type.
    fn from_pb(
        object_pb: RemoveDeviceByPathResponse,
    ) -> NiaServerResult<NiaRemoveDeviceByPathResponse> {
        unreachable!()
    }
}
use std::ptr; use crate::bdev::BDev; use crate::generated::{spdk_bdev_create_bs_dev, spdk_bs_dev}; #[derive(Debug, Error)] pub enum BlobBDevError { #[error(display = "Could not create blob bdev!: {}", _0)] FailedToCreate(String), } /// SPDK blob store block device. /// /// This is a virtual representation of a block device that is exported by the backend. /// TODO: Implement Drop #[derive(Debug)] pub struct BlobStoreBDev { pub(crate) bs_dev: *mut spdk_bs_dev, } pub fn create_bs_dev(bdev: &mut BDev) -> Result<BlobStoreBDev, BlobBDevError> { let bs_dev = unsafe { spdk_bdev_create_bs_dev(bdev.bdev, None, ptr::null_mut()) }; if bs_dev.is_null() { return Err(BlobBDevError::FailedToCreate(bdev.name.clone())); } Ok(BlobStoreBDev { bs_dev }) }
use std::marker::PhantomData;
use std::iter::*;
use std::collections::VecDeque;
use std::cmp::Ordering;

use num::{PrimInt, zero, one};

use myopic::*;
use myopic::lens::lens::*;

use crate::shape::*;

// NOTE this is just a Monoid Action
/// A container whose focus can be moved to the element at `index` before an
/// `Action` is applied.
pub trait Scope<I> {
    fn adjust(&mut self, index: I);
}

/// A function `act` applied through the lens `lens` focused inside `D`,
/// producing a new `A` from the current focused value.
pub struct Action<F, O, D, A> {
    pub act: F,
    pub lens: O,
    // Zero-sized markers tying the otherwise-unused type parameters to the struct.
    a: PhantomData<A>,
    d: PhantomData<D>,
}

impl<F, O, D, A> Action<F, O, D, A>
    where O: Getter + Setter + Lensable<Input=D, Output=A>,
          F: Fn(A) -> A {
    /// Reads the focused value, transforms it with `act`, and writes it back.
    pub fn act(&self, d: &mut D) {
        let val = self.lens.get(d);
        let val_f = (self.act)(val);
        self.lens.set(d, val_f);
    }
}

/// An `Action` paired with the indices at which it should be applied.
pub struct Transform<F, O, D, A, Ix> {
    pub action: Action<F, O, D, A>,
    pub indices: Ix,
}

impl<F, O, D, I, A, Ix> Transform<F, O, D, A, Ix>
    where O: Getter + Setter + Lensable<Input=D, Output=A>,
          D: Shape + Scope<I>,
          F: Fn(A) -> A,
          Ix: Clone + IntoIterator<Item=I> {
    /// Applies the action once per index, adjusting the scope before each application.
    pub fn transform(&self, d: &mut D) {
        for index in self.indices.clone().into_iter() {
            d.adjust(index);
            self.action.act(d);
        }
    }

    /// Convenience constructor bundling a lens, an index source and a function.
    pub fn make_transform(lens: O, indices: Ix, f: F) -> Transform<F, O, D, A, Ix>
        where F: Fn(A) -> A + 'static {
        Transform {
            action: Action {
                act: f,
                lens: lens,
                d: PhantomData,
                a: PhantomData,
            },
            indices: indices,
        }
    }
}

/// Interleaves two transforms over the same data, visiting their index streams
/// in ascending order (a two-way merge) so each `adjust` happens in order.
///
/// NOTE(review): the `act` fields are invoked directly here with bounds
/// `F: Fn(&mut D)` / `G: Fn(&mut D)`, unlike `Action::act` which requires
/// `Fn(A) -> A` — confirm which calling convention is intended.
pub fn apply_both<F, G, O, D, A, Ix, I>(first: &Transform<F, O, D, A, Ix>,
                                        second: &Transform<G, O, D, A, Ix>,
                                        d: &mut D)
    where D: Scope<I> + Shape,
          Ix: IntoIterator<Item=I> + Clone,
          I: PartialOrd + Copy,
          F: Fn(&mut D),
          G: Fn(&mut D),
          O: Getter + Setter + Lensable<Input=D, Output=A> {
    let mut ixs1 = first.indices.clone().into_iter();
    let mut ixs2 = second.indices.clone().into_iter();

    if let (Some(mut ix1), Some(mut ix2)) = (ixs1.next(), ixs2.next()) {
        loop {
            if ix1 < ix2 {
                d.adjust(ix1);
                (first.action.act)(d);
                if let Some(new_ix) = ixs1.next() {
                    ix1 = new_ix;
                } else {
                    break;
                }
            } else {
                d.adjust(ix2);
                (second.action.act)(d);
                if let Some(new_ix) = ixs2.next() {
                    ix2 = new_ix;
                } else {
                    break;
                }
            }
        }
    }

    // Drain whichever stream still has indices left after the merge loop.
    for index in ixs1 {
        d.adjust(index);
        (first.action.act)(d);
    }
    for index in ixs2 {
        d.adjust(index);
        (second.action.act)(d);
    }
}

/// Entry in the merge queue of `apply_many`: the next index `ix` drawn from
/// the transform at position `index`.
#[derive(Eq, PartialEq)]
struct Queued<I> {
    ix: I,
    index: usize,
}

impl<I: Ord> PartialOrd for Queued<I> {
    // Ordering is by index value only; the transform number is ignored.
    fn partial_cmp(&self, other: &Queued<I>) -> Option<Ordering> {
        Some(self.ix.cmp(&other.ix))
    }
}

impl<I: Ord> Ord for Queued<I> {
    fn cmp(&self, other: &Queued<I>) -> Ordering {
        self.ix.cmp(&other.ix)
    }
}

/// K-way merge of many transforms: repeatedly applies the transform whose next
/// index is smallest, keeping the queue sorted by insertion position.
pub fn apply_many<F, O, D, A, Ix, I>(transforms: Vec<Transform<F, O, D, A, Ix>>, d: &mut D)
    where D: Scope<I> + Shape,
          Ix: IntoIterator<Item=I> + Clone,
          I: PartialOrd + Ord + Copy,
          F: Fn(A) -> A,
          O: Getter + Setter + Lensable<Input=D, Output=A> {
    let mut pqueue = VecDeque::new();
    let mut ix_vec = vec!();

    // Seed the queue with the first index of every transform.
    for index in 0..transforms.len() {
        ix_vec.push(transforms[index].indices.clone().into_iter());
        if let Some(ix) = ix_vec[index].next() {
            pqueue.push_back(Queued { ix: ix, index: index });
        }
    }

    while let Some(queued) = pqueue.pop_front() {
        d.adjust(queued.ix);
        transforms[queued.index].action.act(d);

        // Refill from the transform just consumed, inserting in sorted order.
        if let Some(ix) = ix_vec[queued.index].next() {
            let new_queued = Queued { ix: ix, index: queued.index };

            if let Some(insert_index) = pqueue.iter().position(|queued| queued.ix > ix) {
                pqueue.insert(insert_index, new_queued);
            } else {
                pqueue.push_back(new_queued);
            }
        }
    }
}

/// Applies `action` at every index from zero up to (but excluding) the shape's
/// extent.
pub fn scope_map<I, O, D, A, F>(d: &mut D, action: Action<F, O, D, A>)
    where D: Scope<I> + Shape<Shape=I>,
          I: PrimInt,
          F: Fn(A) -> A,
          O: Getter + Setter + Lensable<Input=D, Output=A> {
    let cap = d.shape();

    let mut index = zero();
    while index != cap {
        d.adjust(index);
        action.act(d);
        index = index + one();
    }
}

/// Applies `action` at each index yielded by `ix`.
///
/// NOTE(review): as in `apply_both`, the `act` field is called directly with
/// `F: Fn(&mut D)` — confirm against `Action::act`'s `Fn(A) -> A` bound.
pub fn scope_ixmap<F, O, I, Ix, D, A>(d: &mut D, ix: Ix, action: Action<F, O, D, A>)
    where D: Scope<I>,
          Ix: Iterator<Item=I>,
          F: Fn(&mut D),
          O: Getter + Setter + Lensable<Input=D, Output=A> {
    for index in ix {
        d.adjust(index);
        (action.act)(d);
    }
}
pub mod action; pub mod args; pub mod error; pub use action::*; pub use args::*; pub use error::*;
use primal::Primes;

/// Project Euler #10: returns the sum of all primes below two million.
pub fn run() -> u64 {
    let mut total: usize = 0;
    // Walk the endless prime stream and stop at the two-million bound.
    for prime in Primes::all() {
        if prime >= 2_000_000 {
            break;
        }
        total += prime;
    }
    total as u64
}
use crate::{format_context::FormatContext, tools};
use ffmpeg_sys_next::*;

/// Wraps an FFmpeg `AVCodecContext` configured for decoding the subtitle
/// stream at `stream_index` of a demuxed input.
#[derive(Debug)]
pub struct SubtitleDecoder {
    pub identifier: String,
    pub stream_index: isize,
    // Allocated in `new`; closed and freed in `Drop`.
    pub codec_context: *mut AVCodecContext,
}

impl SubtitleDecoder {
    /// Finds the decoder for the stream's codec, allocates a codec context and
    /// copies the stream's codec parameters into it.
    ///
    /// On failure of `avcodec_parameters_to_context`, the freshly allocated
    /// context is freed before the error is propagated (via `check_result!`).
    pub fn new(
        identifier: String,
        format: &FormatContext,
        stream_index: isize,
    ) -> Result<Self, String> {
        unsafe {
            let codec = avcodec_find_decoder(format.get_codec_id(stream_index));
            // NOTE(review): `codec` may be null when no decoder exists for the
            // codec id; `avcodec_alloc_context3` tolerates a null codec, but
            // decoding would fail later — confirm upstream guarantees.
            let mut codec_context = avcodec_alloc_context3(codec);

            check_result!(
                avcodec_parameters_to_context(
                    codec_context,
                    (**(*format.format_context).streams.offset(stream_index)).codecpar
                ),
                {
                    avcodec_free_context(&mut codec_context);
                }
            );

            Ok(SubtitleDecoder {
                identifier,
                stream_index,
                codec_context,
            })
        }
    }
}

impl Drop for SubtitleDecoder {
    // Closes and frees the codec context; the null check guards against a
    // double free if the pointer was already cleared.
    fn drop(&mut self) {
        unsafe {
            if !self.codec_context.is_null() {
                avcodec_close(self.codec_context);
                avcodec_free_context(&mut self.codec_context);
            }
        }
    }
}
use std::env;
use std::fs;
use std::path::PathBuf;

/// Build script: when the `rt` feature is enabled, copy the selected device's
/// `device.x` linker script into `OUT_DIR` and register that directory on the
/// linker search path.
fn main() {
    if env::var_os("CARGO_FEATURE_RT").is_some() {
        let out = PathBuf::from(env::var_os("OUT_DIR").unwrap());
        println!("cargo:rustc-link-search={}", out.display());

        // (feature env var, linker script) pairs, checked in declaration order.
        const DEVICES: [(&str, &str); 9] = [
            ("CARGO_FEATURE_STM32G431", "src/stm32g431/device.x"),
            ("CARGO_FEATURE_STM32G441", "src/stm32g441/device.x"),
            ("CARGO_FEATURE_STM32G471", "src/stm32g471/device.x"),
            ("CARGO_FEATURE_STM32G473", "src/stm32g473/device.x"),
            ("CARGO_FEATURE_STM32G474", "src/stm32g474/device.x"),
            ("CARGO_FEATURE_STM32G483", "src/stm32g483/device.x"),
            ("CARGO_FEATURE_STM32G484", "src/stm32g484/device.x"),
            ("CARGO_FEATURE_STM32G491", "src/stm32g491/device.x"),
            ("CARGO_FEATURE_STM32G4A1", "src/stm32g4a1/device.x"),
        ];

        let device_file = DEVICES
            .iter()
            .find(|(feature, _)| env::var_os(feature).is_some())
            .map(|(_, script)| *script)
            .unwrap_or_else(|| panic!("No device features selected"));

        fs::copy(device_file, out.join("device.x")).unwrap();
        println!("cargo:rerun-if-changed={}", device_file);
    }

    println!("cargo:rerun-if-changed=build.rs");
}
//! Channel for sending and receiving STUN messages.
use crate::message::{
    ErrorResponse, Indication, InvalidMessage, MessageError, MessageErrorKind, MessageResult,
    Request, Response, SuccessResponse,
};
use crate::transport::StunTransport;
use crate::{Error, Result};
use fibers::sync::oneshot;
use fibers_timeout_queue::TimeoutQueue;
use futures::{Async, Future, Poll};
use std::collections::HashMap;
use std::fmt;
use std::time::Duration;
use stun_codec::{Attribute, BrokenMessage, Message, MessageClass, Method, TransactionId};
use trackable::error::ErrorKindExt;

// One-shot handle used to deliver the response (or error) of an outstanding
// request transaction back to the caller of `Channel::call`.
type Reply<A> = oneshot::Monitored<Response<A>, MessageError>;

/// [`Channel`] builder.
///
/// [`Channel`]: ./struct.Channel.html
#[derive(Debug, Clone)]
pub struct ChannelBuilder {
    request_timeout: Duration,
}
impl ChannelBuilder {
    /// The default value of `request_timeout`.
    ///
    /// > Reliability of STUN over TCP and TLS-over-TCP is handled by TCP
    /// > itself, and there are no retransmissions at the STUN protocol level.
    /// > However, for a request/response transaction, if the client has not
    /// > received a response by **Ti** seconds after it sent the SYN to establish
    /// > the connection, it considers the transaction to have timed out. **Ti**
    /// > SHOULD be configurable and SHOULD have a default of **39.5s**.
    /// >
    /// > [RFC 5389 -- 7.2.2. Sending over TCP or TLS-over-TCP]
    ///
    /// [RFC 5389 -- 7.2.2. Sending over TCP or TLS-over-TCP]: https://tools.ietf.org/html/rfc5389#section-7.2.2
    pub const DEFAULT_REQUEST_TIMEOUT_MS: u64 = 39_500;

    /// Makes a new `ChannelBuilder` instance with the default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the request timeout duration of the channel.
    ///
    /// The default value is `DEFAULT_REQUEST_TIMEOUT_MS`.
    pub fn request_timeout(&mut self, duration: Duration) -> &mut Self {
        self.request_timeout = duration;
        self
    }

    /// Makes a new `Channel` instance with the given settings.
    pub fn finish<A, T>(&self, transporter: T) -> Channel<A, T>
    where
        A: Attribute,
        T: StunTransport<A>,
    {
        Channel {
            transporter,
            timeout_queue: TimeoutQueue::new(),
            request_timeout: self.request_timeout,
            transactions: HashMap::new(),
        }
    }
}
impl Default for ChannelBuilder {
    fn default() -> Self {
        ChannelBuilder {
            request_timeout: Duration::from_millis(Self::DEFAULT_REQUEST_TIMEOUT_MS),
        }
    }
}

/// Channel for sending and receiving STUN messages.
pub struct Channel<A, T>
where
    A: Attribute,
    T: StunTransport<A>,
{
    transporter: T,
    // Expiry tracking for outstanding requests; stale entries are lazily
    // discarded once the matching transaction has already completed.
    timeout_queue: TimeoutQueue<(T::PeerAddr, TransactionId)>,
    request_timeout: Duration,
    // In-flight request transactions, keyed by peer address + transaction id.
    transactions: HashMap<(T::PeerAddr, TransactionId), (Method, Reply<A>)>,
}
impl<A, T> fmt::Debug for Channel<A, T>
where
    A: Attribute,
    T: StunTransport<A>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Channel {{ .. }}")
    }
}
impl<A, T> Channel<A, T>
where
    A: Attribute,
    T: StunTransport<A>,
{
    /// Makes a new `Channel` instance.
    ///
    /// This is equivalent to `ChannelBuilder::default().finish(transporter)`.
    pub fn new(transporter: T) -> Self {
        ChannelBuilder::default().finish(transporter)
    }

    /// Sends the given request message to the destination peer and
    /// returns a future that waits the corresponding response.
    #[allow(clippy::map_entry)]
    pub fn call(
        &mut self,
        peer: T::PeerAddr,
        request: Request<A>,
    ) -> impl Future<Item = Response<A>, Error = MessageError> {
        let id = request.transaction_id();
        let method = request.method();
        let (tx, rx) = oneshot::monitor();
        // Reject duplicate transaction ids up front; otherwise try to send and
        // only register the transaction once the send has been accepted.
        if self.transactions.contains_key(&(peer.clone(), id)) {
            let e = MessageErrorKind::InvalidInput
                .cause(format!("Transaction ID conflicts: transaction_id={id:?}"));
            tx.exit(Err(track!(e).into()));
        } else if let Err(e) = track!(self
            .transporter
            .start_send(peer.clone(), request.into_message()))
        {
            tx.exit(Err(e.into()));
        } else {
            self.transactions.insert((peer.clone(), id), (method, tx));
            self.timeout_queue.push((peer, id), self.request_timeout);
        }
        rx.map_err(MessageError::from)
    }

    /// Sends the given indication message to the destination peer.
    pub fn cast(&mut self, peer: T::PeerAddr, indication: Indication<A>) -> MessageResult<()> {
        track!(self.transporter.start_send(peer, indication.into_message()))?;
        Ok(())
    }

    /// Replies the given response message to the destination peer.
    pub fn reply(&mut self, peer: T::PeerAddr, response: Response<A>) -> MessageResult<()> {
        // Both the success (`Ok`) and error (`Err`) arms carry a message.
        let message = response
            .map(|m| m.into_message())
            .unwrap_or_else(|m| m.into_message());
        track!(self.transporter.start_send(peer, message))?;
        Ok(())
    }

    /// Returns a reference to the transporter of the channel.
    pub fn transporter_ref(&self) -> &T {
        &self.transporter
    }

    /// Returns a mutable reference to the transporter of the channel.
    pub fn transporter_mut(&mut self) -> &mut T {
        &mut self.transporter
    }

    /// Returns the number of the outstanding request/response transactions in the channel.
    pub fn outstanding_transactions(&self) -> usize {
        self.transactions.len()
    }

    /// Polls the transmission of the all outstanding messages in the channel have been completed.
    ///
    /// If it has been completed, this will return `Ok(Async::Ready(()))`.
    pub fn poll_send(&mut self) -> Poll<(), Error> {
        Ok(track!(self.transporter.poll_send())?)
    }

    /// Polls reception of a message from a peer.
    #[allow(clippy::type_complexity)]
    pub fn poll_recv(&mut self) -> Poll<Option<(T::PeerAddr, RecvMessage<A>)>, Error> {
        track!(self.handle_timeout())?;
        while let Async::Ready(item) = track!(self.transporter.poll_recv())? {
            if let Some((peer, message)) = item {
                if let Some(item) = track!(self.handle_message(peer, message))? {
                    return Ok(Async::Ready(Some(item)));
                }
            } else {
                // The transport has been closed.
                return Ok(Async::Ready(None));
            }
        }
        Ok(Async::NotReady)
    }

    // Expires timed-out transactions: notifies the waiting caller with a
    // `Timeout` error and lets the transporter clean up its transaction state.
    fn handle_timeout(&mut self) -> Result<()> {
        let transactions = &mut self.transactions;
        while let Some((peer, id)) = self
            .timeout_queue
            .filter_pop(|entry| transactions.contains_key(entry))
        {
            if let Some((_, tx)) = transactions.remove(&(peer.clone(), id)) {
                let e = track!(MessageErrorKind::Timeout.error());
                tx.exit(Err(e.into()));
            }
            track!(self.transporter.finish_transaction(&peer, id))?;
        }
        Ok(())
    }

    // Dispatches an incoming (possibly broken) message by its class; returns
    // `None` when the message completed a pending transaction internally.
    fn handle_message(
        &mut self,
        peer: T::PeerAddr,
        message: std::result::Result<Message<A>, BrokenMessage>,
    ) -> Result<Option<(T::PeerAddr, RecvMessage<A>)>> {
        let message = match message {
            Err(broken) => Some(self.handle_broken_message(&broken)),
            Ok(message) => match message.class() {
                MessageClass::Indication => Some(self.handle_indication(message)),
                MessageClass::Request => Some(self.handle_request(message)),
                MessageClass::SuccessResponse => {
                    track!(self.handle_success_response(&peer, message))?
                }
                MessageClass::ErrorResponse => track!(self.handle_error_response(&peer, message))?,
            },
        };
        Ok(message.map(|m| (peer, m)))
    }

    // Converts an undecodable message into `RecvMessage::Invalid`, preserving
    // the original codec error as the cause.
    fn handle_broken_message(&self, message: &BrokenMessage) -> RecvMessage<A> {
        let bytecodec_error_kind = *message.error().kind();
        let error = MessageErrorKind::MalformedAttribute.takes_over(message.error().clone());
        RecvMessage::Invalid(InvalidMessage::new(
            message.method(),
            message.class(),
            message.transaction_id(),
            track!(error; bytecodec_error_kind).into(),
        ))
    }

    fn handle_indication(&self, message: Message<A>) -> RecvMessage<A> {
        let class = message.class();
        let method = message.method();
        let transaction_id = message.transaction_id();
        match track!(Indication::from_message(message)) {
            Err(error) => {
                RecvMessage::Invalid(InvalidMessage::new(method, class, transaction_id, error))
            }
            Ok(indication) => RecvMessage::Indication(indication),
        }
    }

    fn handle_request(&self, message: Message<A>) -> RecvMessage<A> {
        let class = message.class();
        let method = message.method();
        let transaction_id = message.transaction_id();
        match track!(Request::from_message(message)) {
            Err(error) => {
                RecvMessage::Invalid(InvalidMessage::new(method, class, transaction_id, error))
            }
            Ok(request) => RecvMessage::Request(request),
        }
    }

    // Completes the matching transaction with `Ok(response)`; responses with
    // unknown transaction ids surface to the caller as `RecvMessage::Invalid`.
    fn handle_success_response(
        &mut self,
        peer: &T::PeerAddr,
        message: Message<A>,
    ) -> Result<Option<RecvMessage<A>>> {
        let class = message.class();
        let method = message.method();
        let transaction_id = message.transaction_id();
        if let Some((method, tx)) = self.transactions.remove(&(peer.clone(), transaction_id)) {
            track!(self.transporter.finish_transaction(peer, transaction_id))?;
            let result = track!(SuccessResponse::from_message(message))
                .and_then(|m| {
                    // The response's method must match the original request's.
                    track_assert_eq!(m.method(), method, MessageErrorKind::UnexpectedResponse);
                    Ok(m)
                })
                .map(Ok);
            tx.exit(result);
            Ok(None)
        } else {
            let error =
                track!(MessageErrorKind::UnexpectedResponse.cause("Unknown transaction ID")).into();
            let message =
                RecvMessage::Invalid(InvalidMessage::new(method, class, transaction_id, error));
            Ok(Some(message))
        }
    }

    // Same as `handle_success_response` but completes with `Err(response)`.
    fn handle_error_response(
        &mut self,
        peer: &T::PeerAddr,
        message: Message<A>,
    ) -> Result<Option<RecvMessage<A>>> {
        let class = message.class();
        let method = message.method();
        let transaction_id = message.transaction_id();
        if let Some((method, tx)) = self.transactions.remove(&(peer.clone(), transaction_id)) {
            track!(self.transporter.finish_transaction(peer, transaction_id))?;
            let result = track!(ErrorResponse::from_message(message))
                .and_then(|m| {
                    track_assert_eq!(m.method(), method, MessageErrorKind::UnexpectedResponse);
                    Ok(m)
                })
                .map(Err);
            tx.exit(result);
            Ok(None)
        } else {
            let error =
                track!(MessageErrorKind::UnexpectedResponse.cause("Unknown transaction ID")).into();
            let message =
                RecvMessage::Invalid(InvalidMessage::new(method, class, transaction_id, error));
            Ok(Some(message))
        }
    }
}

/// Received message.
///
/// Messages are received by calling `Channel::poll` method.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum RecvMessage<A> {
    Request(Request<A>),
    Indication(Indication<A>),
    Invalid(InvalidMessage),
}
use cumulus_client_consensus_aura::{
    build_aura_consensus, BuildAuraConsensusParams, SlotProportion,
};
use cumulus_client_network::build_block_announce_validator;
use cumulus_client_service::{
    prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
};
use cumulus_primitives_core::{
    ParaId,
    relay_chain::v1::{Hash as PHash, PersistedValidationData},
};
use cumulus_client_consensus_common::{
    ParachainConsensus, ParachainCandidate, ParachainBlockImport,
};
use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
use polkadot_primitives::v0::CollatorPair;
use runtime_common::Header;
use sc_client_api::ExecutorProvider;
use sc_executor::native_executor_instance;
use sc_service::{Configuration, PartialComponents, Role, TFullBackend, TFullClient, TaskManager};
use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle};
use sp_consensus::{
    BlockImportParams, BlockOrigin,
    import_queue::{BasicQueue, CacheKeyId, Verifier as VerifierT},
};
use sp_api::{ConstructRuntimeApi, ApiExt};
use sp_consensus::SlotData;
use sp_consensus_aura::AuraApi;
use sp_runtime::{
    generic::{self, BlockId},
    OpaqueExtrinsic,
    traits::{BlakeTwo256, Header as HeaderT},
};
use std::sync::Arc;
use futures::lock::Mutex;

pub use sc_executor::NativeExecutor;

/// Opaque block type shared by all runtimes served by this node.
pub type Block = generic::Block<Header, OpaqueExtrinsic>;

// Native Statemint executor instance.
native_executor_instance!(
    pub StatemintRuntimeExecutor,
    statemint_runtime::api::dispatch,
    statemint_runtime::native_version,
    frame_benchmarking::benchmarking::HostFunctions,
);

// Native Statemine executor instance.
native_executor_instance!(
    pub StatemineRuntimeExecutor,
    statemine_runtime::api::dispatch,
    statemine_runtime::native_version,
    frame_benchmarking::benchmarking::HostFunctions,
);

// Native Westmint executor instance.
native_executor_instance!(
    pub WestmintRuntimeExecutor,
    westmint_runtime::api::dispatch,
    westmint_runtime::native_version,
    frame_benchmarking::benchmarking::HostFunctions,
);

// Lazily-built value: construction is deferred until first access so the
// builder closure can capture resources that only become available later.
enum BuildOnAccess<R> {
    Uninitialized(Option<Box<dyn FnOnce() -> R + Send + Sync>>),
    Initialized(R),
}

impl<R> BuildOnAccess<R> {
    // Builds the value on first call, then returns the cached instance.
    fn get_mut(&mut self) -> &mut R {
        loop {
            match self {
                Self::Uninitialized(f) => {
                    *self = Self::Initialized((f.take().unwrap())());
                },
                Self::Initialized(ref mut r) => return r,
            }
        }
    }
}

/// Special [`ParachainConsensus`] implementation that waits for the upgrade from
/// shell to a parachain runtime that implements Aura.
struct WaitForAuraConsensus<Client> {
    client: Arc<Client>,
    aura_consensus: Arc<Mutex<BuildOnAccess<Box<dyn ParachainConsensus<Block>>>>>,
}

impl<Client> Clone for WaitForAuraConsensus<Client> {
    fn clone(&self) -> Self {
        Self {
            client: self.client.clone(),
            aura_consensus: self.aura_consensus.clone(),
        }
    }
}

#[async_trait::async_trait]
impl<Client> ParachainConsensus<Block> for WaitForAuraConsensus<Client>
where
    Client: sp_api::ProvideRuntimeApi<Block> + Send + Sync,
    Client::Api: AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>,
{
    async fn produce_candidate(
        &mut self,
        parent: &Header,
        relay_parent: PHash,
        validation_data: &PersistedValidationData,
    ) -> Option<ParachainCandidate<Block>> {
        let block_id = BlockId::hash(parent.hash());
        // Only author once the runtime at `parent` actually exposes the AuRa API.
        if self.client
            .runtime_api()
            .has_api::<dyn AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>>(&block_id)
            .unwrap_or(false)
        {
            self.aura_consensus
                .lock()
                .await
                .get_mut()
                .produce_candidate(
                    parent,
                    relay_parent,
                    validation_data,
                ).await
        } else {
            log::debug!("Waiting for runtime with AuRa api");
            None
        }
    }
}

// Import-queue verifier that delegates to the AuRa verifier once the runtime
// implements the AuRa API, and to the relay-chain verifier before that.
struct Verifier<Client> {
    client: Arc<Client>,
    aura_verifier: BuildOnAccess<Box<dyn VerifierT<Block>>>,
    relay_chain_verifier: Box<dyn VerifierT<Block>>,
}

#[async_trait::async_trait]
impl<Client> VerifierT<Block> for Verifier<Client>
where
    Client: sp_api::ProvideRuntimeApi<Block> + Send + Sync,
    Client::Api: AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>,
{
    async fn verify(
        &mut self,
        origin: BlockOrigin,
        header: Header,
        justifications: Option<sp_runtime::Justifications>,
        body: Option<Vec<<Block as sp_runtime::traits::Block>::Extrinsic>>,
    ) -> Result<
        (
            BlockImportParams<Block, ()>,
            Option<Vec<(CacheKeyId, Vec<u8>)>>,
        ),
        String,
    > {
        // The runtime at the parent block decides which verifier applies.
        let block_id = BlockId::hash(*header.parent_hash());
        if self.client
            .runtime_api()
            .has_api::<dyn AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>>(&block_id)
            .unwrap_or(false)
        {
            self.aura_verifier.get_mut().verify(origin, header, justifications, body).await
        } else {
            self.relay_chain_verifier.verify(origin, header, justifications, body).await
        }
    }
}

/// Starts a `ServiceBuilder` for a full service.
///
/// Use this macro if you don't actually need the full service, but just the builder in order to
/// be able to perform chain operations.
pub fn new_partial<RuntimeApi, Executor>(
    config: &Configuration,
) -> Result<
    PartialComponents<
        TFullClient<Block, RuntimeApi, Executor>,
        TFullBackend<Block>,
        (),
        sp_consensus::DefaultImportQueue<Block, TFullClient<Block, RuntimeApi, Executor>>,
        sc_transaction_pool::FullPool<Block, TFullClient<Block, RuntimeApi, Executor>>,
        (Option<Telemetry>, Option<TelemetryWorkerHandle>),
    >,
    sc_service::Error,
>
where
    RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, Executor>>
        + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>
        + sp_consensus_aura::AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
    Executor: sc_executor::NativeExecutionDispatch + 'static,
{
    // Telemetry endpoints (if configured) are wired to a background worker.
    let telemetry = config
        .telemetry_endpoints
        .clone()
        .filter(|x| !x.is_empty())
        .map(|endpoints| -> Result<_, sc_telemetry::Error> {
            let worker = TelemetryWorker::new(16)?;
            let telemetry = worker.handle().new_telemetry(endpoints);
            Ok((worker, telemetry))
        })
        .transpose()?;

    let (client, backend, keystore_container, task_manager) =
        sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
            &config,
            telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
        )?;
    let client = Arc::new(client);

    let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());

    let telemetry = telemetry.map(|(worker, telemetry)| {
        task_manager.spawn_handle().spawn("telemetry", worker.run());
        telemetry
    });

    let transaction_pool = sc_transaction_pool::BasicPool::new_full(
        config.transaction_pool.clone(),
        config.role.is_authority().into(),
        config.prometheus_registry(),
        task_manager.spawn_handle(),
        client.clone(),
    );

    let client2 = client.clone();
    let telemetry_handle = telemetry.as_ref().map(|telemetry| telemetry.handle());

    // Deferred AuRa verifier; only constructed if/when a block's parent
    // runtime exposes the AuRa API (see `Verifier` above).
    let aura_verifier = move || {
        let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client2).unwrap();

        Box::new(cumulus_client_consensus_aura::build_verifier::<
            sp_consensus_aura::sr25519::AuthorityPair,
            _,
            _,
            _,
        >(cumulus_client_consensus_aura::BuildVerifierParams {
            client: client2.clone(),
            create_inherent_data_providers: move |_, _| async move {
                let time = sp_timestamp::InherentDataProvider::from_system_time();

                let slot =
                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
                        *time,
                        slot_duration.slot_duration(),
                    );

                Ok((time, slot))
            },
            can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client2.executor().clone()),
            telemetry: telemetry_handle,
        })) as Box<_>
    };

    let relay_chain_verifier = Box::new(RelayChainVerifier::new(
        client.clone(),
        |_, _| async { Ok(()) },
    )) as Box<_>;

    let verifier = Verifier {
        client: client.clone(),
        relay_chain_verifier,
        aura_verifier: BuildOnAccess::Uninitialized(Some(Box::new(aura_verifier))),
    };

    let registry = config.prometheus_registry().clone();
    let spawner = task_manager.spawn_essential_handle();

    let import_queue = BasicQueue::new(
        verifier,
        Box::new(ParachainBlockImport::new(client.clone())),
        None,
        &spawner,
        registry,
    );

    let params = PartialComponents {
        backend,
        client,
        import_queue,
        keystore_container,
        task_manager,
        transaction_pool,
        select_chain: (),
        other: (telemetry, telemetry_worker_handle),
    };

    Ok(params)
}

/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
pub async fn start_node<RuntimeApi, Executor, RB>(
    parachain_config: Configuration,
    collator_key: CollatorPair,
    polkadot_config: Configuration,
    id: ParaId,
    rpc_ext_builder: RB,
) -> sc_service::error::Result<(TaskManager, Arc<TFullClient<Block, RuntimeApi, Executor>>)>
where
    RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, Executor>>
        + Send
        + Sync
        + 'static,
    RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
        + sp_api::Metadata<Block>
        + sp_session::SessionKeys<Block>
        + sp_api::ApiExt<
            Block,
            StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
        > + sp_offchain::OffchainWorkerApi<Block>
        + sp_block_builder::BlockBuilder<Block>
        + cumulus_primitives_core::CollectCollationInfo<Block>
        + sp_consensus_aura::AuraApi<Block, sp_consensus_aura::sr25519::AuthorityId>,
    sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
    Executor: sc_executor::NativeExecutionDispatch + 'static,
    RB: Fn(
            Arc<TFullClient<Block, RuntimeApi, Executor>>,
        ) -> jsonrpc_core::IoHandler<sc_rpc::Metadata>
        + Send
        + 'static,
{
    if matches!(parachain_config.role, Role::Light) {
        return Err("Light client not supported!".into());
    }

    let parachain_config = prepare_node_config(parachain_config);

    let params = new_partial::<RuntimeApi, Executor>(&parachain_config)?;
    let (mut telemetry, telemetry_worker_handle) = params.other;

    // Embedded relay-chain (Polkadot) full node used by this parachain node.
    let relay_chain_full_node = cumulus_client_service::build_polkadot_full_node(
        polkadot_config,
        collator_key.clone(),
        telemetry_worker_handle,
    )
    .map_err(|e| match e {
        polkadot_service::Error::Sub(x) => x,
        s => format!("{}", s).into(),
    })?;

    let client = params.client.clone();
    let backend = params.backend.clone();
    let block_announce_validator = build_block_announce_validator(
        relay_chain_full_node.client.clone(),
        id,
        Box::new(relay_chain_full_node.network.clone()),
        relay_chain_full_node.backend.clone(),
    );

    let force_authoring = parachain_config.force_authoring;
    let validator = parachain_config.role.is_authority();
    let prometheus_registry = parachain_config.prometheus_registry().cloned();
    let transaction_pool = params.transaction_pool.clone();
    let mut task_manager = params.task_manager;
    let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
    let (network, system_rpc_tx, start_network) =
        sc_service::build_network(sc_service::BuildNetworkParams {
            config: &parachain_config,
            client: client.clone(),
            transaction_pool: transaction_pool.clone(),
            spawn_handle: task_manager.spawn_handle(),
            import_queue: import_queue.clone(),
            on_demand: None,
            block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
        })?;

    let rpc_client = client.clone();
    let rpc_extensions_builder = Box::new(move |_, _| rpc_ext_builder(rpc_client.clone()));

    sc_service::spawn_tasks(sc_service::SpawnTasksParams {
        on_demand: None,
        remote_blockchain: None,
        rpc_extensions_builder,
        client: client.clone(),
        transaction_pool: transaction_pool.clone(),
        task_manager: &mut task_manager,
        config: parachain_config,
        keystore: params.keystore_container.sync_keystore(),
        backend: backend.clone(),
        network: network.clone(),
        system_rpc_tx,
        telemetry: telemetry.as_mut(),
    })?;

    let announce_block = {
        let network = network.clone();
        Arc::new(move |hash, data| network.announce_block(hash, data))
    };

    if validator {
        // Collator path: lazily build the AuRa consensus and start collating.
        let client2 = client.clone();
        let relay_chain_backend = relay_chain_full_node.backend.clone();
        let relay_chain_client = relay_chain_full_node.client.clone();
        let keystore = params.keystore_container.sync_keystore();
        let spawn_handle = task_manager.spawn_handle();

        let parachain_consensus = Box::new(WaitForAuraConsensus {
            client: client2.clone(),
            aura_consensus: Arc::new(Mutex::new(BuildOnAccess::Uninitialized(Some(Box::new(move || {
                let slot_duration =
                    cumulus_client_consensus_aura::slot_duration(&*client2).unwrap();

                let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
                    spawn_handle,
                    client2.clone(),
                    transaction_pool,
                    prometheus_registry.as_ref(),
                    telemetry.as_ref().map(|t| t.handle()),
                );

                let relay_chain_backend2 = relay_chain_backend.clone();
                let relay_chain_client2 = relay_chain_client.clone();

                build_aura_consensus::<
                    sp_consensus_aura::sr25519::AuthorityPair,
                    _,
                    _,
                    _,
                    _,
                    _,
                    _,
                    _,
                    _,
                    _,
                >(BuildAuraConsensusParams {
                    proposer_factory,
                    create_inherent_data_providers:
                        move |_, (relay_parent, validation_data)| {
                            // The parachain inherent is created against the
                            // relay-chain state at `relay_parent`.
                            let parachain_inherent =
                                cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client(
                                    relay_parent,
                                    &relay_chain_client,
                                    &*relay_chain_backend,
                                    &validation_data,
                                    id,
                                );
                            async move {
                                let time = sp_timestamp::InherentDataProvider::from_system_time();

                                let slot =
                                    sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
                                        *time,
                                        slot_duration.slot_duration(),
                                    );

                                let parachain_inherent = parachain_inherent.ok_or_else(|| {
                                    Box::<dyn std::error::Error + Send + Sync>::from(
                                        "Failed to create parachain inherent",
                                    )
                                })?;
                                Ok((time, slot, parachain_inherent))
                            }
                        },
                    block_import: client2.clone(),
                    relay_chain_client: relay_chain_client2,
                    relay_chain_backend: relay_chain_backend2,
                    para_client: client2.clone(),
                    backoff_authoring_blocks: Option::<()>::None,
                    sync_oracle: network.clone(),
                    keystore,
                    force_authoring,
                    slot_duration,
                    // We got around 500ms for proposing
                    block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32),
                    telemetry: telemetry.map(|t| t.handle()),
                })
            }))))),
        });

        let spawner = task_manager.spawn_handle();

        let params = StartCollatorParams {
            para_id: id,
            block_status: client.clone(),
            announce_block,
            client: client.clone(),
            task_manager: &mut task_manager,
            collator_key,
            relay_chain_full_node,
            spawner,
            parachain_consensus,
            import_queue,
        };

        start_collator(params).await?;
    } else {
        // Non-authority path: follow the parachain without collating.
        let params = StartFullNodeParams {
            client: client.clone(),
            announce_block,
            task_manager: &mut task_manager,
            para_id: id,
            relay_chain_full_node,
        };

        start_full_node(params)?;
    }

    start_network.start_network();

    Ok((task_manager, client))
}
// svd2rust-style register accessors for LTDC_BPCR (two 12-bit fields:
// AVBP in bits 0:11 and AHBP in bits 16:27).
#[doc = "Reader of register LTDC_BPCR"]
pub type R = crate::R<u32, super::LTDC_BPCR>;
#[doc = "Writer for register LTDC_BPCR"]
pub type W = crate::W<u32, super::LTDC_BPCR>;
#[doc = "Register LTDC_BPCR `reset()`'s with value 0"]
impl crate::ResetValue for super::LTDC_BPCR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `AVBP`"]
pub type AVBP_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `AVBP`"]
pub struct AVBP_W<'a> {
    w: &'a mut W,
}
impl<'a> AVBP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // AVBP occupies bits 0:11; the value is masked to 12 bits.
        self.w.bits = (self.w.bits & !0x0fff) | ((value as u32) & 0x0fff);
        self.w
    }
}
#[doc = "Reader of field `AHBP`"]
pub type AHBP_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `AHBP`"]
pub struct AHBP_W<'a> {
    w: &'a mut W,
}
impl<'a> AHBP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // AHBP occupies bits 16:27; the value is masked to 12 bits, then shifted.
        self.w.bits = (self.w.bits & !(0x0fff << 16)) | (((value as u32) & 0x0fff) << 16);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:11 - AVBP"]
    #[inline(always)]
    pub fn avbp(&self) -> AVBP_R {
        AVBP_R::new((self.bits & 0x0fff) as u16)
    }
    #[doc = "Bits 16:27 - AHBP"]
    #[inline(always)]
    pub fn ahbp(&self) -> AHBP_R {
        AHBP_R::new(((self.bits >> 16) & 0x0fff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:11 - AVBP"]
    #[inline(always)]
    pub fn avbp(&mut self) -> AVBP_W {
        AVBP_W { w: self }
    }
    #[doc = "Bits 16:27 - AHBP"]
    #[inline(always)]
    pub fn ahbp(&mut self) -> AHBP_W {
        AHBP_W { w: self }
    }
}
// 8259A Programmable Interrupt Controller (PIC) setup.
//
// Note that several initialization command words (ICW2..ICW4) share a port
// with the interrupt mask register — the controller distinguishes them by
// write order after ICW1.
use crate::asm::out8;

const PIC0_ICW1: u32 = 0x0020;
pub const PIC0_OCW2: u32 = 0x0020;
const PIC0_IMR: u32 = 0x0021;
const PIC0_ICW2: u32 = 0x0021;
const PIC0_ICW3: u32 = 0x0021;
const PIC0_ICW4: u32 = 0x0021;
const PIC1_ICW1: u32 = 0x00a0;
pub const PIC1_OCW2: u32 = 0x00a0;
const PIC1_IMR: u32 = 0x00a1;
const PIC1_ICW2: u32 = 0x00a1;
const PIC1_ICW3: u32 = 0x00a1;
const PIC1_ICW4: u32 = 0x00a1;

pub const PORT_KEYCMD: u32 = 0x0064;
pub const PORT_KEYDAT: u32 = 0x60;

/// Initialize both PICs: remap IRQ0-15 to interrupt vectors 0x20-0x2f and
/// mask everything except the master→slave cascade line.
pub fn init() {
    out8(PIC0_IMR, 0xff); // refuse all interrupts (master)
    out8(PIC1_IMR, 0xff); // refuse all interrupts (slave)

    out8(PIC0_ICW1, 0x11); // edge trigger mode
    out8(PIC0_ICW2, 0x20); // IRQ0-7 are delivered as INT 0x20-0x27
    out8(PIC0_ICW3, 1 << 2); // PIC1 (slave) is connected on IRQ2
    out8(PIC0_ICW4, 0x01); // non-buffered mode

    out8(PIC1_ICW1, 0x11); // edge trigger mode
    out8(PIC1_ICW2, 0x28); // IRQ8-15 are delivered as INT 0x28-0x2f
    out8(PIC1_ICW3, 2); // slave identity: attached via IRQ2
    out8(PIC1_ICW4, 0x01); // non-buffered mode

    out8(PIC0_IMR, 0xfb); // 11111011: mask everything except the slave PIC
    out8(PIC1_IMR, 0xff); // 11111111: refuse all interrupts
}

/// Unmask the input-related interrupt lines once handlers are installed.
pub fn allow_input() {
    out8(PIC0_IMR, 0xf8); // allow PIT, slave PIC and keyboard (11111000)
    out8(PIC1_IMR, 0xef); // allow mouse (11101111)
}
extern crate pest; #[macro_use] extern crate pest_derive; use clap::{load_yaml, App}; use image::{DynamicImage, ImageBuffer}; use std::fs::File; use std::io::prelude::*; use std::path::Path; mod canvas; mod css; mod html; mod layout; mod mock; mod paint; mod style; use pest::Parser; fn main() { let yaml = load_yaml!("cli.yml"); let matches = App::from(yaml).get_matches(); let read_source = |arg_filename: Option<&str>, default_filename: &str| { let path = match arg_filename { Some(ref filename) => filename, None => default_filename, }; let mut file = File::open(&Path::new(path)).unwrap(); let mut content = String::new(); file.read_to_string(&mut content).unwrap(); content }; let html = read_source(matches.value_of("html"), "examples/test.html"); let css = read_source(matches.value_of("css"), "examples/test.css"); let initial_containing_block = layout::Dimensions { content: layout::Rect { x: 0.0, y: 0.0, width: 800.0, height: 600.0, }, padding: Default::default(), border: Default::default(), margin: Default::default(), }; let root_node = html::parse_nodes(html.as_str()); let stylesheet = css::parse_css(css.as_str()); // println!("{:?}", stylesheet); let style_root = style::style_tree(&root_node, &stylesheet); // println!("{:?}", style_root); let layout_root = layout::layout_tree(&style_root, initial_containing_block); // println!("{:?}", layout_root); let canvas = canvas::paint(&layout_root, initial_containing_block.content); let filename = matches.value_of("output").unwrap_or("output.png"); let (w, h) = (canvas.width as u32, canvas.height as u32); let img = ImageBuffer::from_fn(w, h, move |x, y| { let color = canvas.pixels[(y * w + x) as usize]; image::Rgb([color.r, color.g, color.b]) }); DynamicImage::ImageRgb8(img).save(filename).unwrap(); }
// Bytes destination column for writing query results into a pandas-backed
// numpy object array. Values are staged into a local byte buffer and turned
// into Python `bytes` objects in batches under a lock (see `flush`).
use super::{check_dtype, HasPandasColumn, PandasColumn, PandasColumnObject, GIL_MUTEX};
use crate::errors::ConnectorXPythonError;
use anyhow::anyhow;
use fehler::throws;
use ndarray::{ArrayViewMut2, Axis, Ix2};
use numpy::{npyffi::NPY_TYPES, Element, PyArray, PyArrayDescr};
use pyo3::{FromPyObject, Py, PyAny, PyResult, Python};
use std::any::TypeId;

/// Newtype around an owned Python `bytes` reference so it can be stored as a
/// cell of a numpy object array.
#[derive(Clone)]
#[repr(transparent)]
pub struct PyBytes(Py<pyo3::types::PyBytes>);

// In order to put it into a numpy array
impl Element for PyBytes {
    const DATA_TYPE: numpy::DataType = numpy::DataType::Object;
    fn is_same_type(dtype: &PyArrayDescr) -> bool {
        // Cells are plain Python object pointers from numpy's point of view.
        unsafe { *dtype.as_dtype_ptr() }.type_num == NPY_TYPES::NPY_OBJECT as i32
    }
}

/// A mutable 2-D view over the numpy object block that backs several columns.
pub struct BytesBlock<'a> {
    data: ArrayViewMut2<'a, PyBytes>,
    buf_size_mb: usize,
}

impl<'a> FromPyObject<'a> for BytesBlock<'a> {
    fn extract(ob: &'a PyAny) -> PyResult<Self> {
        check_dtype(ob, "object")?;
        let array = ob.downcast::<PyArray<PyBytes, Ix2>>()?;
        // SAFETY relies on the caller not aliasing this mutable view;
        // inherited from the upstream design — TODO confirm invariants.
        let data = unsafe { array.as_array_mut() };
        Ok(BytesBlock {
            data,
            buf_size_mb: 16, // in MB
        })
    }
}

impl<'a> BytesBlock<'a> {
    /// Split the 2-D block into one `BytesColumn` per slice along `Axis(0)`.
    ///
    /// NOTE(review): the row count is taken from `ncols()`, which implies the
    /// block is laid out with columns along `Axis(0)` — confirm against the
    /// writer's layout.
    #[throws(ConnectorXPythonError)]
    pub fn split(self) -> Vec<BytesColumn> {
        let mut ret = vec![];
        let mut view = self.data;
        let nrows = view.ncols();
        while view.nrows() > 0 {
            let (col, rest) = view.split_at(Axis(0), 1);
            view = rest;
            ret.push(BytesColumn {
                // Raw pointer into the (contiguous) column slice; valid for
                // as long as the underlying numpy array stays alive.
                data: col
                    .into_shape(nrows)?
                    .into_slice()
                    .ok_or_else(|| anyhow!("get None for splitted String data"))?
                    .as_mut_ptr(),
                bytes_lengths: vec![],
                row_idx: vec![],
                bytes_buf: Vec::with_capacity(self.buf_size_mb * (1 << 20) * 11 / 10), // allocate a little bit more memory to avoid Vec growth
                buf_size: self.buf_size_mb * (1 << 20),
            })
        }
        ret
    }
}

/// One destination column. Bytes are staged in `bytes_buf` and materialized
/// into Python objects on `flush`.
pub struct BytesColumn {
    // Base pointer into the numpy object array for this column.
    data: *mut PyBytes,
    // Staged raw bytes for all pending writes, concatenated.
    bytes_buf: Vec<u8>,
    bytes_lengths: Vec<usize>, // usize::MAX if the string is None
    // Destination row index for each pending write.
    row_idx: Vec<usize>,
    // Flush threshold for `bytes_buf`, in bytes.
    buf_size: usize,
}

// SAFETY: the raw pointer is only dereferenced under the GIL/global mutex in
// `flush`; distinct columns point at disjoint slices — TODO confirm.
unsafe impl Send for BytesColumn {}
unsafe impl Sync for BytesColumn {}

impl PandasColumnObject for BytesColumn {
    fn typecheck(&self, id: TypeId) -> bool {
        id == TypeId::of::<&'static [u8]>() || id == TypeId::of::<Option<&'static [u8]>>()
    }
    fn typename(&self) -> &'static str {
        std::any::type_name::<&'static [u8]>()
    }
    /// Write out any bytes still staged in the buffer.
    #[throws(ConnectorXPythonError)]
    fn finalize(&mut self) {
        self.flush()?;
    }
}

impl PandasColumn<Vec<u8>> for BytesColumn {
    #[throws(ConnectorXPythonError)]
    fn write(&mut self, val: Vec<u8>, row: usize) {
        self.bytes_lengths.push(val.len());
        self.bytes_buf.extend_from_slice(&val[..]);
        self.row_idx.push(row);
        self.try_flush()?;
    }
}

impl<'r> PandasColumn<&'r [u8]> for BytesColumn {
    #[throws(ConnectorXPythonError)]
    fn write(&mut self, val: &'r [u8], row: usize) {
        self.bytes_lengths.push(val.len());
        self.bytes_buf.extend_from_slice(val);
        self.row_idx.push(row);
        self.try_flush()?;
    }
}

impl PandasColumn<Option<Vec<u8>>> for BytesColumn {
    #[throws(ConnectorXPythonError)]
    fn write(&mut self, val: Option<Vec<u8>>, row: usize) {
        match val {
            Some(b) => {
                self.bytes_lengths.push(b.len());
                self.bytes_buf.extend_from_slice(&b[..]);
                self.row_idx.push(row);
                self.try_flush()?;
            }
            None => {
                // usize::MAX length marks a NULL; no bytes are staged.
                self.bytes_lengths.push(usize::MAX);
                self.row_idx.push(row);
            }
        }
    }
}

impl<'r> PandasColumn<Option<&'r [u8]>> for BytesColumn {
    #[throws(ConnectorXPythonError)]
    fn write(&mut self, val: Option<&'r [u8]>, row: usize) {
        match val {
            Some(b) => {
                self.bytes_lengths.push(b.len());
                self.bytes_buf.extend_from_slice(b);
                self.row_idx.push(row);
                self.try_flush()?;
            }
            None => {
                // usize::MAX length marks a NULL; no bytes are staged.
                self.bytes_lengths.push(usize::MAX);
                self.row_idx.push(row);
            }
        }
    }
}

impl HasPandasColumn for Vec<u8> {
    type PandasColumn<'a> = BytesColumn;
}

impl HasPandasColumn for Option<Vec<u8>> {
    type PandasColumn<'a> = BytesColumn;
}

impl<'r> HasPandasColumn for &'r [u8] {
    type PandasColumn<'a> = BytesColumn;
}

impl<'r> HasPandasColumn for Option<&'r [u8]> {
    type PandasColumn<'a> = BytesColumn;
}

impl BytesColumn {
    /// Create `counts` columns that all share the same base pointer but have
    /// independent staging buffers (for partitioned writes).
    pub fn partition(self, counts: usize) -> Vec<BytesColumn> {
        let mut partitions = vec![];
        for _ in 0..counts {
            partitions.push(BytesColumn {
                data: self.data,
                bytes_lengths: vec![],
                row_idx: vec![],
                bytes_buf: Vec::with_capacity(self.buf_size),
                buf_size: self.buf_size,
            });
        }
        partitions
    }

    /// Turn every staged value into a Python `bytes` (or `None`) object and
    /// store it at its destination row, then clear the staging buffers.
    #[throws(ConnectorXPythonError)]
    pub fn flush(&mut self) {
        let nstrings = self.bytes_lengths.len();
        if nstrings > 0 {
            // SAFETY: assumes the GIL is actually held by the surrounding
            // machinery — TODO confirm at the call sites.
            let py = unsafe { Python::assume_gil_acquired() };
            {
                // allocation in python is not thread safe
                let _guard = GIL_MUTEX
                    .lock()
                    .map_err(|e| anyhow!("mutex poisoned {}", e))?;
                let mut start = 0;
                for (i, &len) in self.bytes_lengths.iter().enumerate() {
                    if len != usize::MAX {
                        let end = start + len;
                        unsafe {
                            // allocate and write in the same time
                            *self.data.add(self.row_idx[i]) = PyBytes(
                                pyo3::types::PyBytes::new(py, &self.bytes_buf[start..end]).into(),
                            );
                        };
                        start = end;
                    } else {
                        // NULL marker: store Python's None singleton.
                        unsafe {
                            let b = Py::from_borrowed_ptr(py, pyo3::ffi::Py_None());
                            *self.data.add(self.row_idx[i]) = PyBytes(b);
                        }
                    }
                }
            }
            // truncate(0) keeps the allocated capacity for the next batch.
            self.bytes_buf.truncate(0);
            self.bytes_lengths.truncate(0);
            self.row_idx.truncate(0);
        }
    }

    /// Flush only once the staging buffer crosses the configured threshold.
    #[throws(ConnectorXPythonError)]
    pub fn try_flush(&mut self) {
        if self.bytes_buf.len() >= self.buf_size {
            self.flush()?;
        }
    }
}
use std::sync::Arc; use anyhow::Context; use crate::app::authz::AuthzObject; use crate::app::AppContext; use crate::app::{error::ErrorExt, error::ErrorKind as AppErrorKind}; use crate::db::class::WebinarType; use super::{find, AppResult}; pub use convert::convert as convert_webinar; pub use create::*; pub use download::download as download_webinar; pub use restart_transcoding::restart_transcoding; mod convert; mod create; mod download; mod restart_transcoding;
/* Our Crate */
use day_3::*;
/* Standard Library */
use std::fs::File;
use std::io::Read;

/// Integration test: find the wire crossing closest to the origin
/// (by Manhattan distance) for the two wires described in `input.txt`.
#[test]
fn calc_nearest_wire_crossing() {
    let (wire1, wire2) = read_file("input.txt");
    let wire1 = parse_string_to_wire(wire1);
    let wire2 = parse_string_to_wire(wire2);

    let mut positions1 = get_positions(wire1);
    let mut positions2 = get_positions(wire2);

    // Both wires start at the origin; that trivial "crossing" must not count.
    let origin = Position::new(0, 0);
    positions1.remove(&origin);
    positions2.remove(&origin);

    // Scan the shared positions for the one nearest to the origin, starting
    // from a sentinel far away.
    let mut closest = Position::new(1000000, 1000000);
    for pos in positions1.intersection(&positions2) {
        if pos.distance_from_origin() < closest.distance_from_origin() {
            closest = *pos;
        }
    }

    // Bug fix: the previous version had an unconditional `return;` here
    // (debug leftover), which made the result below unreachable, and the
    // message misspelled "Closest".
    let result = closest.distance_from_origin();
    println!("Closest crossing is at: {}", result);
}
// Tabletop character block: holds a selectable set of standee textures,
// transform data, linked property blocks, and TOML (de)serialization.
use super::block_trait::DisplayNamed;
use super::BlockId;
use crate::arena::resource::ResourceId;
use crate::libs::color::Pallet;
use crate::libs::select_list::SelectList;

/// One selectable standee image for a character: a display name, an optional
/// texture resource, and its display height.
#[derive(Clone)]
pub struct CharacterTexture {
    name: String,
    texture_id: Option<ResourceId>,
    height: f32,
}

/// A character on the table.
#[derive(Clone)]
pub struct Character {
    // Overall scale of the character.
    size: f32,
    name: String,
    display_name: String,
    description: String,
    // World-space position [x, y, z].
    position: [f32; 3],
    // Standee textures; exactly one is "selected" at a time.
    textures: SelectList<CharacterTexture>,
    // IDs of property blocks attached to this character.
    properties: Vec<BlockId>,
    name_color: Pallet,
}

impl Character {
    /// New character with default scale, empty strings, and a single
    /// placeholder texture named "[default]".
    pub fn new() -> Self {
        Self {
            size: 1.0,
            name: String::from(""),
            display_name: String::from(""),
            description: String::from(""),
            position: [0.0, 0.0, 0.0],
            textures: SelectList::new(
                vec![CharacterTexture {
                    name: String::from("[default]"),
                    texture_id: None,
                    height: 1.0,
                }],
                0,
            ),
            properties: vec![],
            name_color: Pallet::gray(9).a(100),
        }
    }

    pub fn size(&self) -> f32 {
        self.size
    }

    pub fn set_size(&mut self, size: f32) {
        self.size = size;
    }

    /// Height of the currently selected texture (1.0 if none is selected).
    pub fn current_tex_height(&self) -> f32 {
        if let Some(tex) = self.textures.selected() {
            tex.height
        } else {
            1.0
        }
    }

    /// Set the height of texture `tex_idx`; silently ignored if out of range.
    pub fn set_tex_height(&mut self, tex_idx: usize, height: f32) {
        if let Some(tex) = self.textures.get_mut(tex_idx) {
            tex.height = height;
        }
    }

    /// Resource ID of the currently selected texture, if any.
    pub fn current_tex_id(&self) -> Option<&ResourceId> {
        if let Some(tex) = self.textures.selected() {
            tex.texture_id.as_ref()
        } else {
            None
        }
    }

    /// Set (or clear) the resource of texture `tex_idx`; ignored if out of range.
    pub fn set_tex_id(&mut self, tex_idx: usize, tex_id: Option<ResourceId>) {
        if let Some(tex) = self.textures.get_mut(tex_idx) {
            tex.texture_id = tex_id;
        }
    }

    pub fn name(&self) -> &String {
        &self.name
    }

    pub fn set_name(&mut self, name: String) {
        self.name = name;
    }

    pub fn display_name(&self) -> &String {
        &self.display_name
    }

    pub fn set_display_name(&mut self, display_name: String) {
        self.display_name = display_name;
    }

    pub fn name_color(&self) -> &Pallet {
        &self.name_color
    }

    pub fn set_name_color(&mut self, color: Pallet) {
        self.name_color = color;
    }

    pub fn description(&self) -> &String {
        &self.description
    }

    pub fn set_description(&mut self, description: String) {
        self.description = description;
    }

    pub fn position(&self) -> &[f32; 3] {
        &self.position
    }

    pub fn set_position(&mut self, position: [f32; 3]) {
        self.position = position;
    }

    /// Names of all textures, in list order.
    pub fn tex_names(&self) -> Vec<&str> {
        self.textures.iter().map(|tex| tex.name.as_str()).collect()
    }

    /// Name of the currently selected texture ("" if none).
    pub fn current_tex_name(&self) -> &str {
        self.textures
            .selected()
            .map(|tex| tex.name.as_str())
            .unwrap_or("")
    }

    pub fn current_tex_idx(&self) -> usize {
        self.textures.selected_idx()
    }

    pub fn set_current_tex_idx(&mut self, idx: usize) {
        self.textures.set_selected_idx(idx);
    }

    /// Append a fresh texture (inheriting the character's size as its height)
    /// and make it the selected one.
    pub fn add_tex_to_select(&mut self) {
        self.textures.push(CharacterTexture {
            name: String::from("新規立ち絵"),
            texture_id: None,
            height: self.size,
        });
        self.textures.set_selected_idx(self.textures.len() - 1);
    }

    /// Remove texture `tex_idx`, keeping at least one texture and keeping the
    /// selected index in range.
    pub fn remove_tex(&mut self, tex_idx: usize) {
        if self.textures.len() > 1 {
            self.textures.remove(tex_idx);
            if self.textures.selected_idx() >= self.textures.len() {
                self.textures.set_selected_idx(self.textures.len() - 1);
            }
        }
    }

    /// Rename texture `tex_idx`; silently ignored if out of range.
    pub fn set_tex_name(&mut self, tex_idx: usize, tex_name: String) {
        if let Some(tex) = self.textures.get_mut(tex_idx) {
            tex.name = tex_name;
        }
    }

    /// Iterator over attached property block IDs.
    pub fn properties(&self) -> impl Iterator<Item = &BlockId> {
        self.properties.iter()
    }

    pub fn add_property(&mut self, property_id: BlockId) {
        self.properties.push(property_id);
    }
}

impl DisplayNamed for Character {
    fn display_name(&self) -> &String {
        self.display_name()
    }

    fn set_display_name(&mut self, name: String) {
        self.set_display_name(name);
    }
}

impl CharacterTexture {
    /// Serialize to a TOML table: `name`, optional `texture_id`, `height`.
    async fn pack_to_toml(&self) -> toml::Value {
        let mut packed = toml::value::Table::new();
        packed.insert(String::from("name"), toml::Value::String(self.name.clone()));
        if let Some(texture_id) = &self.texture_id {
            packed.insert(
                String::from("texture_id"),
                toml::Value::String(texture_id.to_string()),
            );
        }
        packed.insert(
            String::from("height"),
            toml::Value::Float(self.height as f64),
        );
        toml::Value::Table(packed)
    }

    /// Deserialize from a TOML table; missing or malformed fields fall back
    /// to the defaults (empty name, no texture, height 1.0).
    async fn unpack_from_toml(packed: toml::Value) -> Self {
        let mut unpacked = Self {
            name: String::new(),
            texture_id: None,
            height: 1.0,
        };
        if let toml::Value::Table(mut packed) = packed {
            if let Some(toml::Value::String(name)) = packed.remove("name") {
                unpacked.name = name;
            }
            if let Some(toml::Value::String(texture_id)) = packed.remove("texture_id") {
                if let Some(texture_id) = ResourceId::from_str(&texture_id) {
                    unpacked.texture_id = Some(texture_id);
                }
            }
            if let Some(toml::Value::Float(height)) = packed.remove("height") {
                unpacked.height = height as f32;
            }
        }
        unpacked
    }
}

impl Character {
    /// Serialize the character to a TOML table.
    ///
    /// NOTE(review): the key "propaties" is a misspelling of "properties",
    /// but it is a persisted save-file key used symmetrically by
    /// `unpack_from_toml`, so renaming it would break existing data.
    pub async fn pack_to_toml(&self) -> toml::Value {
        let mut packed = toml::value::Table::new();
        packed.insert(String::from("size"), toml::Value::Float(self.size as f64));
        packed.insert(String::from("name"), toml::Value::String(self.name.clone()));
        packed.insert(
            String::from("description"),
            toml::Value::String(self.description.clone()),
        );
        packed.insert(
            String::from("display_name"),
            toml::Value::String(self.display_name.clone()),
        );
        let props = {
            let mut props = toml::value::Array::new();
            for prop_id in self.properties.iter() {
                props.push(toml::Value::String(prop_id.to_string()));
            }
            props
        };
        packed.insert(String::from("propaties"), toml::Value::Array(props));
        let textures = {
            let mut textures = toml::value::Table::new();
            textures.insert(
                String::from("_selected_idx"),
                toml::Value::Integer(self.textures.selected_idx() as i64),
            );
            let payload = {
                let mut payload = toml::value::Array::new();
                for texture in self.textures.iter() {
                    payload.push(texture.pack_to_toml().await);
                }
                payload
            };
            textures.insert(String::from("_payload"), toml::Value::Array(payload));
            textures
        };
        packed.insert(String::from("textures"), toml::Value::Table(textures));
        toml::Value::Table(packed)
    }

    /// Deserialize a character; unknown/missing fields keep `new()` defaults.
    pub async fn unpack_from_toml(packed: toml::Value) -> Self {
        let mut unpacked = Self::new();
        if let toml::Value::Table(mut packed) = packed {
            if let Some(toml::Value::Float(size)) = packed.remove("size") {
                unpacked.size = size as f32;
            }
            if let Some(toml::Value::String(name)) = packed.remove("name") {
                unpacked.name = name;
            }
            if let Some(toml::Value::String(description)) = packed.remove("description") {
                unpacked.description = description;
            }
            if let Some(toml::Value::String(display_name)) = packed.remove("display_name") {
                unpacked.display_name = display_name;
            }
            if let Some(toml::Value::Array(packed_props)) = packed.remove("propaties") {
                let mut props = vec![];
                for packed_prop_id in packed_props {
                    if let toml::Value::String(prop_id) = packed_prop_id {
                        if let Some(prop_id) = BlockId::from_str(&prop_id) {
                            props.push(prop_id);
                        }
                    }
                }
                unpacked.properties = props;
            }
            if let Some(toml::Value::Table(mut textures)) = packed.remove("textures") {
                let selected_idx =
                    if let Some(toml::Value::Integer(x)) = textures.remove("_selected_idx") {
                        x.max(0) as usize
                    } else {
                        0
                    };
                let payload =
                    if let Some(toml::Value::Array(textures)) = textures.remove("_payload") {
                        let mut payload = vec![];
                        for texture in textures {
                            payload.push(CharacterTexture::unpack_from_toml(texture).await);
                        }
                        payload
                    } else {
                        vec![]
                    };
                if payload.len() > 0 {
                    // NOTE(review): `min(payload.len())` still allows an index
                    // equal to len (one past the last valid slot); the largest
                    // valid index is len - 1. Verify that SelectList::new
                    // tolerates idx == len before changing this.
                    let selected_idx = selected_idx.min(payload.len());
                    unpacked.textures = SelectList::new(payload, selected_idx);
                }
            }
        }
        unpacked
    }
}
//! Histogram type of plotting: point cloud, density or probability density function
//! (pdf) and cummulative density function (cdf).
//!
//! # Examples
//!
//! Quick plot.
//! ```no_run
//! use preexplorer::prelude::*;
//! pre::Density::new((0..10)).plot("my_identifier").unwrap();
//! ```
//!
//! Compare ``Density``s.
//! ```no_run
//! use preexplorer::prelude::*;
//! pre::Densities::new(vec![
//!     pre::Density::new((0..10)),
//!     pre::Density::new((0..10)),
//! ])
//! .plot("my_identifier").unwrap();
//! ```

// Traits
pub use crate::traits::{Configurable, Plotable, Saveable};
use core::fmt::Display;
use core::ops::Add;

// Structs
pub use comparison::Densities;

/// Compare various ``Distribution`` types together.
pub mod comparison;

/// Akin to a histogram: point cloud, density and cummulative distribution.
#[derive(Debug, PartialEq, Clone)]
pub struct Density<T>
where
    T: Display + Clone,
{
    // Raw sample values; each is written verbatim to the data file.
    pub(crate) realizations: Vec<T>,
    config: crate::configuration::Configuration,
}

impl<T> Density<T>
where
    T: Display + Clone,
{
    /// Create a new ``Density``.
    ///
    /// # Examples
    ///
    /// From a complicated computation.
    /// ```no_run
    /// use preexplorer::prelude::*;
    /// use rand_distr::Exp1;
    /// use rand::prelude::*;
    /// let simulation_results: Vec<f64> = (0..100).map(|_| thread_rng().sample(Exp1)).collect();
    /// pre::Density::new(simulation_results)
    ///     .set_title("Empirical Exponential 1")
    ///     .plot("my_identifier")
    ///     .unwrap();
    /// ```
    pub fn new<I>(realizations: I) -> Density<T>
    where
        I: IntoIterator<Item = T>,
    {
        let realizations: Vec<T> = realizations.into_iter().collect();
        let mut config = crate::configuration::Configuration::default();
        // All four representations (cdf, pdf, cloud, bins) are drawn by default.
        config.set_custom("cdf", "true");
        config.set_custom("pdf", "true");
        config.set_custom("cloud", "true");
        config.set_custom("bins", "true");
        Density {
            realizations,
            config,
        }
    }

    /// Controls the plotting of the cummulative density function (cdf).
    /// If true, it will appear in the plotting, otherwise it will not.
    ///
    /// # Default
    ///
    /// The default value is true.
    /// ```
    /// # use preexplorer::prelude::*;
    /// let mut den = pre::Density::new((0..10));
    /// assert_eq!(den.cdf(), true);
    /// den.set_cdf(false);
    /// assert_eq!(den.cdf(), false);
    /// ```
    pub fn set_cdf(&mut self, cdf: bool) -> &mut Self {
        self.configuration_mut().set_custom("cdf", cdf.to_string());
        self
    }

    /// Controls the plotting of the probability density function (pdf).
    /// If true, it will appear in the plotting, otherwise it will not.
    ///
    /// # Default
    ///
    /// The default value is true.
    /// ```
    /// # use preexplorer::prelude::*;
    /// let mut den = pre::Density::new((0..10));
    /// assert_eq!(den.pdf(), true);
    /// den.set_pdf(false);
    /// assert_eq!(den.pdf(), false);
    /// ```
    pub fn set_pdf(&mut self, pdf: bool) -> &mut Self {
        self.configuration_mut().set_custom("pdf", pdf.to_string());
        self
    }

    /// Controls the plotting of the point cloud.
    /// If true, it will appear in the plotting, otherwise it will not.
    ///
    /// # Default
    ///
    /// The default value is true.
    /// ```
    /// # use preexplorer::prelude::*;
    /// let mut den = pre::Density::new((0..10));
    /// assert_eq!(den.cloud(), true);
    /// den.set_cloud(false);
    /// assert_eq!(den.cloud(), false);
    /// ```
    pub fn set_cloud(&mut self, cloud: bool) -> &mut Self {
        self.configuration_mut()
            .set_custom("cloud", cloud.to_string());
        self
    }

    /// Controls the plotting of bins representation of the density.
    /// If true, it will appear in the plotting, otherwise it will not.
    ///
    /// # Default
    ///
    /// The default value is true.
    /// ```
    /// # use preexplorer::prelude::*;
    /// let mut den = pre::Density::new((0..10));
    /// assert_eq!(den.bins(), true);
    /// den.set_bins(false);
    /// assert_eq!(den.bins(), false);
    /// ```
    // NOTE(review): the parameter is named `cloud` but it controls the
    // "bins" flag; renaming it would be an API-visible change.
    pub fn set_bins(&mut self, cloud: bool) -> &mut Self {
        self.configuration_mut()
            .set_custom("bins", cloud.to_string());
        self
    }

    /// Whether the point cloud will be plotted.
    pub fn cloud(&self) -> bool {
        match self.configuration().custom("cloud") {
            Some(cloud) => std::str::FromStr::from_str(cloud).unwrap(),
            // `new` always sets the key, so it is always present.
            None => unreachable!(),
        }
    }

    /// Whether the pdf will be plotted.
    pub fn pdf(&self) -> bool {
        match self.configuration().custom("pdf") {
            Some(pdf) => std::str::FromStr::from_str(pdf).unwrap(),
            None => unreachable!(),
        }
    }

    /// Whether the cdf will be plotted.
    pub fn cdf(&self) -> bool {
        match self.configuration().custom("cdf") {
            Some(cdf) => std::str::FromStr::from_str(cdf).unwrap(),
            None => unreachable!(),
        }
    }

    /// Whether the bins representation will be plotted.
    pub fn bins(&self) -> bool {
        match self.configuration().custom("bins") {
            Some(bins) => std::str::FromStr::from_str(bins).unwrap(),
            None => unreachable!(),
        }
    }
}

impl<T> Add for Density<T>
where
    T: Display + Clone,
{
    type Output = crate::Densities<T>;

    /// Combine two densities into a `Densities` comparison.
    fn add(self, other: crate::Density<T>) -> crate::Densities<T> {
        let mut cmp = self.into();
        cmp += other;
        cmp
    }
}

impl<T> Configurable for Density<T>
where
    T: Display + Clone,
{
    fn configuration_mut(&mut self) -> &mut crate::configuration::Configuration {
        &mut self.config
    }
    fn configuration(&self) -> &crate::configuration::Configuration {
        &self.config
    }
}

impl<T> Saveable for Density<T>
where
    T: Display + Clone,
{
    /// One realization per line, rendered via `Display`.
    fn plotable_data(&self) -> String {
        let mut raw_data = String::new();
        for value in self.realizations.clone() {
            raw_data.push_str(&format!("{}\n", value));
        }
        raw_data
    }
}

impl<T> Plotable for Density<T>
where
    T: Display + Clone,
{
    /// Construct a suitable plot script for the struct.
    ///
    /// # Remarks
    ///
    /// Only works for real numbers.
fn plot_script(&self) -> String { let mut gnuplot_script = self.opening_plot_script(); gnuplot_script += "set zeroaxis\n"; let mut realizations = self.realizations.clone().into_iter(); match realizations.next() { Some(_) => { // Gnuplot scrpit gnuplot_script += "# Warning: this script only works when the data are real numbers. \n\n"; let dashtype = match self.dashtype() { Some(dashtype) => dashtype, None => 1, }; gnuplot_script += "set style fill solid 0.5\n\n"; gnuplot_script += "plot "; if self.cloud() { gnuplot_script += &format!("{:?} using 1:(0.25*rand(0)-.35)", self.data_path(),); } if self.pdf() { if self.cloud() { gnuplot_script += ", \\\n\t "; } gnuplot_script += &format!( "{:?} using 1:(1./{}) smooth kdensity with {} dashtype {}", self.data_path(), realizations.len(), self.style(), dashtype, ); } if self.cdf() { if self.cloud() || self.pdf() { gnuplot_script += ", \\\n\t "; } gnuplot_script += &format!("{:?} using 1:(1.) smooth cnorm", self.data_path(),); } if self.bins() { if self.cloud() || self.pdf() || self.cdf() { gnuplot_script += ", \\\n\t "; } gnuplot_script += &format!( "{:?} using 1:(1./{}) bins with boxes", self.data_path(), realizations.len() ); } gnuplot_script += "\n"; } None => { std::io::Error::new( std::io::ErrorKind::Other, "No data to plot: There are no realizations, so no script can be prepared.", ); } } // Gnuplot section gnuplot_script += &self.ending_plot_script(); gnuplot_script } }
use core::cell::UnsafeCell;
use core::intrinsics::{volatile_load, volatile_store};

/// Cell whose reads and writes go through volatile loads/stores, so the
/// compiler will not elide, merge, or reorder the accesses (useful for
/// memory-mapped I/O registers). Depends on the unstable `core::intrinsics`
/// API, so a nightly compiler is required.
pub struct VolatileCell<T>(UnsafeCell<T>);

impl<T> VolatileCell<T> {
    /// Volatile read of the contained value.
    ///
    /// # Safety
    ///
    /// The caller must ensure the cell holds a valid `T` and that a volatile
    /// read of the underlying location is sound (no data race with writers).
    pub unsafe fn get(&self) -> T
        where T: Copy
    {
        volatile_load(self.0.get())
    }

    /// Volatile write of `v` into the cell.
    ///
    /// # Safety
    ///
    /// The caller must ensure no other access to the cell overlaps this
    /// write — `&self` alone does not prevent aliased mutation here.
    pub unsafe fn set(&self, v: T) {
        volatile_store(self.0.get(), v)
    }
}
pub mod ast; pub mod semantics;
// Reactor task registry: tracks I/O tasks registered with the mio-based
// event loop. Tasks flow through an MPSC queue (`list`) for registration/
// deregistration and a local doubly-linked list (`local_list`) that keeps
// them alive while the reactor may still hand out their tokens.
use super::Handle;
use futures_util::task::AtomicWaker;
use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};
use std::io;
use std::sync::Arc;
use std::task::{Context, Poll};

// Generates the intrusive MPSC list types (TaskListLink/TaskListHead) linked
// through `InnerTask::next`.
fumio_utils::mpsc! {
    mod mpsc_task_list {
        link TaskListLink;
        head TaskListHead;
        member next of InnerTask;
    }
}

// Generates the local (single-thread) doubly-linked list types linked
// through `InnerTask::local_link`.
fumio_utils::local_dl_list! {
    mod loc_dl_list {
        link LocalTaskListLink;
        head LocalTaskListHead;
        member local_link of InnerTask;
    }
}

#[derive(Debug)]
pub(super) struct Tasks {
    // tasks to process (new, deregister)
    list: TaskListHead,
    // make sure to keep Tasks alive until they are deregistered, so we can reinterpret tokens as
    // pointers to tasks.
    local_list: LocalTaskListHead,
}

// SAFETY: `list` is an MPSC queue designed for cross-thread pushes;
// `local_list` is only touched from the reactor thread — TODO confirm this
// invariant is upheld by all callers.
unsafe impl Send for Tasks {}
unsafe impl Sync for Tasks {}

impl Tasks {
    pub(super) fn new() -> Self {
        Self {
            list: TaskListHead::new(),
            local_list: LocalTaskListHead::new(),
        }
    }

    /// Queue a freshly created task for registration.
    ///
    /// NOTE(review): `compare_and_swap` is deprecated in favor of
    /// `compare_exchange`; left untouched here to preserve the exact
    /// ordering semantics of this lock-free state machine.
    pub(super) fn add_task(&self, task: ReactorTask) {
        let prev = task.inner.state.compare_and_swap(0, STATE_QUEUED, Ordering::Release);
        if 0 != prev {
            // someone else queued it... wtf. anyway, no need to continue
            return;
        }
        self.list.push(task.inner);
    }

    /// Mark a task deregistered and queue it so the event loop drops its
    /// local reference.
    pub(super) fn deregister_task(&self, task: ReactorTask) {
        let prev = task.inner.state.swap(STATE_QUEUED | STATE_DEREGISTERED, Ordering::Release);
        if 0 != prev {
            // either already deregistered or already queued
            // not queuing again
            return;
        }
        self.list.push(task.inner);
    }

    // call after an event loop iteration is done
    pub(super) fn cleanup_tasks(&self) {
        for task_inner in unsafe { self.list.start_pop() } {
            // Clear the QUEUED bit; it must have been set by the producer.
            let prev = task_inner.state.fetch_and(!STATE_QUEUED, Ordering::Acquire);
            assert!(0 != STATE_QUEUED & prev, "invalid internal state");
            if 0 != STATE_DEREGISTERED & prev {
                self.local_remove(task_inner)
            } else {
                self.local_add(task_inner)
            }
        }
    }

    /// Transfer ownership of one Arc reference into the local list so the
    /// task outlives any token pointing at it.
    fn local_add(&self, task_inner: Arc<InnerTask>) {
        if task_inner.local_link.is_unlinked() {
            // move reference to local list
            let task_inner = ManuallyDrop::new(task_inner);
            unsafe {
                self.local_list.append(&task_inner);
            }
        }
    }

    /// Unlink a task from the local list and release the Arc reference the
    /// list was holding.
    #[allow(clippy::needless_pass_by_value)]
    fn local_remove(&self, task_inner: Arc<InnerTask>) {
        if !task_inner.local_link.is_unlinked() {
            unsafe {
                task_inner.local_link.unlink();
            }
            // SAFETY: reconstructs (and immediately drops) the Arc reference
            // that `local_add` leaked into the list.
            unsafe {
                Arc::from_raw(&*task_inner);
            } // drop refcount hold by list
        }
    }
}

impl Drop for Tasks {
    fn drop(&mut self) {
        // Drain the pending queue first, then release every reference the
        // local list still owns.
        self.cleanup_tasks();
        while let Some(task_inner) = unsafe { self.local_list.pop_front() } {
            let _task_inner = unsafe { Arc::from_raw(task_inner) };
        }
    }
}

// State bits for `InnerTask::state`.
const STATE_DEREGISTERED: u8 = 0b01;
const STATE_QUEUED: u8 = 0b10;

#[derive(Debug)]
struct InnerTask {
    // Combination of STATE_* bits.
    state: AtomicU8,
    // Link for the cross-thread registration queue.
    next: TaskListLink,
    // Link for the reactor-local keep-alive list.
    local_link: LocalTaskListLink,
    reactor: Handle,
    // Readiness bits this task cares about, split by direction.
    read_mask: usize,
    write_mask: usize,
    read_readiness: AtomicUsize,
    read_waker: AtomicWaker,
    write_readiness: AtomicUsize,
    write_waker: AtomicWaker,
}

/// Shared handle to a registered I/O task.
#[derive(Debug, Clone)]
pub(super) struct ReactorTask {
    inner: Arc<InnerTask>,
}

impl ReactorTask {
    pub(super) fn new(reactor: Handle, read_mask: mio::Ready, write_mask: mio::Ready) -> Self {
        let inner = Arc::new(InnerTask {
            state: AtomicU8::new(0),
            next: TaskListLink::new(),
            local_link: LocalTaskListLink::new(),
            reactor,
            read_mask: read_mask.as_usize(),
            write_mask: write_mask.as_usize(),
            read_readiness: AtomicUsize::new(0),
            read_waker: AtomicWaker::new(),
            write_readiness: AtomicUsize::new(0),
            write_waker: AtomicWaker::new(),
        });
        Self { inner }
    }

    pub(super) fn reactor(&self) -> &Handle {
        &self.inner.reactor
    }

    /// Atomically consume the accumulated read readiness bits.
    fn take_read_ready(&self) -> mio::Ready {
        mio::Ready::from_usize(self.inner.read_readiness.swap(0, Ordering::Relaxed))
    }

    pub(super) fn clear_read_ready(&self) -> io::Result<mio::Ready> {
        Ok(self.take_read_ready())
    }

    /// Poll for read readiness; registers the waker and re-checks to close
    /// the race between check and registration.
    pub(super) fn poll_read_ready(&self, context: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
        let ready = self.take_read_ready();
        if !ready.is_empty() {
            return Poll::Ready(Ok(ready));
        }
        self.inner.read_waker.register(context.waker());
        let ready = self.take_read_ready();
        if !ready.is_empty() {
            return Poll::Ready(Ok(ready));
        }
        self.inner.reactor.expect_upgrade()?; // make sure reactor still lives
        Poll::Pending
    }

    /// Atomically consume the accumulated write readiness bits.
    fn take_write_ready(&self) -> mio::Ready {
        mio::Ready::from_usize(self.inner.write_readiness.swap(0, Ordering::Relaxed))
    }

    pub(super) fn clear_write_ready(&self) -> io::Result<mio::Ready> {
        Ok(self.take_write_ready())
    }

    /// Write-side counterpart of `poll_read_ready` (same double-check).
    pub(super) fn poll_write_ready(&self, context: &mut Context<'_>) -> Poll<io::Result<mio::Ready>> {
        let ready = self.take_write_ready();
        if !ready.is_empty() {
            return Poll::Ready(Ok(ready));
        }
        self.inner.write_waker.register(context.waker());
        let ready = self.take_write_ready();
        if !ready.is_empty() {
            return Poll::Ready(Ok(ready));
        }
        self.inner.reactor.expect_upgrade()?; // make sure reactor still lives
        Poll::Pending
    }

    // token doesn't own a reference!
    pub(super) fn as_token(&self) -> mio::Token {
        let raw: *const InnerTask = &*self.inner;
        mio::Token(raw as usize)
    }

    // use only in event loop between poll and cleanup_tasks
    pub(super) fn from_token(token: mio::Token) -> Self {
        // SAFETY: the token was produced by `as_token` and the local list
        // still keeps the InnerTask alive in this window.
        let raw: *const InnerTask = token.0 as _;
        let inner = unsafe { Arc::from_raw(raw) };
        // increase ref count by 1
        std::mem::forget(inner.clone());
        Self { inner }
    }

    /// Merge new readiness bits (masked per direction) and wake the waiters.
    pub(super) fn update_ready(&self, readiness: mio::Ready) {
        let read_bits = self.inner.read_mask & readiness.as_usize();
        if 0 != read_bits {
            self.inner.read_readiness.fetch_or(read_bits, Ordering::Relaxed);
            self.inner.read_waker.wake();
        }
        let write_bits = self.inner.write_mask & readiness.as_usize();
        if 0 != write_bits {
            self.inner.write_readiness.fetch_or(write_bits, Ordering::Relaxed);
            self.inner.write_waker.wake();
        }
    }
}

impl std::cmp::PartialEq for ReactorTask {
    // Identity comparison: two handles are equal iff they share the Arc.
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}

impl std::cmp::Eq for ReactorTask { }

impl std::hash::Hash for ReactorTask {
    // Hash by token (the InnerTask address), consistent with PartialEq.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.as_token().hash(state)
    }
}
impl GPU {
    /// Print a crude ASCII-art dump of the first 128 tiles of VRAM to
    /// stdout: each tile is rendered as 8 rows of 4 bytes, one block glyph
    /// per nibble (filled when the nibble is non-zero).
    ///
    /// Panics if `vram` holds fewer than 128 * 128 bytes, matching the
    /// direct slicing behavior.
    pub fn dump_vram(&self) {
        // Map a nibble's truthiness to a printable glyph.
        let glyph = |on: bool| if on { "█" } else { " " };
        for tile_idx in 0..128 {
            let tile = &self.vram[(tile_idx * 128)..(tile_idx + 1) * 128];
            for row in 0..8 {
                for col in 0..4 {
                    let b = tile[(row * 8) + col];
                    let low = glyph(b & 0x0F != 0);
                    let high = glyph(b >> 4 != 0);
                    print!("{} {} ", low, high);
                }
                print!("\r\n");
            }
        }
    }
}

/// Minimal GPU state: object attribute memory and video RAM.
#[derive(Default)]
pub struct GPU {
    pub oam: Vec<u8>,
    pub vram: Vec<u8>
}
use std::env; use std::error::Error; use uuid::Uuid; pub struct Config { count: u8, } impl Config { pub fn count(&self) -> &u8 { &self.count } } impl Config { pub fn new(mut args: env::Args) -> Result<Self, Box<dyn Error>> { args.next(); let count = match args.next() { Some(arg) => arg.parse::<u8>()?, None => 1, }; Ok(Config { count }) } } pub fn generate_uuid(count: &u8) -> Result<Vec<String>, Box<dyn Error>> { let mut uuids: Vec<String> = vec![]; for _ in 0..*count { uuids.push(Uuid::new_v4().to_hyphenated().to_string()) } Ok(uuids) } pub fn run(config: &Config) -> Result<(), Box<dyn Error>> { let uuids = generate_uuid(&config.count())?; // Print message let message = match config.count() { 1 => String::from("Generated 1 UUID"), count => format!("Generated {} UUIDs", count), }; match term_size::dimensions() { Some((w, _)) => { let dash_size = ((w - message.len()) / 2) - 2; let dash = (0..dash_size).map(|_| "-").collect::<String>(); println!("{}| {} |{}", dash, message, dash); } _ => println!("{}", message), }; // Print UUID(s) for uuid in uuids.iter() { println!("{}", uuid); } Ok(()) } #[cfg(test)] mod tests { use regex::Regex; use super::generate_uuid; fn _check_regex(test: &String) -> bool { let regex = Regex::new(r"^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$") .unwrap(); regex.is_match(test) } #[test] fn generate_1_uuid() { let uuids = generate_uuid(&(1 as u8)).unwrap(); assert_eq!(uuids.len(), 1); assert!(_check_regex(&uuids[0])); } #[test] fn generate_3_uuids() { let uuids = generate_uuid(&(3 as u8)).unwrap(); assert_eq!(uuids.len(), 3); assert_ne!(uuids.windows(2).any(|w| w[0] == w[1]), true); for uuid in uuids.iter() { assert!(_check_regex(&uuid)); } } }
// Terminal UI for the soldier command view, built on crossterm's macro API.
// Screen coordinates below are fixed to the BORDERS frame layout.
extern crate crossterm;

use std::io::{stdout, Write};

use soldier::Soldier;
use utilities::Position;

use self::crossterm::{
    execute, style, Clear, ClearType, Color, Goto, PrintStyledFont, Show
};

/// Clear the screen and draw the static frame.
pub fn init_ui() {
    execute!(
        stdout(),
        Clear(ClearType::All),
        Goto(0, 0),
        PrintStyledFont(style(format!("{}", BORDERS.join("\n\r"))).with(Color::White)),
        Show
    ).unwrap();
}

/// Fill the "Status" panel with the given soldier's details, then move the
/// cursor back to the soldier's map position.
pub fn draw_details(soldier: &Soldier) {
    execute!(
        stdout(),
        Goto(54,1),
        PrintStyledFont(style(format!("{}", "Name:")).with(Color::White)),
        // Blank the name field before writing the new value.
        Goto(55,2),
        PrintStyledFont(style(format!("{}", " ")).with(Color::White)),
        Goto(55,2),
        PrintStyledFont(style(format!("{}", soldier.name)).with(Color::White)),
        Goto(soldier.pos.x, soldier.pos.y),
        Show
    ).unwrap();
}

/// Blank the "Status" panel and restore the cursor to `initial_position`.
pub fn clear_details(initial_position: &Position) {
    execute!(
        stdout(),
        Goto(54,1),
        PrintStyledFont(style(format!("{}", " ")).with(Color::White)),
        Goto(55,2),
        PrintStyledFont(style(format!("{}", " ")).with(Color::White)),
        Goto(initial_position.x,initial_position.y),
        Show
    ).unwrap();
}

// Static frame rows, joined with "\n\r" by `init_ui`.
// NOTE(review): verify the number of rows matches the declared length 19.
const BORDERS: [&str; 19] = [
    "╔═╡ R-COM ╞═══════════════════════════════════════════╦═╡ Status ╞══════╗",
    "║ ║ Name: ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "║ ║ ║",
    "╠═══╡ Commands ╞════════════════════════════════════╩═════════════════╣",
    "║ ║",
    "╚═══════════════════════════════════════════════════════════════════╝",
];