CombinedText
stringlengths
4
3.42M
 use std::mem; use super::constants::*; #[derive(Debug)] enum LcdMode { Hblank, Vblank, SearchingOam, Transfer, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Lcd { counter: i32, pub vblank_sync: bool, pub buffer: Vec<u8>, last_frame: Vec<u8>, oam: Vec<u8>, vram_tile_data: Vec<u8>, vram_tile_table: Vec<u8>, lcdc: u8, stat: u8, scx: u8, scy: u8, ly: u8, lyc: u8, bgp: u8, obp0: u8, obp1: u8, wy: u8, wx: u8, pub show_bg: bool, pub show_sprites: bool, pub show_window: bool } #[derive(Debug)] struct SpriteOamEntry { x: u8, y: u8, tile_id: u8, flags: u8, } impl SpriteOamEntry { fn is_behind_bg(&self) -> bool { self.flags & 0x80 != 0 } fn is_vertically_flipped(&self) -> bool { self.flags & 0x40 != 0 } fn is_horizontally_flipped(&self) -> bool { self.flags & 0x20 != 0 } fn palette_number(&self) -> u8 { (self.flags >> 4) & 1 } fn get_y(&self) -> i32 { self.y as i32 - 16 } fn get_x(&self) -> i32 { self.x as i32 - 8 } fn covers_line(&self, sprite_height: i32, line_y: i32) -> bool { let sprite_y = self.get_y(); (line_y >= sprite_y) && (line_y < sprite_y + sprite_height) } } impl Lcd { pub fn new() -> Lcd { let mut lcd = Lcd { counter: 0, vblank_sync: false, buffer: vec![0u8; LCD_WIDTH * LCD_HEIGHT], last_frame: vec![0u8; LCD_WIDTH * LCD_HEIGHT], oam: vec![0u8; 0xa0], // De-interleaved and expanded pixels vram_tile_data: vec![0u8; 384 * 8 * 8], vram_tile_table: vec![0u8; 0x800], lcdc: 0x91, stat: 0, scx: 0, scy: 0, ly: 0, lyc: 0, bgp: 0xfc, obp0: 0xff, obp1: 0xff, wy: 0, wx: 0, show_bg: true, show_sprites: true, show_window: true }; lcd.set_mode(LcdMode::Hblank); lcd } pub fn read(&self, addr: u16) -> u8 { match addr { MEM_LCD_IO_LCDC => self.lcdc, MEM_LCD_IO_STAT => self.stat, MEM_LCD_IO_SCY => self.scy, MEM_LCD_IO_SCX => self.scx, MEM_LCD_IO_LY => self.ly, MEM_LCD_IO_LYC => self.lyc, MEM_LCD_IO_BGP => self.bgp, MEM_LCD_IO_OBP0 => self.obp0, MEM_LCD_IO_OBP1 => self.obp1, MEM_LCD_IO_WY => self.wy, MEM_LCD_IO_WX => self.wx, 
MEM_LCD_VRAM_TILE_DATA_START...MEM_LCD_VRAM_TILE_DATA_END => { self.read_vram_tile_data(addr) } MEM_LCD_VRAM_TILE_MAP_START...MEM_LCD_VRAM_TILE_MAP_END => { self.vram_tile_table[(addr - MEM_LCD_VRAM_TILE_MAP_START) as usize] } MEM_LCD_OAM_START...MEM_LCD_OAM_END => self.read_oam(addr), _ => 0xff, } } pub fn write(&mut self, addr: u16, value: u8) { match addr { MEM_LCD_IO_LCDC => self.lcdc = value, MEM_LCD_IO_STAT => self.stat = (value & 0b1111000) | (self.stat & 0b111), MEM_LCD_IO_SCY => self.scy = value, MEM_LCD_IO_SCX => self.scx = value, MEM_LCD_IO_LY => self.ly = 0, // LY (writing will reset the counter) MEM_LCD_IO_LYC => self.lyc = value, MEM_LCD_IO_BGP => self.bgp = value, MEM_LCD_IO_OBP0 => self.obp0 = value, MEM_LCD_IO_OBP1 => self.obp1 = value, MEM_LCD_IO_WX => self.wx = value, MEM_LCD_IO_WY => self.wy = value, MEM_LCD_VRAM_TILE_DATA_START...MEM_LCD_VRAM_TILE_DATA_END => { self.write_vram_tile_data(addr, value) } MEM_LCD_VRAM_TILE_MAP_START...MEM_LCD_VRAM_TILE_MAP_END => { self.vram_tile_table[(addr - MEM_LCD_VRAM_TILE_MAP_START) as usize] = value } MEM_LCD_OAM_START...MEM_LCD_OAM_END => self.write_oam(addr, value), _ => (), } } pub fn try_get_buffer(&mut self) -> Option<Vec<u8>> { if self.vblank_sync { self.vblank_sync = false; Some(mem::replace(&mut self.last_frame, vec![0u8; LCD_WIDTH * LCD_HEIGHT])) } else { None } } fn read_oam(&self, addr: u16) -> u8 { self.oam[(addr - MEM_LCD_OAM_START) as usize] } fn write_oam(&mut self, addr: u16, value: u8) { self.oam[(addr - MEM_LCD_OAM_START) as usize] = value; } fn read_vram_tile_data(&self, addr: u16) -> u8 { let offset = ((addr & 0xfffe) - MEM_LCD_VRAM_TILE_DATA_START) as usize * 4; let mut value = 0; let set_mask = 1u8 << (addr & 1); for i in 0..8 { value <<= 1; if (self.vram_tile_data[offset + i] & set_mask) != 0 { value |= 0x1; } } value // self.vram[(addr - MEM_LCD_VRAM_START) as usize] } fn write_vram_tile_data(&mut self, addr: u16, value: u8) { let offset = ((addr & 0xfffe) - 
MEM_LCD_VRAM_TILE_DATA_START) as usize * 4; let mut v = value; let set_mask = 1u8 << (addr & 1); let clear_mask = !set_mask; for i in 0..8 { if v & 0x80 == 0 { self.vram_tile_data[offset + i] &= clear_mask; } else { self.vram_tile_data[offset + i] |= set_mask; } v <<= 1; } // assert_eq!(value, self.read_vram_tile_data(addr)); // self.vram[(addr - MEM_LCD_VRAM_START) as usize] = value; } fn is_window_enabled(&self) -> bool { (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE) != 0 } fn get_mode(&self) -> LcdMode { match self.stat & 3 { 0b00 => LcdMode::Hblank, 0b01 => LcdMode::Vblank, 0b10 => LcdMode::SearchingOam, 0b11 => LcdMode::Transfer, _ => unreachable!(), } } fn get_oam_entry(&self, index: usize) -> SpriteOamEntry { let offset = index * 4; SpriteOamEntry { y: self.oam[offset], x: self.oam[offset + 1], tile_id: self.oam[offset + 2], flags: self.oam[offset + 3], } } fn get_sprite_height(&self) -> i32 { if (self.lcdc & 0x04) == 0 { 8 } else { 16 } } fn get_oam_entries(&self, line_y: i32) -> Vec<SpriteOamEntry> { let sprite_height = self.get_sprite_height(); let mut entries: Vec<SpriteOamEntry> = (0..40) .map(|idx| self.get_oam_entry(idx)) .filter(|entry| entry.covers_line(sprite_height, line_y)) .collect(); entries.sort_by_key(|entry| entry.x); if entries.len() > 10 { entries.truncate(10); } entries } fn draw_sprites(&mut self, line_y: i32) { let sprite_height = self.get_sprite_height(); let entries = self.get_oam_entries(line_y); let line_start = line_y * LCD_WIDTH as i32; for entry in entries { let palette = if entry.palette_number() == 0 { self.obp0 } else { self.obp1 }; let tile_id = if sprite_height == 16 { entry.tile_id & 0xfe } else { entry.tile_id }; let sprite_y = entry.get_y(); let sprite_x = entry.get_x(); let tile_y = line_y - sprite_y; let tile_y = if entry.is_vertically_flipped() { (sprite_height - 1) - tile_y } else { tile_y }; let tile_offset = (tile_id as i32 * 8 * 8) + tile_y * 8; for tile_x in 0..8 { let screen_x = sprite_x + tile_x; let tile_x = if 
entry.is_horizontally_flipped() { 7 - tile_x } else { tile_x }; if screen_x >= 0 && screen_x < LCD_WIDTH as i32 { let value = self.vram_tile_data[(tile_offset + tile_x) as usize]; let screen_offset = (line_start + screen_x) as usize; if value != 0 && (!entry.is_behind_bg() || self.buffer[screen_offset] >= 0x80) { let color = Lcd::palette_lookup(palette, value); self.buffer[screen_offset] = color; } } } // l0 = self.vram[] } } fn set_mode(&mut self, mode: LcdMode) { trace!("Entering lcd mode {:?}", mode); let stat = self.stat & !0x03; match mode { LcdMode::Hblank => { self.counter += 207; self.stat = stat | 0b00; } LcdMode::Vblank => { self.counter += 456; self.stat = stat | 0b01; } LcdMode::SearchingOam => { self.counter += 80; self.stat = stat | 0b10; } LcdMode::Transfer => { self.counter += 169; self.stat = stat | 0b11; } } } fn inc_ly(&mut self) -> u8 { self.ly = (self.ly + 1) % 154; trace!("Lcd ly incremented to {}", self.ly); if self.ly == self.lyc { self.stat |= 0x04; if self.stat & 0x40 != 0 { return INT_MASK_LCDSTAT; } } else { self.stat &= !0x04; } 0 // No interrupt set } fn get_bg_tile_offset(&self, tile_id: u8) -> usize { if (self.lcdc & 0x10) == 0 { (0x1000 + ((tile_id as i8) as isize * 16)) as usize * 4 } else { (0x0000 + (tile_id as usize * 16)) * 4 } } fn get_window_at(&self, x: u8, y: u8) -> u8 { // Probably very unoptimized, shouldn't matter too much though.. 
let tile_base = if (self.lcdc & LCDC_WINDOW_TILE_MAP_DISPLAY_SELECT) == 0 { 0 } else { 0x400 }; let tile_id = self.vram_tile_table[tile_base + (((y & 0xf8) as usize) << 2) + ((x as usize) >> 3)]; let palette_idx = self.vram_tile_data[self.get_bg_tile_offset(tile_id) + (((y & 0x7) << 3) | (x & 0x7)) as usize]; // let line_offset = self.get_bg_tile_offset(tile_id) + ((y & 0x7) * 2) as usize; // let l0 = self.read_vram_tile_data(line_offset as u16 + MEM_LCD_VRAM_TILE_DATA_START); // let l1 = self.read_vram_tile_data(line_offset as u16 + 1 + MEM_LCD_VRAM_TILE_DATA_START); // let bit_offset = 7 - (x & 0x7); // let palette_idx = (((l0 >> bit_offset) & 1) | (((l1 >> bit_offset) & 1) << 1)) as usize; // Lcd::palette_lookup(self.bgp, palette_idx) } fn get_bg_at(&self, x: u8, y: u8) -> u8 { // Probably very unoptimized, shouldn't matter too much though.. let tile_base = if (self.lcdc & LCDC_BG_TILE_MAP_DISPLAY_SELECT) == 0 { 0 } else { 0x400 }; let tile_id = self.vram_tile_table[tile_base + (((y & 0xf8) as usize) << 2) + ((x as usize) >> 3)]; let palette_idx = self.vram_tile_data[self.get_bg_tile_offset(tile_id) + (((y & 0x7) << 3) | (x & 0x7)) as usize]; // let line_offset = self.get_bg_tile_offset(tile_id) + ((y & 0x7) * 2) as usize; // let l0 = self.read_vram_tile_data(line_offset as u16 + MEM_LCD_VRAM_TILE_DATA_START); // let l1 = self.read_vram_tile_data(line_offset as u16 + 1 + MEM_LCD_VRAM_TILE_DATA_START); // let bit_offset = 7 - (x & 0x7); // let palette_idx = (((l0 >> bit_offset) & 1) | (((l1 >> bit_offset) & 1) << 1)) as usize; // Lcd::palette_lookup(self.bgp, palette_idx) | if palette_idx == 0 { 0x80 } else { 0 } } fn palette_lookup(palette: u8, index: u8) -> u8 { (palette >> ((index & 0x03) * 2)) & 0x03 } pub fn tick(&mut self, cycles: usize) -> u8 { if self.lcdc & 0x80 == 0 { return 0; } trace!("Lcd tick"); let max_step: usize = 80; if cycles > max_step { return self.tick(max_step) | self.tick(cycles - max_step); } let mut ints = 0u8; self.counter -= cycles as 
i32; if self.counter <= 0 { let mode = self.get_mode(); match mode { LcdMode::Hblank => { // Transition from HBlank to either SearchingOam or Vblank // I think LY increments at this point ints |= self.inc_ly(); if self.ly >= 144 { self.vblank_sync = true; mem::swap(&mut self.buffer, &mut self.last_frame); self.set_mode(LcdMode::Vblank); ints |= INT_MASK_VBLANK; } else { self.set_mode(LcdMode::SearchingOam); } } LcdMode::Vblank => { ints |= self.inc_ly(); if self.ly == 0 { self.set_mode(LcdMode::SearchingOam); } else { self.set_mode(LcdMode::Vblank); } } LcdMode::SearchingOam => { self.set_mode(LcdMode::Transfer); } LcdMode::Transfer => { self.draw_line(); self.set_mode(LcdMode::Hblank); if self.stat & 0x08 != 0 { ints |= INT_MASK_LCDSTAT; } } } } ints } fn draw_line(&mut self) { let y = self.ly; let offset = y as usize * LCD_WIDTH; if self.show_bg { for x in 0..LCD_WIDTH { self.buffer[offset + x] = self.get_bg_at((x as u8).wrapping_add(self.scx), y.wrapping_add(self.scy)); } } if self.show_sprites { self.draw_sprites(y as i32); } if self.show_window { if self.is_window_enabled() && y >= self.wy { let win_y = y - self.wy; for x in (self.wx.saturating_sub(7) as usize)..LCD_WIDTH { self.buffer[offset + x] = self.get_window_at((x as u8).wrapping_sub(self.wx.wrapping_sub(7)), win_y); } } } } } Fix sprite ordering  use std::mem; use super::constants::*; #[derive(Debug)] enum LcdMode { Hblank, Vblank, SearchingOam, Transfer, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Lcd { counter: i32, pub vblank_sync: bool, pub buffer: Vec<u8>, last_frame: Vec<u8>, oam: Vec<u8>, vram_tile_data: Vec<u8>, vram_tile_table: Vec<u8>, lcdc: u8, stat: u8, scx: u8, scy: u8, ly: u8, lyc: u8, bgp: u8, obp0: u8, obp1: u8, wy: u8, wx: u8, pub show_bg: bool, pub show_sprites: bool, pub show_window: bool } #[derive(Debug)] struct SpriteOamEntry { x: u8, y: u8, tile_id: u8, flags: u8, index: u8, } impl SpriteOamEntry { fn is_behind_bg(&self) -> bool { self.flags & 0x80 != 0 } fn 
is_vertically_flipped(&self) -> bool { self.flags & 0x40 != 0 } fn is_horizontally_flipped(&self) -> bool { self.flags & 0x20 != 0 } fn palette_number(&self) -> u8 { (self.flags >> 4) & 1 } fn get_y(&self) -> i32 { self.y as i32 - 16 } fn get_x(&self) -> i32 { self.x as i32 - 8 } fn covers_line(&self, sprite_height: i32, line_y: i32) -> bool { let sprite_y = self.get_y(); (line_y >= sprite_y) && (line_y < sprite_y + sprite_height) } } impl Lcd { pub fn new() -> Lcd { let mut lcd = Lcd { counter: 0, vblank_sync: false, buffer: vec![0u8; LCD_WIDTH * LCD_HEIGHT], last_frame: vec![0u8; LCD_WIDTH * LCD_HEIGHT], oam: vec![0u8; 0xa0], // De-interleaved and expanded pixels vram_tile_data: vec![0u8; 384 * 8 * 8], vram_tile_table: vec![0u8; 0x800], lcdc: 0x91, stat: 0, scx: 0, scy: 0, ly: 0, lyc: 0, bgp: 0xfc, obp0: 0xff, obp1: 0xff, wy: 0, wx: 0, show_bg: true, show_sprites: true, show_window: true }; lcd.set_mode(LcdMode::Hblank); lcd } pub fn read(&self, addr: u16) -> u8 { match addr { MEM_LCD_IO_LCDC => self.lcdc, MEM_LCD_IO_STAT => self.stat, MEM_LCD_IO_SCY => self.scy, MEM_LCD_IO_SCX => self.scx, MEM_LCD_IO_LY => self.ly, MEM_LCD_IO_LYC => self.lyc, MEM_LCD_IO_BGP => self.bgp, MEM_LCD_IO_OBP0 => self.obp0, MEM_LCD_IO_OBP1 => self.obp1, MEM_LCD_IO_WY => self.wy, MEM_LCD_IO_WX => self.wx, MEM_LCD_VRAM_TILE_DATA_START...MEM_LCD_VRAM_TILE_DATA_END => { self.read_vram_tile_data(addr) } MEM_LCD_VRAM_TILE_MAP_START...MEM_LCD_VRAM_TILE_MAP_END => { self.vram_tile_table[(addr - MEM_LCD_VRAM_TILE_MAP_START) as usize] } MEM_LCD_OAM_START...MEM_LCD_OAM_END => self.read_oam(addr), _ => 0xff, } } pub fn write(&mut self, addr: u16, value: u8) { match addr { MEM_LCD_IO_LCDC => self.lcdc = value, MEM_LCD_IO_STAT => self.stat = (value & 0b1111000) | (self.stat & 0b111), MEM_LCD_IO_SCY => self.scy = value, MEM_LCD_IO_SCX => self.scx = value, MEM_LCD_IO_LY => self.ly = 0, // LY (writing will reset the counter) MEM_LCD_IO_LYC => self.lyc = value, MEM_LCD_IO_BGP => self.bgp = value, 
MEM_LCD_IO_OBP0 => self.obp0 = value, MEM_LCD_IO_OBP1 => self.obp1 = value, MEM_LCD_IO_WX => self.wx = value, MEM_LCD_IO_WY => self.wy = value, MEM_LCD_VRAM_TILE_DATA_START...MEM_LCD_VRAM_TILE_DATA_END => { self.write_vram_tile_data(addr, value) } MEM_LCD_VRAM_TILE_MAP_START...MEM_LCD_VRAM_TILE_MAP_END => { self.vram_tile_table[(addr - MEM_LCD_VRAM_TILE_MAP_START) as usize] = value } MEM_LCD_OAM_START...MEM_LCD_OAM_END => self.write_oam(addr, value), _ => (), } } pub fn try_get_buffer(&mut self) -> Option<Vec<u8>> { if self.vblank_sync { self.vblank_sync = false; Some(mem::replace(&mut self.last_frame, vec![0u8; LCD_WIDTH * LCD_HEIGHT])) } else { None } } fn read_oam(&self, addr: u16) -> u8 { self.oam[(addr - MEM_LCD_OAM_START) as usize] } fn write_oam(&mut self, addr: u16, value: u8) { self.oam[(addr - MEM_LCD_OAM_START) as usize] = value; } fn read_vram_tile_data(&self, addr: u16) -> u8 { let offset = ((addr & 0xfffe) - MEM_LCD_VRAM_TILE_DATA_START) as usize * 4; let mut value = 0; let set_mask = 1u8 << (addr & 1); for i in 0..8 { value <<= 1; if (self.vram_tile_data[offset + i] & set_mask) != 0 { value |= 0x1; } } value // self.vram[(addr - MEM_LCD_VRAM_START) as usize] } fn write_vram_tile_data(&mut self, addr: u16, value: u8) { let offset = ((addr & 0xfffe) - MEM_LCD_VRAM_TILE_DATA_START) as usize * 4; let mut v = value; let set_mask = 1u8 << (addr & 1); let clear_mask = !set_mask; for i in 0..8 { if v & 0x80 == 0 { self.vram_tile_data[offset + i] &= clear_mask; } else { self.vram_tile_data[offset + i] |= set_mask; } v <<= 1; } // assert_eq!(value, self.read_vram_tile_data(addr)); // self.vram[(addr - MEM_LCD_VRAM_START) as usize] = value; } fn is_window_enabled(&self) -> bool { (self.lcdc & LCDC_WINDOW_DISPLAY_ENABLE) != 0 } fn get_mode(&self) -> LcdMode { match self.stat & 3 { 0b00 => LcdMode::Hblank, 0b01 => LcdMode::Vblank, 0b10 => LcdMode::SearchingOam, 0b11 => LcdMode::Transfer, _ => unreachable!(), } } fn get_oam_entry(&self, index: usize) -> 
SpriteOamEntry { let offset = index * 4; SpriteOamEntry { y: self.oam[offset], x: self.oam[offset + 1], tile_id: self.oam[offset + 2], flags: self.oam[offset + 3], index: index as u8, } } fn get_sprite_height(&self) -> i32 { if (self.lcdc & 0x04) == 0 { 8 } else { 16 } } fn get_oam_entries(&self, line_y: i32) -> Vec<SpriteOamEntry> { let sprite_height = self.get_sprite_height(); // Can we avoid allocation here by using a fixed size vector? let mut entries: Vec<SpriteOamEntry> = (0..40) .map(|idx| self.get_oam_entry(idx)) .filter(|entry| entry.covers_line(sprite_height, line_y)) .collect(); // TODO: Also sort by OAM table ordering, see http://bgb.bircd.org/pandocs.htm#vramspriteattributetableoam // TODO: Maybe the ordering is reverse from how it should be here, since the first sprite gets overdrawn by the last? entries.sort_unstable_by_key(|entry| -((((entry.x as usize) << 16) | entry.index as usize) as isize)); if entries.len() > 10 { entries.truncate(10); } entries } fn draw_sprites(&mut self, line_y: i32) { let sprite_height = self.get_sprite_height(); let entries = self.get_oam_entries(line_y); let line_start = line_y * LCD_WIDTH as i32; for entry in entries { let palette = if entry.palette_number() == 0 { self.obp0 } else { self.obp1 }; let tile_id = if sprite_height == 16 { entry.tile_id & 0xfe } else { entry.tile_id }; let sprite_y = entry.get_y(); let sprite_x = entry.get_x(); let tile_y = line_y - sprite_y; let tile_y = if entry.is_vertically_flipped() { (sprite_height - 1) - tile_y } else { tile_y }; let tile_offset = (tile_id as i32 * 8 * 8) + tile_y * 8; for tile_x in 0..8 { let screen_x = sprite_x + tile_x; let tile_x = if entry.is_horizontally_flipped() { 7 - tile_x } else { tile_x }; if screen_x >= 0 && screen_x < LCD_WIDTH as i32 { let value = self.vram_tile_data[(tile_offset + tile_x) as usize]; let screen_offset = (line_start + screen_x) as usize; if value != 0 && (!entry.is_behind_bg() || self.buffer[screen_offset] >= 0x80) { let color = 
Lcd::palette_lookup(palette, value); self.buffer[screen_offset] = color; } } } // l0 = self.vram[] } } fn set_mode(&mut self, mode: LcdMode) { trace!("Entering lcd mode {:?}", mode); let stat = self.stat & !0x03; match mode { LcdMode::Hblank => { self.counter += 207; self.stat = stat | 0b00; } LcdMode::Vblank => { self.counter += 456; self.stat = stat | 0b01; } LcdMode::SearchingOam => { self.counter += 80; self.stat = stat | 0b10; } LcdMode::Transfer => { self.counter += 169; self.stat = stat | 0b11; } } } fn inc_ly(&mut self) -> u8 { self.ly = (self.ly + 1) % 154; trace!("Lcd ly incremented to {}", self.ly); if self.ly == self.lyc { self.stat |= 0x04; if self.stat & 0x40 != 0 { return INT_MASK_LCDSTAT; } } else { self.stat &= !0x04; } 0 // No interrupt set } fn get_bg_tile_offset(&self, tile_id: u8) -> usize { if (self.lcdc & 0x10) == 0 { (0x1000 + ((tile_id as i8) as isize * 16)) as usize * 4 } else { (0x0000 + (tile_id as usize * 16)) * 4 } } fn get_window_at(&self, x: u8, y: u8) -> u8 { // Probably very unoptimized, shouldn't matter too much though.. let tile_base = if (self.lcdc & LCDC_WINDOW_TILE_MAP_DISPLAY_SELECT) == 0 { 0 } else { 0x400 }; let tile_id = self.vram_tile_table[tile_base + (((y & 0xf8) as usize) << 2) + ((x as usize) >> 3)]; let palette_idx = self.vram_tile_data[self.get_bg_tile_offset(tile_id) + (((y & 0x7) << 3) | (x & 0x7)) as usize]; // let line_offset = self.get_bg_tile_offset(tile_id) + ((y & 0x7) * 2) as usize; // let l0 = self.read_vram_tile_data(line_offset as u16 + MEM_LCD_VRAM_TILE_DATA_START); // let l1 = self.read_vram_tile_data(line_offset as u16 + 1 + MEM_LCD_VRAM_TILE_DATA_START); // let bit_offset = 7 - (x & 0x7); // let palette_idx = (((l0 >> bit_offset) & 1) | (((l1 >> bit_offset) & 1) << 1)) as usize; // Lcd::palette_lookup(self.bgp, palette_idx) } fn get_bg_at(&self, x: u8, y: u8) -> u8 { // Probably very unoptimized, shouldn't matter too much though.. 
let tile_base = if (self.lcdc & LCDC_BG_TILE_MAP_DISPLAY_SELECT) == 0 { 0 } else { 0x400 }; let tile_id = self.vram_tile_table[tile_base + (((y & 0xf8) as usize) << 2) + ((x as usize) >> 3)]; let palette_idx = self.vram_tile_data[self.get_bg_tile_offset(tile_id) + (((y & 0x7) << 3) | (x & 0x7)) as usize]; // let line_offset = self.get_bg_tile_offset(tile_id) + ((y & 0x7) * 2) as usize; // let l0 = self.read_vram_tile_data(line_offset as u16 + MEM_LCD_VRAM_TILE_DATA_START); // let l1 = self.read_vram_tile_data(line_offset as u16 + 1 + MEM_LCD_VRAM_TILE_DATA_START); // let bit_offset = 7 - (x & 0x7); // let palette_idx = (((l0 >> bit_offset) & 1) | (((l1 >> bit_offset) & 1) << 1)) as usize; // Lcd::palette_lookup(self.bgp, palette_idx) | if palette_idx == 0 { 0x80 } else { 0 } } fn palette_lookup(palette: u8, index: u8) -> u8 { (palette >> ((index & 0x03) * 2)) & 0x03 } pub fn tick(&mut self, cycles: usize) -> u8 { if self.lcdc & 0x80 == 0 { return 0; } trace!("Lcd tick"); let max_step: usize = 80; if cycles > max_step { return self.tick(max_step) | self.tick(cycles - max_step); } let mut ints = 0u8; self.counter -= cycles as i32; if self.counter <= 0 { let mode = self.get_mode(); match mode { LcdMode::Hblank => { // Transition from HBlank to either SearchingOam or Vblank // I think LY increments at this point ints |= self.inc_ly(); if self.ly >= 144 { self.vblank_sync = true; mem::swap(&mut self.buffer, &mut self.last_frame); self.set_mode(LcdMode::Vblank); ints |= INT_MASK_VBLANK; } else { self.set_mode(LcdMode::SearchingOam); } } LcdMode::Vblank => { ints |= self.inc_ly(); if self.ly == 0 { self.set_mode(LcdMode::SearchingOam); } else { self.set_mode(LcdMode::Vblank); } } LcdMode::SearchingOam => { self.set_mode(LcdMode::Transfer); } LcdMode::Transfer => { self.draw_line(); self.set_mode(LcdMode::Hblank); if self.stat & 0x08 != 0 { ints |= INT_MASK_LCDSTAT; } } } } ints } fn draw_line(&mut self) { let y = self.ly; let offset = y as usize * LCD_WIDTH; if 
self.show_bg { for x in 0..LCD_WIDTH { self.buffer[offset + x] = self.get_bg_at((x as u8).wrapping_add(self.scx), y.wrapping_add(self.scy)); } } if self.show_sprites { self.draw_sprites(y as i32); } if self.show_window { if self.is_window_enabled() && y >= self.wy { let win_y = y - self.wy; for x in (self.wx.saturating_sub(7) as usize)..LCD_WIDTH { self.buffer[offset + x] = self.get_window_at((x as u8).wrapping_sub(self.wx.wrapping_sub(7)), win_y); } } } } }
use crate::htmlrenderer; use crate::logger::{self, Level}; use libc::c_ulong; use percent_encoding::*; use std::fs::DirBuilder; use std::io::{self, Write}; use std::os::unix::fs::DirBuilderExt; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use unicode_width::{UnicodeWidthChar, UnicodeWidthStr}; use url::Url; pub fn replace_all(input: String, from: &str, to: &str) -> String { input.replace(from, to) } pub fn consolidate_whitespace(input: String) -> String { let found = input.find(|c: char| !c.is_whitespace()); let mut result = String::new(); if let Some(found) = found { let (leading, rest) = input.split_at(found); let lastchar = input.chars().rev().next().unwrap(); result.push_str(leading); let iter = rest.split_whitespace(); for elem in iter { result.push_str(elem); result.push(' '); } result.pop(); if lastchar.is_whitespace() { result.push(' '); } } result } pub fn to_u(rs_str: String, default_value: u32) -> u32 { let mut result = rs_str.parse::<u32>(); if result.is_err() { result = Ok(default_value); } result.unwrap() } /// Combine a base URL and a link to a new absolute URL. /// If the base URL is malformed or joining with the link fails, link will be returned. 
/// # Examples /// ``` /// use libnewsboat::utils::absolute_url; /// assert_eq!(absolute_url("http://foobar/hello/crook/", "bar.html"), /// "http://foobar/hello/crook/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "/bar.html"), /// "https://foobar/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "http://quux/bar.html"), /// "http://quux/bar.html".to_owned()); /// assert_eq!(absolute_url("http://foobar", "bla.html"), /// "http://foobar/bla.html".to_owned()); /// assert_eq!(absolute_url("http://test:test@foobar:33", "bla2.html"), /// "http://test:test@foobar:33/bla2.html".to_owned()); /// assert_eq!(absolute_url("foo", "bar"), "bar".to_owned()); /// ``` pub fn absolute_url(base_url: &str, link: &str) -> String { Url::parse(base_url) .and_then(|url| url.join(link)) .as_ref() .map(Url::as_str) .unwrap_or(link) .to_owned() } /// Path to the home directory, if known. Doesn't work on Windows. #[cfg(not(target_os = "windows"))] pub fn home_dir() -> Option<PathBuf> { // This function got deprecated because it examines HOME environment variable even on Windows, // which is wrong. But Newsboat doesn't support Windows, so we're fine using that. // // Cf. https://github.com/rust-lang/rust/issues/28940 #[allow(deprecated)] std::env::home_dir() } /// Replaces tilde (`~`) at the beginning of the path with the path to user's home directory. pub fn resolve_tilde(path: PathBuf) -> PathBuf { if let (Some(home), Ok(suffix)) = (home_dir(), path.strip_prefix("~")) { return home.join(suffix); } // Either the `path` doesn't start with tilde, or we couldn't figure out the path to the // home directory -- either way, it's no big deal. Let's return the original string. 
path } pub fn resolve_relative(reference: &Path, path: &Path) -> PathBuf { if path.is_relative() { // Will only ever panic if reference is `/`, which shouldn't be the case as reference is // always a file path return reference.parent().unwrap().join(path); } path.to_path_buf() } pub fn is_special_url(url: &str) -> bool { is_query_url(url) || is_filter_url(url) || is_exec_url(url) } /// Check if the given URL is a http(s) URL /// # Example /// ``` /// use libnewsboat::utils::is_http_url; /// assert!(is_http_url("http://example.com")); /// ``` pub fn is_http_url(url: &str) -> bool { url.starts_with("https://") || url.starts_with("http://") } pub fn is_query_url(url: &str) -> bool { url.starts_with("query:") } pub fn is_filter_url(url: &str) -> bool { url.starts_with("filter:") } pub fn is_exec_url(url: &str) -> bool { url.starts_with("exec:") } /// Censor URLs by replacing username and password with '*' /// ``` /// use libnewsboat::utils::censor_url; /// assert_eq!(&censor_url(""), ""); /// assert_eq!(&censor_url("foobar"), "foobar"); /// assert_eq!(&censor_url("foobar://xyz/"), "foobar://xyz/"); /// assert_eq!(&censor_url("http://newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://@newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://@newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://foo:bar@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://foo:bar@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://aschas@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://aschas@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("xxx://aschas@newsbeuter.org/"), /// "xxx://*:*@newsbeuter.org/"); /// /// 
assert_eq!(&censor_url("http://foobar"), "http://foobar/"); /// assert_eq!(&censor_url("https://foobar"), "https://foobar/"); /// /// assert_eq!(&censor_url("http://aschas@host"), "http://*:*@host/"); /// assert_eq!(&censor_url("https://aschas@host"), "https://*:*@host/"); /// /// assert_eq!(&censor_url("query:name:age between 1:10"), /// "query:name:age between 1:10"); /// ``` pub fn censor_url(url: &str) -> String { if !url.is_empty() && !is_special_url(url) { Url::parse(url) .map(|mut url| { if url.username() != "" || url.password().is_some() { // can not panic. If either username or password is present we can change both. url.set_username("*").unwrap(); url.set_password(Some("*")).unwrap(); } url }) .as_ref() .map(Url::as_str) .unwrap_or(url) .to_owned() } else { url.into() } } /// Quote a string for use with stfl by replacing all occurences of "<" with "<>" /// ``` /// use libnewsboat::utils::quote_for_stfl; /// assert_eq!(&quote_for_stfl("<"), "<>"); /// assert_eq!(&quote_for_stfl("<<><><><"), "<><>><>><>><>"); /// assert_eq!(&quote_for_stfl("test"), "test"); /// ``` pub fn quote_for_stfl(string: &str) -> String { string.replace("<", "<>") } /// Get basename from a URL if available else return an empty string /// ``` /// use libnewsboat::utils::get_basename; /// assert_eq!(get_basename("https://example.com/"), ""); /// assert_eq!(get_basename("https://example.org/?param=value#fragment"), ""); /// assert_eq!(get_basename("https://example.org/path/to/?param=value#fragment"), ""); /// assert_eq!(get_basename("https://example.org/file.mp3"), "file.mp3"); /// assert_eq!(get_basename("https://example.org/path/to/file.mp3?param=value#fragment"), "file.mp3"); /// ``` pub fn get_basename(input: &str) -> String { match Url::parse(input) { Ok(url) => match url.path_segments() { Some(segments) => segments.last().unwrap().to_string(), None => String::from(""), }, Err(_) => String::from(""), } } pub fn get_default_browser() -> String { use std::env; 
env::var("BROWSER").unwrap_or_else(|_| "lynx".to_string()) } pub fn trim(rs_str: String) -> String { rs_str.trim().to_string() } pub fn trim_end(rs_str: String) -> String { let x: &[_] = &['\n', '\r']; rs_str.trim_end_matches(x).to_string() } pub fn quote(input: String) -> String { let mut input = input.replace("\"", "\\\""); input.insert(0, '"'); input.push('"'); input } pub fn quote_if_necessary(input: String) -> String { match input.find(' ') { Some(_) => quote(input), None => input, } } pub fn get_random_value(max: u32) -> u32 { rand::random::<u32>() % max } pub fn is_valid_color(color: &str) -> bool { const COLORS: [&str; 9] = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", ]; if COLORS.contains(&color) { true } else if color.starts_with("color0") { color == "color0" } else if color.starts_with("color") { let num_part = &color[5..]; num_part.parse::<u8>().is_ok() } else { false } } pub fn is_valid_attribute(attribute: &str) -> bool { const VALID_ATTRIBUTES: [&str; 9] = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; VALID_ATTRIBUTES.contains(&attribute) } pub fn strwidth(rs_str: &str) -> usize { UnicodeWidthStr::width(rs_str) } /// Returns the width of `rs_str` when displayed on screen. /// /// STFL tags (e.g. `<b>`, `<foobar>`, `</>`) are counted as having 0 width. /// Escaped less-than sign (`<` escaped as `<>`) is counted as having a width of 1 character. 
/// ```
/// use libnewsboat::utils::strwidth_stfl;
/// assert_eq!(strwidth_stfl("a"), 1);
/// assert_eq!(strwidth_stfl("abc<tag>def"), 6);
/// assert_eq!(strwidth_stfl("less-than: <>"), 12);
/// assert_eq!(strwidth_stfl("ABCDEF"), 12);
///```
pub fn strwidth_stfl(rs_str: &str) -> usize {
    // NOTE(review): the doctest above asserts width 12 for a six-character literal,
    // which suggests the original source used full-width characters (U+FF21..) that
    // were mangled somewhere in transit — confirm against upstream before trusting it.
    //
    // Scan left to right, adding the width of plain text runs; a '<' starts a tag
    // whose contents contribute 0 width, except the two-character escape "<>" which
    // stands for a literal '<' (width 1).
    let mut s = &rs_str[..];
    let mut width = 0;
    loop {
        if let Some(pos) = s.find('<') {
            // Text before the '<' is ordinary displayed text.
            width += strwidth(&s[..pos]);
            s = &s[pos..];
            if let Some(endpos) = s.find('>') {
                if endpos == 1 {
                    // Found "<>" which stfl uses to encode a literal '<'
                    width += strwidth("<");
                }
                s = &s[endpos + 1..];
            } else {
                // '<' without closing '>' so ignore rest of string
                break;
            }
        } else {
            width += strwidth(s);
            break;
        }
    }
    width
}

/// Returns a longest substring fits to the given width.
/// Returns an empty string if `str` is an empty string or `max_width` is zero.
///
/// Each chararacter width is calculated with UnicodeWidthChar::width. If UnicodeWidthChar::width()
/// returns None, the character width is treated as 0.
/// ```
/// use libnewsboat::utils::substr_with_width;
/// assert_eq!(substr_with_width("a", 1), "a");
/// assert_eq!(substr_with_width("a", 2), "a");
/// assert_eq!(substr_with_width("ab", 1), "a");
/// assert_eq!(substr_with_width("abc", 1), "a");
/// assert_eq!(substr_with_width("A\u{3042}B\u{3044}C\u{3046}", 5), "A\u{3042}B")
///```
pub fn substr_with_width(string: &str, max_width: usize) -> String {
    let mut result = String::new();
    let mut width = 0;
    // Iterating over chars (not bytes) guarantees codepoints are never split.
    for c in string.chars() {
        // Control chars count as width 0
        let w = UnicodeWidthChar::width(c).unwrap_or(0);
        if width + w > max_width {
            break;
        }
        width += w;
        result.push(c);
    }
    result
}

/// Returns a longest substring fits to the given width.
/// Returns an empty string if `str` is an empty string or `max_width` is zero.
///
/// Each chararacter width is calculated with UnicodeWidthChar::width. If UnicodeWidthChar::width()
/// returns None, the character width is treated as 0. A STFL tag (e.g. `<b>`, `<foobar>`, `</>`)
/// width is treated as 0, but escaped less-than (`<>`) width is treated as 1.
/// ```
/// use libnewsboat::utils::substr_with_width_stfl;
/// assert_eq!(substr_with_width_stfl("a", 1), "a");
/// assert_eq!(substr_with_width_stfl("a", 2), "a");
/// assert_eq!(substr_with_width_stfl("ab", 1), "a");
/// assert_eq!(substr_with_width_stfl("abc", 1), "a");
/// assert_eq!(substr_with_width_stfl("A\u{3042}B\u{3044}C\u{3046}", 5), "A\u{3042}B")
///```
pub fn substr_with_width_stfl(string: &str, max_width: usize) -> String {
    let mut result = String::new();
    // True while we are between an opening '<' and its closing '>'.
    let mut in_bracket = false;
    // Accumulates the characters of the tag currently being read.
    let mut tagbuf = Vec::<char>::new();
    let mut width = 0;
    for c in string.chars() {
        if in_bracket {
            tagbuf.push(c);
            if c == '>' {
                in_bracket = false;
                if tagbuf == ['<', '>'] {
                    // "<>" is the escape for a literal '<': it costs 1 cell, so it is
                    // subject to the width budget like any regular character.
                    if width + 1 > max_width {
                        break;
                    }
                    result += "<>"; // escaped less-than
                    tagbuf.clear();
                    width += 1;
                } else {
                    // A real tag: copied to the output verbatim at zero width cost.
                    result += &tagbuf.iter().collect::<String>();
                    tagbuf.clear();
                }
            }
        } else if c == '<' {
            in_bracket = true;
            tagbuf.push(c);
        } else {
            // Control chars count as width 0
            let w = UnicodeWidthChar::width(c).unwrap_or(0);
            if width + w > max_width {
                break;
            }
            width += w;
            result.push(c);
        }
    }
    // Note: an unterminated tag at the end of input is silently dropped (tagbuf is
    // discarded), matching strwidth_stfl's treatment of "<" without ">".
    result
}

/// Remove all soft-hyphens as they can behave unpredictably (see
/// https://github.com/akrennmair/newsbeuter/issues/259#issuecomment-259609490) and inadvertently
/// render as hyphens
pub fn remove_soft_hyphens(text: &mut String) {
    text.retain(|c| c != '\u{00AD}')
}

/// An array of "MIME matchers" and their associated LinkTypes
///
/// This is used for two tasks:
///
/// 1. checking if a MIME type is a podcast type (`utils::is_valid_podcast_type`). That involves
///    running all matching functions on given input and checking if any of them returned `true`;
///
/// 2. figuring out the `LinkType` for a particular enclosure, given its MIME type
///    (`utils::podcast_mime_to_link_type`).
type MimeMatcher = (fn(&str) -> bool, htmlrenderer::LinkType); const PODCAST_MIME_TO_LINKTYPE: [MimeMatcher; 2] = [ ( |mime| { // RFC 5334, section 10.1 says "historically, some implementations expect .ogg files to be // solely Vorbis-encoded audio", so let's assume it's audio, not video. // https://tools.ietf.org/html/rfc5334#section-10.1 mime.starts_with("audio/") || mime == "application/ogg" }, htmlrenderer::LinkType::Audio, ), ( |mime| mime.starts_with("video/"), htmlrenderer::LinkType::Video, ), ]; /// Returns `true` if given MIME type is considered to be a podcast by Newsboat. pub fn is_valid_podcast_type(mimetype: &str) -> bool { PODCAST_MIME_TO_LINKTYPE .iter() .any(|(matcher, _)| matcher(mimetype)) } /// Converts podcast's MIME type into an HtmlRenderer's "link type" /// /// Returns None if given MIME type is not a podcast type. See `is_valid_podcast_type()`. pub fn podcast_mime_to_link_type(mime_type: &str) -> Option<htmlrenderer::LinkType> { PODCAST_MIME_TO_LINKTYPE .iter() .find_map(|(matcher, link_type)| { if matcher(mime_type) { Some(*link_type) } else { None } }) } pub fn get_auth_method(method: &str) -> c_ulong { match method { "basic" => curl_sys::CURLAUTH_BASIC, "digest" => curl_sys::CURLAUTH_DIGEST, "digest_ie" => curl_sys::CURLAUTH_DIGEST_IE, "gssnegotiate" => curl_sys::CURLAUTH_GSSNEGOTIATE, "ntlm" => curl_sys::CURLAUTH_NTLM, "anysafe" => curl_sys::CURLAUTH_ANYSAFE, "any" | "" => curl_sys::CURLAUTH_ANY, _ => { log!( Level::UserError, "utils::get_auth_method: you configured an invalid proxy authentication method: {}", method ); curl_sys::CURLAUTH_ANY } } } pub fn unescape_url(rs_str: String) -> Option<String> { let decoded = percent_decode(rs_str.as_bytes()).decode_utf8(); decoded.ok().map(|s| s.replace("\0", "")) } /// Runs given command in a shell, and returns the output (from stdout; stderr is printed to the /// screen). 
pub fn get_command_output(cmd: &str) -> String {
    let result = Command::new("sh")
        .arg("-c")
        .arg(cmd)
        // Inherit stdin so that the program can ask something of the user (see
        // https://github.com/newsboat/newsboat/issues/455 for an example).
        .stdin(Stdio::inherit())
        .output();
    match result {
        // from_utf8_lossy will convert any bad bytes to U+FFFD
        Ok(output) => String::from_utf8_lossy(&output.stdout).into_owned(),
        // A command that failed to run at all yields an empty string.
        Err(_) => String::new(),
    }
}

// This function assumes that the user is not interested in command's output (not even errors on
// stderr!), so it redirects everything to /dev/null.
pub fn run_command(cmd: &str, param: &str) {
    let spawn_result = Command::new(cmd)
        .arg(param)
        // Prevent the command from blocking Newsboat by asking for input
        .stdin(Stdio::null())
        // Prevent the command from botching the screen by printing onto it.
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn();
    match spawn_result {
        Ok(_child) => {
            // We deliberately *don't* wait for the child to finish.
        }
        Err(error) => {
            log!(
                Level::Debug,
                "utils::run_command: spawning a child for \"{}\" failed: {}",
                cmd,
                error
            );
        }
    }
}

/// Runs `cmd_with_args[0]` with the remaining elements as arguments, writes `input` to its
/// stdin, and returns everything the child printed to stdout (stderr is discarded).
/// Returns an empty string if `cmd_with_args` is empty or if spawning/reading fails.
pub fn run_program(cmd_with_args: &[&str], input: &str) -> String {
    if cmd_with_args.is_empty() {
        return String::new();
    }
    Command::new(cmd_with_args[0])
        .args(&cmd_with_args[1..])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::null())
        .spawn()
        .map_err(|error| {
            log!(
                Level::Debug,
                "utils::run_program: spawning a child for \"{:?}\" \
                 with input \"{}\" failed: {}",
                cmd_with_args,
                input,
                error
            );
        })
        .and_then(|mut child| {
            // A failed write to stdin is logged but not fatal: the child may still
            // produce useful output.
            if let Some(stdin) = child.stdin.as_mut() {
                if let Err(error) = stdin.write_all(input.as_bytes()) {
                    log!(
                        Level::Debug,
                        "utils::run_program: failed to write to child's stdin: {}",
                        error
                    );
                }
            }
            child
                .wait_with_output()
                .map_err(|error| {
                    log!(
                        Level::Debug,
                        "utils::run_program: failed to read child's stdout: {}",
                        error
                    );
                })
                .map(|output| String::from_utf8_lossy(&output.stdout).into_owned())
        })
        .unwrap_or_else(|_| String::new())
}

/// Derives a human-readable title from the last path component of a URL:
/// strips trailing slashes, the query string and common page suffixes, turns
/// dashes/underscores into spaces, capitalizes the first letter, and
/// percent-decodes the result.
pub fn make_title(rs_str: String) -> String {
    /* Sometimes it is possible to construct the title from the URL
     * This attempts to do just that. eg:
     * http://domain.com/story/yy/mm/dd/title-with-dashes?a=b */
    // Strip out trailing slashes
    let mut result = rs_str.trim_end_matches('/');
    // get to the final part of the URI's path and
    // extract just the juicy part 'title-with-dashes?a=b'
    let v: Vec<&str> = result.rsplitn(2, '/').collect();
    result = v[0];
    // find where query part of URI starts
    // throw away the query part 'title-with-dashes'
    let v: Vec<&str> = result.splitn(2, '?').collect();
    result = v[0];
    // Throw away common webpage suffixes: .html, .php, .aspx, .htm
    result = result
        .trim_end_matches(".html")
        .trim_end_matches(".php")
        .trim_end_matches(".aspx")
        .trim_end_matches(".htm");
    // 'title with dashes'
    let result = result.replace('-', " ").replace('_', " ");
    //'Title with dashes'
    // Capitalize the first character; to_uppercase() may expand to several chars.
    let mut c = result.chars();
    let result = match c.next() {
        None => String::new(),
        Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
    };
    // Un-escape any percent-encoding, e.g. "It%27s%202017%21" -> "It's
    // 2017!"
    match unescape_url(result) {
        None => String::new(),
        Some(f) => f,
    }
}

/// Run the given command interactively with inherited stdin and stdout/stderr. Return the lowest
/// 8 bits of its exit code, or `None` if the command failed to start.
/// ```
/// use libnewsboat::utils::run_interactively;
///
/// let result = run_interactively("echo true", "test");
/// assert_eq!(result, Some(0));
///
/// let result = run_interactively("exit 1", "test");
/// assert_eq!(result, Some(1));
///
/// // Unfortunately, there is no easy way to provoke this function to return `None`, nor to test
/// // that it returns just the lowest 8 bits.
/// ```
pub fn run_interactively(command: &str, caller: &str) -> Option<u8> {
    log!(Level::Debug, &format!("{}: running `{}'", caller, command));
    Command::new("sh")
        .arg("-c")
        .arg(command)
        .status()
        .map_err(|err| {
            log!(
                Level::Warn,
                &format!("{}: Couldn't create child process: {}", caller, err)
            )
        })
        .ok()
        // `code()` is None when the child was killed by a signal.
        .and_then(|exit_status| exit_status.code())
        .map(|exit_code| exit_code as u8)
}

/// Get the current working directory.
pub fn getcwd() -> Result<PathBuf, io::Error> {
    use std::env;
    env::current_dir()
}

/// Compares two strings in "natural" order (digit runs compared numerically),
/// delegating to the `natord` crate.
pub fn strnaturalcmp(a: &str, b: &str) -> std::cmp::Ordering {
    natord::compare(a, b)
}

/// Calculate the number of padding tabs when formatting columns
///
/// The number of tabs will be adjusted by the width of the given string. Usually, a column will
/// consist of 4 tabs, 8 characters each. Each column will consist of at least one tab.
///
/// ```
/// use libnewsboat::utils::gentabs;
///
/// fn genstring(len: usize) -> String {
///     return std::iter::repeat("a").take(len).collect::<String>();
/// }
///
/// assert_eq!(gentabs(""), 4);
/// assert_eq!(gentabs("a"), 4);
/// assert_eq!(gentabs("aa"), 4);
/// assert_eq!(gentabs("aaa"), 4);
/// assert_eq!(gentabs("aaaa"), 4);
/// assert_eq!(gentabs("aaaaa"), 4);
/// assert_eq!(gentabs("aaaaaa"), 4);
/// assert_eq!(gentabs("aaaaaaa"), 4);
/// assert_eq!(gentabs("aaaaaaaa"), 3);
/// assert_eq!(gentabs(&genstring(8)), 3);
/// assert_eq!(gentabs(&genstring(9)), 3);
/// assert_eq!(gentabs(&genstring(15)), 3);
/// assert_eq!(gentabs(&genstring(16)), 2);
/// assert_eq!(gentabs(&genstring(20)), 2);
/// assert_eq!(gentabs(&genstring(24)), 1);
/// assert_eq!(gentabs(&genstring(32)), 1);
/// assert_eq!(gentabs(&genstring(100)), 1);
/// ```
pub fn gentabs(string: &str) -> usize {
    // Each full 8-cell tab stop the string occupies reduces the padding by one,
    // but at least one tab is always emitted.
    let tabcount = strwidth(string) / 8;
    if tabcount >= 4 {
        1
    } else {
        4 - tabcount
    }
}

/// Recursively create directories if missing and set permissions accordingly.
pub fn mkdir_parents<R: AsRef<Path>>(p: &R, mode: u32) -> io::Result<()> {
    DirBuilder::new()
        .mode(mode)
        .recursive(true) // directories created with same security and permissions
        .create(p.as_ref())
}

/// The tag and Git commit ID the program was built from, or a pre-defined value from config.h if
/// there is no Git directory.
pub fn program_version() -> String {
    // NEWSBOAT_VERSION is set by this crate's build script, "build.rs"
    env!("NEWSBOAT_VERSION").to_string()
}

/// Newsboat's major version number.
pub fn newsboat_major_version() -> u32 {
    // This will panic if the version couldn't be parsed, which is virtually impossible as Cargo
    // won't even start compilation if it couldn't parse the version.
    env!("CARGO_PKG_VERSION_MAJOR").parse::<u32>().unwrap()
}

/// Returns the part of the string before first # character (or the whole input string if there are
/// no # character in it). Pound characters inside double quotes and backticks are ignored.
pub fn strip_comments(line: &str) -> &str {
    // True when the previous character was a backslash (escapes the next character).
    let mut prev_was_backslash = false;
    let mut inside_quotes = false;
    let mut inside_backticks = false;
    // Defaults to the full line, i.e. "no comment found".
    let mut first_pound_chr_idx = line.len();
    for (idx, chr) in line.char_indices() {
        match chr {
            '\\' => {
                prev_was_backslash = true;
                continue;
            }
            '"' => {
                // If the quote is escaped or we're inside backticks, do nothing
                if !prev_was_backslash && !inside_backticks {
                    inside_quotes = !inside_quotes;
                }
            }
            '`' => {
                // If the backtick is escaped, do nothing
                if !prev_was_backslash {
                    inside_backticks = !inside_backticks;
                }
            }
            '#' => {
                if !prev_was_backslash && !inside_quotes && !inside_backticks {
                    first_pound_chr_idx = idx;
                    break;
                }
            }
            _ => {}
        }
        // We call `continue` when we run into a backslash; here, we handle all the other
        // characters, which clearly *aren't* a backslash
        prev_was_backslash = false;
    }
    &line[0..first_pound_chr_idx]
}

/// Extract filter and url from line separated by ':'.
///
/// NOTE(review): when the remainder after "filter:" contains no further ':',
/// `find(':')` falls back to 0 and `url.get(1..)` then strips the first character
/// of that remainder (e.g. "filter:foo" -> ("", "oo")). No test covers this input;
/// confirm whether that is intended before relying on it.
pub fn extract_filter(line: &str) -> (&str, &str) {
    debug_assert!(line.starts_with("filter:")); // line must start with "filter:"
    let line = line.get("filter:".len()..).unwrap();
    let (filter, url) = line.split_at(line.find(':').unwrap_or(0));
    // Skip the ':' separator itself.
    let url = url.get(1..).unwrap_or("");
    log!(
        Level::Debug,
        "utils::extract_filter: {} -> filter: {} url: {}",
        line,
        filter,
        url
    );
    (filter, url)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn t_replace_all() {
        assert_eq!(
            replace_all(String::from("aaa"), "a", "b"),
            String::from("bbb")
        );
        assert_eq!(
            replace_all(String::from("aaa"), "aa", "ba"),
            String::from("baa")
        );
        assert_eq!(
            replace_all(String::from("aaaaaa"), "aa", "ba"),
            String::from("bababa")
        );
        assert_eq!(replace_all(String::new(), "a", "b"), String::new());
        let input = String::from("aaaa");
        assert_eq!(replace_all(input.clone(), "b", "c"), input);
        assert_eq!(
            replace_all(String::from("this is a normal test text"), " t", " T"),
            String::from("this is a normal Test Text")
        );
        assert_eq!(
            replace_all(String::from("o o o"), "o", "<o>"),
            String::from("<o> <o> <o>")
        );
    }

    #[test]
    fn t_consolidate_whitespace() {
        assert_eq!(
            consolidate_whitespace(String::from("LoremIpsum")),
            String::from("LoremIpsum")
        );
        assert_eq!(
            consolidate_whitespace(String::from("Lorem Ipsum")),
            String::from("Lorem Ipsum")
        );
        assert_eq!(
            consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")),
            String::from(" Lorem Ipsum ")
        );
        assert_eq!(
            consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")),
            String::from(" Lorem Ipsum")
        );
        assert_eq!(consolidate_whitespace(String::new()), String::new());
        // NOTE(review): the following four assertions duplicate the ones above —
        // possibly a copy-paste remnant; confirm before removing.
        assert_eq!(
            consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")),
            String::from(" Lorem Ipsum ")
        );
        assert_eq!(
            consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")),
            String::from(" Lorem Ipsum")
        );
    }

    #[test]
    fn t_to_u() {
        assert_eq!(to_u(String::from("0"), 10), 0);
        assert_eq!(to_u(String::from("23"), 1), 23);
        assert_eq!(to_u(String::from(""), 0), 0);
        assert_eq!(to_u(String::from("zero"), 1), 1);
    }

    #[test]
    fn t_is_special_url() {
        assert!(is_special_url("query:"));
        assert!(is_special_url("query: example"));
        assert!(!is_special_url("query"));
        assert!(!is_special_url(" query:"));
        assert!(is_special_url("filter:"));
        assert!(is_special_url("filter: example"));
        assert!(!is_special_url("filter"));
        assert!(!is_special_url(" filter:"));
        assert!(is_special_url("exec:"));
        assert!(is_special_url("exec: example"));
        assert!(!is_special_url("exec"));
        assert!(!is_special_url(" exec:"));
    }

    #[test]
    fn t_is_http_url() {
        assert!(is_http_url("https://foo.bar"));
        assert!(is_http_url("http://"));
        assert!(is_http_url("https://"));
        assert!(!is_http_url("htt://foo.bar"));
        assert!(!is_http_url("http:/"));
        assert!(!is_http_url("foo://bar"));
    }

    #[test]
    fn t_is_query_url() {
        assert!(is_query_url("query:"));
        assert!(is_query_url("query: example"));
        assert!(!is_query_url("query"));
        assert!(!is_query_url(" query:"));
    }

    #[test]
    fn t_is_filter_url() {
        assert!(is_filter_url("filter:"));
        assert!(is_filter_url("filter: example"));
        assert!(!is_filter_url("filter"));
        assert!(!is_filter_url(" filter:"));
    }

    #[test]
    fn t_is_exec_url() {
        assert!(is_exec_url("exec:"));
        assert!(is_exec_url("exec: example"));
        assert!(!is_exec_url("exec"));
        assert!(!is_exec_url(" exec:"));
    }

    #[test]
    fn t_trim() {
        assert_eq!(trim(String::from("  xxx\r\n")), "xxx");
        assert_eq!(trim(String::from("\n\n abc  foobar\n")), "abc  foobar");
        assert_eq!(trim(String::from("")), "");
        assert_eq!(trim(String::from("     \n")), "");
    }

    #[test]
    fn t_trim_end() {
        assert_eq!(trim_end(String::from("quux\n")), "quux");
    }

    #[test]
    fn t_quote() {
        assert_eq!(quote("".to_string()), "\"\"");
        assert_eq!(quote("Hello World!".to_string()), "\"Hello World!\"");
        assert_eq!(
            quote("\"Hello World!\"".to_string()),
            "\"\\\"Hello World!\\\"\""
        );
    }

    #[test]
    fn t_quote_if_necessary() {
        assert_eq!(quote_if_necessary("".to_string()), "");
        assert_eq!(
            quote_if_necessary("Hello World!".to_string()),
            "\"Hello World!\""
        );
    }

    #[test]
    fn t_is_valid_color() {
        let invalid = [
            "awesome",
            "list",
            "of",
            "things",
            "that",
            "aren't",
            "colors",
            "color0123",
            "color1024",
        ];
        for color in &invalid {
            assert!(!is_valid_color(color));
        }
        let valid = [
            "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default",
            "color0", "color163",
        ];
        for color in &valid {
            assert!(is_valid_color(color));
        }
    }

    #[test]
    fn t_strwidth() {
        assert_eq!(strwidth(""), 0);
        assert_eq!(strwidth("xx"), 2);
        assert_eq!(strwidth("\u{F91F}"), 2);
        assert_eq!(strwidth("\u{0007}"), 0);
    }

    #[test]
    fn t_strwidth_stfl() {
        assert_eq!(strwidth_stfl(""), 0);
        assert_eq!(strwidth_stfl("x<hi>x"), 2);
        assert_eq!(strwidth_stfl("x<longtag>x</>"), 2);
        assert_eq!(strwidth_stfl("x<>x"), 3);
        assert_eq!(strwidth_stfl("x<>y<>z"), 5);
        assert_eq!(strwidth_stfl("x<>hi>x"), 6);
        assert_eq!(strwidth_stfl("\u{F91F}"), 2);
        assert_eq!(strwidth_stfl("\u{0007}"), 0);
        assert_eq!(strwidth_stfl("<a"), 0); // #415
    }

    #[test]
    fn t_substr_with_width_given_string_empty() {
        assert_eq!(substr_with_width("", 0), "");
        assert_eq!(substr_with_width("", 1), "");
    }

    #[test]
    fn t_substr_with_width_max_width_zero() {
        assert_eq!(substr_with_width("world", 0), "");
        assert_eq!(substr_with_width("", 0), "");
    }

    // NOTE(review): several literals below look like full-width characters
    // (e.g. U+FF21 'A') that were mangled into ASCII in transit — a 6-char
    // "ABCDEF" truncated to "ABCD" at width 9 only makes sense for width-2
    // characters. Confirm against the upstream source.
    #[test]
    fn t_substr_with_width_max_width_dont_split_codepoints() {
        assert_eq!(substr_with_width("ABCDEF", 9), "ABCD");
        assert_eq!(substr_with_width("ABC", 4), "AB");
        assert_eq!(substr_with_width("a>bcd", 3), "a>b");
        assert_eq!(substr_with_width("ABCDE", 10), "ABCDE");
        assert_eq!(substr_with_width("abc", 2), "ab");
    }

    #[test]
    fn t_substr_with_width_max_width_does_count_stfl_tag() {
        assert_eq!(substr_with_width("ABC<b>DE</b>F", 9), "ABC<b>");
        assert_eq!(substr_with_width("<foobar>ABC", 4), "<foo");
        assert_eq!(substr_with_width("a<<xyz>>bcd", 3), "a<<");
        assert_eq!(substr_with_width("ABC<b>DE", 10), "ABC<b>");
        assert_eq!(substr_with_width("a</>b</>c</>", 2), "a<");
    }

    #[test]
    fn t_substr_with_width_max_width_count_marks_as_regular_characters() {
        assert_eq!(substr_with_width("<><><>", 2), "<>");
        assert_eq!(substr_with_width("a<>b<>c", 3), "a<>");
    }

    #[test]
    fn t_substr_with_width_max_width_non_printable() {
        assert_eq!(substr_with_width("\x01\x02abc", 1), "\x01\x02a");
    }

    #[test]
    fn t_substr_with_width_stfl_given_string_empty() {
        assert_eq!(substr_with_width_stfl("", 0), "");
        assert_eq!(substr_with_width_stfl("", 1), "");
    }

    #[test]
    fn t_substr_with_width_stfl_max_width_zero() {
        assert_eq!(substr_with_width_stfl("world", 0), "");
        assert_eq!(substr_with_width_stfl("", 0), "");
    }

    #[test]
    fn t_substr_with_width_stfl_max_width_dont_split_codepoints() {
        assert_eq!(
            substr_with_width_stfl("ABC<b>DE</b>F", 9),
            "ABC<b>D"
        );
        assert_eq!(substr_with_width_stfl("<foobar>ABC", 4), "<foobar>AB");
        assert_eq!(substr_with_width_stfl("a<<xyz>>bcd", 3), "a<<xyz>>b"); // tag: "<<xyz>"
        assert_eq!(substr_with_width_stfl("ABC<b>DE", 10), "ABC<b>DE");
        assert_eq!(substr_with_width_stfl("a</>b</>c</>", 2), "a</>b</>");
    }

    // NOTE(review): this test's body is identical to
    // t_substr_with_width_stfl_max_width_dont_split_codepoints above — likely a
    // duplicate; confirm before consolidating.
    #[test]
    fn t_substr_with_width_stfl_max_width_do_not_count_stfl_tag() {
        assert_eq!(
            substr_with_width_stfl("ABC<b>DE</b>F", 9),
            "ABC<b>D"
        );
        assert_eq!(substr_with_width_stfl("<foobar>ABC", 4), "<foobar>AB");
        assert_eq!(substr_with_width_stfl("a<<xyz>>bcd", 3), "a<<xyz>>b"); // tag: "<<xyz>"
        assert_eq!(substr_with_width_stfl("ABC<b>DE", 10), "ABC<b>DE");
        assert_eq!(substr_with_width_stfl("a</>b</>c</>", 2), "a</>b</>");
    }

    #[test]
    fn t_substr_with_width_stfl_max_width_count_escaped_less_than_mark() {
        assert_eq!(substr_with_width_stfl("<><><>", 2), "<><>");
        assert_eq!(substr_with_width_stfl("a<>b<>c", 3), "a<>b");
    }

    #[test]
    fn t_substr_with_width_stfl_max_width_non_printable() {
        assert_eq!(substr_with_width_stfl("\x01\x02abc", 1), "\x01\x02a");
    }

    #[test]
    fn t_is_valid_podcast_type() {
        assert!(is_valid_podcast_type("audio/mpeg"));
        assert!(is_valid_podcast_type("audio/mp3"));
        assert!(is_valid_podcast_type("audio/x-mp3"));
        assert!(is_valid_podcast_type("audio/ogg"));
        assert!(is_valid_podcast_type("video/x-matroska"));
        assert!(is_valid_podcast_type("video/webm"));
        assert!(is_valid_podcast_type("application/ogg"));
        assert!(!is_valid_podcast_type("image/jpeg"));
        assert!(!is_valid_podcast_type("image/png"));
        assert!(!is_valid_podcast_type("text/plain"));
        assert!(!is_valid_podcast_type("application/zip"));
    }

    #[test]
    fn t_podcast_mime_to_link_type() {
        use crate::htmlrenderer::LinkType::*;
        assert_eq!(podcast_mime_to_link_type("audio/mpeg"), Some(Audio));
        assert_eq!(podcast_mime_to_link_type("audio/mp3"), Some(Audio));
        assert_eq!(podcast_mime_to_link_type("audio/x-mp3"), Some(Audio));
        assert_eq!(podcast_mime_to_link_type("audio/ogg"), Some(Audio));
        assert_eq!(podcast_mime_to_link_type("video/x-matroska"), Some(Video));
        assert_eq!(podcast_mime_to_link_type("video/webm"), Some(Video));
        assert_eq!(podcast_mime_to_link_type("application/ogg"), Some(Audio));
        assert_eq!(podcast_mime_to_link_type("image/jpeg"), None);
        assert_eq!(podcast_mime_to_link_type("image/png"), None);
        assert_eq!(podcast_mime_to_link_type("text/plain"), None);
        assert_eq!(podcast_mime_to_link_type("application/zip"), None);
    }

    // NOTE(review): "attribte" is a typo for "attribute" in the test name;
    // harmless, but could be renamed.
    #[test]
    fn t_is_valid_attribte() {
        let invalid = ["foo", "bar", "baz", "quux"];
        for attr in &invalid {
            assert!(!is_valid_attribute(attr));
        }
        let valid = [
            "standout",
            "underline",
            "reverse",
            "blink",
            "dim",
            "bold",
            "protect",
            "invis",
            "default",
        ];
        for attr in &valid {
            assert!(is_valid_attribute(attr));
        }
    }

    #[test]
    fn t_get_auth_method() {
        assert_eq!(get_auth_method("any"), curl_sys::CURLAUTH_ANY);
        assert_eq!(get_auth_method("ntlm"), curl_sys::CURLAUTH_NTLM);
        assert_eq!(get_auth_method("basic"), curl_sys::CURLAUTH_BASIC);
        assert_eq!(get_auth_method("digest"), curl_sys::CURLAUTH_DIGEST);
        assert_eq!(get_auth_method("digest_ie"), curl_sys::CURLAUTH_DIGEST_IE);
        assert_eq!(
            get_auth_method("gssnegotiate"),
            curl_sys::CURLAUTH_GSSNEGOTIATE
        );
        assert_eq!(get_auth_method("anysafe"), curl_sys::CURLAUTH_ANYSAFE);
        assert_eq!(get_auth_method(""), curl_sys::CURLAUTH_ANY);
        assert_eq!(get_auth_method("unknown"), curl_sys::CURLAUTH_ANY);
    }

    #[test]
    fn t_unescape_url() {
        assert!(unescape_url(String::from("foo%20bar")).unwrap() == String::from("foo bar"));
        assert!(
            unescape_url(String::from(
                "%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D"
            ))
            .unwrap()
                == String::from("!#$&'()*+,/:;=?@[]")
        );
    }

    #[test]
    fn t_get_command_output() {
        assert_eq!(
            get_command_output("ls /dev/null"),
            "/dev/null\n".to_string()
        );
        assert_eq!(
            get_command_output("a-program-that-is-guaranteed-to-not-exists"),
            "".to_string()
        );
        // An unterminated quote makes the shell itself fail, yielding empty output.
        assert_eq!(get_command_output("echo c\" d e"), "".to_string());
    }

    #[test]
    fn t_run_command_executes_given_command_with_given_argument() {
        use std::{thread, time};
        use tempfile::TempDir;
        let tmp = TempDir::new().unwrap();
        let filepath = {
            let mut filepath = tmp.path().to_owned();
            filepath.push("sentry");
            filepath
        };
        assert!(!filepath.exists());
        run_command("touch", filepath.to_str().unwrap());
        // Busy-wait for 100 tries of 10 milliseconds each, waiting for `touch` to
        // create the file. Usually it happens quickly, and the loop exists on the
        // first try; but sometimes on CI it takes longer for `touch` to finish, so
        // we need a slightly longer wait.
        for _ in 0..100 {
            thread::sleep(time::Duration::from_millis(10));
            if filepath.exists() {
                break;
            }
        }
        assert!(filepath.exists());
    }

    #[test]
    fn t_run_command_doesnt_wait_for_the_command_to_finish() {
        use std::time::{Duration, Instant};
        let start = Instant::now();
        let five: &str = "5";
        run_command("sleep", five);
        let runtime = start.elapsed();
        assert!(runtime < Duration::from_secs(1));
    }

    #[test]
    fn t_run_program() {
        // NOTE(review): "multine-line" is a typo in the fixture string; it is
        // compared against itself, so behavior is unaffected.
        let input1 = "this is a multine-line\ntest string";
        assert_eq!(run_program(&["cat"], input1), input1);
        assert_eq!(
            run_program(&["echo", "-n", "hello world"], ""),
            "hello world"
        );
    }

    #[test]
    fn t_make_title() {
        let mut input = String::from("http://example.com/Item");
        assert_eq!(make_title(input), String::from("Item"));
        input = String::from("http://example.com/This-is-the-title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is_the_title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is-the_title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is-the_title.php");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is-the_title.html");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is-the_title.htm");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/This_is-the_title.aspx");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/this-is-the-title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/items/misc/this-is-the-title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/item/");
        assert_eq!(make_title(input), String::from("Item"));
        input = String::from("http://example.com/item/////////////");
        assert_eq!(make_title(input), String::from("Item"));
        input = String::from("blahscheme://example.com/this-is-the-title");
        assert_eq!(make_title(input), String::from("This is the title"));
        input = String::from("http://example.com/story/aug/title-with-dashes?a=b");
        assert_eq!(make_title(input), String::from("Title with dashes"));
        input = String::from("http://example.com/title-with-dashes?a=b&x=y&utf8=✓");
        assert_eq!(make_title(input), String::from("Title with dashes"));
        input = String::from("https://example.com/It%27s%202017%21");
        assert_eq!(make_title(input), String::from("It's 2017!"));
        input = String::from("https://example.com/?format=rss");
        assert_eq!(make_title(input), String::from(""));
        assert_eq!(make_title(String::from("")), String::from(""));
    }

    #[test]
    fn t_resolve_relative() {
        assert_eq!(
            resolve_relative(Path::new("/foo/bar"), Path::new("/baz")),
            Path::new("/baz")
        );
        assert_eq!(
            resolve_relative(Path::new("/config"), Path::new("/config/baz")),
            Path::new("/config/baz")
        );
        assert_eq!(
            resolve_relative(Path::new("/foo/bar"), Path::new("baz")),
            Path::new("/foo/baz")
        );
        assert_eq!(
            resolve_relative(Path::new("/config"), Path::new("baz")),
            Path::new("/baz")
        );
    }

    #[test]
    fn t_remove_soft_hyphens_removes_all_00ad_unicode_chars_from_a_string() {
        {
            // Does nothing if input has no soft hyphens in it
            let mut input1 = "hello world!".to_string();
            remove_soft_hyphens(&mut input1);
            assert_eq!(input1, "hello world!");
        }
        {
            // Removes *all* soft hyphens
            let mut data = "hy\u{00AD}phen\u{00AD}a\u{00AD}tion".to_string();
            remove_soft_hyphens(&mut data);
            assert_eq!(data, "hyphenation");
        }
        {
            // Removes consecutive soft hyphens
            let mut data = "don't know why any\u{00AD}\u{00AD}one would do that".to_string();
            remove_soft_hyphens(&mut data);
            assert_eq!(data, "don't know why anyone would do that");
        }
        {
            // Removes soft hyphen at the beginning of the line
            let mut data = "\u{00AD}tion".to_string();
            remove_soft_hyphens(&mut data);
            assert_eq!(data, "tion");
        }
        {
            // Removes soft hyphen at the end of the line
            let mut data = "over\u{00AD}".to_string();
            remove_soft_hyphens(&mut data);
            assert_eq!(data, "over");
        }
    }

    #[test]
    fn t_mkdir_parents() {
        use std::fs;
        use std::os::unix::fs::PermissionsExt;
        use tempfile::TempDir;
        let mode: u32 = 0o700;
        let tmp_dir = TempDir::new().unwrap();
        let path = tmp_dir.path().join("parent/dir");
        assert_eq!(path.exists(), false);
        let result = mkdir_parents(&path, mode);
        assert!(result.is_ok());
        assert_eq!(path.exists(), true);
        // Mask off the file-type bits before comparing permissions.
        let file_type_mask = 0o7777;
        let metadata = fs::metadata(&path).unwrap();
        assert_eq!(file_type_mask & metadata.permissions().mode(), mode);
        // rerun on existing directories
        let result = mkdir_parents(&path, mode);
        assert!(result.is_ok());
    }

    #[test]
    fn t_strnaturalcmp() {
        use std::cmp::Ordering;
        assert_eq!(strnaturalcmp("", ""), Ordering::Equal);
        assert_eq!(strnaturalcmp("", "a"), Ordering::Less);
        assert_eq!(strnaturalcmp("a", ""), Ordering::Greater);
        assert_eq!(strnaturalcmp("a", "a"), Ordering::Equal);
        assert_eq!(strnaturalcmp("", "9"), Ordering::Less);
        assert_eq!(strnaturalcmp("9", ""), Ordering::Greater);
        assert_eq!(strnaturalcmp("1", "1"), Ordering::Equal);
        assert_eq!(strnaturalcmp("1", "2"), Ordering::Less);
        assert_eq!(strnaturalcmp("3", "2"), Ordering::Greater);
        assert_eq!(strnaturalcmp("a1", "a1"), Ordering::Equal);
        assert_eq!(strnaturalcmp("a1", "a2"), Ordering::Less);
        assert_eq!(strnaturalcmp("a2", "a1"), Ordering::Greater);
        assert_eq!(strnaturalcmp("a1a2", "a1a3"), Ordering::Less);
        assert_eq!(strnaturalcmp("a1a2", "a1a0"), Ordering::Greater);
        assert_eq!(strnaturalcmp("134", "122"), Ordering::Greater);
        assert_eq!(strnaturalcmp("12a3", "12a3"), Ordering::Equal);
        assert_eq!(strnaturalcmp("12a1", "12a0"), Ordering::Greater);
        assert_eq!(strnaturalcmp("12a1", "12a2"), Ordering::Less);
        assert_eq!(strnaturalcmp("a", "aa"), Ordering::Less);
        assert_eq!(strnaturalcmp("aaa", "aa"), Ordering::Greater);
        assert_eq!(strnaturalcmp("Alpha 2", "Alpha 2"), Ordering::Equal);
        assert_eq!(strnaturalcmp("Alpha 2", "Alpha 2A"), Ordering::Less);
        assert_eq!(strnaturalcmp("Alpha 2 B", "Alpha 2"), Ordering::Greater);
        assert_eq!(strnaturalcmp("aa10", "aa2"), Ordering::Greater);
    }

    #[test]
    fn t_strip_comments() {
        // no comments in line
        assert_eq!(strip_comments(""), "");
        assert_eq!(strip_comments("\t\n"), "\t\n");
        assert_eq!(strip_comments("some directive "), "some directive ");
        // fully commented line
        assert_eq!(strip_comments("#"), "");
        assert_eq!(strip_comments("# #"), "");
        assert_eq!(strip_comments("# comment"), "");
        // partially commented line
        assert_eq!(strip_comments("directive # comment"), "directive ");
        assert_eq!(
            strip_comments("directive # comment # another"),
            "directive "
        );
        assert_eq!(strip_comments("directive#comment"), "directive");
        // ignores # characters inside double quotes (#652)
        let expected = r#"highlight article "[-=+#_*~]{3,}.*" green default"#;
        let input = expected.to_owned() + "# this is a comment";
        assert_eq!(strip_comments(&input), expected);
        let expected =
            r#"highlight all "(https?|ftp)://[\-\.,/%~_:?&=\#a-zA-Z0-9]+" blue default bold"#;
        let input = expected.to_owned() + "#heresacomment";
        assert_eq!(strip_comments(&input), expected);
        // Escaped double quote inside double quotes is not treated as closing quote
        let expected = r#"test "here \"goes # nothing\" etc" hehe"#;
        let input = expected.to_owned() + "# and here is a comment";
        assert_eq!(strip_comments(&input), expected);
        // Ignores # characters inside backticks
        let expected = r#"one `two # three` four"#;
        let input = expected.to_owned() + "# and a comment, of course";
        assert_eq!(strip_comments(&input), expected);
        // Escaped backtick inside backticks is not treated as closing
        let expected = r#"some `other \` tricky
# test` hehe"#; let input = expected.to_owned() + "#here goescomment"; assert_eq!(strip_comments(&input), expected); // Ignores escaped # characters (\\#) let expected = r#"one two \# three four"#; let input = expected.to_owned() + "# and a comment"; assert_eq!(strip_comments(&input), expected); } #[test] fn t_extract_filter() { let expected = ("~/bin/script.sh", "https://newsboat.org"); let input = "filter:~/bin/script.sh:https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("", "https://newsboat.org"); let input = "filter::https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("https", "//newsboat.org"); let input = "filter:https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("foo", ""); let input = "filter:foo:"; assert_eq!(extract_filter(input), expected); let expected = ("", ""); let input = "filter:"; assert_eq!(extract_filter(input), expected); } } Fix clippy::cmp_owned use crate::htmlrenderer; use crate::logger::{self, Level}; use libc::c_ulong; use percent_encoding::*; use std::fs::DirBuilder; use std::io::{self, Write}; use std::os::unix::fs::DirBuilderExt; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use unicode_width::{UnicodeWidthChar, UnicodeWidthStr}; use url::Url; pub fn replace_all(input: String, from: &str, to: &str) -> String { input.replace(from, to) } pub fn consolidate_whitespace(input: String) -> String { let found = input.find(|c: char| !c.is_whitespace()); let mut result = String::new(); if let Some(found) = found { let (leading, rest) = input.split_at(found); let lastchar = input.chars().rev().next().unwrap(); result.push_str(leading); let iter = rest.split_whitespace(); for elem in iter { result.push_str(elem); result.push(' '); } result.pop(); if lastchar.is_whitespace() { result.push(' '); } } result } pub fn to_u(rs_str: String, default_value: u32) -> u32 { let mut result = rs_str.parse::<u32>(); if result.is_err() { result 
= Ok(default_value); } result.unwrap() } /// Combine a base URL and a link to a new absolute URL. /// If the base URL is malformed or joining with the link fails, link will be returned. /// # Examples /// ``` /// use libnewsboat::utils::absolute_url; /// assert_eq!(absolute_url("http://foobar/hello/crook/", "bar.html"), /// "http://foobar/hello/crook/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "/bar.html"), /// "https://foobar/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "http://quux/bar.html"), /// "http://quux/bar.html".to_owned()); /// assert_eq!(absolute_url("http://foobar", "bla.html"), /// "http://foobar/bla.html".to_owned()); /// assert_eq!(absolute_url("http://test:test@foobar:33", "bla2.html"), /// "http://test:test@foobar:33/bla2.html".to_owned()); /// assert_eq!(absolute_url("foo", "bar"), "bar".to_owned()); /// ``` pub fn absolute_url(base_url: &str, link: &str) -> String { Url::parse(base_url) .and_then(|url| url.join(link)) .as_ref() .map(Url::as_str) .unwrap_or(link) .to_owned() } /// Path to the home directory, if known. Doesn't work on Windows. #[cfg(not(target_os = "windows"))] pub fn home_dir() -> Option<PathBuf> { // This function got deprecated because it examines HOME environment variable even on Windows, // which is wrong. But Newsboat doesn't support Windows, so we're fine using that. // // Cf. https://github.com/rust-lang/rust/issues/28940 #[allow(deprecated)] std::env::home_dir() } /// Replaces tilde (`~`) at the beginning of the path with the path to user's home directory. pub fn resolve_tilde(path: PathBuf) -> PathBuf { if let (Some(home), Ok(suffix)) = (home_dir(), path.strip_prefix("~")) { return home.join(suffix); } // Either the `path` doesn't start with tilde, or we couldn't figure out the path to the // home directory -- either way, it's no big deal. Let's return the original string. 
path } pub fn resolve_relative(reference: &Path, path: &Path) -> PathBuf { if path.is_relative() { // Will only ever panic if reference is `/`, which shouldn't be the case as reference is // always a file path return reference.parent().unwrap().join(path); } path.to_path_buf() } pub fn is_special_url(url: &str) -> bool { is_query_url(url) || is_filter_url(url) || is_exec_url(url) } /// Check if the given URL is a http(s) URL /// # Example /// ``` /// use libnewsboat::utils::is_http_url; /// assert!(is_http_url("http://example.com")); /// ``` pub fn is_http_url(url: &str) -> bool { url.starts_with("https://") || url.starts_with("http://") } pub fn is_query_url(url: &str) -> bool { url.starts_with("query:") } pub fn is_filter_url(url: &str) -> bool { url.starts_with("filter:") } pub fn is_exec_url(url: &str) -> bool { url.starts_with("exec:") } /// Censor URLs by replacing username and password with '*' /// ``` /// use libnewsboat::utils::censor_url; /// assert_eq!(&censor_url(""), ""); /// assert_eq!(&censor_url("foobar"), "foobar"); /// assert_eq!(&censor_url("foobar://xyz/"), "foobar://xyz/"); /// assert_eq!(&censor_url("http://newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://@newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://@newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://foo:bar@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://foo:bar@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://aschas@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://aschas@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("xxx://aschas@newsbeuter.org/"), /// "xxx://*:*@newsbeuter.org/"); /// /// 
assert_eq!(&censor_url("http://foobar"), "http://foobar/"); /// assert_eq!(&censor_url("https://foobar"), "https://foobar/"); /// /// assert_eq!(&censor_url("http://aschas@host"), "http://*:*@host/"); /// assert_eq!(&censor_url("https://aschas@host"), "https://*:*@host/"); /// /// assert_eq!(&censor_url("query:name:age between 1:10"), /// "query:name:age between 1:10"); /// ``` pub fn censor_url(url: &str) -> String { if !url.is_empty() && !is_special_url(url) { Url::parse(url) .map(|mut url| { if url.username() != "" || url.password().is_some() { // can not panic. If either username or password is present we can change both. url.set_username("*").unwrap(); url.set_password(Some("*")).unwrap(); } url }) .as_ref() .map(Url::as_str) .unwrap_or(url) .to_owned() } else { url.into() } } /// Quote a string for use with stfl by replacing all occurences of "<" with "<>" /// ``` /// use libnewsboat::utils::quote_for_stfl; /// assert_eq!(&quote_for_stfl("<"), "<>"); /// assert_eq!(&quote_for_stfl("<<><><><"), "<><>><>><>><>"); /// assert_eq!(&quote_for_stfl("test"), "test"); /// ``` pub fn quote_for_stfl(string: &str) -> String { string.replace("<", "<>") } /// Get basename from a URL if available else return an empty string /// ``` /// use libnewsboat::utils::get_basename; /// assert_eq!(get_basename("https://example.com/"), ""); /// assert_eq!(get_basename("https://example.org/?param=value#fragment"), ""); /// assert_eq!(get_basename("https://example.org/path/to/?param=value#fragment"), ""); /// assert_eq!(get_basename("https://example.org/file.mp3"), "file.mp3"); /// assert_eq!(get_basename("https://example.org/path/to/file.mp3?param=value#fragment"), "file.mp3"); /// ``` pub fn get_basename(input: &str) -> String { match Url::parse(input) { Ok(url) => match url.path_segments() { Some(segments) => segments.last().unwrap().to_string(), None => String::from(""), }, Err(_) => String::from(""), } } pub fn get_default_browser() -> String { use std::env; 
env::var("BROWSER").unwrap_or_else(|_| "lynx".to_string()) } pub fn trim(rs_str: String) -> String { rs_str.trim().to_string() } pub fn trim_end(rs_str: String) -> String { let x: &[_] = &['\n', '\r']; rs_str.trim_end_matches(x).to_string() } pub fn quote(input: String) -> String { let mut input = input.replace("\"", "\\\""); input.insert(0, '"'); input.push('"'); input } pub fn quote_if_necessary(input: String) -> String { match input.find(' ') { Some(_) => quote(input), None => input, } } pub fn get_random_value(max: u32) -> u32 { rand::random::<u32>() % max } pub fn is_valid_color(color: &str) -> bool { const COLORS: [&str; 9] = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", ]; if COLORS.contains(&color) { true } else if color.starts_with("color0") { color == "color0" } else if color.starts_with("color") { let num_part = &color[5..]; num_part.parse::<u8>().is_ok() } else { false } } pub fn is_valid_attribute(attribute: &str) -> bool { const VALID_ATTRIBUTES: [&str; 9] = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; VALID_ATTRIBUTES.contains(&attribute) } pub fn strwidth(rs_str: &str) -> usize { UnicodeWidthStr::width(rs_str) } /// Returns the width of `rs_str` when displayed on screen. /// /// STFL tags (e.g. `<b>`, `<foobar>`, `</>`) are counted as having 0 width. /// Escaped less-than sign (`<` escaped as `<>`) is counted as having a width of 1 character. 
/// ``` /// use libnewsboat::utils::strwidth_stfl; /// assert_eq!(strwidth_stfl("a"), 1); /// assert_eq!(strwidth_stfl("abc<tag>def"), 6); /// assert_eq!(strwidth_stfl("less-than: <>"), 12); /// assert_eq!(strwidth_stfl("ABCDEF"), 12); ///``` pub fn strwidth_stfl(rs_str: &str) -> usize { let mut s = &rs_str[..]; let mut width = 0; loop { if let Some(pos) = s.find('<') { width += strwidth(&s[..pos]); s = &s[pos..]; if let Some(endpos) = s.find('>') { if endpos == 1 { // Found "<>" which stfl uses to encode a literal '<' width += strwidth("<"); } s = &s[endpos + 1..]; } else { // '<' without closing '>' so ignore rest of string break; } } else { width += strwidth(s); break; } } width } /// Returns a longest substring fits to the given width. /// Returns an empty string if `str` is an empty string or `max_width` is zero. /// /// Each chararacter width is calculated with UnicodeWidthChar::width. If UnicodeWidthChar::width() /// returns None, the character width is treated as 0. /// ``` /// use libnewsboat::utils::substr_with_width; /// assert_eq!(substr_with_width("a", 1), "a"); /// assert_eq!(substr_with_width("a", 2), "a"); /// assert_eq!(substr_with_width("ab", 1), "a"); /// assert_eq!(substr_with_width("abc", 1), "a"); /// assert_eq!(substr_with_width("A\u{3042}B\u{3044}C\u{3046}", 5), "A\u{3042}B") ///``` pub fn substr_with_width(string: &str, max_width: usize) -> String { let mut result = String::new(); let mut width = 0; for c in string.chars() { // Control chars count as width 0 let w = UnicodeWidthChar::width(c).unwrap_or(0); if width + w > max_width { break; } width += w; result.push(c); } result } /// Returns a longest substring fits to the given width. /// Returns an empty string if `str` is an empty string or `max_width` is zero. /// /// Each chararacter width is calculated with UnicodeWidthChar::width. If UnicodeWidthChar::width() /// returns None, the character width is treated as 0. A STFL tag (e.g. 
`<b>`, `<foobar>`, `</>`) /// width is treated as 0, but escaped less-than (`<>`) width is treated as 1. /// ``` /// use libnewsboat::utils::substr_with_width_stfl; /// assert_eq!(substr_with_width_stfl("a", 1), "a"); /// assert_eq!(substr_with_width_stfl("a", 2), "a"); /// assert_eq!(substr_with_width_stfl("ab", 1), "a"); /// assert_eq!(substr_with_width_stfl("abc", 1), "a"); /// assert_eq!(substr_with_width_stfl("A\u{3042}B\u{3044}C\u{3046}", 5), "A\u{3042}B") ///``` pub fn substr_with_width_stfl(string: &str, max_width: usize) -> String { let mut result = String::new(); let mut in_bracket = false; let mut tagbuf = Vec::<char>::new(); let mut width = 0; for c in string.chars() { if in_bracket { tagbuf.push(c); if c == '>' { in_bracket = false; if tagbuf == ['<', '>'] { if width + 1 > max_width { break; } result += "<>"; // escaped less-than tagbuf.clear(); width += 1; } else { result += &tagbuf.iter().collect::<String>(); tagbuf.clear(); } } } else if c == '<' { in_bracket = true; tagbuf.push(c); } else { // Control chars count as width 0 let w = UnicodeWidthChar::width(c).unwrap_or(0); if width + w > max_width { break; } width += w; result.push(c); } } result } /// Remove all soft-hyphens as they can behave unpredictably (see /// https://github.com/akrennmair/newsbeuter/issues/259#issuecomment-259609490) and inadvertently /// render as hyphens pub fn remove_soft_hyphens(text: &mut String) { text.retain(|c| c != '\u{00AD}') } /// An array of "MIME matchers" and their associated LinkTypes /// /// This is used for two tasks: /// /// 1. checking if a MIME type is a podcast type (`utils::is_valid_podcast_type`). That involves /// running all matching functions on given input and checking if any of them returned `true`; /// /// 2. figuring out the `LinkType` for a particular enclosure, given its MIME type /// (`utils::podcast_mime_to_link_type`). 
type MimeMatcher = (fn(&str) -> bool, htmlrenderer::LinkType); const PODCAST_MIME_TO_LINKTYPE: [MimeMatcher; 2] = [ ( |mime| { // RFC 5334, section 10.1 says "historically, some implementations expect .ogg files to be // solely Vorbis-encoded audio", so let's assume it's audio, not video. // https://tools.ietf.org/html/rfc5334#section-10.1 mime.starts_with("audio/") || mime == "application/ogg" }, htmlrenderer::LinkType::Audio, ), ( |mime| mime.starts_with("video/"), htmlrenderer::LinkType::Video, ), ]; /// Returns `true` if given MIME type is considered to be a podcast by Newsboat. pub fn is_valid_podcast_type(mimetype: &str) -> bool { PODCAST_MIME_TO_LINKTYPE .iter() .any(|(matcher, _)| matcher(mimetype)) } /// Converts podcast's MIME type into an HtmlRenderer's "link type" /// /// Returns None if given MIME type is not a podcast type. See `is_valid_podcast_type()`. pub fn podcast_mime_to_link_type(mime_type: &str) -> Option<htmlrenderer::LinkType> { PODCAST_MIME_TO_LINKTYPE .iter() .find_map(|(matcher, link_type)| { if matcher(mime_type) { Some(*link_type) } else { None } }) } pub fn get_auth_method(method: &str) -> c_ulong { match method { "basic" => curl_sys::CURLAUTH_BASIC, "digest" => curl_sys::CURLAUTH_DIGEST, "digest_ie" => curl_sys::CURLAUTH_DIGEST_IE, "gssnegotiate" => curl_sys::CURLAUTH_GSSNEGOTIATE, "ntlm" => curl_sys::CURLAUTH_NTLM, "anysafe" => curl_sys::CURLAUTH_ANYSAFE, "any" | "" => curl_sys::CURLAUTH_ANY, _ => { log!( Level::UserError, "utils::get_auth_method: you configured an invalid proxy authentication method: {}", method ); curl_sys::CURLAUTH_ANY } } } pub fn unescape_url(rs_str: String) -> Option<String> { let decoded = percent_decode(rs_str.as_bytes()).decode_utf8(); decoded.ok().map(|s| s.replace("\0", "")) } /// Runs given command in a shell, and returns the output (from stdout; stderr is printed to the /// screen). 
pub fn get_command_output(cmd: &str) -> String { let cmd = Command::new("sh") .arg("-c") .arg(cmd) // Inherit stdin so that the program can ask something of the user (see // https://github.com/newsboat/newsboat/issues/455 for an example). .stdin(Stdio::inherit()) .output(); // from_utf8_lossy will convert any bad bytes to U+FFFD cmd.map(|cmd| String::from_utf8_lossy(&cmd.stdout).into_owned()) .unwrap_or_else(|_| String::from("")) } // This function assumes that the user is not interested in command's output (not even errors on // stderr!), so it redirects everything to /dev/null. pub fn run_command(cmd: &str, param: &str) { let child = Command::new(cmd) .arg(param) // Prevent the command from blocking Newsboat by asking for input .stdin(Stdio::null()) // Prevent the command from botching the screen by printing onto it. .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn(); if let Err(error) = child { log!( Level::Debug, "utils::run_command: spawning a child for \"{}\" failed: {}", cmd, error ); } // We deliberately *don't* wait for the child to finish. 
} pub fn run_program(cmd_with_args: &[&str], input: &str) -> String { if cmd_with_args.is_empty() { return String::new(); } Command::new(cmd_with_args[0]) .args(&cmd_with_args[1..]) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() .map_err(|error| { log!( Level::Debug, "utils::run_program: spawning a child for \"{:?}\" \ with input \"{}\" failed: {}", cmd_with_args, input, error ); }) .and_then(|mut child| { if let Some(stdin) = child.stdin.as_mut() { if let Err(error) = stdin.write_all(input.as_bytes()) { log!( Level::Debug, "utils::run_program: failed to write to child's stdin: {}", error ); } } child .wait_with_output() .map_err(|error| { log!( Level::Debug, "utils::run_program: failed to read child's stdout: {}", error ); }) .map(|output| String::from_utf8_lossy(&output.stdout).into_owned()) }) .unwrap_or_else(|_| String::new()) } pub fn make_title(rs_str: String) -> String { /* Sometimes it is possible to construct the title from the URL * This attempts to do just that. eg: * http://domain.com/story/yy/mm/dd/title-with-dashes?a=b */ // Strip out trailing slashes let mut result = rs_str.trim_end_matches('/'); // get to the final part of the URI's path and // extract just the juicy part 'title-with-dashes?a=b' let v: Vec<&str> = result.rsplitn(2, '/').collect(); result = v[0]; // find where query part of URI starts // throw away the query part 'title-with-dashes' let v: Vec<&str> = result.splitn(2, '?').collect(); result = v[0]; // Throw away common webpage suffixes: .html, .php, .aspx, .htm result = result .trim_end_matches(".html") .trim_end_matches(".php") .trim_end_matches(".aspx") .trim_end_matches(".htm"); // 'title with dashes' let result = result.replace('-', " ").replace('_', " "); //'Title with dashes' //let result = ""; let mut c = result.chars(); let result = match c.next() { None => String::new(), Some(f) => f.to_uppercase().collect::<String>() + c.as_str(), }; // Un-escape any percent-encoding, e.g. 
"It%27s%202017%21" -> "It's // 2017!" match unescape_url(result) { None => String::new(), Some(f) => f, } } /// Run the given command interactively with inherited stdin and stdout/stderr. Return the lowest /// 8 bits of its exit code, or `None` if the command failed to start. /// ``` /// use libnewsboat::utils::run_interactively; /// /// let result = run_interactively("echo true", "test"); /// assert_eq!(result, Some(0)); /// /// let result = run_interactively("exit 1", "test"); /// assert_eq!(result, Some(1)); /// /// // Unfortunately, there is no easy way to provoke this function to return `None`, nor to test /// // that it returns just the lowest 8 bits. /// ``` pub fn run_interactively(command: &str, caller: &str) -> Option<u8> { log!(Level::Debug, &format!("{}: running `{}'", caller, command)); Command::new("sh") .arg("-c") .arg(command) .status() .map_err(|err| { log!( Level::Warn, &format!("{}: Couldn't create child process: {}", caller, err) ) }) .ok() .and_then(|exit_status| exit_status.code()) .map(|exit_code| exit_code as u8) } /// Get the current working directory. pub fn getcwd() -> Result<PathBuf, io::Error> { use std::env; env::current_dir() } pub fn strnaturalcmp(a: &str, b: &str) -> std::cmp::Ordering { natord::compare(a, b) } /// Calculate the number of padding tabs when formatting columns /// /// The number of tabs will be adjusted by the width of the given string. Usually, a column will /// consist of 4 tabs, 8 characters each. Each column will consist of at least one tab. 
/// /// ``` /// use libnewsboat::utils::gentabs; /// /// fn genstring(len: usize) -> String { /// return std::iter::repeat("a").take(len).collect::<String>(); /// } /// /// assert_eq!(gentabs(""), 4); /// assert_eq!(gentabs("a"), 4); /// assert_eq!(gentabs("aa"), 4); /// assert_eq!(gentabs("aaa"), 4); /// assert_eq!(gentabs("aaaa"), 4); /// assert_eq!(gentabs("aaaaa"), 4); /// assert_eq!(gentabs("aaaaaa"), 4); /// assert_eq!(gentabs("aaaaaaa"), 4); /// assert_eq!(gentabs("aaaaaaaa"), 3); /// assert_eq!(gentabs(&genstring(8)), 3); /// assert_eq!(gentabs(&genstring(9)), 3); /// assert_eq!(gentabs(&genstring(15)), 3); /// assert_eq!(gentabs(&genstring(16)), 2); /// assert_eq!(gentabs(&genstring(20)), 2); /// assert_eq!(gentabs(&genstring(24)), 1); /// assert_eq!(gentabs(&genstring(32)), 1); /// assert_eq!(gentabs(&genstring(100)), 1); /// ``` pub fn gentabs(string: &str) -> usize { let tabcount = strwidth(string) / 8; if tabcount >= 4 { 1 } else { 4 - tabcount } } /// Recursively create directories if missing and set permissions accordingly. pub fn mkdir_parents<R: AsRef<Path>>(p: &R, mode: u32) -> io::Result<()> { DirBuilder::new() .mode(mode) .recursive(true) // directories created with same security and permissions .create(p.as_ref()) } /// The tag and Git commit ID the program was built from, or a pre-defined value from config.h if /// there is no Git directory. pub fn program_version() -> String { // NEWSBOAT_VERSION is set by this crate's build script, "build.rs" env!("NEWSBOAT_VERSION").to_string() } /// Newsboat's major version number. pub fn newsboat_major_version() -> u32 { // This will panic if the version couldn't be parsed, which is virtually impossible as Cargo // won't even start compilation if it couldn't parse the version. env!("CARGO_PKG_VERSION_MAJOR").parse::<u32>().unwrap() } /// Returns the part of the string before first # character (or the whole input string if there are /// no # character in it). 
Pound characters inside double quotes and backticks are ignored. pub fn strip_comments(line: &str) -> &str { let mut prev_was_backslash = false; let mut inside_quotes = false; let mut inside_backticks = false; let mut first_pound_chr_idx = line.len(); for (idx, chr) in line.char_indices() { match chr { '\\' => { prev_was_backslash = true; continue; } '"' => { // If the quote is escaped or we're inside backticks, do nothing if !prev_was_backslash && !inside_backticks { inside_quotes = !inside_quotes; } } '`' => { // If the backtick is escaped, do nothing if !prev_was_backslash { inside_backticks = !inside_backticks; } } '#' => { if !prev_was_backslash && !inside_quotes && !inside_backticks { first_pound_chr_idx = idx; break; } } _ => {} } // We call `continue` when we run into a backslash; here, we handle all the other // characters, which clearly *aren't* a backslash prev_was_backslash = false; } &line[0..first_pound_chr_idx] } /// Extract filter and url from line separated by ':'. pub fn extract_filter(line: &str) -> (&str, &str) { debug_assert!(line.starts_with("filter:")); // line must start with "filter:" let line = line.get("filter:".len()..).unwrap(); let (filter, url) = line.split_at(line.find(':').unwrap_or(0)); let url = url.get(1..).unwrap_or(""); log!( Level::Debug, "utils::extract_filter: {} -> filter: {} url: {}", line, filter, url ); (filter, url) } #[cfg(test)] mod tests { use super::*; #[test] fn t_replace_all() { assert_eq!( replace_all(String::from("aaa"), "a", "b"), String::from("bbb") ); assert_eq!( replace_all(String::from("aaa"), "aa", "ba"), String::from("baa") ); assert_eq!( replace_all(String::from("aaaaaa"), "aa", "ba"), String::from("bababa") ); assert_eq!(replace_all(String::new(), "a", "b"), String::new()); let input = String::from("aaaa"); assert_eq!(replace_all(input.clone(), "b", "c"), input); assert_eq!( replace_all(String::from("this is a normal test text"), " t", " T"), String::from("this is a normal Test Text") ); assert_eq!( 
replace_all(String::from("o o o"), "o", "<o>"), String::from("<o> <o> <o>") ); } #[test] fn t_consolidate_whitespace() { assert_eq!( consolidate_whitespace(String::from("LoremIpsum")), String::from("LoremIpsum") ); assert_eq!( consolidate_whitespace(String::from("Lorem Ipsum")), String::from("Lorem Ipsum") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); assert_eq!(consolidate_whitespace(String::new()), String::new()); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); } #[test] fn t_to_u() { assert_eq!(to_u(String::from("0"), 10), 0); assert_eq!(to_u(String::from("23"), 1), 23); assert_eq!(to_u(String::from(""), 0), 0); assert_eq!(to_u(String::from("zero"), 1), 1); } #[test] fn t_is_special_url() { assert!(is_special_url("query:")); assert!(is_special_url("query: example")); assert!(!is_special_url("query")); assert!(!is_special_url(" query:")); assert!(is_special_url("filter:")); assert!(is_special_url("filter: example")); assert!(!is_special_url("filter")); assert!(!is_special_url(" filter:")); assert!(is_special_url("exec:")); assert!(is_special_url("exec: example")); assert!(!is_special_url("exec")); assert!(!is_special_url(" exec:")); } #[test] fn t_is_http_url() { assert!(is_http_url("https://foo.bar")); assert!(is_http_url("http://")); assert!(is_http_url("https://")); assert!(!is_http_url("htt://foo.bar")); assert!(!is_http_url("http:/")); assert!(!is_http_url("foo://bar")); } #[test] fn t_is_query_url() { assert!(is_query_url("query:")); assert!(is_query_url("query: example")); assert!(!is_query_url("query")); assert!(!is_query_url(" query:")); } #[test] fn t_is_filter_url() { assert!(is_filter_url("filter:")); 
assert!(is_filter_url("filter: example")); assert!(!is_filter_url("filter")); assert!(!is_filter_url(" filter:")); } #[test] fn t_is_exec_url() { assert!(is_exec_url("exec:")); assert!(is_exec_url("exec: example")); assert!(!is_exec_url("exec")); assert!(!is_exec_url(" exec:")); } #[test] fn t_trim() { assert_eq!(trim(String::from(" xxx\r\n")), "xxx"); assert_eq!(trim(String::from("\n\n abc foobar\n")), "abc foobar"); assert_eq!(trim(String::from("")), ""); assert_eq!(trim(String::from(" \n")), ""); } #[test] fn t_trim_end() { assert_eq!(trim_end(String::from("quux\n")), "quux"); } #[test] fn t_quote() { assert_eq!(quote("".to_string()), "\"\""); assert_eq!(quote("Hello World!".to_string()), "\"Hello World!\""); assert_eq!( quote("\"Hello World!\"".to_string()), "\"\\\"Hello World!\\\"\"" ); } #[test] fn t_quote_if_necessary() { assert_eq!(quote_if_necessary("".to_string()), ""); assert_eq!( quote_if_necessary("Hello World!".to_string()), "\"Hello World!\"" ); } #[test] fn t_is_valid_color() { let invalid = [ "awesome", "list", "of", "things", "that", "aren't", "colors", "color0123", "color1024", ]; for color in &invalid { assert!(!is_valid_color(color)); } let valid = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", "color0", "color163", ]; for color in &valid { assert!(is_valid_color(color)); } } #[test] fn t_strwidth() { assert_eq!(strwidth(""), 0); assert_eq!(strwidth("xx"), 2); assert_eq!(strwidth("\u{F91F}"), 2); assert_eq!(strwidth("\u{0007}"), 0); } #[test] fn t_strwidth_stfl() { assert_eq!(strwidth_stfl(""), 0); assert_eq!(strwidth_stfl("x<hi>x"), 2); assert_eq!(strwidth_stfl("x<longtag>x</>"), 2); assert_eq!(strwidth_stfl("x<>x"), 3); assert_eq!(strwidth_stfl("x<>y<>z"), 5); assert_eq!(strwidth_stfl("x<>hi>x"), 6); assert_eq!(strwidth_stfl("\u{F91F}"), 2); assert_eq!(strwidth_stfl("\u{0007}"), 0); assert_eq!(strwidth_stfl("<a"), 0); // #415 } #[test] fn t_substr_with_width_given_string_empty() { 
assert_eq!(substr_with_width("", 0), ""); assert_eq!(substr_with_width("", 1), ""); } #[test] fn t_substr_with_width_max_width_zero() { assert_eq!(substr_with_width("world", 0), ""); assert_eq!(substr_with_width("", 0), ""); } #[test] fn t_substr_with_width_max_width_dont_split_codepoints() { assert_eq!(substr_with_width("ABCDEF", 9), "ABCD"); assert_eq!(substr_with_width("ABC", 4), "AB"); assert_eq!(substr_with_width("a>bcd", 3), "a>b"); assert_eq!(substr_with_width("ABCDE", 10), "ABCDE"); assert_eq!(substr_with_width("abc", 2), "ab"); } #[test] fn t_substr_with_width_max_width_does_count_stfl_tag() { assert_eq!(substr_with_width("ABC<b>DE</b>F", 9), "ABC<b>"); assert_eq!(substr_with_width("<foobar>ABC", 4), "<foo"); assert_eq!(substr_with_width("a<<xyz>>bcd", 3), "a<<"); assert_eq!(substr_with_width("ABC<b>DE", 10), "ABC<b>"); assert_eq!(substr_with_width("a</>b</>c</>", 2), "a<"); } #[test] fn t_substr_with_width_max_width_count_marks_as_regular_characters() { assert_eq!(substr_with_width("<><><>", 2), "<>"); assert_eq!(substr_with_width("a<>b<>c", 3), "a<>"); } #[test] fn t_substr_with_width_max_width_non_printable() { assert_eq!(substr_with_width("\x01\x02abc", 1), "\x01\x02a"); } #[test] fn t_substr_with_width_stfl_given_string_empty() { assert_eq!(substr_with_width_stfl("", 0), ""); assert_eq!(substr_with_width_stfl("", 1), ""); } #[test] fn t_substr_with_width_stfl_max_width_zero() { assert_eq!(substr_with_width_stfl("world", 0), ""); assert_eq!(substr_with_width_stfl("", 0), ""); } #[test] fn t_substr_with_width_stfl_max_width_dont_split_codepoints() { assert_eq!( substr_with_width_stfl("ABC<b>DE</b>F", 9), "ABC<b>D" ); assert_eq!(substr_with_width_stfl("<foobar>ABC", 4), "<foobar>AB"); assert_eq!(substr_with_width_stfl("a<<xyz>>bcd", 3), "a<<xyz>>b"); // tag: "<<xyz>" assert_eq!(substr_with_width_stfl("ABC<b>DE", 10), "ABC<b>DE"); assert_eq!(substr_with_width_stfl("a</>b</>c</>", 2), "a</>b</>"); } #[test] fn 
t_substr_with_width_stfl_max_width_do_not_count_stfl_tag() { assert_eq!( substr_with_width_stfl("ABC<b>DE</b>F", 9), "ABC<b>D" ); assert_eq!(substr_with_width_stfl("<foobar>ABC", 4), "<foobar>AB"); assert_eq!(substr_with_width_stfl("a<<xyz>>bcd", 3), "a<<xyz>>b"); // tag: "<<xyz>" assert_eq!(substr_with_width_stfl("ABC<b>DE", 10), "ABC<b>DE"); assert_eq!(substr_with_width_stfl("a</>b</>c</>", 2), "a</>b</>"); } #[test] fn t_substr_with_width_stfl_max_width_count_escaped_less_than_mark() { assert_eq!(substr_with_width_stfl("<><><>", 2), "<><>"); assert_eq!(substr_with_width_stfl("a<>b<>c", 3), "a<>b"); } #[test] fn t_substr_with_width_stfl_max_width_non_printable() { assert_eq!(substr_with_width_stfl("\x01\x02abc", 1), "\x01\x02a"); } #[test] fn t_is_valid_podcast_type() { assert!(is_valid_podcast_type("audio/mpeg")); assert!(is_valid_podcast_type("audio/mp3")); assert!(is_valid_podcast_type("audio/x-mp3")); assert!(is_valid_podcast_type("audio/ogg")); assert!(is_valid_podcast_type("video/x-matroska")); assert!(is_valid_podcast_type("video/webm")); assert!(is_valid_podcast_type("application/ogg")); assert!(!is_valid_podcast_type("image/jpeg")); assert!(!is_valid_podcast_type("image/png")); assert!(!is_valid_podcast_type("text/plain")); assert!(!is_valid_podcast_type("application/zip")); } #[test] fn t_podcast_mime_to_link_type() { use crate::htmlrenderer::LinkType::*; assert_eq!(podcast_mime_to_link_type("audio/mpeg"), Some(Audio)); assert_eq!(podcast_mime_to_link_type("audio/mp3"), Some(Audio)); assert_eq!(podcast_mime_to_link_type("audio/x-mp3"), Some(Audio)); assert_eq!(podcast_mime_to_link_type("audio/ogg"), Some(Audio)); assert_eq!(podcast_mime_to_link_type("video/x-matroska"), Some(Video)); assert_eq!(podcast_mime_to_link_type("video/webm"), Some(Video)); assert_eq!(podcast_mime_to_link_type("application/ogg"), Some(Audio)); assert_eq!(podcast_mime_to_link_type("image/jpeg"), None); assert_eq!(podcast_mime_to_link_type("image/png"), None); 
assert_eq!(podcast_mime_to_link_type("text/plain"), None); assert_eq!(podcast_mime_to_link_type("application/zip"), None); } #[test] fn t_is_valid_attribte() { let invalid = ["foo", "bar", "baz", "quux"]; for attr in &invalid { assert!(!is_valid_attribute(attr)); } let valid = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; for attr in &valid { assert!(is_valid_attribute(attr)); } } #[test] fn t_get_auth_method() { assert_eq!(get_auth_method("any"), curl_sys::CURLAUTH_ANY); assert_eq!(get_auth_method("ntlm"), curl_sys::CURLAUTH_NTLM); assert_eq!(get_auth_method("basic"), curl_sys::CURLAUTH_BASIC); assert_eq!(get_auth_method("digest"), curl_sys::CURLAUTH_DIGEST); assert_eq!(get_auth_method("digest_ie"), curl_sys::CURLAUTH_DIGEST_IE); assert_eq!( get_auth_method("gssnegotiate"), curl_sys::CURLAUTH_GSSNEGOTIATE ); assert_eq!(get_auth_method("anysafe"), curl_sys::CURLAUTH_ANYSAFE); assert_eq!(get_auth_method(""), curl_sys::CURLAUTH_ANY); assert_eq!(get_auth_method("unknown"), curl_sys::CURLAUTH_ANY); } #[test] fn t_unescape_url() { assert!(unescape_url(String::from("foo%20bar")).unwrap() == "foo bar"); assert!( unescape_url(String::from( "%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D" )) .unwrap() == "!#$&'()*+,/:;=?@[]" ); } #[test] fn t_get_command_output() { assert_eq!( get_command_output("ls /dev/null"), "/dev/null\n".to_string() ); assert_eq!( get_command_output("a-program-that-is-guaranteed-to-not-exists"), "".to_string() ); assert_eq!(get_command_output("echo c\" d e"), "".to_string()); } #[test] fn t_run_command_executes_given_command_with_given_argument() { use std::{thread, time}; use tempfile::TempDir; let tmp = TempDir::new().unwrap(); let filepath = { let mut filepath = tmp.path().to_owned(); filepath.push("sentry"); filepath }; assert!(!filepath.exists()); run_command("touch", filepath.to_str().unwrap()); // Busy-wait for 100 tries of 10 milliseconds each, waiting for `touch` to // create the file. 
Usually it happens quickly, and the loop exists on the // first try; but sometimes on CI it takes longer for `touch` to finish, so // we need a slightly longer wait. for _ in 0..100 { thread::sleep(time::Duration::from_millis(10)); if filepath.exists() { break; } } assert!(filepath.exists()); } #[test] fn t_run_command_doesnt_wait_for_the_command_to_finish() { use std::time::{Duration, Instant}; let start = Instant::now(); let five: &str = "5"; run_command("sleep", five); let runtime = start.elapsed(); assert!(runtime < Duration::from_secs(1)); } #[test] fn t_run_program() { let input1 = "this is a multine-line\ntest string"; assert_eq!(run_program(&["cat"], input1), input1); assert_eq!( run_program(&["echo", "-n", "hello world"], ""), "hello world" ); } #[test] fn t_make_title() { let mut input = String::from("http://example.com/Item"); assert_eq!(make_title(input), String::from("Item")); input = String::from("http://example.com/This-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is_the_title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.php"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.html"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.htm"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.aspx"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = 
String::from("http://example.com/items/misc/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/item/"); assert_eq!(make_title(input), String::from("Item")); input = String::from("http://example.com/item/////////////"); assert_eq!(make_title(input), String::from("Item")); input = String::from("blahscheme://example.com/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/story/aug/title-with-dashes?a=b"); assert_eq!(make_title(input), String::from("Title with dashes")); input = String::from("http://example.com/title-with-dashes?a=b&x=y&utf8=✓"); assert_eq!(make_title(input), String::from("Title with dashes")); input = String::from("https://example.com/It%27s%202017%21"); assert_eq!(make_title(input), String::from("It's 2017!")); input = String::from("https://example.com/?format=rss"); assert_eq!(make_title(input), String::from("")); assert_eq!(make_title(String::from("")), String::from("")); } #[test] fn t_resolve_relative() { assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("/baz")), Path::new("/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("/config/baz")), Path::new("/config/baz") ); assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("baz")), Path::new("/foo/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("baz")), Path::new("/baz") ); } #[test] fn t_remove_soft_hyphens_removes_all_00ad_unicode_chars_from_a_string() { { // Does nothing if input has no soft hyphens in it let mut input1 = "hello world!".to_string(); remove_soft_hyphens(&mut input1); assert_eq!(input1, "hello world!"); } { // Removes *all* soft hyphens let mut data = "hy\u{00AD}phen\u{00AD}a\u{00AD}tion".to_string(); remove_soft_hyphens(&mut data); assert_eq!(data, "hyphenation"); } { // Removes consecutive soft hyphens let mut data = "don't know why any\u{00AD}\u{00AD}one would do 
that".to_string(); remove_soft_hyphens(&mut data); assert_eq!(data, "don't know why anyone would do that"); } { // Removes soft hyphen at the beginning of the line let mut data = "\u{00AD}tion".to_string(); remove_soft_hyphens(&mut data); assert_eq!(data, "tion"); } { // Removes soft hyphen at the end of the line let mut data = "over\u{00AD}".to_string(); remove_soft_hyphens(&mut data); assert_eq!(data, "over"); } } #[test] fn t_mkdir_parents() { use std::fs; use std::os::unix::fs::PermissionsExt; use tempfile::TempDir; let mode: u32 = 0o700; let tmp_dir = TempDir::new().unwrap(); let path = tmp_dir.path().join("parent/dir"); assert_eq!(path.exists(), false); let result = mkdir_parents(&path, mode); assert!(result.is_ok()); assert_eq!(path.exists(), true); let file_type_mask = 0o7777; let metadata = fs::metadata(&path).unwrap(); assert_eq!(file_type_mask & metadata.permissions().mode(), mode); // rerun on existing directories let result = mkdir_parents(&path, mode); assert!(result.is_ok()); } #[test] fn t_strnaturalcmp() { use std::cmp::Ordering; assert_eq!(strnaturalcmp("", ""), Ordering::Equal); assert_eq!(strnaturalcmp("", "a"), Ordering::Less); assert_eq!(strnaturalcmp("a", ""), Ordering::Greater); assert_eq!(strnaturalcmp("a", "a"), Ordering::Equal); assert_eq!(strnaturalcmp("", "9"), Ordering::Less); assert_eq!(strnaturalcmp("9", ""), Ordering::Greater); assert_eq!(strnaturalcmp("1", "1"), Ordering::Equal); assert_eq!(strnaturalcmp("1", "2"), Ordering::Less); assert_eq!(strnaturalcmp("3", "2"), Ordering::Greater); assert_eq!(strnaturalcmp("a1", "a1"), Ordering::Equal); assert_eq!(strnaturalcmp("a1", "a2"), Ordering::Less); assert_eq!(strnaturalcmp("a2", "a1"), Ordering::Greater); assert_eq!(strnaturalcmp("a1a2", "a1a3"), Ordering::Less); assert_eq!(strnaturalcmp("a1a2", "a1a0"), Ordering::Greater); assert_eq!(strnaturalcmp("134", "122"), Ordering::Greater); assert_eq!(strnaturalcmp("12a3", "12a3"), Ordering::Equal); assert_eq!(strnaturalcmp("12a1", "12a0"), 
Ordering::Greater); assert_eq!(strnaturalcmp("12a1", "12a2"), Ordering::Less); assert_eq!(strnaturalcmp("a", "aa"), Ordering::Less); assert_eq!(strnaturalcmp("aaa", "aa"), Ordering::Greater); assert_eq!(strnaturalcmp("Alpha 2", "Alpha 2"), Ordering::Equal); assert_eq!(strnaturalcmp("Alpha 2", "Alpha 2A"), Ordering::Less); assert_eq!(strnaturalcmp("Alpha 2 B", "Alpha 2"), Ordering::Greater); assert_eq!(strnaturalcmp("aa10", "aa2"), Ordering::Greater); } #[test] fn t_strip_comments() { // no comments in line assert_eq!(strip_comments(""), ""); assert_eq!(strip_comments("\t\n"), "\t\n"); assert_eq!(strip_comments("some directive "), "some directive "); // fully commented line assert_eq!(strip_comments("#"), ""); assert_eq!(strip_comments("# #"), ""); assert_eq!(strip_comments("# comment"), ""); // partially commented line assert_eq!(strip_comments("directive # comment"), "directive "); assert_eq!( strip_comments("directive # comment # another"), "directive " ); assert_eq!(strip_comments("directive#comment"), "directive"); // ignores # characters inside double quotes (#652) let expected = r#"highlight article "[-=+#_*~]{3,}.*" green default"#; let input = expected.to_owned() + "# this is a comment"; assert_eq!(strip_comments(&input), expected); let expected = r#"highlight all "(https?|ftp)://[\-\.,/%~_:?&=\#a-zA-Z0-9]+" blue default bold"#; let input = expected.to_owned() + "#heresacomment"; assert_eq!(strip_comments(&input), expected); // Escaped double quote inside double quotes is not treated as closing quote let expected = r#"test "here \"goes # nothing\" etc" hehe"#; let input = expected.to_owned() + "# and here is a comment"; assert_eq!(strip_comments(&input), expected); // Ignores # characters inside backticks let expected = r#"one `two # three` four"#; let input = expected.to_owned() + "# and a comment, of course"; assert_eq!(strip_comments(&input), expected); // Escaped backtick inside backticks is not treated as closing let expected = r#"some `other \` tricky 
# test` hehe"#; let input = expected.to_owned() + "#here goescomment"; assert_eq!(strip_comments(&input), expected); // Ignores escaped # characters (\\#) let expected = r#"one two \# three four"#; let input = expected.to_owned() + "# and a comment"; assert_eq!(strip_comments(&input), expected); } #[test] fn t_extract_filter() { let expected = ("~/bin/script.sh", "https://newsboat.org"); let input = "filter:~/bin/script.sh:https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("", "https://newsboat.org"); let input = "filter::https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("https", "//newsboat.org"); let input = "filter:https://newsboat.org"; assert_eq!(extract_filter(input), expected); let expected = ("foo", ""); let input = "filter:foo:"; assert_eq!(extract_filter(input), expected); let expected = ("", ""); let input = "filter:"; assert_eq!(extract_filter(input), expected); } }
// This file is heavily derived from Rust's stdlib, and therefore // retains the copyright notice below // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::string::String; static ASCII_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'[', b'\\', b']', b'^', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub static RFC1459_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'^', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub static STRICT_RFC1459_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 
0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'^', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub trait IrcAsciiExt<T> for Sized? { /// Makes a copy of the string in IRC ASCII lower case: /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', /// and "[]\\~" are mapped to "{}|\^" respectively, /// but all other characters are unchanged. fn to_irc_lower(&self) -> T; /// Check that two strings are an ASCII case-insensitive match. /// Same as `to_irc_lower(a) == to_irc_lower(b)`, /// but without allocating and copying temporary strings. 
fn eq_ignore_irc_case(&self, other: &Self) -> bool; } pub trait OwnedIrcAsciiExt { fn into_irc_lower(self) -> Self; } impl IrcAsciiExt<Vec<u8>> for [u8] { #[inline] fn to_irc_lower(&self) -> Vec<u8> { let lower_map = RFC1459_LOWER_MAP; self.iter().map(|&byte| lower_map[byte as uint]).collect() } #[inline] fn eq_ignore_irc_case(&self, other: &[u8]) -> bool { let lower_map = RFC1459_LOWER_MAP; self.len() == other.len() && self.iter().zip(other.iter()).all( |(byte_self, byte_other)| { lower_map[*byte_self as uint] == lower_map[*byte_other as uint] }) } } impl OwnedIrcAsciiExt for Vec<u8> { #[inline] fn into_irc_lower(mut self) -> Vec<u8> { let lower_map = RFC1459_LOWER_MAP; for byte in self.iter_mut() { *byte = lower_map[*byte as uint]; } self } } impl IrcAsciiExt<String> for str { #[inline] fn to_irc_lower(&self) -> String { // Vec<u8>::to_irc_lower() preserves the UTF-8 invariant. unsafe { String::from_utf8_unchecked(self.as_bytes().to_irc_lower()) } } #[inline] fn eq_ignore_irc_case(&self, other: &str) -> bool { self.as_bytes().eq_ignore_irc_case(other.as_bytes()) } } impl OwnedIrcAsciiExt for String { #[inline] fn into_irc_lower(self) -> String { // Vec<u8>::into_irc_lower() preserves the UTF-8 invariant. unsafe { String::from_utf8_unchecked(self.into_bytes().into_irc_lower()) } } } enum CaseMapping { Ascii, Rfc1459, StrictRfc1459, } impl CaseMapping { pub fn get_lower_map(&self) -> &[u8] { match *self { CaseMapping::Ascii => ASCII_LOWER_MAP.as_slice(), CaseMapping::Rfc1459 => RFC1459_LOWER_MAP.as_slice(), CaseMapping::StrictRfc1459 => STRICT_RFC1459_LOWER_MAP.as_slice(), } } pub fn to_irc_lower<Sized? T>(&self, left: &T) -> Vec<u8> where T: ToByteSlice { let lower_map = self.get_lower_map(); left.to_byte_slice().iter().map(|&byte| lower_map[byte as uint]).collect() } pub fn eq_ignore_case<Sized? 
T>(&self, left: &T, right: &T) -> bool where T: ToByteSlice { let lower_map = self.get_lower_map(); let left = left.to_byte_slice(); let right = right.to_byte_slice(); left.len() == right.len() && left.iter().zip(right.iter()).all( |(byte_self, byte_other)| { lower_map[*byte_self as uint] == lower_map[*byte_other as uint] }) } } trait ToByteSlice for Sized? { fn to_byte_slice<'a>(&'a self) -> &'a [u8]; } impl ToByteSlice for str { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self.as_bytes() } } impl ToByteSlice for [u8] { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self } } #[test] fn test_old_basics() { assert!("[".eq_ignore_irc_case("{")); assert!("]".eq_ignore_irc_case("}")); assert!("\\".eq_ignore_irc_case("|")); assert!("~".eq_ignore_irc_case("^")); assert_eq!("[".to_irc_lower()[], "{"); assert_eq!("]".to_irc_lower()[], "}"); assert_eq!("\\".to_irc_lower()[], "|"); assert_eq!("~".to_irc_lower()[], "^"); assert_eq!("~".to_string().into_irc_lower()[], "^"); } #[test] fn test_basics() { assert!(CaseMapping::Ascii.eq_ignore_case("A", "a")); assert!(!CaseMapping::Ascii.eq_ignore_case("[", "{")); assert!(!CaseMapping::Ascii.eq_ignore_case("\\", "|")); assert!(!CaseMapping::Ascii.eq_ignore_case("]", "}")); assert!(!CaseMapping::Ascii.eq_ignore_case("^", "~")); assert!(CaseMapping::Rfc1459.eq_ignore_case("A", "a")); assert!(CaseMapping::Rfc1459.eq_ignore_case("[", "{")); assert!(CaseMapping::Rfc1459.eq_ignore_case("\\", "|")); assert!(CaseMapping::Rfc1459.eq_ignore_case("]", "}")); assert!(CaseMapping::Rfc1459.eq_ignore_case("^", "~")); assert!(CaseMapping::StrictRfc1459.eq_ignore_case("A", "a")); assert!(CaseMapping::StrictRfc1459.eq_ignore_case("[", "{")); assert!(CaseMapping::StrictRfc1459.eq_ignore_case("\\", "|")); assert!(CaseMapping::StrictRfc1459.eq_ignore_case("]", "}")); assert!(!CaseMapping::StrictRfc1459.eq_ignore_case("^", "~")); assert_eq!( CaseMapping::Ascii.to_irc_lower("A[]\\^Z"), b"a[]\\^z".to_vec()); assert_eq!( 
CaseMapping::Rfc1459.to_irc_lower("A[]\\^Z"), b"a{}|~z".to_vec()); assert_eq!( CaseMapping::StrictRfc1459.to_irc_lower("A[]\\^Z"), b"a{}|^z".to_vec()); } Fix broken case mapping // This file is heavily derived from Rust's stdlib, and therefore // retains the copyright notice below // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::string::String; use std::default::Default; static ASCII_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'[', b'\\', b']', b'^', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub static RFC1459_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub static 
STRICT_RFC1459_LOWER_MAP: [u8, ..256] = [ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', b'@', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'^', b'_', b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, ]; pub trait IrcAsciiExt<T> for Sized? { /// Makes a copy of the string in IRC ASCII lower case: /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z', /// and "[]\\~" are mapped to "{}|\^" respectively, /// but all other characters are unchanged. fn to_irc_lower(&self) -> T; /// Check that two strings are an ASCII case-insensitive match. 
/// Same as `to_irc_lower(a) == to_irc_lower(b)`, /// but without allocating and copying temporary strings. fn eq_ignore_irc_case(&self, other: &Self) -> bool; } pub trait OwnedIrcAsciiExt { fn into_irc_lower(self) -> Self; } impl IrcAsciiExt<Vec<u8>> for [u8] { #[inline] fn to_irc_lower(&self) -> Vec<u8> { let lower_map = RFC1459_LOWER_MAP; self.iter().map(|&byte| lower_map[byte as uint]).collect() } #[inline] fn eq_ignore_irc_case(&self, other: &[u8]) -> bool { let lower_map = RFC1459_LOWER_MAP; self.len() == other.len() && self.iter().zip(other.iter()).all( |(byte_self, byte_other)| { lower_map[*byte_self as uint] == lower_map[*byte_other as uint] }) } } impl OwnedIrcAsciiExt for Vec<u8> { #[inline] fn into_irc_lower(mut self) -> Vec<u8> { let lower_map = RFC1459_LOWER_MAP; for byte in self.iter_mut() { *byte = lower_map[*byte as uint]; } self } } impl IrcAsciiExt<String> for str { #[inline] fn to_irc_lower(&self) -> String { // Vec<u8>::to_irc_lower() preserves the UTF-8 invariant. unsafe { String::from_utf8_unchecked(self.as_bytes().to_irc_lower()) } } #[inline] fn eq_ignore_irc_case(&self, other: &str) -> bool { self.as_bytes().eq_ignore_irc_case(other.as_bytes()) } } impl OwnedIrcAsciiExt for String { #[inline] fn into_irc_lower(self) -> String { // Vec<u8>::into_irc_lower() preserves the UTF-8 invariant. 
unsafe { String::from_utf8_unchecked(self.into_bytes().into_irc_lower()) } } } #[test] fn test_old_basics() { // lower("[]\\^") == "{}|~" assert!("[".eq_ignore_irc_case("{")); assert!("]".eq_ignore_irc_case("}")); assert!("\\".eq_ignore_irc_case("|")); assert!("^".eq_ignore_irc_case("~")); assert_eq!("[".to_irc_lower()[], "{"); assert_eq!("]".to_irc_lower()[], "}"); assert_eq!("\\".to_irc_lower()[], "|"); assert_eq!("^".to_irc_lower()[], "~"); assert_eq!("^".to_string().into_irc_lower()[], "~"); } #[deriving(PartialEq, Eq, Show)] pub struct AsciiCaseMapping; impl Default for AsciiCaseMapping { fn default() -> AsciiCaseMapping { AsciiCaseMapping } } impl CaseMapping for AsciiCaseMapping { #[inline] fn get_lower_map(&self) -> &[u8] { ASCII_LOWER_MAP.as_slice() } } #[deriving(PartialEq, Eq, Show)] pub struct Rfc1459CaseMapping; impl Default for Rfc1459CaseMapping { fn default() -> Rfc1459CaseMapping { Rfc1459CaseMapping } } impl CaseMapping for Rfc1459CaseMapping { #[inline] fn get_lower_map(&self) -> &[u8] { RFC1459_LOWER_MAP.as_slice() } } #[deriving(PartialEq, Eq, Show)] pub struct StrictRfc1459CaseMapping; impl Default for StrictRfc1459CaseMapping { fn default() -> StrictRfc1459CaseMapping { StrictRfc1459CaseMapping } } impl CaseMapping for StrictRfc1459CaseMapping { #[inline] fn get_lower_map(&self) -> &[u8] { STRICT_RFC1459_LOWER_MAP.as_slice() } } pub trait CaseMapping: Default+PartialEq+Eq { fn get_lower_map(&self) -> &[u8]; fn to_irc_lower<Sized? T>(&self, left: &T) -> Vec<u8> where T: ToByteSlice { // Vec<u8>::to_irc_lower() preserves the UTF-8 invariant. let lower_map = self.get_lower_map(); left.to_byte_slice().iter().map(|&byte| lower_map[byte as uint]).collect() } #[inline] fn hash_ignore_case<Sized? 
T>(&self, left: &T, state: &mut ::std::hash::sip::SipState) where T: ToByteSlice+::std::hash::Hash { let lower_map = self.get_lower_map(); for byte in left.to_byte_slice().iter() { ::std::hash::Hash::hash(&lower_map[*byte as uint], state); } } #[inline] fn eq_ignore_case<Sized? T>(&self, left: &T, right: &T) -> bool where T: ToByteSlice { let lower_map = self.get_lower_map(); let left = left.to_byte_slice(); let right = right.to_byte_slice(); left.len() == right.len() && left.iter().zip(right.iter()).all( |(byte_self, byte_other)| { lower_map[*byte_self as uint] == lower_map[*byte_other as uint] }) } } trait ToByteSlice for Sized? { fn to_byte_slice<'a>(&'a self) -> &'a [u8]; } impl ToByteSlice for str { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self.as_bytes() } } impl ToByteSlice for String { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self.as_slice().as_bytes() } } impl ToByteSlice for [u8] { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self } } impl ToByteSlice for Vec<u8> { fn to_byte_slice<'a>(&'a self) -> &'a [u8] { self.as_slice() } } #[test] fn test_basics() { assert!(AsciiCaseMapping.eq_ignore_case("A", "a")); assert!(!AsciiCaseMapping.eq_ignore_case("[", "{")); assert!(!AsciiCaseMapping.eq_ignore_case("\\", "|")); assert!(!AsciiCaseMapping.eq_ignore_case("]", "}")); assert!(!AsciiCaseMapping.eq_ignore_case("^", "~")); assert!(Rfc1459CaseMapping.eq_ignore_case("A", "a")); assert!(Rfc1459CaseMapping.eq_ignore_case("[", "{")); assert!(Rfc1459CaseMapping.eq_ignore_case("\\", "|")); assert!(Rfc1459CaseMapping.eq_ignore_case("]", "}")); assert!(Rfc1459CaseMapping.eq_ignore_case("^", "~")); assert!(StrictRfc1459CaseMapping.eq_ignore_case("A", "a")); assert!(StrictRfc1459CaseMapping.eq_ignore_case("[", "{")); assert!(StrictRfc1459CaseMapping.eq_ignore_case("\\", "|")); assert!(StrictRfc1459CaseMapping.eq_ignore_case("]", "}")); assert!(!StrictRfc1459CaseMapping.eq_ignore_case("^", "~")); assert_eq!( AsciiCaseMapping.to_irc_lower("A[]\\^Z"), 
b"a[]\\^z".to_vec()); assert_eq!( Rfc1459CaseMapping.to_irc_lower("A[]\\^Z"), b"a{}|~z".to_vec()); assert_eq!( StrictRfc1459CaseMapping.to_irc_lower("A[]\\^Z"), b"a{}|^z".to_vec()); }
//! Allows dynamic dispatch to object-unsafe traits, with some caveats.
//!
//! `list_of_types` works around Rust's lack of generic/associated statics by requiring the program to specify a single "list of types" at compile time, which must include everything that you want to call into dynamically. This must be globally unique within the program. That is, if a *library* wants to use these features, it must rely on being passed the global list as a generic parameter.
//!
//! Creating a list of types is simple: you wrap each type `T` into `ListedType<T>`, then put the `ListedType`s into tuples (possibly nested tuples). All the traits necessary to use a list are automatically implemented for all nested tuples of `ListedType`s. For instance, ((ListedType<i64>, ListedType<String>, ListedType<Vec<usize>>), ListedType<bool>) is a valid list with 4 types in it.

// NOTE(review): this region contains TWO revisions of the same module glued
// together by the bare VCS commit message "Made macros more convenient"
// (kept verbatim below) — apparently an artifact of concatenated git history.
// Code tokens in both revisions are preserved byte-for-byte; only comments
// and line formatting were added. Relies on nightly features: the never
// type `!` and specialization (`default fn`).

use std::marker::PhantomData;

// Generates a module defining a "sublist" view of a global type list: only
// the types satisfying the given `where` clause are visited/counted.
#[macro_export]
macro_rules! time_steward_make_sublist {
  (mod $mod: ident visits $T: ident where $($where:tt)*) => {mod $mod {
    use std::marker::PhantomData;
    use std::any::Any;
    use $crate::dynamic::list_of_types::{List, SubListRepresentative, ListedType};
    pub trait Visitor { fn visit<$T>(&mut self) where $($where)*; }
    pub unsafe trait SubList: List {
      fn visit_all<V: Visitor>(visitor: &mut V);
      fn count()->usize;
      fn count_before<T>()->usize;
    }
    // Specialization: default impl applies to listed types NOT matching the
    // where clause (counted as 0, never visited)...
    unsafe impl <T: Any> SubList for ListedType<T> {
      #[inline(always)] default fn visit_all<V: Visitor>(_: &mut V) {}
      #[inline(always)] default fn count()->usize {0}
      #[inline(always)] default fn count_before<U>()->usize {panic!("invoked count_before on a list that doesn't contain the type")}
    }
    // ...and is overridden for types that DO match.
    unsafe impl <$T: Any> SubList for ListedType<$T> where $($where)* {
      #[inline(always)] fn visit_all<V: Visitor>(visitor: &mut V) { visitor.visit::<$T>(); }
      #[inline(always)] fn count()->usize {1}
      #[inline(always)] fn count_before<U>()->usize {0}
    }
    __time_steward_internal_sublist_tuple_impls! (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31);
    // Uninhabited (second field is `!`): exists only as a type-level handle.
    pub struct Representative <GlobalList: SubList> (PhantomData <GlobalList>,!);
    unsafe impl <GlobalList: SubList> SubListRepresentative for Representative <GlobalList> {
      fn count()->usize { $crate::dynamic::list_of_types::assert_unique_global_list::<GlobalList>(); GlobalList::count() }
      fn count_before<T>()->usize { $crate::dynamic::list_of_types::assert_unique_global_list::<GlobalList>(); GlobalList::count_before::<T>() }
    }/*{ fn visit_all<V: Visitor>(visitor: &mut V) { GlobalList::visit_all(visitor); } }*/
  }}
}

// Uninhabited marker wrapping a type for inclusion in a list.
pub struct ListedType<T: Any>(PhantomData <T>, !);

// Implements SubList for nested tuples of up to 32 elements, peeling one
// type parameter per recursion step.
#[macro_export] #[doc(hidden)]
macro_rules! __time_steward_internal_sublist_tuple_impls {
  ($TL: ident $(, $T: ident)*) => {
    unsafe impl<$($T,)* $TL> SubList for ($($T,)* $TL,) where $($T: SubList,)* $TL: SubList {
      #[inline(always)] fn visit_all<V: Visitor>(visitor: &mut V) { $($T::visit_all(visitor);)* $TL::visit_all(visitor); }
      #[inline(always)] fn count()->usize { $($T::count() + )* $TL::count() }
      #[inline(always)] fn count_before<T>()->usize {
        // Repeated shadowing of `count` accumulates the sizes of the
        // sub-lists preceding the one that contains T.
        let count = 0;
        $(if $T::includes::<T> () {return count + $T::count_before::<T>()} let count = count + $T::count();)*
        if $TL::includes::<T> () {return count + $TL::count_before::<T>()}
        panic!("invoked count_before on a list that doesn't contain the type")
      }
    }
    __time_steward_internal_sublist_tuple_impls! ($($T),*);
  };
  () => {};
}

pub unsafe trait List: Any { fn includes <T> ()->bool; }
pub unsafe trait SubListRepresentative { fn count()->usize; fn count_before<T>()->usize; }
// Type-equality test via specialization: true only when Self == T.
pub unsafe trait AmI <T> {fn am_i()->bool;}
unsafe impl <T, U> AmI <T> for U {default fn am_i()->bool {false}}
unsafe impl <T> AmI <T> for T {fn am_i()->bool {true}}
unsafe impl <T: Any> List for ListedType<T> { #[inline(always)] fn includes<U>()->bool {<T as AmI<U>>::am_i()} }

// Same tuple recursion for the base List trait.
macro_rules! tuple_impls {
  ($TL: ident $(, $T: ident)*) => {
    unsafe impl<$($T,)* $TL> List for ($($T,)* $TL,) where $($T: List,)* $TL: List {
      #[inline(always)] fn includes<T>()->bool { $($T::includes::<T>() || )* $TL::includes::<T>() }
    }
    tuple_impls! ($($T),*);
  };
  () => {};
}
tuple_impls! (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31);

#[derive (Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct DeterministicallyRandomlyIdentifiedTypeId(u64);
pub trait DeterministicallyRandomlyIdentifiedType { const ID: DeterministicallyRandomlyIdentifiedTypeId; }

// Guards against mixing two different global lists in one program: remembers
// the TypeId of the first list seen (per thread) and asserts on mismatch.
fn assert_unique_global_list <GlobalList: List>() {
  use std::any::TypeId;
  use std::cell::Cell;
  thread_local! { static LIST_ID: Cell<Option <TypeId>> = Cell::new (None); }
  LIST_ID.with (| id_cell | {
    let id = TypeId::of::<GlobalList>();
    if let Some (existing) = id_cell.get() {
      assert!(id == existing, "invoked dynamic function with two different global lists (see the list_of_types documentation for why this is wrong");
      return
    }
    id_cell.set (Some(id));
  });
}

time_steward_make_sublist! ( mod whatever visits T where T: super::DeterministicallyRandomlyIdentifiedType);

/* #[macro_export] macro_rules! time_steward_sublist_fn { (static $visitor_data: ident: $VisitorData: ty: $VisitorTrait: ident = $initial_value: expr followed by visit<$T> (&mut $self_hack) $visitation_effect: expr; fn $function_name: ident <$GlobalList: $SubList: ident> ($($argument_name: ident: $argument_type:ty),*)->$return_type:ty $($body:tt)*) => { fn $function_name <GlobalList: $List> $($argument_name: $argument_type),*)->$return_type:ty { struct Visitor ($VisitorData); impl VisitorTrait for Visitor { fn visit<$T>(&mut $self_hack) where {$visitation_effect} } thread_local! { static DATA: $VisitorData = { let mut visitor = Visitor ($initial_value); <GlobalList as DeterministicallyRandomlyIdentifiedTypesList>::visit_all (&mut visitor); visitor.0 }; } DATA.with (| $visitor_data | {$($body)*}) } } */

// Revision 1 form: takes list/module as separate comma arguments.
#[macro_export]
macro_rules! time_steward_visit_sublist {
  (&mut $object: ident: $Object: ty, $GlobalList: ident, $mod: ident, $($method: tt)*) => {
    /*struct Visitor <'a> (&mut 'a $Object); impl <'a> $mod::Visitor for Visitor <'a> { $method }*/
    impl $mod::Visitor for $Object { $($method)* }
    <$GlobalList as $mod::SubList>::visit_all (&mut $object);
  }
}

use std::any::Any ;

// Maps a position in the `whatever` sublist to that type's deterministic id,
// memoized in a thread-local table built on first use.
fn listed_type_id <GlobalList: whatever::SubList> (index: usize)->Option <DeterministicallyRandomlyIdentifiedTypeId> {
  assert_unique_global_list::<GlobalList>();
  use std::cell::RefCell;
  use std::mem;
  struct Visitor (Vec<DeterministicallyRandomlyIdentifiedTypeId>);
  impl whatever::Visitor for Visitor { fn visit<T>(&mut self) where T: DeterministicallyRandomlyIdentifiedType {self.0.push (T::ID);} }
  thread_local! { static TABLE: RefCell<Vec<DeterministicallyRandomlyIdentifiedTypeId>> = RefCell::new(Vec::new()); }
  TABLE.with (| table | {
    let mut guard = table.borrow_mut();
    if guard.is_empty() {
      mem::replace (&mut*guard, {
        let mut result = Vec::with_capacity(whatever::Representative::<GlobalList>::count());
        time_steward_visit_sublist! (&mut result: Vec<DeterministicallyRandomlyIdentifiedTypeId>, GlobalList, whatever, fn visit<T>(&mut self) where T: DeterministicallyRandomlyIdentifiedType {self.push (T::ID);});
        result
      });
    }
    guard.get (index).cloned()
  })
}

Made macros more convenient

// NOTE(review): REVISION 2 of the same module starts here. Identical to
// revision 1 except for the tail: `time_steward_visit_sublist` takes a
// `<List as mod::SubList>` argument form, a `time_steward_with_sublist_table`
// macro is added, and `listed_type_id` is replaced by `index_to_id`.

//! Allows dynamic dispatch to object-unsafe traits, with some caveats.
//!
//! `list_of_types` works around Rust's lack of generic/associated statics by requiring the program to specify a single "list of types" at compile time, which must include everything that you want to call into dynamically. This must be globally unique within the program. That is, if a *library* wants to use these features, it must rely on being passed the global list as a generic parameter.
//!
//! Creating a list of types is simple: you wrap each type `T` into `ListedType<T>`, then put the `ListedType`s into tuples (possibly nested tuples). All the traits necessary to use a list are automatically implemented for all nested tuples of `ListedType`s. For instance, ((ListedType<i64>, ListedType<String>, ListedType<Vec<usize>>), ListedType<bool>) is a valid list with 4 types in it.

use std::marker::PhantomData;

#[macro_export]
macro_rules! time_steward_make_sublist {
  (mod $mod: ident visits $T: ident where $($where:tt)*) => {mod $mod {
    use std::marker::PhantomData;
    use std::any::Any;
    use $crate::dynamic::list_of_types::{List, SubListRepresentative, ListedType};
    pub trait Visitor { fn visit<$T>(&mut self) where $($where)*; }
    pub unsafe trait SubList: List {
      fn visit_all<V: Visitor>(visitor: &mut V);
      fn count()->usize;
      fn count_before<T>()->usize;
    }
    unsafe impl <T: Any> SubList for ListedType<T> {
      #[inline(always)] default fn visit_all<V: Visitor>(_: &mut V) {}
      #[inline(always)] default fn count()->usize {0}
      #[inline(always)] default fn count_before<U>()->usize {panic!("invoked count_before on a list that doesn't contain the type")}
    }
    unsafe impl <$T: Any> SubList for ListedType<$T> where $($where)* {
      #[inline(always)] fn visit_all<V: Visitor>(visitor: &mut V) { visitor.visit::<$T>(); }
      #[inline(always)] fn count()->usize {1}
      #[inline(always)] fn count_before<U>()->usize {0}
    }
    __time_steward_internal_sublist_tuple_impls! (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31);
    pub struct Representative <GlobalList: SubList> (PhantomData <GlobalList>,!);
    unsafe impl <GlobalList: SubList> SubListRepresentative for Representative <GlobalList> {
      fn count()->usize { $crate::dynamic::list_of_types::assert_unique_global_list::<GlobalList>(); GlobalList::count() }
      fn count_before<T>()->usize { $crate::dynamic::list_of_types::assert_unique_global_list::<GlobalList>(); GlobalList::count_before::<T>() }
    }/*{ fn visit_all<V: Visitor>(visitor: &mut V) { GlobalList::visit_all(visitor); } }*/
  }}
}

pub struct ListedType<T: Any>(PhantomData <T>, !);

#[macro_export] #[doc(hidden)]
macro_rules! __time_steward_internal_sublist_tuple_impls {
  ($TL: ident $(, $T: ident)*) => {
    unsafe impl<$($T,)* $TL> SubList for ($($T,)* $TL,) where $($T: SubList,)* $TL: SubList {
      #[inline(always)] fn visit_all<V: Visitor>(visitor: &mut V) { $($T::visit_all(visitor);)* $TL::visit_all(visitor); }
      #[inline(always)] fn count()->usize { $($T::count() + )* $TL::count() }
      #[inline(always)] fn count_before<T>()->usize {
        let count = 0;
        $(if $T::includes::<T> () {return count + $T::count_before::<T>()} let count = count + $T::count();)*
        if $TL::includes::<T> () {return count + $TL::count_before::<T>()}
        panic!("invoked count_before on a list that doesn't contain the type")
      }
    }
    __time_steward_internal_sublist_tuple_impls! ($($T),*);
  };
  () => {};
}

pub unsafe trait List: Any { fn includes <T> ()->bool; }
pub unsafe trait SubListRepresentative { fn count()->usize; fn count_before<T>()->usize; }
pub unsafe trait AmI <T> {fn am_i()->bool;}
unsafe impl <T, U> AmI <T> for U {default fn am_i()->bool {false}}
unsafe impl <T> AmI <T> for T {fn am_i()->bool {true}}
unsafe impl <T: Any> List for ListedType<T> { #[inline(always)] fn includes<U>()->bool {<T as AmI<U>>::am_i()} }

macro_rules! tuple_impls {
  ($TL: ident $(, $T: ident)*) => {
    unsafe impl<$($T,)* $TL> List for ($($T,)* $TL,) where $($T: List,)* $TL: List {
      #[inline(always)] fn includes<T>()->bool { $($T::includes::<T>() || )* $TL::includes::<T>() }
    }
    tuple_impls! ($($T),*);
  };
  () => {};
}
tuple_impls! (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30, T31);

#[derive (Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct DeterministicallyRandomlyIdentifiedTypeId(u64);
pub trait DeterministicallyRandomlyIdentifiedType { const ID: DeterministicallyRandomlyIdentifiedTypeId; }

fn assert_unique_global_list <GlobalList: List>() {
  use std::any::TypeId;
  use std::cell::Cell;
  thread_local! { static LIST_ID: Cell<Option <TypeId>> = Cell::new (None); }
  LIST_ID.with (| id_cell | {
    let id = TypeId::of::<GlobalList>();
    if let Some (existing) = id_cell.get() {
      assert!(id == existing, "invoked dynamic function with two different global lists (see the list_of_types documentation for why this is wrong");
      return
    }
    id_cell.set (Some(id));
  });
}

time_steward_make_sublist! ( mod whatever visits T where T: super::DeterministicallyRandomlyIdentifiedType);

/* #[macro_export] macro_rules! time_steward_sublist_fn { (static $visitor_data: ident: $VisitorData: ty: $VisitorTrait: ident = $initial_value: expr followed by visit<$T> (&mut $self_hack) $visitation_effect: expr; fn $function_name: ident <$GlobalList: $SubList: ident> ($($argument_name: ident: $argument_type:ty),*)->$return_type:ty $($body:tt)*) => { fn $function_name <GlobalList: $List> $($argument_name: $argument_type),*)->$return_type:ty { struct Visitor ($VisitorData); impl VisitorTrait for Visitor { fn visit<$T>(&mut $self_hack) where {$visitation_effect} } thread_local! { static DATA: $VisitorData = { let mut visitor = Visitor ($initial_value); <GlobalList as DeterministicallyRandomlyIdentifiedTypesList>::visit_all (&mut visitor); visitor.0 }; } DATA.with (| $visitor_data | {$($body)*}) } } */

// Revision 2 form: the list/module pair is written `<List as mod::SubList>`.
#[macro_export]
macro_rules! time_steward_visit_sublist {
  (&mut $object: ident: $Object: ty, <$GlobalList: ident as $mod: ident::SubList>, $($method: tt)*) => {
    impl $mod::Visitor for $Object { $($method)* }
    <$GlobalList as $mod::SubList>::visit_all (&mut $object);
  }
}

// Builds (once, thread-locally) a Vec-shaped table by visiting the sublist,
// then evaluates `$closure` with `$table` bound to a reference to it.
#[macro_export]
macro_rules! time_steward_with_sublist_table {
  (Vec<$Entry: ty>, <$GlobalList: ident as $mod: ident::SubList>, {$($method: tt)*} | $table: ident | $($closure: tt)*) => {{
    assert_unique_global_list::<GlobalList>();
    use std::cell::RefCell;
    use std::mem;
    thread_local! { static TABLE: RefCell<Vec<DeterministicallyRandomlyIdentifiedTypeId>> = RefCell::new(Vec::new()); }
    TABLE.with (| table | {
      let mut guard = table.borrow_mut();
      if guard.is_empty() {
        mem::replace (&mut*guard, {
          let mut result = Vec::with_capacity(whatever::Representative::<GlobalList>::count());
          time_steward_visit_sublist! ( &mut result: Vec<$Entry>, <GlobalList as whatever::SubList>, $($method)* );
          result
        });
      }
      let $table = &*guard;
      $($closure)*
    })
  }}
}

use std::any::Any ;

// Replaces revision 1's `listed_type_id`, now expressed via the table macro.
fn index_to_id <GlobalList: whatever::SubList> (index: usize)->Option <DeterministicallyRandomlyIdentifiedTypeId> {
  time_steward_with_sublist_table! (
    Vec<DeterministicallyRandomlyIdentifiedTypeId>,
    <GlobalList as whatever::SubList>,
    { fn visit<T>(&mut self) where T: DeterministicallyRandomlyIdentifiedType {self.push (T::ID);}}
    | table | table.get (index).cloned()
  )
}
// NOTE(review): this region contains TWO revisions of the same codegen file,
// glued together by the bare VCS commit message "Fix impl being on unowned
// type and trait" (kept verbatim below). The revisions differ only in the
// fallback `ServerAuth` tokens at the end of `infer_v3`. Code tokens are
// preserved byte-for-byte; only comments and formatting were added.
use ::quote::Tokens;
use ::syn::Ident;
use ::yaml_rust::Yaml;

use infer::auth;
use infer::datatype;
use infer::path;
use infer::TokensResult;

// (struct field tokens, additional generated type definitions)
type FieldsSupport = (Vec<Tokens>, Vec<Tokens>);

// Builds struct fields for an object schema; a property is Required if it is
// flagged on the property itself or listed in the parent's `required` array.
fn infer_ref_obj(schema: &Yaml, required: &Vec<Yaml>) -> FieldsSupport {
    let mut fields: Vec<Tokens> = Vec::new();
    let mut additional_types: Vec<Tokens> = Vec::new();

    for (field, schema) in schema["properties"].as_hash()
        .expect("Properties must be a map.")
    {
        let field_name = field.as_str()
            .expect("Property must be a string.");
        let field_ident = Ident::new(field_name);
        let (ty, maybe_at) = datatype::infer_v3(&schema).unwrap();

        let mandate: Tokens;
        if let Some(true) = schema["required"].as_bool() {
            mandate = quote!(::tapioca::datatype::Required);
        } else if required.contains(field) {
            mandate = quote!(::tapioca::datatype::Required);
        } else {
            mandate = quote!(::tapioca::datatype::Optional);
        }

        fields.push(quote!{ #field_ident: #mandate<#ty> });
        if let Some(additional_type) = maybe_at {
            additional_types.push(additional_type);
        }
    }

    (fields, additional_types)
}

// Emits either a struct (object schemas) or a type alias (everything else)
// for a named `#/components/schemas` entry.
fn infer_ref(ident: &Ident, schema: &Yaml, required: &Vec<Yaml>) -> TokensResult {
    match schema["properties"].as_hash() {
        Some(_) => {
            let (fields, additionals) = infer_ref_obj(&schema, &required);
            Ok(quote! {
                #(#additionals)*
                #[derive(Debug, Deserialize, PartialEq, Serialize)]
                pub struct #ident { #(pub #fields),* }
            })
        },
        None => {
            let (alias_to, maybe_at) = datatype::infer_v3(&schema)?;
            let additional_type = match maybe_at {
                Some(at) => at,
                None => quote!(),
            };
            Ok(quote! {
                #additional_type
                #[allow(dead_code)]
                pub type #ident = #alias_to;
            })
        },
    }
}

// Top-level entry: generates path impls, schema refs, auth schemes and the
// server-level auth header type from an OpenAPI v3 document.
pub(super) fn infer_v3(schema: &Yaml) -> TokensResult {
    let paths = schema["paths"].clone();
    let path_impls: Vec<Tokens> = paths.as_hash()
        .expect("Paths must be a map.")
        .iter()
        .map(|(path, path_schema)| path::infer_v3(
            path.as_str().expect("Path must be a string."),
            &path_schema
        ).unwrap())
        .collect();

    let api_url = schema["servers"][0]["url"].as_str()
        .expect("Must have at least one server URL.");

    let mut schema_ref_defs: Vec<Tokens> = Vec::new();
    let schema_refs = &schema["components"]["schemas"];
    if !schema_refs.is_badvalue() {
        for (schema_ref, schema) in schema_refs.as_hash()
            .expect("#/components/schemas must be a map.")
        {
            let schema_ref_name = schema_ref.as_str()
                .expect("$ref name must be a string.");
            schema_ref_defs.push(infer_ref(
                &Ident::new(schema_ref_name),
                &schema,
                &schema["required"].as_vec().unwrap_or(&Vec::new())
            )?);
        }
    }

    let mut auth_scheme_defs: Vec<Tokens> = Vec::new();
    let auth_schemes = &schema["components"]["securitySchemes"];
    if !auth_schemes.is_badvalue() {
        for (auth_scheme, schema) in auth_schemes.as_hash()
            .expect("#/components/securitySchemes must be a map.")
        {
            let auth_scheme_name = auth_scheme.as_str()
                .expect("security scheme name must be a string");
            auth_scheme_defs.push(auth::infer_v3_component(auth_scheme_name, &schema)?);
        }
    }

    let server_auth_impl: Tokens;
    let auth_struct = Ident::new("ServerAuth");
    let security_reqs = &schema["security"];
    if !security_reqs.is_badvalue() {
        server_auth_impl = auth::infer_v3(&auth_struct, &security_reqs)?
    } else {
        // Revision 1 fallback: type alias to () plus a Header impl — this
        // impl lands on an unowned type/trait pair (fixed in revision 2).
        server_auth_impl = quote!{
            type #auth_struct = ();
            impl header::Header for #auth_struct {
                fn header_name() -> &'static str { "" }
                fn parse_header(_: &[Vec<u8>]) -> HeaderResult<()> { Ok(()) }
            }
        }
    }

    Ok(quote! {
        pub mod schema_ref {
            #[allow(unused_imports)]
            use super::schema_ref;
            #(#schema_ref_defs)*
        }
        pub mod auth_scheme { #(#auth_scheme_defs)* }
        use ::tapioca::header;
        use ::tapioca::HeaderResult;
        const API_URL: &'static str = #api_url;
        #[allow(dead_code)]
        #server_auth_impl
        #(#path_impls)*
    })
}

Fix impl being on unowned type and trait

// NOTE(review): REVISION 2 starts here; identical except for the ServerAuth
// fallback, which now generates an owned newtype struct.
use ::quote::Tokens;
use ::syn::Ident;
use ::yaml_rust::Yaml;

use infer::auth;
use infer::datatype;
use infer::path;
use infer::TokensResult;

type FieldsSupport = (Vec<Tokens>, Vec<Tokens>);

fn infer_ref_obj(schema: &Yaml, required: &Vec<Yaml>) -> FieldsSupport {
    let mut fields: Vec<Tokens> = Vec::new();
    let mut additional_types: Vec<Tokens> = Vec::new();

    for (field, schema) in schema["properties"].as_hash()
        .expect("Properties must be a map.")
    {
        let field_name = field.as_str()
            .expect("Property must be a string.");
        let field_ident = Ident::new(field_name);
        let (ty, maybe_at) = datatype::infer_v3(&schema).unwrap();

        let mandate: Tokens;
        if let Some(true) = schema["required"].as_bool() {
            mandate = quote!(::tapioca::datatype::Required);
        } else if required.contains(field) {
            mandate = quote!(::tapioca::datatype::Required);
        } else {
            mandate = quote!(::tapioca::datatype::Optional);
        }

        fields.push(quote!{ #field_ident: #mandate<#ty> });
        if let Some(additional_type) = maybe_at {
            additional_types.push(additional_type);
        }
    }

    (fields, additional_types)
}

fn infer_ref(ident: &Ident, schema: &Yaml, required: &Vec<Yaml>) -> TokensResult {
    match schema["properties"].as_hash() {
        Some(_) => {
            let (fields, additionals) = infer_ref_obj(&schema, &required);
            Ok(quote! {
                #(#additionals)*
                #[derive(Debug, Deserialize, PartialEq, Serialize)]
                pub struct #ident { #(pub #fields),* }
            })
        },
        None => {
            let (alias_to, maybe_at) = datatype::infer_v3(&schema)?;
            let additional_type = match maybe_at {
                Some(at) => at,
                None => quote!(),
            };
            Ok(quote! {
                #additional_type
                #[allow(dead_code)]
                pub type #ident = #alias_to;
            })
        },
    }
}

pub(super) fn infer_v3(schema: &Yaml) -> TokensResult {
    let paths = schema["paths"].clone();
    let path_impls: Vec<Tokens> = paths.as_hash()
        .expect("Paths must be a map.")
        .iter()
        .map(|(path, path_schema)| path::infer_v3(
            path.as_str().expect("Path must be a string."),
            &path_schema
        ).unwrap())
        .collect();

    let api_url = schema["servers"][0]["url"].as_str()
        .expect("Must have at least one server URL.");

    let mut schema_ref_defs: Vec<Tokens> = Vec::new();
    let schema_refs = &schema["components"]["schemas"];
    if !schema_refs.is_badvalue() {
        for (schema_ref, schema) in schema_refs.as_hash()
            .expect("#/components/schemas must be a map.")
        {
            let schema_ref_name = schema_ref.as_str()
                .expect("$ref name must be a string.");
            schema_ref_defs.push(infer_ref(
                &Ident::new(schema_ref_name),
                &schema,
                &schema["required"].as_vec().unwrap_or(&Vec::new())
            )?);
        }
    }

    let mut auth_scheme_defs: Vec<Tokens> = Vec::new();
    let auth_schemes = &schema["components"]["securitySchemes"];
    if !auth_schemes.is_badvalue() {
        for (auth_scheme, schema) in auth_schemes.as_hash()
            .expect("#/components/securitySchemes must be a map.")
        {
            let auth_scheme_name = auth_scheme.as_str()
                .expect("security scheme name must be a string");
            auth_scheme_defs.push(auth::infer_v3_component(auth_scheme_name, &schema)?);
        }
    }

    let server_auth_impl: Tokens;
    let auth_struct = Ident::new("ServerAuth");
    let security_reqs = &schema["security"];
    if !security_reqs.is_badvalue() {
        server_auth_impl = auth::infer_v3(&auth_struct, &security_reqs)?
    } else {
        // Revision 2 fallback: an owned newtype, so the Header impl is on a
        // type the generated crate owns.
        server_auth_impl = quote!{
            pub struct #auth_struct(());
            impl header::Header for #auth_struct {
                fn header_name() -> &'static str { "" }
                fn parse_header(_: &[Vec<u8>]) -> HeaderResult<Self> { Ok(Self { 0: () }) }
            }
        }
    }

    Ok(quote! {
        pub mod schema_ref {
            #[allow(unused_imports)]
            use super::schema_ref;
            #(#schema_ref_defs)*
        }
        pub mod auth_scheme { #(#auth_scheme_defs)* }
        use ::tapioca::header;
        use ::tapioca::HeaderResult;
        const API_URL: &'static str = #api_url;
        #[allow(dead_code)]
        #server_auth_impl
        #(#path_impls)*
    })
}
// Brotli bit-stream storage helpers (Rust translation of the C encoder).
use super::constants::{BROTLI_NUM_BLOCK_LEN_SYMBOLS, kZeroRepsBits, kZeroRepsDepth,
                       kNonZeroRepsBits, kNonZeroRepsDepth, kCodeLengthBits, kCodeLengthDepth};
use super::entropy_encode::{HuffmanTree, BrotliWriteHuffmanTree, BrotliCreateHuffmanTree,
                            BrotliConvertBitDepthsToSymbols, NewHuffmanTree, InitHuffmanTree,
                            SortHuffmanTreeItems, SortHuffmanTree, BrotliSetDepth};
use super::super::alloc;
use super::super::alloc::SliceWrapper;
use super::super::alloc::SliceWrapperMut;
use super::super::core;

// One block-length code: represents lengths [offset, offset + 2^nbits).
pub struct PrefixCodeRange {
  pub offset: u32,
  pub nbits: u32,
}

// Table of the block-length prefix codes: base offset plus number of extra
// bits for each symbol. NOTE(review): presumably mirrors the block-count
// code table of the brotli format spec — confirm against RFC 7932.
static kBlockLengthPrefixCode: [PrefixCodeRange; BROTLI_NUM_BLOCK_LEN_SYMBOLS] =
  [PrefixCodeRange { offset: 1u32, nbits: 2u32, },
   PrefixCodeRange { offset: 5u32, nbits: 2u32, },
   PrefixCodeRange { offset: 9u32, nbits: 2u32, },
   PrefixCodeRange { offset: 13u32, nbits: 2u32, },
   PrefixCodeRange { offset: 17u32, nbits: 3u32, },
   PrefixCodeRange { offset: 25u32, nbits: 3u32, },
   PrefixCodeRange { offset: 33u32, nbits: 3u32, },
   PrefixCodeRange { offset: 41u32, nbits: 3u32, },
   PrefixCodeRange { offset: 49u32, nbits: 4u32, },
   PrefixCodeRange { offset: 65u32, nbits: 4u32, },
   PrefixCodeRange { offset: 81u32, nbits: 4u32, },
   PrefixCodeRange { offset: 97u32, nbits: 4u32, },
   PrefixCodeRange { offset: 113u32, nbits: 5u32, },
   PrefixCodeRange { offset: 145u32, nbits: 5u32, },
   PrefixCodeRange { offset: 177u32, nbits: 5u32, },
   PrefixCodeRange { offset: 209u32, nbits: 5u32, },
   PrefixCodeRange { offset: 241u32, nbits: 6u32, },
   PrefixCodeRange { offset: 305u32, nbits: 6u32, },
   PrefixCodeRange { offset: 369u32, nbits: 7u32, },
   PrefixCodeRange { offset: 497u32, nbits: 8u32, },
   PrefixCodeRange { offset: 753u32, nbits: 9u32, },
   PrefixCodeRange { offset: 1265u32, nbits: 10u32, },
   PrefixCodeRange { offset: 2289u32, nbits: 11u32, },
   PrefixCodeRange { offset: 4337u32, nbits: 12u32, },
   PrefixCodeRange { offset: 8433u32, nbits: 13u32, },
   PrefixCodeRange { offset: 16625u32, nbits: 24u32, }];

// Appends the low `n_bits` of `bits` to the stream at bit position `*pos`.
// (Signature continues on the next chunk line.)
fn BrotliWriteBits(n_bits: u8, bits: u64, mut pos: &mut
usize,
                   mut array: &mut [u8]) {
  // Little-endian bit writer: OR `bits` into the 64-bit window starting at
  // the byte containing bit position *pos, then store the window back byte
  // by byte. Caller contract (asserted): bits fits in n_bits, n_bits <= 56
  // so the shifted value never overflows the 8-byte window.
  assert!((bits >> n_bits as usize) == 0);
  assert!(n_bits <= 56);
  let ptr_offset: usize = ((*pos >> 3) as u32) as usize; // byte index of *pos (cast chain kept from the C translation)
  // Only the current byte needs reading back: bytes past the write position
  // are required to be zero (see BrotliWriteBitsPrepareStorage), so the rest
  // of the window starts as the shifted bits alone.
  let mut v = array[ptr_offset] as u64;
  v |= bits << ((*pos) as u64 & 7);
  // BUG FIX: the stores for bytes 1..=4 previously used shifts 4, 8, 16 and
  // 24 (instead of 8, 16, 24 and 32), so any value crossing a byte boundary
  // was written back corrupted. Byte k of the window must come from v >> (8*k).
  array[ptr_offset + 7] = (v >> 56) as u8;
  array[ptr_offset + 6] = ((v >> 48) & 0xff) as u8;
  array[ptr_offset + 5] = ((v >> 40) & 0xff) as u8;
  array[ptr_offset + 4] = ((v >> 32) & 0xff) as u8;
  array[ptr_offset + 3] = ((v >> 24) & 0xff) as u8;
  array[ptr_offset + 2] = ((v >> 16) & 0xff) as u8;
  array[ptr_offset + 1] = ((v >> 8) & 0xff) as u8;
  array[ptr_offset] = (v & 0xff) as u8;
  *pos += n_bits as usize
}

// Zeroes the byte at (byte-aligned) bit position `pos`, establishing the
// "bytes ahead of the cursor are zero" invariant BrotliWriteBits relies on.
fn BrotliWriteBitsPrepareStorage(pos: usize, mut array: &mut [u8]) {
  assert_eq!(pos & 7, 0);
  array[pos >> 3] = 0;
}

// Stores the Huffman code that encodes the code-length code lengths
// (the fixed "code length of code lengths" alphabet of 18 symbols).
// NOTE(review): body is cut mid-statement at the chunk boundary and kept
// token-identical so it splices with the continuation on the next line.
fn BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes: i32,
                                                code_length_bitdepth: &[u8],
                                                mut storage_ix: &mut usize,
                                                mut storage: &mut [u8]) {
  // Transmission order of the 18 code-length code lengths.
  static kStorageOrder: [u8; 18] = [1i32 as (u8), 2i32 as (u8), 3i32 as (u8), 4i32 as (u8),
                                    0i32 as (u8), 5i32 as (u8), 17i32 as (u8), 6i32 as (u8),
                                    16i32 as (u8), 7i32 as (u8), 8i32 as (u8), 9i32 as (u8),
                                    10i32 as (u8), 11i32 as (u8), 12i32 as (u8), 13i32 as (u8),
                                    14i32 as (u8), 15i32 as (u8)];
  // Fixed prefix code used to encode each code length value.
  static kHuffmanBitLengthHuffmanCodeSymbols: [u8; 6] =
    [0i32 as (u8), 7i32 as (u8), 3i32 as (u8), 2i32 as (u8), 1i32 as (u8), 15i32 as (u8)];
  static kHuffmanBitLengthHuffmanCodeBitLengths: [u8; 6] =
    [2i32 as (u8), 4i32 as (u8), 3i32 as (u8), 2i32 as (u8), 2i32 as (u8), 4i32 as (u8)];
  let mut skip_some: u64 = 0u64;
  // Drop trailing zero-length codes from the transmission.
  let mut codes_to_store: u64 = 18;
  if num_codes > 1i32 {
    'break5: while codes_to_store > 0 {
      {
        if code_length_bitdepth[(kStorageOrder[codes_to_store.wrapping_sub(1) as usize] as (usize))] as (i32) != 0i32 {
          {
            break 'break5;
          }
        }
      }
      codes_to_store = codes_to_store.wrapping_sub(1);
    }
  }
  // Optionally skip the first 2 or 3 (zero) entries as well.
  if code_length_bitdepth[(kStorageOrder[0usize] as (usize))] as (i32) == 0i32 &&
     (code_length_bitdepth[(kStorageOrder[1usize] as (usize))] as (i32) == 0i32) {
    skip_some = 2;
    if code_length_bitdepth[(kStorageOrder[2usize]
as (usize))] as (i32) == 0i32 { skip_some = 3; } } BrotliWriteBits(2, skip_some, storage_ix, storage); { let mut i: u64; i = skip_some; while i < codes_to_store { { let mut l: usize = code_length_bitdepth[(kStorageOrder[i as usize] as (usize))] as (usize); BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l] as (u8), kHuffmanBitLengthHuffmanCodeSymbols[l] as u64, storage_ix, storage); } i = i.wrapping_add(1); } } } fn BrotliStoreHuffmanTreeToBitMask(huffman_tree_size: usize, huffman_tree: &[u8], huffman_tree_extra_bits: &[u8], code_length_bitdepth: &[u8], code_length_bitdepth_symbols: &[u16], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut i: usize; i = 0usize; while i < huffman_tree_size { { let mut ix: usize = huffman_tree[(i as (usize))] as (usize); BrotliWriteBits(code_length_bitdepth[(ix as (usize))] as (u8), code_length_bitdepth_symbols[(ix as (usize))] as (u64), storage_ix, storage); if ix == 16usize { BrotliWriteBits(2, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } else if ix == 17usize { BrotliWriteBits(3, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } pub fn BrotliStoreHuffmanTree(depths: &[u8], num: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut huffman_tree: [u8; 704] = [0; 704]; let mut huffman_tree_extra_bits: [u8; 704] = [0; 704]; let mut huffman_tree_size: usize = 0usize; let mut code_length_bitdepth: [u8; 18] = [0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8)]; let mut code_length_bitdepth_symbols: [u16; 18] = [0;18]; let mut huffman_tree_histogram: [u32; 18] = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; let mut i: usize; 
let mut num_codes: i32 = 0i32;
  let mut code: usize = 0usize;
  0i32; // no-op statement kept from the C translation
  // Run-length encode the depth sequence into huffman_tree /
  // huffman_tree_extra_bits.
  BrotliWriteHuffmanTree(depths,
                         num,
                         &mut huffman_tree_size,
                         &mut huffman_tree[..],
                         &mut huffman_tree_extra_bits[..]);
  // Histogram the RLE symbols.
  i = 0usize;
  while i < huffman_tree_size {
    {
      let _rhs = 1;
      let _lhs = &mut huffman_tree_histogram[huffman_tree[i] as (usize)];
      *_lhs = (*_lhs).wrapping_add(_rhs as (u32));
    }
    i = i.wrapping_add(1 as (usize));
  }
  // Count distinct symbols (stop once 2 are seen); `code` remembers the single
  // symbol so it can be special-cased after the header is written.
  i = 0usize;
  'break3: while i < 18usize {
    {
      if huffman_tree_histogram[i] != 0 {
        if num_codes == 0i32 {
          code = i;
          num_codes = 1i32;
        } else if num_codes == 1i32 {
          num_codes = 2i32;
          {
            {
              break 'break3;
            }
          }
        }
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  // Build the code-length code (depth limit 5), store it, then store the
  // RLE-compressed tree itself.
  BrotliCreateHuffmanTree(&mut huffman_tree_histogram,
                          18usize,
                          5i32,
                          tree,
                          &mut code_length_bitdepth);
  BrotliConvertBitDepthsToSymbols(&mut code_length_bitdepth,
                                  18usize,
                                  &mut code_length_bitdepth_symbols);
  BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes,
                                               &code_length_bitdepth,
                                               storage_ix,
                                               storage);
  if num_codes == 1i32 {
    code_length_bitdepth[code] = 0i32 as (u8);
  }
  BrotliStoreHuffmanTreeToBitMask(huffman_tree_size,
                                  &huffman_tree,
                                  &huffman_tree_extra_bits,
                                  &code_length_bitdepth,
                                  &code_length_bitdepth_symbols,
                                  storage_ix,
                                  storage);
}

/// Emits the precomputed 40-bit static code-length code used by the fast
/// depth-encoding path below.
fn StoreStaticCodeLengthCode(mut storage_ix: &mut usize, mut storage: &mut [u8]) {
  BrotliWriteBits(40,
                  0xffu32 as (u64) << 32i32 | 0x55555554u32 as (u64),
                  storage_ix,
                  storage);
}

/// Fast path: builds a depth-limited Huffman code for `histogram` and stores
/// it, filling `depth` and `bits` for later symbol emission.
/// `histogram_total` must equal the sum of the histogram counts; `max_bits`
/// is the bit width needed to name any symbol of the alphabet.
pub fn BrotliBuildAndStoreHuffmanTreeFast<AllocHT: alloc::Allocator<HuffmanTree>>(
    mut m : AllocHT,
    histogram : &[u32],
    histogram_total : usize,
    max_bits : usize,
    mut depth : &mut [u8],
    mut bits : &mut [u16],
    mut storage_ix : &mut usize,
    mut storage : &mut [u8]
){
  let mut count: u64 = 0;
  let mut symbols: [u64; 4] = [0; 4];
  let mut length: u64 = 0;
  let mut total: usize = histogram_total;
  // Find up to 4 used symbols and the alphabet length actually spanned;
  // stops as soon as the running total is exhausted.
  while total != 0usize {
    if histogram[(length as (usize))] != 0 {
      if count < 4 {
        symbols[count as usize] = length;
      }
      count = count.wrapping_add(1);
      total = total.wrapping_sub(histogram[(length as (usize))] as (usize));
    }
    length =
length.wrapping_add(1);
  }
  // Degenerate code: a single used symbol is written directly with depth 0.
  // NOTE(review): the C original of this function returns right after this
  // branch; here control falls through into the general path — confirm this
  // is intended (or that a `return` was lost) before relying on this path.
  if count <= 1 {
    BrotliWriteBits(4, 1, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
    depth[symbols[0usize] as (usize)] = 0i32 as (u8);
    bits[symbols[0usize] as (usize)] = 0i32 as (u16);
  }
  for depth_elem in depth[..(length as usize)].iter_mut() {
    *depth_elem = 0; // memset
  }
  {
    // Worst-case node count for a Huffman tree over `length` leaves.
    let max_tree_size: u64 = (2u64).wrapping_mul(length).wrapping_add(1);
    let mut tree = if max_tree_size != 0 {
      m.alloc_cell(max_tree_size as usize)
    } else {
      AllocHT::AllocatedMemory::default() // null
    };
    let mut count_limit: u32;
    if !(0i32 == 0) {
      // Dead branch kept from the C translation (allocation-failure check).
      return;
    }
    // Retry loop: clamp counts below count_limit and rebuild until the
    // resulting depths fit the 14-bit limit checked by BrotliSetDepth below.
    count_limit = 1u32;
    'break11: loop {
      {
        let mut node_index: u32 = 0u32;
        let mut l: u64;
        // Seed leaves in reverse symbol order.
        l = length;
        while l != 0 {
          l = l.wrapping_sub(1);
          if histogram[(l as (usize))] != 0 {
            if histogram[(l as (usize))] >= count_limit {
              InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))],
                              histogram[(l as (usize))],
                              -1i32 as (i16),
                              l as (i16));
            } else {
              InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))],
                              count_limit,
                              -1i32 as (i16),
                              l as (i16));
            }
            node_index = node_index.wrapping_add(1 as (u32));
          }
        }
        {
          let n: i32 = node_index as (i32);
          let mut sentinel: HuffmanTree;
          let mut i: i32 = 0i32;
          let mut j: i32 = n + 1i32;
          let mut k: i32;
          // Two-queue Huffman construction: `i` scans the sorted leaves,
          // `j` scans internal nodes appended past the sentinels.
          SortHuffmanTreeItems(tree.slice_mut(), n as (usize), SortHuffmanTree{});
          sentinel = NewHuffmanTree(!(0u32), -1i16, -1i16);
          tree.slice_mut()[(node_index.wrapping_add(1u32) as (usize))] = sentinel.clone();
          tree.slice_mut()[(node_index as (usize))] = sentinel.clone();
          node_index = node_index.wrapping_add(2u32);
          k = n - 1i32;
          while k > 0i32 {
            {
              let mut left: i32;
              let mut right: i32;
              if (tree.slice()[(i as (usize))]).total_count_ <=
                 (tree.slice()[(j as (usize))]).total_count_ {
                left = i;
                i = i + 1;
              } else {
                left = j;
                j = j + 1;
              }
              if (tree.slice()[(i as (usize))]).total_count_ <=
                 (tree.slice()[(j as (usize))]).total_count_ {
                right = i;
                i = i + 1;
              } else {
                right = j;
                j = j + 1;
              }
              let sum_total = (tree.slice()[(left as (usize))])
                .total_count_
.wrapping_add((tree.slice()[(right as (usize))]).total_count_);
              // Write the freshly merged internal node (slot node_index - 1),
              // then append a new sentinel.
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).total_count_ = sum_total;
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).index_left_ = left as (i16);
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).index_right_or_value_ = right as (i16);
              tree.slice_mut()[(node_index as (usize))] = sentinel.clone();
              node_index = node_index.wrapping_add(1u32);
            }
            k = k - 1;
          }
          // Depth limit is 14 bits; on failure retry with a doubled
          // count_limit (flattens the tree).
          if BrotliSetDepth(2i32 * n - 1i32, tree.slice_mut(), depth, 14i32) {
            {
              break 'break11;
            }
          }
        }
      }
      count_limit = count_limit.wrapping_mul(2u32);
    }
    {
      m.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default()));
    }
  }
  BrotliConvertBitDepthsToSymbols(depth, length as usize, bits);
  if count <= 4 {
    let mut i: u64;
    // Simple code: 2-bit marker, NSYM-1, then the symbols sorted by depth.
    BrotliWriteBits(2, 1, storage_ix, storage);
    BrotliWriteBits(2, count.wrapping_sub(1) as u64, storage_ix, storage);
    // Selection-sort the (at most 4) symbols by ascending depth.
    i = 0;
    while i < count {
      {
        let mut j: u64;
        j = i.wrapping_add(1);
        while j < count {
          {
            if depth[(symbols[j as usize] as (usize))] as (i32) <
               depth[(symbols[i as usize] as (usize)) as usize] as (i32) {
              let mut brotli_swap_tmp: u64 = symbols[j as usize];
              symbols[j as usize] = symbols[i as usize];
              symbols[i as usize] = brotli_swap_tmp;
            }
          }
          j = j.wrapping_add(1);
        }
      }
      i = i.wrapping_add(1);
    }
    if count == 2 {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
    } else if count == 3 {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage);
    } else {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[3usize], storage_ix, storage);
// 4-symbol simple code: one extra bit selects the tree shape.
      BrotliWriteBits(1, if depth[(symbols[0usize] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage);
    }
  } else {
    // General case: depths are written with the static code-length code,
    // using brotli's zero-run / nonzero-run shortcut tables.
    let mut previous_value: u8 = 8i32 as (u8);
    let mut i: u64;
    StoreStaticCodeLengthCode(storage_ix, storage);
    i = 0;
    while i < length {
      let value: u8 = depth[(i as (usize))];
      // Measure the run of equal depths starting at i.
      let mut reps: u64 = 1;
      let mut k: u64;
      k = i.wrapping_add(1);
      while k < length && (depth[(k as (usize))] as (i32) == value as (i32)) {
        {
          reps = reps.wrapping_add(1);
        }
        k = k.wrapping_add(1);
      }
      i = i.wrapping_add(reps);
      if value as (i32) == 0i32 {
        BrotliWriteBits(kZeroRepsDepth[reps as usize] as (u8),
                        kZeroRepsBits[reps as usize] as u64,
                        storage_ix,
                        storage);
      } else {
        // A depth change costs one literal symbol before any repeat code.
        if previous_value as (i32) != value as (i32) {
          BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8),
                          kCodeLengthBits[value as (usize)] as (u64),
                          storage_ix,
                          storage);
          reps = reps.wrapping_sub(1);
        }
        if reps < 3 {
          while reps != 0 {
            reps = reps.wrapping_sub(1);
            BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8),
                            kCodeLengthBits[value as (usize)] as (u64),
                            storage_ix,
                            storage);
          }
        } else {
          reps = reps.wrapping_sub(3);
          BrotliWriteBits(kNonZeroRepsDepth[reps as usize] as (u8),
                          kNonZeroRepsBits[reps as usize] as u64,
                          storage_ix,
                          storage);
        }
        previous_value = value;
      }
    }
  }
}

/// Literal context modes (see the `Context` computation later in this file).
pub enum ContextType {
  CONTEXT_LSB6 = 0,
  CONTEXT_MSB6 = 1,
  CONTEXT_UTF8 = 2,
  CONTEXT_SIGNED = 3,
}

/// One insert-and-copy command produced by the match finder.
#[derive(Clone, Copy)]
pub struct Command {
  pub insert_len_: u32,
  pub copy_len_: u32,
  pub dist_extra_: u32,
  pub cmd_prefix_: u16,
  pub dist_prefix_: u16,
}

/// Block-switch metadata for one symbol category.
/// Raw pointers are state carried over from the C translation.
#[derive(Clone, Copy)]
pub struct BlockSplit {
  pub num_types: usize,
  pub num_blocks: usize,
  pub types: *mut u8,
  pub lengths: *mut u32,
  pub types_alloc_size: usize,
  pub lengths_alloc_size: usize,
}

/// Histogram over the 256 literal byte values.
pub struct HistogramLiteral {
  pub data_: [u32; 256],
  pub total_count_: usize,
  pub bit_cost_: f64,
}

impl SliceWrapper<u32> for HistogramLiteral {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}

impl SliceWrapperMut<u32> for HistogramLiteral {
  fn slice_mut(&mut self) ->
&mut [u32] {
    return &mut self.data_[..];
  }
}

/// Histogram over the 704 insert-and-copy command symbols.
pub struct HistogramCommand {
  pub data_: [u32; 704],
  pub total_count_: usize,
  pub bit_cost_: f64,
}

impl SliceWrapper<u32> for HistogramCommand {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}

impl SliceWrapperMut<u32> for HistogramCommand {
  fn slice_mut(&mut self) -> &mut [u32] {
    return &mut self.data_[..];
  }
}

/// Histogram over distance symbols (up to 520 codes).
pub struct HistogramDistance {
  pub data_: [u32; 520],
  pub total_count_: usize,
  pub bit_cost_: f64,
}

impl SliceWrapper<u32> for HistogramDistance {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}

impl SliceWrapperMut<u32> for HistogramDistance {
  fn slice_mut(&mut self) -> &mut [u32] {
    return &mut self.data_[..];
  }
}

/// Full meta-block description: per-category block splits, context maps and
/// histogram arrays. Raw pointers are state carried over from the C
/// translation.
#[derive(Clone, Copy)]
pub struct MetaBlockSplit {
  pub literal_split: BlockSplit,
  pub command_split: BlockSplit,
  pub distance_split: BlockSplit,
  pub literal_context_map: *mut u32,
  pub literal_context_map_size: usize,
  pub distance_context_map: *mut u32,
  pub distance_context_map_size: usize,
  pub literal_histograms: *mut HistogramLiteral,
  pub literal_histograms_size: usize,
  pub command_histograms: *mut HistogramCommand,
  pub command_histograms_size: usize,
  pub distance_histograms: *mut HistogramDistance,
  pub distance_histograms_size: usize,
}

/// Tracks the last two block types so block-type symbols can be coded
/// relative to them (see NextBlockTypeCode).
#[derive(Clone, Copy)]
pub struct BlockTypeCodeCalculator {
  pub last_type: usize,
  pub second_last_type: usize,
}

/// Huffman codes for block-switch commands: block-type symbols (up to 256
/// types + 2 special codes) and the 26 block-length prefix codes.
pub struct BlockSplitCode {
  pub type_code_calculator: BlockTypeCodeCalculator,
  pub type_depths: [u8; 258],
  pub type_bits: [u16; 258],
  pub length_depths: [u8; 26],
  pub length_bits: [u16; 26],
}

/// Streaming encoder for one symbol category of a meta-block: walks the
/// block-switch list and emits each symbol with the entropy code selected by
/// the current block type (depths_/bits_ hold one code per block type,
/// laid out contiguously by alphabet_size_).
pub struct BlockEncoder<AllocU8: alloc::Allocator<u8>,
                        AllocU16: alloc::Allocator<u16>,
                        AllocU32: alloc::Allocator<u32>>
{
  /*
  pub alloc_u8 : AllocU8,
  pub alloc_u16 : AllocU16,
  pub alloc_u32 : AllocU32,
  pub alloc_ht : AllocHT,*/
  pub alphabet_size_: usize,
  pub num_block_types_: usize,
  pub block_types_: AllocU8::AllocatedMemory,
  pub block_lengths_: AllocU32::AllocatedMemory,
  pub num_blocks_: usize,
  pub block_split_code_: BlockSplitCode,
  pub block_ix_:
usize,
  pub block_len_: usize,
  pub entropy_ix_: usize,
  pub depths_: AllocU8::AllocatedMemory,
  pub bits_: AllocU16::AllocatedMemory,
}

/// floor(log2(n)) for n > 1; returns 0 for n == 0 and n == 1.
fn Log2FloorNonZero(mut n: u64) -> u32 {
  let mut result: u32 = 0u32;
  'loop1: loop {
    if {
         n = n >> 1i32;
         n
       } != 0 {
      result = result.wrapping_add(1 as (u32));
      continue 'loop1;
    } else {
      break 'loop1;
    }
  }
  result
}

/// Computes the meta-block length header fields for a block of `length`
/// bytes (must be 1..=1<<24): `bits` = MLEN-1, `numbits` = total bit count
/// (4 per nibble), `nibblesbits` = the 2-bit MNIBBLES-4 code.
fn BrotliEncodeMlen(mut length: u32,
                    mut bits: &mut u64,
                    mut numbits: &mut u32,
                    mut nibblesbits: &mut u32) {
  let mut lg: u32 = (if length == 1u32 {
                       1u32
                     } else {
                       Log2FloorNonZero(length.wrapping_sub(1u32) as (u32) as (u64))
                         .wrapping_add(1u32)
                     }) as (u32);
  // At least 4 nibbles are always used.
  let mut mnibbles: u32 = (if lg < 16u32 {
                             16u32
                           } else {
                             lg.wrapping_add(3u32)
                           })
    .wrapping_div(4u32);
  assert!(length > 0);
  assert!(length <= (1 << 24));
  assert!(lg <= 24);
  *nibblesbits = mnibbles.wrapping_sub(4u32);
  *numbits = mnibbles.wrapping_mul(4u32);
  *bits = length.wrapping_sub(1u32) as u64;
}

/// Writes the compressed meta-block header: ISLAST (plus a 0 ISLASTEMPTY bit
/// when last), MNIBBLES and MLEN-1, and for non-last blocks a 0
/// ISUNCOMPRESSED bit.
fn StoreCompressedMetaBlockHeader(mut is_final_block: i32,
                                  mut length: usize,
                                  mut storage_ix: &mut usize,
                                  mut storage: &mut [u8]) {
  let mut lenbits: u64 = 0;
  let mut nlenbits: u32 = 0;
  let mut nibblesbits: u32 = 0;
  BrotliWriteBits(1, is_final_block as (u64), storage_ix, storage);
  if is_final_block != 0 {
    // ISLASTEMPTY = 0.
    BrotliWriteBits(1, 0, storage_ix, storage);
  }
  BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits);
  BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage);
  BrotliWriteBits(nlenbits as u8, lenbits, storage_ix, storage);
  if is_final_block == 0 {
    // ISUNCOMPRESSED = 0.
    BrotliWriteBits(1, 0, storage_ix, storage);
  }
}

// History starts as (last=1, second_last=0), matching the decoder's initial
// state.
fn NewBlockTypeCodeCalculator() -> BlockTypeCodeCalculator {
  return BlockTypeCodeCalculator {
           last_type: 1,
           second_last_type: 0,
         };
}

/// Constructs a BlockEncoder over the given block-switch arrays; depths_ and
/// bits_ stay empty until the entropy codes are built.
fn NewBlockEncoder<AllocU8: alloc::Allocator<u8>,
                   AllocU16: alloc::Allocator<u16>,
                   AllocU32: alloc::Allocator<u32>>
  (mut alphabet_size: usize,
   mut num_block_types: usize,
   mut block_types: AllocU8::AllocatedMemory,
   mut block_lengths: AllocU32::AllocatedMemory,
   num_blocks: usize)
   -> BlockEncoder<AllocU8, AllocU16, AllocU32> {
  let
block_len: usize;
  // First block's remaining length; 0 when there are no blocks.
  if num_blocks != 0 && block_lengths.slice().len() != 0 {
    block_len = block_lengths.slice()[0] as usize;
  } else {
    block_len = 0;
  }
  return BlockEncoder::<AllocU8, AllocU16, AllocU32> {
           alphabet_size_: alphabet_size,
           num_block_types_: num_block_types,
           block_types_: block_types,
           block_lengths_: block_lengths,
           num_blocks_: num_blocks,
           block_split_code_: BlockSplitCode {
             type_code_calculator: NewBlockTypeCodeCalculator(),
             type_depths: [0; 258],
             type_bits: [0; 258],
             length_depths: [0; 26],
             length_bits: [0; 26],
           },
           block_ix_: 0,
           block_len_: block_len,
           entropy_ix_: 0,
           depths_: AllocU8::AllocatedMemory::default(),
           bits_: AllocU16::AllocatedMemory::default(),
         };
}

/// Maps a block type to its symbol: 1 if it is last_type+1, 0 if it repeats
/// the second-to-last type, otherwise type+2. Updates the two-deep history.
extern "C" fn NextBlockTypeCode(mut calculator: &mut BlockTypeCodeCalculator,
                                mut type_: u8)
                                -> usize {
  let mut type_code: usize = (if type_ as (usize) ==
                                 (*calculator).last_type.wrapping_add(1usize) {
                                1u32
                              } else if type_ as (usize) == (*calculator).second_last_type {
                                0u32
                              } else {
                                (type_ as (u32)).wrapping_add(2u32)
                              }) as (usize);
  (*calculator).second_last_type = (*calculator).last_type;
  (*calculator).last_type = type_ as (usize);
  type_code
}

/// Finds the block-length prefix code (0..25) whose range contains `len`:
/// a coarse three-way guess, then a forward scan over the offset table.
fn BlockLengthPrefixCode(mut len: u32) -> u32 {
  let mut code: u32 = (if len >= 177u32 {
                         if len >= 753u32 { 20i32 } else { 14i32 }
                       } else if len >= 41u32 {
                         7i32
                       } else {
                         0i32
                       }) as (u32);
  while code < (26i32 - 1i32) as (u32) &&
        (len >= kBlockLengthPrefixCode[code.wrapping_add(1u32) as (usize)].offset) {
    code = code.wrapping_add(1 as (u32));
  }
  code
}

/// Variable-length encoding of a small count: n == 0 is a single 0 bit;
/// otherwise a 1 bit, a 3-bit exponent nbits = floor(log2 n), then
/// n - 2^nbits in nbits bits.
fn StoreVarLenUint8(mut n: u64, mut storage_ix: &mut usize, mut storage: &mut [u8]) {
  if n == 0 {
    BrotliWriteBits(1, 0, storage_ix, storage);
  } else {
    let mut nbits: u8 = Log2FloorNonZero(n) as (u8);
    BrotliWriteBits(1, 1, storage_ix, storage);
    BrotliWriteBits(3, nbits as u64, storage_ix, storage);
    BrotliWriteBits(nbits, n.wrapping_sub(1u64 << nbits), storage_ix, storage);
  }
}

/// Stores a "simple" prefix code (at most 4 distinct symbols): marker,
/// NSYM-1, then the symbols sorted by depth. Reorders `symbols` in place.
fn StoreSimpleHuffmanTree(mut depths: &[u8],
                          mut symbols: &mut [usize],
                          mut num_symbols: usize,
                          mut max_bits: usize,
                          mut storage_ix: &mut
usize,
                          mut storage: &mut [u8]) {
  // 2-bit marker for a simple code, then NSYM-1.
  BrotliWriteBits(2, 1, storage_ix, storage);
  BrotliWriteBits(2, num_symbols.wrapping_sub(1) as u64, storage_ix, storage);
  {
    // Selection-sort the symbols by ascending depth.
    let mut i: usize;
    i = 0usize;
    while i < num_symbols {
      {
        let mut j: usize;
        j = i.wrapping_add(1usize);
        while j < num_symbols {
          {
            if depths[(symbols[(j as (usize))] as (usize))] as (i32) <
               depths[(symbols[(i as (usize))] as (usize))] as (i32) {
              let mut __brotli_swap_tmp: usize = symbols[(j as (usize))];
              symbols[(j as (usize))] = symbols[(i as (usize))];
              symbols[(i as (usize))] = __brotli_swap_tmp;
            }
          }
          j = j.wrapping_add(1 as (usize));
        }
      }
      i = i.wrapping_add(1 as (usize));
    }
  }
  if num_symbols == 2usize {
    BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage);
  } else if num_symbols == 3usize {
    BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage);
  } else {
    BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[(3usize)] as u64, storage_ix, storage);
    // 4-symbol case: one extra bit selects the tree shape.
    BrotliWriteBits(1, if depths[(symbols[(0usize)] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage);
  }
}

/// Builds a Huffman code for `histogram` (depth limit 15) and stores it,
/// choosing the simple representation when at most 4 symbols are used.
/// Fills `depth` and `bits` for later symbol emission.
fn BuildAndStoreHuffmanTree(mut histogram: &[u32],
                            length: usize,
                            mut tree: &mut [HuffmanTree],
                            mut depth: &mut [u8],
                            mut bits: &mut [u16],
                            mut storage_ix: &mut usize,
                            mut storage: &mut [u8]) {
  let mut count: usize = 0usize;
  let mut s4: [usize; 4] = [0usize, 0usize, 0usize, 0usize];
  let mut i: usize;
  let mut max_bits: usize = 0usize;
  // Collect the first (up to) 4 used symbols; stop scanning once it is clear
  // more than 4 are in use.
  i = 0usize;
  'break31: while i < length {
    {
      if histogram[(i as (usize))] != 0 {
        if count < 4usize {
s4[count] = i;
        } else if count > 4usize {
          {
            break 'break31;
          }
        }
        count = count.wrapping_add(1 as (usize));
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  {
    // max_bits = bit width needed to name any symbol of the alphabet.
    let mut max_bits_counter: usize = length.wrapping_sub(1usize);
    while max_bits_counter != 0 {
      max_bits_counter = max_bits_counter >> 1i32;
      max_bits = max_bits.wrapping_add(1 as (usize));
    }
  }
  if count <= 1usize {
    // Degenerate code: emit the single symbol directly; its depth is 0.
    BrotliWriteBits(4, 1, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, s4[0usize] as u64, storage_ix, storage);
    depth[(s4[0usize] as (usize))] = 0i32 as (u8);
    bits[(s4[0usize] as (usize))] = 0i32 as (u16);
    return;
  }
  for depth_elem in depth[..length].iter_mut() {
    *depth_elem = 0; // memset
  }
  BrotliCreateHuffmanTree(histogram, length, 15i32, tree, depth);
  BrotliConvertBitDepthsToSymbols(depth, length, bits);
  if count <= 4usize {
    StoreSimpleHuffmanTree(depth, &mut s4[..], count, max_bits, storage_ix, storage);
  } else {
    BrotliStoreHuffmanTree(depth, length, tree, storage_ix, storage);
  }
}

/// Returns the block-length prefix code for `len` plus its extra-bit count
/// and extra-bit value.
fn GetBlockLengthPrefixCode(mut len: u32,
                            mut code: &mut usize,
                            mut n_extra: &mut u32,
                            mut extra: &mut u32) {
  *code = BlockLengthPrefixCode(len) as (usize);
  *n_extra = kBlockLengthPrefixCode[*code].nbits;
  *extra = len.wrapping_sub(kBlockLengthPrefixCode[*code].offset);
}

/// Emits one block-switch command: the block-type symbol (skipped for the
/// very first block, whose type is implicit), then the length prefix code
/// and its extra bits. Advances the type history even when the symbol is
/// not written.
fn StoreBlockSwitch(mut code: &mut BlockSplitCode,
                    block_len: u32,
                    block_type: u8,
                    mut is_first_block: i32,
                    mut storage_ix: &mut usize,
                    mut storage: &mut [u8]) {
  let mut typecode: usize = NextBlockTypeCode(&mut (*code).type_code_calculator, block_type);
  let mut lencode: usize = 0;
  let mut len_nextra: u32 = 0;
  let mut len_extra: u32 = 0;
  if is_first_block == 0 {
    BrotliWriteBits((*code).type_depths[typecode] as (u8),
                    (*code).type_bits[typecode] as (u64),
                    storage_ix,
                    storage);
  }
  GetBlockLengthPrefixCode(block_len, &mut lencode, &mut len_nextra, &mut len_extra);
  BrotliWriteBits((*code).length_depths[lencode] as (u8),
                  (*code).length_bits[lencode] as (u64),
                  storage_ix,
                  storage);
  BrotliWriteBits(len_nextra as (u8), len_extra as (u64), storage_ix, storage);
}

fn
BuildAndStoreBlockSplitCode(mut types: &[u8],
                            mut lengths: &[u32],
                            num_blocks: usize,
                            num_types: usize,
                            mut tree: &mut [HuffmanTree],
                            mut code: &mut BlockSplitCode,
                            mut storage_ix: &mut usize,
                            mut storage: &mut [u8]) {
  let mut type_histo: [u32; 258] = [0;258];
  let mut length_histo: [u32; 26] = [0;26];
  let mut i: usize;
  let mut type_code_calculator = NewBlockTypeCodeCalculator();
  // Histogram the block-switch type codes (the first block's type is
  // implicit, so i == 0 is excluded) and the length prefix codes.
  i = 0usize;
  while i < num_blocks {
    {
      let mut type_code: usize = NextBlockTypeCode(&mut type_code_calculator,
                                                   types[(i as (usize))]);
      if i != 0usize {
        let _rhs = 1;
        let _lhs = &mut type_histo[type_code];
        *_lhs = (*_lhs).wrapping_add(_rhs as (u32));
      }
      {
        let _rhs = 1;
        let _lhs = &mut length_histo[BlockLengthPrefixCode(lengths[(i as (usize))]) as (usize)];
        *_lhs = (*_lhs).wrapping_add(_rhs as (u32));
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  // NBLTYPES-1; the switch codes only exist when there is more than one type.
  StoreVarLenUint8(num_types.wrapping_sub(1) as u64, storage_ix, storage);
  if num_types > 1usize {
    // Type alphabet is num_types + 2 (the two special repeat codes).
    BuildAndStoreHuffmanTree(&mut type_histo[0usize..],
                             num_types.wrapping_add(2usize),
                             tree,
                             &mut (*code).type_depths[0usize..],
                             &mut (*code).type_bits[0usize..],
                             storage_ix,
                             storage);
    BuildAndStoreHuffmanTree(&mut length_histo[0usize..],
                             26usize,
                             tree,
                             &mut (*code).length_depths[0usize..],
                             &mut (*code).length_bits[0usize..],
                             storage_ix,
                             storage);
    StoreBlockSwitch(code, lengths[(0usize)], types[(0usize)], 1i32, storage_ix, storage);
  }
}

/// Convenience wrapper: stores the block-split code for the category managed
/// by this BlockEncoder.
fn BuildAndStoreBlockSwitchEntropyCodes<AllocU8: alloc::Allocator<u8>,
                                        AllocU16: alloc::Allocator<u16>,
                                        AllocU32: alloc::Allocator<u32>>
  (mut xself: &mut BlockEncoder<AllocU8, AllocU16, AllocU32>,
   mut tree: &mut [HuffmanTree],
   mut storage_ix: &mut usize,
   mut storage: &mut [u8]) {
  BuildAndStoreBlockSplitCode((*xself).block_types_.slice(),
                              (*xself).block_lengths_.slice(),
                              (*xself).num_blocks_,
                              (*xself).num_block_types_,
                              tree,
                              &mut (*xself).block_split_code_,
                              storage_ix,
                              storage);
}

/// Stores the trivial context map in which every context of block type i maps
/// to histogram i, expressed compactly with run-length encoding of zeros.
fn StoreTrivialContextMap(mut num_types: usize,
                          mut context_bits: usize,
                          mut tree: &mut [HuffmanTree],
                          mut storage_ix: &mut usize,
                          mut storage: &mut [u8]) {
StoreVarLenUint8(num_types.wrapping_sub(1usize) as u64, storage_ix, storage);
  if num_types > 1usize {
    let mut repeat_code: usize = context_bits.wrapping_sub(1u32 as (usize));
    let mut repeat_bits: usize = (1u32 << repeat_code).wrapping_sub(1u32) as (usize);
    let mut alphabet_size: usize = num_types.wrapping_add(repeat_code);
    let mut histogram: [u32; 272] = [0;272];
    let mut depths: [u8; 272] = [0;272];
    let mut bits: [u16; 272] = [0;272];
    let mut i: usize;
    // RLE enabled, with run-length prefix `repeat_code`.
    BrotliWriteBits(1u8, 1u64, storage_ix, storage);
    BrotliWriteBits(4u8, repeat_code.wrapping_sub(1usize) as u64, storage_ix, storage);
    // Synthetic histogram shaped so the Huffman code favors the repeat symbol.
    histogram[repeat_code] = num_types as (u32);
    histogram[0usize] = 1u32;
    i = context_bits;
    while i < alphabet_size {
      {
        histogram[i] = 1u32;
      }
      i = i.wrapping_add(1 as (usize));
    }
    BuildAndStoreHuffmanTree(&mut histogram[..],
                             alphabet_size,
                             tree,
                             &mut depths[..],
                             &mut bits[..],
                             storage_ix,
                             storage);
    // Each block type: one map value, then a maximal zero run.
    i = 0usize;
    while i < num_types {
      {
        let mut code: usize = if i == 0usize {
          0usize
        } else {
          i.wrapping_add(context_bits).wrapping_sub(1usize)
        };
        BrotliWriteBits(depths[code] as (u8), bits[code] as (u64), storage_ix, storage);
        BrotliWriteBits(depths[repeat_code] as (u8),
                        bits[repeat_code] as (u64),
                        storage_ix,
                        storage);
        BrotliWriteBits(repeat_code as u8, repeat_bits as u64, storage_ix, storage);
      }
      i = i.wrapping_add(1 as (usize));
    }
    // IMTF (inverse move-to-front) bit = 1.
    BrotliWriteBits(1, 1, storage_ix, storage);
  }
}

/// Linear search; returns v_size when `value` is absent.
fn IndexOf(mut v: &[u8], mut v_size: usize, mut value: u8) -> usize {
  let mut i: usize = 0usize;
  while i < v_size {
    {
      if v[(i as (usize))] as (i32) == value as (i32) {
        return i;
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  i
}

/// Moves v[index] to the front, shifting v[0..index] up by one slot.
fn MoveToFront(mut v: &mut [u8], mut index: usize) {
  let mut value: u8 = v[(index as (usize))];
  let mut i: usize;
  i = index;
  while i != 0usize {
    {
      v[(i as (usize))] = v[(i.wrapping_sub(1usize) as (usize))];
    }
    i = i.wrapping_sub(1 as (usize));
  }
  v[(0usize)] = value;
}

/// Move-to-front transform of `v_in` into `v_out`.
/// Values are narrowed to u8 for the MTF table — assumes they fit in a byte.
fn MoveToFrontTransform(mut v_in: &[u32], v_size: usize, mut v_out: &mut [u32]) {
  let mut i: usize;
  let mut mtf: [u8; 256] = [0;256];
  let
mut max_value: u32;
  if v_size == 0usize {
    return;
  }
  max_value = v_in[(0usize)];
  i = 1usize;
  while i < v_size {
    {
      if v_in[(i as (usize))] > max_value {
        max_value = v_in[(i as (usize))];
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  0i32; // no-op statement kept from the C translation
  // Initialize the MTF table with the identity permutation 0..=max_value.
  i = 0usize;
  while i <= max_value as (usize) {
    {
      mtf[i] = i as (u8);
    }
    i = i.wrapping_add(1 as (usize));
  }
  {
    let mut mtf_size: usize = max_value.wrapping_add(1u32) as (usize);
    i = 0usize;
    while i < v_size {
      {
        let mut index: usize = IndexOf(&mtf[..], mtf_size, v_in[(i as (usize))] as (u8));
        0i32;
        v_out[(i as (usize))] = index as (u32);
        MoveToFront(&mut mtf[..], index);
      }
      i = i.wrapping_add(1 as (usize));
    }
  }
}

fn brotli_max_uint32_t(mut a: u32, mut b: u32) -> u32 {
  if a > b { a } else { b }
}

fn brotli_min_uint32_t(mut a: u32, mut b: u32) -> u32 {
  if a < b { a } else { b }
}

/// In-place zero-run-length coding of `v`: nonzero values are shifted up by
/// the chosen run-length prefix count; zero runs become run-length codes
/// with the prefix in the low 9 bits and the extra-bit value shifted left
/// by 9. `out_size` receives the compacted length; `max_run_length_prefix`
/// is clamped to what the data actually needs.
fn RunLengthCodeZeros(in_size: usize,
                      mut v: &mut [u32],
                      mut out_size: &mut usize,
                      mut max_run_length_prefix: &mut u32) {
  let mut max_reps: u32 = 0u32;
  let mut i: usize;
  let mut max_prefix: u32;
  // First pass: find the longest zero run to bound the prefix size.
  i = 0usize;
  while i < in_size {
    let mut reps: u32 = 0u32;
    while i < in_size && (v[(i as (usize))] != 0u32) {
      i = i.wrapping_add(1 as (usize));
    }
    while i < in_size && (v[(i as (usize))] == 0u32) {
      {
        reps = reps.wrapping_add(1 as (u32));
      }
      i = i.wrapping_add(1 as (usize));
    }
    max_reps = brotli_max_uint32_t(reps, max_reps);
  }
  max_prefix = if max_reps > 0u32 {
    Log2FloorNonZero(max_reps as (u64))
  } else {
    0u32
  };
  max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix);
  *max_run_length_prefix = max_prefix;
  // Second pass: rewrite the sequence in place.
  *out_size = 0usize;
  i = 0usize;
  while i < in_size {
    0i32;
    if v[(i as (usize))] != 0u32 {
      v[(*out_size as (usize))] = (v[(i as (usize))]).wrapping_add(*max_run_length_prefix);
      i = i.wrapping_add(1 as (usize));
      *out_size = (*out_size).wrapping_add(1 as (usize));
    } else {
      // Measure the zero run starting at i.
      let mut reps: u32 = 1u32;
      let mut k: usize;
      k = i.wrapping_add(1usize);
      while k < in_size && (v[(k as (usize))] == 0u32) {
        {
          reps = reps.wrapping_add(1 as (u32));
        }
        k = k.wrapping_add(1 as (usize));
      }
      i =
i.wrapping_add(reps as (usize));
      // Emit the zero run as one or more run-length codes; runs longer than
      // the max prefix can express are split into maximal pieces.
      while reps != 0u32 {
        if reps < 2u32 << max_prefix {
          let mut run_length_prefix: u32 = Log2FloorNonZero(reps as (u64));
          let extra_bits: u32 = reps.wrapping_sub(1u32 << run_length_prefix);
          v[(*out_size as (usize))] = run_length_prefix.wrapping_add(extra_bits << 9i32);
          *out_size = (*out_size).wrapping_add(1 as (usize));
          {
            {
              break;
            }
          }
        } else {
          let extra_bits: u32 = (1u32 << max_prefix).wrapping_sub(1u32);
          v[(*out_size as (usize))] = max_prefix.wrapping_add(extra_bits << 9i32);
          reps = reps.wrapping_sub((2u32 << max_prefix).wrapping_sub(1u32));
          *out_size = (*out_size).wrapping_add(1 as (usize));
        }
      }
    }
  }
}

/// Encodes a context map: move-to-front transform, zero-run-length coding,
/// then a Huffman code over the resulting symbols.
fn EncodeContextMap<AllocU32: alloc::Allocator<u32>>(mut m: &mut AllocU32,
                                                     mut context_map: &[u32],
                                                     mut context_map_size: usize,
                                                     mut num_clusters: usize,
                                                     mut tree: &mut [HuffmanTree],
                                                     mut storage_ix: &mut usize,
                                                     mut storage: &mut [u8]) {
  let mut i: usize;
  let mut rle_symbols: AllocU32::AllocatedMemory;
  let mut max_run_length_prefix: u32 = 6u32;
  let mut num_rle_symbols: usize = 0usize;
  // Low 9 bits hold the symbol; higher bits hold the RLE extra-bit value
  // (matches the packing done by RunLengthCodeZeros).
  static kSymbolMask: u32 = (1u32 << 9i32) - 1;
  let mut depths: [u8; 272] = [0;272];
  let mut bits: [u16; 272] = [0;272];
  StoreVarLenUint8(num_clusters.wrapping_sub(1usize) as u64, storage_ix, storage);
  // A single cluster needs no map body.
  if num_clusters == 1usize {
    return;
  }
  rle_symbols = if context_map_size != 0 {
    m.alloc_cell(context_map_size)
  } else {
    AllocU32::AllocatedMemory::default()
  };
  MoveToFrontTransform(context_map, context_map_size, rle_symbols.slice_mut());
  RunLengthCodeZeros(context_map_size,
                     rle_symbols.slice_mut(),
                     &mut num_rle_symbols,
                     &mut max_run_length_prefix);
  let mut histogram: [u32; 272] = [0;272];
  i = 0usize;
  while i < num_rle_symbols {
    {
      let _rhs = 1;
      let _lhs = &mut histogram[(rle_symbols.slice()[(i as (usize))] & kSymbolMask) as (usize)];
      *_lhs = (*_lhs).wrapping_add(_rhs as (u32));
    }
    i = i.wrapping_add(1 as (usize));
  }
  {
    let mut use_rle: i32 = if !!(max_run_length_prefix > 0u32) {
      1i32
    } else {
      0i32
    };
    BrotliWriteBits(1, use_rle as (u64), storage_ix, storage);
    if
use_rle != 0 {
      BrotliWriteBits(4,
                      max_run_length_prefix.wrapping_sub(1u32) as (u64),
                      storage_ix,
                      storage);
    }
  }
  // Alphabet = cluster ids plus the run-length prefix codes.
  BuildAndStoreHuffmanTree(&mut histogram[..],
                           num_clusters.wrapping_add(max_run_length_prefix as (usize)),
                           tree,
                           &mut depths[..],
                           &mut bits[..],
                           storage_ix,
                           storage);
  // Emit the RLE symbols; run-length prefixes carry their extra bits.
  i = 0usize;
  while i < num_rle_symbols {
    {
      let rle_symbol: u32 = rle_symbols.slice()[(i as (usize))] & kSymbolMask;
      let extra_bits_val: u32 = rle_symbols.slice()[(i as (usize))] >> 9i32;
      BrotliWriteBits(depths[rle_symbol as (usize)] as (u8),
                      bits[rle_symbol as (usize)] as (u64),
                      storage_ix,
                      storage);
      if rle_symbol > 0u32 && (rle_symbol <= max_run_length_prefix) {
        BrotliWriteBits(rle_symbol as (u8), extra_bits_val as (u64), storage_ix, storage);
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  // IMTF (inverse move-to-front) bit = 1.
  BrotliWriteBits(1, 1, storage_ix, storage);
  m.free_cell(rle_symbols);
}
/* CONTINUE FROM HERE fn BuildAndStoreEntropyCodesLiteral(mut m: &mut [MemoryManager], mut xself: &mut BlockEncoder, mut histograms: &[HistogramLiteral], histograms_size: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u8>())) } else { 0i32 }; (*xself).bits_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u16>())) } else { 0i32 }; if !(0i32 == 0) { return; } { let mut i: usize; i = 0usize; while i < histograms_size { { let mut ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&mut (histograms[(i as (usize))]).data_[0usize], alphabet_size, tree, &mut *(*xself).depths_[(ix as (usize))..], &mut *(*xself).bits_[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } fn BuildAndStoreEntropyCodesCommand(mut m: &mut [MemoryManager], mut xself: &mut BlockEncoder, mut histograms: &[HistogramCommand],
histograms_size: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u8>())) } else { 0i32 }; (*xself).bits_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u16>())) } else { 0i32 }; if !(0i32 == 0) { return; } { let mut i: usize; i = 0usize; while i < histograms_size { { let mut ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&mut (histograms[(i as (usize))]).data_[0usize], alphabet_size, tree, &mut *(*xself).depths_[(ix as (usize))..], &mut *(*xself).bits_[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } fn BuildAndStoreEntropyCodesDistance(mut m: &mut [MemoryManager], mut xself: &mut BlockEncoder, mut histograms: &[HistogramDistance], histograms_size: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u8>())) } else { 0i32 }; (*xself).bits_ = if table_size != 0 { BrotliAllocate(m, table_size.wrapping_mul(::std::mem::size_of::<u16>())) } else { 0i32 }; if !(0i32 == 0) { return; } { let mut i: usize; i = 0usize; while i < histograms_size { { let mut ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&mut (histograms[(i as (usize))]).data_[0usize], alphabet_size, tree, &mut *(*xself).depths_[(ix as (usize))..], &mut *(*xself).bits_[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } fn StoreSymbol(mut xself: &mut BlockEncoder, mut symbol: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { if 
(*xself).block_len_ == 0usize { let mut block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let mut block_len: u32 = *(*xself).block_lengths_[(block_ix as (usize))..]; let mut block_type: u8 = *(*xself).block_types_[(block_ix as (usize))..]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = (block_type as (usize)).wrapping_mul((*xself).alphabet_size_); StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let mut ix: usize = (*xself).entropy_ix_.wrapping_add(symbol); BrotliWriteBits(*(*xself).depths_[(ix as (usize))..] as (usize), *(*xself).bits_[(ix as (usize))..] as (usize), storage_ix, storage); } } fn CommandCopyLenCode(mut xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 ^ (*xself).copy_len_ >> 24i32 } fn GetInsertLengthCode(mut insertlen: usize) -> u16 { if insertlen < 6usize { insertlen as (u16) } else if insertlen < 130usize { let mut nbits: u32 = Log2FloorNonZero(insertlen.wrapping_sub(2usize)).wrapping_sub(1u32); ((nbits << 1i32) as (usize)) .wrapping_add(insertlen.wrapping_sub(2usize) >> nbits) .wrapping_add(2usize) as (u16) } else if insertlen < 2114usize { Log2FloorNonZero(insertlen.wrapping_sub(66usize)).wrapping_add(10u32) as (u16) } else if insertlen < 6210usize { 21u32 as (u16) } else if insertlen < 22594usize { 22u32 as (u16) } else { 23u32 as (u16) } } fn GetCopyLengthCode(mut copylen: usize) -> u16 { if copylen < 10usize { copylen.wrapping_sub(2usize) as (u16) } else if copylen < 134usize { let mut nbits: u32 = Log2FloorNonZero(copylen.wrapping_sub(6usize)).wrapping_sub(1u32); ((nbits << 1i32) as (usize)) .wrapping_add(copylen.wrapping_sub(6usize) >> nbits) .wrapping_add(4usize) as (u16) } else if copylen < 2118usize { Log2FloorNonZero(copylen.wrapping_sub(70usize)).wrapping_add(12u32) as (u16) } else { 23u32 as (u16) } } fn GetInsertExtra(mut 
inscode: u16) -> u32 { kInsExtra[inscode as (usize)] } fn GetInsertBase(mut inscode: u16) -> u32 { kInsBase[inscode as (usize)] } fn GetCopyBase(mut copycode: u16) -> u32 { kCopyBase[copycode as (usize)] } fn GetCopyExtra(mut copycode: u16) -> u32 { kCopyExtra[copycode as (usize)] } fn StoreCommandExtra(mut cmd: &[Command], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut copylen_code: u32 = CommandCopyLenCode(cmd); let mut inscode: u16 = GetInsertLengthCode((*cmd).insert_len_ as (usize)); let mut copycode: u16 = GetCopyLengthCode(copylen_code as (usize)); let mut insnumextra: u32 = GetInsertExtra(inscode); let mut insextraval: usize = (*cmd).insert_len_.wrapping_sub(GetInsertBase(inscode)) as (usize); let mut copyextraval: usize = copylen_code.wrapping_sub(GetCopyBase(copycode)) as (usize); let mut bits: usize = copyextraval << insnumextra | insextraval; BrotliWriteBits(insnumextra.wrapping_add(GetCopyExtra(copycode)) as (usize), bits, storage_ix, storage); } fn Context(mut p1: u8, mut p2: u8, mut mode: ContextType) -> u8 { if mode as (i32) == ContextType::CONTEXT_LSB6 as (i32) { return (p1 as (i32) & 0x3fi32) as (u8); } if mode as (i32) == ContextType::CONTEXT_MSB6 as (i32) { return (p1 as (i32) >> 2i32) as (u8); } if mode as (i32) == ContextType::CONTEXT_UTF8 as (i32) { return (kUTF8ContextLookup[p1 as (usize)] as (i32) | kUTF8ContextLookup[(p2 as (i32) + 256i32) as (usize)] as (i32)) as (u8); } if mode as (i32) == ContextType::CONTEXT_SIGNED as (i32) { return ((kSigned3BitContextLookup[p1 as (usize)] as (i32) << 3i32) + kSigned3BitContextLookup[p2 as (usize)] as (i32)) as (u8); } 0i32 as (u8) } fn StoreSymbolWithContext(mut xself: &mut BlockEncoder, mut symbol: usize, mut context: usize, mut context_map: &[u32], mut storage_ix: &mut usize, mut storage: &mut [u8], context_bits: usize) { if (*xself).block_len_ == 0usize { let mut block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let mut 
block_len: u32 = *(*xself).block_lengths_[(block_ix as (usize))..]; let mut block_type: u8 = *(*xself).block_types_[(block_ix as (usize))..]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = block_type as (usize) << context_bits; StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let mut histo_ix: usize = context_map[((*xself).entropy_ix_.wrapping_add(context) as (usize))] as (usize); let mut ix: usize = histo_ix.wrapping_mul((*xself).alphabet_size_).wrapping_add(symbol); BrotliWriteBits(*(*xself).depths_[(ix as (usize))..] as (usize), *(*xself).bits_[(ix as (usize))..] as (usize), storage_ix, storage); } } fn CommandCopyLen(mut xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 } fn CommandDistanceContext(mut xself: &Command) -> u32 { let mut r: u32 = ((*xself).cmd_prefix_ as (i32) >> 6i32) as (u32); let mut c: u32 = ((*xself).cmd_prefix_ as (i32) & 7i32) as (u32); if (r == 0u32 || r == 2u32 || r == 4u32 || r == 7u32) && (c <= 2u32) { return c; } 3u32 } fn CleanupBlockEncoder(mut m: &mut [MemoryManager], mut xself: &mut BlockEncoder) { { BrotliFree(m, (*xself).depths_); (*xself).depths_ = 0i32; } { BrotliFree(m, (*xself).bits_); (*xself).bits_ = 0i32; } } fn JumpToByteBoundary(mut storage_ix: &mut usize, mut storage: &mut [u8]) { *storage_ix = (*storage_ix).wrapping_add(7u32 as (usize)) & !7u32 as (usize); storage[((*storage_ix >> 3i32) as (usize))] = 0i32 as (u8); } pub fn BrotliStoreMetaBlock(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut prev_byte: u8, mut prev_byte2: u8, mut is_last: i32, mut num_direct_distance_codes: u32, mut distance_postfix_bits: u32, mut literal_context_mode: ContextType, mut commands: &[Command], mut n_commands: usize, mut mb: &[MetaBlockSplit], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut pos: usize = start_pos; 
let mut i: usize; let mut num_distance_codes: usize = (16u32) .wrapping_add(num_direct_distance_codes) .wrapping_add(48u32 << distance_postfix_bits) as (usize); let mut tree: *mut HuffmanTree; let mut literal_enc: BlockEncoder; let mut command_enc: BlockEncoder; let mut distance_enc: BlockEncoder; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { BrotliAllocate(m, ((2i32 * 704i32 + 1i32) as (usize)) .wrapping_mul(::std::mem::size_of::<HuffmanTree>())) } else { 0i32 }; if !(0i32 == 0) { return; } InitBlockEncoder(&mut literal_enc, 256usize, (*mb).literal_split.num_types, (*mb).literal_split.types, (*mb).literal_split.lengths, (*mb).literal_split.num_blocks); InitBlockEncoder(&mut command_enc, 704usize, (*mb).command_split.num_types, (*mb).command_split.types, (*mb).command_split.lengths, (*mb).command_split.num_blocks); InitBlockEncoder(&mut distance_enc, num_distance_codes, (*mb).distance_split.num_types, (*mb).distance_split.types, (*mb).distance_split.lengths, (*mb).distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&mut literal_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut command_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut distance_enc, tree, storage_ix, storage); BrotliWriteBits(2usize, distance_postfix_bits as (usize), storage_ix, storage); BrotliWriteBits(4usize, (num_direct_distance_codes >> distance_postfix_bits) as (usize), storage_ix, storage); i = 0usize; while i < (*mb).literal_split.num_types { { BrotliWriteBits(2usize, literal_context_mode as (usize), storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } if (*mb).literal_context_map_size == 0usize { StoreTrivialContextMap((*mb).literal_histograms_size, 6usize, tree, storage_ix, storage); } else { EncodeContextMap(m, (*mb).literal_context_map, (*mb).literal_context_map_size, (*mb).literal_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } } if 
(*mb).distance_context_map_size == 0usize { StoreTrivialContextMap((*mb).distance_histograms_size, 2usize, tree, storage_ix, storage); } else { EncodeContextMap(m, (*mb).distance_context_map, (*mb).distance_context_map_size, (*mb).distance_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } } BuildAndStoreEntropyCodesLiteral(m, &mut literal_enc, (*mb).literal_histograms, (*mb).literal_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } BuildAndStoreEntropyCodesCommand(m, &mut command_enc, (*mb).command_histograms, (*mb).command_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } BuildAndStoreEntropyCodesDistance(m, &mut distance_enc, (*mb).distance_histograms, (*mb).distance_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } { BrotliFree(m, tree); tree = 0i32; } i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut cmd_code: usize = cmd.cmd_prefix_ as (usize); StoreSymbol(&mut command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (*mb).literal_context_map_size == 0usize { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { StoreSymbol(&mut literal_enc, input[((pos & mask) as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } else { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { let mut context: usize = Context(prev_byte, prev_byte2, literal_context_mode) as (usize); let mut literal: u8 = input[((pos & mask) as (usize))]; StoreSymbolWithContext(&mut literal_enc, literal as (usize), context, (*mb).literal_context_map, storage_ix, storage, 6usize); prev_byte2 = prev_byte; prev_byte = literal; pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 { prev_byte2 = 
input[((pos.wrapping_sub(2usize) & mask) as (usize))]; prev_byte = input[((pos.wrapping_sub(1usize) & mask) as (usize))]; if cmd.cmd_prefix_ as (i32) >= 128i32 { let mut dist_code: usize = cmd.dist_prefix_ as (usize); let mut distnumextra: u32 = cmd.dist_extra_ >> 24i32; let mut distextra: usize = (cmd.dist_extra_ & 0xffffffu32) as (usize); if (*mb).distance_context_map_size == 0usize { StoreSymbol(&mut distance_enc, dist_code, storage_ix, storage); } else { let mut context: usize = CommandDistanceContext(&cmd) as (usize); StoreSymbolWithContext(&mut distance_enc, dist_code, context, (*mb).distance_context_map, storage_ix, storage, 2usize); } BrotliWriteBits(distnumextra as (usize), distextra, storage_ix, storage); } } } i = i.wrapping_add(1 as (usize)); } CleanupBlockEncoder(m, &mut distance_enc); CleanupBlockEncoder(m, &mut command_enc); CleanupBlockEncoder(m, &mut literal_enc); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn HistogramClearLiteral(mut xself: &mut HistogramLiteral) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 256]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramClearCommand(mut xself: &mut HistogramCommand) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 704]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramClearDistance(mut xself: &mut HistogramDistance) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 520]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramAddCommand(mut xself: &mut HistogramCommand, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = (*xself).total_count_.wrapping_add(1 as (usize)); } fn HistogramAddLiteral(mut xself: &mut HistogramLiteral, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = 
(*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = (*xself).total_count_.wrapping_add(1 as (usize)); } fn HistogramAddDistance(mut xself: &mut HistogramDistance, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = (*xself).total_count_.wrapping_add(1 as (usize)); } fn BuildHistograms(mut input: &[u8], mut start_pos: usize, mut mask: usize, mut commands: &[Command], mut n_commands: usize, mut lit_histo: &mut [HistogramLiteral], mut cmd_histo: &mut [HistogramCommand], mut dist_histo: &mut [HistogramDistance]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut j: usize; HistogramAddCommand(cmd_histo, cmd.cmd_prefix_ as (usize)); j = cmd.insert_len_ as (usize); while j != 0usize { { HistogramAddLiteral(lit_histo, input[((pos & mask) as (usize))] as (usize)); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { HistogramAddDistance(dist_histo, cmd.dist_prefix_ as (usize)); } } i = i.wrapping_add(1 as (usize)); } } fn StoreDataWithHuffmanCodes(mut input: &[u8], mut start_pos: usize, mut mask: usize, mut commands: &[Command], mut n_commands: usize, mut lit_depth: &[u8], mut lit_bits: &[u16], mut cmd_depth: &[u8], mut cmd_bits: &[u16], mut dist_depth: &[u8], mut dist_bits: &[u16], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let cmd_code: usize = cmd.cmd_prefix_ as (usize); let mut j: usize; BrotliWriteBits(cmd_depth[(cmd_code as (usize))] as (usize), cmd_bits[(cmd_code as (usize))] as (usize), storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); j = cmd.insert_len_ as (usize); 
while j != 0usize { { let literal: u8 = input[((pos & mask) as (usize))]; BrotliWriteBits(lit_depth[(literal as (usize))] as (usize), lit_bits[(literal as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 24i32; let distextra: u32 = cmd.dist_extra_ & 0xffffffu32; BrotliWriteBits(dist_depth[(dist_code as (usize))] as (usize), dist_bits[(dist_code as (usize))] as (usize), storage_ix, storage); BrotliWriteBits(distnumextra as (usize), distextra as (usize), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } pub fn BrotliStoreMetaBlockTrivial(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut is_last: i32, mut commands: &[Command], mut n_commands: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut lit_histo: HistogramLiteral; let mut cmd_histo: HistogramCommand; let mut dist_histo: HistogramDistance; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; let mut cmd_depth: [u8; 704]; let mut cmd_bits: [u16; 704]; let mut dist_depth: [u8; 64]; let mut dist_bits: [u16; 64]; let mut tree: *mut HuffmanTree; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); HistogramClearLiteral(&mut lit_histo); HistogramClearCommand(&mut cmd_histo); HistogramClearDistance(&mut dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliWriteBits(13usize, 0usize, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { BrotliAllocate(m, ((2i32 * 704i32 + 1i32) as (usize)) .wrapping_mul(::std::mem::size_of::<HuffmanTree>())) } else { 0i32 }; if !(0i32 == 0) { return; } 
BuildAndStoreHuffmanTree(lit_histo.data_.as_mut_ptr(), 256usize, tree, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), storage_ix, storage); BuildAndStoreHuffmanTree(cmd_histo.data_.as_mut_ptr(), 704usize, tree, cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), storage_ix, storage); BuildAndStoreHuffmanTree(dist_histo.data_.as_mut_ptr(), 64usize, tree, dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); { BrotliFree(m, tree); tree = 0i32; } StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn StoreStaticCommandHuffmanTree(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(56usize, 0x926244u32 as (usize) << 32i32 | 0x16307003u32 as (usize), storage_ix, storage); BrotliWriteBits(3usize, 0x0u32 as (usize), storage_ix, storage); } fn StoreStaticDistanceHuffmanTree(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(28usize, 0x369dc03u32 as (usize), storage_ix, storage); } pub fn BrotliStoreMetaBlockFast(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut is_last: i32, mut commands: &[Command], mut n_commands: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13usize, 0usize, storage_ix, storage); if n_commands <= 128usize { let mut histogram: [u32; 256] = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 
0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; let mut pos: usize = start_pos; let mut num_literals: usize = 0usize; let mut i: usize; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { { let _rhs = 1; let _lhs = &mut histogram[input[((pos & mask) as (usize))] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } num_literals = num_literals.wrapping_add(cmd.insert_len_ as (usize)); pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); } i = i.wrapping_add(1 as (usize)); } BrotliBuildAndStoreHuffmanTreeFast(m, histogram.as_mut_ptr(), num_literals, 8usize, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), storage_ix, storage); if 
!(0i32 == 0) { return; } StoreStaticCommandHuffmanTree(storage_ix, storage); StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), kStaticCommandCodeDepth.as_ptr(), kStaticCommandCodeBits.as_ptr(), kStaticDistanceCodeDepth.as_ptr(), kStaticDistanceCodeBits.as_ptr(), storage_ix, storage); } else { let mut lit_histo: HistogramLiteral; let mut cmd_histo: HistogramCommand; let mut dist_histo: HistogramDistance; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; let mut cmd_depth: [u8; 704]; let mut cmd_bits: [u16; 704]; let mut dist_depth: [u8; 64]; let mut dist_bits: [u16; 64]; HistogramClearLiteral(&mut lit_histo); HistogramClearCommand(&mut cmd_histo); HistogramClearDistance(&mut dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.data_.as_mut_ptr(), lit_histo.total_count_, 8usize, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.data_.as_mut_ptr(), cmd_histo.total_count_, 10usize, cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } BrotliBuildAndStoreHuffmanTreeFast(m, dist_histo.data_.as_mut_ptr(), dist_histo.total_count_, 6usize, dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); } if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn BrotliStoreUncompressedMetaBlockHeader(mut length: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut lenbits: usize; let mut nlenbits: 
usize; let mut nibblesbits: usize; BrotliWriteBits(1usize, 0usize, storage_ix, storage); BrotliEncodeMlen(length, &mut lenbits, &mut nlenbits, &mut nibblesbits); BrotliWriteBits(2usize, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); BrotliWriteBits(1usize, 1usize, storage_ix, storage); } pub fn BrotliStoreUncompressedMetaBlock(mut is_final_block: i32, mut input: &[u8], mut position: usize, mut mask: usize, mut len: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut masked_pos: usize = position & mask; BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); if masked_pos.wrapping_add(len) > mask.wrapping_add(1usize) { let mut len1: usize = mask.wrapping_add(1usize).wrapping_sub(masked_pos); let dst_start = ((*storage_ix >> 3i32) as (usize)); storage[dst_start..len1].clone_from_slice(input[masked_pos..masked_pos + len1]) *storage_ix = (*storage_ix).wrapping_add(len1 << 3i32); len = len.wrapping_sub(len1); masked_pos = 0usize; } let dst_start = (*storage_ix >> 3i32) as (usize); storage[dst_start..dst_start + len].clone_from_slice(input[masked_pos..masked_pos + len]) *storage_ix = (*storage_ix).wrapping_add(len << 3i32); BrotliWriteBitsPrepareStorage(*storage_ix, storage); if is_final_block != 0 { BrotliWriteBits(1u8, 1u64, storage_ix, storage); BrotliWriteBits(1u8, 1u64, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } } pub fn BrotliStoreSyncMetaBlock(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(6usize, 6usize, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } */
// TODO: make templatized BuildAndStoreEntropyCodes function
use super::constants::{BROTLI_NUM_BLOCK_LEN_SYMBOLS, kZeroRepsBits, kZeroRepsDepth, kNonZeroRepsBits, kNonZeroRepsDepth, kCodeLengthBits, kCodeLengthDepth}; use super::entropy_encode::{HuffmanTree, BrotliWriteHuffmanTree, BrotliCreateHuffmanTree, BrotliConvertBitDepthsToSymbols,
// Tail of the multi-line `use super::entropy_encode::{...}` import that opens
// on the previous line.
NewHuffmanTree, InitHuffmanTree, SortHuffmanTreeItems, SortHuffmanTree, BrotliSetDepth};
use super::super::alloc;
use super::super::alloc::{SliceWrapper,SliceWrapperMut};
use super::super::core;

/// One range of the block-length prefix code: lengths starting at `offset`
/// are coded with `nbits` extra bits after the prefix symbol.
pub struct PrefixCodeRange {
  pub offset: u32,
  pub nbits: u32,
}

// Table mapping each block-length prefix symbol to the first length of its
// range and its extra-bit count (scanned by BlockLengthPrefixCode below).
static kBlockLengthPrefixCode: [PrefixCodeRange; BROTLI_NUM_BLOCK_LEN_SYMBOLS] =
    [PrefixCodeRange { offset: 1u32, nbits: 2u32, },
     PrefixCodeRange { offset: 5u32, nbits: 2u32, },
     PrefixCodeRange { offset: 9u32, nbits: 2u32, },
     PrefixCodeRange { offset: 13u32, nbits: 2u32, },
     PrefixCodeRange { offset: 17u32, nbits: 3u32, },
     PrefixCodeRange { offset: 25u32, nbits: 3u32, },
     PrefixCodeRange { offset: 33u32, nbits: 3u32, },
     PrefixCodeRange { offset: 41u32, nbits: 3u32, },
     PrefixCodeRange { offset: 49u32, nbits: 4u32, },
     PrefixCodeRange { offset: 65u32, nbits: 4u32, },
     PrefixCodeRange { offset: 81u32, nbits: 4u32, },
     PrefixCodeRange { offset: 97u32, nbits: 4u32, },
     PrefixCodeRange { offset: 113u32, nbits: 5u32, },
     PrefixCodeRange { offset: 145u32, nbits: 5u32, },
     PrefixCodeRange { offset: 177u32, nbits: 5u32, },
     PrefixCodeRange { offset: 209u32, nbits: 5u32, },
     PrefixCodeRange { offset: 241u32, nbits: 6u32, },
     PrefixCodeRange { offset: 305u32, nbits: 6u32, },
     PrefixCodeRange { offset: 369u32, nbits: 7u32, },
     PrefixCodeRange { offset: 497u32, nbits: 8u32, },
     PrefixCodeRange { offset: 753u32, nbits: 9u32, },
     PrefixCodeRange { offset: 1265u32, nbits: 10u32, },
     PrefixCodeRange { offset: 2289u32, nbits: 11u32, },
     PrefixCodeRange { offset: 4337u32, nbits: 12u32, },
     PrefixCodeRange { offset: 8433u32, nbits: 13u32, },
     PrefixCodeRange { offset: 16625u32, nbits: 24u32, }];

/// Append the low `n_bits` of `bits` to the bit buffer `array` at bit
/// position `*pos`, then advance `*pos`. The current 8-byte window is
/// re-serialized, so the caller must guarantee at least 8 writable bytes at
/// the current byte offset.
fn BrotliWriteBits(n_bits: u8, bits: u64, mut pos: &mut usize, mut array: &mut [u8]) {
  // The value must fit in n_bits, and n_bits <= 56 so the value shifted by
  // the intra-byte offset (0..7) still fits in a u64.
  assert!((bits >> n_bits as usize) == 0);
  assert!(n_bits <= 56);
  let ptr_offset: usize = ((*pos >> 3) as u32) as usize;
  // Seed the accumulator with the partially filled byte at the write
  // position, then OR the new bits in above the intra-byte bit offset.
  let mut v = array[ptr_offset] as u64;
  v |= bits << ((*pos) as u64 & 7);
  // Store the 64-bit accumulator back byte by byte (continues on the next
  // line of the file).
  array[ptr_offset + 7] = (v >> 56) as u8;
  array[ptr_offset + 6] = ((v >> 48) & 0xff)
as u8;
  array[ptr_offset + 5] = ((v >> 40) & 0xff) as u8;
  // BUG FIX: these four stores previously used shifts 24/16/8/4, which
  // dropped bits 32..39 of the accumulator and wrote overlapping,
  // misaligned byte values. For a little-endian store, byte k must receive
  // bits 8*k .. 8*k+7 of v.
  array[ptr_offset + 4] = ((v >> 32) & 0xff) as u8;
  array[ptr_offset + 3] = ((v >> 24) & 0xff) as u8;
  array[ptr_offset + 2] = ((v >> 16) & 0xff) as u8;
  array[ptr_offset + 1] = ((v >> 8) & 0xff) as u8;
  array[ptr_offset] = (v & 0xff) as u8;
  *pos += n_bits as usize
}

/// Zero the byte at the (byte-aligned) bit position `pos` so later
/// BrotliWriteBits calls can OR bits into clean storage.
fn BrotliWriteBitsPrepareStorage(pos: usize, mut array: &mut [u8]) {
  assert_eq!(pos & 7, 0); // caller must be byte-aligned
  array[pos >> 3] = 0;
}

/// Emit the description of the code-length code itself: a 2-bit
/// `skip_some` field, then one symbol per stored code length, transmitted
/// in the fixed kStorageOrder ordering (emission continues on the next
/// line of the file).
fn BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes: i32,
                                                code_length_bitdepth: &[u8],
                                                mut storage_ix: &mut usize,
                                                mut storage: &mut [u8]) {
  // Transmission order of the 18 code-length code lengths.
  static kStorageOrder: [u8; 18] =
      [1i32 as (u8), 2i32 as (u8), 3i32 as (u8), 4i32 as (u8), 0i32 as (u8), 5i32 as (u8),
       17i32 as (u8), 6i32 as (u8), 16i32 as (u8), 7i32 as (u8), 8i32 as (u8), 9i32 as (u8),
       10i32 as (u8), 11i32 as (u8), 12i32 as (u8), 13i32 as (u8), 14i32 as (u8), 15i32 as (u8)];
  // Fixed prefix code (symbol, bit-length pairs) used to encode each of the
  // six possible code-length values.
  static kHuffmanBitLengthHuffmanCodeSymbols: [u8; 6] =
      [0i32 as (u8), 7i32 as (u8), 3i32 as (u8), 2i32 as (u8), 1i32 as (u8), 15i32 as (u8)];
  static kHuffmanBitLengthHuffmanCodeBitLengths: [u8; 6] =
      [2i32 as (u8), 4i32 as (u8), 3i32 as (u8), 2i32 as (u8), 2i32 as (u8), 4i32 as (u8)];
  let mut skip_some: u64 = 0u64;
  let mut codes_to_store: u64 = 18;
  // Trim trailing zero code lengths (only when more than one code exists).
  if num_codes > 1i32 {
    'break5: while codes_to_store > 0 {
      {
        if code_length_bitdepth[(kStorageOrder[codes_to_store.wrapping_sub(1) as usize] as (usize))] as (i32) != 0i32 {
          {
            break 'break5;
          }
        }
      }
      codes_to_store = codes_to_store.wrapping_sub(1);
    }
  }
  // skip_some is 2 or 3 when the first transmitted lengths are all zero.
  if code_length_bitdepth[(kStorageOrder[0usize] as (usize))] as (i32) == 0i32 &&
     (code_length_bitdepth[(kStorageOrder[1usize] as (usize))] as (i32) == 0i32) {
    skip_some = 2;
    if code_length_bitdepth[(kStorageOrder[2usize] as (usize))] as (i32) == 0i32 {
      skip_some = 3;
    }
  }
  BrotliWriteBits(2, skip_some, storage_ix, storage);
  {
    let mut i: u64;
    i = skip_some;
    while i < codes_to_store {
      {
        let mut l: usize = code_length_bitdepth[(kStorageOrder[i as usize] as (usize))] as (usize);
// Continuation of the `while i < codes_to_store` loop opened on the
// previous line: each stored code length is emitted with the fixed
// code-length-of-code-length prefix code defined above.
BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l] as (u8),
                kHuffmanBitLengthHuffmanCodeSymbols[l] as u64,
                storage_ix,
                storage);
      }
      i = i.wrapping_add(1);
    }
  }
}

/// Serialize an already run-length-compressed tree description: each entry
/// of `huffman_tree` is a code-length symbol; symbols 16 and 17 carry 2 and
/// 3 extra bits respectively, taken from `huffman_tree_extra_bits`.
fn BrotliStoreHuffmanTreeToBitMask(huffman_tree_size: usize,
                                   huffman_tree: &[u8],
                                   huffman_tree_extra_bits: &[u8],
                                   code_length_bitdepth: &[u8],
                                   code_length_bitdepth_symbols: &[u16],
                                   mut storage_ix: &mut usize,
                                   mut storage: &mut [u8]) {
  let mut i: usize;
  i = 0usize;
  while i < huffman_tree_size {
    {
      let mut ix: usize = huffman_tree[(i as (usize))] as (usize);
      BrotliWriteBits(code_length_bitdepth[(ix as (usize))] as (u8),
                      code_length_bitdepth_symbols[(ix as (usize))] as (u64),
                      storage_ix,
                      storage);
      // Repeat symbols 16/17 are followed by 2/3 extra bits.
      if ix == 16usize {
        BrotliWriteBits(2, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage);
      } else if ix == 17usize {
        BrotliWriteBits(3, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage);
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
}

/// Write the prefix-code description for `depths[..num]`: run-length
/// compress the code lengths, build a prefix code over the resulting
/// 18-symbol alphabet, store that code, then the compressed lengths.
/// `tree` is scratch space for BrotliCreateHuffmanTree.
pub fn BrotliStoreHuffmanTree(depths: &[u8],
                              num: usize,
                              mut tree: &mut [HuffmanTree],
                              mut storage_ix: &mut usize,
                              mut storage: &mut [u8]) {
  let mut huffman_tree: [u8; 704] = [0; 704];
  let mut huffman_tree_extra_bits: [u8; 704] = [0; 704];
  let mut huffman_tree_size: usize = 0usize;
  let mut code_length_bitdepth: [u8; 18] =
      [0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8),
       0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8),
       0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8)];
  let mut code_length_bitdepth_symbols: [u16; 18] = [0;18];
  let mut huffman_tree_histogram: [u32; 18] =
      [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32,
       0u32, 0u32, 0u32, 0u32];
  let mut i: usize;
  let mut num_codes: i32 = 0i32;
  let mut code: usize = 0usize;
  0i32; // no-op statement left over from the C-to-Rust translation
  BrotliWriteHuffmanTree(depths, num, &mut huffman_tree_size, &mut huffman_tree[..], &mut huffman_tree_extra_bits[..]);
  // Histogram the RLE symbols (loop body continues on the next line).
  i = 0usize;
  while i < huffman_tree_size {
    {
      let _rhs = 1;
      let _lhs =
&mut huffman_tree_histogram[huffman_tree[i] as (usize)];
      *_lhs = (*_lhs).wrapping_add(_rhs as (u32));
    }
    i = i.wrapping_add(1 as (usize));
  }
  // Count distinct RLE symbols (stop once at least two are seen) and
  // remember the single symbol when there is exactly one.
  i = 0usize;
  'break3: while i < 18usize {
    {
      if huffman_tree_histogram[i] != 0 {
        if num_codes == 0i32 {
          code = i;
          num_codes = 1i32;
        } else if num_codes == 1i32 {
          num_codes = 2i32;
          {
            {
              break 'break3;
            }
          }
        }
      }
    }
    i = i.wrapping_add(1 as (usize));
  }
  // Build a depth-limited (5) code over the 18 RLE symbols, emit its
  // description, then the RLE-compressed code lengths themselves.
  BrotliCreateHuffmanTree(&mut huffman_tree_histogram, 18usize, 5i32, tree, &mut code_length_bitdepth);
  BrotliConvertBitDepthsToSymbols(&mut code_length_bitdepth, 18usize, &mut code_length_bitdepth_symbols);
  BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes, &code_length_bitdepth, storage_ix, storage);
  if num_codes == 1i32 {
    code_length_bitdepth[code] = 0i32 as (u8);
  }
  BrotliStoreHuffmanTreeToBitMask(huffman_tree_size,
                                  &huffman_tree,
                                  &huffman_tree_extra_bits,
                                  &code_length_bitdepth,
                                  &code_length_bitdepth_symbols,
                                  storage_ix,
                                  storage);
}

/// Write the canned 40-bit pattern that selects the static code-length
/// code used by the fast tree-storage path.
fn StoreStaticCodeLengthCode(mut storage_ix: &mut usize, mut storage: &mut [u8]) {
  BrotliWriteBits(40, 0xffu32 as (u64) << 32i32 | 0x55555554u32 as (u64), storage_ix, storage);
}

/// Build a depth-limited prefix code for `histogram` (whose counts sum to
/// `histogram_total`), write its description to the bit stream, and fill
/// `depth`/`bits` with the per-symbol code for later emission. `max_bits`
/// is the bit width used when writing raw symbol indices.
pub fn BrotliBuildAndStoreHuffmanTreeFast<AllocHT: alloc::Allocator<HuffmanTree>>(
    mut m : AllocHT,
    histogram : &[u32],
    histogram_total : usize,
    max_bits : usize,
    mut depth : &mut [u8],
    mut bits : &mut [u16],
    mut storage_ix : &mut usize,
    mut storage : &mut [u8] ){
  // Count the used symbols and remember the first four of them.
  let mut count: u64 = 0;
  let mut symbols: [u64; 4] = [0; 4];
  let mut length: u64 = 0;
  let mut total: usize = histogram_total;
  while total != 0usize {
    if histogram[(length as (usize))] != 0 {
      if count < 4 {
        symbols[count as usize] = length;
      }
      count = count.wrapping_add(1);
      total = total.wrapping_sub(histogram[(length as (usize))] as (usize));
    }
    length = length.wrapping_add(1);
  }
  if count <= 1 {
    // Single-symbol alphabet: emit the one-symbol simple-tree header plus
    // the symbol itself; its code is zero bits long.
    BrotliWriteBits(4, 1, storage_ix, storage);
    BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
    depth[symbols[0usize] as (usize)] = 0i32 as (u8);
    bits[symbols[0usize] as (usize)] = 0i32 as (u16);
    // BUG FIX: the early return was missing (the C reference returns
    // here). Without it the function fell through and emitted a second,
    // conflicting tree description, corrupting the bit stream.
    return;
  }
  for depth_elem
in depth[..(length as usize)].iter_mut() {
    *depth_elem = 0; // memset
  }
  {
    // Build a Huffman tree over the first `length` symbols; if the
    // resulting depths do not fit the 14-bit limit, the histogram is
    // flattened by doubling `count_limit` and the build is retried.
    let max_tree_size: u64 = (2u64).wrapping_mul(length).wrapping_add(1);
    let mut tree = if max_tree_size != 0 {
      m.alloc_cell(max_tree_size as usize)
    } else {
      AllocHT::AllocatedMemory::default() // null
    };
    let mut count_limit: u32;
    // Allocation-failure check carried over from the C original; the
    // condition is constant-false here.
    if !(0i32 == 0) {
      return;
    }
    count_limit = 1u32;
    'break11: loop {
      {
        let mut node_index: u32 = 0u32;
        let mut l: u64;
        l = length;
        // Create one leaf per used symbol (scanning from high to low so
        // leaves end up sorted by symbol), clamping counts to at least
        // count_limit.
        while l != 0 {
          l = l.wrapping_sub(1);
          if histogram[(l as (usize))] != 0 {
            if histogram[(l as (usize))] >= count_limit {
              InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))],
                              histogram[(l as (usize))],
                              -1i32 as (i16),
                              l as (i16));
            } else {
              InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))],
                              count_limit,
                              -1i32 as (i16),
                              l as (i16));
            }
            node_index = node_index.wrapping_add(1 as (u32));
          }
        }
        {
          let n: i32 = node_index as (i32);
          let mut sentinel: HuffmanTree;
          let mut i: i32 = 0i32; // cursor over leaves
          let mut j: i32 = n + 1i32; // cursor over internal nodes
          let mut k: i32;
          // Sort the leaves, append max-weight sentinels, then repeatedly
          // merge the two cheapest nodes (two-queue Huffman construction).
          SortHuffmanTreeItems(tree.slice_mut(), n as (usize), SortHuffmanTree{});
          sentinel = NewHuffmanTree(!(0u32), -1i16, -1i16);
          tree.slice_mut()[(node_index.wrapping_add(1u32) as (usize))] = sentinel.clone();
          tree.slice_mut()[(node_index as (usize))] = sentinel.clone();
          node_index = node_index.wrapping_add(2u32);
          k = n - 1i32;
          while k > 0i32 {
            {
              let mut left: i32;
              let mut right: i32;
              if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ {
                left = i;
                i = i + 1;
              } else {
                left = j;
                j = j + 1;
              }
              if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ {
                right = i;
                i = i + 1;
              } else {
                right = j;
                j = j + 1;
              }
              let sum_total = (tree.slice()[(left as (usize))])
                .total_count_
                .wrapping_add((tree.slice()[(right as (usize))]).total_count_);
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).total_count_ = sum_total;
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).index_left_ = left as (i16);
              (tree.slice_mut()[(node_index.wrapping_sub(1u32) as (usize))]).index_right_or_value_ = right as (i16);
              tree.slice_mut()[(node_index as (usize))] = sentinel.clone();
              node_index = node_index.wrapping_add(1u32);
            }
            k = k - 1;
          }
          // Done when every depth fits in 14 bits; otherwise retry with a
          // doubled count_limit.
          if BrotliSetDepth(2i32 * n - 1i32, tree.slice_mut(), depth, 14i32) {
            {
              break 'break11;
            }
          }
        }
      }
      count_limit = count_limit.wrapping_mul(2u32);
    }
    {
      m.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default()));
    }
  }
  BrotliConvertBitDepthsToSymbols(depth, length as usize, bits);
  if count <= 4 {
    // Simple-tree encoding: list the 2..4 symbols explicitly, sorted by
    // code depth.
    let mut i: u64;
    BrotliWriteBits(2, 1, storage_ix, storage);
    BrotliWriteBits(2, count.wrapping_sub(1) as u64, storage_ix, storage);
    // Selection sort of the (at most four) symbols by their code depth.
    i = 0;
    while i < count {
      {
        let mut j: u64;
        j = i.wrapping_add(1);
        while j < count {
          {
            if depth[(symbols[j as usize] as (usize))] as (i32) < depth[(symbols[i as usize] as (usize)) as usize] as (i32) {
              let mut brotli_swap_tmp: u64 = symbols[j as usize];
              symbols[j as usize] = symbols[i as usize];
              symbols[i as usize] = brotli_swap_tmp;
            }
          }
          j = j.wrapping_add(1);
        }
      }
      i = i.wrapping_add(1);
    }
    if count == 2 {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
    } else if count == 3 {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage);
    } else {
      BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage);
      BrotliWriteBits(max_bits as u8, symbols[3usize], storage_ix, storage);
      // One extra bit distinguishes the two possible 4-symbol tree
      // shapes; it is set when the shallowest symbol has depth 1.
      BrotliWriteBits(1,
                      if depth[(symbols[0usize] as (usize))] as (i32) == 1i32 {
                        1i32
                      } else {
                        0i32
                      } as (u64),
                      storage_ix,
                      storage);
    }
  } else {
    // More than four symbols: emit the lengths with the static
    // code-length code, run-length encoding zero and non-zero runs
    // (the loop continues on the next line of the file).
    let mut previous_value: u8 = 8i32 as (u8);
    let mut i: u64;
    StoreStaticCodeLengthCode(storage_ix, storage);
    i = 0;
while i < length {
      // Measure the run of equal code lengths starting at i.
      let value: u8 = depth[(i as (usize))];
      let mut reps: u64 = 1;
      let mut k: u64;
      k = i.wrapping_add(1);
      while k < length && (depth[(k as (usize))] as (i32) == value as (i32)) {
        {
          reps = reps.wrapping_add(1);
        }
        k = k.wrapping_add(1);
      }
      i = i.wrapping_add(reps);
      if value as (i32) == 0i32 {
        // Zero-length run: emitted with the precomputed kZeroReps codes.
        BrotliWriteBits(kZeroRepsDepth[reps as usize] as (u8),
                        kZeroRepsBits[reps as usize] as u64,
                        storage_ix,
                        storage);
      } else {
        // Non-zero run: emit the value once when it changes, then either
        // repeat it literally (runs shorter than 3) or use the
        // kNonZeroReps repeat codes.
        if previous_value as (i32) != value as (i32) {
          BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8),
                          kCodeLengthBits[value as (usize)] as (u64),
                          storage_ix,
                          storage);
          reps = reps.wrapping_sub(1);
        }
        if reps < 3 {
          while reps != 0 {
            reps = reps.wrapping_sub(1);
            BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8),
                            kCodeLengthBits[value as (usize)] as (u64),
                            storage_ix,
                            storage);
          }
        } else {
          reps = reps.wrapping_sub(3);
          BrotliWriteBits(kNonZeroRepsDepth[reps as usize] as (u8),
                          kNonZeroRepsBits[reps as usize] as u64,
                          storage_ix,
                          storage);
        }
        previous_value = value;
      }
    }
  }
}

/// Literal context modes; written as a 2-bit field in the meta-block
/// header.
pub enum ContextType {
  CONTEXT_LSB6 = 0,
  CONTEXT_MSB6 = 1,
  CONTEXT_UTF8 = 2,
  CONTEXT_SIGNED = 3,
}

/// One insert-and-copy command: insert/copy lengths, packed distance extra
/// bits, and the prefix-code symbols for the command and the distance.
#[derive(Clone, Copy)]
pub struct Command {
  pub insert_len_: u32,
  pub copy_len_: u32,
  pub dist_extra_: u32,
  pub cmd_prefix_: u16,
  pub dist_prefix_: u16,
}

/// Raw-pointer description of a block split: per-block type ids and
/// lengths, with the sizes of the backing allocations.
#[derive(Clone, Copy)]
pub struct BlockSplit {
  pub num_types: usize,
  pub num_blocks: usize,
  pub types: *mut u8,
  pub lengths: *mut u32,
  pub types_alloc_size: usize,
  pub lengths_alloc_size: usize,
}

/// Histogram over the 256-entry literal alphabet, with cached totals and
/// an estimated bit cost.
pub struct HistogramLiteral {
  pub data_: [u32; 256],
  pub total_count_: usize,
  pub bit_cost_: f64,
}
impl SliceWrapper<u32> for HistogramLiteral {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}
impl SliceWrapperMut<u32> for HistogramLiteral {
  fn slice_mut(&mut self) -> &mut [u32] {
    return &mut self.data_[..];
  }
}

/// Histogram over the 704-entry insert-and-copy command alphabet.
pub struct HistogramCommand {
  pub data_: [u32; 704],
  pub total_count_: usize,
  pub bit_cost_: f64,
}
impl SliceWrapper<u32> for HistogramCommand {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}
impl
SliceWrapperMut<u32> for HistogramCommand {
  fn slice_mut(&mut self) -> &mut [u32] {
    return &mut self.data_[..];
  }
}

/// Histogram over the 520-entry distance alphabet.
pub struct HistogramDistance {
  pub data_: [u32; 520],
  pub total_count_: usize,
  pub bit_cost_: f64,
}
impl SliceWrapper<u32> for HistogramDistance {
  fn slice(&self) -> &[u32] {
    return &self.data_[..];
  }
}
impl SliceWrapperMut<u32> for HistogramDistance {
  fn slice_mut(&mut self) -> &mut [u32] {
    return &mut self.data_[..];
  }
}

/// Complete meta-block split description: the three block splits, the
/// literal/distance context maps, and the per-block-type histograms.
#[derive(Clone, Copy)]
pub struct MetaBlockSplit {
  pub literal_split: BlockSplit,
  pub command_split: BlockSplit,
  pub distance_split: BlockSplit,
  pub literal_context_map: *mut u32,
  pub literal_context_map_size: usize,
  pub distance_context_map: *mut u32,
  pub distance_context_map_size: usize,
  pub literal_histograms: *mut HistogramLiteral,
  pub literal_histograms_size: usize,
  pub command_histograms: *mut HistogramCommand,
  pub command_histograms_size: usize,
  pub distance_histograms: *mut HistogramDistance,
  pub distance_histograms_size: usize,
}

/// Rolling state for block-switch type codes: code 0 refers to the
/// second-to-last type, code 1 to last type + 1 (see NextBlockTypeCode).
#[derive(Clone, Copy)]
pub struct BlockTypeCodeCalculator {
  pub last_type: usize,
  pub second_last_type: usize,
}

/// Prefix codes used when emitting block-switch commands: one over block
/// types (up to 256 types plus the two special codes) and one over the 26
/// block-length symbols.
pub struct BlockSplitCode {
  pub type_code_calculator: BlockTypeCodeCalculator,
  pub type_depths: [u8; 258],
  pub type_bits: [u16; 258],
  pub length_depths: [u8; 26],
  pub length_bits: [u16; 26],
}

/// Per-category (literal/command/distance) encoder state while storing a
/// meta-block: the block split, current block/entropy indices, and the
/// per-symbol depth/bit tables.
pub struct BlockEncoder<AllocU8: alloc::Allocator<u8>,
                        AllocU16: alloc::Allocator<u16>,
                        AllocU32: alloc::Allocator<u32>> {
  /* pub alloc_u8 : AllocU8, pub alloc_u16 : AllocU16, pub alloc_u32 : AllocU32, pub alloc_ht : AllocHT,*/
  pub alphabet_size_: usize,
  pub num_block_types_: usize,
  pub block_types_: AllocU8::AllocatedMemory,
  pub block_lengths_: AllocU32::AllocatedMemory,
  pub num_blocks_: usize,
  pub block_split_code_: BlockSplitCode,
  pub block_ix_: usize,
  pub block_len_: usize,
  pub entropy_ix_: usize,
  pub depths_: AllocU8::AllocatedMemory,
  pub bits_: AllocU16::AllocatedMemory,
}

/// Number of right-shifts until `n` becomes zero: floor(log2(n)) for
/// nonzero `n`, and 0 when n <= 1.
fn Log2FloorNonZero(mut n: u64) -> u32 {
  let mut result: u32 = 0u32;
  'loop1: loop {
    if {
         n = n >> 1i32;
         n
       } != 0 {
result = result.wrapping_add(1 as (u32)); continue 'loop1; } else { break 'loop1; } } result } fn BrotliEncodeMlen(mut length: u32, mut bits: &mut u64, mut numbits: &mut u32, mut nibblesbits: &mut u32) { let mut lg: u32 = (if length == 1u32 { 1u32 } else { Log2FloorNonZero(length.wrapping_sub(1u32) as (u32) as (u64)) .wrapping_add(1u32) }) as (u32); let mut mnibbles: u32 = (if lg < 16u32 { 16u32 } else { lg.wrapping_add(3u32) }) .wrapping_div(4u32); assert!(length > 0); assert!(length <= (1 << 24)); assert!(lg <= 24); *nibblesbits = mnibbles.wrapping_sub(4u32); *numbits = mnibbles.wrapping_mul(4u32); *bits = length.wrapping_sub(1u32) as u64; } fn StoreCompressedMetaBlockHeader(mut is_final_block: i32, mut length: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut lenbits: u64 = 0; let mut nlenbits: u32 = 0; let mut nibblesbits: u32 = 0; BrotliWriteBits(1, is_final_block as (u64), storage_ix, storage); if is_final_block != 0 { BrotliWriteBits(1, 0, storage_ix, storage); } BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits); BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage); BrotliWriteBits(nlenbits as u8, lenbits, storage_ix, storage); if is_final_block == 0 { BrotliWriteBits(1, 0, storage_ix, storage); } } fn NewBlockTypeCodeCalculator() -> BlockTypeCodeCalculator { return BlockTypeCodeCalculator { last_type: 1, second_last_type: 0, }; } fn NewBlockEncoder<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, AllocU32: alloc::Allocator<u32>> (mut alphabet_size: usize, mut num_block_types: usize, mut block_types: AllocU8::AllocatedMemory, mut block_lengths: AllocU32::AllocatedMemory, num_blocks: usize) -> BlockEncoder<AllocU8, AllocU16, AllocU32> { let block_len: usize; if num_blocks != 0 && block_lengths.slice().len() != 0 { block_len = block_lengths.slice()[0] as usize; } else { block_len = 0; } return BlockEncoder::<AllocU8, AllocU16, AllocU32> { alphabet_size_: alphabet_size, num_block_types_: 
num_block_types, block_types_: block_types, block_lengths_: block_lengths, num_blocks_: num_blocks, block_split_code_: BlockSplitCode { type_code_calculator: NewBlockTypeCodeCalculator(), type_depths: [0; 258], type_bits: [0; 258], length_depths: [0; 26], length_bits: [0; 26], }, block_ix_: 0, block_len_: block_len, entropy_ix_: 0, depths_: AllocU8::AllocatedMemory::default(), bits_: AllocU16::AllocatedMemory::default(), }; } extern "C" fn NextBlockTypeCode(mut calculator: &mut BlockTypeCodeCalculator, mut type_: u8) -> usize { let mut type_code: usize = (if type_ as (usize) == (*calculator).last_type.wrapping_add(1usize) { 1u32 } else if type_ as (usize) == (*calculator).second_last_type { 0u32 } else { (type_ as (u32)).wrapping_add(2u32) }) as (usize); (*calculator).second_last_type = (*calculator).last_type; (*calculator).last_type = type_ as (usize); type_code } fn BlockLengthPrefixCode(mut len: u32) -> u32 { let mut code: u32 = (if len >= 177u32 { if len >= 753u32 { 20i32 } else { 14i32 } } else if len >= 41u32 { 7i32 } else { 0i32 }) as (u32); while code < (26i32 - 1i32) as (u32) && (len >= kBlockLengthPrefixCode[code.wrapping_add(1u32) as (usize)].offset) { code = code.wrapping_add(1 as (u32)); } code } fn StoreVarLenUint8(mut n: u64, mut storage_ix: &mut usize, mut storage: &mut [u8]) { if n == 0 { BrotliWriteBits(1, 0, storage_ix, storage); } else { let mut nbits: u8 = Log2FloorNonZero(n) as (u8); BrotliWriteBits(1, 1, storage_ix, storage); BrotliWriteBits(3, nbits as u64, storage_ix, storage); BrotliWriteBits(nbits, n.wrapping_sub(1u64 << nbits), storage_ix, storage); } } fn StoreSimpleHuffmanTree(mut depths: &[u8], mut symbols: &mut [usize], mut num_symbols: usize, mut max_bits: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, num_symbols.wrapping_sub(1) as u64, storage_ix, storage); { let mut i: usize; i = 0usize; while i < num_symbols { { let mut j: usize; j = 
i.wrapping_add(1usize); while j < num_symbols { { if depths[(symbols[(j as (usize))] as (usize))] as (i32) < depths[(symbols[(i as (usize))] as (usize))] as (i32) { let mut __brotli_swap_tmp: usize = symbols[(j as (usize))]; symbols[(j as (usize))] = symbols[(i as (usize))]; symbols[(i as (usize))] = __brotli_swap_tmp; } } j = j.wrapping_add(1 as (usize)); } } i = i.wrapping_add(1 as (usize)); } } if num_symbols == 2usize { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); } else if num_symbols == 3usize { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); } else { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(3usize)] as u64, storage_ix, storage); BrotliWriteBits(1, if depths[(symbols[(0usize)] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage); } } fn BuildAndStoreHuffmanTree(mut histogram: &[u32], length: usize, mut tree: &mut [HuffmanTree], mut depth: &mut [u8], mut bits: &mut [u16], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut count: usize = 0usize; let mut s4: [usize; 4] = [0usize, 0usize, 0usize, 0usize]; let mut i: usize; let mut max_bits: usize = 0usize; i = 0usize; 'break31: while i < length { { if histogram[(i as (usize))] != 0 { if count < 4usize { s4[count] = i; } else if count > 4usize { { break 'break31; } } count = count.wrapping_add(1 as (usize)); } } i = i.wrapping_add(1 as (usize)); } { let mut max_bits_counter: usize = length.wrapping_sub(1usize); while max_bits_counter != 
0 { max_bits_counter = max_bits_counter >> 1i32; max_bits = max_bits.wrapping_add(1 as (usize)); } } if count <= 1usize { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits as u8, s4[0usize] as u64, storage_ix, storage); depth[(s4[0usize] as (usize))] = 0i32 as (u8); bits[(s4[0usize] as (usize))] = 0i32 as (u16); return; } for depth_elem in depth[..length].iter_mut() { *depth_elem = 0; // memset } BrotliCreateHuffmanTree(histogram, length, 15i32, tree, depth); BrotliConvertBitDepthsToSymbols(depth, length, bits); if count <= 4usize { StoreSimpleHuffmanTree(depth, &mut s4[..], count, max_bits, storage_ix, storage); } else { BrotliStoreHuffmanTree(depth, length, tree, storage_ix, storage); } } fn GetBlockLengthPrefixCode(mut len: u32, mut code: &mut usize, mut n_extra: &mut u32, mut extra: &mut u32) { *code = BlockLengthPrefixCode(len) as (usize); *n_extra = kBlockLengthPrefixCode[*code].nbits; *extra = len.wrapping_sub(kBlockLengthPrefixCode[*code].offset); } fn StoreBlockSwitch(mut code: &mut BlockSplitCode, block_len: u32, block_type: u8, mut is_first_block: i32, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut typecode: usize = NextBlockTypeCode(&mut (*code).type_code_calculator, block_type); let mut lencode: usize = 0; let mut len_nextra: u32 = 0; let mut len_extra: u32 = 0; if is_first_block == 0 { BrotliWriteBits((*code).type_depths[typecode] as (u8), (*code).type_bits[typecode] as (u64), storage_ix, storage); } GetBlockLengthPrefixCode(block_len, &mut lencode, &mut len_nextra, &mut len_extra); BrotliWriteBits((*code).length_depths[lencode] as (u8), (*code).length_bits[lencode] as (u64), storage_ix, storage); BrotliWriteBits(len_nextra as (u8), len_extra as (u64), storage_ix, storage); } fn BuildAndStoreBlockSplitCode(mut types: &[u8], mut lengths: &[u32], num_blocks: usize, num_types: usize, mut tree: &mut [HuffmanTree], mut code: &mut BlockSplitCode, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut type_histo: 
[u32; 258] = [0;258]; let mut length_histo: [u32; 26] = [0;26]; let mut i: usize; let mut type_code_calculator = NewBlockTypeCodeCalculator(); i = 0usize; while i < num_blocks { { let mut type_code: usize = NextBlockTypeCode(&mut type_code_calculator, types[(i as (usize))]); if i != 0usize { let _rhs = 1; let _lhs = &mut type_histo[type_code]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } { let _rhs = 1; let _lhs = &mut length_histo[BlockLengthPrefixCode(lengths[(i as (usize))]) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } } i = i.wrapping_add(1 as (usize)); } StoreVarLenUint8(num_types.wrapping_sub(1) as u64, storage_ix, storage); if num_types > 1usize { BuildAndStoreHuffmanTree(&mut type_histo[0usize..], num_types.wrapping_add(2usize), tree, &mut (*code).type_depths[0usize..], &mut (*code).type_bits[0usize..], storage_ix, storage); BuildAndStoreHuffmanTree(&mut length_histo[0usize..], 26usize, tree, &mut (*code).length_depths[0usize..], &mut (*code).length_bits[0usize..], storage_ix, storage); StoreBlockSwitch(code, lengths[(0usize)], types[(0usize)], 1i32, storage_ix, storage); } } fn BuildAndStoreBlockSwitchEntropyCodes<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, AllocU32: alloc::Allocator<u32>>(mut xself: &mut BlockEncoder<AllocU8, AllocU16, AllocU32>, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { BuildAndStoreBlockSplitCode((*xself).block_types_.slice(), (*xself).block_lengths_.slice(), (*xself).num_blocks_, (*xself).num_block_types_, tree, &mut (*xself).block_split_code_, storage_ix, storage); } fn StoreTrivialContextMap(mut num_types: usize, mut context_bits: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { StoreVarLenUint8(num_types.wrapping_sub(1usize) as u64, storage_ix, storage); if num_types > 1usize { let mut repeat_code: usize = context_bits.wrapping_sub(1u32 as (usize)); let mut repeat_bits: usize = (1u32 << 
repeat_code).wrapping_sub(1u32) as (usize); let mut alphabet_size: usize = num_types.wrapping_add(repeat_code); let mut histogram: [u32; 272] = [0;272]; let mut depths: [u8; 272] = [0;272]; let mut bits: [u16; 272] = [0;272]; let mut i: usize; BrotliWriteBits(1u8, 1u64, storage_ix, storage); BrotliWriteBits(4u8, repeat_code.wrapping_sub(1usize) as u64, storage_ix, storage); histogram[repeat_code] = num_types as (u32); histogram[0usize] = 1u32; i = context_bits; while i < alphabet_size { { histogram[i] = 1u32; } i = i.wrapping_add(1 as (usize)); } BuildAndStoreHuffmanTree(&mut histogram[..], alphabet_size, tree, &mut depths[..], &mut bits[..], storage_ix, storage); i = 0usize; while i < num_types { { let mut code: usize = if i == 0usize { 0usize } else { i.wrapping_add(context_bits).wrapping_sub(1usize) }; BrotliWriteBits(depths[code] as (u8), bits[code] as (u64), storage_ix, storage); BrotliWriteBits(depths[repeat_code] as (u8), bits[repeat_code] as (u64), storage_ix, storage); BrotliWriteBits(repeat_code as u8, repeat_bits as u64, storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); } } fn IndexOf(mut v: &[u8], mut v_size: usize, mut value: u8) -> usize { let mut i: usize = 0usize; while i < v_size { { if v[(i as (usize))] as (i32) == value as (i32) { return i; } } i = i.wrapping_add(1 as (usize)); } i } fn MoveToFront(mut v: &mut [u8], mut index: usize) { let mut value: u8 = v[(index as (usize))]; let mut i: usize; i = index; while i != 0usize { { v[(i as (usize))] = v[(i.wrapping_sub(1usize) as (usize))]; } i = i.wrapping_sub(1 as (usize)); } v[(0usize)] = value; } fn MoveToFrontTransform(mut v_in: &[u32], v_size: usize, mut v_out: &mut [u32]) { let mut i: usize; let mut mtf: [u8; 256] = [0;256]; let mut max_value: u32; if v_size == 0usize { return; } max_value = v_in[(0usize)]; i = 1usize; while i < v_size { { if v_in[(i as (usize))] > max_value { max_value = v_in[(i as (usize))]; } } i = i.wrapping_add(1 as 
(usize)); } 0i32; i = 0usize; while i <= max_value as (usize) { { mtf[i] = i as (u8); } i = i.wrapping_add(1 as (usize)); } { let mut mtf_size: usize = max_value.wrapping_add(1u32) as (usize); i = 0usize; while i < v_size { { let mut index: usize = IndexOf(&mtf[..], mtf_size, v_in[(i as (usize))] as (u8)); 0i32; v_out[(i as (usize))] = index as (u32); MoveToFront(&mut mtf[..], index); } i = i.wrapping_add(1 as (usize)); } } } fn brotli_max_uint32_t(mut a: u32, mut b: u32) -> u32 { if a > b { a } else { b } } fn brotli_min_uint32_t(mut a: u32, mut b: u32) -> u32 { if a < b { a } else { b } } fn RunLengthCodeZeros(in_size: usize, mut v: &mut [u32], mut out_size: &mut usize, mut max_run_length_prefix: &mut u32) { let mut max_reps: u32 = 0u32; let mut i: usize; let mut max_prefix: u32; i = 0usize; while i < in_size { let mut reps: u32 = 0u32; while i < in_size && (v[(i as (usize))] != 0u32) { i = i.wrapping_add(1 as (usize)); } while i < in_size && (v[(i as (usize))] == 0u32) { { reps = reps.wrapping_add(1 as (u32)); } i = i.wrapping_add(1 as (usize)); } max_reps = brotli_max_uint32_t(reps, max_reps); } max_prefix = if max_reps > 0u32 { Log2FloorNonZero(max_reps as (u64)) } else { 0u32 }; max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix); *max_run_length_prefix = max_prefix; *out_size = 0usize; i = 0usize; while i < in_size { 0i32; if v[(i as (usize))] != 0u32 { v[(*out_size as (usize))] = (v[(i as (usize))]).wrapping_add(*max_run_length_prefix); i = i.wrapping_add(1 as (usize)); *out_size = (*out_size).wrapping_add(1 as (usize)); } else { let mut reps: u32 = 1u32; let mut k: usize; k = i.wrapping_add(1usize); while k < in_size && (v[(k as (usize))] == 0u32) { { reps = reps.wrapping_add(1 as (u32)); } k = k.wrapping_add(1 as (usize)); } i = i.wrapping_add(reps as (usize)); while reps != 0u32 { if reps < 2u32 << max_prefix { let mut run_length_prefix: u32 = Log2FloorNonZero(reps as (u64)); let extra_bits: u32 = reps.wrapping_sub(1u32 << 
run_length_prefix); v[(*out_size as (usize))] = run_length_prefix.wrapping_add(extra_bits << 9i32); *out_size = (*out_size).wrapping_add(1 as (usize)); { { break; } } } else { let extra_bits: u32 = (1u32 << max_prefix).wrapping_sub(1u32); v[(*out_size as (usize))] = max_prefix.wrapping_add(extra_bits << 9i32); reps = reps.wrapping_sub((2u32 << max_prefix).wrapping_sub(1u32)); *out_size = (*out_size).wrapping_add(1 as (usize)); } } } } } fn EncodeContextMap<AllocU32: alloc::Allocator<u32>>(mut m: &mut AllocU32, mut context_map: &[u32], mut context_map_size: usize, mut num_clusters: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut i: usize; let mut rle_symbols: AllocU32::AllocatedMemory; let mut max_run_length_prefix: u32 = 6u32; let mut num_rle_symbols: usize = 0usize; static kSymbolMask: u32 = (1u32 << 9i32) - 1; let mut depths: [u8; 272] = [0;272]; let mut bits: [u16; 272] = [0;272]; StoreVarLenUint8(num_clusters.wrapping_sub(1usize) as u64, storage_ix, storage); if num_clusters == 1usize { return; } rle_symbols = if context_map_size != 0 { m.alloc_cell(context_map_size) } else { AllocU32::AllocatedMemory::default() }; MoveToFrontTransform(context_map, context_map_size, rle_symbols.slice_mut()); RunLengthCodeZeros(context_map_size, rle_symbols.slice_mut(), &mut num_rle_symbols, &mut max_run_length_prefix); let mut histogram: [u32; 272] = [0;272]; i = 0usize; while i < num_rle_symbols { { let _rhs = 1; let _lhs = &mut histogram[(rle_symbols.slice()[(i as (usize))] & kSymbolMask) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } i = i.wrapping_add(1 as (usize)); } { let mut use_rle: i32 = if !!(max_run_length_prefix > 0u32) { 1i32 } else { 0i32 }; BrotliWriteBits(1, use_rle as (u64), storage_ix, storage); if use_rle != 0 { BrotliWriteBits(4, max_run_length_prefix.wrapping_sub(1u32) as (u64), storage_ix, storage); } } BuildAndStoreHuffmanTree(&mut histogram[..], 
num_clusters.wrapping_add(max_run_length_prefix as (usize)), tree, &mut depths[..], &mut bits[..], storage_ix, storage); i = 0usize; while i < num_rle_symbols { { let rle_symbol: u32 = rle_symbols.slice()[(i as (usize))] & kSymbolMask; let extra_bits_val: u32 = rle_symbols.slice()[(i as (usize))] >> 9i32; BrotliWriteBits(depths[rle_symbol as (usize)] as (u8), bits[rle_symbol as (usize)] as (u64), storage_ix, storage); if rle_symbol > 0u32 && (rle_symbol <= max_run_length_prefix) { BrotliWriteBits(rle_symbol as (u8), extra_bits_val as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); m.free_cell(rle_symbols); } fn BuildAndStoreEntropyCodes<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, AllocU32: alloc::Allocator<u32>, HistogramType:SliceWrapper<u32> >(mut m8: &mut AllocU8, mut m16: &mut AllocU16, mut xself: &mut BlockEncoder<AllocU8, AllocU16, AllocU32>, mut histograms: &[HistogramType], histograms_size: usize, mut tree: &mut [HuffmanTree], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { m8.alloc_cell(table_size) } else { AllocU8::AllocatedMemory::default() }; (*xself).bits_ = if table_size != 0 { m16.alloc_cell(table_size) } else { AllocU16::AllocatedMemory::default() }; { let mut i: usize; i = 0usize; while i < histograms_size { { let mut ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&(histograms[(i as (usize))]).slice()[0..], alphabet_size, tree, &mut (*xself).depths_.slice_mut()[(ix as (usize))..], &mut (*xself).bits_.slice_mut()[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } /* fn StoreSymbol(mut xself: &mut BlockEncoder, mut symbol: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { if (*xself).block_len_ == 0usize { let mut block_ix: usize = { 
(*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let mut block_len: u32 = *(*xself).block_lengths_[(block_ix as (usize))..]; let mut block_type: u8 = *(*xself).block_types_[(block_ix as (usize))..]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = (block_type as (usize)).wrapping_mul((*xself).alphabet_size_); StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let mut ix: usize = (*xself).entropy_ix_.wrapping_add(symbol); BrotliWriteBits(*(*xself).depths_[(ix as (usize))..] as (usize), *(*xself).bits_[(ix as (usize))..] as (usize), storage_ix, storage); } } fn CommandCopyLenCode(mut xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 ^ (*xself).copy_len_ >> 24i32 } fn GetInsertLengthCode(mut insertlen: usize) -> u16 { if insertlen < 6usize { insertlen as (u16) } else if insertlen < 130usize { let mut nbits: u32 = Log2FloorNonZero(insertlen.wrapping_sub(2usize)).wrapping_sub(1u32); ((nbits << 1i32) as (usize)) .wrapping_add(insertlen.wrapping_sub(2usize) >> nbits) .wrapping_add(2usize) as (u16) } else if insertlen < 2114usize { Log2FloorNonZero(insertlen.wrapping_sub(66usize)).wrapping_add(10u32) as (u16) } else if insertlen < 6210usize { 21u32 as (u16) } else if insertlen < 22594usize { 22u32 as (u16) } else { 23u32 as (u16) } } fn GetCopyLengthCode(mut copylen: usize) -> u16 { if copylen < 10usize { copylen.wrapping_sub(2usize) as (u16) } else if copylen < 134usize { let mut nbits: u32 = Log2FloorNonZero(copylen.wrapping_sub(6usize)).wrapping_sub(1u32); ((nbits << 1i32) as (usize)) .wrapping_add(copylen.wrapping_sub(6usize) >> nbits) .wrapping_add(4usize) as (u16) } else if copylen < 2118usize { Log2FloorNonZero(copylen.wrapping_sub(70usize)).wrapping_add(12u32) as (u16) } else { 23u32 as (u16) } } fn GetInsertExtra(mut inscode: u16) -> u32 { kInsExtra[inscode as (usize)] } fn 
GetInsertBase(mut inscode: u16) -> u32 { kInsBase[inscode as (usize)] } fn GetCopyBase(mut copycode: u16) -> u32 { kCopyBase[copycode as (usize)] } fn GetCopyExtra(mut copycode: u16) -> u32 { kCopyExtra[copycode as (usize)] } fn StoreCommandExtra(mut cmd: &[Command], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut copylen_code: u32 = CommandCopyLenCode(cmd); let mut inscode: u16 = GetInsertLengthCode((*cmd).insert_len_ as (usize)); let mut copycode: u16 = GetCopyLengthCode(copylen_code as (usize)); let mut insnumextra: u32 = GetInsertExtra(inscode); let mut insextraval: usize = (*cmd).insert_len_.wrapping_sub(GetInsertBase(inscode)) as (usize); let mut copyextraval: usize = copylen_code.wrapping_sub(GetCopyBase(copycode)) as (usize); let mut bits: usize = copyextraval << insnumextra | insextraval; BrotliWriteBits(insnumextra.wrapping_add(GetCopyExtra(copycode)) as (usize), bits, storage_ix, storage); } fn Context(mut p1: u8, mut p2: u8, mut mode: ContextType) -> u8 { if mode as (i32) == ContextType::CONTEXT_LSB6 as (i32) { return (p1 as (i32) & 0x3fi32) as (u8); } if mode as (i32) == ContextType::CONTEXT_MSB6 as (i32) { return (p1 as (i32) >> 2i32) as (u8); } if mode as (i32) == ContextType::CONTEXT_UTF8 as (i32) { return (kUTF8ContextLookup[p1 as (usize)] as (i32) | kUTF8ContextLookup[(p2 as (i32) + 256i32) as (usize)] as (i32)) as (u8); } if mode as (i32) == ContextType::CONTEXT_SIGNED as (i32) { return ((kSigned3BitContextLookup[p1 as (usize)] as (i32) << 3i32) + kSigned3BitContextLookup[p2 as (usize)] as (i32)) as (u8); } 0i32 as (u8) } fn StoreSymbolWithContext(mut xself: &mut BlockEncoder, mut symbol: usize, mut context: usize, mut context_map: &[u32], mut storage_ix: &mut usize, mut storage: &mut [u8], context_bits: usize) { if (*xself).block_len_ == 0usize { let mut block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let mut block_len: u32 = *(*xself).block_lengths_[(block_ix as 
(usize))..]; let mut block_type: u8 = *(*xself).block_types_[(block_ix as (usize))..]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = block_type as (usize) << context_bits; StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let mut histo_ix: usize = context_map[((*xself).entropy_ix_.wrapping_add(context) as (usize))] as (usize); let mut ix: usize = histo_ix.wrapping_mul((*xself).alphabet_size_).wrapping_add(symbol); BrotliWriteBits(*(*xself).depths_[(ix as (usize))..] as (usize), *(*xself).bits_[(ix as (usize))..] as (usize), storage_ix, storage); } } fn CommandCopyLen(mut xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 } fn CommandDistanceContext(mut xself: &Command) -> u32 { let mut r: u32 = ((*xself).cmd_prefix_ as (i32) >> 6i32) as (u32); let mut c: u32 = ((*xself).cmd_prefix_ as (i32) & 7i32) as (u32); if (r == 0u32 || r == 2u32 || r == 4u32 || r == 7u32) && (c <= 2u32) { return c; } 3u32 } fn CleanupBlockEncoder(mut m: &mut [MemoryManager], mut xself: &mut BlockEncoder) { { BrotliFree(m, (*xself).depths_); (*xself).depths_ = 0i32; } { BrotliFree(m, (*xself).bits_); (*xself).bits_ = 0i32; } } fn JumpToByteBoundary(mut storage_ix: &mut usize, mut storage: &mut [u8]) { *storage_ix = (*storage_ix).wrapping_add(7u32 as (usize)) & !7u32 as (usize); storage[((*storage_ix >> 3i32) as (usize))] = 0i32 as (u8); } pub fn BrotliStoreMetaBlock(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut prev_byte: u8, mut prev_byte2: u8, mut is_last: i32, mut num_direct_distance_codes: u32, mut distance_postfix_bits: u32, mut literal_context_mode: ContextType, mut commands: &[Command], mut n_commands: usize, mut mb: &[MetaBlockSplit], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut pos: usize = start_pos; let mut i: usize; let mut num_distance_codes: usize = 
(16u32) .wrapping_add(num_direct_distance_codes) .wrapping_add(48u32 << distance_postfix_bits) as (usize); let mut tree: *mut HuffmanTree; let mut literal_enc: BlockEncoder; let mut command_enc: BlockEncoder; let mut distance_enc: BlockEncoder; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { BrotliAllocate(m, ((2i32 * 704i32 + 1i32) as (usize)) .wrapping_mul(::std::mem::size_of::<HuffmanTree>())) } else { 0i32 }; if !(0i32 == 0) { return; } InitBlockEncoder(&mut literal_enc, 256usize, (*mb).literal_split.num_types, (*mb).literal_split.types, (*mb).literal_split.lengths, (*mb).literal_split.num_blocks); InitBlockEncoder(&mut command_enc, 704usize, (*mb).command_split.num_types, (*mb).command_split.types, (*mb).command_split.lengths, (*mb).command_split.num_blocks); InitBlockEncoder(&mut distance_enc, num_distance_codes, (*mb).distance_split.num_types, (*mb).distance_split.types, (*mb).distance_split.lengths, (*mb).distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&mut literal_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut command_enc, tree, storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut distance_enc, tree, storage_ix, storage); BrotliWriteBits(2usize, distance_postfix_bits as (usize), storage_ix, storage); BrotliWriteBits(4usize, (num_direct_distance_codes >> distance_postfix_bits) as (usize), storage_ix, storage); i = 0usize; while i < (*mb).literal_split.num_types { { BrotliWriteBits(2usize, literal_context_mode as (usize), storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } if (*mb).literal_context_map_size == 0usize { StoreTrivialContextMap((*mb).literal_histograms_size, 6usize, tree, storage_ix, storage); } else { EncodeContextMap(m, (*mb).literal_context_map, (*mb).literal_context_map_size, (*mb).literal_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } } if (*mb).distance_context_map_size == 0usize { 
StoreTrivialContextMap((*mb).distance_histograms_size, 2usize, tree, storage_ix, storage); } else { EncodeContextMap(m, (*mb).distance_context_map, (*mb).distance_context_map_size, (*mb).distance_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } } BuildAndStoreEntropyCodesLiteral(m, &mut literal_enc, (*mb).literal_histograms, (*mb).literal_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } BuildAndStoreEntropyCodesCommand(m, &mut command_enc, (*mb).command_histograms, (*mb).command_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } BuildAndStoreEntropyCodesDistance(m, &mut distance_enc, (*mb).distance_histograms, (*mb).distance_histograms_size, tree, storage_ix, storage); if !(0i32 == 0) { return; } { BrotliFree(m, tree); tree = 0i32; } i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut cmd_code: usize = cmd.cmd_prefix_ as (usize); StoreSymbol(&mut command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (*mb).literal_context_map_size == 0usize { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { StoreSymbol(&mut literal_enc, input[((pos & mask) as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } else { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { let mut context: usize = Context(prev_byte, prev_byte2, literal_context_mode) as (usize); let mut literal: u8 = input[((pos & mask) as (usize))]; StoreSymbolWithContext(&mut literal_enc, literal as (usize), context, (*mb).literal_context_map, storage_ix, storage, 6usize); prev_byte2 = prev_byte; prev_byte = literal; pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 { prev_byte2 = input[((pos.wrapping_sub(2usize) & mask) as (usize))]; prev_byte = 
input[((pos.wrapping_sub(1usize) & mask) as (usize))]; if cmd.cmd_prefix_ as (i32) >= 128i32 { let mut dist_code: usize = cmd.dist_prefix_ as (usize); let mut distnumextra: u32 = cmd.dist_extra_ >> 24i32; let mut distextra: usize = (cmd.dist_extra_ & 0xffffffu32) as (usize); if (*mb).distance_context_map_size == 0usize { StoreSymbol(&mut distance_enc, dist_code, storage_ix, storage); } else { let mut context: usize = CommandDistanceContext(&cmd) as (usize); StoreSymbolWithContext(&mut distance_enc, dist_code, context, (*mb).distance_context_map, storage_ix, storage, 2usize); } BrotliWriteBits(distnumextra as (usize), distextra, storage_ix, storage); } } } i = i.wrapping_add(1 as (usize)); } CleanupBlockEncoder(m, &mut distance_enc); CleanupBlockEncoder(m, &mut command_enc); CleanupBlockEncoder(m, &mut literal_enc); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn HistogramClearLiteral(mut xself: &mut HistogramLiteral) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 256]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramClearCommand(mut xself: &mut HistogramCommand) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 704]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramClearDistance(mut xself: &mut HistogramDistance) { memset((*xself).data_.as_mut_ptr(), 0i32, ::std::mem::size_of::<[u32; 520]>()); (*xself).total_count_ = 0usize; (*xself).bit_cost_ = 3.402e+38f64; } fn HistogramAddCommand(mut xself: &mut HistogramCommand, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = (*xself).total_count_.wrapping_add(1 as (usize)); } fn HistogramAddLiteral(mut xself: &mut HistogramLiteral, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = 
(*xself).total_count_.wrapping_add(1 as (usize)); } fn HistogramAddDistance(mut xself: &mut HistogramDistance, mut val: usize) { { let _rhs = 1; let _lhs = &mut (*xself).data_[val]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } (*xself).total_count_ = (*xself).total_count_.wrapping_add(1 as (usize)); } fn BuildHistograms(mut input: &[u8], mut start_pos: usize, mut mask: usize, mut commands: &[Command], mut n_commands: usize, mut lit_histo: &mut [HistogramLiteral], mut cmd_histo: &mut [HistogramCommand], mut dist_histo: &mut [HistogramDistance]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut j: usize; HistogramAddCommand(cmd_histo, cmd.cmd_prefix_ as (usize)); j = cmd.insert_len_ as (usize); while j != 0usize { { HistogramAddLiteral(lit_histo, input[((pos & mask) as (usize))] as (usize)); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { HistogramAddDistance(dist_histo, cmd.dist_prefix_ as (usize)); } } i = i.wrapping_add(1 as (usize)); } } fn StoreDataWithHuffmanCodes(mut input: &[u8], mut start_pos: usize, mut mask: usize, mut commands: &[Command], mut n_commands: usize, mut lit_depth: &[u8], mut lit_bits: &[u16], mut cmd_depth: &[u8], mut cmd_bits: &[u16], mut dist_depth: &[u8], mut dist_bits: &[u16], mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let cmd_code: usize = cmd.cmd_prefix_ as (usize); let mut j: usize; BrotliWriteBits(cmd_depth[(cmd_code as (usize))] as (usize), cmd_bits[(cmd_code as (usize))] as (usize), storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); j = cmd.insert_len_ as (usize); while j != 0usize { { let literal: u8 = input[((pos & mask) as 
(usize))]; BrotliWriteBits(lit_depth[(literal as (usize))] as (usize), lit_bits[(literal as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 24i32; let distextra: u32 = cmd.dist_extra_ & 0xffffffu32; BrotliWriteBits(dist_depth[(dist_code as (usize))] as (usize), dist_bits[(dist_code as (usize))] as (usize), storage_ix, storage); BrotliWriteBits(distnumextra as (usize), distextra as (usize), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } pub fn BrotliStoreMetaBlockTrivial(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut is_last: i32, mut commands: &[Command], mut n_commands: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut lit_histo: HistogramLiteral; let mut cmd_histo: HistogramCommand; let mut dist_histo: HistogramDistance; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; let mut cmd_depth: [u8; 704]; let mut cmd_bits: [u16; 704]; let mut dist_depth: [u8; 64]; let mut dist_bits: [u16; 64]; let mut tree: *mut HuffmanTree; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); HistogramClearLiteral(&mut lit_histo); HistogramClearCommand(&mut cmd_histo); HistogramClearDistance(&mut dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliWriteBits(13usize, 0usize, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { BrotliAllocate(m, ((2i32 * 704i32 + 1i32) as (usize)) .wrapping_mul(::std::mem::size_of::<HuffmanTree>())) } else { 0i32 }; if !(0i32 == 0) { return; } BuildAndStoreHuffmanTree(lit_histo.data_.as_mut_ptr(), 256usize, tree, lit_depth.as_mut_ptr(), 
lit_bits.as_mut_ptr(), storage_ix, storage); BuildAndStoreHuffmanTree(cmd_histo.data_.as_mut_ptr(), 704usize, tree, cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), storage_ix, storage); BuildAndStoreHuffmanTree(dist_histo.data_.as_mut_ptr(), 64usize, tree, dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); { BrotliFree(m, tree); tree = 0i32; } StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn StoreStaticCommandHuffmanTree(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(56usize, 0x926244u32 as (usize) << 32i32 | 0x16307003u32 as (usize), storage_ix, storage); BrotliWriteBits(3usize, 0x0u32 as (usize), storage_ix, storage); } fn StoreStaticDistanceHuffmanTree(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(28usize, 0x369dc03u32 as (usize), storage_ix, storage); } pub fn BrotliStoreMetaBlockFast(mut m: &mut [MemoryManager], mut input: &[u8], mut start_pos: usize, mut length: usize, mut mask: usize, mut is_last: i32, mut commands: &[Command], mut n_commands: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13usize, 0usize, storage_ix, storage); if n_commands <= 128usize { let mut histogram: [u32; 256] = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 
0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; let mut pos: usize = start_pos; let mut num_literals: usize = 0usize; let mut i: usize; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))]; let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { { let _rhs = 1; let _lhs = &mut histogram[input[((pos & mask) as (usize))] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } num_literals = num_literals.wrapping_add(cmd.insert_len_ as (usize)); pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); } i = i.wrapping_add(1 as (usize)); } BrotliBuildAndStoreHuffmanTreeFast(m, histogram.as_mut_ptr(), num_literals, 8usize, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } StoreStaticCommandHuffmanTree(storage_ix, storage); 
StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), kStaticCommandCodeDepth.as_ptr(), kStaticCommandCodeBits.as_ptr(), kStaticDistanceCodeDepth.as_ptr(), kStaticDistanceCodeBits.as_ptr(), storage_ix, storage); } else { let mut lit_histo: HistogramLiteral; let mut cmd_histo: HistogramCommand; let mut dist_histo: HistogramDistance; let mut lit_depth: [u8; 256]; let mut lit_bits: [u16; 256]; let mut cmd_depth: [u8; 704]; let mut cmd_bits: [u16; 704]; let mut dist_depth: [u8; 64]; let mut dist_bits: [u16; 64]; HistogramClearLiteral(&mut lit_histo); HistogramClearCommand(&mut cmd_histo); HistogramClearDistance(&mut dist_histo); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.data_.as_mut_ptr(), lit_histo.total_count_, 8usize, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.data_.as_mut_ptr(), cmd_histo.total_count_, 10usize, cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } BrotliBuildAndStoreHuffmanTreeFast(m, dist_histo.data_.as_mut_ptr(), dist_histo.total_count_, 6usize, dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); if !(0i32 == 0) { return; } StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, lit_depth.as_mut_ptr(), lit_bits.as_mut_ptr(), cmd_depth.as_mut_ptr(), cmd_bits.as_mut_ptr(), dist_depth.as_mut_ptr(), dist_bits.as_mut_ptr(), storage_ix, storage); } if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn BrotliStoreUncompressedMetaBlockHeader(mut length: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut lenbits: usize; let mut nlenbits: usize; let mut nibblesbits: usize; BrotliWriteBits(1usize, 0usize, storage_ix, 
storage); BrotliEncodeMlen(length, &mut lenbits, &mut nlenbits, &mut nibblesbits); BrotliWriteBits(2usize, nibblesbits, storage_ix, storage); BrotliWriteBits(nlenbits, lenbits, storage_ix, storage); BrotliWriteBits(1usize, 1usize, storage_ix, storage); } pub fn BrotliStoreUncompressedMetaBlock(mut is_final_block: i32, mut input: &[u8], mut position: usize, mut mask: usize, mut len: usize, mut storage_ix: &mut usize, mut storage: &mut [u8]) { let mut masked_pos: usize = position & mask; BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); if masked_pos.wrapping_add(len) > mask.wrapping_add(1usize) { let mut len1: usize = mask.wrapping_add(1usize).wrapping_sub(masked_pos); let dst_start = ((*storage_ix >> 3i32) as (usize)); storage[dst_start..len1].clone_from_slice(input[masked_pos..masked_pos + len1]) *storage_ix = (*storage_ix).wrapping_add(len1 << 3i32); len = len.wrapping_sub(len1); masked_pos = 0usize; } let dst_start = (*storage_ix >> 3i32) as (usize); storage[dst_start..dst_start + len].clone_from_slice(input[masked_pos..masked_pos + len]) *storage_ix = (*storage_ix).wrapping_add(len << 3i32); BrotliWriteBitsPrepareStorage(*storage_ix, storage); if is_final_block != 0 { BrotliWriteBits(1u8, 1u64, storage_ix, storage); BrotliWriteBits(1u8, 1u64, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } } pub fn BrotliStoreSyncMetaBlock(mut storage_ix: &mut usize, mut storage: &mut [u8]) { BrotliWriteBits(6usize, 6usize, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } */
#![allow(unknown_lints)]
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(unused_macros)]
// Std Write is only needed for the stderr warning path; absent in no-stdlib builds.
#[cfg(not(feature="no-stdlib"))]
use std::io::Write;
use super::input_pair::InputPair;
use super::block_split::BlockSplit;
use enc::backward_references::BrotliEncoderParams;
use super::super::dictionary::{kBrotliDictionary, kBrotliDictionarySizeBitsByLength,
                               kBrotliDictionaryOffsetsByLength};
use super::super::transform::{TransformDictionaryWord};
use super::static_dict::kNumDistanceCacheEntries;
use super::command::{Command, GetCopyLengthCode, GetInsertLengthCode,
                     CommandDistanceIndexAndOffset};
use super::constants::{BROTLI_NUM_BLOCK_LEN_SYMBOLS, kZeroRepsBits, kZeroRepsDepth,
                       kNonZeroRepsBits, kNonZeroRepsDepth, kCodeLengthBits, kCodeLengthDepth,
                       kStaticCommandCodeDepth, kStaticCommandCodeBits, kStaticDistanceCodeDepth,
                       kStaticDistanceCodeBits, kSigned3BitContextLookup, kUTF8ContextLookup,
                       kInsBase, kInsExtra, kCopyBase, kCopyExtra};
use super::entropy_encode::{HuffmanTree, BrotliWriteHuffmanTree, BrotliCreateHuffmanTree,
                            BrotliConvertBitDepthsToSymbols, NewHuffmanTree, InitHuffmanTree,
                            SortHuffmanTreeItems, HuffmanComparator, BrotliSetDepth};
use super::histogram::{HistogramAddItem, HistogramLiteral, HistogramCommand, HistogramDistance,
                       ContextType};
use super::super::alloc;
use super::super::alloc::{SliceWrapper, SliceWrapperMut};
use super::super::core;
use super::find_stride;
use super::interface;

// One entry of the block-length prefix code: symbol covers values
// [offset, offset + 2^nbits) encoded with `nbits` extra bits.
pub struct PrefixCodeRange {
  pub offset: u32,
  pub nbits: u32,
}

// Usable window size for a given lg(window) setting.
// NOTE(review): the -16 presumably reserves slack bytes at the window edge — confirm
// against the callers of `max_distance` in process_command_queue.
fn window_size_from_lgwin(lgwin: i32) -> usize {
  (1usize << lgwin) - 16usize
}

// Short human-readable tag for a literal context mode (used for logging/IR output).
fn context_type_str(context_type: ContextType) -> &'static str {
  match context_type {
    ContextType::CONTEXT_LSB6 => "lsb6",
    ContextType::CONTEXT_MSB6 => "msb6",
    ContextType::CONTEXT_UTF8 => "utf8",
    ContextType::CONTEXT_SIGNED => "sign",
  }
}

// Same idea as context_type_str, but keyed off the 4-bit prediction-mode nibble.
fn prediction_mode_str(prediction_mode_nibble: interface::LiteralPredictionModeNibble) -> &'static str {
  match prediction_mode_nibble.prediction_mode() {
    interface::LITERAL_PREDICTION_MODE_SIGN =>
"sign",
    interface::LITERAL_PREDICTION_MODE_LSB6 => "lsb6",
    interface::LITERAL_PREDICTION_MODE_MSB6 => "msb6",
    interface::LITERAL_PREDICTION_MODE_UTF8 => "utf8",
    _ => "unknown",
  }
}

// Zero-copy view of a byte slice; Default gives an empty slice so a command's
// data can be stolen with core::mem::replace (see CommandQueue::flush).
#[derive(Copy,Clone,Default)]
pub struct InputReference<'a>(pub &'a [u8]);
impl<'a> SliceWrapper<u8> for InputReference<'a> {
  fn slice(&self) -> &[u8] {
    self.0
  }
}

// Minimum literal-run length at which the high-entropy ("random") check is even
// attempted; higher quality settings test shorter and shorter runs.
fn is_long_enough_to_be_random(len: usize, high_entropy_detection_quality: u8) -> bool {
  return match high_entropy_detection_quality {
    0 => false, // detection disabled entirely
    1 => len >= 256,
    2 => len >= 128,
    3 => len >= 96,
    4 => len >= 64,
    5 => len >= 48,
    6 => len >= 32,
    7 => len >= 24,
    8 => len >= 16,
    9 => len >= 8,
    10 => len >= 6,
    11 => len >= 4,
    _ => len >= 8, // any other value behaves like quality 9
  }
}

// Capacity (in commands) of CommandQueue's fixed buffer; the queue flushes to
// the callback whenever it fills.
const COMMAND_BUFFER_SIZE: usize = 16384;

// Sink for IR commands. Implementors provide `push`; the provided methods wrap
// both halves of an InputPair (the input may be split into two slices) into
// Literal / RandLiteral commands, skipping empty halves.
trait CommandProcessor<'a> {
  fn push<Cb: FnMut(&[interface::Command<InputReference>])>(&mut self,
                                                            val: interface::Command<InputReference<'a> >,
                                                            callback: &mut Cb);

  // Emit `data` as one or two Literal commands (one per non-empty half).
  fn push_literals<Cb>(&mut self, data: &InputPair<'a>, callback: &mut Cb)
    where Cb: FnMut(&[interface::Command<InputReference>]) {
    if data.0.len() != 0 {
      self.push(interface::Command::Literal(interface::LiteralCommand{
        data: InputReference(data.0),
        prob: interface::FeatureFlagSliceType::<InputReference>::default(),
      }), callback);
    }
    if data.1.len() != 0 {
      self.push(interface::Command::Literal(interface::LiteralCommand{
        data: InputReference(data.1),
        prob: interface::FeatureFlagSliceType::<InputReference>::default(),
      }), callback);
    }
  }

  // Same as push_literals but marks the bytes as incompressible/random.
  fn push_rand_literals<Cb>(&mut self, data: &InputPair<'a>, callback: &mut Cb)
    where Cb: FnMut(&[interface::Command<InputReference>]) {
    if data.0.len() != 0 {
      self.push(interface::Command::RandLiteral(interface::RandLiteralCommand{
        data: InputReference(data.0),
      }), callback);
    }
    if data.1.len() != 0 {
      self.push(interface::Command::RandLiteral(interface::RandLiteralCommand{
        data: InputReference(data.1),
      }), callback);
    }
  }

  // Emit a literal block-type switch (stride byte initially 0; CommandQueue
  // overrides this to patch the stride in later).
  fn push_block_switch_literal<Cb>(&mut self, block_type: u8, callback: &mut Cb)
    where Cb: FnMut(&[interface::Command<InputReference>]) {
self.push(interface::Command::BlockSwitchLiteral(interface::LiteralBlockSwitch::new(block_type, 0)), callback) } } struct CommandQueue<'a, AllocU32:alloc::Allocator<u32> > { mb: InputPair<'a>, mb_byte_offset: usize, queue: [interface::Command<InputReference<'a> >;COMMAND_BUFFER_SIZE], loc: usize, last_btypel_index: Option<usize>, entropy_tally_scratch: find_stride::EntropyTally<AllocU32>, entropy_pyramid: find_stride::EntropyPyramid<AllocU32>, context_map_entropy: ContextMapEntropy<'a, AllocU32>, stride_detection_quality: u8, high_entropy_detection_quality: u8, block_type_literal: u8, } impl<'a, AllocU32: alloc::Allocator<u32> > CommandQueue<'a, AllocU32 > { fn new(m32:&mut AllocU32, mb: InputPair<'a>, stride_detection_quality: u8, high_entropy_detection_quality: u8, context_map_entropy: ContextMapEntropy<'a, AllocU32>, ) -> CommandQueue <'a, AllocU32> { let mut entropy_tally_scratch = if stride_detection_quality == 0 && high_entropy_detection_quality == 0 { find_stride::EntropyTally::<AllocU32>::disabled_placeholder(m32) } else { if stride_detection_quality == 0 { find_stride::EntropyTally::<AllocU32>::new(m32, Some(1)) } else { find_stride::EntropyTally::<AllocU32>::new(m32, None) } }; let mut entropy_pyramid = if stride_detection_quality == 0 && high_entropy_detection_quality == 0{ find_stride::EntropyPyramid::<AllocU32>::disabled_placeholder(m32) } else { find_stride::EntropyPyramid::<AllocU32>::new(m32) }; if stride_detection_quality > 0 { entropy_pyramid.populate(mb.0, mb.1, &mut entropy_tally_scratch); } else { if high_entropy_detection_quality != 0 { entropy_pyramid.populate_stride1(mb.0, mb.1); } } CommandQueue { mb:mb, mb_byte_offset:0, queue:[interface::Command::<InputReference<'a>>::default();COMMAND_BUFFER_SIZE], loc:0, entropy_tally_scratch: entropy_tally_scratch, entropy_pyramid: entropy_pyramid, last_btypel_index: None, stride_detection_quality: stride_detection_quality, high_entropy_detection_quality: high_entropy_detection_quality, 
context_map_entropy: context_map_entropy, block_type_literal: 0, } } fn full(&self) -> bool { self.loc == self.queue.len() } fn size(&self) -> usize { self.loc } fn clear(&mut self) { self.loc = 0; self.block_type_literal = 0; } fn content(&mut self) -> &[interface::Command<InputReference>] { self.queue.split_at(self.loc).0 } fn flush<Cb>(&mut self, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { let mut local_byte_offset = self.mb_byte_offset; let mb_len = self.mb.0.len() + self.mb.1.len(); let cur_stride = self.entropy_tally_scratch.pick_best_stride(self.queue.split_at(self.loc).0, self.mb.0, self.mb.1, &mut self.mb_byte_offset, &self.entropy_pyramid, self.stride_detection_quality); if self.high_entropy_detection_quality > 0 { for command in self.queue.split_at_mut(self.loc).0.iter_mut() { let mut switch_to_random: Option<InputReference> = None; match *command { interface::Command::BlockSwitchCommand(_) | interface::Command::BlockSwitchDistance(_) | interface::Command::PredictionMode(_) => {}, interface::Command::BlockSwitchLiteral(bs) => { self.block_type_literal = bs.block_type(); }, interface::Command::Copy(ref copy) => { local_byte_offset += copy.num_bytes as usize; }, interface::Command::Dict(ref dict) => { local_byte_offset += dict.final_size as usize; }, interface::Command::RandLiteral(ref lit) => { local_byte_offset += lit.data.slice().len(); }, interface::Command::Literal(ref mut lit) => { if is_long_enough_to_be_random(lit.data.slice().len(), self.high_entropy_detection_quality) { //print!("Long enough to be random {}\n", lit.data.slice().len()); let mut priors = self.entropy_tally_scratch.get_previous_bytes( self.mb.0, self.mb.1, local_byte_offset); let mut rev_priors = priors; rev_priors.reverse(); //print!("Stride {} prev {:?} byte offset {} {:?}\n", cur_stride, rev_priors, local_byte_offset, lit.data.slice()); let literal_cost = self.entropy_pyramid.bit_cost_of_literals( lit.data.slice(), local_byte_offset as u32, mb_len, 
cur_stride, priors, &mut self.entropy_tally_scratch); let cm_literal_cost = self.context_map_entropy.compute_bit_cost_of_data_subset( lit.data.slice(), priors[0], priors[1], self.block_type_literal, self.entropy_tally_scratch.peek()); let min_cost = if cm_literal_cost < literal_cost { cm_literal_cost } else { literal_cost }; local_byte_offset += lit.data.slice().len(); let random_cost = lit.data.slice().len() as find_stride::floatY * 8.0 + 1.0; print!("Rnd Cost {} ({} bytes)\nLit Cost {} ({} bytes) ratio {}\nCML Cost {} ({} bytes) ratio {}\n", random_cost, random_cost as f64 / 8.0, literal_cost, literal_cost as f64 / 8.0, literal_cost as f64 / 8.0 / lit.data.slice().len() as f64, cm_literal_cost, cm_literal_cost as f64 / 8.0, cm_literal_cost as f64 / 8.0 / lit.data.slice().len() as f64 ); if random_cost <= min_cost { // transmute switch_to_random = Some( core::mem::replace(&mut lit.data, InputReference::default())); } } else { local_byte_offset += lit.data.slice().len(); } } } if let Some(data) = switch_to_random { *command = interface::Command::RandLiteral( interface::RandLiteralCommand{ data: data, }); } } } match self.last_btypel_index.clone() { None => {}, Some(literal_block_type_offset) => { match &mut self.queue[literal_block_type_offset] { &mut interface::Command::BlockSwitchLiteral(ref mut cmd) => cmd.1 = cur_stride, _ => panic!("Logic Error: literal block type index must point to literal block type"), } }, } self.last_btypel_index = None; callback(self.queue.split_at(self.loc).0); self.clear(); } fn free<Cb>(mut self, m32: &mut AllocU32, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { self.flush(callback); self.entropy_tally_scratch.free(m32); self.entropy_pyramid.free(m32); self.context_map_entropy.free(m32); } } impl<'a, AllocU32: alloc::Allocator<u32> > CommandProcessor<'a> for CommandQueue<'a, AllocU32 > { fn push<Cb> (&mut self, val: interface::Command<InputReference<'a> >, callback :&mut Cb) where Cb: 
FnMut(&[interface::Command<InputReference>]) { self.queue[self.loc] = val; self.loc += 1; if self.full() { self.flush(callback); } } fn push_block_switch_literal<Cb>(&mut self, block_type: u8, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { self.flush(callback); self.last_btypel_index = Some(self.size()); self.push(interface::Command::BlockSwitchLiteral( interface::LiteralBlockSwitch::new(block_type, 0)), callback) } } struct ContextMapEntropy<'a, AllocU32:alloc::Allocator<u32>> { input: InputPair<'a>, entropy_tally: find_stride::EntropyBucketPopulation<AllocU32>, context_map: interface::PredictionModeContextMap<InputReference<'a>>, block_type: u8, local_byte_offset: usize, } impl<'a, AllocU32:alloc::Allocator<u32>> ContextMapEntropy<'a, AllocU32> { fn new(m32: &mut AllocU32, input: InputPair<'a>, prediction_mode: interface::PredictionModeContextMap<InputReference<'a>>) -> Self { ContextMapEntropy::<AllocU32>{ input: input, entropy_tally:find_stride::EntropyBucketPopulation::<AllocU32>::new(m32), context_map: prediction_mode, block_type: 0, local_byte_offset: 0, } } fn compute_bit_cost_of_data_subset(&mut self, data: &[u8], mut prev_byte: u8, mut prev_prev_byte: u8, block_type: u8, scratch: &mut find_stride::EntropyBucketPopulation<AllocU32>) -> find_stride::floatY { scratch.bucket_populations.slice_mut().clone_from_slice(self.entropy_tally.bucket_populations.slice()); scratch.bucket_populations.slice_mut()[65535] += 1; // to demonstrate that we have scratch.bucket_populations.slice_mut()[65535] -= 1; // to demonstrate that we have write capability let mut stray_count = 0 as find_stride::floatY; for val in data.iter() { let huffman_table_index = compute_huffman_table_index_for_context_map(prev_byte, prev_prev_byte, self.context_map, block_type); let loc = &mut scratch.bucket_populations.slice_mut()[huffman_table_index * 256 + *val as usize]; if *loc == 0 { stray_count += 1.0; } else { *loc -= 1; } prev_prev_byte = prev_byte; prev_byte = 
*val; } if self.entropy_tally.cached_bit_entropy == 0.0 as find_stride::floatY { self.entropy_tally.cached_bit_entropy = find_stride::HuffmanCost(self.entropy_tally.bucket_populations.slice()); } debug_assert_eq!(find_stride::HuffmanCost(self.entropy_tally.bucket_populations.slice()), self.entropy_tally.cached_bit_entropy); scratch.cached_bit_entropy = find_stride::HuffmanCost(scratch.bucket_populations.slice()); self.entropy_tally.cached_bit_entropy - scratch.cached_bit_entropy + stray_count * 8.0 } fn free(&mut self, m32: &mut AllocU32) { self.entropy_tally.free(m32); } } fn compute_huffman_table_index_for_context_map<SliceType: alloc::SliceWrapper<u8> > ( prev_byte: u8, prev_prev_byte: u8, context_map: interface::PredictionModeContextMap<SliceType>, block_type: u8, ) -> usize { let prior = Context(prev_byte, prev_prev_byte, context_map.literal_prediction_mode.to_context_enum().unwrap()); assert!(prior < 64); let context_map_index = ((block_type as usize)<< 6) | prior as usize; if context_map_index < context_map.literal_context_map.slice().len() { context_map.literal_context_map.slice()[context_map_index] as usize } else { prior as usize } } impl<'a, 'b, AllocU32:alloc::Allocator<u32>> CommandProcessor<'b> for ContextMapEntropy<'a, AllocU32> { fn push<Cb: FnMut(&[interface::Command<InputReference>])>(&mut self, val: interface::Command<InputReference<'b>>, callback: &mut Cb) { match val { interface::Command::BlockSwitchCommand(_) | interface::Command::BlockSwitchDistance(_) | interface::Command::PredictionMode(_) => {} interface::Command::Copy(ref copy) => { self.local_byte_offset += copy.num_bytes as usize; }, interface::Command::Dict(ref dict) => { self.local_byte_offset += dict.final_size as usize; }, interface::Command::RandLiteral(ref lit) => { self.local_byte_offset += lit.data.slice().len(); }, interface::Command::BlockSwitchLiteral(block_type) => self.block_type = block_type.block_type(), interface::Command::Literal(ref lit) => { let mut priors= [0u8, 
0u8]; if self.local_byte_offset > 1 { priors[0] = self.input[self.local_byte_offset - 2]; priors[1] = self.input[self.local_byte_offset - 1]; } for literal in lit.data.slice().iter() { let huffman_table_index = compute_huffman_table_index_for_context_map(priors[1], priors[0], self.context_map, self.block_type); self.entropy_tally.bucket_populations.slice_mut()[((huffman_table_index as usize) << 8) | *literal as usize] += 1; priors[0] = priors[1]; priors[1] = *literal; } self.local_byte_offset += lit.data.slice().len(); } } let cbval = [val]; callback(&cbval[..]); } } #[cfg(not(feature="no-stdlib"))] fn warn_on_missing_free() { let _err = ::std::io::stderr().write(b"Need to free entropy_tally_scratch before dropping CommandQueue\n"); } #[cfg(feature="no-stdlib")] fn warn_on_missing_free() { // no way to warn in this case } impl<'a, AllocU32: alloc::Allocator<u32>> Drop for CommandQueue<'a, AllocU32> { fn drop(&mut self) { if !self.entropy_tally_scratch.is_free() { warn_on_missing_free(); } } } fn process_command_queue<'a, Cb:FnMut(&[interface::Command<InputReference>]), CmdProcessor: CommandProcessor<'a> > ( command_queue: &mut CmdProcessor, input: InputPair<'a>, commands: &[Command], n_postfix: u32, n_direct: u32, dist_cache: &[i32;kNumDistanceCacheEntries], mut recoder_state :RecoderState, block_type: &MetaBlockSplitRefs, params: &BrotliEncoderParams, context_type:Option<ContextType>, callback: &mut Cb, ) -> RecoderState { let mut input_iter = input.clone(); let mut local_dist_cache = [0i32;kNumDistanceCacheEntries]; local_dist_cache.clone_from_slice(&dist_cache[..]); let mut btypel_counter = 0usize; let mut btypec_counter = 0usize; let mut btyped_counter = 0usize; let mut btypel_sub = if block_type.btypel.num_types == 1 { 1u32<<31 } else {block_type.btypel.lengths[0]}; let mut btypec_sub = if block_type.btypec.num_types == 1 { 1u32<<31 } else {block_type.btypec.lengths[0]}; let mut btyped_sub = if block_type.btyped.num_types == 1 { 1u32<<31 } else 
{block_type.btyped.lengths[0]}; { command_queue.push_block_switch_literal(0, callback); } let mut mb_len = input.len(); for cmd in commands.iter() { let (inserts, interim) = input_iter.split_at(core::cmp::min(cmd.insert_len_ as usize, mb_len)); recoder_state.num_bytes_encoded += inserts.len(); let _copy_cursor = input.len() - interim.len(); // let distance_context = CommandDistanceContext(cmd); let copylen_code: u32 = CommandCopyLenCode(cmd); let (prev_dist_index, dist_offset) = CommandDistanceIndexAndOffset(cmd, n_postfix, n_direct); let final_distance: usize; if prev_dist_index == 0 { final_distance = dist_offset as usize; } else { final_distance = (local_dist_cache[prev_dist_index - 1] as isize + dist_offset) as usize; } let copy_len = copylen_code as usize; let actual_copy_len : usize; let max_distance = core::cmp::min(recoder_state.num_bytes_encoded, window_size_from_lgwin(params.lgwin)); assert!(inserts.len() <= mb_len); { btypec_sub -= 1; if btypec_sub == 0 { btypec_counter += 1; if block_type.btypec.types.len() > btypec_counter { btypec_sub = block_type.btypec.lengths[btypec_counter]; command_queue.push(interface::Command::BlockSwitchCommand( interface::BlockSwitch(block_type.btypec.types[btypec_counter])), callback); } else { btypec_sub = 1u32 << 31; } } } if inserts.len() != 0 { let mut tmp_inserts = inserts; while tmp_inserts.len() > btypel_sub as usize { // we have to divide some: let (in_a, in_b) = tmp_inserts.split_at(btypel_sub as usize); if in_a.len() != 0 { if let Some(_) = context_type { command_queue.push_literals(&in_a, callback); } else { command_queue.push_rand_literals(&in_a, callback); } } mb_len -= in_a.len(); tmp_inserts = in_b; btypel_counter += 1; if block_type.btypel.types.len() > btypel_counter { btypel_sub = block_type.btypel.lengths[btypel_counter]; command_queue.push_block_switch_literal(block_type.btypel.types[btypel_counter], callback); } else { btypel_sub = 1u32<<31; } } if let Some(_) = context_type { 
command_queue.push_literals(&tmp_inserts, callback); }else { command_queue.push_rand_literals(&tmp_inserts, callback); } if tmp_inserts.len() != 0 { mb_len -= tmp_inserts.len(); btypel_sub -= tmp_inserts.len() as u32; } } if copy_len != 0 && cmd.cmd_prefix_ >= 128 { btyped_sub -= 1; if btyped_sub == 0 { btyped_counter += 1; if block_type.btyped.types.len() > btyped_counter { btyped_sub = block_type.btyped.lengths[btyped_counter]; command_queue.push(interface::Command::BlockSwitchDistance( interface::BlockSwitch(block_type.btyped.types[btyped_counter])), callback); } else { btyped_sub = 1u32 << 31; } } } if final_distance > max_distance { // is dictionary assert!(copy_len >= 4); assert!(copy_len < 25); let dictionary_offset = final_distance - max_distance - 1; let ndbits = kBrotliDictionarySizeBitsByLength[copy_len] as usize; let action = dictionary_offset >> ndbits; let word_sub_index = dictionary_offset & ((1 << ndbits) - 1); let word_index = word_sub_index * copy_len + kBrotliDictionaryOffsetsByLength[copy_len] as usize; let raw_word = &kBrotliDictionary[word_index..word_index + copy_len]; let mut transformed_word = [0u8; 38]; actual_copy_len = TransformDictionaryWord(&mut transformed_word[..], raw_word, copy_len as i32, action as i32) as usize; if actual_copy_len <= mb_len { command_queue.push(interface::Command::Dict( interface::DictCommand{ word_size: copy_len as u8, transform: action as u8, final_size: actual_copy_len as u8, empty: 0, word_id: word_sub_index as u32, }), callback); mb_len -= actual_copy_len; assert_eq!(InputPair(transformed_word.split_at(actual_copy_len).0, &[]), interim.split_at(actual_copy_len).0); } else if mb_len != 0 { // truncated dictionary word: represent it as literals instead // won't be random noise since it fits in the dictionary, so we won't check for rand command_queue.push_literals(&interim.split_at(mb_len).0, callback); mb_len = 0; assert_eq!(InputPair(transformed_word.split_at(mb_len).0, &[]), interim.split_at(mb_len).0); } } 
else { actual_copy_len = core::cmp::min(mb_len, copy_len); if actual_copy_len != 0 { command_queue.push(interface::Command::Copy( interface::CopyCommand{ distance: final_distance as u32, num_bytes: actual_copy_len as u32, }), callback); } mb_len -= actual_copy_len; if prev_dist_index != 1 || dist_offset != 0 { // update distance cache unless it's the "0 distance symbol" let mut tmp_dist_cache = [0i32;kNumDistanceCacheEntries - 1]; tmp_dist_cache.clone_from_slice(&local_dist_cache[..kNumDistanceCacheEntries - 1]); local_dist_cache[1..].clone_from_slice(&tmp_dist_cache[..]); local_dist_cache[0] = final_distance as i32; } } let (copied, remainder) = interim.split_at(actual_copy_len); recoder_state.num_bytes_encoded += copied.len(); input_iter = remainder; } recoder_state } fn LogMetaBlock<'a, AllocU32:alloc::Allocator<u32>, Cb>(m32:&mut AllocU32, commands: &[Command], input0: &'a[u8],input1: &'a[u8], n_postfix: u32, n_direct: u32, dist_cache: &[i32;kNumDistanceCacheEntries], recoder_state :&mut RecoderState, block_type: MetaBlockSplitRefs, params: &BrotliEncoderParams, context_type:Option<ContextType>, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]){ let mut local_literal_context_map = [0u8; 256 * 64]; let mut local_distance_context_map = [0u8; 256 * 64]; assert_eq!(*block_type.btypel.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btypel.num_types); assert_eq!(*block_type.btypec.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btypec.num_types); assert_eq!(*block_type.btyped.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btyped.num_types); if block_type.literal_context_map.len() <= 256 * 64 { for (index, item) in block_type.literal_context_map.iter().enumerate() { local_literal_context_map[index] = *item as u8; } } if block_type.distance_context_map.len() <= 256 * 64 { for (index, item) in block_type.distance_context_map.iter().enumerate() { local_distance_context_map[index] = *item as u8; } } let prediction_mode = 
interface::PredictionModeContextMap::<InputReference>{ literal_prediction_mode: interface::LiteralPredictionModeNibble(context_type.unwrap_or(ContextType::CONTEXT_LSB6) as u8), literal_context_map:InputReference(&local_literal_context_map.split_at(block_type.literal_context_map.len()).0), distance_context_map:InputReference(&local_distance_context_map.split_at(block_type.distance_context_map.len()).0), }; let mut context_map_entropy = ContextMapEntropy::<AllocU32>::new(m32, InputPair(input0, input1), prediction_mode); let input = InputPair(input0, input1); process_command_queue(&mut context_map_entropy, input, commands, n_postfix, n_direct, dist_cache, *recoder_state, &block_type, params, context_type, &mut |_x|()); let mut command_queue = CommandQueue::new(m32, InputPair(input0, input1), params.stride_detection_quality, params.high_entropy_detection_quality, context_map_entropy); command_queue.push(interface::Command::PredictionMode( prediction_mode.clone()), callback); *recoder_state = process_command_queue(&mut command_queue, input, commands, n_postfix, n_direct, dist_cache, *recoder_state, &block_type, params, context_type, callback); command_queue.free(m32, callback); // ::std::io::stderr().write(input0).unwrap(); // ::std::io::stderr().write(input1).unwrap(); } static kBlockLengthPrefixCode: [PrefixCodeRange; BROTLI_NUM_BLOCK_LEN_SYMBOLS] = [PrefixCodeRange { offset: 1u32, nbits: 2u32, }, PrefixCodeRange { offset: 5u32, nbits: 2u32, }, PrefixCodeRange { offset: 9u32, nbits: 2u32, }, PrefixCodeRange { offset: 13u32, nbits: 2u32, }, PrefixCodeRange { offset: 17u32, nbits: 3u32, }, PrefixCodeRange { offset: 25u32, nbits: 3u32, }, PrefixCodeRange { offset: 33u32, nbits: 3u32, }, PrefixCodeRange { offset: 41u32, nbits: 3u32, }, PrefixCodeRange { offset: 49u32, nbits: 4u32, }, PrefixCodeRange { offset: 65u32, nbits: 4u32, }, PrefixCodeRange { offset: 81u32, nbits: 4u32, }, PrefixCodeRange { offset: 97u32, nbits: 4u32, }, PrefixCodeRange { offset: 113u32, nbits: 
5u32, },
 // (continuation of kBlockLengthPrefixCode: base offset + extra-bit count per symbol)
 PrefixCodeRange { offset: 145u32, nbits: 5u32, },
 PrefixCodeRange { offset: 177u32, nbits: 5u32, },
 PrefixCodeRange { offset: 209u32, nbits: 5u32, },
 PrefixCodeRange { offset: 241u32, nbits: 6u32, },
 PrefixCodeRange { offset: 305u32, nbits: 6u32, },
 PrefixCodeRange { offset: 369u32, nbits: 7u32, },
 PrefixCodeRange { offset: 497u32, nbits: 8u32, },
 PrefixCodeRange { offset: 753u32, nbits: 9u32, },
 PrefixCodeRange { offset: 1265u32, nbits: 10u32, },
 PrefixCodeRange { offset: 2289u32, nbits: 11u32, },
 PrefixCodeRange { offset: 4337u32, nbits: 12u32, },
 PrefixCodeRange { offset: 8433u32, nbits: 13u32, },
 PrefixCodeRange { offset: 16625u32, nbits: 24u32, }];

// Appends the low `n_bits` of `bits` to the bit stream at bit position `*pos`
// (LSB-first within each byte), then advances `*pos`.
// Requires n_bits <= 56 so that bits shifted by up to 7 still fit in the u64,
// and requires array.len() >= (*pos >> 3) + 8: it always stores the whole
// 8-byte window back, so the caller must leave slack bytes after the stream.
fn BrotliWriteBits(n_bits: u8, bits: u64, pos: &mut usize, array: &mut [u8]) {
  // `bits` must not have stray high bits beyond n_bits.
  assert!((bits >> n_bits as usize) == 0);
  assert!(n_bits <= 56);
  let ptr_offset: usize = ((*pos >> 3) as u32) as usize;
  // Seed with the current partial byte; bytes past it are treated as zero
  // (guaranteed by prior writes / BrotliWriteBitsPrepareStorage).
  let mut v = array[ptr_offset] as u64;
  v |= bits << ((*pos) as u64 & 7);
  // Write `v` back as 8 little-endian bytes.
  array[ptr_offset + 7] = (v >> 56) as u8;
  array[ptr_offset + 6] = ((v >> 48) & 0xff) as u8;
  array[ptr_offset + 5] = ((v >> 40) & 0xff) as u8;
  array[ptr_offset + 4] = ((v >> 32) & 0xff) as u8;
  array[ptr_offset + 3] = ((v >> 24) & 0xff) as u8;
  array[ptr_offset + 2] = ((v >> 16) & 0xff) as u8;
  array[ptr_offset + 1] = ((v >> 8) & 0xff) as u8;
  array[ptr_offset] = (v & 0xff) as u8;
  *pos += n_bits as usize
}

// Zeroes the byte at (byte-aligned) bit position `pos` so subsequent
// BrotliWriteBits calls OR into a clean byte. `pos` must be a multiple of 8.
fn BrotliWriteBitsPrepareStorage(pos: usize, array: &mut [u8]) {
  assert_eq!(pos & 7, 0);
  array[pos >> 3] = 0;
}

// Emits the "code length code" — the small prefix code that encodes the main
// code's code lengths — using a fixed Huffman code for the lengths themselves.
fn BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes: i32,
                                                code_length_bitdepth: &[u8],
                                                storage_ix: &mut usize,
                                                storage: &mut [u8]) {
  // Transmission order of the 18 code-length code lengths.
  static kStorageOrder: [u8; 18] = [1i32 as (u8), 2i32 as (u8), 3i32 as (u8), 4i32 as (u8),
                                    0i32 as (u8), 5i32 as (u8), 17i32 as (u8), 6i32 as (u8),
                                    16i32 as (u8), 7i32 as (u8), 8i32 as (u8), 9i32 as (u8),
                                    10i32 as (u8), 11i32 as (u8), 12i32 as (u8), 13i32 as (u8),
                                    14i32 as (u8), 15i32 as (u8)];
  // Fixed code used to encode each code length value (symbols, lengths below).
  static kHuffmanBitLengthHuffmanCodeSymbols: [u8; 6] = [0i32 as (u8), 7i32 as (u8), 3i32 as (u8), 2i32 as
(u8), 1i32 as (u8), 15i32 as (u8)]; static kHuffmanBitLengthHuffmanCodeBitLengths: [u8; 6] = [2i32 as (u8), 4i32 as (u8), 3i32 as (u8), 2i32 as (u8), 2i32 as (u8), 4i32 as (u8)]; let mut skip_some: u64 = 0u64; let mut codes_to_store: u64 = 18; if num_codes > 1i32 { 'break5: while codes_to_store > 0 { { if code_length_bitdepth[(kStorageOrder[codes_to_store.wrapping_sub(1) as usize] as (usize))] as (i32) != 0i32 { { break 'break5; } } } codes_to_store = codes_to_store.wrapping_sub(1); } } if code_length_bitdepth[(kStorageOrder[0usize] as (usize))] as (i32) == 0i32 && (code_length_bitdepth[(kStorageOrder[1usize] as (usize))] as (i32) == 0i32) { skip_some = 2; if code_length_bitdepth[(kStorageOrder[2usize] as (usize))] as (i32) == 0i32 { skip_some = 3; } } BrotliWriteBits(2, skip_some, storage_ix, storage); { let mut i: u64; i = skip_some; while i < codes_to_store { { let l: usize = code_length_bitdepth[(kStorageOrder[i as usize] as (usize))] as (usize); BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l] as (u8), kHuffmanBitLengthHuffmanCodeSymbols[l] as u64, storage_ix, storage); } i = i.wrapping_add(1); } } } fn BrotliStoreHuffmanTreeToBitMask(huffman_tree_size: usize, huffman_tree: &[u8], huffman_tree_extra_bits: &[u8], code_length_bitdepth: &[u8], code_length_bitdepth_symbols: &[u16], storage_ix: &mut usize, storage: &mut [u8]) { let mut i: usize; i = 0usize; while i < huffman_tree_size { { let ix: usize = huffman_tree[(i as (usize))] as (usize); BrotliWriteBits(code_length_bitdepth[(ix as (usize))] as (u8), code_length_bitdepth_symbols[(ix as (usize))] as (u64), storage_ix, storage); if ix == 16usize { BrotliWriteBits(2, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } else if ix == 17usize { BrotliWriteBits(3, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } pub fn BrotliStoreHuffmanTree(depths: &[u8], num: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, 
storage: &mut [u8]) { let mut huffman_tree: [u8; 704] = [0; 704]; let mut huffman_tree_extra_bits: [u8; 704] = [0; 704]; let mut huffman_tree_size: usize = 0usize; let mut code_length_bitdepth: [u8; 18] = [0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8)]; let mut code_length_bitdepth_symbols: [u16; 18] = [0; 18]; let mut huffman_tree_histogram: [u32; 18] = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32]; let mut i: usize; let mut num_codes: i32 = 0i32; let mut code: usize = 0usize; 0i32; BrotliWriteHuffmanTree(depths, num, &mut huffman_tree_size, &mut huffman_tree[..], &mut huffman_tree_extra_bits[..]); i = 0usize; while i < huffman_tree_size { { let _rhs = 1; let _lhs = &mut huffman_tree_histogram[huffman_tree[i] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } i = i.wrapping_add(1 as (usize)); } i = 0usize; 'break3: while i < 18usize { { if huffman_tree_histogram[i] != 0 { if num_codes == 0i32 { code = i; num_codes = 1i32; } else if num_codes == 1i32 { num_codes = 2i32; { { break 'break3; } } } } } i = i.wrapping_add(1 as (usize)); } BrotliCreateHuffmanTree(&mut huffman_tree_histogram, 18usize, 5i32, tree, &mut code_length_bitdepth); BrotliConvertBitDepthsToSymbols(&mut code_length_bitdepth, 18usize, &mut code_length_bitdepth_symbols); BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes, &code_length_bitdepth, storage_ix, storage); if num_codes == 1i32 { code_length_bitdepth[code] = 0i32 as (u8); } BrotliStoreHuffmanTreeToBitMask(huffman_tree_size, &huffman_tree, &huffman_tree_extra_bits, &code_length_bitdepth, &code_length_bitdepth_symbols, storage_ix, storage); } fn StoreStaticCodeLengthCode(storage_ix: &mut usize, storage: &mut [u8]) { BrotliWriteBits(40, 0xffu32 as (u64) << 32i32 | 
0x55555554u32 as (u64), storage_ix, storage); } pub struct SimpleSortHuffmanTree {} impl HuffmanComparator for SimpleSortHuffmanTree { fn Cmp(self: &Self, v0: &HuffmanTree, v1: &HuffmanTree) -> bool { return (*v0).total_count_ < (*v1).total_count_; } } pub fn BrotliBuildAndStoreHuffmanTreeFast<AllocHT: alloc::Allocator<HuffmanTree>>( m : &mut AllocHT, histogram : &[u32], histogram_total : usize, max_bits : usize, depth : &mut [u8], bits : &mut [u16], storage_ix : &mut usize, storage : &mut [u8] ){ let mut count: u64 = 0; let mut symbols: [u64; 4] = [0; 4]; let mut length: u64 = 0; let mut total: usize = histogram_total; while total != 0usize { if histogram[(length as (usize))] != 0 { if count < 4 { symbols[count as usize] = length; } count = count.wrapping_add(1); total = total.wrapping_sub(histogram[(length as (usize))] as (usize)); } length = length.wrapping_add(1); } if count <= 1 { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); depth[symbols[0usize] as (usize)] = 0i32 as (u8); bits[symbols[0usize] as (usize)] = 0i32 as (u16); return ; } for depth_elem in depth[..(length as usize)].iter_mut() { *depth_elem = 0; // memset } { let max_tree_size: u64 = (2u64).wrapping_mul(length).wrapping_add(1); let mut tree = if max_tree_size != 0 { m.alloc_cell(max_tree_size as usize) } else { AllocHT::AllocatedMemory::default() // null }; let mut count_limit: u32; if !(0i32 == 0) { return; } count_limit = 1u32; 'break11: loop { { let mut node_index: u32 = 0u32; let mut l: u64; l = length; while l != 0 { l = l.wrapping_sub(1); if histogram[(l as (usize))] != 0 { if histogram[(l as (usize))] >= count_limit { InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))], histogram[(l as (usize))], -1i32 as (i16), l as (i16)); } else { InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))], count_limit, -1i32 as (i16), l as (i16)); } node_index = node_index.wrapping_add(1 as (u32)); } } { let n: i32 = 
node_index as (i32); let sentinel: HuffmanTree; let mut i: i32 = 0i32; let mut j: i32 = n + 1i32; let mut k: i32; SortHuffmanTreeItems(tree.slice_mut(), n as (usize), SimpleSortHuffmanTree {}); sentinel = NewHuffmanTree(!(0u32), -1i16, -1i16); tree.slice_mut()[(node_index.wrapping_add(1u32) as (usize))] = sentinel.clone(); tree.slice_mut()[(node_index as (usize))] = sentinel.clone(); node_index = node_index.wrapping_add(2u32); k = n - 1i32; while k > 0i32 { { let left: i32; let right: i32; if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ { left = i; i = i + 1; } else { left = j; j = j + 1; } if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ { right = i; i = i + 1; } else { right = j; j = j + 1; } let sum_total = (tree.slice()[(left as (usize))]) .total_count_ .wrapping_add((tree.slice()[(right as (usize))]).total_count_); let tree_ind = (node_index.wrapping_sub(1u32) as (usize)); (tree.slice_mut()[tree_ind]).total_count_ = sum_total; (tree.slice_mut()[tree_ind]).index_left_ = left as (i16); (tree.slice_mut()[tree_ind]).index_right_or_value_ = right as (i16); tree.slice_mut()[(node_index as (usize))] = sentinel.clone(); node_index = node_index.wrapping_add(1u32); } k = k - 1; } if BrotliSetDepth(2i32 * n - 1i32, tree.slice_mut(), depth, 14i32) { { break 'break11; } } } } count_limit = count_limit.wrapping_mul(2u32); } { m.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default())); } } BrotliConvertBitDepthsToSymbols(depth, length as usize, bits); if count <= 4 { let mut i: u64; BrotliWriteBits(2, 1, storage_ix, storage); BrotliWriteBits(2, count.wrapping_sub(1) as u64, storage_ix, storage); i = 0; while i < count { { let mut j: u64; j = i.wrapping_add(1); while j < count { { if depth[(symbols[j as usize] as (usize))] as (i32) < depth[(symbols[i as usize] as (usize)) as usize] as (i32) { let brotli_swap_tmp: u64 = symbols[j as usize]; symbols[j as usize] = 
symbols[i as usize]; symbols[i as usize] = brotli_swap_tmp; } } j = j.wrapping_add(1); } } i = i.wrapping_add(1); } if count == 2 { BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); } else if count == 3 { BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage); } else { BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[3usize], storage_ix, storage); BrotliWriteBits(1, if depth[(symbols[0usize] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage); } } else { let mut previous_value: u8 = 8i32 as (u8); let mut i: u64; StoreStaticCodeLengthCode(storage_ix, storage); i = 0; while i < length { let value: u8 = depth[(i as (usize))]; let mut reps: u64 = 1; let mut k: u64; k = i.wrapping_add(1); while k < length && (depth[(k as (usize))] as (i32) == value as (i32)) { { reps = reps.wrapping_add(1); } k = k.wrapping_add(1); } i = i.wrapping_add(reps); if value as (i32) == 0i32 { BrotliWriteBits(kZeroRepsDepth[reps as usize] as (u8), kZeroRepsBits[reps as usize] as u64, storage_ix, storage); } else { if previous_value as (i32) != value as (i32) { BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8), kCodeLengthBits[value as (usize)] as (u64), storage_ix, storage); reps = reps.wrapping_sub(1); } if reps < 3 { while reps != 0 { reps = reps.wrapping_sub(1); BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8), kCodeLengthBits[value as (usize)] as (u64), storage_ix, storage); } } else { reps = reps.wrapping_sub(3); BrotliWriteBits(kNonZeroRepsDepth[reps as usize] as (u8), kNonZeroRepsBits[reps as 
usize] as u64, storage_ix, storage); } previous_value = value; } } } } pub struct MetaBlockSplit<AllocU8: alloc::Allocator<u8>, AllocU32: alloc::Allocator<u32>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC: alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>> { pub literal_split: BlockSplit<AllocU8, AllocU32>, pub command_split: BlockSplit<AllocU8, AllocU32>, pub distance_split: BlockSplit<AllocU8, AllocU32>, pub literal_context_map: AllocU32::AllocatedMemory, pub literal_context_map_size: usize, pub distance_context_map: AllocU32::AllocatedMemory, pub distance_context_map_size: usize, pub literal_histograms: AllocHL::AllocatedMemory, pub literal_histograms_size: usize, pub command_histograms: AllocHC::AllocatedMemory, pub command_histograms_size: usize, pub distance_histograms: AllocHD::AllocatedMemory, pub distance_histograms_size: usize, } impl <AllocU8: alloc::Allocator<u8>, AllocU32: alloc::Allocator<u32>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC: alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>> MetaBlockSplit <AllocU8, AllocU32, AllocHL, AllocHC, AllocHD> { pub fn new() -> Self { return MetaBlockSplit { literal_split:BlockSplit::<AllocU8, AllocU32>::new(), command_split:BlockSplit::<AllocU8, AllocU32>::new(), distance_split:BlockSplit::<AllocU8, AllocU32>::new(), literal_context_map : AllocU32::AllocatedMemory::default(), literal_context_map_size : 0, distance_context_map : AllocU32::AllocatedMemory::default(), distance_context_map_size : 0, literal_histograms : AllocHL::AllocatedMemory::default(), literal_histograms_size : 0, command_histograms : AllocHC::AllocatedMemory::default(), command_histograms_size : 0, distance_histograms : AllocHD::AllocatedMemory::default(), distance_histograms_size : 0, } } pub fn destroy(&mut self, m8: &mut AllocU8, m32: &mut AllocU32, mhl: &mut AllocHL, mhc: &mut AllocHC, mhd: &mut AllocHD) { self.literal_split.destroy(m8,m32); 
self.command_split.destroy(m8,m32); self.distance_split.destroy(m8,m32); m32.free_cell(core::mem::replace(&mut self.literal_context_map, AllocU32::AllocatedMemory::default())); self.literal_context_map_size = 0; m32.free_cell(core::mem::replace(&mut self.distance_context_map, AllocU32::AllocatedMemory::default())); self.distance_context_map_size = 0; mhl.free_cell(core::mem::replace(&mut self.literal_histograms, AllocHL::AllocatedMemory::default())); self.literal_histograms_size = 0; mhc.free_cell(core::mem::replace(&mut self.command_histograms, AllocHC::AllocatedMemory::default())); self.command_histograms_size = 0; mhd.free_cell(core::mem::replace(&mut self.distance_histograms, AllocHD::AllocatedMemory::default())); self.distance_histograms_size = 0; } } #[derive(Clone, Copy)] pub struct BlockTypeCodeCalculator { pub last_type: usize, pub second_last_type: usize, } pub struct BlockSplitCode { pub type_code_calculator: BlockTypeCodeCalculator, pub type_depths: [u8; 258], pub type_bits: [u16; 258], pub length_depths: [u8; 26], pub length_bits: [u16; 26], } pub struct BlockEncoder<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> { /* pub alloc_u8 : AllocU8, pub alloc_u16 : AllocU16, pub alloc_u32 : AllocU32, pub alloc_ht : AllocHT,*/ pub alphabet_size_: usize, pub num_block_types_: usize, pub block_types_: &'a [u8], pub block_lengths_: &'a [u32], pub num_blocks_: usize, pub block_split_code_: BlockSplitCode, pub block_ix_: usize, pub block_len_: usize, pub entropy_ix_: usize, pub depths_: AllocU8::AllocatedMemory, pub bits_: AllocU16::AllocatedMemory, } fn Log2FloorNonZero(mut n: u64) -> u32 { let mut result: u32 = 0u32; 'loop1: loop { if { n = n >> 1i32; n } != 0 { result = result.wrapping_add(1 as (u32)); continue 'loop1; } else { break 'loop1; } } result } fn BrotliEncodeMlen(length: u32, bits: &mut u64, numbits: &mut u32, nibblesbits: &mut u32) { let lg: u32 = (if length == 1u32 { 1u32 } else { Log2FloorNonZero(length.wrapping_sub(1u32) as 
(u32) as (u64)).wrapping_add(1u32) }) as (u32); let mnibbles: u32 = (if lg < 16u32 { 16u32 } else { lg.wrapping_add(3u32) }) .wrapping_div(4u32); assert!(length > 0); assert!(length <= (1 << 24)); assert!(lg <= 24); *nibblesbits = mnibbles.wrapping_sub(4u32); *numbits = mnibbles.wrapping_mul(4u32); *bits = length.wrapping_sub(1u32) as u64; } fn StoreCompressedMetaBlockHeader(is_final_block: i32, length: usize, storage_ix: &mut usize, storage: &mut [u8]) { let mut lenbits: u64 = 0; let mut nlenbits: u32 = 0; let mut nibblesbits: u32 = 0; BrotliWriteBits(1, is_final_block as (u64), storage_ix, storage); if is_final_block != 0 { BrotliWriteBits(1, 0, storage_ix, storage); } BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits); BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage); BrotliWriteBits(nlenbits as u8, lenbits, storage_ix, storage); if is_final_block == 0 { BrotliWriteBits(1, 0, storage_ix, storage); } } fn NewBlockTypeCodeCalculator() -> BlockTypeCodeCalculator { return BlockTypeCodeCalculator { last_type: 1, second_last_type: 0, }; } fn NewBlockEncoder<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (alphabet_size: usize, num_block_types: usize, block_types: &'a [u8], block_lengths: &'a [u32], num_blocks: usize) -> BlockEncoder<'a, AllocU8, AllocU16> { let block_len: usize; if num_blocks != 0 && block_lengths.len() != 0 { block_len = block_lengths[0] as usize; } else { block_len = 0; } return BlockEncoder::<AllocU8, AllocU16> { alphabet_size_: alphabet_size, num_block_types_: num_block_types, block_types_: block_types, block_lengths_: block_lengths, num_blocks_: num_blocks, block_split_code_: BlockSplitCode { type_code_calculator: NewBlockTypeCodeCalculator(), type_depths: [0; 258], type_bits: [0; 258], length_depths: [0; 26], length_bits: [0; 26], }, block_ix_: 0, block_len_: block_len, entropy_ix_: 0, depths_: AllocU8::AllocatedMemory::default(), bits_: AllocU16::AllocatedMemory::default(), }; } fn 
// (The `fn` keyword for NextBlockTypeCode sits at the end of the previous
// source chunk.)
// Maps a raw block type to its transmitted type code using the two-entry
// move-to-front-style history kept in `calculator`:
//   1           -> "previous type + 1"
//   0           -> "second-to-last type again"
//   type + 2    -> literal type value otherwise
// Updates the history (second_last <- last, last <- type_) before returning.
NextBlockTypeCode(calculator: &mut BlockTypeCodeCalculator, type_: u8) -> usize {
    let type_code: usize = (if type_ as (usize) == (*calculator).last_type.wrapping_add(1usize) {
        1u32
    } else if type_ as (usize) == (*calculator).second_last_type {
        0u32
    } else {
        (type_ as (u32)).wrapping_add(2u32)
    }) as (usize);
    (*calculator).second_last_type = (*calculator).last_type;
    (*calculator).last_type = type_ as (usize);
    type_code
}

/// Returns the index of the block-length prefix symbol whose range contains
/// `len`: a coarse initial guess from a few threshold tests, then a linear
/// scan forward through `kBlockLengthPrefixCode` offsets (26 symbols total).
fn BlockLengthPrefixCode(len: u32) -> u32 {
    let mut code: u32 = (if len >= 177u32 {
        if len >= 753u32 { 20i32 } else { 14i32 }
    } else if len >= 41u32 {
        7i32
    } else {
        0i32
    }) as (u32);
    while code < (26i32 - 1i32) as (u32) &&
          (len >= kBlockLengthPrefixCode[code.wrapping_add(1u32) as (usize)].offset) {
        code = code.wrapping_add(1 as (u32));
    }
    code
}

/// Writes a variable-length unsigned value to the bit stream:
/// a single 0 bit for n == 0; otherwise a 1 bit, 3 bits of
/// floor(log2(n)), then that many bits holding n - 2^nbits.
fn StoreVarLenUint8(n: u64, storage_ix: &mut usize, storage: &mut [u8]) {
    if n == 0 {
        BrotliWriteBits(1, 0, storage_ix, storage);
    } else {
        let nbits: u8 = Log2FloorNonZero(n) as (u8);
        BrotliWriteBits(1, 1, storage_ix, storage);
        BrotliWriteBits(3, nbits as u64, storage_ix, storage);
        BrotliWriteBits(nbits, n.wrapping_sub(1u64 << nbits), storage_ix, storage);
    }
}

/// Stores a "simple" Huffman tree (at most 4 symbols) to the bit stream:
/// writes the 2-bit simple-tree marker and the symbol count, insertion-sorts
/// the symbols by their bit depth, then emits each symbol with `max_bits`
/// bits. Body continues on the next source chunk.
fn StoreSimpleHuffmanTree(depths: &[u8],
                          symbols: &mut [usize],
                          num_symbols: usize,
                          max_bits: usize,
                          storage_ix: &mut usize,
                          storage: &mut [u8]) {
    BrotliWriteBits(2, 1, storage_ix, storage);
    BrotliWriteBits(2, num_symbols.wrapping_sub(1) as u64, storage_ix, storage);
    {
        // Insertion sort of `symbols` by ascending depth (at most 4 entries).
        let mut i: usize;
        i = 0usize;
        while i < num_symbols {
            {
                let mut j: usize;
                j = i.wrapping_add(1usize);
                while j < num_symbols {
                    {
                        if depths[(symbols[(j as (usize))] as (usize))] as (i32) <
                           depths[(symbols[(i as (usize))] as (usize))] as (i32) {
                            let mut __brotli_swap_tmp: usize = symbols[(j as (usize))];
                            symbols[(j as (usize))] = symbols[(i as (usize))];
                            symbols[(i as (usize))] = __brotli_swap_tmp;
                        }
                    }
                    j = j.wrapping_add(1 as (usize));
                }
            }
            i = i.wrapping_add(1 as (usize));
        }
    }
    if num_symbols == 2usize {
        BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage);
BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); } else if num_symbols == 3usize { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); } else { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(3usize)] as u64, storage_ix, storage); BrotliWriteBits(1, if depths[(symbols[(0usize)] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage); } } fn BuildAndStoreHuffmanTree(histogram: &[u32], length: usize, tree: &mut [HuffmanTree], depth: &mut [u8], bits: &mut [u16], storage_ix: &mut usize, storage: &mut [u8]) { let mut count: usize = 0usize; let mut s4: [usize; 4] = [0usize, 0usize, 0usize, 0usize]; let mut i: usize; let mut max_bits: usize = 0usize; i = 0usize; 'break31: while i < length { { if histogram[(i as (usize))] != 0 { if count < 4usize { s4[count] = i; } else if count > 4usize { { break 'break31; } } count = count.wrapping_add(1 as (usize)); } } i = i.wrapping_add(1 as (usize)); } { let mut max_bits_counter: usize = length.wrapping_sub(1usize); while max_bits_counter != 0 { max_bits_counter = max_bits_counter >> 1i32; max_bits = max_bits.wrapping_add(1 as (usize)); } } if count <= 1usize { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits as u8, s4[0usize] as u64, storage_ix, storage); depth[(s4[0usize] as (usize))] = 0i32 as (u8); bits[(s4[0usize] as (usize))] = 0i32 as (u16); return; } for depth_elem in depth[..length].iter_mut() { *depth_elem = 0; // memset } BrotliCreateHuffmanTree(histogram, length, 15i32, tree, depth); BrotliConvertBitDepthsToSymbols(depth, length, 
bits); if count <= 4usize { StoreSimpleHuffmanTree(depth, &mut s4[..], count, max_bits, storage_ix, storage); } else { BrotliStoreHuffmanTree(depth, length, tree, storage_ix, storage); } } fn GetBlockLengthPrefixCode(len: u32, code: &mut usize, n_extra: &mut u32, extra: &mut u32) { *code = BlockLengthPrefixCode(len) as (usize); *n_extra = kBlockLengthPrefixCode[*code].nbits; *extra = len.wrapping_sub(kBlockLengthPrefixCode[*code].offset); } fn StoreBlockSwitch(code: &mut BlockSplitCode, block_len: u32, block_type: u8, is_first_block: i32, storage_ix: &mut usize, storage: &mut [u8]) { let typecode: usize = NextBlockTypeCode(&mut (*code).type_code_calculator, block_type); let mut lencode: usize = 0; let mut len_nextra: u32 = 0; let mut len_extra: u32 = 0; if is_first_block == 0 { BrotliWriteBits((*code).type_depths[typecode] as (u8), (*code).type_bits[typecode] as (u64), storage_ix, storage); } GetBlockLengthPrefixCode(block_len, &mut lencode, &mut len_nextra, &mut len_extra); BrotliWriteBits((*code).length_depths[lencode] as (u8), (*code).length_bits[lencode] as (u64), storage_ix, storage); BrotliWriteBits(len_nextra as (u8), len_extra as (u64), storage_ix, storage); } fn BuildAndStoreBlockSplitCode(types: &[u8], lengths: &[u32], num_blocks: usize, num_types: usize, tree: &mut [HuffmanTree], code: &mut BlockSplitCode, storage_ix: &mut usize, storage: &mut [u8]) { let mut type_histo: [u32; 258] = [0; 258]; let mut length_histo: [u32; 26] = [0; 26]; let mut i: usize; let mut type_code_calculator = NewBlockTypeCodeCalculator(); i = 0usize; while i < num_blocks { { let type_code: usize = NextBlockTypeCode(&mut type_code_calculator, types[(i as (usize))]); if i != 0usize { let _rhs = 1; let _lhs = &mut type_histo[type_code]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } { let _rhs = 1; let _lhs = &mut length_histo[BlockLengthPrefixCode(lengths[(i as (usize))]) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } } i = i.wrapping_add(1 as (usize)); } 
StoreVarLenUint8(num_types.wrapping_sub(1) as u64, storage_ix, storage); if num_types > 1usize { BuildAndStoreHuffmanTree(&mut type_histo[0usize..], num_types.wrapping_add(2usize), tree, &mut (*code).type_depths[0usize..], &mut (*code).type_bits[0usize..], storage_ix, storage); BuildAndStoreHuffmanTree(&mut length_histo[0usize..], 26usize, tree, &mut (*code).length_depths[0usize..], &mut (*code).length_bits[0usize..], storage_ix, storage); StoreBlockSwitch(code, lengths[(0usize)], types[(0usize)], 1i32, storage_ix, storage); } } fn BuildAndStoreBlockSwitchEntropyCodes<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (xself: &mut BlockEncoder<'a, AllocU8, AllocU16>, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { BuildAndStoreBlockSplitCode((*xself).block_types_, (*xself).block_lengths_, (*xself).num_blocks_, (*xself).num_block_types_, tree, &mut (*xself).block_split_code_, storage_ix, storage); } fn StoreTrivialContextMap(num_types: usize, context_bits: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { StoreVarLenUint8(num_types.wrapping_sub(1usize) as u64, storage_ix, storage); if num_types > 1usize { let repeat_code: usize = context_bits.wrapping_sub(1u32 as (usize)); let repeat_bits: usize = (1u32 << repeat_code).wrapping_sub(1u32) as (usize); let alphabet_size: usize = num_types.wrapping_add(repeat_code); let mut histogram: [u32; 272] = [0; 272]; let mut depths: [u8; 272] = [0; 272]; let mut bits: [u16; 272] = [0; 272]; let mut i: usize; BrotliWriteBits(1u8, 1u64, storage_ix, storage); BrotliWriteBits(4u8, repeat_code.wrapping_sub(1usize) as u64, storage_ix, storage); histogram[repeat_code] = num_types as (u32); histogram[0usize] = 1u32; i = context_bits; while i < alphabet_size { { histogram[i] = 1u32; } i = i.wrapping_add(1 as (usize)); } BuildAndStoreHuffmanTree(&mut histogram[..], alphabet_size, tree, &mut depths[..], &mut bits[..], storage_ix, storage); i = 0usize; while i < num_types 
{ { let code: usize = if i == 0usize { 0usize } else { i.wrapping_add(context_bits).wrapping_sub(1usize) }; BrotliWriteBits(depths[code] as (u8), bits[code] as (u64), storage_ix, storage); BrotliWriteBits(depths[repeat_code] as (u8), bits[repeat_code] as (u64), storage_ix, storage); BrotliWriteBits(repeat_code as u8, repeat_bits as u64, storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); } } fn IndexOf(v: &[u8], v_size: usize, value: u8) -> usize { let mut i: usize = 0usize; while i < v_size { { if v[(i as (usize))] as (i32) == value as (i32) { return i; } } i = i.wrapping_add(1 as (usize)); } i } fn MoveToFront(v: &mut [u8], index: usize) { let value: u8 = v[(index as (usize))]; let mut i: usize; i = index; while i != 0usize { { v[(i as (usize))] = v[(i.wrapping_sub(1usize) as (usize))]; } i = i.wrapping_sub(1 as (usize)); } v[(0usize)] = value; } fn MoveToFrontTransform(v_in: &[u32], v_size: usize, v_out: &mut [u32]) { let mut i: usize; let mut mtf: [u8; 256] = [0; 256]; let mut max_value: u32; if v_size == 0usize { return; } max_value = v_in[(0usize)]; i = 1usize; while i < v_size { { if v_in[(i as (usize))] > max_value { max_value = v_in[(i as (usize))]; } } i = i.wrapping_add(1 as (usize)); } 0i32; i = 0usize; while i <= max_value as (usize) { { mtf[i] = i as (u8); } i = i.wrapping_add(1 as (usize)); } { let mtf_size: usize = max_value.wrapping_add(1u32) as (usize); i = 0usize; while i < v_size { { let index: usize = IndexOf(&mtf[..], mtf_size, v_in[(i as (usize))] as (u8)); 0i32; v_out[(i as (usize))] = index as (u32); MoveToFront(&mut mtf[..], index); } i = i.wrapping_add(1 as (usize)); } } } fn brotli_max_uint32_t(a: u32, b: u32) -> u32 { if a > b { a } else { b } } fn brotli_min_uint32_t(a: u32, b: u32) -> u32 { if a < b { a } else { b } } fn RunLengthCodeZeros(in_size: usize, v: &mut [u32], out_size: &mut usize, max_run_length_prefix: &mut u32) { let mut max_reps: u32 = 0u32; let mut i: usize; let mut 
max_prefix: u32; i = 0usize; while i < in_size { let mut reps: u32 = 0u32; while i < in_size && (v[(i as (usize))] != 0u32) { i = i.wrapping_add(1 as (usize)); } while i < in_size && (v[(i as (usize))] == 0u32) { { reps = reps.wrapping_add(1 as (u32)); } i = i.wrapping_add(1 as (usize)); } max_reps = brotli_max_uint32_t(reps, max_reps); } max_prefix = if max_reps > 0u32 { Log2FloorNonZero(max_reps as (u64)) } else { 0u32 }; max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix); *max_run_length_prefix = max_prefix; *out_size = 0usize; i = 0usize; while i < in_size { 0i32; if v[(i as (usize))] != 0u32 { v[(*out_size as (usize))] = (v[(i as (usize))]).wrapping_add(*max_run_length_prefix); i = i.wrapping_add(1 as (usize)); *out_size = (*out_size).wrapping_add(1 as (usize)); } else { let mut reps: u32 = 1u32; let mut k: usize; k = i.wrapping_add(1usize); while k < in_size && (v[(k as (usize))] == 0u32) { { reps = reps.wrapping_add(1 as (u32)); } k = k.wrapping_add(1 as (usize)); } i = i.wrapping_add(reps as (usize)); while reps != 0u32 { if reps < 2u32 << max_prefix { let run_length_prefix: u32 = Log2FloorNonZero(reps as (u64)); let extra_bits: u32 = reps.wrapping_sub(1u32 << run_length_prefix); v[(*out_size as (usize))] = run_length_prefix.wrapping_add(extra_bits << 9i32); *out_size = (*out_size).wrapping_add(1 as (usize)); { { break; } } } else { let extra_bits: u32 = (1u32 << max_prefix).wrapping_sub(1u32); v[(*out_size as (usize))] = max_prefix.wrapping_add(extra_bits << 9i32); reps = reps.wrapping_sub((2u32 << max_prefix).wrapping_sub(1u32)); *out_size = (*out_size).wrapping_add(1 as (usize)); } } } } } fn EncodeContextMap<AllocU32: alloc::Allocator<u32>>(m: &mut AllocU32, context_map: &[u32], context_map_size: usize, num_clusters: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { let mut i: usize; let mut rle_symbols: AllocU32::AllocatedMemory; let mut max_run_length_prefix: u32 = 6u32; let mut num_rle_symbols: usize = 
0usize; static kSymbolMask: u32 = (1u32 << 9i32) - 1; let mut depths: [u8; 272] = [0; 272]; let mut bits: [u16; 272] = [0; 272]; StoreVarLenUint8(num_clusters.wrapping_sub(1usize) as u64, storage_ix, storage); if num_clusters == 1usize { return; } rle_symbols = if context_map_size != 0 { m.alloc_cell(context_map_size) } else { AllocU32::AllocatedMemory::default() }; MoveToFrontTransform(context_map, context_map_size, rle_symbols.slice_mut()); RunLengthCodeZeros(context_map_size, rle_symbols.slice_mut(), &mut num_rle_symbols, &mut max_run_length_prefix); let mut histogram: [u32; 272] = [0; 272]; i = 0usize; while i < num_rle_symbols { { let _rhs = 1; let _lhs = &mut histogram[(rle_symbols.slice()[(i as (usize))] & kSymbolMask) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } i = i.wrapping_add(1 as (usize)); } { let use_rle: i32 = if !!(max_run_length_prefix > 0u32) { 1i32 } else { 0i32 }; BrotliWriteBits(1, use_rle as (u64), storage_ix, storage); if use_rle != 0 { BrotliWriteBits(4, max_run_length_prefix.wrapping_sub(1u32) as (u64), storage_ix, storage); } } BuildAndStoreHuffmanTree(&mut histogram[..], num_clusters.wrapping_add(max_run_length_prefix as (usize)), tree, &mut depths[..], &mut bits[..], storage_ix, storage); i = 0usize; while i < num_rle_symbols { { let rle_symbol: u32 = rle_symbols.slice()[(i as (usize))] & kSymbolMask; let extra_bits_val: u32 = rle_symbols.slice()[(i as (usize))] >> 9i32; BrotliWriteBits(depths[rle_symbol as (usize)] as (u8), bits[rle_symbol as (usize)] as (u64), storage_ix, storage); if rle_symbol > 0u32 && (rle_symbol <= max_run_length_prefix) { BrotliWriteBits(rle_symbol as (u8), extra_bits_val as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); m.free_cell(rle_symbols); } fn BuildAndStoreEntropyCodes<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, HistogramType: SliceWrapper<u32>> (m8: &mut AllocU8, m16: &mut AllocU16, xself: &mut 
BlockEncoder<AllocU8, AllocU16>, histograms: &[HistogramType], histograms_size: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { m8.alloc_cell(table_size) } else { AllocU8::AllocatedMemory::default() }; (*xself).bits_ = if table_size != 0 { m16.alloc_cell(table_size) } else { AllocU16::AllocatedMemory::default() }; { let mut i: usize; i = 0usize; while i < histograms_size { { let ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&(histograms[(i as (usize))]).slice()[0..], alphabet_size, tree, &mut (*xself).depths_.slice_mut()[(ix as (usize))..], &mut (*xself).bits_.slice_mut()[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } fn StoreSymbol<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (xself: &mut BlockEncoder<AllocU8, AllocU16>, symbol: usize, storage_ix: &mut usize, storage: &mut [u8]){ if (*xself).block_len_ == 0usize { let block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let block_len: u32 = (*xself).block_lengths_[(block_ix as (usize))]; let block_type: u8 = (*xself).block_types_[(block_ix as (usize))]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = (block_type as (usize)).wrapping_mul((*xself).alphabet_size_); StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let ix: usize = (*xself).entropy_ix_.wrapping_add(symbol); BrotliWriteBits((*xself).depths_.slice()[(ix as (usize))] as (u8), (*xself).bits_.slice()[(ix as (usize))] as (u64), storage_ix, storage); } } fn CommandCopyLenCode(xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 ^ (*xself).copy_len_ >> 24i32 } fn GetInsertExtra(inscode: u16) 
-> u32 { kInsExtra[inscode as (usize)] } fn GetInsertBase(inscode: u16) -> u32 { kInsBase[inscode as (usize)] } fn GetCopyBase(copycode: u16) -> u32 { kCopyBase[copycode as (usize)] } fn GetCopyExtra(copycode: u16) -> u32 { kCopyExtra[copycode as (usize)] } fn StoreCommandExtra(cmd: &Command, storage_ix: &mut usize, storage: &mut [u8]) { let copylen_code: u32 = CommandCopyLenCode(cmd); let inscode: u16 = GetInsertLengthCode((*cmd).insert_len_ as (usize)); let copycode: u16 = GetCopyLengthCode(copylen_code as (usize)); let insnumextra: u32 = GetInsertExtra(inscode); let insextraval: u64 = (*cmd).insert_len_.wrapping_sub(GetInsertBase(inscode)) as (u64); let copyextraval: u64 = copylen_code.wrapping_sub(GetCopyBase(copycode)) as (u64); let bits: u64 = copyextraval << insnumextra | insextraval; BrotliWriteBits(insnumextra.wrapping_add(GetCopyExtra(copycode)) as (u8), bits, storage_ix, storage); } fn Context(p1: u8, p2: u8, mode: ContextType) -> u8 { match mode { ContextType::CONTEXT_LSB6 => { return (p1 as (i32) & 0x3fi32) as (u8); } ContextType::CONTEXT_MSB6 => { return (p1 as (i32) >> 2i32) as (u8); } ContextType::CONTEXT_UTF8 => { return (kUTF8ContextLookup[p1 as (usize)] as (i32) | kUTF8ContextLookup[(p2 as (i32) + 256i32) as (usize)] as (i32)) as (u8); } ContextType::CONTEXT_SIGNED => { return ((kSigned3BitContextLookup[p1 as (usize)] as (i32) << 3i32) + kSigned3BitContextLookup[p2 as (usize)] as (i32)) as (u8); } } // 0i32 as (u8) } fn StoreSymbolWithContext<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>>(xself: &mut BlockEncoder<AllocU8, AllocU16>, symbol: usize, context: usize, context_map: &[u32], storage_ix: &mut usize, storage: &mut [u8], context_bits: usize){ if (*xself).block_len_ == 0usize { let block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let block_len: u32 = (*xself).block_lengths_[(block_ix as (usize))]; let block_type: u8 = (*xself).block_types_[(block_ix as (usize))]; 
// (continuation of StoreSymbolWithContext — inside the `block_len_ == 0` branch)
(*xself).block_len_ = block_len as (usize);
// Each block type owns 1 << context_bits consecutive entries of the context map.
(*xself).entropy_ix_ = block_type as (usize) << context_bits;
StoreBlockSwitch(&mut (*xself).block_split_code_,
                 block_len,
                 block_type,
                 0i32,
                 storage_ix,
                 storage);
}
(*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize));
{
    // Map (entropy_ix_ + context) through the context map to select a
    // histogram, then emit the code for `symbol` from that histogram's table.
    let histo_ix: usize = context_map[((*xself).entropy_ix_.wrapping_add(context) as (usize))] as (usize);
    let ix: usize = histo_ix.wrapping_mul((*xself).alphabet_size_).wrapping_add(symbol);
    BrotliWriteBits((*xself).depths_.slice()[(ix as (usize))] as (u8),
                    (*xself).bits_.slice()[(ix as (usize))] as (u64),
                    storage_ix,
                    storage);
}
}

/// Copy length of a command with the high (code-adjustment) byte masked off.
fn CommandCopyLen(xself: &Command) -> u32 {
    (*xself).copy_len_ & 0xffffffu32
}

/// Distance context (0..=3) derived from the command's prefix code: contexts
/// 0-2 for short copies in certain command-code ranges, 3 otherwise.
fn CommandDistanceContext(xself: &Command) -> u32 {
    let r: u32 = ((*xself).cmd_prefix_ as (i32) >> 6i32) as (u32);
    let c: u32 = ((*xself).cmd_prefix_ as (i32) & 7i32) as (u32);
    if (r == 0u32 || r == 2u32 || r == 4u32 || r == 7u32) && (c <= 2u32) {
        return c;
    }
    3u32
}

/// Returns the depth/bits buffers of a BlockEncoder to their allocators,
/// leaving the encoder holding default (empty) allocations.
fn CleanupBlockEncoder<AllocU8: alloc::Allocator<u8>,
                       AllocU16: alloc::Allocator<u16>>(m8: &mut AllocU8,
                                                        m16 : &mut AllocU16,
                                                        xself: &mut BlockEncoder<AllocU8, AllocU16>){
    m8.free_cell(core::mem::replace(&mut (*xself).depths_,
                                    AllocU8::AllocatedMemory::default()));
    m16.free_cell(core::mem::replace(&mut (*xself).bits_,
                                     AllocU16::AllocatedMemory::default()));
}

/// Rounds the bit cursor up to the next byte boundary and zeroes the byte at
/// the new write position, so subsequent OR-style bit writes start clean.
fn JumpToByteBoundary(storage_ix: &mut usize, storage: &mut [u8]) {
    *storage_ix = (*storage_ix).wrapping_add(7u32 as (usize)) & !7u32 as (usize);
    storage[((*storage_ix >> 3i32) as (usize))] = 0i32 as (u8);
}

/// Stores a full compressed meta-block: header, block-switch entropy codes,
/// context maps, literal/command/distance entropy codes, then every command
/// with its literals and distance extras. (Signature continues on next line.)
pub fn BrotliStoreMetaBlock<'a,
                            AllocU8: alloc::Allocator<u8>,
                            AllocU16: alloc::Allocator<u16>,
                            AllocU32: alloc::Allocator<u32>,
                            AllocHT: alloc::Allocator<HuffmanTree>,
                            AllocHL: alloc::Allocator<HistogramLiteral>,
                            AllocHC: alloc::Allocator<HistogramCommand>,
                            AllocHD: alloc::Allocator<HistogramDistance>,
                            Cb>
    (m8: &mut AllocU8,
     m16: &mut AllocU16,
     m32: &mut AllocU32,
     mht: &mut AllocHT,
     input: &'a[u8],
     start_pos: usize,
     length: usize,
     mask: usize,
     params:
&BrotliEncoderParams, mut prev_byte: u8, mut prev_byte2: u8, is_last: i32, num_direct_distance_codes: u32, distance_postfix_bits: u32, literal_context_mode: ContextType, distance_cache: &[i32; kNumDistanceCacheEntries], commands: &[Command], n_commands: usize, mb: &mut MetaBlockSplit<AllocU8, AllocU32, AllocHL, AllocHC, AllocHD>, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], callback: &mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, distance_postfix_bits, num_direct_distance_codes, distance_cache, recoder_state, block_split_reference(mb), params, Some(literal_context_mode), callback); } let mut pos: usize = start_pos; let mut i: usize; let num_distance_codes: usize = (16u32) .wrapping_add(num_direct_distance_codes) .wrapping_add(48u32 << distance_postfix_bits) as (usize); let mut tree: AllocHT::AllocatedMemory; let mut literal_enc: BlockEncoder<AllocU8, AllocU16>; let mut command_enc: BlockEncoder<AllocU8, AllocU16>; let mut distance_enc: BlockEncoder<AllocU8, AllocU16>; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { mht.alloc_cell((2i32 * 704i32 + 1i32) as (usize)) } else { AllocHT::AllocatedMemory::default() }; literal_enc = NewBlockEncoder::<AllocU8, AllocU16>(256usize, (*mb).literal_split.num_types, (*mb).literal_split.types.slice(), (*mb).literal_split.lengths.slice(), (*mb).literal_split.num_blocks); command_enc = NewBlockEncoder::<AllocU8, AllocU16>(704usize, (*mb).command_split.num_types, (*mb).command_split.types.slice(), (*mb).command_split.lengths.slice(), (*mb).command_split.num_blocks); distance_enc = NewBlockEncoder::<AllocU8, AllocU16>(num_distance_codes, (*mb).distance_split.num_types, (*mb).distance_split.types.slice(), (*mb).distance_split.lengths.slice(), 
(*mb).distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&mut literal_enc, tree.slice_mut(), storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut command_enc, tree.slice_mut(), storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut distance_enc, tree.slice_mut(), storage_ix, storage); BrotliWriteBits(2, distance_postfix_bits as (u64), storage_ix, storage); BrotliWriteBits(4, (num_direct_distance_codes >> distance_postfix_bits) as (u64), storage_ix, storage); i = 0usize; while i < (*mb).literal_split.num_types { { BrotliWriteBits(2, literal_context_mode as (u64), storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } if (*mb).literal_context_map_size == 0usize { StoreTrivialContextMap((*mb).literal_histograms_size, 6, tree.slice_mut(), storage_ix, storage); } else { EncodeContextMap(m32, (*mb).literal_context_map.slice(), (*mb).literal_context_map_size, (*mb).literal_histograms_size, tree.slice_mut(), storage_ix, storage); } if (*mb).distance_context_map_size == 0usize { StoreTrivialContextMap((*mb).distance_histograms_size, 2usize, tree.slice_mut(), storage_ix, storage); } else { EncodeContextMap(m32, (*mb).distance_context_map.slice(), (*mb).distance_context_map_size, (*mb).distance_histograms_size, tree.slice_mut(), storage_ix, storage); } BuildAndStoreEntropyCodes(m8, m16, &mut literal_enc, (*mb).literal_histograms.slice(), (*mb).literal_histograms_size, tree.slice_mut(), storage_ix, storage); BuildAndStoreEntropyCodes(m8, m16, &mut command_enc, (*mb).command_histograms.slice(), (*mb).command_histograms_size, tree.slice_mut(), storage_ix, storage); BuildAndStoreEntropyCodes(m8, m16, &mut distance_enc, (*mb).distance_histograms.slice(), (*mb).distance_histograms_size, tree.slice_mut(), storage_ix, storage); { mht.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default())); } i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let cmd_code: usize = cmd.cmd_prefix_ as 
(usize); StoreSymbol(&mut command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (*mb).literal_context_map_size == 0usize { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { StoreSymbol(&mut literal_enc, input[((pos & mask) as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } else { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { let context: usize = Context(prev_byte, prev_byte2, literal_context_mode) as (usize); let literal: u8 = input[((pos & mask) as (usize))]; StoreSymbolWithContext(&mut literal_enc, literal as (usize), context, (*mb).literal_context_map.slice(), storage_ix, storage, 6usize); prev_byte2 = prev_byte; prev_byte = literal; pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 { prev_byte2 = input[((pos.wrapping_sub(2usize) & mask) as (usize))]; prev_byte = input[((pos.wrapping_sub(1usize) & mask) as (usize))]; if cmd.cmd_prefix_ as (i32) >= 128i32 { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 24i32; let distextra: usize = (cmd.dist_extra_ & 0xffffffu32) as (usize); if (*mb).distance_context_map_size == 0usize { StoreSymbol(&mut distance_enc, dist_code, storage_ix, storage); } else { let context: usize = CommandDistanceContext(&cmd) as (usize); StoreSymbolWithContext(&mut distance_enc, dist_code, context, (*mb).distance_context_map.slice(), storage_ix, storage, 2usize); } BrotliWriteBits(distnumextra as (u8), distextra as u64, storage_ix, storage); } } } i = i.wrapping_add(1 as (usize)); } CleanupBlockEncoder(m8, m16, &mut distance_enc); CleanupBlockEncoder(m8, m16, &mut command_enc); CleanupBlockEncoder(m8, m16, &mut literal_enc); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn BuildHistograms(input: &[u8], 
start_pos: usize, mask: usize, commands: &[Command], n_commands: usize, lit_histo: &mut HistogramLiteral, cmd_histo: &mut HistogramCommand, dist_histo: &mut HistogramDistance) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let mut j: usize; HistogramAddItem(cmd_histo, cmd.cmd_prefix_ as (usize)); j = cmd.insert_len_ as (usize); while j != 0usize { { HistogramAddItem(lit_histo, input[((pos & mask) as (usize))] as (usize)); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { HistogramAddItem(dist_histo, cmd.dist_prefix_ as (usize)); } } i = i.wrapping_add(1 as (usize)); } } fn StoreDataWithHuffmanCodes(input: &[u8], start_pos: usize, mask: usize, commands: &[Command], n_commands: usize, lit_depth: &[u8], lit_bits: &[u16], cmd_depth: &[u8], cmd_bits: &[u16], dist_depth: &[u8], dist_bits: &[u16], storage_ix: &mut usize, storage: &mut [u8]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let cmd_code: usize = cmd.cmd_prefix_ as (usize); let mut j: usize; BrotliWriteBits(cmd_depth[(cmd_code as (usize))] as (u8), cmd_bits[(cmd_code as (usize))] as (u64), storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); j = cmd.insert_len_ as (usize); while j != 0usize { { let literal: u8 = input[((pos & mask) as (usize))]; BrotliWriteBits(lit_depth[(literal as (usize))] as (u8), lit_bits[(literal as (usize))] as (u64), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 
24i32; let distextra: u32 = cmd.dist_extra_ & 0xffffffu32; BrotliWriteBits(dist_depth[(dist_code as (usize))] as (u8), dist_bits[(dist_code as (usize))] as (u64), storage_ix, storage); BrotliWriteBits(distnumextra as (u8), distextra as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } fn nop<'a>(_data:&[interface::Command<InputReference>]){ } pub fn BrotliStoreMetaBlockTrivial<'a, AllocU32:alloc::Allocator<u32>, Cb> (m32:&mut AllocU32, input: &'a [u8], start_pos: usize, length: usize, mask: usize, params: &BrotliEncoderParams, is_last: i32, distance_cache: &[i32; kNumDistanceCacheEntries], commands: &[Command], n_commands: usize, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], f:&mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, 0, 0, distance_cache, recoder_state, block_split_nop(), params, Some(ContextType::CONTEXT_LSB6), f); } let mut lit_histo: HistogramLiteral = HistogramLiteral::default(); let mut cmd_histo: HistogramCommand = HistogramCommand::default(); let mut dist_histo: HistogramDistance = HistogramDistance::default(); let mut lit_depth: [u8; 256] = [0; 256]; // FIXME these zero-initializations are costly let mut lit_bits: [u16; 256] = [0; 256]; let mut cmd_depth: [u8; 704] = [0; 704]; let mut cmd_bits: [u16; 704] = [0; 704]; let mut dist_depth: [u8; 64] = [0; 64]; let mut dist_bits: [u16; 64] = [0; 64]; const MAX_HUFFMAN_TREE_SIZE: usize = (2i32 * 704i32 + 1i32) as usize; let mut tree: [HuffmanTree; MAX_HUFFMAN_TREE_SIZE] = [HuffmanTree { total_count_: 0, index_left_: 0, index_right_or_value_: 0, }; MAX_HUFFMAN_TREE_SIZE]; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); 
// (continuation of BrotliStoreMetaBlockTrivial: the 13 zero bits encode the
// "trivial" block-split/context-map configuration, then the three Huffman
// tables and the command stream are written)
BrotliWriteBits(13, 0, storage_ix, storage);
BuildAndStoreHuffmanTree(lit_histo.slice_mut(),
                         256,
                         &mut tree[..],
                         &mut lit_depth[..],
                         &mut lit_bits[..],
                         storage_ix,
                         storage);
BuildAndStoreHuffmanTree(cmd_histo.slice_mut(),
                         704usize,
                         &mut tree[..],
                         &mut cmd_depth[..],
                         &mut cmd_bits[..],
                         storage_ix,
                         storage);
BuildAndStoreHuffmanTree(dist_histo.slice_mut(),
                         64usize,
                         &mut tree[..],
                         &mut dist_depth[..],
                         &mut dist_bits[..],
                         storage_ix,
                         storage);
StoreDataWithHuffmanCodes(input,
                          start_pos,
                          mask,
                          commands,
                          n_commands,
                          &mut lit_depth[..],
                          &mut lit_bits[..],
                          &mut cmd_depth[..],
                          &mut cmd_bits[..],
                          &mut dist_depth[..],
                          &mut dist_bits[..],
                          storage_ix,
                          storage);
if is_last != 0 {
    JumpToByteBoundary(storage_ix, storage);
}
}

/// Emits the canned code description for the 704-symbol command alphabet
/// (fixed bit pattern, matching the reference encoder's static table).
fn StoreStaticCommandHuffmanTree(storage_ix: &mut usize, storage: &mut [u8]) {
    BrotliWriteBits(56,
                    0x926244u32 as (u64) << 32i32 | 0x16307003,
                    storage_ix,
                    storage);
    BrotliWriteBits(3, 0x0u64, storage_ix, storage);
}

/// Emits the canned code description for the 64-symbol distance alphabet
/// (fixed bit pattern, matching the reference encoder's static table).
fn StoreStaticDistanceHuffmanTree(storage_ix: &mut usize, storage: &mut [u8]) {
    BrotliWriteBits(28, 0x369dc03u64, storage_ix, storage);
}

/// Borrowed view of one block split: per-block types and lengths plus the
/// number of distinct block types.
struct BlockSplitRef<'a> {
    types: &'a [u8],
    lengths:&'a [u32],
    num_types: u32,
}

impl<'a> Default for BlockSplitRef<'a> {
    fn default() -> Self {
        // Empty split: no recorded blocks, one implicit block type.
        BlockSplitRef {
            types:&[],
            lengths:&[],
            num_types:1,
        }
    }
}

/// Borrowed views over a MetaBlockSplit's literal/command/distance splits
/// and the two context maps.
#[derive(Default)]
struct MetaBlockSplitRefs<'a> {
    btypel : BlockSplitRef<'a>,
    literal_context_map:&'a [u32],
    btypec : BlockSplitRef<'a>,
    btyped : BlockSplitRef<'a>,
    distance_context_map:&'a [u32],
}

/// Placeholder split used when no real MetaBlockSplit is available
/// (e.g. trivial/fast/uncompressed meta-block paths).
fn block_split_nop() -> MetaBlockSplitRefs<'static> {
    return MetaBlockSplitRefs::default()
}

/// Builds borrowed views over the populated prefix of each split in `mb`.
/// (Struct-literal body continues on the next line.)
fn block_split_reference<'a,
                         AllocU8: alloc::Allocator<u8>,
                         AllocU32: alloc::Allocator<u32>,
                         AllocHL: alloc::Allocator<HistogramLiteral>,
                         AllocHC: alloc::Allocator<HistogramCommand>,
                         AllocHD: alloc::Allocator<HistogramDistance>>
    (mb:&'a MetaBlockSplit<AllocU8, AllocU32, AllocHL, AllocHC, AllocHD>)
    -> MetaBlockSplitRefs<'a> {
    return MetaBlockSplitRefs::<'a> {
        btypel:BlockSplitRef {
            types:
mb.literal_split.types.slice().split_at(mb.literal_split.num_blocks).0, lengths:mb.literal_split.lengths.slice().split_at(mb.literal_split.num_blocks).0, num_types:mb.literal_split.num_types as u32, }, literal_context_map: mb.literal_context_map.slice().split_at(mb.literal_context_map_size).0, btypec:BlockSplitRef { types: mb.command_split.types.slice().split_at(mb.command_split.num_blocks).0, lengths:mb.command_split.lengths.slice().split_at(mb.command_split.num_blocks).0, num_types:mb.command_split.num_types as u32, }, btyped:BlockSplitRef { types: mb.distance_split.types.slice().split_at(mb.distance_split.num_blocks).0, lengths:mb.distance_split.lengths.slice().split_at(mb.distance_split.num_blocks).0, num_types:mb.distance_split.num_types as u32, }, distance_context_map: mb.distance_context_map.slice().split_at(mb.distance_context_map_size).0, } } #[derive(Clone, Copy)] pub struct RecoderState { pub num_bytes_encoded : usize, } impl RecoderState { pub fn new() -> Self { RecoderState{ num_bytes_encoded:0, } } } pub fn BrotliStoreMetaBlockFast<Cb, AllocU32:alloc::Allocator<u32>, AllocHT: alloc::Allocator<HuffmanTree>>(m : &mut AllocHT, m32: &mut AllocU32, input: &[u8], start_pos: usize, length: usize, mask: usize, params: &BrotliEncoderParams, is_last: i32, dist_cache: &[i32; kNumDistanceCacheEntries], commands: &[Command], n_commands: usize, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], cb: &mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, 0, 0, dist_cache, recoder_state, block_split_nop(), params, Some(ContextType::CONTEXT_LSB6), cb); } StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13, 0, storage_ix, storage); if n_commands <= 128usize { let mut histogram: [u32; 256] = [0; 256]; let mut pos: usize = 
start_pos; let mut num_literals: usize = 0usize; let mut i: usize; let mut lit_depth: [u8; 256] = [0; 256]; let mut lit_bits: [u16; 256] = [0; 256]; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { { let _rhs = 1; let _lhs = &mut histogram[input[((pos & mask) as (usize))] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } num_literals = num_literals.wrapping_add(cmd.insert_len_ as (usize)); pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); } i = i.wrapping_add(1 as (usize)); } BrotliBuildAndStoreHuffmanTreeFast(m, &mut histogram[..], num_literals, 8usize, &mut lit_depth[..], &mut lit_bits[..], storage_ix, storage); StoreStaticCommandHuffmanTree(storage_ix, storage); StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, &mut lit_depth[..], &mut lit_bits[..], &kStaticCommandCodeDepth[..], &kStaticCommandCodeBits[..], &kStaticDistanceCodeDepth[..], &kStaticDistanceCodeBits[..], storage_ix, storage); } else { let mut lit_histo: HistogramLiteral = HistogramLiteral::default(); let mut cmd_histo: HistogramCommand = HistogramCommand::default(); let mut dist_histo: HistogramDistance = HistogramDistance::default(); let mut lit_depth: [u8; 256] = [0; 256]; let mut lit_bits: [u16; 256] = [0; 256]; let mut cmd_depth: [u8; 704] = [0; 704]; let mut cmd_bits: [u16; 704] = [0; 704]; let mut dist_depth: [u8; 64] = [0; 64]; let mut dist_bits: [u16; 64] = [0; 64]; BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.slice(), lit_histo.total_count_, 8usize, &mut lit_depth[..], &mut lit_bits[..], storage_ix, storage); BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.slice(), cmd_histo.total_count_, 10usize, &mut 
// (continuation of BrotliStoreMetaBlockFast, n_commands > 128 path: finish
// the command-code table call started on the previous line, write the
// distance table, then the command stream)
cmd_depth[..],
&mut cmd_bits[..],
storage_ix,
storage);
BrotliBuildAndStoreHuffmanTreeFast(m,
                                   dist_histo.slice(),
                                   dist_histo.total_count_,
                                   6usize,
                                   &mut dist_depth[..],
                                   &mut dist_bits[..],
                                   storage_ix,
                                   storage);
StoreDataWithHuffmanCodes(input,
                          start_pos,
                          mask,
                          commands,
                          n_commands,
                          &mut lit_depth[..],
                          &mut lit_bits[..],
                          &mut cmd_depth[..],
                          &mut cmd_bits[..],
                          &mut dist_depth[..],
                          &mut dist_bits[..],
                          storage_ix,
                          storage);
}
if is_last != 0 {
    JumpToByteBoundary(storage_ix, storage);
}
}

/// Writes the header of an uncompressed (stored) meta-block: ISLAST=0, the
/// MLEN nibble count and length bits, then ISUNCOMPRESSED=1.
fn BrotliStoreUncompressedMetaBlockHeader(length: usize,
                                          storage_ix: &mut usize,
                                          storage: &mut [u8]) {
    let mut lenbits: u64 = 0;
    let mut nlenbits: u32 = 0;
    let mut nibblesbits: u32 = 0;
    BrotliWriteBits(1, 0, storage_ix, storage); // ISLAST = 0
    BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits);
    BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage);
    BrotliWriteBits(nlenbits as u8, lenbits as u64, storage_ix, storage);
    BrotliWriteBits(1, 1, storage_ix, storage); // ISUNCOMPRESSED = 1
}

/// Resolves (position, len) inside a ring buffer of size mask+1 into at most
/// two contiguous slices: the span from the masked position to the buffer end
/// and, when the span wraps, the remainder starting at index 0.
fn InputPairFromMaskedInput<'a>(input:&'a [u8],
                                position: usize,
                                len: usize,
                                mask:usize) -> (&'a [u8], &'a [u8]) {
    let masked_pos: usize = position & mask;
    if masked_pos.wrapping_add(len) > mask.wrapping_add(1usize) {
        // Wraps around: split at the end of the ring buffer.
        let len1: usize = mask.wrapping_add(1usize).wrapping_sub(masked_pos);
        return (&input[masked_pos..(masked_pos + len1)],
                &input[0..len.wrapping_sub(len1)]);
    }
    return (&input[masked_pos..masked_pos + len], &[]);
}

/// Stores an uncompressed meta-block: header, byte-aligned raw copy of the
/// (possibly wrapped) input bytes, and — when this is the final block — a
/// trailing empty meta-block. (Body continues on the next line.)
pub fn BrotliStoreUncompressedMetaBlock<Cb, AllocU32:alloc::Allocator<u32>>
    (m32:&mut AllocU32,
     is_final_block: i32,
     input: &[u8],
     position: usize,
     mask: usize,
     params: &BrotliEncoderParams,
     len: usize,
     recoder_state: &mut RecoderState,
     storage_ix: &mut usize,
     storage: &mut [u8],
     suppress_meta_block_logging: bool,
     cb: &mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]){
    let (input0,input1) = InputPairFromMaskedInput(input, position, len, mask);
    BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage);
    JumpToByteBoundary(storage_ix, storage);
    let dst_start0 =
((*storage_ix >> 3i32) as (usize)); storage[dst_start0..(dst_start0 + input0.len())].clone_from_slice(input0); *storage_ix = (*storage_ix).wrapping_add(input0.len() << 3i32); let dst_start1 = ((*storage_ix >> 3i32) as (usize)); storage[dst_start1..(dst_start1 + input1.len())].clone_from_slice(input1); *storage_ix = (*storage_ix).wrapping_add(input1.len() << 3i32); BrotliWriteBitsPrepareStorage(*storage_ix, storage); if params.log_meta_block && !suppress_meta_block_logging { let cmds = [Command{insert_len_:len as u32, copy_len_:0, dist_extra_:0, cmd_prefix_:0, dist_prefix_:0 }]; LogMetaBlock(m32, &cmds, input0, input1, 0, 0, &[0i32, 0i32, 0i32, 0i32], recoder_state, block_split_nop(), params, None, cb); } if is_final_block != 0 { BrotliWriteBits(1u8, 1u64, storage_ix, storage); BrotliWriteBits(1u8, 1u64, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } } pub fn BrotliStoreSyncMetaBlock(storage_ix: &mut usize, storage: &mut [u8]) { BrotliWriteBits(6, 6, storage_ix, storage); JumpToByteBoundary(storage_ix, storage); } compute literal block entropy assuming context map and only choose random if both stride and context map are worse by more than 8 bits #![allow(unknown_lints)] #![allow(dead_code)] #![allow(unused_imports)] #![allow(unused_macros)] #[cfg(not(feature="no-stdlib"))] use std::io::Write; use super::input_pair::InputPair; use super::block_split::BlockSplit; use enc::backward_references::BrotliEncoderParams; use super::super::dictionary::{kBrotliDictionary, kBrotliDictionarySizeBitsByLength, kBrotliDictionaryOffsetsByLength}; use super::super::transform::{TransformDictionaryWord}; use super::static_dict::kNumDistanceCacheEntries; use super::command::{Command, GetCopyLengthCode, GetInsertLengthCode, CommandDistanceIndexAndOffset}; use super::constants::{BROTLI_NUM_BLOCK_LEN_SYMBOLS, kZeroRepsBits, kZeroRepsDepth, kNonZeroRepsBits, kNonZeroRepsDepth, kCodeLengthBits, kCodeLengthDepth, kStaticCommandCodeDepth, kStaticCommandCodeBits, 
kStaticDistanceCodeDepth, kStaticDistanceCodeBits, kSigned3BitContextLookup, kUTF8ContextLookup, kInsBase, kInsExtra, kCopyBase, kCopyExtra}; use super::entropy_encode::{HuffmanTree, BrotliWriteHuffmanTree, BrotliCreateHuffmanTree, BrotliConvertBitDepthsToSymbols, NewHuffmanTree, InitHuffmanTree, SortHuffmanTreeItems, HuffmanComparator, BrotliSetDepth}; use super::histogram::{HistogramAddItem, HistogramLiteral, HistogramCommand, HistogramDistance, ContextType}; use super::super::alloc; use super::super::alloc::{SliceWrapper, SliceWrapperMut}; use super::super::core; use super::find_stride; use super::interface; pub struct PrefixCodeRange { pub offset: u32, pub nbits: u32, } fn window_size_from_lgwin(lgwin: i32) -> usize{ (1usize << lgwin) - 16usize } fn context_type_str(context_type:ContextType) -> &'static str { match context_type { ContextType::CONTEXT_LSB6 => "lsb6", ContextType::CONTEXT_MSB6 => "msb6", ContextType::CONTEXT_UTF8 => "utf8", ContextType::CONTEXT_SIGNED => "sign", } } fn prediction_mode_str(prediction_mode_nibble:interface::LiteralPredictionModeNibble) -> &'static str { match prediction_mode_nibble.prediction_mode() { interface::LITERAL_PREDICTION_MODE_SIGN => "sign", interface::LITERAL_PREDICTION_MODE_LSB6 => "lsb6", interface::LITERAL_PREDICTION_MODE_MSB6 => "msb6", interface::LITERAL_PREDICTION_MODE_UTF8 => "utf8", _ => "unknown", } } #[derive(Copy,Clone,Default)] pub struct InputReference<'a>(pub &'a [u8]); impl<'a> SliceWrapper<u8> for InputReference<'a> { fn slice(&self) -> & [u8] { self.0 } } fn is_long_enough_to_be_random(len: usize, high_entropy_detection_quality:u8) -> bool{ return match high_entropy_detection_quality { 0 => false, 1 => len >= 256, 2 => len >= 128, 3 => len >= 96, 4 => len >= 64, 5 => len >= 48, 6 => len >= 32, 7 => len >= 24, 8 => len >= 16, 9 => len >= 8, 10 => len >= 4, 11 => len >= 1, _ => len >= 8, } } const COMMAND_BUFFER_SIZE: usize = 16384; trait CommandProcessor<'a> { fn push<Cb: 
FnMut(&[interface::Command<InputReference>])>(&mut self, val: interface::Command<InputReference<'a> >, callback :&mut Cb); fn push_literals<Cb>(&mut self, data:&InputPair<'a>, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { if data.0.len() != 0 { self.push(interface::Command::Literal(interface::LiteralCommand{ data:InputReference(data.0), prob:interface::FeatureFlagSliceType::<InputReference>::default(), }), callback); } if data.1.len() != 0 { self.push(interface::Command::Literal(interface::LiteralCommand{ data:InputReference(data.1), prob:interface::FeatureFlagSliceType::<InputReference>::default(), }), callback); } } fn push_rand_literals<Cb>(&mut self, data:&InputPair<'a>, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { if data.0.len() != 0 { self.push(interface::Command::RandLiteral(interface::RandLiteralCommand{ data:InputReference(data.0), }), callback); } if data.1.len() != 0 { self.push(interface::Command::RandLiteral(interface::RandLiteralCommand{ data:InputReference(data.1), }), callback); } } fn push_block_switch_literal<Cb>(&mut self, block_type: u8, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { self.push(interface::Command::BlockSwitchLiteral(interface::LiteralBlockSwitch::new(block_type, 0)), callback) } } struct CommandQueue<'a, AllocU32:alloc::Allocator<u32> > { mb: InputPair<'a>, mb_byte_offset: usize, queue: [interface::Command<InputReference<'a> >;COMMAND_BUFFER_SIZE], loc: usize, last_btypel_index: Option<usize>, entropy_tally_scratch: find_stride::EntropyTally<AllocU32>, entropy_pyramid: find_stride::EntropyPyramid<AllocU32>, context_map_entropy: ContextMapEntropy<'a, AllocU32>, stride_detection_quality: u8, high_entropy_detection_quality: u8, block_type_literal: u8, } impl<'a, AllocU32: alloc::Allocator<u32> > CommandQueue<'a, AllocU32 > { fn new(m32:&mut AllocU32, mb: InputPair<'a>, stride_detection_quality: u8, high_entropy_detection_quality: u8, 
context_map_entropy: ContextMapEntropy<'a, AllocU32>, ) -> CommandQueue <'a, AllocU32> { let mut entropy_tally_scratch = if stride_detection_quality == 0 && high_entropy_detection_quality == 0 { find_stride::EntropyTally::<AllocU32>::disabled_placeholder(m32) } else { if stride_detection_quality == 0 { find_stride::EntropyTally::<AllocU32>::new(m32, Some(1)) } else { find_stride::EntropyTally::<AllocU32>::new(m32, None) } }; let mut entropy_pyramid = if stride_detection_quality == 0 && high_entropy_detection_quality == 0{ find_stride::EntropyPyramid::<AllocU32>::disabled_placeholder(m32) } else { find_stride::EntropyPyramid::<AllocU32>::new(m32) }; if stride_detection_quality > 0 { entropy_pyramid.populate(mb.0, mb.1, &mut entropy_tally_scratch); } else { if high_entropy_detection_quality != 0 { entropy_pyramid.populate_stride1(mb.0, mb.1); } } CommandQueue { mb:mb, mb_byte_offset:0, queue:[interface::Command::<InputReference<'a>>::default();COMMAND_BUFFER_SIZE], loc:0, entropy_tally_scratch: entropy_tally_scratch, entropy_pyramid: entropy_pyramid, last_btypel_index: None, stride_detection_quality: stride_detection_quality, high_entropy_detection_quality: high_entropy_detection_quality, context_map_entropy: context_map_entropy, block_type_literal: 0, } } fn full(&self) -> bool { self.loc == self.queue.len() } fn size(&self) -> usize { self.loc } fn clear(&mut self) { self.loc = 0; self.block_type_literal = 0; } fn content(&mut self) -> &[interface::Command<InputReference>] { self.queue.split_at(self.loc).0 } fn flush<Cb>(&mut self, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { let mut local_byte_offset = self.mb_byte_offset; let mb_len = self.mb.0.len() + self.mb.1.len(); let cur_stride = self.entropy_tally_scratch.pick_best_stride(self.queue.split_at(self.loc).0, self.mb.0, self.mb.1, &mut self.mb_byte_offset, &self.entropy_pyramid, self.stride_detection_quality); if self.high_entropy_detection_quality > 0 { for command in 
self.queue.split_at_mut(self.loc).0.iter_mut() { let mut switch_to_random: Option<InputReference> = None; match *command { interface::Command::BlockSwitchCommand(_) | interface::Command::BlockSwitchDistance(_) | interface::Command::PredictionMode(_) => {}, interface::Command::BlockSwitchLiteral(bs) => { self.block_type_literal = bs.block_type(); }, interface::Command::Copy(ref copy) => { local_byte_offset += copy.num_bytes as usize; }, interface::Command::Dict(ref dict) => { local_byte_offset += dict.final_size as usize; }, interface::Command::RandLiteral(ref lit) => { local_byte_offset += lit.data.slice().len(); }, interface::Command::Literal(ref mut lit) => { if is_long_enough_to_be_random(lit.data.slice().len(), self.high_entropy_detection_quality) { //print!("Long enough to be random {}\n", lit.data.slice().len()); let mut priors = self.entropy_tally_scratch.get_previous_bytes( self.mb.0, self.mb.1, local_byte_offset); let mut rev_priors = priors; rev_priors.reverse(); //print!("Stride {} prev {:?} byte offset {} {:?}\n", cur_stride, rev_priors, local_byte_offset, lit.data.slice()); let literal_cost = self.entropy_pyramid.bit_cost_of_literals( lit.data.slice(), local_byte_offset as u32, mb_len, cur_stride, priors, &mut self.entropy_tally_scratch); let cm_literal_cost = self.context_map_entropy.compute_bit_cost_of_data_subset( lit.data.slice(), priors[0], priors[1], self.block_type_literal, self.entropy_tally_scratch.peek()); let min_cost = if cm_literal_cost < literal_cost { cm_literal_cost } else { literal_cost }; local_byte_offset += lit.data.slice().len(); let random_cost = lit.data.slice().len() as find_stride::floatY * 8.0 + 8.0; print!("Rnd Cost {} ({} bytes) rndratio: {}\nlit Cost {} ({} bytes) ratio {}\nCml Cost {} ({} bytes) ratio {}\n", random_cost, random_cost as f64 / 8.0, random_cost as f64 / min_cost as f64, literal_cost, literal_cost as f64 / 8.0, literal_cost as f64 / 8.0 / lit.data.slice().len() as f64, cm_literal_cost, cm_literal_cost as f64 / 
8.0, cm_literal_cost as f64 / 8.0 / lit.data.slice().len() as f64 ); if random_cost <= min_cost { switch_to_random = Some( core::mem::replace(&mut lit.data, InputReference::default())); } } else { local_byte_offset += lit.data.slice().len(); } } } if let Some(data) = switch_to_random { *command = interface::Command::RandLiteral( interface::RandLiteralCommand{ data: data, }); } } } match self.last_btypel_index.clone() { None => {}, Some(literal_block_type_offset) => { match &mut self.queue[literal_block_type_offset] { &mut interface::Command::BlockSwitchLiteral(ref mut cmd) => cmd.1 = cur_stride, _ => panic!("Logic Error: literal block type index must point to literal block type"), } }, } self.last_btypel_index = None; callback(self.queue.split_at(self.loc).0); self.clear(); } fn free<Cb>(mut self, m32: &mut AllocU32, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { self.flush(callback); self.entropy_tally_scratch.free(m32); self.entropy_pyramid.free(m32); self.context_map_entropy.free(m32); } } impl<'a, AllocU32: alloc::Allocator<u32> > CommandProcessor<'a> for CommandQueue<'a, AllocU32 > { fn push<Cb> (&mut self, val: interface::Command<InputReference<'a> >, callback :&mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { self.queue[self.loc] = val; self.loc += 1; if self.full() { self.flush(callback); } } fn push_block_switch_literal<Cb>(&mut self, block_type: u8, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]) { self.flush(callback); self.last_btypel_index = Some(self.size()); self.push(interface::Command::BlockSwitchLiteral( interface::LiteralBlockSwitch::new(block_type, 0)), callback) } } struct ContextMapEntropy<'a, AllocU32:alloc::Allocator<u32>> { input: InputPair<'a>, entropy_tally: find_stride::EntropyBucketPopulation<AllocU32>, context_map: interface::PredictionModeContextMap<InputReference<'a>>, block_type: u8, local_byte_offset: usize, } impl<'a, AllocU32:alloc::Allocator<u32>> 
// NOTE(review): this chunk opens mid-definition — the `impl<...>` header for this
// inherent impl of ContextMapEntropy lies above the visible region.
ContextMapEntropy<'a, AllocU32> {
// Builds a fresh entropy tracker over `input`: allocates the per-context byte
// histograms and starts at block type 0 / byte offset 0, using `prediction_mode`
// as the literal context map.
fn new(m32: &mut AllocU32, input: InputPair<'a>, prediction_mode: interface::PredictionModeContextMap<InputReference<'a>>) -> Self { ContextMapEntropy::<AllocU32>{ input: input, entropy_tally:find_stride::EntropyBucketPopulation::<AllocU32>::new(m32), context_map: prediction_mode, block_type: 0, local_byte_offset: 0, } }
// Estimates, in bits, the entropy cost attributable to `data` within the tallied
// population: copies the tally into `scratch`, removes one count per byte from the
// bucket selected by the (prev, prev_prev) context, then returns
// full_entropy - reduced_entropy, charging 8 bits for each "stray" byte whose
// bucket was already empty.
fn compute_bit_cost_of_data_subset(&mut self, data: &[u8], mut prev_byte: u8, mut prev_prev_byte: u8, block_type: u8, scratch: &mut find_stride::EntropyBucketPopulation<AllocU32>) -> find_stride::floatY {
scratch.bucket_populations.slice_mut().clone_from_slice(self.entropy_tally.bucket_populations.slice());
// Deliberate no-op pair: proves we hold mutable access to the scratch buffer.
scratch.bucket_populations.slice_mut()[65535] += 1; // to demonstrate that we have
scratch.bucket_populations.slice_mut()[65535] -= 1; // to demonstrate that we have write capability
let mut stray_count = 0 as find_stride::floatY;
for val in data.iter() {
let huffman_table_index = compute_huffman_table_index_for_context_map(prev_byte, prev_prev_byte, self.context_map, block_type);
let loc = &mut scratch.bucket_populations.slice_mut()[huffman_table_index * 256 + *val as usize];
//let mut stray = false;
if *loc == 0 {
// Bucket already empty: this byte was never tallied here — count it as a stray.
stray_count += 1.0;
//stray = true;
} else { *loc -= 1; }
//println!("{} {:02x}{:02x} => {:02x} (bt: {}, ind: {}, cnt: {})", if stray {"S"} else {"L"}, prev_byte, prev_prev_byte, *val, block_type, huffman_table_index, *loc);
prev_prev_byte = prev_byte; prev_byte = *val;
}
// Lazily (re)compute and validate the cached entropy of the full population.
if self.entropy_tally.cached_bit_entropy == 0.0 as find_stride::floatY { self.entropy_tally.cached_bit_entropy = find_stride::HuffmanCost(self.entropy_tally.bucket_populations.slice()); }
debug_assert_eq!(find_stride::HuffmanCost(self.entropy_tally.bucket_populations.slice()), self.entropy_tally.cached_bit_entropy);
scratch.cached_bit_entropy = find_stride::HuffmanCost(scratch.bucket_populations.slice());
self.entropy_tally.cached_bit_entropy - scratch.cached_bit_entropy + stray_count * 8.0
}
// Releases the histogram allocation back to the u32 allocator.
fn free(&mut self, m32: &mut AllocU32) { self.entropy_tally.free(m32); } }
// Maps a two-byte literal context plus block type to a Huffman table index:
// computes the 6-bit context `prior`, forms (block_type << 6) | prior, and looks it
// up in the literal context map; out-of-range indices fall back to `prior` itself.
fn compute_huffman_table_index_for_context_map<SliceType: alloc::SliceWrapper<u8> > ( prev_byte: u8, prev_prev_byte: u8, context_map: interface::PredictionModeContextMap<SliceType>, block_type: u8, ) -> usize { let prior = Context(prev_byte, prev_prev_byte, context_map.literal_prediction_mode.to_context_enum().unwrap()); assert!(prior < 64); let context_map_index = ((block_type as usize)<< 6) | prior as usize; if context_map_index < context_map.literal_context_map.slice().len() { context_map.literal_context_map.slice()[context_map_index] as usize } else { prior as usize } }
// CommandProcessor pass that only tallies literal statistics: non-literal commands
// merely advance `local_byte_offset`; literals are counted into the per-context
// bucket populations. Every command is forwarded unchanged to `callback`.
impl<'a, 'b, AllocU32:alloc::Allocator<u32>> CommandProcessor<'b> for ContextMapEntropy<'a, AllocU32> {
fn push<Cb: FnMut(&[interface::Command<InputReference>])>(&mut self, val: interface::Command<InputReference<'b>>, callback: &mut Cb) {
match val { interface::Command::BlockSwitchCommand(_) | interface::Command::BlockSwitchDistance(_) | interface::Command::PredictionMode(_) => {} interface::Command::Copy(ref copy) => { self.local_byte_offset += copy.num_bytes as usize; }, interface::Command::Dict(ref dict) => { self.local_byte_offset += dict.final_size as usize; }, interface::Command::RandLiteral(ref lit) => { self.local_byte_offset += lit.data.slice().len(); }, interface::Command::BlockSwitchLiteral(block_type) => self.block_type = block_type.block_type(), interface::Command::Literal(ref lit) => {
// Seed the context with the two bytes preceding this literal run (if any).
let mut priors= [0u8, 0u8];
if self.local_byte_offset > 1 { priors[0] = self.input[self.local_byte_offset - 2]; priors[1] = self.input[self.local_byte_offset - 1]; }
for literal in lit.data.slice().iter() {
let huffman_table_index = compute_huffman_table_index_for_context_map(priors[1], priors[0], self.context_map, self.block_type);
self.entropy_tally.bucket_populations.slice_mut()[((huffman_table_index as usize) << 8) | *literal as usize] += 1;
//println!("I {:02x}{:02x} => {:02x} (bt: {}, ind: {} cnt: {})", priors[1], priors[0], *literal, self.block_type,
// NOTE(review): continuation of the commented-out println! begun on the previous
// line — the flattening of this file dropped the original comment boundary.
// huffman_table_index, self.entropy_tally.bucket_populations.slice_mut()[((huffman_table_index as usize) << 8) | *literal as usize]);
priors[0] = priors[1]; priors[1] = *literal; }
self.local_byte_offset += lit.data.slice().len(); } }
// Forward the (unmodified) command downstream.
let cbval = [val]; callback(&cbval[..]); } }
// Best-effort leak warning printed when a CommandQueue is dropped while its
// entropy scratch buffer is still allocated (stdlib builds only).
#[cfg(not(feature="no-stdlib"))] fn warn_on_missing_free() { let _err = ::std::io::stderr().write(b"Need to free entropy_tally_scratch before dropping CommandQueue\n"); }
#[cfg(feature="no-stdlib")] fn warn_on_missing_free() {
// no way to warn in this case
}
// Drop guard: detects a CommandQueue whose scratch allocation was never freed.
impl<'a, AllocU32: alloc::Allocator<u32>> Drop for CommandQueue<'a, AllocU32> { fn drop(&mut self) { if !self.entropy_tally_scratch.is_free() { warn_on_missing_free(); } } }
// Replays a meta-block's command stream through `command_queue`, reconstructing
// literals/copies/dictionary words from `input` and emitting block-switch commands
// at the boundaries recorded in `block_type`. Maintains the brotli distance cache
// locally and returns the updated RecoderState.
fn process_command_queue<'a, Cb:FnMut(&[interface::Command<InputReference>]), CmdProcessor: CommandProcessor<'a> > ( command_queue: &mut CmdProcessor, input: InputPair<'a>, commands: &[Command], n_postfix: u32, n_direct: u32, dist_cache: &[i32;kNumDistanceCacheEntries], mut recoder_state :RecoderState, block_type: &MetaBlockSplitRefs, params: &BrotliEncoderParams, context_type:Option<ContextType>, callback: &mut Cb, ) -> RecoderState {
let mut input_iter = input.clone();
let mut local_dist_cache = [0i32;kNumDistanceCacheEntries];
local_dist_cache.clone_from_slice(&dist_cache[..]);
let mut btypel_counter = 0usize; let mut btypec_counter = 0usize; let mut btyped_counter = 0usize;
// Remaining length in the current literal/command/distance block; a single-type
// split never switches, so use a sentinel length of 2^31.
let mut btypel_sub = if block_type.btypel.num_types == 1 { 1u32<<31 } else {block_type.btypel.lengths[0]};
let mut btypec_sub = if block_type.btypec.num_types == 1 { 1u32<<31 } else {block_type.btypec.lengths[0]};
let mut btyped_sub = if block_type.btyped.num_types == 1 { 1u32<<31 } else {block_type.btyped.lengths[0]};
{ command_queue.push_block_switch_literal(0, callback); }
let mut mb_len = input.len();
for cmd in commands.iter() {
let (inserts, interim) = input_iter.split_at(core::cmp::min(cmd.insert_len_ as usize, mb_len));
recoder_state.num_bytes_encoded += inserts.len();
let _copy_cursor = input.len() - interim.len();
// let distance_context = CommandDistanceContext(cmd);
let copylen_code: u32 = CommandCopyLenCode(cmd);
// Decode the command's distance: index 0 means an explicit distance, otherwise
// it is an offset relative to an entry of the distance cache.
let (prev_dist_index, dist_offset) = CommandDistanceIndexAndOffset(cmd, n_postfix, n_direct);
let final_distance: usize;
if prev_dist_index == 0 { final_distance = dist_offset as usize; } else { final_distance = (local_dist_cache[prev_dist_index - 1] as isize + dist_offset) as usize; }
let copy_len = copylen_code as usize;
let actual_copy_len : usize;
let max_distance = core::cmp::min(recoder_state.num_bytes_encoded, window_size_from_lgwin(params.lgwin));
assert!(inserts.len() <= mb_len);
// Advance the command-block split, emitting a switch when a block ends.
{ btypec_sub -= 1; if btypec_sub == 0 { btypec_counter += 1; if block_type.btypec.types.len() > btypec_counter { btypec_sub = block_type.btypec.lengths[btypec_counter]; command_queue.push(interface::Command::BlockSwitchCommand( interface::BlockSwitch(block_type.btypec.types[btypec_counter])), callback); } else { btypec_sub = 1u32 << 31; } } }
if inserts.len() != 0 {
let mut tmp_inserts = inserts;
while tmp_inserts.len() > btypel_sub as usize {
// we have to divide some:
let (in_a, in_b) = tmp_inserts.split_at(btypel_sub as usize);
if in_a.len() != 0 { if let Some(_) = context_type { command_queue.push_literals(&in_a, callback); } else { command_queue.push_rand_literals(&in_a, callback); } }
mb_len -= in_a.len();
tmp_inserts = in_b;
btypel_counter += 1;
if block_type.btypel.types.len() > btypel_counter { btypel_sub = block_type.btypel.lengths[btypel_counter]; command_queue.push_block_switch_literal(block_type.btypel.types[btypel_counter], callback); } else { btypel_sub = 1u32<<31; }
}
// Remainder of the insert fits inside the current literal block.
if let Some(_) = context_type { command_queue.push_literals(&tmp_inserts, callback); }else { command_queue.push_rand_literals(&tmp_inserts, callback); }
if tmp_inserts.len() != 0 { mb_len -= tmp_inserts.len(); btypel_sub -= tmp_inserts.len() as u32; }
}
// Distance-block bookkeeping only applies to commands that actually carry a
// distance (cmd_prefix_ >= 128).
if copy_len != 0 && cmd.cmd_prefix_ >= 128 {
btyped_sub -= 1; if btyped_sub == 0 { btyped_counter += 1; if block_type.btyped.types.len() > btyped_counter { btyped_sub = block_type.btyped.lengths[btyped_counter]; command_queue.push(interface::Command::BlockSwitchDistance( interface::BlockSwitch(block_type.btyped.types[btyped_counter])), callback); } else { btyped_sub = 1u32 << 31; } } }
if final_distance > max_distance { // is dictionary
assert!(copy_len >= 4); assert!(copy_len < 25);
// Decode the static-dictionary reference: word index plus transform id.
let dictionary_offset = final_distance - max_distance - 1;
let ndbits = kBrotliDictionarySizeBitsByLength[copy_len] as usize;
let action = dictionary_offset >> ndbits;
let word_sub_index = dictionary_offset & ((1 << ndbits) - 1);
let word_index = word_sub_index * copy_len + kBrotliDictionaryOffsetsByLength[copy_len] as usize;
let raw_word = &kBrotliDictionary[word_index..word_index + copy_len];
let mut transformed_word = [0u8; 38];
actual_copy_len = TransformDictionaryWord(&mut transformed_word[..], raw_word, copy_len as i32, action as i32) as usize;
if actual_copy_len <= mb_len {
command_queue.push(interface::Command::Dict( interface::DictCommand{ word_size: copy_len as u8, transform: action as u8, final_size: actual_copy_len as u8, empty: 0, word_id: word_sub_index as u32, }), callback);
mb_len -= actual_copy_len;
assert_eq!(InputPair(transformed_word.split_at(actual_copy_len).0, &[]), interim.split_at(actual_copy_len).0);
} else if mb_len != 0 {
// truncated dictionary word: represent it as literals instead
// won't be random noise since it fits in the dictionary, so we won't check for rand
command_queue.push_literals(&interim.split_at(mb_len).0, callback);
mb_len = 0;
// NOTE(review): with mb_len now 0 this compares two empty prefixes and is
// trivially true — presumably intended to run before `mb_len = 0`; confirm.
assert_eq!(InputPair(transformed_word.split_at(mb_len).0, &[]), interim.split_at(mb_len).0);
}
} else {
// Ordinary backward copy, truncated to the remaining meta-block length.
actual_copy_len = core::cmp::min(mb_len, copy_len);
if actual_copy_len != 0 { command_queue.push(interface::Command::Copy( interface::CopyCommand{ distance: final_distance as u32, num_bytes: actual_copy_len as u32, }), callback); }
mb_len -= actual_copy_len;
if prev_dist_index != 1 || dist_offset != 0 { // update distance cache unless it's the "0 distance symbol"
let mut tmp_dist_cache = [0i32;kNumDistanceCacheEntries - 1];
tmp_dist_cache.clone_from_slice(&local_dist_cache[..kNumDistanceCacheEntries - 1]);
local_dist_cache[1..].clone_from_slice(&tmp_dist_cache[..]);
local_dist_cache[0] = final_distance as i32;
}
}
let (copied, remainder) = interim.split_at(actual_copy_len);
recoder_state.num_bytes_encoded += copied.len();
input_iter = remainder;
}
recoder_state
}
// Records a meta-block into the IR callback: builds the prediction-mode/context-map
// command, runs one tallying pass (ContextMapEntropy) and then the real
// CommandQueue pass over the same command stream.  (Definition continues on the
// next chunk line.)
fn LogMetaBlock<'a, AllocU32:alloc::Allocator<u32>, Cb>(m32:&mut AllocU32, commands: &[Command], input0: &'a[u8],input1: &'a[u8], n_postfix: u32, n_direct: u32, dist_cache: &[i32;kNumDistanceCacheEntries], recoder_state :&mut RecoderState, block_type: MetaBlockSplitRefs, params: &BrotliEncoderParams, context_type:Option<ContextType>, callback: &mut Cb) where Cb:FnMut(&[interface::Command<InputReference>]){
let mut local_literal_context_map = [0u8; 256 * 64];
let mut local_distance_context_map = [0u8; 256 * 64];
// Block-type ids must be dense: max id + 1 == number of types.
assert_eq!(*block_type.btypel.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btypel.num_types);
assert_eq!(*block_type.btypec.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btypec.num_types);
assert_eq!(*block_type.btyped.types.iter().max().unwrap_or(&0) as u32 + 1, block_type.btyped.num_types);
// Narrow the u32 context maps into local byte arrays.
if block_type.literal_context_map.len() <= 256 * 64 { for (index, item) in block_type.literal_context_map.iter().enumerate() { local_literal_context_map[index] = *item as u8; } }
if block_type.distance_context_map.len() <= 256 * 64 { for (index, item) in block_type.distance_context_map.iter().enumerate() { local_distance_context_map[index] = *item as u8; } }
let prediction_mode = interface::PredictionModeContextMap::<InputReference>{ literal_prediction_mode: interface::LiteralPredictionModeNibble(context_type.unwrap_or(ContextType::CONTEXT_LSB6) as u8), literal_context_map:InputReference(&local_literal_context_map.split_at(block_type.literal_context_map.len()).0),
distance_context_map:InputReference(&local_distance_context_map.split_at(block_type.distance_context_map.len()).0), };
// Pass 1: tally literal statistics only (commands are discarded via the no-op sink).
let mut context_map_entropy = ContextMapEntropy::<AllocU32>::new(m32, InputPair(input0, input1), prediction_mode);
let input = InputPair(input0, input1);
process_command_queue(&mut context_map_entropy, input, commands, n_postfix, n_direct, dist_cache, *recoder_state, &block_type, params, context_type, &mut |_x|());
// Pass 2: replay through the real CommandQueue, which takes ownership of the tally.
let mut command_queue = CommandQueue::new(m32, InputPair(input0, input1), params.stride_detection_quality, params.high_entropy_detection_quality, context_map_entropy);
command_queue.push(interface::Command::PredictionMode( prediction_mode.clone()), callback);
*recoder_state = process_command_queue(&mut command_queue, input, commands, n_postfix, n_direct, dist_cache, *recoder_state, &block_type, params, context_type, callback);
command_queue.free(m32, callback);
// ::std::io::stderr().write(input0).unwrap();
// ::std::io::stderr().write(input1).unwrap();
}
// Block-length prefix-code table: maps each of the 26 block-length symbols to its
// base value (`offset`) and extra-bit count (`nbits`).
static kBlockLengthPrefixCode: [PrefixCodeRange; BROTLI_NUM_BLOCK_LEN_SYMBOLS] = [PrefixCodeRange { offset: 1u32, nbits: 2u32, }, PrefixCodeRange { offset: 5u32, nbits: 2u32, }, PrefixCodeRange { offset: 9u32, nbits: 2u32, }, PrefixCodeRange { offset: 13u32, nbits: 2u32, }, PrefixCodeRange { offset: 17u32, nbits: 3u32, }, PrefixCodeRange { offset: 25u32, nbits: 3u32, }, PrefixCodeRange { offset: 33u32, nbits: 3u32, }, PrefixCodeRange { offset: 41u32, nbits: 3u32, }, PrefixCodeRange { offset: 49u32, nbits: 4u32, }, PrefixCodeRange { offset: 65u32, nbits: 4u32, }, PrefixCodeRange { offset: 81u32, nbits: 4u32, }, PrefixCodeRange { offset: 97u32, nbits: 4u32, }, PrefixCodeRange { offset: 113u32, nbits: 5u32, }, PrefixCodeRange { offset: 145u32, nbits: 5u32, }, PrefixCodeRange { offset: 177u32, nbits: 5u32, }, PrefixCodeRange { offset: 209u32, nbits: 5u32, }, PrefixCodeRange { offset: 241u32, nbits: 6u32, }, PrefixCodeRange { offset: 305u32, nbits: 6u32, }, PrefixCodeRange { offset: 369u32, nbits: 7u32, }, PrefixCodeRange { offset: 497u32, nbits: 8u32, }, PrefixCodeRange { offset: 753u32, nbits: 9u32, }, PrefixCodeRange { offset: 1265u32, nbits: 10u32, }, PrefixCodeRange { offset: 2289u32, nbits: 11u32, }, PrefixCodeRange { offset: 4337u32, nbits: 12u32, }, PrefixCodeRange { offset: 8433u32, nbits: 13u32, }, PrefixCodeRange { offset: 16625u32, nbits: 24u32, }];
// Appends `n_bits` of `bits` to the little-endian bit stream at position `*pos`.
// Reads only the current partial byte and rewrites 8 bytes starting there —
// NOTE(review): this appears to rely on `storage` having at least 7 bytes of slack
// past the write position and on those bytes being zero (see
// BrotliWriteBitsPrepareStorage); confirm with callers' buffer sizing.
fn BrotliWriteBits(n_bits: u8, bits: u64, pos: &mut usize, array: &mut [u8]) {
assert!((bits >> n_bits as usize) == 0);
assert!(n_bits <= 56);
let ptr_offset: usize = ((*pos >> 3) as u32) as usize;
let mut v = array[ptr_offset] as u64;
v |= bits << ((*pos) as u64 & 7);
array[ptr_offset + 7] = (v >> 56) as u8;
array[ptr_offset + 6] = ((v >> 48) & 0xff) as u8;
array[ptr_offset + 5] = ((v >> 40) & 0xff) as u8;
array[ptr_offset + 4] = ((v >> 32) & 0xff) as u8;
array[ptr_offset + 3] = ((v >> 24) & 0xff) as u8;
array[ptr_offset + 2] = ((v >> 16) & 0xff) as u8;
array[ptr_offset + 1] = ((v >> 8) & 0xff) as u8;
array[ptr_offset] = (v & 0xff) as u8;
*pos += n_bits as usize
}
// Zeroes the byte at a byte-aligned bit position so subsequent BrotliWriteBits
// calls can OR into it.
fn BrotliWriteBitsPrepareStorage(pos: usize, array: &mut [u8]) { assert_eq!(pos & 7, 0); array[pos >> 3] = 0; }
// Serializes the code-length-of-code-lengths table (RFC 7932 complex prefix code
// header): trims trailing zero entries in kStorageOrder, encodes how many leading
// entries to skip, then writes each depth with the fixed meta Huffman code.
fn BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes: i32, code_length_bitdepth: &[u8], storage_ix: &mut usize, storage: &mut [u8]) {
static kStorageOrder: [u8; 18] = [1i32 as (u8), 2i32 as (u8), 3i32 as (u8), 4i32 as (u8), 0i32 as (u8), 5i32 as (u8), 17i32 as (u8), 6i32 as (u8), 16i32 as (u8), 7i32 as (u8), 8i32 as (u8), 9i32 as (u8), 10i32 as (u8), 11i32 as (u8), 12i32 as (u8), 13i32 as (u8), 14i32 as (u8), 15i32 as (u8)];
static kHuffmanBitLengthHuffmanCodeSymbols: [u8; 6] = [0i32 as (u8), 7i32 as (u8), 3i32 as (u8), 2i32 as (u8), 1i32 as (u8), 15i32 as (u8)];
static kHuffmanBitLengthHuffmanCodeBitLengths: [u8; 6] = [2i32 as (u8), 4i32 as (u8), 3i32 as (u8), 2i32 as (u8), 2i32 as (u8), 4i32 as (u8)];
let mut skip_some: u64 = 0u64;
let mut codes_to_store: u64 = 18;
// Drop trailing zero-depth entries (in storage order).
if num_codes > 1i32 { 'break5: while codes_to_store
> 0 { { if code_length_bitdepth[(kStorageOrder[codes_to_store.wrapping_sub(1) as usize] as (usize))] as (i32) != 0i32 { { break 'break5; } } } codes_to_store = codes_to_store.wrapping_sub(1); } }
// Encode how many of the first storage-order entries are zero (0, 2 or 3).
if code_length_bitdepth[(kStorageOrder[0usize] as (usize))] as (i32) == 0i32 && (code_length_bitdepth[(kStorageOrder[1usize] as (usize))] as (i32) == 0i32) { skip_some = 2; if code_length_bitdepth[(kStorageOrder[2usize] as (usize))] as (i32) == 0i32 { skip_some = 3; } }
BrotliWriteBits(2, skip_some, storage_ix, storage);
{ let mut i: u64; i = skip_some; while i < codes_to_store { { let l: usize = code_length_bitdepth[(kStorageOrder[i as usize] as (usize))] as (usize); BrotliWriteBits(kHuffmanBitLengthHuffmanCodeBitLengths[l] as (u8), kHuffmanBitLengthHuffmanCodeSymbols[l] as u64, storage_ix, storage); } i = i.wrapping_add(1); } } }
// Emits the run-length-encoded Huffman tree symbols; symbols 16/17 carry 2/3
// extra repeat bits respectively.
fn BrotliStoreHuffmanTreeToBitMask(huffman_tree_size: usize, huffman_tree: &[u8], huffman_tree_extra_bits: &[u8], code_length_bitdepth: &[u8], code_length_bitdepth_symbols: &[u16], storage_ix: &mut usize, storage: &mut [u8]) {
let mut i: usize; i = 0usize;
while i < huffman_tree_size { { let ix: usize = huffman_tree[(i as (usize))] as (usize); BrotliWriteBits(code_length_bitdepth[(ix as (usize))] as (u8), code_length_bitdepth_symbols[(ix as (usize))] as (u64), storage_ix, storage);
if ix == 16usize { BrotliWriteBits(2, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } else if ix == 17usize { BrotliWriteBits(3, huffman_tree_extra_bits[(i as (usize))] as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } }
// Serializes a complete Huffman code: run-length-encodes `depths`, builds the
// meta code over the 18 RLE symbols, writes its header, then the encoded tree.
pub fn BrotliStoreHuffmanTree(depths: &[u8], num: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) {
let mut huffman_tree: [u8; 704] = [0; 704];
let mut huffman_tree_extra_bits: [u8; 704] = [0; 704];
let mut huffman_tree_size: usize = 0usize;
let mut code_length_bitdepth: [u8; 18] = [0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8), 0i32 as (u8)];
let mut code_length_bitdepth_symbols: [u16; 18] = [0; 18];
let mut huffman_tree_histogram: [u32; 18] = [0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32, 0u32];
let mut i: usize;
let mut num_codes: i32 = 0i32;
let mut code: usize = 0usize;
// Transpile artifact from the C sources: a value statement with no effect.
0i32;
BrotliWriteHuffmanTree(depths, num, &mut huffman_tree_size, &mut huffman_tree[..], &mut huffman_tree_extra_bits[..]);
// Histogram of RLE symbols; also count distinct codes (0, 1 or "2 = many").
i = 0usize; while i < huffman_tree_size { { let _rhs = 1; let _lhs = &mut huffman_tree_histogram[huffman_tree[i] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } i = i.wrapping_add(1 as (usize)); }
i = 0usize;
'break3: while i < 18usize { { if huffman_tree_histogram[i] != 0 { if num_codes == 0i32 { code = i; num_codes = 1i32; } else if num_codes == 1i32 { num_codes = 2i32; { { break 'break3; } } } } } i = i.wrapping_add(1 as (usize)); }
BrotliCreateHuffmanTree(&mut huffman_tree_histogram, 18usize, 5i32, tree, &mut code_length_bitdepth);
BrotliConvertBitDepthsToSymbols(&mut code_length_bitdepth, 18usize, &mut code_length_bitdepth_symbols);
BrotliStoreHuffmanTreeOfHuffmanTreeToBitMask(num_codes, &code_length_bitdepth, storage_ix, storage);
// A single-symbol code needs no bits per symbol.
if num_codes == 1i32 { code_length_bitdepth[code] = 0i32 as (u8); }
BrotliStoreHuffmanTreeToBitMask(huffman_tree_size, &huffman_tree, &huffman_tree_extra_bits, &code_length_bitdepth, &code_length_bitdepth_symbols, storage_ix, storage); }
// Writes the fixed 40-bit pattern selecting the static code-length code.
fn StoreStaticCodeLengthCode(storage_ix: &mut usize, storage: &mut [u8]) { BrotliWriteBits(40, 0xffu32 as (u64) << 32i32 | 0x55555554u32 as (u64), storage_ix, storage); }
// Comparator ordering HuffmanTree nodes by ascending total count.
pub struct SimpleSortHuffmanTree {}
impl HuffmanComparator for SimpleSortHuffmanTree { fn Cmp(self: &Self, v0: &HuffmanTree, v1: &HuffmanTree) -> bool { return (*v0).total_count_ < (*v1).total_count_; } }
// Builds depth-limited Huffman depths/bits from `histogram` and stores the code
// (simple form for <=4 symbols, otherwise via the static code-length code).
// (Definition continues on the next chunk line.)
pub fn
BrotliBuildAndStoreHuffmanTreeFast<AllocHT: alloc::Allocator<HuffmanTree>>( m : &mut AllocHT, histogram : &[u32], histogram_total : usize, max_bits : usize, depth : &mut [u8], bits : &mut [u16], storage_ix : &mut usize, storage : &mut [u8] ){
// Scan the histogram: remember up to the first 4 used symbols, count them, and
// find `length` = highest used symbol + 1.
let mut count: u64 = 0;
let mut symbols: [u64; 4] = [0; 4];
let mut length: u64 = 0;
let mut total: usize = histogram_total;
while total != 0usize { if histogram[(length as (usize))] != 0 { if count < 4 { symbols[count as usize] = length; } count = count.wrapping_add(1); total = total.wrapping_sub(histogram[(length as (usize))] as (usize)); } length = length.wrapping_add(1); }
// Single symbol: emit the trivial one-symbol code and return.
if count <= 1 { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); depth[symbols[0usize] as (usize)] = 0i32 as (u8); bits[symbols[0usize] as (usize)] = 0i32 as (u16); return ; }
for depth_elem in depth[..(length as usize)].iter_mut() { *depth_elem = 0; // memset
}
{
let max_tree_size: u64 = (2u64).wrapping_mul(length).wrapping_add(1);
let mut tree = if max_tree_size != 0 { m.alloc_cell(max_tree_size as usize) } else { AllocHT::AllocatedMemory::default() // null
};
let mut count_limit: u32;
if !(0i32 == 0) { return; }
count_limit = 1u32;
// Repeatedly build the tree, clamping counts to `count_limit`, doubling the
// clamp until the resulting depths fit within 14 bits (BrotliSetDepth succeeds).
'break11: loop {
{
let mut node_index: u32 = 0u32;
let mut l: u64;
l = length;
while l != 0 { l = l.wrapping_sub(1); if histogram[(l as (usize))] != 0 { if histogram[(l as (usize))] >= count_limit { InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))], histogram[(l as (usize))], -1i32 as (i16), l as (i16)); } else { InitHuffmanTree(&mut tree.slice_mut()[(node_index as (usize))], count_limit, -1i32 as (i16), l as (i16)); } node_index = node_index.wrapping_add(1 as (u32)); } }
{
let n: i32 = node_index as (i32);
let sentinel: HuffmanTree;
let mut i: i32 = 0i32;
let mut j: i32 = n + 1i32;
let mut k: i32;
SortHuffmanTreeItems(tree.slice_mut(), n as (usize), SimpleSortHuffmanTree {});
// Sentinel nodes with maximal count terminate both merge queues.
sentinel = NewHuffmanTree(!(0u32), -1i16, -1i16);
tree.slice_mut()[(node_index.wrapping_add(1u32) as (usize))] = sentinel.clone();
tree.slice_mut()[(node_index as (usize))] = sentinel.clone();
node_index = node_index.wrapping_add(2u32);
// Standard two-queue Huffman merge: leaves in [0..n), internal nodes appended.
k = n - 1i32;
while k > 0i32 { { let left: i32; let right: i32; if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ { left = i; i = i + 1; } else { left = j; j = j + 1; } if (tree.slice()[(i as (usize))]).total_count_ <= (tree.slice()[(j as (usize))]).total_count_ { right = i; i = i + 1; } else { right = j; j = j + 1; } let sum_total = (tree.slice()[(left as (usize))]) .total_count_ .wrapping_add((tree.slice()[(right as (usize))]).total_count_); let tree_ind = (node_index.wrapping_sub(1u32) as (usize)); (tree.slice_mut()[tree_ind]).total_count_ = sum_total; (tree.slice_mut()[tree_ind]).index_left_ = left as (i16); (tree.slice_mut()[tree_ind]).index_right_or_value_ = right as (i16); tree.slice_mut()[(node_index as (usize))] = sentinel.clone(); node_index = node_index.wrapping_add(1u32); } k = k - 1; }
if BrotliSetDepth(2i32 * n - 1i32, tree.slice_mut(), depth, 14i32) { { break 'break11; } }
}
}
count_limit = count_limit.wrapping_mul(2u32);
}
{ m.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default())); }
}
BrotliConvertBitDepthsToSymbols(depth, length as usize, bits);
if count <= 4 {
// Simple code: 2-4 symbols listed explicitly, sorted by depth.
let mut i: u64;
BrotliWriteBits(2, 1, storage_ix, storage);
BrotliWriteBits(2, count.wrapping_sub(1) as u64, storage_ix, storage);
// Selection sort of the symbols by their assigned depth.
i = 0;
while i < count { { let mut j: u64; j = i.wrapping_add(1); while j < count { { if depth[(symbols[j as usize] as (usize))] as (i32) < depth[(symbols[i as usize] as (usize)) as usize] as (i32) { let brotli_swap_tmp: u64 = symbols[j as usize]; symbols[j as usize] = symbols[i as usize]; symbols[i as usize] = brotli_swap_tmp; } } j = j.wrapping_add(1); } } i = i.wrapping_add(1); }
if count == 2 { BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); } else if count == 3 { BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage); } else {
BrotliWriteBits(max_bits as u8, symbols[0usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[1usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[2usize], storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[3usize], storage_ix, storage);
// tree-select bit: distinguishes the two 4-symbol code shapes.
BrotliWriteBits(1, if depth[(symbols[0usize] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage); }
} else {
// Complex code: run-length encode the depth array with the static
// code-length code (zero runs and non-zero repeats get dedicated symbols).
let mut previous_value: u8 = 8i32 as (u8);
let mut i: u64;
StoreStaticCodeLengthCode(storage_ix, storage);
i = 0;
while i < length {
let value: u8 = depth[(i as (usize))];
let mut reps: u64 = 1;
let mut k: u64;
k = i.wrapping_add(1);
while k < length && (depth[(k as (usize))] as (i32) == value as (i32)) { { reps = reps.wrapping_add(1); } k = k.wrapping_add(1); }
i = i.wrapping_add(reps);
if value as (i32) == 0i32 { BrotliWriteBits(kZeroRepsDepth[reps as usize] as (u8), kZeroRepsBits[reps as usize] as u64, storage_ix, storage); } else {
if previous_value as (i32) != value as (i32) { BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8), kCodeLengthBits[value as (usize)] as (u64), storage_ix, storage); reps = reps.wrapping_sub(1); }
if reps < 3 { while reps != 0 { reps = reps.wrapping_sub(1); BrotliWriteBits(kCodeLengthDepth[value as (usize)] as (u8), kCodeLengthBits[value as (usize)] as (u64), storage_ix, storage); } } else { reps = reps.wrapping_sub(3); BrotliWriteBits(kNonZeroRepsDepth[reps as usize] as (u8), kNonZeroRepsBits[reps as usize] as u64, storage_ix, storage); }
previous_value = value;
} } } }
// Meta-block split result: block splits for literals/commands/distances plus the
// context maps and per-block-type histograms. (Declaration continues on the next
// chunk line.)
pub struct MetaBlockSplit<AllocU8: alloc::Allocator<u8>, AllocU32: alloc::Allocator<u32>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC:
alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>> {
pub literal_split: BlockSplit<AllocU8, AllocU32>,
pub command_split: BlockSplit<AllocU8, AllocU32>,
pub distance_split: BlockSplit<AllocU8, AllocU32>,
pub literal_context_map: AllocU32::AllocatedMemory,
pub literal_context_map_size: usize,
pub distance_context_map: AllocU32::AllocatedMemory,
pub distance_context_map_size: usize,
pub literal_histograms: AllocHL::AllocatedMemory,
pub literal_histograms_size: usize,
pub command_histograms: AllocHC::AllocatedMemory,
pub command_histograms_size: usize,
pub distance_histograms: AllocHD::AllocatedMemory,
pub distance_histograms_size: usize, }
impl <AllocU8: alloc::Allocator<u8>, AllocU32: alloc::Allocator<u32>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC: alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>> MetaBlockSplit <AllocU8, AllocU32, AllocHL, AllocHC, AllocHD> {
// Creates an empty split with default (unallocated) buffers and zero sizes.
pub fn new() -> Self { return MetaBlockSplit { literal_split:BlockSplit::<AllocU8, AllocU32>::new(), command_split:BlockSplit::<AllocU8, AllocU32>::new(), distance_split:BlockSplit::<AllocU8, AllocU32>::new(), literal_context_map : AllocU32::AllocatedMemory::default(), literal_context_map_size : 0, distance_context_map : AllocU32::AllocatedMemory::default(), distance_context_map_size : 0, literal_histograms : AllocHL::AllocatedMemory::default(), literal_histograms_size : 0, command_histograms : AllocHC::AllocatedMemory::default(), command_histograms_size : 0, distance_histograms : AllocHD::AllocatedMemory::default(), distance_histograms_size : 0, } }
// Returns every allocation to its owning allocator and zeroes the sizes.
pub fn destroy(&mut self, m8: &mut AllocU8, m32: &mut AllocU32, mhl: &mut AllocHL, mhc: &mut AllocHC, mhd: &mut AllocHD) {
self.literal_split.destroy(m8,m32);
self.command_split.destroy(m8,m32);
self.distance_split.destroy(m8,m32);
// mem::replace swaps in an empty default so the field is never double-freed.
m32.free_cell(core::mem::replace(&mut self.literal_context_map, AllocU32::AllocatedMemory::default()));
self.literal_context_map_size = 0;
m32.free_cell(core::mem::replace(&mut self.distance_context_map, AllocU32::AllocatedMemory::default()));
self.distance_context_map_size = 0;
mhl.free_cell(core::mem::replace(&mut self.literal_histograms, AllocHL::AllocatedMemory::default()));
self.literal_histograms_size = 0;
mhc.free_cell(core::mem::replace(&mut self.command_histograms, AllocHC::AllocatedMemory::default()));
self.command_histograms_size = 0;
mhd.free_cell(core::mem::replace(&mut self.distance_histograms, AllocHD::AllocatedMemory::default()));
self.distance_histograms_size = 0; } }
// Tracks the last two block types to compute the relative block-switch type code.
#[derive(Clone, Copy)]
pub struct BlockTypeCodeCalculator { pub last_type: usize, pub second_last_type: usize, }
// Prefix codes (depths + bit patterns) for block-switch type and length symbols.
pub struct BlockSplitCode { pub type_code_calculator: BlockTypeCodeCalculator, pub type_depths: [u8; 258], pub type_bits: [u16; 258], pub length_depths: [u8; 26], pub length_bits: [u16; 26], }
// Streaming encoder state for one category (literal/command/distance) of a
// meta-block: its block split, the split's prefix codes, a cursor over the blocks,
// and the per-block-type symbol depths/bits.
pub struct BlockEncoder<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> { /* pub alloc_u8 : AllocU8, pub alloc_u16 : AllocU16, pub alloc_u32 : AllocU32, pub alloc_ht : AllocHT,*/ pub alphabet_size_: usize, pub num_block_types_: usize, pub block_types_: &'a [u8], pub block_lengths_: &'a [u32], pub num_blocks_: usize, pub block_split_code_: BlockSplitCode, pub block_ix_: usize, pub block_len_: usize, pub entropy_ix_: usize, pub depths_: AllocU8::AllocatedMemory, pub bits_: AllocU16::AllocatedMemory, }
// floor(log2(n)) for n > 0 (counts how many right-shifts reach zero); returns 0
// for n == 0 or 1.
fn Log2FloorNonZero(mut n: u64) -> u32 { let mut result: u32 = 0u32; 'loop1: loop { if { n = n >> 1i32; n } != 0 { result = result.wrapping_add(1 as (u32)); continue 'loop1; } else { break 'loop1; } } result }
// Encodes a meta-block length field: computes the nibble count MNIBBLES (4..6)
// and emits `length - 1` in mnibbles*4 bits. (Definition continues on the next
// chunk line.)
fn BrotliEncodeMlen(length: u32, bits: &mut u64, numbits: &mut u32, nibblesbits: &mut u32) {
let lg: u32 = (if length == 1u32 { 1u32 } else { Log2FloorNonZero(length.wrapping_sub(1u32) as (u32) as (u64)).wrapping_add(1u32) }) as (u32);
let mnibbles: u32 = (if lg < 16u32 { 16u32 } else { lg.wrapping_add(3u32) }) .wrapping_div(4u32);
assert!(length > 0);
assert!(length <= (1 << 24));
assert!(lg <= 24);
*nibblesbits = mnibbles.wrapping_sub(4u32);
*numbits = mnibbles.wrapping_mul(4u32);
*bits = length.wrapping_sub(1u32) as u64; }
// Writes a meta-block header: ISLAST flag (plus ISLASTEMPTY=0 when final), the
// MNIBBLES selector, MLEN-1, and ISUNCOMPRESSED=0 for non-final blocks.
fn StoreCompressedMetaBlockHeader(is_final_block: i32, length: usize, storage_ix: &mut usize, storage: &mut [u8]) {
let mut lenbits: u64 = 0;
let mut nlenbits: u32 = 0;
let mut nibblesbits: u32 = 0;
BrotliWriteBits(1, is_final_block as (u64), storage_ix, storage);
if is_final_block != 0 { BrotliWriteBits(1, 0, storage_ix, storage); }
BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits);
BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage);
BrotliWriteBits(nlenbits as u8, lenbits, storage_ix, storage);
if is_final_block == 0 { BrotliWriteBits(1, 0, storage_ix, storage); } }
// Initial calculator state: last type 1, second-last 0 (per RFC 7932).
fn NewBlockTypeCodeCalculator() -> BlockTypeCodeCalculator { return BlockTypeCodeCalculator { last_type: 1, second_last_type: 0, }; }
// Creates a BlockEncoder positioned at the first block; depths/bits start empty.
fn NewBlockEncoder<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (alphabet_size: usize, num_block_types: usize, block_types: &'a [u8], block_lengths: &'a [u32], num_blocks: usize) -> BlockEncoder<'a, AllocU8, AllocU16> {
let block_len: usize;
if num_blocks != 0 && block_lengths.len() != 0 { block_len = block_lengths[0] as usize; } else { block_len = 0; }
return BlockEncoder::<AllocU8, AllocU16> { alphabet_size_: alphabet_size, num_block_types_: num_block_types, block_types_: block_types, block_lengths_: block_lengths, num_blocks_: num_blocks, block_split_code_: BlockSplitCode { type_code_calculator: NewBlockTypeCodeCalculator(), type_depths: [0; 258], type_bits: [0; 258], length_depths: [0; 26], length_bits: [0; 26], }, block_ix_: 0, block_len_: block_len, entropy_ix_: 0, depths_: AllocU8::AllocatedMemory::default(), bits_: AllocU16::AllocatedMemory::default(), }; }
// Relative block-type code: 1 = previous type + 1, 0 = second-to-last type,
// otherwise the literal type value + 2; updates the calculator's history.
fn NextBlockTypeCode(calculator: &mut BlockTypeCodeCalculator, type_: u8) -> usize {
let type_code: usize = (if type_ as (usize) == (*calculator).last_type.wrapping_add(1usize) { 1u32 } else if type_ as (usize) == (*calculator).second_last_type { 0u32 } else { (type_ as (u32)).wrapping_add(2u32) }) as (usize);
(*calculator).second_last_type = (*calculator).last_type;
(*calculator).last_type = type_ as (usize);
type_code }
// Maps a block length to its prefix-code symbol via kBlockLengthPrefixCode
// (coarse three-way guess, then linear refinement).
fn BlockLengthPrefixCode(len: u32) -> u32 {
let mut code: u32 = (if len >= 177u32 { if len >= 753u32 { 20i32 } else { 14i32 } } else if len >= 41u32 { 7i32 } else { 0i32 }) as (u32);
while code < (26i32 - 1i32) as (u32) && (len >= kBlockLengthPrefixCode[code.wrapping_add(1u32) as (usize)].offset) { code = code.wrapping_add(1 as (u32)); }
code }
// Variable-length u8 encoding (NBLTYPES-style): 0 -> single 0 bit, else a 1 bit,
// 3 bits of floor(log2(n)), and `nbits` low bits of n - 2^nbits.
fn StoreVarLenUint8(n: u64, storage_ix: &mut usize, storage: &mut [u8]) {
if n == 0 { BrotliWriteBits(1, 0, storage_ix, storage); } else {
let nbits: u8 = Log2FloorNonZero(n) as (u8);
BrotliWriteBits(1, 1, storage_ix, storage);
BrotliWriteBits(3, nbits as u64, storage_ix, storage);
BrotliWriteBits(nbits, n.wrapping_sub(1u64 << nbits), storage_ix, storage); } }
// Stores a "simple" Huffman code (2..4 symbols): sorts symbols by depth, then
// writes each in max_bits bits. (Definition continues on the next chunk line.)
fn StoreSimpleHuffmanTree(depths: &[u8], symbols: &mut [usize], num_symbols: usize, max_bits: usize, storage_ix: &mut usize, storage: &mut [u8]) {
BrotliWriteBits(2, 1, storage_ix, storage);
BrotliWriteBits(2, num_symbols.wrapping_sub(1) as u64, storage_ix, storage);
// Selection sort of the symbols by their assigned depth.
{ let mut i: usize; i = 0usize; while i < num_symbols { { let mut j: usize; j = i.wrapping_add(1usize); while j < num_symbols { { if depths[(symbols[(j as (usize))] as (usize))] as (i32) < depths[(symbols[(i as (usize))] as (usize))] as (i32) { let mut __brotli_swap_tmp: usize = symbols[(j as (usize))]; symbols[(j as (usize))] = symbols[(i as (usize))]; symbols[(i as (usize))] = __brotli_swap_tmp; } } j = j.wrapping_add(1 as (usize)); } } i = i.wrapping_add(1 as (usize)); } }
if num_symbols == 2usize { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); } else if num_symbols == 3usize { BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as
u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); } else {
BrotliWriteBits(max_bits as u8, symbols[(0usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(1usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(2usize)] as u64, storage_ix, storage); BrotliWriteBits(max_bits as u8, symbols[(3usize)] as u64, storage_ix, storage);
// tree-select bit for the two possible 4-symbol code shapes.
BrotliWriteBits(1, if depths[(symbols[(0usize)] as (usize))] as (i32) == 1i32 { 1i32 } else { 0i32 } as (u64), storage_ix, storage); } }
// Builds depths/bits for `histogram` (depth limit 15) and stores the code: simple
// form when at most 4 symbols are used, complex form otherwise.
fn BuildAndStoreHuffmanTree(histogram: &[u32], length: usize, tree: &mut [HuffmanTree], depth: &mut [u8], bits: &mut [u16], storage_ix: &mut usize, storage: &mut [u8]) {
let mut count: usize = 0usize;
let mut s4: [usize; 4] = [0usize, 0usize, 0usize, 0usize];
let mut i: usize;
let mut max_bits: usize = 0usize;
// Collect up to the first 4 used symbols; stop early once past 4.
i = 0usize;
'break31: while i < length { { if histogram[(i as (usize))] != 0 { if count < 4usize { s4[count] = i; } else if count > 4usize { { break 'break31; } } count = count.wrapping_add(1 as (usize)); } } i = i.wrapping_add(1 as (usize)); }
// max_bits = bit width of (length - 1), i.e. bits needed for a raw symbol.
{ let mut max_bits_counter: usize = length.wrapping_sub(1usize); while max_bits_counter != 0 { max_bits_counter = max_bits_counter >> 1i32; max_bits = max_bits.wrapping_add(1 as (usize)); } }
if count <= 1usize { BrotliWriteBits(4, 1, storage_ix, storage); BrotliWriteBits(max_bits as u8, s4[0usize] as u64, storage_ix, storage); depth[(s4[0usize] as (usize))] = 0i32 as (u8); bits[(s4[0usize] as (usize))] = 0i32 as (u16); return; }
for depth_elem in depth[..length].iter_mut() { *depth_elem = 0; // memset
}
BrotliCreateHuffmanTree(histogram, length, 15i32, tree, depth);
BrotliConvertBitDepthsToSymbols(depth, length, bits);
if count <= 4usize { StoreSimpleHuffmanTree(depth, &mut s4[..], count, max_bits, storage_ix, storage); } else { BrotliStoreHuffmanTree(depth, length, tree, storage_ix, storage); } }
// Splits a block length into (prefix symbol, extra-bit count, extra-bit value).
fn GetBlockLengthPrefixCode(len: u32, code: &mut usize, n_extra: &mut u32, extra: &mut u32) { *code = BlockLengthPrefixCode(len) as (usize); *n_extra = kBlockLengthPrefixCode[*code].nbits; *extra = len.wrapping_sub(kBlockLengthPrefixCode[*code].offset); }
// Emits one block switch: the (relative) type code — omitted for the very first
// block — followed by the length prefix symbol and its extra bits.
fn StoreBlockSwitch(code: &mut BlockSplitCode, block_len: u32, block_type: u8, is_first_block: i32, storage_ix: &mut usize, storage: &mut [u8]) {
let typecode: usize = NextBlockTypeCode(&mut (*code).type_code_calculator, block_type);
let mut lencode: usize = 0;
let mut len_nextra: u32 = 0;
let mut len_extra: u32 = 0;
if is_first_block == 0 { BrotliWriteBits((*code).type_depths[typecode] as (u8), (*code).type_bits[typecode] as (u64), storage_ix, storage); }
GetBlockLengthPrefixCode(block_len, &mut lencode, &mut len_nextra, &mut len_extra);
BrotliWriteBits((*code).length_depths[lencode] as (u8), (*code).length_bits[lencode] as (u64), storage_ix, storage);
BrotliWriteBits(len_nextra as (u8), len_extra as (u64), storage_ix, storage); }
// Histograms the type codes and length symbols of a block split, writes
// NBLTYPES-1, and (for multi-type splits) the two Huffman codes plus the first
// block's switch record.
fn BuildAndStoreBlockSplitCode(types: &[u8], lengths: &[u32], num_blocks: usize, num_types: usize, tree: &mut [HuffmanTree], code: &mut BlockSplitCode, storage_ix: &mut usize, storage: &mut [u8]) {
let mut type_histo: [u32; 258] = [0; 258];
let mut length_histo: [u32; 26] = [0; 26];
let mut i: usize;
let mut type_code_calculator = NewBlockTypeCodeCalculator();
i = 0usize;
while i < num_blocks { { let type_code: usize = NextBlockTypeCode(&mut type_code_calculator, types[(i as (usize))]);
// The first block's type is implicit and not counted in the type histogram.
if i != 0usize { let _rhs = 1; let _lhs = &mut type_histo[type_code]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); }
{ let _rhs = 1; let _lhs = &mut length_histo[BlockLengthPrefixCode(lengths[(i as (usize))]) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } } i = i.wrapping_add(1 as (usize)); }
StoreVarLenUint8(num_types.wrapping_sub(1) as u64, storage_ix, storage);
if num_types > 1usize {
// Type alphabet has num_types + 2 symbols (two relative codes).
BuildAndStoreHuffmanTree(&mut type_histo[0usize..], num_types.wrapping_add(2usize), tree, &mut (*code).type_depths[0usize..], &mut (*code).type_bits[0usize..], storage_ix, storage);
BuildAndStoreHuffmanTree(&mut length_histo[0usize..], 26usize, tree, &mut (*code).length_depths[0usize..], &mut (*code).length_bits[0usize..], storage_ix, storage);
StoreBlockSwitch(code, lengths[(0usize)], types[(0usize)], 1i32, storage_ix, storage); } }
// Convenience wrapper: stores the split code held by a BlockEncoder.
fn BuildAndStoreBlockSwitchEntropyCodes<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (xself: &mut BlockEncoder<'a, AllocU8, AllocU16>, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { BuildAndStoreBlockSplitCode((*xself).block_types_, (*xself).block_lengths_, (*xself).num_blocks_, (*xself).num_block_types_, tree, &mut (*xself).block_split_code_, storage_ix, storage); }
// Stores the trivial context map (each type maps to itself) using RLE of zeros.
// NOTE(review): this definition runs past the end of the visible chunk.
fn StoreTrivialContextMap(num_types: usize, context_bits: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) {
StoreVarLenUint8(num_types.wrapping_sub(1usize) as u64, storage_ix, storage);
if num_types > 1usize {
let repeat_code: usize = context_bits.wrapping_sub(1u32 as (usize));
let repeat_bits: usize = (1u32 << repeat_code).wrapping_sub(1u32) as (usize);
let alphabet_size: usize = num_types.wrapping_add(repeat_code);
let mut histogram: [u32; 272] = [0; 272];
let mut depths: [u8; 272] = [0; 272];
let mut bits: [u16; 272] = [0; 272];
let mut i: usize;
// Use RLEMAX run-length coding for the long zero runs.
BrotliWriteBits(1u8, 1u64, storage_ix, storage);
BrotliWriteBits(4u8, repeat_code.wrapping_sub(1usize) as u64, storage_ix, storage);
histogram[repeat_code] = num_types as (u32);
histogram[0usize] = 1u32;
i = context_bits;
while i < alphabet_size { { histogram[i] = 1u32; } i = i.wrapping_add(1 as (usize)); }
BuildAndStoreHuffmanTree(&mut histogram[..], alphabet_size, tree, &mut depths[..], &mut bits[..], storage_ix, storage);
i = 0usize;
while i < num_types { { let code: usize = if i == 0usize { 0usize } else { i.wrapping_add(context_bits).wrapping_sub(1usize) };
BrotliWriteBits(depths[code] as (u8), bits[code] as (u64), storage_ix, storage);
BrotliWriteBits(depths[repeat_code] as
(u8), bits[repeat_code] as (u64), storage_ix, storage); BrotliWriteBits(repeat_code as u8, repeat_bits as u64, storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); } }

/// Returns the index of the first occurrence of `value` in `v[..v_size]`,
/// or `v_size` when the value is not present (mirrors the C original, which
/// falls off the end of its scan loop and returns the loop counter).
fn IndexOf(v: &[u8], v_size: usize, value: u8) -> usize {
  v[..v_size].iter().position(|&x| x == value).unwrap_or(v_size)
}

/// Move-to-front update step: moves `v[index]` to slot 0, shifting
/// `v[0..index]` one slot towards the back.
fn MoveToFront(v: &mut [u8], index: usize) {
  let value: u8 = v[index];
  let mut i = index;
  while i > 0 {
    v[i] = v[i - 1];
    i -= 1;
  }
  v[0] = value;
}

/// Applies the move-to-front transform to `v_in[..v_size]`, writing the MTF
/// rank of each symbol into `v_out`. The table is seeded with the identity
/// permutation over `0..=max(v_in)`; input values are assumed to fit in a
/// `u8` (the table has 256 slots), as in the reference implementation.
fn MoveToFrontTransform(v_in: &[u32], v_size: usize, v_out: &mut [u32]) {
  if v_size == 0usize {
    return;
  }
  let mut mtf: [u8; 256] = [0; 256];
  // The largest input symbol decides how much of the identity table is used.
  let mut max_value: u32 = v_in[0];
  for &x in v_in[1..v_size].iter() {
    if x > max_value {
      max_value = x;
    }
  }
  let mtf_size: usize = max_value as usize + 1;
  for i in 0..mtf_size {
    mtf[i] = i as u8;
  }
  for i in 0..v_size {
    let index: usize = IndexOf(&mtf[..], mtf_size, v_in[i] as u8);
    v_out[i] = index as u32;
    MoveToFront(&mut mtf[..], index);
  }
}

/// Max/min helpers kept for naming parity with the C sources.
fn brotli_max_uint32_t(a: u32, b: u32) -> u32 {
  if a < b { b } else { a }
}

fn brotli_min_uint32_t(a: u32, b: u32) -> u32 {
  if b < a { b } else { a }
}

// NOTE(review): RunLengthCodeZeros continues on the next source line.
fn RunLengthCodeZeros(in_size: usize, v: &mut [u32], out_size: &mut usize, max_run_length_prefix: &mut u32) { let mut max_reps: u32 = 0u32; let mut i: usize; let mut max_prefix: u32; i = 0usize; while i < in_size { let mut reps: u32 = 0u32; while i < in_size && (v[(i as (usize))] != 0u32) { i = i.wrapping_add(1 as (usize)); } while i < in_size && (v[(i as (usize))] == 0u32) { { reps =
reps.wrapping_add(1 as (u32)); } i = i.wrapping_add(1 as (usize)); } max_reps = brotli_max_uint32_t(reps, max_reps); } max_prefix = if max_reps > 0u32 { Log2FloorNonZero(max_reps as (u64)) } else { 0u32 }; max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix); *max_run_length_prefix = max_prefix; *out_size = 0usize; i = 0usize; while i < in_size { 0i32; if v[(i as (usize))] != 0u32 { v[(*out_size as (usize))] = (v[(i as (usize))]).wrapping_add(*max_run_length_prefix); i = i.wrapping_add(1 as (usize)); *out_size = (*out_size).wrapping_add(1 as (usize)); } else { let mut reps: u32 = 1u32; let mut k: usize; k = i.wrapping_add(1usize); while k < in_size && (v[(k as (usize))] == 0u32) { { reps = reps.wrapping_add(1 as (u32)); } k = k.wrapping_add(1 as (usize)); } i = i.wrapping_add(reps as (usize)); while reps != 0u32 { if reps < 2u32 << max_prefix { let run_length_prefix: u32 = Log2FloorNonZero(reps as (u64)); let extra_bits: u32 = reps.wrapping_sub(1u32 << run_length_prefix); v[(*out_size as (usize))] = run_length_prefix.wrapping_add(extra_bits << 9i32); *out_size = (*out_size).wrapping_add(1 as (usize)); { { break; } } } else { let extra_bits: u32 = (1u32 << max_prefix).wrapping_sub(1u32); v[(*out_size as (usize))] = max_prefix.wrapping_add(extra_bits << 9i32); reps = reps.wrapping_sub((2u32 << max_prefix).wrapping_sub(1u32)); *out_size = (*out_size).wrapping_add(1 as (usize)); } } } } } fn EncodeContextMap<AllocU32: alloc::Allocator<u32>>(m: &mut AllocU32, context_map: &[u32], context_map_size: usize, num_clusters: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { let mut i: usize; let mut rle_symbols: AllocU32::AllocatedMemory; let mut max_run_length_prefix: u32 = 6u32; let mut num_rle_symbols: usize = 0usize; static kSymbolMask: u32 = (1u32 << 9i32) - 1; let mut depths: [u8; 272] = [0; 272]; let mut bits: [u16; 272] = [0; 272]; StoreVarLenUint8(num_clusters.wrapping_sub(1usize) as u64, storage_ix, storage); if 
num_clusters == 1usize { return; } rle_symbols = if context_map_size != 0 { m.alloc_cell(context_map_size) } else { AllocU32::AllocatedMemory::default() }; MoveToFrontTransform(context_map, context_map_size, rle_symbols.slice_mut()); RunLengthCodeZeros(context_map_size, rle_symbols.slice_mut(), &mut num_rle_symbols, &mut max_run_length_prefix); let mut histogram: [u32; 272] = [0; 272]; i = 0usize; while i < num_rle_symbols { { let _rhs = 1; let _lhs = &mut histogram[(rle_symbols.slice()[(i as (usize))] & kSymbolMask) as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } i = i.wrapping_add(1 as (usize)); } { let use_rle: i32 = if !!(max_run_length_prefix > 0u32) { 1i32 } else { 0i32 }; BrotliWriteBits(1, use_rle as (u64), storage_ix, storage); if use_rle != 0 { BrotliWriteBits(4, max_run_length_prefix.wrapping_sub(1u32) as (u64), storage_ix, storage); } } BuildAndStoreHuffmanTree(&mut histogram[..], num_clusters.wrapping_add(max_run_length_prefix as (usize)), tree, &mut depths[..], &mut bits[..], storage_ix, storage); i = 0usize; while i < num_rle_symbols { { let rle_symbol: u32 = rle_symbols.slice()[(i as (usize))] & kSymbolMask; let extra_bits_val: u32 = rle_symbols.slice()[(i as (usize))] >> 9i32; BrotliWriteBits(depths[rle_symbol as (usize)] as (u8), bits[rle_symbol as (usize)] as (u64), storage_ix, storage); if rle_symbol > 0u32 && (rle_symbol <= max_run_length_prefix) { BrotliWriteBits(rle_symbol as (u8), extra_bits_val as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } BrotliWriteBits(1, 1, storage_ix, storage); m.free_cell(rle_symbols); } fn BuildAndStoreEntropyCodes<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, HistogramType: SliceWrapper<u32>> (m8: &mut AllocU8, m16: &mut AllocU16, xself: &mut BlockEncoder<AllocU8, AllocU16>, histograms: &[HistogramType], histograms_size: usize, tree: &mut [HuffmanTree], storage_ix: &mut usize, storage: &mut [u8]) { let alphabet_size: usize = (*xself).alphabet_size_; let 
table_size: usize = histograms_size.wrapping_mul(alphabet_size); (*xself).depths_ = if table_size != 0 { m8.alloc_cell(table_size) } else { AllocU8::AllocatedMemory::default() }; (*xself).bits_ = if table_size != 0 { m16.alloc_cell(table_size) } else { AllocU16::AllocatedMemory::default() }; { let mut i: usize; i = 0usize; while i < histograms_size { { let ix: usize = i.wrapping_mul(alphabet_size); BuildAndStoreHuffmanTree(&(histograms[(i as (usize))]).slice()[0..], alphabet_size, tree, &mut (*xself).depths_.slice_mut()[(ix as (usize))..], &mut (*xself).bits_.slice_mut()[(ix as (usize))..], storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } } } fn StoreSymbol<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>> (xself: &mut BlockEncoder<AllocU8, AllocU16>, symbol: usize, storage_ix: &mut usize, storage: &mut [u8]){ if (*xself).block_len_ == 0usize { let block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let block_len: u32 = (*xself).block_lengths_[(block_ix as (usize))]; let block_type: u8 = (*xself).block_types_[(block_ix as (usize))]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = (block_type as (usize)).wrapping_mul((*xself).alphabet_size_); StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); } (*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let ix: usize = (*xself).entropy_ix_.wrapping_add(symbol); BrotliWriteBits((*xself).depths_.slice()[(ix as (usize))] as (u8), (*xself).bits_.slice()[(ix as (usize))] as (u64), storage_ix, storage); } } fn CommandCopyLenCode(xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 ^ (*xself).copy_len_ >> 24i32 } fn GetInsertExtra(inscode: u16) -> u32 { kInsExtra[inscode as (usize)] } fn GetInsertBase(inscode: u16) -> u32 { kInsBase[inscode as (usize)] } fn GetCopyBase(copycode: u16) -> u32 { kCopyBase[copycode as (usize)] } fn GetCopyExtra(copycode: u16) 
-> u32 { kCopyExtra[copycode as (usize)] }

/// Emits the extra (non-Huffman) bits of a command with a single
/// BrotliWriteBits call: the insert-length extra bits occupy the low
/// positions and the copy-length extra bits are packed above them.
fn StoreCommandExtra(cmd: &Command, storage_ix: &mut usize, storage: &mut [u8]) {
  let copylen_code: u32 = CommandCopyLenCode(cmd);
  let inscode: u16 = GetInsertLengthCode(cmd.insert_len_ as usize);
  let copycode: u16 = GetCopyLengthCode(copylen_code as usize);
  let insnumextra: u32 = GetInsertExtra(inscode);
  // Each extra-bit payload is the distance of the length from its code's base.
  let insextraval: u64 = cmd.insert_len_.wrapping_sub(GetInsertBase(inscode)) as u64;
  let copyextraval: u64 = copylen_code.wrapping_sub(GetCopyBase(copycode)) as u64;
  let bits: u64 = copyextraval << insnumextra | insextraval;
  BrotliWriteBits(insnumextra.wrapping_add(GetCopyExtra(copycode)) as u8,
                  bits,
                  storage_ix,
                  storage);
}

/// Computes the literal context id from the previous two bytes `p1` (most
/// recent) and `p2` for the selected context mode — presumably the four
/// modes of RFC 7932 section 7.1; TODO confirm against ContextType's decl.
fn Context(p1: u8, p2: u8, mode: ContextType) -> u8 {
  match mode {
    // Low six bits of the previous byte.
    ContextType::CONTEXT_LSB6 => (p1 as i32 & 0x3fi32) as u8,
    // High six bits of the previous byte.
    ContextType::CONTEXT_MSB6 => (p1 as i32 >> 2i32) as u8,
    // Table-driven UTF-8 mode: p2 indexes the second half of the table.
    ContextType::CONTEXT_UTF8 => {
      (kUTF8ContextLookup[p1 as usize] as i32 |
       kUTF8ContextLookup[(p2 as i32 + 256i32) as usize] as i32) as u8
    }
    // Signed mode: 3-bit bucket of p1 in the high bits, of p2 in the low.
    ContextType::CONTEXT_SIGNED => {
      (((kSigned3BitContextLookup[p1 as usize] as i32) << 3i32) +
       kSigned3BitContextLookup[p2 as usize] as i32) as u8
    }
  }
}

// NOTE(review): StoreSymbolWithContext continues on the next source line.
fn StoreSymbolWithContext<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>>(xself: &mut BlockEncoder<AllocU8, AllocU16>, symbol: usize, context: usize, context_map: &[u32], storage_ix: &mut usize, storage: &mut [u8], context_bits: usize){ if (*xself).block_len_ == 0usize { let block_ix: usize = { (*xself).block_ix_ = (*xself).block_ix_.wrapping_add(1 as (usize)); (*xself).block_ix_ }; let block_len: u32 = (*xself).block_lengths_[(block_ix as (usize))]; let block_type: u8 = (*xself).block_types_[(block_ix as (usize))]; (*xself).block_len_ = block_len as (usize); (*xself).entropy_ix_ = block_type as (usize) << context_bits; StoreBlockSwitch(&mut (*xself).block_split_code_, block_len, block_type, 0i32, storage_ix, storage); }
(*xself).block_len_ = (*xself).block_len_.wrapping_sub(1 as (usize)); { let histo_ix: usize = context_map[((*xself).entropy_ix_.wrapping_add(context) as (usize))] as (usize); let ix: usize = histo_ix.wrapping_mul((*xself).alphabet_size_).wrapping_add(symbol); BrotliWriteBits((*xself).depths_.slice()[(ix as (usize))] as (u8), (*xself).bits_.slice()[(ix as (usize))] as (u64), storage_ix, storage); } } fn CommandCopyLen(xself: &Command) -> u32 { (*xself).copy_len_ & 0xffffffu32 } fn CommandDistanceContext(xself: &Command) -> u32 { let r: u32 = ((*xself).cmd_prefix_ as (i32) >> 6i32) as (u32); let c: u32 = ((*xself).cmd_prefix_ as (i32) & 7i32) as (u32); if (r == 0u32 || r == 2u32 || r == 4u32 || r == 7u32) && (c <= 2u32) { return c; } 3u32 } fn CleanupBlockEncoder<AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>>(m8: &mut AllocU8, m16 : &mut AllocU16, xself: &mut BlockEncoder<AllocU8, AllocU16>){ m8.free_cell(core::mem::replace(&mut (*xself).depths_, AllocU8::AllocatedMemory::default())); m16.free_cell(core::mem::replace(&mut (*xself).bits_, AllocU16::AllocatedMemory::default())); } fn JumpToByteBoundary(storage_ix: &mut usize, storage: &mut [u8]) { *storage_ix = (*storage_ix).wrapping_add(7u32 as (usize)) & !7u32 as (usize); storage[((*storage_ix >> 3i32) as (usize))] = 0i32 as (u8); } pub fn BrotliStoreMetaBlock<'a, AllocU8: alloc::Allocator<u8>, AllocU16: alloc::Allocator<u16>, AllocU32: alloc::Allocator<u32>, AllocHT: alloc::Allocator<HuffmanTree>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC: alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>, Cb> (m8: &mut AllocU8, m16: &mut AllocU16, m32: &mut AllocU32, mht: &mut AllocHT, input: &'a[u8], start_pos: usize, length: usize, mask: usize, params: &BrotliEncoderParams, mut prev_byte: u8, mut prev_byte2: u8, is_last: i32, num_direct_distance_codes: u32, distance_postfix_bits: u32, literal_context_mode: ContextType, distance_cache: &[i32; kNumDistanceCacheEntries], 
commands: &[Command], n_commands: usize, mb: &mut MetaBlockSplit<AllocU8, AllocU32, AllocHL, AllocHC, AllocHD>, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], callback: &mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, distance_postfix_bits, num_direct_distance_codes, distance_cache, recoder_state, block_split_reference(mb), params, Some(literal_context_mode), callback); } let mut pos: usize = start_pos; let mut i: usize; let num_distance_codes: usize = (16u32) .wrapping_add(num_direct_distance_codes) .wrapping_add(48u32 << distance_postfix_bits) as (usize); let mut tree: AllocHT::AllocatedMemory; let mut literal_enc: BlockEncoder<AllocU8, AllocU16>; let mut command_enc: BlockEncoder<AllocU8, AllocU16>; let mut distance_enc: BlockEncoder<AllocU8, AllocU16>; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); tree = if 2i32 * 704i32 + 1i32 != 0 { mht.alloc_cell((2i32 * 704i32 + 1i32) as (usize)) } else { AllocHT::AllocatedMemory::default() }; literal_enc = NewBlockEncoder::<AllocU8, AllocU16>(256usize, (*mb).literal_split.num_types, (*mb).literal_split.types.slice(), (*mb).literal_split.lengths.slice(), (*mb).literal_split.num_blocks); command_enc = NewBlockEncoder::<AllocU8, AllocU16>(704usize, (*mb).command_split.num_types, (*mb).command_split.types.slice(), (*mb).command_split.lengths.slice(), (*mb).command_split.num_blocks); distance_enc = NewBlockEncoder::<AllocU8, AllocU16>(num_distance_codes, (*mb).distance_split.num_types, (*mb).distance_split.types.slice(), (*mb).distance_split.lengths.slice(), (*mb).distance_split.num_blocks); BuildAndStoreBlockSwitchEntropyCodes(&mut literal_enc, tree.slice_mut(), storage_ix, storage); BuildAndStoreBlockSwitchEntropyCodes(&mut command_enc, tree.slice_mut(), storage_ix, storage); 
BuildAndStoreBlockSwitchEntropyCodes(&mut distance_enc, tree.slice_mut(), storage_ix, storage); BrotliWriteBits(2, distance_postfix_bits as (u64), storage_ix, storage); BrotliWriteBits(4, (num_direct_distance_codes >> distance_postfix_bits) as (u64), storage_ix, storage); i = 0usize; while i < (*mb).literal_split.num_types { { BrotliWriteBits(2, literal_context_mode as (u64), storage_ix, storage); } i = i.wrapping_add(1 as (usize)); } if (*mb).literal_context_map_size == 0usize { StoreTrivialContextMap((*mb).literal_histograms_size, 6, tree.slice_mut(), storage_ix, storage); } else { EncodeContextMap(m32, (*mb).literal_context_map.slice(), (*mb).literal_context_map_size, (*mb).literal_histograms_size, tree.slice_mut(), storage_ix, storage); } if (*mb).distance_context_map_size == 0usize { StoreTrivialContextMap((*mb).distance_histograms_size, 2usize, tree.slice_mut(), storage_ix, storage); } else { EncodeContextMap(m32, (*mb).distance_context_map.slice(), (*mb).distance_context_map_size, (*mb).distance_histograms_size, tree.slice_mut(), storage_ix, storage); } BuildAndStoreEntropyCodes(m8, m16, &mut literal_enc, (*mb).literal_histograms.slice(), (*mb).literal_histograms_size, tree.slice_mut(), storage_ix, storage); BuildAndStoreEntropyCodes(m8, m16, &mut command_enc, (*mb).command_histograms.slice(), (*mb).command_histograms_size, tree.slice_mut(), storage_ix, storage); BuildAndStoreEntropyCodes(m8, m16, &mut distance_enc, (*mb).distance_histograms.slice(), (*mb).distance_histograms_size, tree.slice_mut(), storage_ix, storage); { mht.free_cell(core::mem::replace(&mut tree, AllocHT::AllocatedMemory::default())); } i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let cmd_code: usize = cmd.cmd_prefix_ as (usize); StoreSymbol(&mut command_enc, cmd_code, storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); if (*mb).literal_context_map_size == 0usize { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 
0usize { { StoreSymbol(&mut literal_enc, input[((pos & mask) as (usize))] as (usize), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } else { let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { let context: usize = Context(prev_byte, prev_byte2, literal_context_mode) as (usize); let literal: u8 = input[((pos & mask) as (usize))]; StoreSymbolWithContext(&mut literal_enc, literal as (usize), context, (*mb).literal_context_map.slice(), storage_ix, storage, 6usize); prev_byte2 = prev_byte; prev_byte = literal; pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 { prev_byte2 = input[((pos.wrapping_sub(2usize) & mask) as (usize))]; prev_byte = input[((pos.wrapping_sub(1usize) & mask) as (usize))]; if cmd.cmd_prefix_ as (i32) >= 128i32 { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 24i32; let distextra: usize = (cmd.dist_extra_ & 0xffffffu32) as (usize); if (*mb).distance_context_map_size == 0usize { StoreSymbol(&mut distance_enc, dist_code, storage_ix, storage); } else { let context: usize = CommandDistanceContext(&cmd) as (usize); StoreSymbolWithContext(&mut distance_enc, dist_code, context, (*mb).distance_context_map.slice(), storage_ix, storage, 2usize); } BrotliWriteBits(distnumextra as (u8), distextra as u64, storage_ix, storage); } } } i = i.wrapping_add(1 as (usize)); } CleanupBlockEncoder(m8, m16, &mut distance_enc); CleanupBlockEncoder(m8, m16, &mut command_enc); CleanupBlockEncoder(m8, m16, &mut literal_enc); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn BuildHistograms(input: &[u8], start_pos: usize, mask: usize, commands: &[Command], n_commands: usize, lit_histo: &mut HistogramLiteral, cmd_histo: &mut HistogramCommand, dist_histo: &mut HistogramDistance) { let mut pos: usize = start_pos; let mut i: usize; i = 
0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let mut j: usize; HistogramAddItem(cmd_histo, cmd.cmd_prefix_ as (usize)); j = cmd.insert_len_ as (usize); while j != 0usize { { HistogramAddItem(lit_histo, input[((pos & mask) as (usize))] as (usize)); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { HistogramAddItem(dist_histo, cmd.dist_prefix_ as (usize)); } } i = i.wrapping_add(1 as (usize)); } } fn StoreDataWithHuffmanCodes(input: &[u8], start_pos: usize, mask: usize, commands: &[Command], n_commands: usize, lit_depth: &[u8], lit_bits: &[u16], cmd_depth: &[u8], cmd_bits: &[u16], dist_depth: &[u8], dist_bits: &[u16], storage_ix: &mut usize, storage: &mut [u8]) { let mut pos: usize = start_pos; let mut i: usize; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as (usize))].clone(); let cmd_code: usize = cmd.cmd_prefix_ as (usize); let mut j: usize; BrotliWriteBits(cmd_depth[(cmd_code as (usize))] as (u8), cmd_bits[(cmd_code as (usize))] as (u64), storage_ix, storage); StoreCommandExtra(&cmd, storage_ix, storage); j = cmd.insert_len_ as (usize); while j != 0usize { { let literal: u8 = input[((pos & mask) as (usize))]; BrotliWriteBits(lit_depth[(literal as (usize))] as (u8), lit_bits[(literal as (usize))] as (u64), storage_ix, storage); pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); if CommandCopyLen(&cmd) != 0 && (cmd.cmd_prefix_ as (i32) >= 128i32) { let dist_code: usize = cmd.dist_prefix_ as (usize); let distnumextra: u32 = cmd.dist_extra_ >> 24i32; let distextra: u32 = cmd.dist_extra_ & 0xffffffu32; BrotliWriteBits(dist_depth[(dist_code as (usize))] as (u8), dist_bits[(dist_code as (usize))] as (u64), storage_ix, storage); BrotliWriteBits(distnumextra as (u8), distextra 
as (u64), storage_ix, storage); } } i = i.wrapping_add(1 as (usize)); } } fn nop<'a>(_data:&[interface::Command<InputReference>]){ } pub fn BrotliStoreMetaBlockTrivial<'a, AllocU32:alloc::Allocator<u32>, Cb> (m32:&mut AllocU32, input: &'a [u8], start_pos: usize, length: usize, mask: usize, params: &BrotliEncoderParams, is_last: i32, distance_cache: &[i32; kNumDistanceCacheEntries], commands: &[Command], n_commands: usize, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], f:&mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, 0, 0, distance_cache, recoder_state, block_split_nop(), params, Some(ContextType::CONTEXT_LSB6), f); } let mut lit_histo: HistogramLiteral = HistogramLiteral::default(); let mut cmd_histo: HistogramCommand = HistogramCommand::default(); let mut dist_histo: HistogramDistance = HistogramDistance::default(); let mut lit_depth: [u8; 256] = [0; 256]; // FIXME these zero-initializations are costly let mut lit_bits: [u16; 256] = [0; 256]; let mut cmd_depth: [u8; 704] = [0; 704]; let mut cmd_bits: [u16; 704] = [0; 704]; let mut dist_depth: [u8; 64] = [0; 64]; let mut dist_bits: [u16; 64] = [0; 64]; const MAX_HUFFMAN_TREE_SIZE: usize = (2i32 * 704i32 + 1i32) as usize; let mut tree: [HuffmanTree; MAX_HUFFMAN_TREE_SIZE] = [HuffmanTree { total_count_: 0, index_left_: 0, index_right_or_value_: 0, }; MAX_HUFFMAN_TREE_SIZE]; StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliWriteBits(13, 0, storage_ix, storage); BuildAndStoreHuffmanTree(lit_histo.slice_mut(), 256, &mut tree[..], &mut lit_depth[..], &mut lit_bits[..], storage_ix, storage); BuildAndStoreHuffmanTree(cmd_histo.slice_mut(), 704usize, &mut tree[..], 
&mut cmd_depth[..], &mut cmd_bits[..], storage_ix, storage); BuildAndStoreHuffmanTree(dist_histo.slice_mut(), 64usize, &mut tree[..], &mut dist_depth[..], &mut dist_bits[..], storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, &mut lit_depth[..], &mut lit_bits[..], &mut cmd_depth[..], &mut cmd_bits[..], &mut dist_depth[..], &mut dist_bits[..], storage_ix, storage); if is_last != 0 { JumpToByteBoundary(storage_ix, storage); } } fn StoreStaticCommandHuffmanTree(storage_ix: &mut usize, storage: &mut [u8]) { BrotliWriteBits(56, 0x926244u32 as (u64) << 32i32 | 0x16307003, storage_ix, storage); BrotliWriteBits(3, 0x0u64, storage_ix, storage); } fn StoreStaticDistanceHuffmanTree(storage_ix: &mut usize, storage: &mut [u8]) { BrotliWriteBits(28, 0x369dc03u64, storage_ix, storage); } struct BlockSplitRef<'a> { types: &'a [u8], lengths:&'a [u32], num_types: u32, } impl<'a> Default for BlockSplitRef<'a> { fn default() -> Self { BlockSplitRef { types:&[], lengths:&[], num_types:1, } } } #[derive(Default)] struct MetaBlockSplitRefs<'a> { btypel : BlockSplitRef<'a>, literal_context_map:&'a [u32], btypec : BlockSplitRef<'a>, btyped : BlockSplitRef<'a>, distance_context_map:&'a [u32], } fn block_split_nop() -> MetaBlockSplitRefs<'static> { return MetaBlockSplitRefs::default() } fn block_split_reference<'a, AllocU8: alloc::Allocator<u8>, AllocU32: alloc::Allocator<u32>, AllocHL: alloc::Allocator<HistogramLiteral>, AllocHC: alloc::Allocator<HistogramCommand>, AllocHD: alloc::Allocator<HistogramDistance>> (mb:&'a MetaBlockSplit<AllocU8, AllocU32, AllocHL, AllocHC, AllocHD>) -> MetaBlockSplitRefs<'a> { return MetaBlockSplitRefs::<'a> { btypel:BlockSplitRef { types: mb.literal_split.types.slice().split_at(mb.literal_split.num_blocks).0, lengths:mb.literal_split.lengths.slice().split_at(mb.literal_split.num_blocks).0, num_types:mb.literal_split.num_types as u32, }, literal_context_map: 
mb.literal_context_map.slice().split_at(mb.literal_context_map_size).0, btypec:BlockSplitRef { types: mb.command_split.types.slice().split_at(mb.command_split.num_blocks).0, lengths:mb.command_split.lengths.slice().split_at(mb.command_split.num_blocks).0, num_types:mb.command_split.num_types as u32, }, btyped:BlockSplitRef { types: mb.distance_split.types.slice().split_at(mb.distance_split.num_blocks).0, lengths:mb.distance_split.lengths.slice().split_at(mb.distance_split.num_blocks).0, num_types:mb.distance_split.num_types as u32, }, distance_context_map: mb.distance_context_map.slice().split_at(mb.distance_context_map_size).0, } } #[derive(Clone, Copy)] pub struct RecoderState { pub num_bytes_encoded : usize, } impl RecoderState { pub fn new() -> Self { RecoderState{ num_bytes_encoded:0, } } } pub fn BrotliStoreMetaBlockFast<Cb, AllocU32:alloc::Allocator<u32>, AllocHT: alloc::Allocator<HuffmanTree>>(m : &mut AllocHT, m32: &mut AllocU32, input: &[u8], start_pos: usize, length: usize, mask: usize, params: &BrotliEncoderParams, is_last: i32, dist_cache: &[i32; kNumDistanceCacheEntries], commands: &[Command], n_commands: usize, recoder_state: &mut RecoderState, storage_ix: &mut usize, storage: &mut [u8], cb: &mut Cb) where Cb: FnMut(&[interface::Command<InputReference>]) { let (input0,input1) = InputPairFromMaskedInput(input, start_pos, length, mask); if params.log_meta_block { LogMetaBlock(m32, commands.split_at(n_commands).0, input0, input1, 0, 0, dist_cache, recoder_state, block_split_nop(), params, Some(ContextType::CONTEXT_LSB6), cb); } StoreCompressedMetaBlockHeader(is_last, length, storage_ix, storage); BrotliWriteBits(13, 0, storage_ix, storage); if n_commands <= 128usize { let mut histogram: [u32; 256] = [0; 256]; let mut pos: usize = start_pos; let mut num_literals: usize = 0usize; let mut i: usize; let mut lit_depth: [u8; 256] = [0; 256]; let mut lit_bits: [u16; 256] = [0; 256]; i = 0usize; while i < n_commands { { let cmd: Command = commands[(i as 
(usize))].clone(); let mut j: usize; j = cmd.insert_len_ as (usize); while j != 0usize { { { let _rhs = 1; let _lhs = &mut histogram[input[((pos & mask) as (usize))] as (usize)]; *_lhs = (*_lhs).wrapping_add(_rhs as (u32)); } pos = pos.wrapping_add(1 as (usize)); } j = j.wrapping_sub(1 as (usize)); } num_literals = num_literals.wrapping_add(cmd.insert_len_ as (usize)); pos = pos.wrapping_add(CommandCopyLen(&cmd) as (usize)); } i = i.wrapping_add(1 as (usize)); } BrotliBuildAndStoreHuffmanTreeFast(m, &mut histogram[..], num_literals, 8usize, &mut lit_depth[..], &mut lit_bits[..], storage_ix, storage); StoreStaticCommandHuffmanTree(storage_ix, storage); StoreStaticDistanceHuffmanTree(storage_ix, storage); StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands, &mut lit_depth[..], &mut lit_bits[..], &kStaticCommandCodeDepth[..], &kStaticCommandCodeBits[..], &kStaticDistanceCodeDepth[..], &kStaticDistanceCodeBits[..], storage_ix, storage); } else { let mut lit_histo: HistogramLiteral = HistogramLiteral::default(); let mut cmd_histo: HistogramCommand = HistogramCommand::default(); let mut dist_histo: HistogramDistance = HistogramDistance::default(); let mut lit_depth: [u8; 256] = [0; 256]; let mut lit_bits: [u16; 256] = [0; 256]; let mut cmd_depth: [u8; 704] = [0; 704]; let mut cmd_bits: [u16; 704] = [0; 704]; let mut dist_depth: [u8; 64] = [0; 64]; let mut dist_bits: [u16; 64] = [0; 64]; BuildHistograms(input, start_pos, mask, commands, n_commands, &mut lit_histo, &mut cmd_histo, &mut dist_histo); BrotliBuildAndStoreHuffmanTreeFast(m, lit_histo.slice(), lit_histo.total_count_, 8usize, &mut lit_depth[..], &mut lit_bits[..], storage_ix, storage); BrotliBuildAndStoreHuffmanTreeFast(m, cmd_histo.slice(), cmd_histo.total_count_, 10usize, &mut cmd_depth[..], &mut cmd_bits[..], storage_ix, storage); BrotliBuildAndStoreHuffmanTreeFast(m, dist_histo.slice(), dist_histo.total_count_, 6usize, &mut dist_depth[..], &mut dist_bits[..], storage_ix, storage); 
        // Emit the literal/command/distance streams using the Huffman codes
        // built just above, then byte-align the stream if this meta-block is
        // the last one.
        StoreDataWithHuffmanCodes(input, start_pos, mask, commands, n_commands,
                                  &mut lit_depth[..], &mut lit_bits[..],
                                  &mut cmd_depth[..], &mut cmd_bits[..],
                                  &mut dist_depth[..], &mut dist_bits[..],
                                  storage_ix, storage);
    }
    if is_last != 0 {
        JumpToByteBoundary(storage_ix, storage);
    }
}

/// Writes the header of an uncompressed ("stored") meta-block of `length`
/// bytes: a 0 bit, then the nibble-count and length bits produced by
/// `BrotliEncodeMlen`, then a 1 bit flagging the block as uncompressed
/// (meta-block header layout per the Brotli format, RFC 7932).
fn BrotliStoreUncompressedMetaBlockHeader(length: usize,
                                          storage_ix: &mut usize,
                                          storage: &mut [u8]) {
    let mut lenbits: u64 = 0;
    let mut nlenbits: u32 = 0;
    let mut nibblesbits: u32 = 0;
    // ISLAST = 0: an uncompressed meta-block cannot be the final one.
    BrotliWriteBits(1, 0, storage_ix, storage);
    BrotliEncodeMlen(length as u32, &mut lenbits, &mut nlenbits, &mut nibblesbits);
    BrotliWriteBits(2, nibblesbits as u64, storage_ix, storage);
    BrotliWriteBits(nlenbits as u8, lenbits as u64, storage_ix, storage);
    // ISUNCOMPRESSED = 1.
    BrotliWriteBits(1, 1, storage_ix, storage);
}

/// Resolves `len` bytes starting at `position` in the ring buffer `input`
/// (indexed modulo `mask + 1`) into at most two contiguous slices.
/// The second slice is empty unless the range wraps past the buffer end.
fn InputPairFromMaskedInput<'a>(input: &'a [u8],
                                position: usize,
                                len: usize,
                                mask: usize)
                                -> (&'a [u8], &'a [u8]) {
    let masked_pos: usize = position & mask;
    if masked_pos.wrapping_add(len) > mask.wrapping_add(1usize) {
        // Wrapped: first piece runs to the buffer end, the remainder
        // restarts at index 0.
        let len1: usize = mask.wrapping_add(1usize).wrapping_sub(masked_pos);
        return (&input[masked_pos..(masked_pos + len1)],
                &input[0..len.wrapping_sub(len1)]);
    }
    return (&input[masked_pos..masked_pos + len], &[]);
}

/// Stores `len` bytes from the masked `input` ring buffer as an
/// uncompressed meta-block: header, byte alignment, then the raw bytes
/// (copied in up to two pieces when the range wraps the ring buffer).
///
/// When `params.log_meta_block` is set and `suppress_meta_block_logging`
/// is false, the stored range is also reported to `cb` through
/// `LogMetaBlock` as a single insert-only `Command`. If `is_final_block`
/// is nonzero, two 1 bits plus byte padding are appended afterwards,
/// terminating the stream with an empty last meta-block.
pub fn BrotliStoreUncompressedMetaBlock<Cb, AllocU32: alloc::Allocator<u32>>(
    m32: &mut AllocU32,
    is_final_block: i32,
    input: &[u8],
    position: usize,
    mask: usize,
    params: &BrotliEncoderParams,
    len: usize,
    recoder_state: &mut RecoderState,
    storage_ix: &mut usize,
    storage: &mut [u8],
    suppress_meta_block_logging: bool,
    cb: &mut Cb)
    where Cb: FnMut(&[interface::Command<InputReference>])
{
    let (input0, input1) = InputPairFromMaskedInput(input, position, len, mask);
    BrotliStoreUncompressedMetaBlockHeader(len, storage_ix, storage);
    // The raw bytes must begin on a byte boundary.
    JumpToByteBoundary(storage_ix, storage);
    let dst_start0 = ((*storage_ix >> 3i32) as (usize));
    storage[dst_start0..(dst_start0 + input0.len())].clone_from_slice(input0);
    *storage_ix = (*storage_ix).wrapping_add(input0.len() << 3i32);
    let dst_start1 = ((*storage_ix >> 3i32) as (usize));
    storage[dst_start1..(dst_start1 + input1.len())].clone_from_slice(input1);
    *storage_ix = (*storage_ix).wrapping_add(input1.len() << 3i32);
    BrotliWriteBitsPrepareStorage(*storage_ix, storage);
    if params.log_meta_block && !suppress_meta_block_logging {
        // Represent the copied range as one insert-only command for the
        // recorder callback.
        let cmds = [Command { insert_len_: len as u32,
                              copy_len_: 0,
                              dist_extra_: 0,
                              cmd_prefix_: 0,
                              dist_prefix_: 0 }];
        LogMetaBlock(m32,
                     &cmds,
                     input0,
                     input1,
                     0,
                     0,
                     &[0i32, 0i32, 0i32, 0i32],
                     recoder_state,
                     block_split_nop(),
                     params,
                     None,
                     cb);
    }
    if is_final_block != 0 {
        // ISLAST = 1, ISLASTEMPTY = 1: append an empty final meta-block.
        BrotliWriteBits(1u8, 1u64, storage_ix, storage);
        BrotliWriteBits(1u8, 1u64, storage_ix, storage);
        JumpToByteBoundary(storage_ix, storage);
    }
}

/// Emits a zero-length meta-block (the 6-bit pattern `6`) and pads to a
/// byte boundary — presumably used as a sync/flush point that does not
/// mark the stream as finished (TODO confirm against the format spec).
pub fn BrotliStoreSyncMetaBlock(storage_ix: &mut usize, storage: &mut [u8]) {
    BrotliWriteBits(6, 6, storage_ix, storage);
    JumpToByteBoundary(storage_ix, storage);
}
// Pre-1.0 Rust gfx-rs example: renders a Perlin-noise-displaced terrain mesh.
#![feature(phase)]
#![crate_name = "terrain"]

extern crate cgmath;
extern crate gfx;
#[phase(plugin)]
extern crate gfx_macros;
extern crate glfw;
extern crate native;
extern crate time;
extern crate genmesh;
extern crate noise;

use cgmath::FixedArray;
use cgmath::{Matrix4, Point3, Vector3};
use cgmath::{Transform, AffineMatrix3};
use gfx::{Device, DeviceHelper};
use glfw::Context;
use genmesh::{Vertices, MapToVertices, Triangulate};
use genmesh::generators::Plane;
use time::precise_time_s;
use noise::source::Perlin;
use noise::source::Source;

// One terrain vertex: a 3D position plus a per-vertex color (derived from
// the height in `calculate_color`). The `#[name = ...]` attributes bind the
// fields to the matching GLSL attribute names.
#[vertex_format]
struct Vertex {
    #[name = "a_Pos"]
    pos: [f32, ..3],
    #[name = "a_Color"]
    color: [f32, ..3],
}

impl Clone for Vertex {
    fn clone(&self) -> Vertex {
        Vertex {
            pos: self.pos,
            color: self.color,
        }
    }
}

// The shader_param attribute makes sure the following struct can be used to
// pass parameters to a shader. Its argument is the name of the type that will
// be generated to represent the program. Search for link_program below to
// see how it's used.
#[shader_param(MyProgram)]
struct Params {
    #[name = "u_Model"]
    model: [[f32, ..4], ..4],
    #[name = "u_View"]
    view: [[f32, ..4], ..4],
    #[name = "u_Proj"]
    proj: [[f32, ..4], ..4],
}

// Vertex shader (GLSL 1.20 and 1.50 variants): applies the
// projection * view * model transform and forwards the vertex color.
static VERTEX_SRC: gfx::ShaderSource = shaders! {
GLSL_120: b" #version 120 attribute vec3 a_Pos; attribute vec3 a_Color; varying vec3 v_Color; uniform mat4 u_Model; uniform mat4 u_View; uniform mat4 u_Proj; void main() { v_Color = a_Color; gl_Position = u_Proj * u_View * u_Model * vec4(a_Pos, 1.0); } "
GLSL_150: b" #version 150 core in vec3 a_Pos; in vec3 a_Color; out vec3 v_Color; uniform mat4 u_Model; uniform mat4 u_View; uniform mat4 u_Proj; void main() { v_Color = a_Color; gl_Position = u_Proj * u_View * u_Model * vec4(a_Pos, 1.0); } "
};

// Fragment shader: passes the interpolated vertex color straight through.
static FRAGMENT_SRC: gfx::ShaderSource = shaders!
{
GLSL_120: b" #version 120 varying vec3 v_Color; out vec4 o_Color; void main() { o_Color = vec4(v_Color, 1.0); } "
GLSL_150: b" #version 150 core in vec3 v_Color; out vec4 o_Color; void main() { o_Color = vec4(v_Color, 1.0); } "
};

// We need to run on the main thread, so ensure we are using the `native` runtime. This is
// technically not needed, since this is the default, but it's not guaranteed.
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
    native::start(argc, argv, main)
}

// Maps a terrain height to a flat color band:
//  > 8.0 -> white, > 0.0 -> gray, > -5.0 -> green, otherwise blue.
fn calculate_color(height: f32) -> [f32, ..3] {
    if height > 8.0 {
        [0.9, 0.9, 0.9] // white
    } else if height > 0.0 {
        [0.7, 0.7, 0.7] // gray
    } else if height > -5.0 {
        [0.2, 0.7, 0.2] // green
    } else {
        [0.2, 0.2, 0.7] // blue
    }
}

fn main() {
    // Window and GL context setup via GLFW (OpenGL 3.2 core profile).
    let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap();
    glfw.window_hint(glfw::ContextVersion(3, 2));
    glfw.window_hint(glfw::OpenglForwardCompat(true));
    glfw.window_hint(glfw::OpenglProfile(glfw::OpenGlCoreProfile));

    let (window, events) = glfw
        .create_window(800, 600, "Terrain example", glfw::Windowed)
        .expect("Failed to create GLFW window.");

    window.make_current();
    glfw.set_error_callback(glfw::FAIL_ON_ERRORS);
    window.set_key_polling(true);

    // Render target sized to the framebuffer, device and renderer handles,
    // and a draw state with a less-equal depth test.
    let (w, h) = window.get_framebuffer_size();
    let frame = gfx::Frame::new(w as u16, h as u16);
    let mut device = gfx::GlDevice::new(|s| glfw.get_proc_address(s));
    let mut renderer = device.create_renderer();
    let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true);

    // Build the terrain: a 256x256-subdivided plane whose z coordinate is
    // displaced by 3D Perlin noise; color follows the height bands above.
    let noise = Perlin::new();
    let vertex_data: Vec<Vertex> = Plane::subdivide(256, 256)
        .vertex(|(x, y)| {
            let h = noise.get(x, y, 0.0) * 32.0;
            Vertex {
                pos: [25.0 * x, 25.0 * y, h],
                color: calculate_color(h),
            }
        })
        .triangulate()
        .vertices()
        .collect();

    let mesh = device.create_mesh(vertex_data);
    let slice = mesh.get_slice(gfx::TriangleList);
    let prog: MyProgram = device
        .link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone())
        .unwrap();

    // Shader parameters; the view matrix is recomputed every frame in the
    // render loop below.
    let aspect = w as f32 / h as f32;
    let mut data = Params {
        model: Matrix4::identity().into_fixed(),
view: Matrix4::identity().into_fixed(), proj: cgmath::perspective(cgmath::deg(60.0f32), aspect, 0.1, 1000.0).into_fixed(), }; let clear_data = gfx::ClearData { color: Some([0.3, 0.3, 0.3, 1.0]), depth: Some(1.0), stencil: None, }; while !window.should_close() { glfw.poll_events(); for (_, event) in glfw::flush_messages(&events) { match event { glfw::KeyEvent(glfw::KeyEscape, _, glfw::Press, _) => window.set_should_close(true), _ => {}, } } let time = precise_time_s() as f32; let x = time.sin(); let y = time.cos(); let view: AffineMatrix3<f32> = Transform::look_at( &Point3::new(x * 32.0, y * 32.0, 16.0), &Point3::new(0.0, 0.0, 0.0), &Vector3::unit_z(), ); data.view = view.mat.into_fixed(); renderer.reset(); renderer.clear(clear_data, &frame); renderer.draw(&mesh, slice, &frame, (&prog, &data), &state).unwrap(); device.submit(renderer.as_buffer()); window.swap_buffers(); } } move to an indexed plane #![feature(phase)] #![crate_name = "terrain"] extern crate cgmath; extern crate gfx; #[phase(plugin)] extern crate gfx_macros; extern crate glfw; extern crate native; extern crate time; extern crate genmesh; extern crate noise; use cgmath::FixedArray; use cgmath::{Matrix4, Point3, Vector3}; use cgmath::{Transform, AffineMatrix3}; use gfx::{Device, DeviceHelper}; use glfw::Context; use genmesh::{Vertices, Triangulate}; use genmesh::generators::{Plane, SharedVertex, IndexedPolygon}; use time::precise_time_s; use noise::source::Perlin; use noise::source::Source; #[vertex_format] struct Vertex { #[name = "a_Pos"] pos: [f32, ..3], #[name = "a_Color"] color: [f32, ..3], } impl Clone for Vertex { fn clone(&self) -> Vertex { Vertex { pos: self.pos, color: self.color } } } // The shader_param attribute makes sure the following struct can be used to // pass parameters to a shader. Its argument is the name of the type that will // be generated to represent your the program. Search for link_program below, to // see how it's used. 
#[shader_param(MyProgram)] struct Params { #[name = "u_Model"] model: [[f32, ..4], ..4], #[name = "u_View"] view: [[f32, ..4], ..4], #[name = "u_Proj"] proj: [[f32, ..4], ..4], } static VERTEX_SRC: gfx::ShaderSource = shaders! { GLSL_120: b" #version 120 attribute vec3 a_Pos; attribute vec3 a_Color; varying vec3 v_Color; uniform mat4 u_Model; uniform mat4 u_View; uniform mat4 u_Proj; void main() { v_Color = a_Color; gl_Position = u_Proj * u_View * u_Model * vec4(a_Pos, 1.0); } " GLSL_150: b" #version 150 core in vec3 a_Pos; in vec3 a_Color; out vec3 v_Color; uniform mat4 u_Model; uniform mat4 u_View; uniform mat4 u_Proj; void main() { v_Color = a_Color; gl_Position = u_Proj * u_View * u_Model * vec4(a_Pos, 1.0); } " }; static FRAGMENT_SRC: gfx::ShaderSource = shaders! { GLSL_120: b" #version 120 varying vec3 v_Color; out vec4 o_Color; void main() { o_Color = vec4(v_Color, 1.0); } " GLSL_150: b" #version 150 core in vec3 v_Color; out vec4 o_Color; void main() { o_Color = vec4(v_Color, 1.0); } " }; // We need to run on the main thread, so ensure we are using the `native` runtime. This is // technically not needed, since this is the default, but it's not guaranteed. 
#[start] fn start(argc: int, argv: *const *const u8) -> int { native::start(argc, argv, main) } fn calculate_color(height: f32) -> [f32, ..3] { if height > 8.0 { [0.9, 0.9, 0.9] // white } else if height > 0.0 { [0.7, 0.7, 0.7] // greay } else if height > -5.0 { [0.2, 0.7, 0.2] // green } else { [0.2, 0.2, 0.7] // blue } } fn main() { let glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap(); glfw.window_hint(glfw::ContextVersion(3, 2)); glfw.window_hint(glfw::OpenglForwardCompat(true)); glfw.window_hint(glfw::OpenglProfile(glfw::OpenGlCoreProfile)); let (window, events) = glfw .create_window(800, 600, "Terrain example", glfw::Windowed) .expect("Failed to create GLFW window."); window.make_current(); glfw.set_error_callback(glfw::FAIL_ON_ERRORS); window.set_key_polling(true); let (w, h) = window.get_framebuffer_size(); let frame = gfx::Frame::new(w as u16, h as u16); let mut device = gfx::GlDevice::new(|s| glfw.get_proc_address(s)); let mut renderer = device.create_renderer(); let state = gfx::DrawState::new().depth(gfx::state::LessEqual, true); let noise = Perlin::new(); let plane = Plane::subdivide(256, 256); let vertex_data: Vec<Vertex> = plane.shared_vertex_iter() .map(|(x, y)| { let h = noise.get(x, y, 0.0) * 32.0; Vertex { pos: [25.0 * x, 25.0 * y, h], color: calculate_color(h), } }) .collect(); let index_data: Vec<u32> = plane.indexed_polygon_iter() .triangulate() .vertices() .map(|i| i as u32) .collect(); let slice = { let buf = device.create_buffer_static(&index_data); gfx::IndexSlice32(gfx::TriangleList, buf, 0, index_data.len() as u32) }; let mesh = device.create_mesh(vertex_data); let prog: MyProgram = device .link_program(VERTEX_SRC.clone(), FRAGMENT_SRC.clone()) .unwrap(); let aspect = w as f32 / h as f32; let mut data = Params { model: Matrix4::identity().into_fixed(), view: Matrix4::identity().into_fixed(), proj: cgmath::perspective(cgmath::deg(60.0f32), aspect, 0.1, 1000.0).into_fixed(), }; let clear_data = gfx::ClearData { color: Some([0.3, 0.3, 0.3, 
1.0]), depth: Some(1.0), stencil: None, }; while !window.should_close() { glfw.poll_events(); for (_, event) in glfw::flush_messages(&events) { match event { glfw::KeyEvent(glfw::KeyEscape, _, glfw::Press, _) => window.set_should_close(true), _ => {}, } } let time = precise_time_s() as f32; let x = time.sin(); let y = time.cos(); let view: AffineMatrix3<f32> = Transform::look_at( &Point3::new(x * 32.0, y * 32.0, 16.0), &Point3::new(0.0, 0.0, 0.0), &Vector3::unit_z(), ); data.view = view.mat.into_fixed(); renderer.reset(); renderer.clear(clear_data, &frame); renderer.draw(&mesh, slice, &frame, (&prog, &data), &state).unwrap(); device.submit(renderer.as_buffer()); window.swap_buffers(); } }
//! Partially ordered elements with a least upper bound. //! //! Lattices form the basis of differential dataflow's efficient execution in the presence of //! iterative sub-computations. All logical times in differential dataflow must implement the //! `Lattice` trait, and all reasoning in operators are done it terms of `Lattice` methods. use timely::order::PartialOrder; /// A bounded partially ordered type supporting joins and meets. pub trait Lattice : PartialOrder { /// The smallest element of the type. /// /// #Examples /// /// ``` /// use differential_dataflow::lattice::Lattice; /// /// let min = <usize as Lattice>::minimum(); /// assert_eq!(min, usize::min_value()); /// ``` fn minimum() -> Self; /// The largest element of the type. /// /// #Examples /// /// ``` /// use differential_dataflow::lattice::Lattice; /// /// let max = <usize as Lattice>::maximum(); /// assert_eq!(max, usize::max_value()); /// ``` fn maximum() -> Self; /// The smallest element greater than or equal to both arguments. /// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time1 = Product::new(3, 7); /// let time2 = Product::new(4, 6); /// let join = time1.join(&time2); /// /// assert_eq!(join, Product::new(4, 7)); /// # } /// ``` fn join(&self, &Self) -> Self; /// The largest element less than or equal to both arguments. 
/// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time1 = Product::new(3, 7); /// let time2 = Product::new(4, 6); /// let meet = time1.meet(&time2); /// /// assert_eq!(meet, Product::new(3, 6)); /// # } /// ``` fn meet(&self, &Self) -> Self; /// Advances self to the largest time indistinguishable under `frontier`. /// /// This method produces the "largest" lattice element with the property that for every /// lattice element greater than some element of `frontier`, both the result and `self` /// compare identically to the lattice element. The result is the "largest" element in /// the sense that any other element with the same property (compares identically to times /// greater or equal to `frontier`) must be less or equal to the result. /// /// When provided an empty frontier, the result is `<Self as Lattice>::maximum()`. It should /// perhaps be distinguished by an `Option<Self>` type, but the `None` case only happens /// when `frontier` is empty, which the caller can see for themselves if they want to be /// clever. /// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time = Product::new(3, 7); /// let frontier = vec![Product::new(4, 8), Product::new(5, 3)]; /// let advanced = time.advance_by(&frontier[..]); /// /// // `time` and `advanced` are indistinguishable to elements >= an element of `frontier` /// for i in 0 .. 10 { /// for j in 0 .. 10 { /// let test = Product::new(i, j); /// // for `test` in the future of `frontier` .. 
/// if frontier.iter().any(|t| t.less_equal(&test)) { /// assert_eq!(time.less_equal(&test), advanced.less_equal(&test)); /// } /// } /// } /// /// assert_eq!(advanced, Product::new(4, 7)); /// # } /// ``` #[inline(always)] fn advance_by(&self, frontier: &[Self]) -> Self where Self: Sized{ if frontier.len() > 0 { let mut result = self.join(&frontier[0]); for f in &frontier[1..] { result = result.meet(&self.join(f)); } result } else { Self::maximum() } } } // /// A carrier trait for totally ordered lattices. // /// // /// Types that implement `TotalOrder` are stating that their `Lattice` is in fact a total order. // /// This type is only used to restrict implementations to certain types of lattices. // /// // /// This trait is automatically implemented for integer scalars, and for products of these types // /// with "empty" timestamps (e.g. `RootTimestamp` and `()`). Be careful implementing this trait // /// for your own timestamp types, as it may lead to the applicability of incorrect implementations. // /// // /// Note that this trait is distinct from `Ord`; many implementors of `Lattice` also implement // /// `Ord` so that they may be sorted, deduplicated, etc. This implementation neither derives any // /// information from an `Ord` implementation nor informs it in any way. // /// // /// #Examples // /// // /// ``` // /// use differential_dataflow::lattice::TotalOrder; // /// // /// // The `join` and `meet` of totally ordered elements are always one of the two. 
// /// fn invariant<T: TotalOrder>(elt1: T, elt2: T) { // /// if elt1.less_equal(&elt2) { // /// assert!(elt1.meet(&elt2) == elt1); // /// assert!(elt1.join(&elt2) == elt2); // /// } // /// else { // /// assert!(elt1.meet(&elt2) == elt2); // /// assert!(elt1.join(&elt2) == elt1); // /// } // /// } // /// ``` // pub trait TotalOrder : Lattice { } use timely::progress::nested::product::Product; impl<T1: Lattice, T2: Lattice> Lattice for Product<T1, T2> { #[inline(always)] fn minimum() -> Self { Product::new(T1::minimum(), T2::minimum()) } #[inline(always)] fn maximum() -> Self { Product::new(T1::maximum(), T2::maximum()) } #[inline(always)] fn join(&self, other: &Product<T1, T2>) -> Product<T1, T2> { Product { outer: self.outer.join(&other.outer), inner: self.inner.join(&other.inner), } } #[inline(always)] fn meet(&self, other: &Product<T1, T2>) -> Product<T1, T2> { Product { outer: self.outer.meet(&other.outer), inner: self.inner.meet(&other.inner), } } } macro_rules! implement_lattice { ($index_type:ty, $minimum:expr, $maximum:expr) => ( impl Lattice for $index_type { #[inline(always)] fn minimum() -> Self { $minimum } #[inline(always)] fn maximum() -> Self { $maximum } #[inline(always)] fn join(&self, other: &Self) -> Self { ::std::cmp::max(*self, *other) } #[inline(always)] fn meet(&self, other: &Self) -> Self { ::std::cmp::min(*self, *other) } } ) } use timely::progress::timestamp::RootTimestamp; use std::time::Duration; implement_lattice!(RootTimestamp, RootTimestamp, RootTimestamp); implement_lattice!(Duration, Duration::new(0, 0), Duration::new(u64::max_value(), u32::max_value())); implement_lattice!(usize, usize::min_value(), usize::max_value()); implement_lattice!(u64, u64::min_value(), u64::max_value()); implement_lattice!(u32, u32::min_value(), u32::max_value()); implement_lattice!(i32, i32::min_value(), i32::max_value()); implement_lattice!((), (), ()); // impl TotalOrder for RootTimestamp { } // impl TotalOrder for usize { } // impl TotalOrder for u64 { 
} // impl TotalOrder for u32 { } // impl TotalOrder for i32 { } // impl TotalOrder for () { } fix lattice for Duration //! Partially ordered elements with a least upper bound. //! //! Lattices form the basis of differential dataflow's efficient execution in the presence of //! iterative sub-computations. All logical times in differential dataflow must implement the //! `Lattice` trait, and all reasoning in operators are done it terms of `Lattice` methods. use timely::order::PartialOrder; /// A bounded partially ordered type supporting joins and meets. pub trait Lattice : PartialOrder { /// The smallest element of the type. /// /// #Examples /// /// ``` /// use differential_dataflow::lattice::Lattice; /// /// let min = <usize as Lattice>::minimum(); /// assert_eq!(min, usize::min_value()); /// ``` fn minimum() -> Self; /// The largest element of the type. /// /// #Examples /// /// ``` /// use differential_dataflow::lattice::Lattice; /// /// let max = <usize as Lattice>::maximum(); /// assert_eq!(max, usize::max_value()); /// ``` fn maximum() -> Self; /// The smallest element greater than or equal to both arguments. /// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time1 = Product::new(3, 7); /// let time2 = Product::new(4, 6); /// let join = time1.join(&time2); /// /// assert_eq!(join, Product::new(4, 7)); /// # } /// ``` fn join(&self, &Self) -> Self; /// The largest element less than or equal to both arguments. 
/// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time1 = Product::new(3, 7); /// let time2 = Product::new(4, 6); /// let meet = time1.meet(&time2); /// /// assert_eq!(meet, Product::new(3, 6)); /// # } /// ``` fn meet(&self, &Self) -> Self; /// Advances self to the largest time indistinguishable under `frontier`. /// /// This method produces the "largest" lattice element with the property that for every /// lattice element greater than some element of `frontier`, both the result and `self` /// compare identically to the lattice element. The result is the "largest" element in /// the sense that any other element with the same property (compares identically to times /// greater or equal to `frontier`) must be less or equal to the result. /// /// When provided an empty frontier, the result is `<Self as Lattice>::maximum()`. It should /// perhaps be distinguished by an `Option<Self>` type, but the `None` case only happens /// when `frontier` is empty, which the caller can see for themselves if they want to be /// clever. /// /// # Examples /// /// ``` /// # extern crate timely; /// # extern crate differential_dataflow; /// # use timely::PartialOrder; /// # use timely::progress::nested::product::Product; /// # use differential_dataflow::lattice::Lattice; /// # fn main() { /// /// let time = Product::new(3, 7); /// let frontier = vec![Product::new(4, 8), Product::new(5, 3)]; /// let advanced = time.advance_by(&frontier[..]); /// /// // `time` and `advanced` are indistinguishable to elements >= an element of `frontier` /// for i in 0 .. 10 { /// for j in 0 .. 10 { /// let test = Product::new(i, j); /// // for `test` in the future of `frontier` .. 
/// if frontier.iter().any(|t| t.less_equal(&test)) { /// assert_eq!(time.less_equal(&test), advanced.less_equal(&test)); /// } /// } /// } /// /// assert_eq!(advanced, Product::new(4, 7)); /// # } /// ``` #[inline(always)] fn advance_by(&self, frontier: &[Self]) -> Self where Self: Sized{ if frontier.len() > 0 { let mut result = self.join(&frontier[0]); for f in &frontier[1..] { result = result.meet(&self.join(f)); } result } else { Self::maximum() } } } // /// A carrier trait for totally ordered lattices. // /// // /// Types that implement `TotalOrder` are stating that their `Lattice` is in fact a total order. // /// This type is only used to restrict implementations to certain types of lattices. // /// // /// This trait is automatically implemented for integer scalars, and for products of these types // /// with "empty" timestamps (e.g. `RootTimestamp` and `()`). Be careful implementing this trait // /// for your own timestamp types, as it may lead to the applicability of incorrect implementations. // /// // /// Note that this trait is distinct from `Ord`; many implementors of `Lattice` also implement // /// `Ord` so that they may be sorted, deduplicated, etc. This implementation neither derives any // /// information from an `Ord` implementation nor informs it in any way. // /// // /// #Examples // /// // /// ``` // /// use differential_dataflow::lattice::TotalOrder; // /// // /// // The `join` and `meet` of totally ordered elements are always one of the two. 
// /// fn invariant<T: TotalOrder>(elt1: T, elt2: T) { // /// if elt1.less_equal(&elt2) { // /// assert!(elt1.meet(&elt2) == elt1); // /// assert!(elt1.join(&elt2) == elt2); // /// } // /// else { // /// assert!(elt1.meet(&elt2) == elt2); // /// assert!(elt1.join(&elt2) == elt1); // /// } // /// } // /// ``` // pub trait TotalOrder : Lattice { } use timely::progress::nested::product::Product; impl<T1: Lattice, T2: Lattice> Lattice for Product<T1, T2> { #[inline(always)] fn minimum() -> Self { Product::new(T1::minimum(), T2::minimum()) } #[inline(always)] fn maximum() -> Self { Product::new(T1::maximum(), T2::maximum()) } #[inline(always)] fn join(&self, other: &Product<T1, T2>) -> Product<T1, T2> { Product { outer: self.outer.join(&other.outer), inner: self.inner.join(&other.inner), } } #[inline(always)] fn meet(&self, other: &Product<T1, T2>) -> Product<T1, T2> { Product { outer: self.outer.meet(&other.outer), inner: self.inner.meet(&other.inner), } } } macro_rules! implement_lattice { ($index_type:ty, $minimum:expr, $maximum:expr) => ( impl Lattice for $index_type { #[inline(always)] fn minimum() -> Self { $minimum } #[inline(always)] fn maximum() -> Self { $maximum } #[inline(always)] fn join(&self, other: &Self) -> Self { ::std::cmp::max(*self, *other) } #[inline(always)] fn meet(&self, other: &Self) -> Self { ::std::cmp::min(*self, *other) } } ) } use timely::progress::timestamp::RootTimestamp; use std::time::Duration; implement_lattice!(RootTimestamp, RootTimestamp, RootTimestamp); implement_lattice!(Duration, Duration::new(0, 0), Duration::new(u64::max_value(), 1_000_000_000 - 1)); implement_lattice!(usize, usize::min_value(), usize::max_value()); implement_lattice!(u64, u64::min_value(), u64::max_value()); implement_lattice!(u32, u32::min_value(), u32::max_value()); implement_lattice!(i32, i32::min_value(), i32::max_value()); implement_lattice!((), (), ()); // impl TotalOrder for RootTimestamp { } // impl TotalOrder for usize { } // impl TotalOrder for u64 
{ } // impl TotalOrder for u32 { } // impl TotalOrder for i32 { } // impl TotalOrder for () { }
#[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use rand::distributions::{Distribution, Standard}; use rand::Rng; #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; use std::mem; use alga::general::Real; use base::dimension::U3; use base::helper; use base::storage::Storage; use base::{Matrix4, Vector, Vector3}; use geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as an homogeneous 4x4 matrix. pub struct Orthographic3<N: Real> { matrix: Matrix4<N>, } impl<N: Real> Copy for Orthographic3<N> {} impl<N: Real> Clone for Orthographic3<N> { #[inline] fn clone(&self) -> Self { Orthographic3::from_matrix_unchecked(self.matrix.clone()) } } impl<N: Real> fmt::Debug for Orthographic3<N> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } impl<N: Real> PartialEq for Orthographic3<N> { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix } } #[cfg(feature = "serde-serialize")] impl<N: Real + Serialize> Serialize for Orthographic3<N> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer { self.matrix.serialize(serializer) } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real + Deserialize<'a>> Deserialize<'a> for Orthographic3<N> { fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> where Des: Deserializer<'a> { let matrix = Matrix4::<N>::deserialize(deserializer)?; Ok(Orthographic3::from_matrix_unchecked(matrix)) } } impl<N: Real> Orthographic3<N> { /// Creates a new orthographic projection matrix. #[inline] pub fn new(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> Self { assert!( left < right, "The left corner must be farther than the right corner." ); assert!( bottom < top, "The top corner must be higher than the bottom corner." ); assert!( znear < zfar, "The far plane must be farther than the near plane." 
); let matrix = Matrix4::<N>::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); res.set_bottom_and_top(bottom, top); res.set_znear_and_zfar(znear, zfar); res } /// Wraps the given matrix to interpret it as a 3D orthographic matrix. /// /// It is not checked whether or not the given matrix actually represents an orthographic /// projection. #[inline] pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self { Orthographic3 { matrix: matrix } } /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] pub fn from_fov(aspect: N, vfov: N, znear: N, zfar: N) -> Self { assert!( znear < zfar, "The far plane must be farther than the near plane." ); assert!( !relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero." ); let half: N = ::convert(0.5); let width = zfar * (vfov * half).tan(); let height = width / aspect; Self::new( -width * half, width * half, -height * half, height * half, znear, zfar, ) } /// Retrieves the inverse of the underlying homogeneous matrix. #[inline] pub fn inverse(&self) -> Matrix4<N> { let mut res = self.to_homogeneous(); let inv_m11 = N::one() / self.matrix[(0, 0)]; let inv_m22 = N::one() / self.matrix[(1, 1)]; let inv_m33 = N::one() / self.matrix[(2, 2)]; res[(0, 0)] = inv_m11; res[(1, 1)] = inv_m22; res[(2, 2)] = inv_m33; res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; res } /// Computes the corresponding homogeneous matrix. #[inline] pub fn to_homogeneous(&self) -> Matrix4<N> { self.matrix } /// A reference to the underlying homogeneous transformation matrix. #[inline] pub fn as_matrix(&self) -> &Matrix4<N> { &self.matrix } /// A reference to this transformation seen as a `Projective3`. #[inline] pub fn as_projective(&self) -> &Projective3<N> { unsafe { mem::transmute(self) } } /// This transformation seen as a `Projective3`. 
#[inline] pub fn to_projective(&self) -> Projective3<N> { Projective3::from_matrix_unchecked(self.matrix) } /// Retrieves the underlying homogeneous matrix. #[inline] pub fn unwrap(self) -> Matrix4<N> { self.matrix } /// The smallest x-coordinate of the view cuboid. #[inline] pub fn left(&self) -> N { (-N::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] } /// The largest x-coordinate of the view cuboid. #[inline] pub fn right(&self) -> N { (N::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] } /// The smallest y-coordinate of the view cuboid. #[inline] pub fn bottom(&self) -> N { (-N::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] } /// The largest y-coordinate of the view cuboid. #[inline] pub fn top(&self) -> N { (N::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] } /// The near plane offset of the view cuboid. #[inline] pub fn znear(&self) -> N { (N::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] } /// The far plane offset of the view cuboid. #[inline] pub fn zfar(&self) -> N { (-N::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] } // FIXME: when we get specialization, specialize the Mul impl instead. /// Projects a point. Faster than matrix multiplication. #[inline] pub fn project_point(&self, p: &Point3<N>) -> Point3<N> { Point3::new( self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)], self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)], self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)], ) } /// Un-projects a point. Faster than multiplication by the underlying matrix inverse. #[inline] pub fn unproject_point(&self, p: &Point3<N>) -> Point3<N> { Point3::new( (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)], (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)], (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)], ) } // FIXME: when we get specialization, specialize the Mul impl instead. /// Projects a vector. Faster than matrix multiplication. 
#[inline] pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N> where SB: Storage<N, U3> { Vector3::new( self.matrix[(0, 0)] * p[0], self.matrix[(1, 1)] * p[1], self.matrix[(2, 2)] * p[2], ) } /// Sets the smallest x-coordinate of the view cuboid. #[inline] pub fn set_left(&mut self, left: N) { let right = self.right(); self.set_left_and_right(left, right); } /// Sets the largest x-coordinate of the view cuboid. #[inline] pub fn set_right(&mut self, right: N) { let left = self.left(); self.set_left_and_right(left, right); } /// Sets the smallest y-coordinate of the view cuboid. #[inline] pub fn set_bottom(&mut self, bottom: N) { let top = self.top(); self.set_bottom_and_top(bottom, top); } /// Sets the largest y-coordinate of the view cuboid. #[inline] pub fn set_top(&mut self, top: N) { let bottom = self.bottom(); self.set_bottom_and_top(bottom, top); } /// Sets the near plane offset of the view cuboid. #[inline] pub fn set_znear(&mut self, znear: N) { let zfar = self.zfar(); self.set_znear_and_zfar(znear, zfar); } /// Sets the far plane offset of the view cuboid. #[inline] pub fn set_zfar(&mut self, zfar: N) { let znear = self.znear(); self.set_znear_and_zfar(znear, zfar); } /// Sets the view cuboid coordinates along the `x` axis. #[inline] pub fn set_left_and_right(&mut self, left: N, right: N) { assert!( left < right, "The left corner must be farther than the right corner." ); self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left); self.matrix[(0, 3)] = -(right + left) / (right - left); } /// Sets the view cuboid coordinates along the `y` axis. #[inline] pub fn set_bottom_and_top(&mut self, bottom: N, top: N) { assert!( bottom < top, "The top corner must be higher than the bottom corner." ); self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom); self.matrix[(1, 3)] = -(top + bottom) / (top - bottom); } /// Sets the near and far plane offsets of the view cuboid. 
#[inline] pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) { assert!( !relative_eq!(zfar - znear, N::zero()), "The near-plane and far-plane must not be superimposed." ); self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear); self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear); } } impl<N: Real> Distribution<Orthographic3<N>> for Standard where Standard: Distribution<N> { fn sample<R: Rng + ?Sized>(&self, r: &mut R) -> Orthographic3<N> { let left = r.gen(); let right = helper::reject_rand(r, |x: &N| *x > left); let bottom = r.gen(); let top = helper::reject_rand(r, |x: &N| *x > bottom); let znear = r.gen(); let zfar = helper::reject_rand(r, |x: &N| *x > znear); Orthographic3::new(left, right, bottom, top, znear, zfar) } } #[cfg(feature = "arbitrary")] impl<N: Real + Arbitrary> Arbitrary for Orthographic3<N> where Matrix4<N>: Send { fn arbitrary<G: Gen>(g: &mut G) -> Self { let left = Arbitrary::arbitrary(g); let right = helper::reject(g, |x: &N| *x > left); let bottom = Arbitrary::arbitrary(g); let top = helper::reject(g, |x: &N| *x > bottom); let znear = Arbitrary::arbitrary(g); let zfar = helper::reject(g, |x: &N| *x > znear); Self::new(left, right, bottom, top, znear, zfar) } } impl<N: Real> From<Orthographic3<N>> for Matrix4<N> { #[inline] fn from(orth: Orthographic3<N>) -> Self { orth.unwrap() } } Remove over-restrictive assertions on Orthographic3 construction + add doc-tests. Fix #365 #[cfg(feature = "arbitrary")] use quickcheck::{Arbitrary, Gen}; use rand::distributions::{Distribution, Standard}; use rand::Rng; #[cfg(feature = "serde-serialize")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt; use std::mem; use alga::general::Real; use base::dimension::U3; use base::helper; use base::storage::Storage; use base::{Matrix4, Vector, Vector3}; use geometry::{Point3, Projective3}; /// A 3D orthographic projection stored as an homogeneous 4x4 matrix. 
pub struct Orthographic3<N: Real> { matrix: Matrix4<N>, } impl<N: Real> Copy for Orthographic3<N> {} impl<N: Real> Clone for Orthographic3<N> { #[inline] fn clone(&self) -> Self { Orthographic3::from_matrix_unchecked(self.matrix.clone()) } } impl<N: Real> fmt::Debug for Orthographic3<N> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { self.matrix.fmt(f) } } impl<N: Real> PartialEq for Orthographic3<N> { #[inline] fn eq(&self, right: &Self) -> bool { self.matrix == right.matrix } } #[cfg(feature = "serde-serialize")] impl<N: Real + Serialize> Serialize for Orthographic3<N> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer { self.matrix.serialize(serializer) } } #[cfg(feature = "serde-serialize")] impl<'a, N: Real + Deserialize<'a>> Deserialize<'a> for Orthographic3<N> { fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error> where Des: Deserializer<'a> { let matrix = Matrix4::<N>::deserialize(deserializer)?; Ok(Orthographic3::from_matrix_unchecked(matrix)) } } impl<N: Real> Orthographic3<N> { /// Creates a new orthographic projection matrix. /// /// This follows the OpenGL convention, so this will flip the `z` axis. /// /// # Example /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::{Orthographic3, Point3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// // Check this projection actually transforms the view cuboid into the double-unit cube. /// // See https://www.nalgebra.org/projections/#orthographic-projection for more details. 
/// let p1 = Point3::new(1.0, 2.0, -0.1); /// let p2 = Point3::new(1.0, 2.0, -1000.0); /// let p3 = Point3::new(1.0, 20.0, -0.1); /// let p4 = Point3::new(1.0, 20.0, -1000.0); /// let p5 = Point3::new(10.0, 2.0, -0.1); /// let p6 = Point3::new(10.0, 2.0, -1000.0); /// let p7 = Point3::new(10.0, 20.0, -0.1); /// let p8 = Point3::new(10.0, 20.0, -1000.0); /// /// assert_relative_eq!(proj.project_point(&p1), Point3::new(-1.0, -1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p2), Point3::new(-1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p3), Point3::new(-1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p4), Point3::new(-1.0, 1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p5), Point3::new( 1.0, -1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p6), Point3::new( 1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p7), Point3::new( 1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p8), Point3::new( 1.0, 1.0, 1.0)); /// /// // This also works with flipped axis. In other words, we allow that /// // `left > right`, `bottom > top`, and/or `znear > zfar`. 
/// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// /// assert_relative_eq!(proj.project_point(&p1), Point3::new( 1.0, 1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p2), Point3::new( 1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p3), Point3::new( 1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p4), Point3::new( 1.0, -1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p5), Point3::new(-1.0, 1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p6), Point3::new(-1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p7), Point3::new(-1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p8), Point3::new(-1.0, -1.0, -1.0)); /// ``` #[inline] pub fn new(left: N, right: N, bottom: N, top: N, znear: N, zfar: N) -> Self { let matrix = Matrix4::<N>::identity(); let mut res = Self::from_matrix_unchecked(matrix); res.set_left_and_right(left, right); res.set_bottom_and_top(bottom, top); res.set_znear_and_zfar(znear, zfar); res } /// Wraps the given matrix to interpret it as a 3D orthographic matrix. /// /// It is not checked whether or not the given matrix actually represents an orthographic /// projection. /// /// # Example /// ``` /// # use nalgebra::{Orthographic3, Point3, Matrix4}; /// let mat = Matrix4::new( /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, /// 0.0, 0.0, 0.0, 1.0 /// ); /// let proj = Orthographic3::from_matrix_unchecked(mat); /// assert_eq!(proj, Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0)); /// ``` #[inline] pub fn from_matrix_unchecked(matrix: Matrix4<N>) -> Self { Orthographic3 { matrix: matrix } } /// Creates a new orthographic projection matrix from an aspect ratio and the vertical field of view. #[inline] pub fn from_fov(aspect: N, vfov: N, znear: N, zfar: N) -> Self { assert!( znear != zfar, "The far plane must not be equal to the near plane." 
); assert!( !relative_eq!(aspect, N::zero()), "The apsect ratio must not be zero." ); let half: N = ::convert(0.5); let width = zfar * (vfov * half).tan(); let height = width / aspect; Self::new( -width * half, width * half, -height * half, height * half, znear, zfar, ) } /// Retrieves the inverse of the underlying homogeneous matrix. /// /// # Example /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::{Orthographic3, Point3, Matrix4}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// let inv = proj.inverse(); /// /// assert_relative_eq!(inv * proj.as_matrix(), Matrix4::identity()); /// assert_relative_eq!(proj.as_matrix() * inv, Matrix4::identity()); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// let inv = proj.inverse(); /// assert_relative_eq!(inv * proj.as_matrix(), Matrix4::identity()); /// assert_relative_eq!(proj.as_matrix() * inv, Matrix4::identity()); /// ``` #[inline] pub fn inverse(&self) -> Matrix4<N> { let mut res = self.to_homogeneous(); let inv_m11 = N::one() / self.matrix[(0, 0)]; let inv_m22 = N::one() / self.matrix[(1, 1)]; let inv_m33 = N::one() / self.matrix[(2, 2)]; res[(0, 0)] = inv_m11; res[(1, 1)] = inv_m22; res[(2, 2)] = inv_m33; res[(0, 3)] = -self.matrix[(0, 3)] * inv_m11; res[(1, 3)] = -self.matrix[(1, 3)] * inv_m22; res[(2, 3)] = -self.matrix[(2, 3)] * inv_m33; res } /// Computes the corresponding homogeneous matrix. /// /// # Example /// ``` /// # use nalgebra::{Orthographic3, Point3, Matrix4}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// let expected = Matrix4::new( /// 2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0, /// 0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0, /// 0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9, /// 0.0, 0.0, 0.0, 1.0 /// ); /// assert_eq!(proj.to_homogeneous(), expected); /// ``` #[inline] pub fn to_homogeneous(&self) -> Matrix4<N> { self.matrix } /// A reference to the underlying homogeneous transformation matrix. 
    ///
    /// # Example
    /// ```
    /// # use nalgebra::{Orthographic3, Point3, Matrix4};
    /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0);
    /// let expected = Matrix4::new(
    ///     2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0,
    ///     0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0,
    ///     0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9,
    ///     0.0, 0.0, 0.0, 1.0
    /// );
    /// assert_eq!(*proj.as_matrix(), expected);
    /// ```
    #[inline]
    pub fn as_matrix(&self) -> &Matrix4<N> {
        &self.matrix
    }

    /// A reference to this transformation seen as a `Projective3`.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::Orthographic3;
    /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0);
    /// assert_eq!(proj.as_projective().to_homogeneous(), proj.to_homogeneous());
    /// ```
    #[inline]
    pub fn as_projective(&self) -> &Projective3<N> {
        // NOTE(review): this transmute assumes `Orthographic3<N>` and
        // `Projective3<N>` are layout-identical newtypes around a single
        // `Matrix4<N>` — confirm this invariant is upheld by `Projective3`'s
        // definition before touching either type's fields.
        unsafe { mem::transmute(self) }
    }

    /// This transformation seen as a `Projective3`.
    ///
    /// # Example
    /// ```
    /// # use nalgebra::Orthographic3;
    /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0);
    /// assert_eq!(proj.to_projective().to_homogeneous(), proj.to_homogeneous());
    /// ```
    #[inline]
    pub fn to_projective(&self) -> Projective3<N> {
        // Unlike `as_projective`, this copies the matrix — no transmute needed.
        Projective3::from_matrix_unchecked(self.matrix)
    }

    /// Retrieves the underlying homogeneous matrix.
    ///
    /// # Example
    /// ```
    /// # #[macro_use] extern crate approx;
    /// # extern crate nalgebra;
    /// # use nalgebra::{Orthographic3, Point3, Matrix4};
    /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0);
    /// let expected = Matrix4::new(
    ///     2.0 / 9.0, 0.0, 0.0, -11.0 / 9.0,
    ///     0.0, 2.0 / 18.0, 0.0, -22.0 / 18.0,
    ///     0.0, 0.0, -2.0 / 999.9, -1000.1 / 999.9,
    ///     0.0, 0.0, 0.0, 1.0
    /// );
    /// assert_eq!(proj.unwrap(), expected);
    /// ```
    // Consumes `self` and returns the matrix by value.
    #[inline]
    pub fn unwrap(self) -> Matrix4<N> {
        self.matrix
    }

    /// The left offset of the view cuboid.
/// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.left(), 1.0, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.left(), 10.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn left(&self) -> N { (-N::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] } /// The right offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.right(), 10.0, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.right(), 1.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn right(&self) -> N { (N::one() - self.matrix[(0, 3)]) / self.matrix[(0, 0)] } /// The bottom offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.bottom(), 2.0, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.bottom(), 20.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn bottom(&self) -> N { (-N::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] } /// The top offset of the view cuboid. 
/// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.top(), 20.0, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.top(), 2.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn top(&self) -> N { (N::one() - self.matrix[(1, 3)]) / self.matrix[(1, 1)] } /// The near plane offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.znear(), 0.1, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.znear(), 1000.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn znear(&self) -> N { (N::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] } /// The far plane offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// assert_relative_eq!(proj.zfar(), 1000.0, epsilon = 1.0e-6); /// /// let proj = Orthographic3::new(10.0, 1.0, 20.0, 2.0, 1000.0, 0.1); /// assert_relative_eq!(proj.zfar(), 0.1, epsilon = 1.0e-6); /// ``` #[inline] pub fn zfar(&self) -> N { (-N::one() + self.matrix[(2, 3)]) / self.matrix[(2, 2)] } // FIXME: when we get specialization, specialize the Mul impl instead. /// Projects a point. Faster than matrix multiplication. 
/// /// # Example /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::{Orthographic3, Point3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// /// let p1 = Point3::new(1.0, 2.0, -0.1); /// let p2 = Point3::new(1.0, 2.0, -1000.0); /// let p3 = Point3::new(1.0, 20.0, -0.1); /// let p4 = Point3::new(1.0, 20.0, -1000.0); /// let p5 = Point3::new(10.0, 2.0, -0.1); /// let p6 = Point3::new(10.0, 2.0, -1000.0); /// let p7 = Point3::new(10.0, 20.0, -0.1); /// let p8 = Point3::new(10.0, 20.0, -1000.0); /// /// assert_relative_eq!(proj.project_point(&p1), Point3::new(-1.0, -1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p2), Point3::new(-1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p3), Point3::new(-1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p4), Point3::new(-1.0, 1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p5), Point3::new( 1.0, -1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p6), Point3::new( 1.0, -1.0, 1.0)); /// assert_relative_eq!(proj.project_point(&p7), Point3::new( 1.0, 1.0, -1.0)); /// assert_relative_eq!(proj.project_point(&p8), Point3::new( 1.0, 1.0, 1.0)); /// ``` #[inline] pub fn project_point(&self, p: &Point3<N>) -> Point3<N> { Point3::new( self.matrix[(0, 0)] * p[0] + self.matrix[(0, 3)], self.matrix[(1, 1)] * p[1] + self.matrix[(1, 3)], self.matrix[(2, 2)] * p[2] + self.matrix[(2, 3)], ) } /// Un-projects a point. Faster than multiplication by the underlying matrix inverse. 
/// /// # Example /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::{Orthographic3, Point3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// /// let p1 = Point3::new(-1.0, -1.0, -1.0); /// let p2 = Point3::new(-1.0, -1.0, 1.0); /// let p3 = Point3::new(-1.0, 1.0, -1.0); /// let p4 = Point3::new(-1.0, 1.0, 1.0); /// let p5 = Point3::new( 1.0, -1.0, -1.0); /// let p6 = Point3::new( 1.0, -1.0, 1.0); /// let p7 = Point3::new( 1.0, 1.0, -1.0); /// let p8 = Point3::new( 1.0, 1.0, 1.0); /// /// assert_relative_eq!(proj.unproject_point(&p1), Point3::new(1.0, 2.0, -0.1), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p2), Point3::new(1.0, 2.0, -1000.0), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p3), Point3::new(1.0, 20.0, -0.1), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p4), Point3::new(1.0, 20.0, -1000.0), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p5), Point3::new(10.0, 2.0, -0.1), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p6), Point3::new(10.0, 2.0, -1000.0), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p7), Point3::new(10.0, 20.0, -0.1), epsilon = 1.0e-6); /// assert_relative_eq!(proj.unproject_point(&p8), Point3::new(10.0, 20.0, -1000.0), epsilon = 1.0e-6); /// ``` #[inline] pub fn unproject_point(&self, p: &Point3<N>) -> Point3<N> { Point3::new( (p[0] - self.matrix[(0, 3)]) / self.matrix[(0, 0)], (p[1] - self.matrix[(1, 3)]) / self.matrix[(1, 1)], (p[2] - self.matrix[(2, 3)]) / self.matrix[(2, 2)], ) } // FIXME: when we get specialization, specialize the Mul impl instead. /// Projects a vector. Faster than matrix multiplication. /// /// Vectors are not affected by the translation part of the projection. 
/// /// # Example /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::{Orthographic3, Vector3}; /// let proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// /// let v1 = Vector3::x(); /// let v2 = Vector3::y(); /// let v3 = Vector3::z(); /// /// assert_relative_eq!(proj.project_vector(&v1), Vector3::x() * 2.0 / 9.0); /// assert_relative_eq!(proj.project_vector(&v2), Vector3::y() * 2.0 / 18.0); /// assert_relative_eq!(proj.project_vector(&v3), Vector3::z() * -2.0 / 999.9); /// ``` #[inline] pub fn project_vector<SB>(&self, p: &Vector<N, U3, SB>) -> Vector3<N> where SB: Storage<N, U3> { Vector3::new( self.matrix[(0, 0)] * p[0], self.matrix[(1, 1)] * p[1], self.matrix[(2, 2)] * p[2], ) } /// Sets the left offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_left(2.0); /// assert_relative_eq!(proj.left(), 2.0, epsilon = 1.0e-6); /// /// // It is OK to set a left offset greater than the current right offset. /// proj.set_left(20.0); /// assert_relative_eq!(proj.left(), 20.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_left(&mut self, left: N) { let right = self.right(); self.set_left_and_right(left, right); } /// Sets the right offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_right(15.0); /// assert_relative_eq!(proj.right(), 15.0, epsilon = 1.0e-6); /// /// // It is OK to set a right offset smaller than the current left offset. 
/// proj.set_right(-3.0); /// assert_relative_eq!(proj.right(), -3.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_right(&mut self, right: N) { let left = self.left(); self.set_left_and_right(left, right); } /// Sets the bottom offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_bottom(8.0); /// assert_relative_eq!(proj.bottom(), 8.0, epsilon = 1.0e-6); /// /// // It is OK to set a bottom offset greater than the current top offset. /// proj.set_bottom(50.0); /// assert_relative_eq!(proj.bottom(), 50.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_bottom(&mut self, bottom: N) { let top = self.top(); self.set_bottom_and_top(bottom, top); } /// Sets the top offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_top(15.0); /// assert_relative_eq!(proj.top(), 15.0, epsilon = 1.0e-6); /// /// // It is OK to set a top offset smaller than the current bottom offset. /// proj.set_top(-3.0); /// assert_relative_eq!(proj.top(), -3.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_top(&mut self, top: N) { let bottom = self.bottom(); self.set_bottom_and_top(bottom, top); } /// Sets the near plane offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_znear(8.0); /// assert_relative_eq!(proj.znear(), 8.0, epsilon = 1.0e-6); /// /// // It is OK to set a znear greater than the current zfar. 
/// proj.set_znear(5000.0); /// assert_relative_eq!(proj.znear(), 5000.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_znear(&mut self, znear: N) { let zfar = self.zfar(); self.set_znear_and_zfar(znear, zfar); } /// Sets the far plane offset of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_zfar(15.0); /// assert_relative_eq!(proj.zfar(), 15.0, epsilon = 1.0e-6); /// /// // It is OK to set a zfar smaller than the current znear. /// proj.set_zfar(-3.0); /// assert_relative_eq!(proj.zfar(), -3.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_zfar(&mut self, zfar: N) { let znear = self.znear(); self.set_znear_and_zfar(znear, zfar); } /// Sets the view cuboid offsets along the `x` axis. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_left_and_right(7.0, 70.0); /// assert_relative_eq!(proj.left(), 7.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.right(), 70.0, epsilon = 1.0e-6); /// /// // It is also OK to have `left > right`. /// proj.set_left_and_right(70.0, 7.0); /// assert_relative_eq!(proj.left(), 70.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.right(), 7.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_left_and_right(&mut self, left: N, right: N) { assert!( left != right, "The left corner must not be equal to the right corner." ); self.matrix[(0, 0)] = ::convert::<_, N>(2.0) / (right - left); self.matrix[(0, 3)] = -(right + left) / (right - left); } /// Sets the view cuboid offsets along the `y` axis. 
/// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_bottom_and_top(7.0, 70.0); /// assert_relative_eq!(proj.bottom(), 7.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.top(), 70.0, epsilon = 1.0e-6); /// /// // It is also OK to have `bottom > top`. /// proj.set_bottom_and_top(70.0, 7.0); /// assert_relative_eq!(proj.bottom(), 70.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.top(), 7.0, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_bottom_and_top(&mut self, bottom: N, top: N) { assert!( bottom != top, "The top corner must not be equal to the bottom corner." ); self.matrix[(1, 1)] = ::convert::<_, N>(2.0) / (top - bottom); self.matrix[(1, 3)] = -(top + bottom) / (top - bottom); } /// Sets the near and far plane offsets of the view cuboid. /// /// ``` /// # #[macro_use] extern crate approx; /// # extern crate nalgebra; /// # use nalgebra::Orthographic3; /// let mut proj = Orthographic3::new(1.0, 10.0, 2.0, 20.0, 0.1, 1000.0); /// proj.set_znear_and_zfar(50.0, 5000.0); /// assert_relative_eq!(proj.znear(), 50.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.zfar(), 5000.0, epsilon = 1.0e-6); /// /// // It is also OK to have `znear > zfar`. /// proj.set_znear_and_zfar(5000.0, 0.5); /// assert_relative_eq!(proj.znear(), 5000.0, epsilon = 1.0e-6); /// assert_relative_eq!(proj.zfar(), 0.5, epsilon = 1.0e-6); /// ``` #[inline] pub fn set_znear_and_zfar(&mut self, znear: N, zfar: N) { assert!( zfar != znear, "The near-plane and far-plane must not be superimposed." 
); self.matrix[(2, 2)] = -::convert::<_, N>(2.0) / (zfar - znear); self.matrix[(2, 3)] = -(zfar + znear) / (zfar - znear); } } impl<N: Real> Distribution<Orthographic3<N>> for Standard where Standard: Distribution<N> { fn sample<R: Rng + ?Sized>(&self, r: &mut R) -> Orthographic3<N> { let left = r.gen(); let right = helper::reject_rand(r, |x: &N| *x > left); let bottom = r.gen(); let top = helper::reject_rand(r, |x: &N| *x > bottom); let znear = r.gen(); let zfar = helper::reject_rand(r, |x: &N| *x > znear); Orthographic3::new(left, right, bottom, top, znear, zfar) } } #[cfg(feature = "arbitrary")] impl<N: Real + Arbitrary> Arbitrary for Orthographic3<N> where Matrix4<N>: Send { fn arbitrary<G: Gen>(g: &mut G) -> Self { let left = Arbitrary::arbitrary(g); let right = helper::reject(g, |x: &N| *x > left); let bottom = Arbitrary::arbitrary(g); let top = helper::reject(g, |x: &N| *x > bottom); let znear = Arbitrary::arbitrary(g); let zfar = helper::reject(g, |x: &N| *x > znear); Self::new(left, right, bottom, top, znear, zfar) } } impl<N: Real> From<Orthographic3<N>> for Matrix4<N> { #[inline] fn from(orth: Orthographic3<N>) -> Self { orth.unwrap() } }
//! Container types use uuid::Uuid; pub static MIN_SIZE: Size = Size { w: 80u32, h: 40u32 }; use rustwlc::handle::{WlcView, WlcOutput}; use rustwlc::{Geometry, ResizeEdge, Point, Size, VIEW_FULLSCREEN}; use super::borders::{Borders, BordersDraw}; use ::render::{Renderable, Drawable}; use super::bar::Bar; /// A handle to either a view or output #[derive(Debug, Clone, PartialEq, Eq)] pub enum Handle { View(WlcView), Output(WlcOutput) } /// Types of containers #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ContainerType { /// Root container, only one exists Root, /// WlcOutput/Monitor Output, /// A workspace Workspace, /// A Container, houses views and other containers Container, /// A view (window) View } impl ContainerType { /// Whether this container can be used as the parent of another pub fn can_have_child(self, other: ContainerType) -> bool { use self::ContainerType::*; match self { Root => other == Output, Output => other == Workspace, Workspace => other == Container, Container => other == Container || other == View, View => false } } } /// Layout mode for a container #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Layout { Horizontal, Vertical } /// Represents an item in the container tree. #[derive(Debug, Clone, PartialEq, Eq)] pub enum Container { /// Root node of the container Root(Uuid), /// Output Output { /// Handle to the wlc handle: WlcOutput, /// Optional background for the output background: Option<WlcView>, /// Optional bar for the output bar: Option<Bar>, /// UUID associated with container, client program can use container id: Uuid, }, /// Workspace Workspace { /// Name of the workspace name: String, /// The geometry of the workspace on the screen. /// Might be different if there is e.g a bar present geometry: Geometry, /// `Vec` of all children that are fullscreen. 
/// This is used to disable certain features while there is a fullscreen /// (e.g: focus switching, resizing, and moving containers) fullscreen_c: Vec<Uuid>, /// UUID associated with container, client program can use container id: Uuid, }, /// Container Container { /// How the container is layed out layout: Layout, /// If the container is floating floating: bool, /// If the container is fullscreen fullscreen: bool, /// The geometry of the container, relative to the parent container geometry: Geometry, /// UUID associated with container, client program can use container id: Uuid, /// The border drawn to the screen borders: Option<Borders>, }, /// View or window View { /// The wlc handle to the view handle: WlcView, /// Whether this view is floating floating: bool, /// Effective geometry. This is the size of the container including /// borders and gaps. It does _not_ change when an app becomes /// fullscreen. E.g to get the fullscreen size use `handle.get_geometry` effective_geometry: Geometry, /// UUID associated with container, client program can use container id: Uuid, /// The border drawn to the screen borders: Option<Borders>, } } impl Container { /// Creates a new root container. pub fn new_root() -> Container { Container::Root(Uuid::new_v4()) } /// Creates a new output container with the given output pub fn new_output(handle: WlcOutput) -> Container { Container::Output { handle: handle, background: None, bar: None, id: Uuid::new_v4() } } /// Creates a new workspace container with the given name and size. /// Usually the size is the same as the output it resides on, /// unless there is a bar or something. 
pub fn new_workspace(name: String, geometry: Geometry) -> Container { Container::Workspace { name: name, geometry: geometry, fullscreen_c: Vec::new(), id: Uuid::new_v4() } } /// Creates a new container pub fn new_container(geometry: Geometry) -> Container { Container::Container { layout: Layout::Horizontal, floating: false, fullscreen: false, geometry: geometry, id: Uuid::new_v4(), borders: None } } /// Creates a new view container with the given handle pub fn new_view(handle: WlcView, borders: Option<Borders>) -> Container { let geometry = handle.get_geometry() .expect("View had no geometry"); Container::View { handle: handle, floating: false, effective_geometry: geometry, id: Uuid::new_v4(), borders: borders } } /// Sets the visibility of this container pub fn set_visibility(&mut self, visibility: bool) { let mask = if visibility { 1 } else { 0 }; if let Some(handle) = self.get_handle() { match handle { Handle::View(view) => { view.set_mask(mask) }, _ => {}, } } } /// Gets the type of this container pub fn get_type(&self) -> ContainerType { match *self { Container::Root(_) => ContainerType::Root, Container::Output { .. } => ContainerType::Output, Container::Workspace { .. } => ContainerType::Workspace, Container::Container { .. } => ContainerType::Container, Container::View { .. } => ContainerType::View } } /// Gets the view handle of the view container, if this is a view container pub fn get_handle(&self) -> Option<Handle> { match *self { Container::View { ref handle, ..} => Some(Handle::View(handle.clone())), Container::Output { ref handle, .. } => Some(Handle::Output(handle.clone())), _ => None } } /// Gets the name of the workspace, if this container is a workspace. pub fn get_name(&self) -> Option<&str> { match *self { Container::Workspace { ref name, ..} => Some(name), _ => None } } /// Gets the geometry of the container, if the container has one. 
    /// Root: Returns None
    /// Workspace/Output: Size is the size of the screen, origin is just 0,0
    /// Container/View: Size is the size of the container,
    /// origin is the coordinates relative to the parent container.
    pub fn get_geometry(&self) -> Option<Geometry> {
        match *self {
            Container::Root(_) => None,
            Container::Output { ref handle, ref bar, .. } => {
                let mut resolution = handle.get_resolution()
                    .expect("Couldn't get output resolution");
                let mut origin = Point { x: 0, y: 0 };
                // When a bar is present, carve its height out of the usable
                // area: shift the origin down by the bar height and shrink
                // the reported resolution accordingly.
                // NOTE(review): the surviving comments below say "for bottom"
                // but shifting `origin.y` matches a bar anchored at the top —
                // confirm which placement is intended.
                if let Some(handle) = bar.as_ref().map(|bar| **bar) {
                    let bar_g = handle.get_geometry()
                        .expect("Bar had no geometry");
                    let Size { h, .. } = bar_g.size;
                    // TODO Allow bars on the horizontal side
                    // This is for bottom
                    //resolution.h = resolution.h.saturating_sub(h);
                    origin.y += h as i32;
                    resolution.h = resolution.h.saturating_sub(h)
                }
                Some(Geometry {
                    origin: origin,
                    size: resolution
                })
            },
            Container::Workspace { geometry, .. } |
            Container::Container { geometry, .. } => Some(geometry),
            Container::View { effective_geometry, .. } => {
                Some(effective_geometry)
            },
        }
    }

    /// Gets the actual geometry for a `WlcView` or `WlcOutput`
    ///
    /// Unlike `get_geometry`, this does not account for borders/gaps,
    /// and instead is just a thin wrapper around
    /// `handle.get_geometry`/`handle.get_resolution`.
    ///
    /// Most of the time you want `get_geometry`, as you should account for the
    /// borders, gaps, and top bar.
    ///
    /// For non-`View`/`Output` containers, this always returns `None`
    pub fn get_actual_geometry(&self) -> Option<Geometry> {
        match *self {
            Container::View { handle, .. } => handle.get_geometry(),
            Container::Output { handle, .. } => {
                // Outputs report a bare resolution; wrap it as a geometry
                // anchored at the top-left corner.
                handle.get_resolution()
                    .map(|size| Geometry {
                        origin: Point { x: 0, y: 0 },
                        size: size
                    })
            },
            _ => None
        }
    }

    /// Sets the geometry behind the container. Does nothing if container is root.
    ///
    /// For view you need to set the appropriate edges (which can be empty).
/// If you are not intending to set the geometry of a view, simply pass `ResizeEdge::empty()` pub fn set_geometry(&mut self, edges: ResizeEdge, geo: Geometry) { match *self { Container::Root(_) => error!("Tried to set the geometry of the root!"), Container::Output { ref handle, .. } => { handle.set_resolution(geo.size, 1); }, Container::Workspace { ref mut geometry, .. } | Container::Container { ref mut geometry, .. } => { *geometry = geo; }, Container::View { ref handle, ref mut effective_geometry, .. } => { handle.set_geometry(edges, geo); *effective_geometry = geo; } } } pub fn set_layout(&mut self, new_layout: Layout) -> Result<(), String>{ match *self { Container::Container { ref mut layout, .. } => *layout = new_layout, ref other => return Err( format!("Can only set the layout of a container, not {:?}", other)) } Ok(()) } pub fn get_id(&self) -> Uuid { match *self { Container::Root(id) | Container::Output { id, .. } | Container::Workspace { id, .. } | Container::Container { id, .. } | Container::View { id, .. } => { id } } } pub fn floating(&self) -> bool { match *self { Container::View { floating, .. } | Container::Container { floating, .. } => floating, Container::Workspace { .. } | Container::Output { .. } | Container::Root(_) => false } } // TODO Make these set_* functions that can fail return a proper error type. /// If not set on a view or container, error is returned telling what /// container type that this function was (incorrectly) called on. pub fn set_floating(&mut self, val: bool) -> Result<ContainerType, ContainerType> { let c_type = self.get_type(); match *self { Container::View { ref mut floating, .. } | Container::Container { ref mut floating, .. } => { *floating = val; Ok(c_type) }, _ => { Err(c_type) } } } /// Sets the fullscreen flag on the container to the specified value. /// /// If called on a non View/Container, then returns an Err with the wrong type. 
pub fn set_fullscreen(&mut self, val: bool) -> Result<(), ContainerType> { let c_type = self.get_type(); match *self { Container::View { handle, effective_geometry, .. } => { handle.set_state(VIEW_FULLSCREEN, val); if !val { handle.set_geometry(ResizeEdge::empty(), effective_geometry) } Ok(()) }, Container::Container { ref mut fullscreen, .. } => { *fullscreen = val; Ok(()) }, _ => Err(c_type) } } /// Determines if a container is fullscreen. /// /// Workspaces, Outputs, and the Root are never fullscreen. pub fn fullscreen(&self) -> bool { match *self { Container::View { handle, .. } => { handle.get_state().intersects(VIEW_FULLSCREEN) }, Container::Container { fullscreen, .. } => fullscreen, _ => false } } /// Updates the workspace (`self`) that the `id` resides in to reflect /// whether the container with the `id` is fullscreen (`toggle`). /// /// If called with a non-workspace an Err is returned with /// the incorrect type. pub fn update_fullscreen_c(&mut self, id: Uuid, toggle: bool) -> Result<(), ContainerType> { let c_type = self.get_type(); match *self { Container::Workspace { ref mut fullscreen_c, .. } => { if !toggle { match fullscreen_c.iter().position(|c_id| *c_id == id) { Some(index) => { fullscreen_c.remove(index); }, None => {} } } else { fullscreen_c.push(id); } Ok(()) }, _ => Err(c_type) } } /// If the container is a workspace, returns the children in the workspace that /// are fullscreen. The last child is the one visible to the user. /// /// Computes in O(1) time. /// /// If the container is not a workspace, None is returned. pub fn fullscreen_c(&self) -> Option<&Vec<Uuid>> { match *self { Container::Workspace { ref fullscreen_c, .. } => Some(fullscreen_c), _ => None } } /// Gets the name of the container. 
/// /// Container::Root: returns simply the string "Root Container" /// Container::Output: The name of the output /// Container::Workspace: The name of the workspace /// Container::Container: Layout style (e.g horizontal) /// Container::View: The name taken from `WlcView` pub fn name(&self) -> String { match *self { Container::Root(_) => "Root Container".into(), Container::Output { handle, .. } => { handle.get_name() }, Container::Workspace { ref name, .. } => name.clone(), Container::Container { layout, .. } => { format!("{:?}", layout) }, Container::View { handle, ..} => { handle.get_title() } } } pub fn render_borders(&mut self) { match *self { Container::View { ref mut borders, .. } | Container::Container { ref mut borders, .. } => { if let Some(borders) = borders.as_mut() { borders.render(); } }, _ => panic!("Tried to render a non-view / non-container") } } pub fn draw_borders(&mut self) { // TODO Eventually, we should use an enum to choose which way to draw the // border, but for now this will do. match *self { Container::View { ref mut borders, handle, .. } => { if let Some(borders_) = borders.take() { let geometry = handle.get_geometry() .expect("View had no geometry"); // TODO Don't hard code color *borders = BordersDraw::new(borders_.enable_cairo().unwrap()) .draw(geometry).ok(); } }, Container::Container { ref mut borders, .. } => { borders.take(); }, _ => panic!("Tried to render a non-view / non-container") } } /// Resizes the border buffer to fit within this geometry, if the /// `View`/`Container` has a border wrapping it. 
/// /// # Panics /// Panics on non-`View`/`Container`s pub fn resize_borders(&mut self, geo: Geometry) { match *self { Container::View { ref mut borders, ..} => { if let Some(borders_) = borders.take() { *borders = borders_.reallocate_buffer(geo) } }, Container::Container { ref mut borders, ..} => { // TODO FIXME let output = WlcOutput::focused(); *borders = borders.take().and_then(|b| b.reallocate_buffer(geo)) .or_else(|| Borders::new(geo, output)); } ref container => { error!("Tried to resize border to {:#?} on {:#?}", geo, container); panic!("Expected a View/Container, got a different type") } } } } #[cfg(test)] mod tests { use super::*; use rustwlc::*; #[test] fn can_have_child() { let root = ContainerType::Root; let output = ContainerType::Output; let workspace = ContainerType::Workspace; let container = ContainerType::Container; let view = ContainerType::View; assert!(root.can_have_child(output), "Root > output"); assert!(output.can_have_child(workspace), "Output > workspace"); assert!(workspace.can_have_child(container), "Workspace > container"); assert!(container.can_have_child(container), "Container > container"); assert!(container.can_have_child(view), "Container > view"); assert!(!root.can_have_child(root), "! Root > root"); assert!(!root.can_have_child(workspace), "! Root > workspace"); assert!(!root.can_have_child(container), "! Root > container"); assert!(!root.can_have_child(view), "! Root > view"); assert!(!output.can_have_child(root), "! Output > root"); assert!(!output.can_have_child(output), "! Output > output"); assert!(!output.can_have_child(container), "! Output > container"); assert!(!output.can_have_child(view), "! Output > view"); assert!(!workspace.can_have_child(root), "! Workspace > root"); assert!(!workspace.can_have_child(output), "! Workspace > output"); assert!(!workspace.can_have_child(workspace), "! Workspace > workspace"); assert!(!workspace.can_have_child(view), "! Workspace > view"); assert!(!container.can_have_child(root), "! 
Container > root"); assert!(!container.can_have_child(workspace), "! Container > workspace"); assert!(!container.can_have_child(output), "! Container > container"); assert!(!view.can_have_child(root), "! View > root"); assert!(!view.can_have_child(output), "! View > output"); assert!(!view.can_have_child(workspace), "! View > workspace"); assert!(!view.can_have_child(container), "! View > container"); assert!(!view.can_have_child(view), "! View > view"); } #[test] #[allow(unused_variables)] /// Tests set and get geometry fn geometry_test() { use rustwlc::*; let test_geometry1 = Geometry { origin: Point { x: 800, y: 600 }, size: Size { w: 500, h: 500} }; let test_geometry2 = Geometry { origin: Point { x: 1024, y: 2048}, size: Size { w: 500, h: 700} }; let root = Container::new_root(); assert!(root.get_geometry().is_none()); let output = Container::new_output(WlcView::root().as_output()); let workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); assert_eq!(workspace.get_geometry(), Some(Geometry { size: Size { w: 500, h: 500}, origin: Point { x: 0, y: 0} })); } #[test] fn layout_change_test() { let root = Container::new_root(); let output = Container::new_output(WlcView::root().as_output()); let workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); let mut container = Container::new_container(Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 0, h:0} }); let view = Container::new_view(WlcView::root(), None); /* Container first, the only thing we can set the layout on */ let layout = match container { Container::Container { ref layout, .. } => layout.clone(), _ => panic!() }; assert_eq!(layout, Layout::Horizontal); let layouts = [Layout::Vertical, Layout::Horizontal]; for new_layout in &layouts { container.set_layout(*new_layout).ok(); let layout = match container { Container::Container { ref layout, .. 
} => layout.clone(), _ => panic!() }; assert_eq!(layout, *new_layout); } for new_layout in &layouts { for container in &mut [root.clone(), output.clone(), workspace.clone(), view.clone()] { let result = container.set_layout(*new_layout); assert!(result.is_err()); } } } #[test] fn floating_tests() { let mut root = Container::new_root(); let mut output = Container::new_output(WlcView::root().as_output()); let mut workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); let mut container = Container::new_container(Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 0, h:0} }); let mut view = Container::new_view(WlcView::root(), None); // by default, none are floating. assert!(!root.floating()); assert!(!output.floating()); assert!(!workspace.floating()); assert!(!container.floating()); assert!(!view.floating()); // trying to do anything to root, output, or workspace is Err. assert_eq!(root.set_floating(true), Err(ContainerType::Root)); assert_eq!(root.set_floating(false), Err(ContainerType::Root)); assert_eq!(output.set_floating(true), Err(ContainerType::Output)); assert_eq!(output.set_floating(false), Err(ContainerType::Output)); assert_eq!(workspace.set_floating(true), Err(ContainerType::Workspace)); assert_eq!(workspace.set_floating(false), Err(ContainerType::Workspace)); assert_eq!(container.set_floating(true), Ok(ContainerType::Container)); assert!(container.floating()); assert_eq!(container.set_floating(false), Ok(ContainerType::Container)); assert!(!container.floating()); assert_eq!(view.set_floating(true), Ok(ContainerType::View)); assert!(view.floating()); assert_eq!(view.set_floating(false), Ok(ContainerType::View)); assert!(!view.floating()); } } Reloading w/ pos border size works //! 
//! Container types

use uuid::Uuid;

// NOTE(review): not referenced in this chunk — presumably the minimum size a
// container may be resized to; confirm at the call sites.
pub static MIN_SIZE: Size = Size { w: 80u32, h: 40u32 };

use rustwlc::handle::{WlcView, WlcOutput};
use rustwlc::{Geometry, ResizeEdge, Point, Size, VIEW_FULLSCREEN};
use super::borders::{Borders, BordersDraw};
use ::render::{Renderable, Drawable};
use super::bar::Bar;

/// A handle to either a view or output
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Handle {
    View(WlcView),
    Output(WlcOutput)
}

/// Types of containers
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ContainerType {
    /// Root container, only one exists
    Root,
    /// WlcOutput/Monitor
    Output,
    /// A workspace
    Workspace,
    /// A Container, houses views and other containers
    Container,
    /// A view (window)
    View
}

impl ContainerType {
    /// Whether this container can be used as the parent of another
    pub fn can_have_child(self, other: ContainerType) -> bool {
        use self::ContainerType::*;
        // The tree hierarchy is strict:
        // Root > Output > Workspace > Container > {Container, View}.
        match self {
            Root => other == Output,
            Output => other == Workspace,
            Workspace => other == Container,
            Container => other == Container || other == View,
            View => false
        }
    }
}

/// Layout mode for a container
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Layout {
    Horizontal,
    Vertical
}

/// Represents an item in the container tree.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Container {
    /// Root node of the container
    Root(Uuid),
    /// Output
    Output {
        /// Handle to the wlc
        handle: WlcOutput,
        /// Optional background for the output
        background: Option<WlcView>,
        /// Optional bar for the output
        bar: Option<Bar>,
        /// UUID associated with container, client program can use container
        id: Uuid,
    },
    /// Workspace
    Workspace {
        /// Name of the workspace
        name: String,
        /// The geometry of the workspace on the screen.
        /// Might be different if there is e.g a bar present
        geometry: Geometry,
        /// `Vec` of all children that are fullscreen.
        /// This is used to disable certain features while there is a fullscreen
        /// (e.g: focus switching, resizing, and moving containers)
        fullscreen_c: Vec<Uuid>,
        /// UUID associated with container, client program can use container
        id: Uuid,
    },
    /// Container
    Container {
        /// How the container is layed out
        layout: Layout,
        /// If the container is floating
        floating: bool,
        /// If the container is fullscreen
        fullscreen: bool,
        /// The geometry of the container, relative to the parent container
        geometry: Geometry,
        /// UUID associated with container, client program can use container
        id: Uuid,
        /// The border drawn to the screen
        borders: Option<Borders>,
    },
    /// View or window
    View {
        /// The wlc handle to the view
        handle: WlcView,
        /// Whether this view is floating
        floating: bool,
        /// Effective geometry. This is the size of the container including
        /// borders and gaps. It does _not_ change when an app becomes
        /// fullscreen. E.g to get the fullscreen size use `handle.get_geometry`
        effective_geometry: Geometry,
        /// UUID associated with container, client program can use container
        id: Uuid,
        /// The border drawn to the screen
        borders: Option<Borders>,
    }
}

impl Container {
    /// Creates a new root container.
    pub fn new_root() -> Container {
        Container::Root(Uuid::new_v4())
    }

    /// Creates a new output container with the given output
    pub fn new_output(handle: WlcOutput) -> Container {
        Container::Output {
            handle: handle,
            background: None,
            bar: None,
            id: Uuid::new_v4()
        }
    }

    /// Creates a new workspace container with the given name and size.
    /// Usually the size is the same as the output it resides on,
    /// unless there is a bar or something.
pub fn new_workspace(name: String, geometry: Geometry) -> Container {
        Container::Workspace {
            name: name,
            geometry: geometry,
            // No children are fullscreen in a fresh workspace.
            fullscreen_c: Vec::new(),
            id: Uuid::new_v4()
        }
    }

    /// Creates a new container
    pub fn new_container(geometry: Geometry) -> Container {
        Container::Container {
            // Horizontal is the default layout for new containers.
            layout: Layout::Horizontal,
            floating: false,
            fullscreen: false,
            geometry: geometry,
            id: Uuid::new_v4(),
            borders: None
        }
    }

    /// Creates a new view container with the given handle
    pub fn new_view(handle: WlcView, borders: Option<Borders>) -> Container {
        // Seed the effective geometry from the compositor's current geometry.
        let geometry = handle.get_geometry()
            .expect("View had no geometry");
        Container::View {
            handle: handle,
            floating: false,
            effective_geometry: geometry,
            id: Uuid::new_v4(),
            borders: borders
        }
    }

    /// Sets the visibility of this container
    pub fn set_visibility(&mut self, visibility: bool) {
        // NOTE(review): mask 1 presumably means "visible on the default output
        // mask" — confirm against rustwlc's set_mask docs.
        let mask = if visibility { 1 } else { 0 };
        if let Some(handle) = self.get_handle() {
            match handle {
                Handle::View(view) => {
                    view.set_mask(mask)
                },
                _ => {},
            }
        }
    }

    /// Gets the type of this container
    pub fn get_type(&self) -> ContainerType {
        match *self {
            Container::Root(_) => ContainerType::Root,
            Container::Output { .. } => ContainerType::Output,
            Container::Workspace { .. } => ContainerType::Workspace,
            Container::Container { .. } => ContainerType::Container,
            Container::View { .. } => ContainerType::View
        }
    }

    /// Gets the view handle of the view container, if this is a view container
    pub fn get_handle(&self) -> Option<Handle> {
        match *self {
            Container::View { ref handle, ..} => Some(Handle::View(handle.clone())),
            Container::Output { ref handle, .. } => Some(Handle::Output(handle.clone())),
            _ => None
        }
    }

    /// Gets the name of the workspace, if this container is a workspace.
    pub fn get_name(&self) -> Option<&str> {
        match *self {
            Container::Workspace { ref name, ..} => Some(name),
            _ => None
        }
    }

    /// Gets the geometry of the container, if the container has one.
    /// Root: Returns None
    /// Workspace/Output: Size is the size of the screen, origin is just 0,0
    /// Container/View: Size is the size of the container,
    /// origin is the coordinates relative to the parent container.
    pub fn get_geometry(&self) -> Option<Geometry> {
        match *self {
            Container::Root(_) => None,
            Container::Output { ref handle, ref bar, .. } => {
                let mut resolution = handle.get_resolution()
                    .expect("Couldn't get output resolution");
                let mut origin = Point { x: 0, y: 0 };
                // If a bar is present, shrink the usable area by its height
                // and shift the origin below it.
                if let Some(handle) = bar.as_ref().map(|bar| **bar) {
                    let bar_g = handle.get_geometry()
                        .expect("Bar had no geometry");
                    let Size { h, .. } = bar_g.size;
                    // TODO Allow bars on the horizontal side
                    // This is for bottom
                    //resolution.h = resolution.h.saturating_sub(h);
                    origin.y += h as i32;
                    resolution.h = resolution.h.saturating_sub(h)
                }
                Some(Geometry {
                    origin: origin,
                    size: resolution
                })
            },
            Container::Workspace { geometry, .. } |
            Container::Container { geometry, .. } => Some(geometry),
            Container::View { effective_geometry, .. } => {
                Some(effective_geometry)
            },
        }
    }

    /// Gets the actual geometry for a `WlcView` or `WlcOutput`
    ///
    /// Unlike `get_geometry`, this does not account for borders/gaps,
    /// and instead is just a thin wrapper around
    /// `handle.get_geometry`/`handle.get_resolution`.
    ///
    /// Most of the time you want `get_geometry`, as you should account for the
    /// borders, gaps, and top bar.
    ///
    /// For non-`View`/`Output` containers, this always returns `None`
    pub fn get_actual_geometry(&self) -> Option<Geometry> {
        match *self {
            Container::View { handle, .. } => handle.get_geometry(),
            Container::Output { handle, .. } => {
                handle.get_resolution()
                    .map(|size| Geometry {
                        origin: Point { x: 0, y: 0 },
                        size: size
                    })
            },
            _ => None
        }
    }

    /// Sets the geometry behind the container. Does nothing if container is root.
    ///
    /// For view you need to set the appropriate edges (which can be empty).
/// If you are not intending to set the geometry of a view, simply pass `ResizeEdge::empty()`
    pub fn set_geometry(&mut self, edges: ResizeEdge, geo: Geometry) {
        match *self {
            Container::Root(_) => error!("Tried to set the geometry of the root!"),
            Container::Output { ref handle, .. } => {
                // NOTE(review): scale is hard-coded to 1 — presumably no HiDPI
                // support yet; confirm against rustwlc's set_resolution docs.
                handle.set_resolution(geo.size, 1);
            },
            Container::Workspace { ref mut geometry, .. } |
            Container::Container { ref mut geometry, .. } => {
                *geometry = geo;
            },
            Container::View { ref handle, ref mut effective_geometry, .. } => {
                // Update both the compositor-side geometry and our cached
                // `effective_geometry` so the two stay in sync.
                handle.set_geometry(edges, geo);
                *effective_geometry = geo;
            }
        }
    }

    /// Sets the layout of this container.
    ///
    /// Only `Container::Container` carries a layout; any other variant
    /// yields an `Err` describing the offending container.
    pub fn set_layout(&mut self, new_layout: Layout) -> Result<(), String>{
        match *self {
            Container::Container { ref mut layout, .. } => *layout = new_layout,
            ref other => return Err(
                format!("Can only set the layout of a container, not {:?}", other))
        }
        Ok(())
    }

    /// Gets the UUID associated with this container.
    /// Every variant carries an id, so this never fails.
    pub fn get_id(&self) -> Uuid {
        match *self {
            Container::Root(id) | Container::Output { id, .. } |
            Container::Workspace { id, .. } | Container::Container { id, .. } |
            Container::View { id, .. } => {
                id
            }
        }
    }

    /// Whether this container is floating.
    ///
    /// Only views and containers can float; workspaces, outputs, and the
    /// root always report `false`.
    pub fn floating(&self) -> bool {
        match *self {
            Container::View { floating, .. } | Container::Container { floating, .. } => floating,
            Container::Workspace { .. } | Container::Output { .. } | Container::Root(_) => false
        }
    }

    // TODO Make these set_* functions that can fail return a proper error type.
    /// If not set on a view or container, error is returned telling what
    /// container type that this function was (incorrectly) called on.
    pub fn set_floating(&mut self, val: bool) -> Result<ContainerType, ContainerType> {
        let c_type = self.get_type();
        match *self {
            Container::View { ref mut floating, .. } |
            Container::Container { ref mut floating, .. } => {
                *floating = val;
                Ok(c_type)
            },
            _ => {
                Err(c_type)
            }
        }
    }

    /// Sets the fullscreen flag on the container to the specified value.
    ///
    /// If called on a non View/Container, then returns an Err with the wrong type.
    pub fn set_fullscreen(&mut self, val: bool) -> Result<(), ContainerType> {
        let c_type = self.get_type();
        match *self {
            Container::View { handle, effective_geometry, .. } => {
                // Views delegate fullscreen state to the compositor…
                handle.set_state(VIEW_FULLSCREEN, val);
                if !val {
                    // …and restore the cached pre-fullscreen geometry on exit.
                    handle.set_geometry(ResizeEdge::empty(), effective_geometry)
                }
                Ok(())
            },
            Container::Container { ref mut fullscreen, .. } => {
                // Containers track the flag themselves.
                *fullscreen = val;
                Ok(())
            },
            _ => Err(c_type)
        }
    }

    /// Determines if a container is fullscreen.
    ///
    /// Workspaces, Outputs, and the Root are never fullscreen.
    pub fn fullscreen(&self) -> bool {
        match *self {
            Container::View { handle, .. } => {
                // The compositor owns the fullscreen state for views.
                handle.get_state().intersects(VIEW_FULLSCREEN)
            },
            Container::Container { fullscreen, .. } => fullscreen,
            _ => false
        }
    }

    /// Updates the workspace (`self`) that the `id` resides in to reflect
    /// whether the container with the `id` is fullscreen (`toggle`).
    ///
    /// If called with a non-workspace an Err is returned with
    /// the incorrect type.
    pub fn update_fullscreen_c(&mut self, id: Uuid, toggle: bool) -> Result<(), ContainerType> {
        let c_type = self.get_type();
        match *self {
            Container::Workspace { ref mut fullscreen_c, .. } => {
                if !toggle {
                    // Remove the first matching id; a missing id is not an error.
                    match fullscreen_c.iter().position(|c_id| *c_id == id) {
                        Some(index) => { fullscreen_c.remove(index); },
                        None => {}
                    }
                } else {
                    fullscreen_c.push(id);
                }
                Ok(())
            },
            _ => Err(c_type)
        }
    }

    /// If the container is a workspace, returns the children in the workspace that
    /// are fullscreen. The last child is the one visible to the user.
    ///
    /// Computes in O(1) time.
    ///
    /// If the container is not a workspace, None is returned.
    pub fn fullscreen_c(&self) -> Option<&Vec<Uuid>> {
        match *self {
            Container::Workspace { ref fullscreen_c, .. } => Some(fullscreen_c),
            _ => None
        }
    }

    /// Gets the name of the container.
///
/// Container::Root: returns simply the string "Root Container"
/// Container::Output: The name of the output
/// Container::Workspace: The name of the workspace
/// Container::Container: Layout style (e.g horizontal)
/// Container::View: The name taken from `WlcView`
    pub fn name(&self) -> String {
        match *self {
            Container::Root(_) => "Root Container".into(),
            Container::Output { handle, .. } => {
                handle.get_name()
            },
            Container::Workspace { ref name, .. } => name.clone(),
            Container::Container { layout, .. } => {
                format!("{:?}", layout)
            },
            Container::View { handle, ..} => {
                handle.get_title()
            }
        }
    }

    /// Renders the attached borders, if any.
    ///
    /// # Panics
    /// Panics on non-`View`/`Container`s.
    pub fn render_borders(&mut self) {
        match *self {
            Container::View { ref mut borders, .. } |
            Container::Container { ref mut borders, .. } => {
                if let Some(borders) = borders.as_mut() {
                    borders.render();
                }
            },
            _ => panic!("Tried to render a non-view / non-container")
        }
    }

    /// Redraws the border buffer for this view/container.
    ///
    /// # Panics
    /// Panics on non-`View`/`Container`s.
    pub fn draw_borders(&mut self) {
        // TODO Eventually, we should use an enum to choose which way to draw the
        // border, but for now this will do.
        match *self {
            Container::View { ref mut borders, handle, .. } => {
                // `take` the borders so the draw pipeline can consume them;
                // `.draw(..).ok()` drops them if drawing fails.
                if let Some(borders_) = borders.take() {
                    let geometry = handle.get_geometry()
                        .expect("View had no geometry");
                    // TODO Don't hard code color
                    *borders = BordersDraw::new(borders_.enable_cairo().unwrap())
                        .draw(geometry).ok();
                }
            },
            Container::Container { ref mut borders, .. } => {
                // NOTE(review): container borders are simply dropped here —
                // presumably containers do not draw borders yet; confirm.
                borders.take();
            },
            _ => panic!("Tried to render a non-view / non-container")
        }
    }

    /// Resizes the border buffer to fit within this geometry, if the
/// /// # Panics /// Panics on non-`View`/`Container`s pub fn resize_borders(&mut self, geo: Geometry) { match *self { Container::View { handle, ref mut borders, ..} => { if let Some(borders_) = borders.take() { *borders = borders_.reallocate_buffer(geo) } else { let thickness = Borders::thickness(); if thickness > 0 { let output = handle.get_output(); *borders = Borders::new(geo, output); } } }, Container::Container { ref mut borders, ..} => { // TODO FIXME let output = WlcOutput::focused(); *borders = borders.take().and_then(|b| b.reallocate_buffer(geo)) .or_else(|| Borders::new(geo, output)); } ref container => { error!("Tried to resize border to {:#?} on {:#?}", geo, container); panic!("Expected a View/Container, got a different type") } } } } #[cfg(test)] mod tests { use super::*; use rustwlc::*; #[test] fn can_have_child() { let root = ContainerType::Root; let output = ContainerType::Output; let workspace = ContainerType::Workspace; let container = ContainerType::Container; let view = ContainerType::View; assert!(root.can_have_child(output), "Root > output"); assert!(output.can_have_child(workspace), "Output > workspace"); assert!(workspace.can_have_child(container), "Workspace > container"); assert!(container.can_have_child(container), "Container > container"); assert!(container.can_have_child(view), "Container > view"); assert!(!root.can_have_child(root), "! Root > root"); assert!(!root.can_have_child(workspace), "! Root > workspace"); assert!(!root.can_have_child(container), "! Root > container"); assert!(!root.can_have_child(view), "! Root > view"); assert!(!output.can_have_child(root), "! Output > root"); assert!(!output.can_have_child(output), "! Output > output"); assert!(!output.can_have_child(container), "! Output > container"); assert!(!output.can_have_child(view), "! Output > view"); assert!(!workspace.can_have_child(root), "! Workspace > root"); assert!(!workspace.can_have_child(output), "! 
Workspace > output"); assert!(!workspace.can_have_child(workspace), "! Workspace > workspace"); assert!(!workspace.can_have_child(view), "! Workspace > view"); assert!(!container.can_have_child(root), "! Container > root"); assert!(!container.can_have_child(workspace), "! Container > workspace"); assert!(!container.can_have_child(output), "! Container > container"); assert!(!view.can_have_child(root), "! View > root"); assert!(!view.can_have_child(output), "! View > output"); assert!(!view.can_have_child(workspace), "! View > workspace"); assert!(!view.can_have_child(container), "! View > container"); assert!(!view.can_have_child(view), "! View > view"); } #[test] #[allow(unused_variables)] /// Tests set and get geometry fn geometry_test() { use rustwlc::*; let test_geometry1 = Geometry { origin: Point { x: 800, y: 600 }, size: Size { w: 500, h: 500} }; let test_geometry2 = Geometry { origin: Point { x: 1024, y: 2048}, size: Size { w: 500, h: 700} }; let root = Container::new_root(); assert!(root.get_geometry().is_none()); let output = Container::new_output(WlcView::root().as_output()); let workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); assert_eq!(workspace.get_geometry(), Some(Geometry { size: Size { w: 500, h: 500}, origin: Point { x: 0, y: 0} })); } #[test] fn layout_change_test() { let root = Container::new_root(); let output = Container::new_output(WlcView::root().as_output()); let workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); let mut container = Container::new_container(Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 0, h:0} }); let view = Container::new_view(WlcView::root(), None); /* Container first, the only thing we can set the layout on */ let layout = match container { Container::Container { ref layout, .. 
} => layout.clone(), _ => panic!() }; assert_eq!(layout, Layout::Horizontal); let layouts = [Layout::Vertical, Layout::Horizontal]; for new_layout in &layouts { container.set_layout(*new_layout).ok(); let layout = match container { Container::Container { ref layout, .. } => layout.clone(), _ => panic!() }; assert_eq!(layout, *new_layout); } for new_layout in &layouts { for container in &mut [root.clone(), output.clone(), workspace.clone(), view.clone()] { let result = container.set_layout(*new_layout); assert!(result.is_err()); } } } #[test] fn floating_tests() { let mut root = Container::new_root(); let mut output = Container::new_output(WlcView::root().as_output()); let mut workspace = Container::new_workspace("1".to_string(), Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 500, h: 500 } }); let mut container = Container::new_container(Geometry { origin: Point { x: 0, y: 0}, size: Size { w: 0, h:0} }); let mut view = Container::new_view(WlcView::root(), None); // by default, none are floating. assert!(!root.floating()); assert!(!output.floating()); assert!(!workspace.floating()); assert!(!container.floating()); assert!(!view.floating()); // trying to do anything to root, output, or workspace is Err. 
assert_eq!(root.set_floating(true), Err(ContainerType::Root)); assert_eq!(root.set_floating(false), Err(ContainerType::Root)); assert_eq!(output.set_floating(true), Err(ContainerType::Output)); assert_eq!(output.set_floating(false), Err(ContainerType::Output)); assert_eq!(workspace.set_floating(true), Err(ContainerType::Workspace)); assert_eq!(workspace.set_floating(false), Err(ContainerType::Workspace)); assert_eq!(container.set_floating(true), Ok(ContainerType::Container)); assert!(container.floating()); assert_eq!(container.set_floating(false), Ok(ContainerType::Container)); assert!(!container.floating()); assert_eq!(view.set_floating(true), Ok(ContainerType::View)); assert!(view.floating()); assert_eq!(view.set_floating(false), Ok(ContainerType::View)); assert!(!view.floating()); } }
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15679 //! An owned, growable string that enforces that its contents are valid UTF-8. use core::prelude::*; use core::default::Default; use core::fmt; use core::mem; use core::ptr; use core::raw::Slice; use {Collection, Mutable, MutableSeq}; use hash; use str; use str::{CharRange, StrAllocating, MaybeOwned, Owned, Slice}; use vec::Vec; /// A growable string stored as a UTF-8 encoded buffer. #[deriving(Clone, PartialEq, PartialOrd, Eq, Ord)] pub struct String { vec: Vec<u8>, } impl String { /// Creates a new string buffer initialized with the empty string. /// /// # Example /// /// ``` /// let mut s = String::new(); /// ``` #[inline] pub fn new() -> String { String { vec: Vec::new(), } } /// Creates a new string buffer with the given capacity. /// The string will be able to hold exactly `capacity` bytes without /// reallocating. If `capacity` is 0, the string will not allocate. /// /// # Example /// /// ``` /// let mut s = String::with_capacity(10); /// ``` #[inline] pub fn with_capacity(capacity: uint) -> String { String { vec: Vec::with_capacity(capacity), } } /// Creates a new string buffer from the given string. /// /// # Example /// /// ``` /// let s = String::from_str("hello"); /// assert_eq!(s.as_slice(), "hello"); /// ``` #[inline] pub fn from_str(string: &str) -> String { String { vec: Vec::from_slice(string.as_bytes()) } } /// Deprecated. 
/// Replaced by `string::raw::from_parts`
    #[inline]
    #[deprecated = "Replaced by string::raw::from_parts"]
    pub unsafe fn from_raw_parts(length: uint, capacity: uint, ptr: *mut u8) -> String {
        raw::from_parts(ptr, length, capacity)
    }

    /// Deprecated.
    #[deprecated = "obsoleted by the removal of ~str"]
    #[inline]
    pub fn from_owned_str(string: String) -> String {
        string
    }

    /// Returns the vector as a string buffer, if possible, taking care not to
    /// copy it.
    ///
    /// Returns `Err` with the original vector if the vector contains invalid
    /// UTF-8.
    ///
    /// # Example
    ///
    /// ```rust
    /// let hello_vec = vec![104, 101, 108, 108, 111];
    /// let s = String::from_utf8(hello_vec);
    /// assert_eq!(s, Ok("hello".to_string()));
    ///
    /// let invalid_vec = vec![240, 144, 128];
    /// let s = String::from_utf8(invalid_vec);
    /// assert_eq!(s, Err(vec![240, 144, 128]));
    /// ```
    #[inline]
    pub fn from_utf8(vec: Vec<u8>) -> Result<String, Vec<u8>> {
        if str::is_utf8(vec.as_slice()) {
            Ok(String { vec: vec })
        } else {
            Err(vec)
        }
    }

    /// Converts a vector of bytes to a new utf-8 string.
    /// Any invalid utf-8 sequences are replaced with U+FFFD REPLACEMENT CHARACTER.
    ///
    /// # Example
    ///
    /// ```rust
    /// let input = b"Hello \xF0\x90\x80World";
    /// let output = String::from_utf8_lossy(input);
    /// assert_eq!(output.as_slice(), "Hello \uFFFDWorld");
    /// ```
    pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> MaybeOwned<'a> {
        // Fast path: already valid UTF-8, hand back a borrowed slice.
        if str::is_utf8(v) {
            return Slice(unsafe { mem::transmute(v) })
        }

        static TAG_CONT_U8: u8 = 128u8;
        static REPLACEMENT: &'static [u8] = b"\xEF\xBF\xBD"; // U+FFFD in UTF-8
        let mut i = 0;
        let total = v.len();
        fn unsafe_get(xs: &[u8], i: uint) -> u8 {
            unsafe { *xs.unsafe_ref(i) }
        }
        // Out-of-bounds reads return 0, which fails every continuation-byte
        // check below and so triggers the error path.
        fn safe_get(xs: &[u8], i: uint, total: uint) -> u8 {
            if i >= total {
                0
            } else {
                unsafe_get(xs, i)
            }
        }

        let mut res = String::with_capacity(total);

        if i > 0 {
            unsafe {
                res.push_bytes(v.slice_to(i))
            };
        }

        // subseqidx is the index of the first byte of the subsequence we're looking at.
        // It's used to copy a bunch of contiguous good codepoints at once instead of copying
        // them one by one.
        let mut subseqidx = 0;

        while i < total {
            let i_ = i;
            let byte = unsafe_get(v, i);
            i += 1;

            // Flush the pending run of good bytes, then emit U+FFFD.
            macro_rules! error(() => ({
                unsafe {
                    if subseqidx != i_ {
                        res.push_bytes(v.slice(subseqidx, i_));
                    }
                    subseqidx = i;
                    res.push_bytes(REPLACEMENT);
                }
            }))

            if byte < 128u8 {
                // subseqidx handles this
            } else {
                let w = str::utf8_char_width(byte);

                match w {
                    2 => {
                        if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    3 => {
                        // First continuation byte range depends on the lead
                        // byte (rejects overlong encodings and surrogates).
                        match (byte, safe_get(v, i, total)) {
                            (0xE0 , 0xA0 .. 0xBF) => (),
                            (0xE1 .. 0xEC, 0x80 .. 0xBF) => (),
                            (0xED , 0x80 .. 0x9F) => (),
                            (0xEE .. 0xEF, 0x80 .. 0xBF) => (),
                            _ => {
                                error!();
                                continue;
                            }
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    4 => {
                        // Rejects overlong encodings and code points > U+10FFFF.
                        match (byte, safe_get(v, i, total)) {
                            (0xF0 , 0x90 .. 0xBF) => (),
                            (0xF1 .. 0xF3, 0x80 .. 0xBF) => (),
                            (0xF4 , 0x80 .. 0x8F) => (),
                            _ => {
                                error!();
                                continue;
                            }
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                        if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 {
                            error!();
                            continue;
                        }
                        i += 1;
                    }
                    _ => {
                        error!();
                        continue;
                    }
                }
            }
        }
        // Flush the final run of good bytes.
        if subseqidx < total {
            unsafe {
                res.push_bytes(v.slice(subseqidx, total))
            };
        }
        Owned(res.into_string())
    }

    /// Decode a UTF-16 encoded vector `v` into a `String`, returning `None`
    /// if `v` contains any invalid data.
///
    /// # Example
    ///
    /// ```rust
    /// // 𝄞music
    /// let mut v = [0xD834, 0xDD1E, 0x006d, 0x0075,
    ///              0x0073, 0x0069, 0x0063];
    /// assert_eq!(String::from_utf16(v), Some("𝄞music".to_string()));
    ///
    /// // 𝄞mu<invalid>ic
    /// v[4] = 0xD800;
    /// assert_eq!(String::from_utf16(v), None);
    /// ```
    pub fn from_utf16(v: &[u16]) -> Option<String> {
        // Capacity is a heuristic lower bound; the buffer grows as needed.
        let mut s = String::with_capacity(v.len() / 2);
        for c in str::utf16_items(v) {
            match c {
                str::ScalarValue(c) => s.push_char(c),
                // Any lone surrogate makes the whole input invalid.
                str::LoneSurrogate(_) => return None
            }
        }
        Some(s)
    }

    /// Decode a UTF-16 encoded vector `v` into a string, replacing
    /// invalid data with the replacement character (U+FFFD).
    ///
    /// # Example
    /// ```rust
    /// // 𝄞mus<invalid>ic<invalid>
    /// let v = [0xD834, 0xDD1E, 0x006d, 0x0075,
    ///          0x0073, 0xDD1E, 0x0069, 0x0063,
    ///          0xD834];
    ///
    /// assert_eq!(String::from_utf16_lossy(v),
    ///            "𝄞mus\uFFFDic\uFFFD".to_string());
    /// ```
    pub fn from_utf16_lossy(v: &[u16]) -> String {
        str::utf16_items(v).map(|c| c.to_char_lossy()).collect()
    }

    /// Convert a vector of chars to a string.
    ///
    /// # Example
    ///
    /// ```rust
    /// let chars = ['h', 'e', 'l', 'l', 'o'];
    /// let s = String::from_chars(chars);
    /// assert_eq!(s.as_slice(), "hello");
    /// ```
    #[inline]
    pub fn from_chars(chs: &[char]) -> String {
        chs.iter().map(|c| *c).collect()
    }

    /// Return the underlying byte buffer, encoded as UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let s = String::from_str("hello");
    /// let bytes = s.into_bytes();
    /// assert_eq!(bytes, vec![104, 101, 108, 108, 111]);
    /// ```
    #[inline]
    pub fn into_bytes(self) -> Vec<u8> {
        self.vec
    }

    /// Pushes the given string onto this buffer; then, returns `self` so that it can be used
    /// again.
/// (continued documentation for `append`)
    ///
    /// # Example
    ///
    /// ```
    /// let s = String::from_str("hello");
    /// let big = s.append(" ").append("world").append("!");
    /// // s has now been moved and cannot be used
    ///
    /// assert_eq!(big.as_slice(), "hello world!");
    /// ```
    #[inline]
    pub fn append(mut self, second: &str) -> String {
        // Consumes `self` by value so the call can be chained.
        self.push_str(second);
        self
    }

    /// Creates a string buffer by repeating a character `length` times.
    ///
    /// # Example
    ///
    /// ```
    /// let s = String::from_char(5, 'a');
    /// assert_eq!(s.as_slice(), "aaaaa");
    /// ```
    #[inline]
    pub fn from_char(length: uint, ch: char) -> String {
        if length == 0 {
            return String::new()
        }

        // Push the character once to learn its UTF-8 width, then reserve
        // room for all remaining repetitions up front.
        let mut result = String::new();
        result.push_char(ch);
        let total = result.len() * length;
        result.reserve(total);
        for _ in range(1, length) {
            result.push_char(ch)
        }
        result
    }

    /// Convert a byte to a UTF-8 string.
    ///
    /// # Failure
    ///
    /// Fails if invalid UTF-8
    ///
    /// # Example
    ///
    /// ```rust
    /// let s = String::from_byte(104);
    /// assert_eq!(s.as_slice(), "h");
    /// ```
    pub fn from_byte(b: u8) -> String {
        // Only ASCII bytes are valid one-byte UTF-8 sequences.
        assert!(b < 128u8);
        String::from_char(1, b as char)
    }

    /// Pushes the given string onto this string buffer.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// s.push_str("bar");
    /// assert_eq!(s.as_slice(), "foobar");
    /// ```
    #[inline]
    pub fn push_str(&mut self, string: &str) {
        // `str` is guaranteed UTF-8, so appending its raw bytes is safe.
        self.vec.push_all(string.as_bytes())
    }

    /// Push `ch` onto the given string `count` times.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// s.grow(5, 'Z');
    /// assert_eq!(s.as_slice(), "fooZZZZZ");
    /// ```
    #[inline]
    pub fn grow(&mut self, count: uint, ch: char) {
        let mut remaining = count;
        while remaining > 0 {
            self.push_char(ch);
            remaining -= 1;
        }
    }

    /// Returns the number of bytes that this string buffer can hold without reallocating.
    ///
    /// # Example
    ///
    /// ```
    /// let s = String::with_capacity(10);
    /// assert!(s.byte_capacity() >= 10);
    /// ```
    #[inline]
    pub fn byte_capacity(&self) -> uint {
        self.vec.capacity()
    }

    /// Reserves capacity for at least `extra` additional bytes in this string buffer.
/// (continued documentation for `reserve_additional`)
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::with_capacity(10);
    /// let before = s.byte_capacity();
    /// s.reserve_additional(100);
    /// assert!(s.byte_capacity() - before >= 100);
    /// ```
    #[inline]
    pub fn reserve_additional(&mut self, extra: uint) {
        // Growth policy is inherited from the underlying Vec<u8>.
        self.vec.reserve_additional(extra)
    }

    /// Reserves capacity for at least `capacity` bytes in this string buffer.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::new();
    /// s.reserve(10);
    /// assert!(s.byte_capacity() >= 10);
    /// ```
    #[inline]
    pub fn reserve(&mut self, capacity: uint) {
        self.vec.reserve(capacity)
    }

    /// Reserves capacity for exactly `capacity` bytes in this string buffer.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::new();
    /// s.reserve_exact(10);
    /// assert_eq!(s.byte_capacity(), 10);
    /// ```
    #[inline]
    pub fn reserve_exact(&mut self, capacity: uint) {
        // Unlike `reserve`, this does not round up to the Vec growth curve.
        self.vec.reserve_exact(capacity)
    }

    /// Shrinks the capacity of this string buffer to match its length.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// s.reserve(100);
    /// assert!(s.byte_capacity() >= 100);
    /// s.shrink_to_fit();
    /// assert_eq!(s.byte_capacity(), 3);
    /// ```
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.vec.shrink_to_fit()
    }

    /// Adds the given character to the end of the string.
/// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::new();
    /// unsafe {
    ///     s.push_bytes([104, 101, 108, 108, 111]);
    /// }
    /// assert_eq!(s.as_slice(), "hello");
    /// ```
    #[inline]
    pub unsafe fn push_bytes(&mut self, bytes: &[u8]) {
        // Caller guarantees `bytes` keeps the buffer valid UTF-8.
        self.vec.push_all(bytes)
    }

    /// Works with the underlying buffer as a byte slice.
    ///
    /// # Example
    ///
    /// ```
    /// let s = String::from_str("hello");
    /// assert_eq!(s.as_bytes(), &[104, 101, 108, 108, 111]);
    /// ```
    #[inline]
    pub fn as_bytes<'a>(&'a self) -> &'a [u8] {
        self.vec.as_slice()
    }

    /// Works with the underlying buffer as a mutable byte slice.
    ///
    /// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("hello");
    /// unsafe {
    ///     let bytes = s.as_mut_bytes();
    ///     bytes[1] = 51;
    ///     bytes[4] = 48;
    /// }
    /// assert_eq!(s.as_bytes(), &[104, 51, 108, 108, 48]);
    /// assert_eq!(s.as_slice(), "h3ll0")
    /// ```
    #[inline]
    pub unsafe fn as_mut_bytes<'a>(&'a mut self) -> &'a mut [u8] {
        self.vec.as_mut_slice()
    }

    /// Shorten a string to the specified length.
    ///
    /// # Failure
    ///
    /// Fails if `len` > current length.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("hello");
    /// s.truncate(2);
    /// assert_eq!(s.as_slice(), "he");
    /// ```
    #[inline]
    pub fn truncate(&mut self, len: uint) {
        // Also fails when `len` would split a multi-byte codepoint:
        // `is_char_boundary` fails (index out of range) for len > length,
        // and returns false mid-codepoint.
        assert!(self.as_slice().is_char_boundary(len));
        self.vec.truncate(len)
    }

    /// Appends a byte to this string buffer.
    ///
    /// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("hell");
    /// unsafe {
    ///     s.push_byte(111);
    /// }
    /// assert_eq!(s.as_slice(), "hello");
    /// ```
    #[inline]
    pub unsafe fn push_byte(&mut self, byte: u8) {
        self.vec.push(byte)
    }

    /// Removes the last byte from the string buffer and returns it.
/// Returns `None` if this string buffer is empty.
    ///
    /// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// unsafe {
    ///     assert_eq!(s.pop_byte(), Some(111));
    ///     assert_eq!(s.pop_byte(), Some(111));
    ///     assert_eq!(s.pop_byte(), Some(102));
    ///     assert_eq!(s.pop_byte(), None);
    /// }
    /// ```
    #[inline]
    pub unsafe fn pop_byte(&mut self) -> Option<u8> {
        let len = self.len();
        if len == 0 {
            return None
        }

        // Read the byte first, then shrink the logical length by one;
        // the caller must ensure this does not split a codepoint.
        let byte = self.as_bytes()[len - 1];
        self.vec.set_len(len - 1);
        Some(byte)
    }

    /// Removes the last character from the string buffer and returns it.
    /// Returns `None` if this string buffer is empty.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// assert_eq!(s.pop_char(), Some('o'));
    /// assert_eq!(s.pop_char(), Some('o'));
    /// assert_eq!(s.pop_char(), Some('f'));
    /// assert_eq!(s.pop_char(), None);
    /// ```
    #[inline]
    pub fn pop_char(&mut self) -> Option<char> {
        let len = self.len();
        if len == 0 {
            return None
        }

        // `char_range_at_reverse` gives the decoded char and the byte index
        // where it starts, so truncating to `next` drops exactly one char.
        let CharRange {ch, next} = self.as_slice().char_range_at_reverse(len);
        unsafe {
            self.vec.set_len(next);
        }
        Some(ch)
    }

    /// Removes the first byte from the string buffer and returns it.
    /// Returns `None` if this string buffer is empty.
    ///
    /// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// unsafe {
    ///     assert_eq!(s.shift_byte(), Some(102));
    ///     assert_eq!(s.shift_byte(), Some(111));
    ///     assert_eq!(s.shift_byte(), Some(111));
    ///     assert_eq!(s.shift_byte(), None);
    /// }
    /// ```
    pub unsafe fn shift_byte(&mut self) -> Option<u8> {
        self.vec.shift()
    }

    /// Removes the first character from the string buffer and returns it.
    /// Returns `None` if this string buffer is empty.
    ///
    /// # Warning
    ///
    /// This is a O(n) operation as it requires copying every element in the buffer.
/// (continued documentation for `shift_char`)
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("foo");
    /// assert_eq!(s.shift_char(), Some('f'));
    /// assert_eq!(s.shift_char(), Some('o'));
    /// assert_eq!(s.shift_char(), Some('o'));
    /// assert_eq!(s.shift_char(), None);
    /// ```
    pub fn shift_char(&mut self) -> Option<char> {
        let len = self.len();
        if len == 0 {
            return None
        }

        // Decode the first char, then slide the remaining bytes down over it
        // and shrink the logical length accordingly.
        let CharRange {ch, next} = self.as_slice().char_range_at(0);
        let new_len = len - next;
        unsafe {
            ptr::copy_memory(self.vec.as_mut_ptr(), self.vec.as_ptr().offset(next as int), new_len);
            self.vec.set_len(new_len);
        }
        Some(ch)
    }

    /// Views the string buffer as a mutable sequence of bytes.
    ///
    /// This is unsafe because it does not check
    /// to ensure that the resulting string will be valid UTF-8.
    ///
    /// # Example
    ///
    /// ```
    /// let mut s = String::from_str("hello");
    /// unsafe {
    ///     let vec = s.as_mut_vec();
    ///     assert!(vec == &mut vec![104, 101, 108, 108, 111]);
    ///     vec.reverse();
    /// }
    /// assert_eq!(s.as_slice(), "olleh");
    /// ```
    pub unsafe fn as_mut_vec<'a>(&'a mut self) -> &'a mut Vec<u8> {
        &mut self.vec
    }
}

impl Collection for String {
    #[inline]
    fn len(&self) -> uint {
        self.vec.len()
    }
}

impl Mutable for String {
    #[inline]
    fn clear(&mut self) {
        self.vec.clear()
    }
}

impl FromIterator<char> for String {
    fn from_iter<I:Iterator<char>>(iterator: I) -> String {
        let mut buf = String::new();
        buf.extend(iterator);
        buf
    }
}

impl Extendable<char> for String {
    fn extend<I:Iterator<char>>(&mut self, mut iterator: I) {
        for ch in iterator {
            self.push_char(ch)
        }
    }
}

impl Str for String {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str {
        // Sound because the buffer is maintained as valid UTF-8 by every
        // safe method on String.
        unsafe {
            mem::transmute(self.vec.as_slice())
        }
    }
}

impl StrAllocating for String {
    #[inline]
    fn into_string(self) -> String {
        self
    }
}

impl Default for String {
    fn default() -> String {
        String::new()
    }
}

impl fmt::Show for String {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl<H: hash::Writer> hash::Hash<H> for String {
    #[inline]
    fn hash(&self, hasher: &mut H) {
        self.as_slice().hash(hasher)
    }
}

impl<'a, S: Str> Equiv<S> for String {
    #[inline]
    fn equiv(&self, other: &S) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl<S: Str> Add<S, String> for String {
    fn add(&self, other: &S) -> String {
        let mut s = String::from_str(self.as_slice());
        s.push_str(other.as_slice());
        return s;
    }
}

/// Unsafe operations
pub mod raw {
    use core::mem;
    use core::ptr::RawPtr;
    use core::raw::Slice;

    use super::String;
    use vec::Vec;

    /// Creates a new `String` from length, capacity, and a pointer.
    ///
    /// This is unsafe because:
    /// * We call `Vec::from_raw_parts` to get a `Vec<u8>`
    /// * We assume that the `Vec` contains valid UTF-8
    #[inline]
    pub unsafe fn from_parts(buf: *mut u8, length: uint, capacity: uint) -> String {
        String {
            vec: Vec::from_raw_parts(length, capacity, buf),
        }
    }

    /// Create `String` from a *u8 buffer of the given length
    ///
    /// This function is unsafe because of two reasons:
    /// * A raw pointer is dereferenced and transmuted to `&[u8]`
    /// * The slice is not checked to see whether it contains valid UTF-8
    pub unsafe fn from_buf_len(buf: *const u8, len: uint) -> String {
        use slice::CloneableVector;
        let slice: &[u8] = mem::transmute(Slice {
            data: buf,
            len: len,
        });
        self::from_utf8(slice.to_vec())
    }

    /// Create a `String` from a null-terminated *u8 buffer
    ///
    /// This function is unsafe because we dereference memory until we find the NUL character,
    /// which is not guaranteed to be present. Additionally, the slice is not checked to see
    /// whether it contains valid UTF-8
    pub unsafe fn from_buf(buf: *const u8) -> String {
        let mut len = 0;
        while *buf.offset(len) != 0 {
            len += 1;
        }
        self::from_buf_len(buf, len as uint)
    }

    /// Converts a vector of bytes to a new `String` without checking if
    /// it contains valid UTF-8. This is unsafe because it assumes that
    /// the utf-8-ness of the vector has already been validated.
#[inline] pub unsafe fn from_utf8(bytes: Vec<u8>) -> String { String { vec: bytes } } } #[cfg(test)] mod tests { use std::prelude::*; use test::Bencher; use {Mutable, MutableSeq}; use str; use str::{Str, StrSlice, Owned, Slice}; use super::String; use vec::Vec; #[test] fn test_from_str() { let owned: Option<::std::string::String> = from_str("string"); assert_eq!(owned.as_ref().map(|s| s.as_slice()), Some("string")); } #[test] fn test_from_utf8() { let xs = Vec::from_slice(b"hello"); assert_eq!(String::from_utf8(xs), Ok(String::from_str("hello"))); let xs = Vec::from_slice("ศไทย中华Việt Nam".as_bytes()); assert_eq!(String::from_utf8(xs), Ok(String::from_str("ศไทย中华Việt Nam"))); let xs = Vec::from_slice(b"hello\xFF"); assert_eq!(String::from_utf8(xs), Err(Vec::from_slice(b"hello\xFF"))); } #[test] fn test_from_utf8_lossy() { let xs = b"hello"; assert_eq!(String::from_utf8_lossy(xs), Slice("hello")); let xs = "ศไทย中华Việt Nam".as_bytes(); assert_eq!(String::from_utf8_lossy(xs), Slice("ศไทย中华Việt Nam")); let xs = b"Hello\xC2 There\xFF Goodbye"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("Hello\uFFFD There\uFFFD Goodbye"))); let xs = b"Hello\xC0\x80 There\xE6\x83 Goodbye"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("Hello\uFFFD\uFFFD There\uFFFD Goodbye"))); let xs = b"\xF5foo\xF5\x80bar"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("\uFFFDfoo\uFFFD\uFFFDbar"))); let xs = b"\xF1foo\xF1\x80bar\xF1\x80\x80baz"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("\uFFFDfoo\uFFFDbar\uFFFDbaz"))); let xs = b"\xF4foo\xF4\x80bar\xF4\xBFbaz"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("\uFFFDfoo\uFFFDbar\uFFFD\uFFFDbaz"))); let xs = b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar"; assert_eq!(String::from_utf8_lossy(xs), Owned(String::from_str("\uFFFD\uFFFD\uFFFD\uFFFD\ foo\U00010000bar"))); // surrogates let xs = b"\xED\xA0\x80foo\xED\xBF\xBFbar"; assert_eq!(String::from_utf8_lossy(xs), 
Owned(String::from_str("\uFFFD\uFFFD\uFFFDfoo\ \uFFFD\uFFFD\uFFFDbar"))); } #[test] fn test_from_utf16() { let pairs = [(String::from_str("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"), vec![0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf46_u16, 0xd800_u16, 0xdf39_u16, 0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf30_u16, 0x000a_u16]), (String::from_str("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"), vec![0xd801_u16, 0xdc12_u16, 0xd801_u16, 0xdc49_u16, 0xd801_u16, 0xdc2e_u16, 0xd801_u16, 0xdc40_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4b_u16, 0x0020_u16, 0xd801_u16, 0xdc0f_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4d_u16, 0x000a_u16]), (String::from_str("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"), vec![0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf11_u16, 0xd800_u16, 0xdf09_u16, 0x00b7_u16, 0xd800_u16, 0xdf0c_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf15_u16, 0xd800_u16, 0xdf04_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]), (String::from_str("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"), vec![0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc91_u16, 0xd801_u16, 0xdc9b_u16, 0xd801_u16, 0xdc92_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc93_u16, 0x0020_u16, 0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc9a_u16, 0xd801_u16, 0xdc8d_u16, 0x0020_u16, 0xd801_u16, 0xdc8f_u16, 0xd801_u16, 0xdc9c_u16, 0xd801_u16, 0xdc92_u16, 0xd801_u16, 0xdc96_u16, 0xd801_u16, 0xdc86_u16, 0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc86_u16, 0x000a_u16 ]), // Issue #12318, even-numbered non-BMP planes (String::from_str("\U00020000"), vec![0xD840, 0xDC00])]; for p in pairs.iter() { let (s, u) = (*p).clone(); let s_as_utf16 = s.as_slice().utf16_units().collect::<Vec<u16>>(); let u_as_string = String::from_utf16(u.as_slice()).unwrap(); assert!(str::is_utf16(u.as_slice())); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); assert_eq!(String::from_utf16_lossy(u.as_slice()), s); 
assert_eq!(String::from_utf16(s_as_utf16.as_slice()).unwrap(), s); assert_eq!(u_as_string.as_slice().utf16_units().collect::<Vec<u16>>(), u); } } #[test] fn test_utf16_invalid() { // completely positive cases tested above. // lead + eof assert_eq!(String::from_utf16([0xD800]), None); // lead + lead assert_eq!(String::from_utf16([0xD800, 0xD800]), None); // isolated trail assert_eq!(String::from_utf16([0x0061, 0xDC00]), None); // general assert_eq!(String::from_utf16([0xD800, 0xd801, 0xdc8b, 0xD800]), None); } #[test] fn test_from_utf16_lossy() { // completely positive cases tested above. // lead + eof assert_eq!(String::from_utf16_lossy([0xD800]), String::from_str("\uFFFD")); // lead + lead assert_eq!(String::from_utf16_lossy([0xD800, 0xD800]), String::from_str("\uFFFD\uFFFD")); // isolated trail assert_eq!(String::from_utf16_lossy([0x0061, 0xDC00]), String::from_str("a\uFFFD")); // general assert_eq!(String::from_utf16_lossy([0xD800, 0xd801, 0xdc8b, 0xD800]), String::from_str("\uFFFD𐒋\uFFFD")); } #[test] fn test_from_buf_len() { unsafe { let a = vec![65u8, 65, 65, 65, 65, 65, 65, 0]; assert_eq!(super::raw::from_buf_len(a.as_ptr(), 3), String::from_str("AAA")); } } #[test] fn test_from_buf() { unsafe { let a = vec![65, 65, 65, 65, 65, 65, 65, 0]; let b = a.as_ptr(); let c = super::raw::from_buf(b); assert_eq!(c, String::from_str("AAAAAAA")); } } #[test] fn test_push_bytes() { let mut s = String::from_str("ABC"); unsafe { s.push_bytes([ 'D' as u8 ]); } assert_eq!(s.as_slice(), "ABCD"); } #[test] fn test_push_str() { let mut s = String::new(); s.push_str(""); assert_eq!(s.as_slice().slice_from(0), ""); s.push_str("abc"); assert_eq!(s.as_slice().slice_from(0), "abc"); s.push_str("ประเทศไทย中华Việt Nam"); assert_eq!(s.as_slice().slice_from(0), "abcประเทศไทย中华Việt Nam"); } #[test] fn test_push_char() { let mut data = String::from_str("ประเทศไทย中"); data.push_char('华'); data.push_char('b'); // 1 byte data.push_char('¢'); // 2 byte data.push_char('€'); // 3 byte 
data.push_char('𤭢'); // 4 byte assert_eq!(data.as_slice(), "ประเทศไทย中华b¢€𤭢"); } #[test] fn test_pop_char() { let mut data = String::from_str("ประเทศไทย中华b¢€𤭢"); assert_eq!(data.pop_char().unwrap(), '𤭢'); // 4 bytes assert_eq!(data.pop_char().unwrap(), '€'); // 3 bytes assert_eq!(data.pop_char().unwrap(), '¢'); // 2 bytes assert_eq!(data.pop_char().unwrap(), 'b'); // 1 bytes assert_eq!(data.pop_char().unwrap(), '华'); assert_eq!(data.as_slice(), "ประเทศไทย中"); } #[test] fn test_shift_char() { let mut data = String::from_str("𤭢€¢b华ประเทศไทย中"); assert_eq!(data.shift_char().unwrap(), '𤭢'); // 4 bytes assert_eq!(data.shift_char().unwrap(), '€'); // 3 bytes assert_eq!(data.shift_char().unwrap(), '¢'); // 2 bytes assert_eq!(data.shift_char().unwrap(), 'b'); // 1 bytes assert_eq!(data.shift_char().unwrap(), '华'); assert_eq!(data.as_slice(), "ประเทศไทย中"); } #[test] fn test_str_truncate() { let mut s = String::from_str("12345"); s.truncate(5); assert_eq!(s.as_slice(), "12345"); s.truncate(3); assert_eq!(s.as_slice(), "123"); s.truncate(0); assert_eq!(s.as_slice(), ""); let mut s = String::from_str("12345"); let p = s.as_slice().as_ptr(); s.truncate(3); s.push_str("6"); let p_ = s.as_slice().as_ptr(); assert_eq!(p_, p); } #[test] #[should_fail] fn test_str_truncate_invalid_len() { let mut s = String::from_str("12345"); s.truncate(6); } #[test] #[should_fail] fn test_str_truncate_split_codepoint() { let mut s = String::from_str("\u00FC"); // ü s.truncate(1); } #[test] fn test_str_clear() { let mut s = String::from_str("12345"); s.clear(); assert_eq!(s.len(), 0); assert_eq!(s.as_slice(), ""); } #[test] fn test_str_add() { let a = String::from_str("12345"); let b = a + "2"; let b = b + String::from_str("2"); assert_eq!(b.len(), 7); assert_eq!(b.as_slice(), "1234522"); } #[bench] fn bench_with_capacity(b: &mut Bencher) { b.iter(|| { String::with_capacity(100) }); } #[bench] fn bench_push_str(b: &mut Bencher) { let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb"; 
b.iter(|| { let mut r = String::new(); r.push_str(s); }); } #[bench] fn from_utf8_lossy_100_ascii(b: &mut Bencher) { let s = b"Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "; assert_eq!(100, s.len()); b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_100_multibyte(b: &mut Bencher) { let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes(); assert_eq!(100, s.len()); b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_invalid(b: &mut Bencher) { let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye"; b.iter(|| { let _ = String::from_utf8_lossy(s); }); } #[bench] fn from_utf8_lossy_100_invalid(b: &mut Bencher) { let s = Vec::from_elem(100, 0xF5u8); b.iter(|| { let _ = String::from_utf8_lossy(s.as_slice()); }); } } Fix a whitespace typo // Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // // ignore-lexer-test FIXME #15679 //! An owned, growable string that enforces that its contents are valid UTF-8. use core::prelude::*; use core::default::Default; use core::fmt; use core::mem; use core::ptr; use core::raw::Slice; use {Collection, Mutable, MutableSeq}; use hash; use str; use str::{CharRange, StrAllocating, MaybeOwned, Owned, Slice}; use vec::Vec; /// A growable string stored as a UTF-8 encoded buffer. #[deriving(Clone, PartialEq, PartialOrd, Eq, Ord)] pub struct String { vec: Vec<u8>, } impl String { /// Creates a new string buffer initialized with the empty string. 
/// /// # Example /// /// ``` /// let mut s = String::new(); /// ``` #[inline] pub fn new() -> String { String { vec: Vec::new(), } } /// Creates a new string buffer with the given capacity. /// The string will be able to hold exactly `capacity` bytes without /// reallocating. If `capacity` is 0, the string will not allocate. /// /// # Example /// /// ``` /// let mut s = String::with_capacity(10); /// ``` #[inline] pub fn with_capacity(capacity: uint) -> String { String { vec: Vec::with_capacity(capacity), } } /// Creates a new string buffer from the given string. /// /// # Example /// /// ``` /// let s = String::from_str("hello"); /// assert_eq!(s.as_slice(), "hello"); /// ``` #[inline] pub fn from_str(string: &str) -> String { String { vec: Vec::from_slice(string.as_bytes()) } } /// Deprecated. Replaced by `string::raw::from_parts` #[inline] #[deprecated = "Replaced by string::raw::from_parts"] pub unsafe fn from_raw_parts(length: uint, capacity: uint, ptr: *mut u8) -> String { raw::from_parts(ptr, length, capacity) } /// Deprecated. #[deprecated = "obsoleted by the removal of ~str"] #[inline] pub fn from_owned_str(string: String) -> String { string } /// Returns the vector as a string buffer, if possible, taking care not to /// copy it. /// /// Returns `Err` with the original vector if the vector contains invalid /// UTF-8. /// /// # Example /// /// ```rust /// let hello_vec = vec![104, 101, 108, 108, 111]; /// let s = String::from_utf8(hello_vec); /// assert_eq!(s, Ok("hello".to_string())); /// /// let invalid_vec = vec![240, 144, 128]; /// let s = String::from_utf8(invalid_vec); /// assert_eq!(s, Err(vec![240, 144, 128])); /// ``` #[inline] pub fn from_utf8(vec: Vec<u8>) -> Result<String, Vec<u8>> { if str::is_utf8(vec.as_slice()) { Ok(String { vec: vec }) } else { Err(vec) } } /// Converts a vector of bytes to a new utf-8 string. /// Any invalid utf-8 sequences are replaced with U+FFFD REPLACEMENT CHARACTER. 
/// /// # Example /// /// ```rust /// let input = b"Hello \xF0\x90\x80World"; /// let output = String::from_utf8_lossy(input); /// assert_eq!(output.as_slice(), "Hello \uFFFDWorld"); /// ``` pub fn from_utf8_lossy<'a>(v: &'a [u8]) -> MaybeOwned<'a> { if str::is_utf8(v) { return Slice(unsafe { mem::transmute(v) }) } static TAG_CONT_U8: u8 = 128u8; static REPLACEMENT: &'static [u8] = b"\xEF\xBF\xBD"; // U+FFFD in UTF-8 let mut i = 0; let total = v.len(); fn unsafe_get(xs: &[u8], i: uint) -> u8 { unsafe { *xs.unsafe_ref(i) } } fn safe_get(xs: &[u8], i: uint, total: uint) -> u8 { if i >= total { 0 } else { unsafe_get(xs, i) } } let mut res = String::with_capacity(total); if i > 0 { unsafe { res.push_bytes(v.slice_to(i)) }; } // subseqidx is the index of the first byte of the subsequence we're looking at. // It's used to copy a bunch of contiguous good codepoints at once instead of copying // them one by one. let mut subseqidx = 0; while i < total { let i_ = i; let byte = unsafe_get(v, i); i += 1; macro_rules! error(() => ({ unsafe { if subseqidx != i_ { res.push_bytes(v.slice(subseqidx, i_)); } subseqidx = i; res.push_bytes(REPLACEMENT); } })) if byte < 128u8 { // subseqidx handles this } else { let w = str::utf8_char_width(byte); match w { 2 => { if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } 3 => { match (byte, safe_get(v, i, total)) { (0xE0 , 0xA0 .. 0xBF) => (), (0xE1 .. 0xEC, 0x80 .. 0xBF) => (), (0xED , 0x80 .. 0x9F) => (), (0xEE .. 0xEF, 0x80 .. 0xBF) => (), _ => { error!(); continue; } } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } 4 => { match (byte, safe_get(v, i, total)) { (0xF0 , 0x90 .. 0xBF) => (), (0xF1 .. 0xF3, 0x80 .. 0xBF) => (), (0xF4 , 0x80 .. 
0x8F) => (), _ => { error!(); continue; } } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; if safe_get(v, i, total) & 192u8 != TAG_CONT_U8 { error!(); continue; } i += 1; } _ => { error!(); continue; } } } } if subseqidx < total { unsafe { res.push_bytes(v.slice(subseqidx, total)) }; } Owned(res.into_string()) } /// Decode a UTF-16 encoded vector `v` into a `String`, returning `None` /// if `v` contains any invalid data. /// /// # Example /// /// ```rust /// // 𝄞music /// let mut v = [0xD834, 0xDD1E, 0x006d, 0x0075, /// 0x0073, 0x0069, 0x0063]; /// assert_eq!(String::from_utf16(v), Some("𝄞music".to_string())); /// /// // 𝄞mu<invalid>ic /// v[4] = 0xD800; /// assert_eq!(String::from_utf16(v), None); /// ``` pub fn from_utf16(v: &[u16]) -> Option<String> { let mut s = String::with_capacity(v.len() / 2); for c in str::utf16_items(v) { match c { str::ScalarValue(c) => s.push_char(c), str::LoneSurrogate(_) => return None } } Some(s) } /// Decode a UTF-16 encoded vector `v` into a string, replacing /// invalid data with the replacement character (U+FFFD). /// /// # Example /// ```rust /// // 𝄞mus<invalid>ic<invalid> /// let v = [0xD834, 0xDD1E, 0x006d, 0x0075, /// 0x0073, 0xDD1E, 0x0069, 0x0063, /// 0xD834]; /// /// assert_eq!(String::from_utf16_lossy(v), /// "𝄞mus\uFFFDic\uFFFD".to_string()); /// ``` pub fn from_utf16_lossy(v: &[u16]) -> String { str::utf16_items(v).map(|c| c.to_char_lossy()).collect() } /// Convert a vector of chars to a string. /// /// # Example /// /// ```rust /// let chars = ['h', 'e', 'l', 'l', 'o']; /// let s = String::from_chars(chars); /// assert_eq!(s.as_slice(), "hello"); /// ``` #[inline] pub fn from_chars(chs: &[char]) -> String { chs.iter().map(|c| *c).collect() } /// Return the underlying byte buffer, encoded as UTF-8. 
/// /// # Example /// /// ``` /// let s = String::from_str("hello"); /// let bytes = s.into_bytes(); /// assert_eq!(bytes, vec![104, 101, 108, 108, 111]); /// ``` #[inline] pub fn into_bytes(self) -> Vec<u8> { self.vec } /// Pushes the given string onto this buffer; then, returns `self` so that it can be used /// again. /// /// # Example /// /// ``` /// let s = String::from_str("hello"); /// let big = s.append(" ").append("world").append("!"); /// // s has now been moved and cannot be used /// /// assert_eq!(big.as_slice(), "hello world!"); /// ``` #[inline] pub fn append(mut self, second: &str) -> String { self.push_str(second); self } /// Creates a string buffer by repeating a character `length` times. /// /// # Example /// /// ``` /// let s = String::from_char(5, 'a'); /// assert_eq!(s.as_slice(), "aaaaa"); /// ``` #[inline] pub fn from_char(length: uint, ch: char) -> String { if length == 0 { return String::new() } let mut buf = String::new(); buf.push_char(ch); let size = buf.len() * length; buf.reserve(size); for _ in range(1, length) { buf.push_char(ch) } buf } /// Convert a byte to a UTF-8 string. /// /// # Failure /// /// Fails if invalid UTF-8 /// /// # Example /// /// ```rust /// let s = String::from_byte(104); /// assert_eq!(s.as_slice(), "h"); /// ``` pub fn from_byte(b: u8) -> String { assert!(b < 128u8); String::from_char(1, b as char) } /// Pushes the given string onto this string buffer. /// /// # Example /// /// ``` /// let mut s = String::from_str("foo"); /// s.push_str("bar"); /// assert_eq!(s.as_slice(), "foobar"); /// ``` #[inline] pub fn push_str(&mut self, string: &str) { self.vec.push_all(string.as_bytes()) } /// Push `ch` onto the given string `count` times. 
/// /// # Example /// /// ``` /// let mut s = String::from_str("foo"); /// s.grow(5, 'Z'); /// assert_eq!(s.as_slice(), "fooZZZZZ"); /// ``` #[inline] pub fn grow(&mut self, count: uint, ch: char) { for _ in range(0, count) { self.push_char(ch) } } /// Returns the number of bytes that this string buffer can hold without reallocating. /// /// # Example /// /// ``` /// let s = String::with_capacity(10); /// assert!(s.byte_capacity() >= 10); /// ``` #[inline] pub fn byte_capacity(&self) -> uint { self.vec.capacity() } /// Reserves capacity for at least `extra` additional bytes in this string buffer. /// /// # Example /// /// ``` /// let mut s = String::with_capacity(10); /// let before = s.byte_capacity(); /// s.reserve_additional(100); /// assert!(s.byte_capacity() - before >= 100); /// ``` #[inline] pub fn reserve_additional(&mut self, extra: uint) { self.vec.reserve_additional(extra) } /// Reserves capacity for at least `capacity` bytes in this string buffer. /// /// # Example /// /// ``` /// let mut s = String::new(); /// s.reserve(10); /// assert!(s.byte_capacity() >= 10); /// ``` #[inline] pub fn reserve(&mut self, capacity: uint) { self.vec.reserve(capacity) } /// Reserves capacity for exactly `capacity` bytes in this string buffer. /// /// # Example /// /// ``` /// let mut s = String::new(); /// s.reserve_exact(10); /// assert_eq!(s.byte_capacity(), 10); /// ``` #[inline] pub fn reserve_exact(&mut self, capacity: uint) { self.vec.reserve_exact(capacity) } /// Shrinks the capacity of this string buffer to match its length. /// /// # Example /// /// ``` /// let mut s = String::from_str("foo"); /// s.reserve(100); /// assert!(s.byte_capacity() >= 100); /// s.shrink_to_fit(); /// assert_eq!(s.byte_capacity(), 3); /// ``` #[inline] pub fn shrink_to_fit(&mut self) { self.vec.shrink_to_fit() } /// Adds the given character to the end of the string. 
/// /// # Example /// /// ``` /// let mut s = String::from_str("abc"); /// s.push_char('1'); /// s.push_char('2'); /// s.push_char('3'); /// assert_eq!(s.as_slice(), "abc123"); /// ``` #[inline] pub fn push_char(&mut self, ch: char) { let cur_len = self.len(); // This may use up to 4 bytes. self.vec.reserve_additional(4); unsafe { // Attempt to not use an intermediate buffer by just pushing bytes // directly onto this string. let slice = Slice { data: self.vec.as_ptr().offset(cur_len as int), len: 4, }; let used = ch.encode_utf8(mem::transmute(slice)); self.vec.set_len(cur_len + used); } } /// Pushes the given bytes onto this string buffer. /// This is unsafe because it does not check /// to ensure that the resulting string will be valid UTF-8. /// /// # Example /// /// ``` /// let mut s = String::new(); /// unsafe { /// s.push_bytes([104, 101, 108, 108, 111]); /// } /// assert_eq!(s.as_slice(), "hello"); /// ``` #[inline] pub unsafe fn push_bytes(&mut self, bytes: &[u8]) { self.vec.push_all(bytes) } /// Works with the underlying buffer as a byte slice. /// /// # Example /// /// ``` /// let s = String::from_str("hello"); /// assert_eq!(s.as_bytes(), &[104, 101, 108, 108, 111]); /// ``` #[inline] pub fn as_bytes<'a>(&'a self) -> &'a [u8] { self.vec.as_slice() } /// Works with the underlying buffer as a mutable byte slice. /// /// This is unsafe because it does not check /// to ensure that the resulting string will be valid UTF-8. /// /// # Example /// /// ``` /// let mut s = String::from_str("hello"); /// unsafe { /// let bytes = s.as_mut_bytes(); /// bytes[1] = 51; /// bytes[4] = 48; /// } /// assert_eq!(s.as_bytes(), &[104, 51, 108, 108, 48]); /// assert_eq!(s.as_slice(), "h3ll0") /// ``` #[inline] pub unsafe fn as_mut_bytes<'a>(&'a mut self) -> &'a mut [u8] { self.vec.as_mut_slice() } /// Shorten a string to the specified length. /// /// # Failure /// /// Fails if `len` > current length. 
///
/// # Example
///
/// ```
/// let mut s = String::from_str("hello");
/// s.truncate(2);
/// assert_eq!(s.as_slice(), "he");
/// ```
#[inline]
pub fn truncate(&mut self, len: uint) {
    // The assert both enforces the documented failure condition and keeps the
    // UTF-8 invariant: truncation may only happen on a char boundary.
    assert!(self.as_slice().is_char_boundary(len));
    self.vec.truncate(len)
}

/// Appends a byte to this string buffer.
///
/// This is unsafe because it does not check
/// to ensure that the resulting string will be valid UTF-8.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("hell");
/// unsafe {
///     s.push_byte(111);
/// }
/// assert_eq!(s.as_slice(), "hello");
/// ```
#[inline]
pub unsafe fn push_byte(&mut self, byte: u8) {
    self.vec.push(byte)
}

/// Removes the last byte from the string buffer and returns it.
/// Returns `None` if this string buffer is empty.
///
/// This is unsafe because it does not check
/// to ensure that the resulting string will be valid UTF-8.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("foo");
/// unsafe {
///     assert_eq!(s.pop_byte(), Some(111));
///     assert_eq!(s.pop_byte(), Some(111));
///     assert_eq!(s.pop_byte(), Some(102));
///     assert_eq!(s.pop_byte(), None);
/// }
/// ```
#[inline]
pub unsafe fn pop_byte(&mut self) -> Option<u8> {
    let len = self.len();
    if len == 0 {
        return None
    }

    let byte = self.as_bytes()[len - 1];
    // SAFETY-relevant: shrinking by one byte within the current length;
    // UTF-8 validity is the caller's responsibility (hence `unsafe fn`).
    self.vec.set_len(len - 1);
    Some(byte)
}

/// Removes the last character from the string buffer and returns it.
/// Returns `None` if this string buffer is empty.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("foo");
/// assert_eq!(s.pop_char(), Some('o'));
/// assert_eq!(s.pop_char(), Some('o'));
/// assert_eq!(s.pop_char(), Some('f'));
/// assert_eq!(s.pop_char(), None);
/// ```
#[inline]
pub fn pop_char(&mut self) -> Option<char> {
    let len = self.len();
    if len == 0 {
        return None
    }

    let CharRange {ch, next} = self.as_slice().char_range_at_reverse(len);
    unsafe {
        // SAFETY-relevant: `next` is the byte index where the popped char
        // begins, i.e. a char boundary, so the remaining prefix is valid UTF-8.
        self.vec.set_len(next);
    }
    Some(ch)
}

/// Removes the first byte from the string buffer and returns it.
/// Returns `None` if this string buffer is empty.
///
/// This is unsafe because it does not check
/// to ensure that the resulting string will be valid UTF-8.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("foo");
/// unsafe {
///     assert_eq!(s.shift_byte(), Some(102));
///     assert_eq!(s.shift_byte(), Some(111));
///     assert_eq!(s.shift_byte(), Some(111));
///     assert_eq!(s.shift_byte(), None);
/// }
/// ```
pub unsafe fn shift_byte(&mut self) -> Option<u8> {
    self.vec.shift()
}

/// Removes the first character from the string buffer and returns it.
/// Returns `None` if this string buffer is empty.
///
/// # Warning
///
/// This is a O(n) operation as it requires copying every element in the buffer.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("foo");
/// assert_eq!(s.shift_char(), Some('f'));
/// assert_eq!(s.shift_char(), Some('o'));
/// assert_eq!(s.shift_char(), Some('o'));
/// assert_eq!(s.shift_char(), None);
/// ```
pub fn shift_char(&mut self) -> Option<char> {
    let len = self.len();
    if len == 0 {
        return None
    }

    // `next` is the byte length of the first char, so the tail starts there.
    let CharRange {ch, next} = self.as_slice().char_range_at(0);
    let new_len = len - next;
    unsafe {
        // Slide the remaining `new_len` bytes to the front of the buffer.
        // NOTE(review): source and destination ranges can overlap here;
        // this era's `ptr::copy_memory` is presumably the memmove-style
        // primitive that permits overlap — confirm against the period docs.
        ptr::copy_memory(self.vec.as_mut_ptr(),
                         self.vec.as_ptr().offset(next as int),
                         new_len);
        self.vec.set_len(new_len);
    }
    Some(ch)
}

/// Views the string buffer as a mutable sequence of bytes.
///
/// This is unsafe because it does not check
/// to ensure that the resulting string will be valid UTF-8.
///
/// # Example
///
/// ```
/// let mut s = String::from_str("hello");
/// unsafe {
///     let vec = s.as_mut_vec();
///     assert!(vec == &mut vec![104, 101, 108, 108, 111]);
///     vec.reverse();
/// }
/// assert_eq!(s.as_slice(), "olleh");
/// ```
pub unsafe fn as_mut_vec<'a>(&'a mut self) -> &'a mut Vec<u8> {
    &mut self.vec
}
}

impl Collection for String {
    #[inline]
    fn len(&self) -> uint {
        self.vec.len()
    }
}

impl Mutable for String {
    #[inline]
    fn clear(&mut self) {
        self.vec.clear()
    }
}

impl FromIterator<char> for String {
    fn from_iter<I:Iterator<char>>(iterator: I) -> String {
        let mut buf = String::new();
        buf.extend(iterator);
        buf
    }
}

impl Extendable<char> for String {
    fn extend<I:Iterator<char>>(&mut self, mut iterator: I) {
        for ch in iterator {
            self.push_char(ch)
        }
    }
}

impl Str for String {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str {
        unsafe {
            // SAFETY-relevant: &[u8] -> &str transmute relies on the type's
            // invariant that `self.vec` always holds valid UTF-8 (the safe
            // mutators above preserve it; the unsafe ones document it).
            mem::transmute(self.vec.as_slice())
        }
    }
}

impl StrAllocating for String {
    #[inline]
    fn into_string(self) -> String {
        self
    }
}

impl Default for String {
    fn default() -> String {
        String::new()
    }
}

impl fmt::Show for String {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

impl<H: hash::Writer> hash::Hash<H> for String {
    #[inline]
    fn hash(&self, hasher: &mut H) {
        self.as_slice().hash(hasher)
    }
}

impl<'a, S: Str> Equiv<S> for String {
    #[inline]
    fn equiv(&self, other: &S) -> bool {
        self.as_slice() == other.as_slice()
    }
}

// Pre-1.0 two-parameter `Add<RHS, Output>`: concatenation by copying `self`
// and appending `other`.
impl<S: Str> Add<S, String> for String {
    fn add(&self, other: &S) -> String {
        let mut s = String::from_str(self.as_slice());
        s.push_str(other.as_slice());
        return s;
    }
}

/// Unsafe operations
pub mod raw {
    use core::mem;
    use core::ptr::RawPtr;
    use core::raw::Slice;

    use super::String;
    use vec::Vec;

    /// Creates a new `String` from length, capacity, and a pointer.
    ///
    /// This is unsafe because:
    /// * We call `Vec::from_raw_parts` to get a `Vec<u8>`
    /// * We assume that the `Vec` contains valid UTF-8
    #[inline]
    pub unsafe fn from_parts(buf: *mut u8, length: uint, capacity: uint) -> String {
        String {
            // Note the argument order of this era's `Vec::from_raw_parts`:
            // (length, capacity, ptr).
            vec: Vec::from_raw_parts(length, capacity, buf),
        }
    }

    /// Create `String` from a *u8 buffer of the given length
    ///
    /// This function is unsafe because of two reasons:
    /// * A raw pointer is dereferenced and transmuted to `&[u8]`
    /// * The slice is not checked to see whether it contains valid UTF-8
    pub unsafe fn from_buf_len(buf: *const u8, len: uint) -> String {
        use slice::CloneableVector;
        // Materialize a borrowed view over the raw buffer, then copy it into
        // an owned Vec before handing it to `from_utf8` (no validation).
        let slice: &[u8] = mem::transmute(Slice {
            data: buf,
            len: len,
        });
        self::from_utf8(slice.to_vec())
    }

    /// Create a `String` from a null-terminated *u8 buffer
    ///
    /// This function is unsafe because we dereference memory until we find the NUL character,
    /// which is not guaranteed to be present. Additionally, the slice is not checked to see
    /// whether it contains valid UTF-8
    pub unsafe fn from_buf(buf: *const u8) -> String {
        // strlen-style scan: walk until the first zero byte.
        let mut len = 0;
        while *buf.offset(len) != 0 {
            len += 1;
        }
        self::from_buf_len(buf, len as uint)
    }

    /// Converts a vector of bytes to a new `String` without checking if
    /// it contains valid UTF-8. This is unsafe because it assumes that
    /// the utf-8-ness of the vector has already been validated.
    #[inline]
    pub unsafe fn from_utf8(bytes: Vec<u8>) -> String {
        String {
            vec: bytes
        }
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::*;
    use test::Bencher;

    use {Mutable, MutableSeq};
    use str;
    use str::{Str, StrSlice, Owned, Slice};
    use super::String;
    use vec::Vec;

    #[test]
    fn test_from_str() {
        let owned: Option<::std::string::String> = from_str("string");
        assert_eq!(owned.as_ref().map(|s| s.as_slice()), Some("string"));
    }

    #[test]
    fn test_from_utf8() {
        let xs = Vec::from_slice(b"hello");
        assert_eq!(String::from_utf8(xs), Ok(String::from_str("hello")));

        let xs = Vec::from_slice("ศไทย中华Việt Nam".as_bytes());
        assert_eq!(String::from_utf8(xs), Ok(String::from_str("ศไทย中华Việt Nam")));

        // Invalid UTF-8 hands the bytes back in the Err variant.
        let xs = Vec::from_slice(b"hello\xFF");
        assert_eq!(String::from_utf8(xs), Err(Vec::from_slice(b"hello\xFF")));
    }

    #[test]
    fn test_from_utf8_lossy() {
        let xs = b"hello";
        assert_eq!(String::from_utf8_lossy(xs), Slice("hello"));

        let xs = "ศไทย中华Việt Nam".as_bytes();
        assert_eq!(String::from_utf8_lossy(xs), Slice("ศไทย中华Việt Nam"));

        let xs = b"Hello\xC2 There\xFF Goodbye";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("Hello\uFFFD There\uFFFD Goodbye")));

        let xs = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("Hello\uFFFD\uFFFD There\uFFFD Goodbye")));

        let xs = b"\xF5foo\xF5\x80bar";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("\uFFFDfoo\uFFFD\uFFFDbar")));

        let xs = b"\xF1foo\xF1\x80bar\xF1\x80\x80baz";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("\uFFFDfoo\uFFFDbar\uFFFDbaz")));

        let xs = b"\xF4foo\xF4\x80bar\xF4\xBFbaz";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("\uFFFDfoo\uFFFDbar\uFFFD\uFFFDbaz")));

        let xs = b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("\uFFFD\uFFFD\uFFFD\uFFFD\
                                           foo\U00010000bar")));

        // surrogates
        let xs = b"\xED\xA0\x80foo\xED\xBF\xBFbar";
        assert_eq!(String::from_utf8_lossy(xs),
                   Owned(String::from_str("\uFFFD\uFFFD\uFFFDfoo\
                                           \uFFFD\uFFFD\uFFFDbar")));
    }

    #[test]
    fn test_from_utf16() {
        let pairs =
            [(String::from_str("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"),
              vec![0xd800_u16, 0xdf45_u16, 0xd800_u16, 0xdf3f_u16,
                   0xd800_u16, 0xdf3b_u16, 0xd800_u16, 0xdf46_u16,
                   0xd800_u16, 0xdf39_u16, 0xd800_u16, 0xdf3b_u16,
                   0xd800_u16, 0xdf30_u16, 0x000a_u16]),

             (String::from_str("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"),
              vec![0xd801_u16, 0xdc12_u16, 0xd801_u16,
                   0xdc49_u16, 0xd801_u16, 0xdc2e_u16, 0xd801_u16,
                   0xdc40_u16, 0xd801_u16, 0xdc32_u16, 0xd801_u16,
                   0xdc4b_u16, 0x0020_u16, 0xd801_u16, 0xdc0f_u16,
                   0xd801_u16, 0xdc32_u16, 0xd801_u16, 0xdc4d_u16,
                   0x000a_u16]),

             (String::from_str("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"),
              vec![0xd800_u16, 0xdf00_u16, 0xd800_u16, 0xdf16_u16,
                   0xd800_u16, 0xdf0b_u16, 0xd800_u16, 0xdf04_u16,
                   0xd800_u16, 0xdf11_u16, 0xd800_u16, 0xdf09_u16,
                   0x00b7_u16, 0xd800_u16, 0xdf0c_u16, 0xd800_u16,
                   0xdf04_u16, 0xd800_u16, 0xdf15_u16, 0xd800_u16,
                   0xdf04_u16, 0xd800_u16, 0xdf0b_u16, 0xd800_u16,
                   0xdf09_u16, 0xd800_u16, 0xdf11_u16, 0x000a_u16 ]),

             (String::from_str("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"),
              vec![0xd801_u16, 0xdc8b_u16, 0xd801_u16, 0xdc98_u16,
                   0xd801_u16, 0xdc88_u16, 0xd801_u16, 0xdc91_u16,
                   0xd801_u16, 0xdc9b_u16, 0xd801_u16, 0xdc92_u16,
                   0x0020_u16, 0xd801_u16, 0xdc95_u16, 0xd801_u16,
                   0xdc93_u16, 0x0020_u16, 0xd801_u16, 0xdc88_u16,
                   0xd801_u16, 0xdc9a_u16, 0xd801_u16, 0xdc8d_u16,
                   0x0020_u16, 0xd801_u16, 0xdc8f_u16, 0xd801_u16,
                   0xdc9c_u16, 0xd801_u16, 0xdc92_u16, 0xd801_u16,
                   0xdc96_u16, 0xd801_u16, 0xdc86_u16, 0x0020_u16,
                   0xd801_u16, 0xdc95_u16, 0xd801_u16, 0xdc86_u16,
                   0x000a_u16 ]),
             // Issue #12318, even-numbered non-BMP planes
             (String::from_str("\U00020000"),
              vec![0xD840, 0xDC00])];

        for p in pairs.iter() {
            let (s, u) = (*p).clone();
            let s_as_utf16 = s.as_slice().utf16_units().collect::<Vec<u16>>();
            let u_as_string = String::from_utf16(u.as_slice()).unwrap();

            assert!(str::is_utf16(u.as_slice()));
            assert_eq!(s_as_utf16, u);

            assert_eq!(u_as_string, s);
            assert_eq!(String::from_utf16_lossy(u.as_slice()), s);

            assert_eq!(String::from_utf16(s_as_utf16.as_slice()).unwrap(), s);
            assert_eq!(u_as_string.as_slice().utf16_units().collect::<Vec<u16>>(), u);
        }
    }

    #[test]
    fn test_utf16_invalid() {
        // completely positive cases tested above.
        // lead + eof
        assert_eq!(String::from_utf16([0xD800]), None);
        // lead + lead
        assert_eq!(String::from_utf16([0xD800, 0xD800]), None);

        // isolated trail
        assert_eq!(String::from_utf16([0x0061, 0xDC00]), None);

        // general
        assert_eq!(String::from_utf16([0xD800, 0xd801, 0xdc8b, 0xD800]), None);
    }

    #[test]
    fn test_from_utf16_lossy() {
        // completely positive cases tested above.
        // lead + eof
        assert_eq!(String::from_utf16_lossy([0xD800]), String::from_str("\uFFFD"));
        // lead + lead
        assert_eq!(String::from_utf16_lossy([0xD800, 0xD800]), String::from_str("\uFFFD\uFFFD"));

        // isolated trail
        assert_eq!(String::from_utf16_lossy([0x0061, 0xDC00]), String::from_str("a\uFFFD"));

        // general
        assert_eq!(String::from_utf16_lossy([0xD800, 0xd801, 0xdc8b, 0xD800]),
                   String::from_str("\uFFFD𐒋\uFFFD"));
    }

    #[test]
    fn test_from_buf_len() {
        unsafe {
            let a = vec![65u8, 65, 65, 65, 65, 65, 65, 0];
            assert_eq!(super::raw::from_buf_len(a.as_ptr(), 3), String::from_str("AAA"));
        }
    }

    #[test]
    fn test_from_buf() {
        unsafe {
            let a = vec![65, 65, 65, 65, 65, 65, 65, 0];
            let b = a.as_ptr();
            let c = super::raw::from_buf(b);
            assert_eq!(c, String::from_str("AAAAAAA"));
        }
    }

    #[test]
    fn test_push_bytes() {
        let mut s = String::from_str("ABC");
        unsafe {
            s.push_bytes([ 'D' as u8 ]);
        }
        assert_eq!(s.as_slice(), "ABCD");
    }

    #[test]
    fn test_push_str() {
        let mut s = String::new();
        s.push_str("");
        assert_eq!(s.as_slice().slice_from(0), "");
        s.push_str("abc");
        assert_eq!(s.as_slice().slice_from(0), "abc");
        s.push_str("ประเทศไทย中华Việt Nam");
        assert_eq!(s.as_slice().slice_from(0), "abcประเทศไทย中华Việt Nam");
    }

    #[test]
    fn test_push_char() {
        let mut data = String::from_str("ประเทศไทย中");
        data.push_char('华');
        data.push_char('b'); // 1 byte
        data.push_char('¢'); // 2 byte
        data.push_char('€'); // 3 byte
        data.push_char('𤭢'); // 4 byte
        assert_eq!(data.as_slice(), "ประเทศไทย中华b¢€𤭢");
    }

    #[test]
    fn test_pop_char() {
        let mut data = String::from_str("ประเทศไทย中华b¢€𤭢");
        assert_eq!(data.pop_char().unwrap(), '𤭢'); // 4 bytes
        assert_eq!(data.pop_char().unwrap(), '€'); // 3 bytes
        assert_eq!(data.pop_char().unwrap(), '¢'); // 2 bytes
        assert_eq!(data.pop_char().unwrap(), 'b'); // 1 bytes
        assert_eq!(data.pop_char().unwrap(), '华');
        assert_eq!(data.as_slice(), "ประเทศไทย中");
    }

    #[test]
    fn test_shift_char() {
        let mut data = String::from_str("𤭢€¢b华ประเทศไทย中");
        assert_eq!(data.shift_char().unwrap(), '𤭢'); // 4 bytes
        assert_eq!(data.shift_char().unwrap(), '€'); // 3 bytes
        assert_eq!(data.shift_char().unwrap(), '¢'); // 2 bytes
        assert_eq!(data.shift_char().unwrap(), 'b'); // 1 bytes
        assert_eq!(data.shift_char().unwrap(), '华');
        assert_eq!(data.as_slice(), "ประเทศไทย中");
    }

    #[test]
    fn test_str_truncate() {
        let mut s = String::from_str("12345");
        s.truncate(5);
        assert_eq!(s.as_slice(), "12345");
        s.truncate(3);
        assert_eq!(s.as_slice(), "123");
        s.truncate(0);
        assert_eq!(s.as_slice(), "");

        // Truncation must not reallocate: the buffer pointer stays stable.
        let mut s = String::from_str("12345");
        let p = s.as_slice().as_ptr();
        s.truncate(3);
        s.push_str("6");
        let p_ = s.as_slice().as_ptr();
        assert_eq!(p_, p);
    }

    #[test]
    #[should_fail]
    fn test_str_truncate_invalid_len() {
        let mut s = String::from_str("12345");
        s.truncate(6);
    }

    #[test]
    #[should_fail]
    fn test_str_truncate_split_codepoint() {
        let mut s = String::from_str("\u00FC"); // ü
        s.truncate(1);
    }

    #[test]
    fn test_str_clear() {
        let mut s = String::from_str("12345");
        s.clear();
        assert_eq!(s.len(), 0);
        assert_eq!(s.as_slice(), "");
    }

    #[test]
    fn test_str_add() {
        let a = String::from_str("12345");
        let b = a + "2";
        let b = b + String::from_str("2");
        assert_eq!(b.len(), 7);
        assert_eq!(b.as_slice(), "1234522");
    }

    #[bench]
    fn bench_with_capacity(b: &mut Bencher) {
        b.iter(|| {
            String::with_capacity(100)
        });
    }

    #[bench]
    fn bench_push_str(b: &mut Bencher) {
        let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
        b.iter(|| {
            let mut r = String::new();
            r.push_str(s);
        });
    }

    #[bench]
    fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
        let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
                  Lorem ipsum dolor sit amet, consectetur. ";

        assert_eq!(100, s.len());
        b.iter(|| {
            let _ = String::from_utf8_lossy(s);
        });
    }

    #[bench]
    fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
        let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
        assert_eq!(100, s.len());
        b.iter(|| {
            let _ = String::from_utf8_lossy(s);
        });
    }

    #[bench]
    fn from_utf8_lossy_invalid(b: &mut Bencher) {
        let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
        b.iter(|| {
            let _ = String::from_utf8_lossy(s);
        });
    }

    #[bench]
    fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
        let s = Vec::from_elem(100, 0xF5u8);
        b.iter(|| {
            let _ = String::from_utf8_lossy(s.as_slice());
        });
    }
}
// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use cmp::Ordering; use ops::Try; use super::LoopState; use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, Fuse}; use super::{Flatten, FlatMap, flatten_compat}; use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev}; use super::{Zip, Sum, Product}; use super::{ChainState, FromIterator, ZipImpl}; fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {} /// An interface for dealing with iterators. /// /// This is the main iterator trait. For more about the concept of iterators /// generally, please see the [module-level documentation]. In particular, you /// may want to know how to [implement `Iterator`][impl]. 
/// /// [module-level documentation]: index.html /// [impl]: index.html#implementing-iterator #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( on( _Self="[std::ops::Range<Idx>; 1]", label="if you meant to iterate between two values, remove the square brackets", note="`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \ without the brackets: `start..end`" ), on( _Self="[std::ops::RangeFrom<Idx>; 1]", label="if you meant to iterate from a value onwards, remove the square brackets", note="`[start..]` is an array of one `RangeFrom`; you might have meant to have a \ `RangeFrom` without the brackets: `start..`, keeping in mind that iterating over an \ unbounded iterator will run forever unless you `break` or `return` from within the \ loop" ), on( _Self="[std::ops::RangeTo<Idx>; 1]", label="if you meant to iterate until a value, remove the square brackets and add a \ starting value", note="`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \ `Range` without the brackets: `0..end`" ), on( _Self="[std::ops::RangeInclusive<Idx>; 1]", label="if you meant to iterate between two values, remove the square brackets", note="`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \ `RangeInclusive` without the brackets: `start..=end`" ), on( _Self="[std::ops::RangeToInclusive<Idx>; 1]", label="if you meant to iterate until a value (including it), remove the square brackets \ and add a starting value", note="`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \ bounded `RangeInclusive` without the brackets: `0..=end`" ), on( _Self="std::ops::RangeTo<Idx>", label="if you meant to iterate until a value, add a starting value", note="`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \ bounded `Range`: `0..end`" ), on( _Self="std::ops::RangeToInclusive<Idx>", label="if you meant to iterate until a value (including 
it), add a starting value", note="`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \ to have a bounded `RangeInclusive`: `0..=end`" ), on( _Self="&str", label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self="std::string::String", label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self="[]", label="borrow the array with `&` or call `.iter()` on it to iterate over it", note="arrays are not an iterators, but slices like the following are: `&[1, 2, 3]`" ), on( _Self="{integral}", note="if you want to iterate between `start` until a value `end`, use the exclusive range \ syntax `start..end` or the inclusive range syntax `start..=end`" ), label="`{Self}` is not an iterator", message="`{Self}` is not an iterator" )] #[doc(spotlight)] pub trait Iterator { /// The type of the elements being iterated over. #[stable(feature = "rust1", since = "1.0.0")] type Item; /// Advances the iterator and returns the next value. /// /// Returns [`None`] when iteration is finished. Individual iterator /// implementations may choose to resume iteration, and so calling `next()` /// again may or may not eventually start returning [`Some(Item)`] again at some /// point. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`Some(Item)`]: ../../std/option/enum.Option.html#variant.Some /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// // A call to next() returns the next value... /// assert_eq!(Some(&1), iter.next()); /// assert_eq!(Some(&2), iter.next()); /// assert_eq!(Some(&3), iter.next()); /// /// // ... and then None once it's over. /// assert_eq!(None, iter.next()); /// /// // More calls may or may not return None. Here, they always will. 
/// assert_eq!(None, iter.next()); /// assert_eq!(None, iter.next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn next(&mut self) -> Option<Self::Item>; /// Returns the bounds on the remaining length of the iterator. /// /// Specifically, `size_hint()` returns a tuple where the first element /// is the lower bound, and the second element is the upper bound. /// /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`. /// A [`None`] here means that either there is no known upper bound, or the /// upper bound is larger than [`usize`]. /// /// # Implementation notes /// /// It is not enforced that an iterator implementation yields the declared /// number of elements. A buggy iterator may yield less than the lower bound /// or more than the upper bound of elements. /// /// `size_hint()` is primarily intended to be used for optimizations such as /// reserving space for the elements of the iterator, but must not be /// trusted to e.g. omit bounds checks in unsafe code. An incorrect /// implementation of `size_hint()` should not lead to memory safety /// violations. /// /// That said, the implementation should provide a correct estimation, /// because otherwise it would be a violation of the trait's protocol. /// /// The default implementation returns `(0, None)` which is correct for any /// iterator. /// /// [`usize`]: ../../std/primitive.usize.html /// [`Option`]: ../../std/option/enum.Option.html /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let iter = a.iter(); /// /// assert_eq!((3, Some(3)), iter.size_hint()); /// ``` /// /// A more complex example: /// /// ``` /// // The even numbers from zero to ten. /// let iter = (0..10).filter(|x| x % 2 == 0); /// /// // We might iterate from zero to ten times. Knowing that it's five /// // exactly wouldn't be possible without executing filter(). 
/// assert_eq!((0, Some(10)), iter.size_hint()); /// /// // Let's add five more numbers with chain() /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20); /// /// // now both bounds are increased by five /// assert_eq!((5, Some(15)), iter.size_hint()); /// ``` /// /// Returning `None` for an upper bound: /// /// ``` /// // an infinite iterator has no upper bound /// // and the maximum possible lower bound /// let iter = 0..; /// /// assert_eq!((usize::max_value(), None), iter.size_hint()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn size_hint(&self) -> (usize, Option<usize>) { (0, None) } /// Consumes the iterator, counting the number of iterations and returning it. /// /// This method will evaluate the iterator until its [`next`] returns /// [`None`]. Once [`None`] is encountered, `count()` returns the number of /// times it called [`next`]. /// /// [`next`]: #tymethod.next /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so counting elements of /// an iterator with more than [`usize::MAX`] elements either produces the /// wrong result or panics. If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than [`usize::MAX`] /// elements. /// /// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().count(), 3); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().count(), 5); /// ``` #[inline] #[rustc_inherit_overflow_checks] #[stable(feature = "rust1", since = "1.0.0")] fn count(self) -> usize where Self: Sized { // Might overflow. self.fold(0, |cnt, _| cnt + 1) } /// Consumes the iterator, returning the last element. /// /// This method will evaluate the iterator until it returns [`None`]. While /// doing so, it keeps track of the current element. 
After [`None`] is /// returned, `last()` will then return the last element it saw. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().last(), Some(&3)); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().last(), Some(&5)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn last(self) -> Option<Self::Item> where Self: Sized { let mut last = None; for x in self { last = Some(x); } last } /// Returns the `n`th element of the iterator. /// /// Like most indexing operations, the count starts from zero, so `nth(0)` /// returns the first value, `nth(1)` the second, and so on. /// /// Note that all preceding elements, as well as the returned element, will be /// consumed from the iterator. That means that the preceding elements will be /// discarded, and also that calling `nth(0)` multiple times on the same iterator /// will return different elements. /// /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the /// iterator. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().nth(1), Some(&2)); /// ``` /// /// Calling `nth()` multiple times doesn't rewind the iterator: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.nth(1), Some(&2)); /// assert_eq!(iter.nth(1), None); /// ``` /// /// Returning `None` if there are less than `n + 1` elements: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().nth(10), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn nth(&mut self, mut n: usize) -> Option<Self::Item> { for x in self { if n == 0 { return Some(x) } n -= 1; } None } /// Creates an iterator starting at the same point, but stepping by /// the given amount at each iteration. 
/// /// Note 1: The first element of the iterator will always be returned, /// regardless of the step given. /// /// Note 2: The time at which ignored elements are pulled is not fixed. /// `StepBy` behaves like the sequence `next(), nth(step-1), nth(step-1), …`, /// but is also free to behave like the sequence /// `advance_n_and_return_first(step), advance_n_and_return_first(step), …` /// Which way is used may change for some iterators for performance reasons. /// The second way will advance the iterator earlier and may consume more items. /// /// `advance_n_and_return_first` is the equivalent of: /// ``` /// fn advance_n_and_return_first<I>(iter: &mut I, total_step: usize) -> Option<I::Item> /// where /// I: Iterator, /// { /// let next = iter.next(); /// if total_step > 1 { /// iter.nth(total_step-2); /// } /// next /// } /// ``` /// /// # Panics /// /// The method will panic if the given step is `0`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [0, 1, 2, 3, 4, 5]; /// let mut iter = a.into_iter().step_by(2); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] fn step_by(self, step: usize) -> StepBy<Self> where Self: Sized { assert!(step != 0); StepBy{iter: self, step: step - 1, first_take: true} } /// Takes two iterators and creates a new iterator over both in sequence. /// /// `chain()` will return a new iterator which will first iterate over /// values from the first iterator and then over values from the second /// iterator. /// /// In other words, it links two iterators together, in a chain. 
🔗 /// /// # Examples /// /// Basic usage: /// /// ``` /// let a1 = [1, 2, 3]; /// let a2 = [4, 5, 6]; /// /// let mut iter = a1.iter().chain(a2.iter()); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), Some(&5)); /// assert_eq!(iter.next(), Some(&6)); /// assert_eq!(iter.next(), None); /// ``` /// /// Since the argument to `chain()` uses [`IntoIterator`], we can pass /// anything that can be converted into an [`Iterator`], not just an /// [`Iterator`] itself. For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `chain()` directly: /// /// [`IntoIterator`]: trait.IntoIterator.html /// [`Iterator`]: trait.Iterator.html /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().chain(s2); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), Some(&5)); /// assert_eq!(iter.next(), Some(&6)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where Self: Sized, U: IntoIterator<Item=Self::Item>, { Chain{a: self, b: other.into_iter(), state: ChainState::Both} } /// 'Zips up' two iterators into a single iterator of pairs. /// /// `zip()` returns a new iterator that will iterate over two other /// iterators, returning a tuple where the first element comes from the /// first iterator, and the second element comes from the second iterator. /// /// In other words, it zips two iterators together, into a single one. /// /// If either iterator returns [`None`], [`next`] from the zipped iterator /// will return [`None`]. If the first iterator returns [`None`], `zip` will /// short-circuit and `next` will not be called on the second iterator. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a1 = [1, 2, 3]; /// let a2 = [4, 5, 6]; /// /// let mut iter = a1.iter().zip(a2.iter()); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// Since the argument to `zip()` uses [`IntoIterator`], we can pass /// anything that can be converted into an [`Iterator`], not just an /// [`Iterator`] itself. For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `zip()` directly: /// /// [`IntoIterator`]: trait.IntoIterator.html /// [`Iterator`]: trait.Iterator.html /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().zip(s2); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// `zip()` is often used to zip an infinite iterator to a finite one. /// This works because the finite iterator will eventually return [`None`], /// ending the zipper. 
Zipping with `(0..)` can look a lot like [`enumerate`]: /// /// ``` /// let enumerate: Vec<_> = "foo".chars().enumerate().collect(); /// /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect(); /// /// assert_eq!((0, 'f'), enumerate[0]); /// assert_eq!((0, 'f'), zipper[0]); /// /// assert_eq!((1, 'o'), enumerate[1]); /// assert_eq!((1, 'o'), zipper[1]); /// /// assert_eq!((2, 'o'), enumerate[2]); /// assert_eq!((2, 'o'), zipper[2]); /// ``` /// /// [`enumerate`]: trait.Iterator.html#method.enumerate /// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next /// [`None`]: ../../std/option/enum.Option.html#variant.None #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where Self: Sized, U: IntoIterator { Zip::new(self, other.into_iter()) } /// Takes a closure and creates an iterator which calls that closure on each /// element. /// /// `map()` transforms one iterator into another, by means of its argument: /// something that implements `FnMut`. It produces a new iterator which /// calls this closure on each element of the original iterator. /// /// If you are good at thinking in types, you can think of `map()` like this: /// If you have an iterator that gives you elements of some type `A`, and /// you want an iterator of some other type `B`, you can use `map()`, /// passing a closure that takes an `A` and returns a `B`. /// /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is /// lazy, it is best used when you're already working with other iterators. /// If you're doing some sort of looping for a side effect, it's considered /// more idiomatic to use [`for`] than `map()`. 
///
/// [`for`]: ../../book/first-edition/loops.html#for
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.into_iter().map(|x| 2 * x);
///
/// assert_eq!(iter.next(), Some(2));
/// assert_eq!(iter.next(), Some(4));
/// assert_eq!(iter.next(), Some(6));
/// assert_eq!(iter.next(), None);
/// ```
///
/// If you're doing some sort of side effect, prefer [`for`] to `map()`:
///
/// ```
/// # #![allow(unused_must_use)]
/// // don't do this:
/// (0..5).map(|x| println!("{}", x));
///
/// // it won't even execute, as it is lazy. Rust will warn you about this.
///
/// // Instead, use for:
/// for x in 0..5 {
///     println!("{}", x);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn map<B, F>(self, f: F) -> Map<Self, F> where
    Self: Sized, F: FnMut(Self::Item) -> B,
{
    Map { iter: self, f }
}

/// Calls a closure on each element of an iterator.
///
/// This is equivalent to using a [`for`] loop on the iterator, although
/// `break` and `continue` are not possible from a closure. It's generally
/// more idiomatic to use a `for` loop, but `for_each` may be more legible
/// when processing items at the end of longer iterator chains. In some
/// cases `for_each` may also be faster than a loop, because it will use
/// internal iteration on adaptors like `Chain`.
///
/// [`for`]: ../../book/first-edition/loops.html#for
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::sync::mpsc::channel;
///
/// let (tx, rx) = channel();
/// (0..5).map(|x| x * 2 + 1)
///       .for_each(move |x| tx.send(x).unwrap());
///
/// let v: Vec<_> = rx.iter().collect();
/// assert_eq!(v, vec![1, 3, 5, 7, 9]);
/// ```
///
/// For such a small example, a `for` loop may be cleaner, but `for_each`
/// might be preferable to keep a functional style with longer iterators:
///
/// ```
/// (0..5).flat_map(|x| x * 100 .. x * 110)
///       .enumerate()
///       .filter(|&(i, x)| (i + x) % 3 == 0)
///       .for_each(|(i, x)| println!("{}:{}", i, x));
/// ```
#[inline]
#[stable(feature = "iterator_for_each", since = "1.21.0")]
fn for_each<F>(self, mut f: F) where
    Self: Sized, F: FnMut(Self::Item),
{
    // Implemented in terms of `fold` so adaptors with specialized internal
    // iteration (e.g. Chain) are used; the `()` accumulator is free.
    self.fold((), move |(), item| f(item));
}

/// Creates an iterator which uses a closure to determine if an element
/// should be yielded.
///
/// The closure must return `true` or `false`. `filter()` creates an
/// iterator which calls this closure on each element. If the closure
/// returns `true`, then the element is returned. If the closure returns
/// `false`, it will try again, and call the closure on the next element,
/// seeing if it passes the test.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [0i32, 1, 2];
///
/// let mut iter = a.into_iter().filter(|x| x.is_positive());
///
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Because the closure passed to `filter()` takes a reference, and many
/// iterators iterate over references, this leads to a possibly confusing
/// situation, where the type of the closure is a double reference:
///
/// ```
/// let a = [0, 1, 2];
///
/// let mut iter = a.into_iter().filter(|x| **x > 1); // need two *s!
///
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// It's common to instead use destructuring on the argument to strip away
/// one:
///
/// ```
/// let a = [0, 1, 2];
///
/// let mut iter = a.into_iter().filter(|&x| *x > 1); // both & and *
///
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// or both:
///
/// ```
/// let a = [0, 1, 2];
///
/// let mut iter = a.into_iter().filter(|&&x| x > 1); // two &s
///
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// of these layers.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn filter<P>(self, predicate: P) -> Filter<Self, P> where
    Self: Sized, P: FnMut(&Self::Item) -> bool,
{
    Filter { iter: self, predicate }
}

/// Creates an iterator that both filters and maps.
///
/// The closure must return an [`Option<T>`]. `filter_map` creates an
/// iterator which calls this closure on each element. If the closure
/// returns [`Some(element)`][`Some`], then that element is returned. If the
/// closure returns [`None`], it will try again, and call the closure on the
/// next element, seeing if it will return [`Some`].
///
/// Why `filter_map` and not just [`filter`] and [`map`]? The key is in this
/// part:
///
/// [`filter`]: #method.filter
/// [`map`]: #method.map
///
/// > If the closure returns [`Some(element)`][`Some`], then that element is returned.
///
/// In other words, it removes the [`Option<T>`] layer automatically. If your
/// mapping is already returning an [`Option<T>`] and you want to skip over
/// [`None`]s, then `filter_map` is much, much nicer to use.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = ["1", "lol", "3", "NaN", "5"];
///
/// let mut iter = a.iter().filter_map(|s| s.parse().ok());
///
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.next(), Some(3));
/// assert_eq!(iter.next(), Some(5));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Here's the same example, but with [`filter`] and [`map`]:
///
/// ```
/// let a = ["1", "lol", "3", "NaN", "5"];
/// let mut iter = a.iter().map(|s| s.parse()).filter(|s| s.is_ok()).map(|s| s.unwrap());
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.next(), Some(3));
/// assert_eq!(iter.next(), Some(5));
/// assert_eq!(iter.next(), None);
/// ```
///
/// [`Option<T>`]: ../../std/option/enum.Option.html
/// [`Some`]: ../../std/option/enum.Option.html#variant.Some
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where
    Self: Sized, F: FnMut(Self::Item) -> Option<B>,
{
    FilterMap { iter: self, f }
}

/// Creates an iterator which gives the current iteration count as well as
/// the next value.
///
/// The iterator returned yields pairs `(i, val)`, where `i` is the
/// current index of iteration and `val` is the value returned by the
/// iterator.
///
/// `enumerate()` keeps its count as a [`usize`]. If you want to count by a
/// different sized integer, the [`zip`] function provides similar
/// functionality.
///
/// # Overflow Behavior
///
/// The method does no guarding against overflows, so enumerating more than
/// [`usize::MAX`] elements either produces the wrong result or panics. If
/// debug assertions are enabled, a panic is guaranteed.
///
/// # Panics
///
/// The returned iterator might panic if the to-be-returned index would
/// overflow a [`usize`].
///
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html
/// [`usize`]: ../../std/primitive.usize.html
/// [`zip`]: #method.zip
///
/// # Examples
///
/// ```
/// let a = ['a', 'b', 'c'];
///
/// let mut iter = a.iter().enumerate();
///
/// assert_eq!(iter.next(), Some((0, &'a')));
/// assert_eq!(iter.next(), Some((1, &'b')));
/// assert_eq!(iter.next(), Some((2, &'c')));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn enumerate(self) -> Enumerate<Self> where Self: Sized {
    Enumerate { iter: self, count: 0 }
}

/// Creates an iterator which can use `peek` to look at the next element of
/// the iterator without consuming it.
///
/// Adds a [`peek`] method to an iterator. See its documentation for
/// more information.
///
/// Note that the underlying iterator is still advanced when [`peek`] is
/// called for the first time: In order to retrieve the next element,
/// [`next`] is called on the underlying iterator, hence any side effects (i.e.
/// anything other than fetching the next value) of the [`next`] method
/// will occur.
///
/// [`peek`]: struct.Peekable.html#method.peek
/// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let xs = [1, 2, 3];
///
/// let mut iter = xs.iter().peekable();
///
/// // peek() lets us see into the future
/// assert_eq!(iter.peek(), Some(&&1));
/// assert_eq!(iter.next(), Some(&1));
///
/// assert_eq!(iter.next(), Some(&2));
///
/// // we can peek() multiple times, the iterator won't advance
/// assert_eq!(iter.peek(), Some(&&3));
/// assert_eq!(iter.peek(), Some(&&3));
///
/// assert_eq!(iter.next(), Some(&3));
///
/// // after the iterator is finished, so is peek()
/// assert_eq!(iter.peek(), None);
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn peekable(self) -> Peekable<Self> where Self: Sized {
    // `peeked` starts empty; Peekable fills it lazily on the first peek().
    Peekable{iter: self, peeked: None}
}

/// Creates an iterator that [`skip`]s elements based on a predicate.
///
/// [`skip`]: #method.skip
///
/// `skip_while()` takes a closure as an argument. It will call this
/// closure on each element of the iterator, and ignore elements
/// until it returns `false`.
///
/// After `false` is returned, `skip_while()`'s job is over, and the
/// rest of the elements are yielded.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [-1i32, 0, 1];
///
/// let mut iter = a.into_iter().skip_while(|x| x.is_negative());
///
/// assert_eq!(iter.next(), Some(&0));
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Because the closure passed to `skip_while()` takes a reference, and many
/// iterators iterate over references, this leads to a possibly confusing
/// situation, where the type of the closure is a double reference:
///
/// ```
/// let a = [-1, 0, 1];
///
/// let mut iter = a.into_iter().skip_while(|x| **x < 0); // need two *s!
///
/// assert_eq!(iter.next(), Some(&0));
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Stopping after an initial `false`:
///
/// ```
/// let a = [-1, 0, 1, -2];
///
/// let mut iter = a.into_iter().skip_while(|x| **x < 0);
///
/// assert_eq!(iter.next(), Some(&0));
/// assert_eq!(iter.next(), Some(&1));
///
/// // while this would have been false, since we already got a false,
/// // skip_while() isn't used any more
/// assert_eq!(iter.next(), Some(&-2));
///
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where
    Self: Sized, P: FnMut(&Self::Item) -> bool,
{
    // `flag` records whether the predicate has returned false yet.
    SkipWhile { iter: self, flag: false, predicate }
}

/// Creates an iterator that yields elements based on a predicate.
///
/// `take_while()` takes a closure as an argument. It will call this
/// closure on each element of the iterator, and yield elements
/// while it returns `true`.
///
/// After `false` is returned, `take_while()`'s job is over, and the
/// rest of the elements are ignored.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [-1i32, 0, 1];
///
/// let mut iter = a.into_iter().take_while(|x| x.is_negative());
///
/// assert_eq!(iter.next(), Some(&-1));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Because the closure passed to `take_while()` takes a reference, and many
/// iterators iterate over references, this leads to a possibly confusing
/// situation, where the type of the closure is a double reference:
///
/// ```
/// let a = [-1, 0, 1];
///
/// let mut iter = a.into_iter().take_while(|x| **x < 0); // need two *s!
///
/// assert_eq!(iter.next(), Some(&-1));
/// assert_eq!(iter.next(), None);
/// ```
///
/// Stopping after an initial `false`:
///
/// ```
/// let a = [-1, 0, 1, -2];
///
/// let mut iter = a.into_iter().take_while(|x| **x < 0);
///
/// assert_eq!(iter.next(), Some(&-1));
///
/// // We have more elements that are less than zero, but since we already
/// // got a false, take_while() isn't used any more
/// assert_eq!(iter.next(), None);
/// ```
///
/// Because `take_while()` needs to look at the value in order to see if it
/// should be included or not, consuming iterators will see that it is
/// removed:
///
/// ```
/// let a = [1, 2, 3, 4];
/// let mut iter = a.into_iter();
///
/// let result: Vec<i32> = iter.by_ref()
///                            .take_while(|n| **n != 3)
///                            .cloned()
///                            .collect();
///
/// assert_eq!(result, &[1, 2]);
///
/// let result: Vec<i32> = iter.cloned().collect();
///
/// assert_eq!(result, &[4]);
/// ```
///
/// The `3` is no longer there, because it was consumed in order to see if
/// the iteration should stop, but wasn't placed back into the iterator or
/// some similar thing.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where
    Self: Sized, P: FnMut(&Self::Item) -> bool,
{
    // `flag` records whether the predicate has returned false yet.
    TakeWhile { iter: self, flag: false, predicate }
}

/// Creates an iterator that skips the first `n` elements.
///
/// After they have been consumed, the rest of the elements are yielded.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter().skip(2);
///
/// assert_eq!(iter.next(), Some(&3));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn skip(self, n: usize) -> Skip<Self> where Self: Sized {
    Skip { iter: self, n }
}

/// Creates an iterator that yields its first `n` elements.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter().take(2);
///
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// `take()` is often used with an infinite iterator, to make it finite:
///
/// ```
/// let mut iter = (0..).take(3);
///
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.next(), Some(2));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn take(self, n: usize) -> Take<Self> where Self: Sized, {
    Take { iter: self, n }
}

/// An iterator adaptor similar to [`fold`] that holds internal state and
/// produces a new iterator.
///
/// [`fold`]: #method.fold
///
/// `scan()` takes two arguments: an initial value which seeds the internal
/// state, and a closure with two arguments, the first being a mutable
/// reference to the internal state and the second an iterator element.
/// The closure can assign to the internal state to share state between
/// iterations.
///
/// On iteration, the closure will be applied to each element of the
/// iterator and the return value from the closure, an [`Option`], is
/// yielded by the iterator.
///
/// [`Option`]: ../../std/option/enum.Option.html
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter().scan(1, |state, &x| {
///     // each iteration, we'll multiply the state by the element
///     *state = *state * x;
///
///     // then, we'll yield the negation of the state
///     Some(-*state)
/// });
///
/// assert_eq!(iter.next(), Some(-1));
/// assert_eq!(iter.next(), Some(-2));
/// assert_eq!(iter.next(), Some(-6));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F>
    where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option<B>,
{
    Scan { iter: self, f, state: initial_state }
}

/// Creates an iterator that works like map, but flattens nested structure.
///
/// The [`map`] adapter is very useful, but only when the closure
/// argument produces values. If it produces an iterator instead, there's
/// an extra layer of indirection. `flat_map()` will remove this extra layer
/// on its own.
///
/// You can think of `flat_map(f)` as the semantic equivalent
/// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`.
///
/// Another way of thinking about `flat_map()`: [`map`]'s closure returns
/// one item for each element, and `flat_map()`'s closure returns an
/// iterator for each element.
///
/// [`map`]: #method.map
/// [`flatten`]: #method.flatten
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let words = ["alpha", "beta", "gamma"];
///
/// // chars() returns an iterator
/// let merged: String = words.iter()
///                           .flat_map(|s| s.chars())
///                           .collect();
/// assert_eq!(merged, "alphabetagamma");
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
    where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U,
{
    // Literally map-then-flatten: both flat_map and flatten share the
    // FlattenCompat core built here.
    FlatMap { inner: flatten_compat(self.map(f)) }
}

/// Creates an iterator that flattens nested structure.
///
/// This is useful when you have an iterator of iterators or an iterator of
/// things that can be turned into iterators and you want to remove one
/// level of indirection.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let data = vec![vec![1, 2, 3, 4], vec![5, 6]];
/// let flattened = data.into_iter().flatten().collect::<Vec<u8>>();
/// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]);
/// ```
///
/// Mapping and then flattening:
///
/// ```
/// let words = ["alpha", "beta", "gamma"];
///
/// // chars() returns an iterator
/// let merged: String = words.iter()
///                           .map(|s| s.chars())
///                           .flatten()
///                           .collect();
/// assert_eq!(merged, "alphabetagamma");
/// ```
///
/// You can also rewrite this in terms of [`flat_map()`], which is preferable
/// in this case since it conveys intent more clearly:
///
/// ```
/// let words = ["alpha", "beta", "gamma"];
///
/// // chars() returns an iterator
/// let merged: String = words.iter()
///                           .flat_map(|s| s.chars())
///                           .collect();
/// assert_eq!(merged, "alphabetagamma");
/// ```
///
/// Flattening once only removes one level of nesting:
///
/// ```
/// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]];
///
/// let d2 = d3.iter().flatten().collect::<Vec<_>>();
/// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]);
///
/// let d1 = d3.iter().flatten().flatten().collect::<Vec<_>>();
/// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]);
/// ```
///
/// Here we see that `flatten()` does not perform a "deep" flatten.
/// Instead, only one level of nesting is removed. That is, if you
/// `flatten()` a three-dimensional array the result will be
/// two-dimensional and not one-dimensional. To get a one-dimensional
/// structure, you have to `flatten()` again.
///
/// [`flat_map()`]: #method.flat_map
#[inline]
#[stable(feature = "iterator_flatten", since = "1.29.0")]
fn flatten(self) -> Flatten<Self>
    where Self: Sized, Self::Item: IntoIterator
{
    Flatten { inner: flatten_compat(self) }
}

/// Creates an iterator which ends after the first [`None`].
///
/// After an iterator returns [`None`], future calls may or may not yield
/// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a
/// [`None`] is given, it will always return [`None`] forever.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`Some(T)`]: ../../std/option/enum.Option.html#variant.Some
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// // an iterator which alternates between Some and None
/// struct Alternate {
///     state: i32,
/// }
///
/// impl Iterator for Alternate {
///     type Item = i32;
///
///     fn next(&mut self) -> Option<i32> {
///         let val = self.state;
///         self.state = self.state + 1;
///
///         // if it's even, Some(i32), else None
///         if val % 2 == 0 {
///             Some(val)
///         } else {
///             None
///         }
///     }
/// }
///
/// let mut iter = Alternate { state: 0 };
///
/// // we can see our iterator going back and forth
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.next(), Some(2));
/// assert_eq!(iter.next(), None);
///
/// // however, once we fuse it...
/// let mut iter = iter.fuse();
///
/// assert_eq!(iter.next(), Some(4));
/// assert_eq!(iter.next(), None);
///
/// // it will always return None after the first time.
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn fuse(self) -> Fuse<Self> where Self: Sized {
    // `done` latches to true the first time the inner iterator yields None.
    Fuse{iter: self, done: false}
}

/// Do something with each element of an iterator, passing the value on.
///
/// When using iterators, you'll often chain several of them together.
/// While working on such code, you might want to check out what's
/// happening at various parts in the pipeline. To do that, insert
/// a call to `inspect()`.
///
/// It's more common for `inspect()` to be used as a debugging tool than to
/// exist in your final code, but applications may find it useful in certain
/// situations when errors need to be logged before being discarded.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 4, 2, 3];
///
/// // this iterator sequence is complex.
/// let sum = a.iter()
///     .cloned()
///     .filter(|x| x % 2 == 0)
///     .fold(0, |sum, i| sum + i);
///
/// println!("{}", sum);
///
/// // let's add some inspect() calls to investigate what's happening
/// let sum = a.iter()
///     .cloned()
///     .inspect(|x| println!("about to filter: {}", x))
///     .filter(|x| x % 2 == 0)
///     .inspect(|x| println!("made it through filter: {}", x))
///     .fold(0, |sum, i| sum + i);
///
/// println!("{}", sum);
/// ```
///
/// This will print:
///
/// ```text
/// 6
/// about to filter: 1
/// about to filter: 4
/// made it through filter: 4
/// about to filter: 2
/// made it through filter: 2
/// about to filter: 3
/// 6
/// ```
///
/// Logging errors before discarding them:
///
/// ```
/// let lines = ["1", "2", "a"];
///
/// let sum: i32 = lines
///     .iter()
///     .map(|line| line.parse::<i32>())
///     .inspect(|num| {
///         if let Err(ref e) = *num {
///             println!("Parsing error: {}", e);
///         }
///     })
///     .filter_map(Result::ok)
///     .sum();
///
/// println!("Sum: {}", sum);
/// ```
///
/// This will print:
///
/// ```text
/// Parsing error: invalid digit found in string
/// Sum: 3
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn inspect<F>(self, f: F) -> Inspect<Self, F> where
    Self: Sized, F: FnMut(&Self::Item),
{
    Inspect { iter: self, f }
}

/// Borrows an iterator, rather than consuming it.
///
/// This is useful to allow applying iterator adaptors while still
/// retaining ownership of the original iterator.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let iter = a.into_iter();
///
/// let sum: i32 = iter.take(5).fold(0, |acc, i| acc + i );
///
/// assert_eq!(sum, 6);
///
/// // if we try to use iter again, it won't work. The following line
/// // gives "error: use of moved value: `iter`
/// // assert_eq!(iter.next(), None);
///
/// // let's try that again
/// let a = [1, 2, 3];
///
/// let mut iter = a.into_iter();
///
/// // instead, we add in a .by_ref()
/// let sum: i32 = iter.by_ref().take(2).fold(0, |acc, i| acc + i );
///
/// assert_eq!(sum, 3);
///
/// // now this is just fine:
/// assert_eq!(iter.next(), Some(&3));
/// assert_eq!(iter.next(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn by_ref(&mut self) -> &mut Self where Self: Sized {
    // Works because `impl Iterator for &mut I` forwards to `I`.
    self
}

/// Transforms an iterator into a collection.
///
/// `collect()` can take anything iterable, and turn it into a relevant
/// collection. This is one of the more powerful methods in the standard
/// library, used in a variety of contexts.
///
/// The most basic pattern in which `collect()` is used is to turn one
/// collection into another. You take a collection, call [`iter`] on it,
/// do a bunch of transformations, and then `collect()` at the end.
///
/// One of the keys to `collect()`'s power is that many things you might
/// not think of as 'collections' actually are. For example, a [`String`]
/// is a collection of [`char`]s. And a collection of
/// [`Result<T, E>`][`Result`] can be thought of as single
/// [`Result`]`<Collection<T>, E>`. See the examples below for more.
///
/// Because `collect()` is so general, it can cause problems with type
/// inference. As such, `collect()` is one of the few times you'll see
/// the syntax affectionately known as the 'turbofish': `::<>`. This
/// helps the inference algorithm understand specifically which collection
/// you're trying to collect into.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let doubled: Vec<i32> = a.iter()
///                          .map(|&x| x * 2)
///                          .collect();
///
/// assert_eq!(vec![2, 4, 6], doubled);
/// ```
///
/// Note that we needed the `: Vec<i32>` on the left-hand side. This is because
/// we could collect into, for example, a [`VecDeque<T>`] instead:
///
/// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html
///
/// ```
/// use std::collections::VecDeque;
///
/// let a = [1, 2, 3];
///
/// let doubled: VecDeque<i32> = a.iter().map(|&x| x * 2).collect();
///
/// assert_eq!(2, doubled[0]);
/// assert_eq!(4, doubled[1]);
/// assert_eq!(6, doubled[2]);
/// ```
///
/// Using the 'turbofish' instead of annotating `doubled`:
///
/// ```
/// let a = [1, 2, 3];
///
/// let doubled = a.iter().map(|x| x * 2).collect::<Vec<i32>>();
///
/// assert_eq!(vec![2, 4, 6], doubled);
/// ```
///
/// Because `collect()` only cares about what you're collecting into, you can
/// still use a partial type hint, `_`, with the turbofish:
///
/// ```
/// let a = [1, 2, 3];
///
/// let doubled = a.iter().map(|x| x * 2).collect::<Vec<_>>();
///
/// assert_eq!(vec![2, 4, 6], doubled);
/// ```
///
/// Using `collect()` to make a [`String`]:
///
/// ```
/// let chars = ['g', 'd', 'k', 'k', 'n'];
///
/// let hello: String = chars.iter()
///     .map(|&x| x as u8)
///     .map(|x| (x + 1) as char)
///     .collect();
///
/// assert_eq!("hello", hello);
/// ```
///
/// If you have a list of [`Result<T, E>`][`Result`]s, you can use `collect()` to
/// see if any of them failed:
///
/// ```
/// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")];
///
/// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
///
/// // gives us the first error
/// assert_eq!(Err("nope"), result);
///
/// let results = [Ok(1), Ok(3)];
///
/// let result: Result<Vec<_>, &str> = results.iter().cloned().collect();
///
/// // gives us the list of answers
/// assert_eq!(Ok(vec![1, 3]), result);
/// ```
///
/// [`iter`]: ../../std/iter/trait.Iterator.html#tymethod.next
/// [`String`]: ../../std/string/struct.String.html
/// [`char`]: ../../std/primitive.char.html
/// [`Result`]: ../../std/result/enum.Result.html
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized {
    // The target collection's FromIterator impl does all the work.
    FromIterator::from_iter(self)
}

/// Consumes an iterator, creating two collections from it.
///
/// The predicate passed to `partition()` can return `true`, or `false`.
/// `partition()` returns a pair, all of the elements for which it returned
/// `true`, and all of the elements for which it returned `false`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let (even, odd): (Vec<i32>, Vec<i32>) = a
///     .into_iter()
///     .partition(|&n| n % 2 == 0);
///
/// assert_eq!(even, vec![2]);
/// assert_eq!(odd, vec![1, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn partition<B, F>(self, mut f: F) -> (B, B) where
    Self: Sized,
    B: Default + Extend<Self::Item>,
    F: FnMut(&Self::Item) -> bool
{
    let mut left: B = Default::default();
    let mut right: B = Default::default();

    for x in self {
        if f(&x) {
            // `extend(Some(x))` pushes a single element without requiring
            // a `push` method on the target collection.
            left.extend(Some(x))
        } else {
            right.extend(Some(x))
        }
    }

    (left, right)
}

/// An iterator method that applies a function as long as it returns
/// successfully, producing a single, final value.
///
/// `try_fold()` takes two arguments: an initial value, and a closure with
/// two arguments: an 'accumulator', and an element. The closure either
/// returns successfully, with the value that the accumulator should have
/// for the next iteration, or it returns failure, with an error value that
/// is propagated back to the caller immediately (short-circuiting).
///
/// The initial value is the value the accumulator will have on the first
/// call.
/// If applying the closure succeeded against every element of the
/// iterator, `try_fold()` returns the final accumulator as success.
///
/// Folding is useful whenever you have a collection of something, and want
/// to produce a single value from it.
///
/// # Note to Implementors
///
/// Most of the other (forward) methods have default implementations in
/// terms of this one, so try to implement this explicitly if it can
/// do something better than the default `for` loop implementation.
///
/// In particular, try to have this call `try_fold()` on the internal parts
/// from which this iterator is composed. If multiple calls are needed,
/// the `?` operator may be convenient for chaining the accumulator value
/// along, but beware any invariants that need to be upheld before those
/// early returns. This is a `&mut self` method, so iteration needs to be
/// resumable after hitting an error here.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// // the checked sum of all of the elements of the array
/// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x));
///
/// assert_eq!(sum, Some(6));
/// ```
///
/// Short-circuiting:
///
/// ```
/// let a = [10, 20, 30, 100, 40, 50];
/// let mut it = a.iter();
///
/// // This sum overflows when adding the 100 element
/// let sum = it.try_fold(0i8, |acc, &x| acc.checked_add(x));
/// assert_eq!(sum, None);
///
/// // Because it short-circuited, the remaining elements are still
/// // available through the iterator.
/// assert_eq!(it.len(), 2);
/// assert_eq!(it.next(), Some(&40));
/// ```
#[inline]
#[stable(feature = "iterator_try_fold", since = "1.27.0")]
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where
    Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
{
    let mut accum = init;
    while let Some(x) = self.next() {
        // `?` propagates the closure's failure immediately,
        // leaving `self` resumable at the next element.
        accum = f(accum, x)?;
    }
    Try::from_ok(accum)
}

/// An iterator method that applies a fallible function to each item in the
/// iterator, stopping at the first error and returning that error.
///
/// This can also be thought of as the fallible form of [`for_each()`]
/// or as the stateless version of [`try_fold()`].
///
/// [`for_each()`]: #method.for_each
/// [`try_fold()`]: #method.try_fold
///
/// # Examples
///
/// ```
/// use std::fs::rename;
/// use std::io::{stdout, Write};
/// use std::path::Path;
///
/// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"];
///
/// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{}", x));
/// assert!(res.is_ok());
///
/// let mut it = data.iter().cloned();
/// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old")));
/// assert!(res.is_err());
/// // It short-circuited, so the remaining items are still in the iterator:
/// assert_eq!(it.next(), Some("stale_bread.json"));
/// ```
#[inline]
#[stable(feature = "iterator_try_fold", since = "1.27.0")]
fn try_for_each<F, R>(&mut self, mut f: F) -> R where
    Self: Sized, F: FnMut(Self::Item) -> R, R: Try<Ok=()>
{
    // Stateless try_fold: the unit accumulator is threaded through untouched.
    self.try_fold((), move |(), x| f(x))
}

/// An iterator method that applies a function, producing a single, final value.
///
/// `fold()` takes two arguments: an initial value, and a closure with two
/// arguments: an 'accumulator', and an element. The closure returns the value that
/// the accumulator should have for the next iteration.
///
/// The initial value is the value the accumulator will have on the first
/// call.
/// /// After applying this closure to every element of the iterator, `fold()` /// returns the accumulator. /// /// This operation is sometimes called 'reduce' or 'inject'. /// /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// /// Note: `fold()`, and similar methods that traverse the entire iterator, /// may not terminate for infinite iterators, even on traits for which a /// result is determinable in finite time. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// // the sum of all of the elements of the array /// let sum = a.iter().fold(0, |acc, x| acc + x); /// /// assert_eq!(sum, 6); /// ``` /// /// Let's walk through each step of the iteration here: /// /// | element | acc | x | result | /// |---------|-----|---|--------| /// | | 0 | | | /// | 1 | 0 | 1 | 1 | /// | 2 | 1 | 2 | 3 | /// | 3 | 3 | 3 | 6 | /// /// And so, our final result, `6`. /// /// It's common for people who haven't used iterators a lot to /// use a `for` loop with a list of things to build up a result. Those /// can be turned into `fold()`s: /// /// [`for`]: ../../book/first-edition/loops.html#for /// /// ``` /// let numbers = [1, 2, 3, 4, 5]; /// /// let mut result = 0; /// /// // for loop: /// for i in &numbers { /// result = result + i; /// } /// /// // fold: /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x); /// /// // they're the same /// assert_eq!(result, result2); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fold<B, F>(mut self, init: B, mut f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { self.try_fold(init, move |acc, x| Ok::<B, !>(f(acc, x))).unwrap() } /// Tests if every element of the iterator matches a predicate. /// /// `all()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if they all return /// `true`, then so does `all()`. 
If any of them return `false`, it /// returns `false`. /// /// `all()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `false`, given that no matter what else happens, /// the result will also be `false`. /// /// An empty iterator returns `true`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().all(|&x| x > 0)); /// /// assert!(!a.iter().all(|&x| x > 2)); /// ``` /// /// Stopping at the first `false`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(!iter.all(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn all<F>(&mut self, mut f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool { self.try_for_each(move |x| { if f(x) { LoopState::Continue(()) } else { LoopState::Break(()) } }) == LoopState::Continue(()) } /// Tests if any element of the iterator matches a predicate. /// /// `any()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then so does `any()`. If they all return `false`, it /// returns `false`. /// /// `any()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `true`, given that no matter what else happens, /// the result will also be `true`. /// /// An empty iterator returns `false`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().any(|&x| x > 0)); /// /// assert!(!a.iter().any(|&x| x > 5)); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(iter.any(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. 
/// assert_eq!(iter.next(), Some(&2)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn any<F>(&mut self, mut f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool { self.try_for_each(move |x| { if f(x) { LoopState::Break(()) } else { LoopState::Continue(()) } }) == LoopState::Break(()) } /// Searches for an element of an iterator that satisfies a predicate. /// /// `find()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then `find()` returns [`Some(element)`]. If they all return /// `false`, it returns [`None`]. /// /// `find()` is short-circuiting; in other words, it will stop processing /// as soon as the closure returns `true`. /// /// Because `find()` takes a reference, and many iterators iterate over /// references, this leads to a possibly confusing situation where the /// argument is a double reference. You can see this effect in the /// examples below, with `&&x`. /// /// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2)); /// /// assert_eq!(a.iter().find(|&&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.find(|&&x| x == 2), Some(&2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item> where Self: Sized, P: FnMut(&Self::Item) -> bool, { self.try_for_each(move |x| { if predicate(&x) { LoopState::Break(x) } else { LoopState::Continue(()) } }).break_value() } /// Applies function to the elements of iterator and returns /// the first non-none result. 
/// /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`. /// /// /// # Examples /// /// ``` /// let a = ["lol", "NaN", "2", "5"]; /// /// let first_number = a.iter().find_map(|s| s.parse().ok()); /// /// assert_eq!(first_number, Some(2)); /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] fn find_map<B, F>(&mut self, mut f: F) -> Option<B> where Self: Sized, F: FnMut(Self::Item) -> Option<B>, { self.try_for_each(move |x| { match f(x) { Some(x) => LoopState::Break(x), None => LoopState::Continue(()), } }).break_value() } /// Searches for an element in an iterator, returning its index. /// /// `position()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if one of them /// returns `true`, then `position()` returns [`Some(index)`]. If all of /// them return `false`, it returns [`None`]. /// /// `position()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so if there are more /// than [`usize::MAX`] non-matching elements, it either produces the wrong /// result or panics. If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than `usize::MAX` /// non-matching elements. 
/// /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().position(|&x| x == 2), Some(1)); /// /// assert_eq!(a.iter().position(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3, 4]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.position(|&x| x >= 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// /// // The returned index depends on iterator state /// assert_eq!(iter.position(|&x| x == 4), Some(0)); /// /// ``` #[inline] #[rustc_inherit_overflow_checks] #[stable(feature = "rust1", since = "1.0.0")] fn position<P>(&mut self, mut predicate: P) -> Option<usize> where Self: Sized, P: FnMut(Self::Item) -> bool, { // The addition might panic on overflow self.try_fold(0, move |i, x| { if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(i + 1) } }).break_value() } /// Searches for an element in an iterator from the right, returning its /// index. /// /// `rposition()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, starting from the end, /// and if one of them returns `true`, then `rposition()` returns /// [`Some(index)`]. If all of them return `false`, it returns [`None`]. /// /// `rposition()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. 
/// /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2)); /// /// assert_eq!(a.iter().rposition(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&1)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool, Self: Sized + ExactSizeIterator + DoubleEndedIterator { // No need for an overflow check here, because `ExactSizeIterator` // implies that the number of elements fits into a `usize`. let n = self.len(); self.try_rfold(n, move |i, x| { let i = i - 1; if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(i) } }).break_value() } /// Returns the maximum element of an iterator. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().max(), Some(&3)); /// assert_eq!(b.iter().max(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn max(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord { select_fold1(self, |_| (), // switch to y even if it is only equal, to preserve // stability. |_, x, _, y| *x <= *y) .map(|(_, x)| x) } /// Returns the minimum element of an iterator. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. 
/// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().min(), Some(&1)); /// assert_eq!(b.iter().min(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn min(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord { select_fold1(self, |_| (), // only switch to y if it is strictly smaller, to // preserve stability. |_, x, _, y| *x > *y) .map(|(_, x)| x) } /// Returns the element that gives the maximum value from the /// specified function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { select_fold1(self, f, // switch to y even if it is only equal, to preserve // stability. |x_p, _, y_p, _| x_p <= y_p) .map(|(_, x)| x) } /// Returns the element that gives the maximum value with respect to the /// specified comparison function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5); /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] fn max_by<F>(self, mut compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { select_fold1(self, |_| (), // switch to y even if it is only equal, to preserve // stability. 
|_, x, _, y| Ordering::Greater != compare(x, y)) .map(|(_, x)| x) } /// Returns the element that gives the minimum value from the /// specified function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); /// ``` #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { select_fold1(self, f, // only switch to y if it is strictly smaller, to // preserve stability. |x_p, _, y_p, _| x_p > y_p) .map(|(_, x)| x) } /// Returns the element that gives the minimum value with respect to the /// specified comparison function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] fn min_by<F>(self, mut compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { select_fold1(self, |_| (), // switch to y even if it is strictly smaller, to // preserve stability. |_, x, _, y| Ordering::Greater == compare(x, y)) .map(|(_, x)| x) } /// Reverses an iterator's direction. /// /// Usually, iterators iterate from left to right. After using `rev()`, /// an iterator will instead iterate from right to left. /// /// This is only possible if the iterator has an end, so `rev()` only /// works on [`DoubleEndedIterator`]s. 
/// /// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html /// /// # Examples /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().rev(); /// /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator { Rev{iter: self} } /// Converts an iterator of pairs into a pair of containers. /// /// `unzip()` consumes an entire iterator of pairs, producing two /// collections: one from the left elements of the pairs, and one /// from the right elements. /// /// This function is, in some sense, the opposite of [`zip`]. /// /// [`zip`]: #method.zip /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [(1, 2), (3, 4)]; /// /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip(); /// /// assert_eq!(left, [1, 3]); /// assert_eq!(right, [2, 4]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where FromA: Default + Extend<A>, FromB: Default + Extend<B>, Self: Sized + Iterator<Item=(A, B)>, { let mut ts: FromA = Default::default(); let mut us: FromB = Default::default(); self.for_each(|(t, u)| { ts.extend(Some(t)); us.extend(Some(u)); }); (ts, us) } /// Creates an iterator which [`clone`]s all of its elements. /// /// This is useful when you have an iterator over `&T`, but you need an /// iterator over `T`. 
/// /// [`clone`]: ../../std/clone/trait.Clone.html#tymethod.clone /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let v_cloned: Vec<_> = a.iter().cloned().collect(); /// /// // cloned is the same as .map(|&x| x), for integers /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); /// /// assert_eq!(v_cloned, vec![1, 2, 3]); /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn cloned<'a, T: 'a>(self) -> Cloned<Self> where Self: Sized + Iterator<Item=&'a T>, T: Clone { Cloned { it: self } } /// Repeats an iterator endlessly. /// /// Instead of stopping at [`None`], the iterator will instead start again, /// from the beginning. After iterating again, it will start at the /// beginning again. And again. And again. Forever. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut it = a.iter().cycle(); /// /// assert_eq!(it.next(), Some(&1)); /// assert_eq!(it.next(), Some(&2)); /// assert_eq!(it.next(), Some(&3)); /// assert_eq!(it.next(), Some(&1)); /// assert_eq!(it.next(), Some(&2)); /// assert_eq!(it.next(), Some(&3)); /// assert_eq!(it.next(), Some(&1)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] fn cycle(self) -> Cycle<Self> where Self: Sized + Clone { Cycle{orig: self.clone(), iter: self} } /// Sums the elements of an iterator. /// /// Takes each element, adds them together, and returns the result. /// /// An empty iterator returns the zero value of the type. /// /// # Panics /// /// When calling `sum()` and a primitive integer type is being returned, this /// method will panic if the computation overflows and debug assertions are /// enabled. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let sum: i32 = a.iter().sum(); /// /// assert_eq!(sum, 6); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] fn sum<S>(self) -> S where Self: Sized, S: Sum<Self::Item>, { Sum::sum(self) } /// Iterates over the entire iterator, multiplying all the elements /// /// An empty iterator returns the one value of the type. /// /// # Panics /// /// When calling `product()` and a primitive integer type is being returned, /// method will panic if the computation overflows and debug assertions are /// enabled. /// /// # Examples /// /// ``` /// fn factorial(n: u32) -> u32 { /// (1..).take_while(|&i| i <= n).product() /// } /// assert_eq!(factorial(0), 1); /// assert_eq!(factorial(1), 1); /// assert_eq!(factorial(5), 120); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] fn product<P>(self) -> P where Self: Sized, P: Product<Self::Item>, { Product::product(self) } /// Lexicographically compares the elements of this `Iterator` with those /// of another. #[stable(feature = "iter_order", since = "1.5.0")] fn cmp<I>(mut self, other: I) -> Ordering where I: IntoIterator<Item = Self::Item>, Self::Item: Ord, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => if other.next().is_none() { return Ordering::Equal } else { return Ordering::Less }, Some(val) => val, }; let y = match other.next() { None => return Ordering::Greater, Some(val) => val, }; match x.cmp(&y) { Ordering::Equal => (), non_eq => return non_eq, } } } /// Lexicographically compares the elements of this `Iterator` with those /// of another. 
#[stable(feature = "iter_order", since = "1.5.0")] fn partial_cmp<I>(mut self, other: I) -> Option<Ordering> where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => if other.next().is_none() { return Some(Ordering::Equal) } else { return Some(Ordering::Less) }, Some(val) => val, }; let y = match other.next() { None => return Some(Ordering::Greater), Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Equal) => (), non_eq => return non_eq, } } } /// Determines if the elements of this `Iterator` are equal to those of /// another. #[stable(feature = "iter_order", since = "1.5.0")] fn eq<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_none(), Some(val) => val, }; let y = match other.next() { None => return false, Some(val) => val, }; if x != y { return false } } } /// Determines if the elements of this `Iterator` are unequal to those of /// another. #[stable(feature = "iter_order", since = "1.5.0")] fn ne<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_some(), Some(val) => val, }; let y = match other.next() { None => return true, Some(val) => val, }; if x != y { return true } } } /// Determines if the elements of this `Iterator` are lexicographically /// less than those of another. 
    #[stable(feature = "iter_order", since = "1.5.0")]
    fn lt<I>(mut self, other: I) -> bool where
        I: IntoIterator,
        Self::Item: PartialOrd<I::Item>,
        Self: Sized,
    {
        let mut other = other.into_iter();

        loop {
            let x = match self.next() {
                // `self` ran out first: it is strictly less iff `other`
                // still has at least one element remaining.
                None => return other.next().is_some(),
                Some(val) => val,
            };

            let y = match other.next() {
                // `other` is a proper prefix of `self`, so `self` is not less.
                None => return false,
                Some(val) => val,
            };

            match x.partial_cmp(&y) {
                Some(Ordering::Less) => return true,
                Some(Ordering::Equal) => (),
                Some(Ordering::Greater) => return false,
                // Incomparable elements (e.g. NaN): the sequences are
                // unordered, so the strict comparison is false.
                None => return false,
            }
        }
    }

    /// Determines if the elements of this `Iterator` are lexicographically
    /// less or equal to those of another.
    #[stable(feature = "iter_order", since = "1.5.0")]
    fn le<I>(mut self, other: I) -> bool where
        I: IntoIterator,
        Self::Item: PartialOrd<I::Item>,
        Self: Sized,
    {
        let mut other = other.into_iter();

        loop {
            let x = match self.next() {
                // `self` ran out first, so it is <= any extension of itself.
                // NOTE(review): `other` is still advanced once here, mirroring
                // the lockstep advancement of the sibling comparison methods,
                // even though the result is `true` regardless — confirm this
                // side effect is intended before changing it.
                None => { other.next(); return true; },
                Some(val) => val,
            };

            let y = match other.next() {
                // `other` is a proper prefix of `self`, so `self` is greater.
                None => return false,
                Some(val) => val,
            };

            match x.partial_cmp(&y) {
                Some(Ordering::Less) => return true,
                Some(Ordering::Equal) => (),
                Some(Ordering::Greater) => return false,
                // Incomparable elements make the sequences unordered.
                None => return false,
            }
        }
    }

    /// Determines if the elements of this `Iterator` are lexicographically
    /// greater than those of another.
    #[stable(feature = "iter_order", since = "1.5.0")]
    fn gt<I>(mut self, other: I) -> bool where
        I: IntoIterator,
        Self::Item: PartialOrd<I::Item>,
        Self: Sized,
    {
        let mut other = other.into_iter();

        loop {
            let x = match self.next() {
                // `self` ran out first: a prefix is never strictly greater.
                // NOTE(review): as in `le`, `other.next()` is called once for
                // side-effect parity only; the result is `false` regardless.
                None => { other.next(); return false; },
                Some(val) => val,
            };

            let y = match other.next() {
                // `other` is a proper prefix of `self`, so `self` is greater.
                None => return true,
                Some(val) => val,
            };

            match x.partial_cmp(&y) {
                Some(Ordering::Less) => return false,
                Some(Ordering::Equal) => (),
                Some(Ordering::Greater) => return true,
                // Incomparable elements make the sequences unordered.
                None => return false,
            }
        }
    }

    /// Determines if the elements of this `Iterator` are lexicographically
    /// greater than or equal to those of another.
#[stable(feature = "iter_order", since = "1.5.0")] fn ge<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_none(), Some(val) => val, }; let y = match other.next() { None => return true, Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Less) => return false, Some(Ordering::Equal) => (), Some(Ordering::Greater) => return true, None => return false, } } } } /// Select an element from an iterator based on the given "projection" /// and "comparison" function. /// /// This is an idiosyncratic helper to try to factor out the /// commonalities of {max,min}{,_by}. In particular, this avoids /// having to implement optimizations several times. #[inline] fn select_fold1<I, B, FProj, FCmp>(mut it: I, mut f_proj: FProj, mut f_cmp: FCmp) -> Option<(B, I::Item)> where I: Iterator, FProj: FnMut(&I::Item) -> B, FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool { // start with the first element as our selection. This avoids // having to use `Option`s inside the loop, translating to a // sizeable performance gain (6x in one case). it.next().map(|first| { let first_p = f_proj(&first); it.fold((first_p, first), |(sel_p, sel), x| { let x_p = f_proj(&x); if f_cmp(&sel_p, &sel, &x_p, &x) { (x_p, x) } else { (sel_p, sel) } }) }) } #[stable(feature = "rust1", since = "1.0.0")] impl<I: Iterator + ?Sized> Iterator for &mut I { type Item = I::Item; fn next(&mut self) -> Option<I::Item> { (**self).next() } fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() } fn nth(&mut self, n: usize) -> Option<Self::Item> { (**self).nth(n) } } Add missing link in docs // Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. 
// // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use cmp::Ordering; use ops::Try; use super::LoopState; use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, Fuse}; use super::{Flatten, FlatMap, flatten_compat}; use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev}; use super::{Zip, Sum, Product}; use super::{ChainState, FromIterator, ZipImpl}; fn _assert_is_object_safe(_: &dyn Iterator<Item=()>) {} /// An interface for dealing with iterators. /// /// This is the main iterator trait. For more about the concept of iterators /// generally, please see the [module-level documentation]. In particular, you /// may want to know how to [implement `Iterator`][impl]. /// /// [module-level documentation]: index.html /// [impl]: index.html#implementing-iterator #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( on( _Self="[std::ops::Range<Idx>; 1]", label="if you meant to iterate between two values, remove the square brackets", note="`[start..end]` is an array of one `Range`; you might have meant to have a `Range` \ without the brackets: `start..end`" ), on( _Self="[std::ops::RangeFrom<Idx>; 1]", label="if you meant to iterate from a value onwards, remove the square brackets", note="`[start..]` is an array of one `RangeFrom`; you might have meant to have a \ `RangeFrom` without the brackets: `start..`, keeping in mind that iterating over an \ unbounded iterator will run forever unless you `break` or `return` from within the \ loop" ), on( _Self="[std::ops::RangeTo<Idx>; 1]", label="if you meant to iterate until a value, remove the square brackets and add a \ starting value", note="`[..end]` is an array of one `RangeTo`; you might have meant to have a bounded \ 
`Range` without the brackets: `0..end`" ), on( _Self="[std::ops::RangeInclusive<Idx>; 1]", label="if you meant to iterate between two values, remove the square brackets", note="`[start..=end]` is an array of one `RangeInclusive`; you might have meant to have a \ `RangeInclusive` without the brackets: `start..=end`" ), on( _Self="[std::ops::RangeToInclusive<Idx>; 1]", label="if you meant to iterate until a value (including it), remove the square brackets \ and add a starting value", note="`[..=end]` is an array of one `RangeToInclusive`; you might have meant to have a \ bounded `RangeInclusive` without the brackets: `0..=end`" ), on( _Self="std::ops::RangeTo<Idx>", label="if you meant to iterate until a value, add a starting value", note="`..end` is a `RangeTo`, which cannot be iterated on; you might have meant to have a \ bounded `Range`: `0..end`" ), on( _Self="std::ops::RangeToInclusive<Idx>", label="if you meant to iterate until a value (including it), add a starting value", note="`..=end` is a `RangeToInclusive`, which cannot be iterated on; you might have meant \ to have a bounded `RangeInclusive`: `0..=end`" ), on( _Self="&str", label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self="std::string::String", label="`{Self}` is not an iterator; try calling `.chars()` or `.bytes()`" ), on( _Self="[]", label="borrow the array with `&` or call `.iter()` on it to iterate over it", note="arrays are not an iterators, but slices like the following are: `&[1, 2, 3]`" ), on( _Self="{integral}", note="if you want to iterate between `start` until a value `end`, use the exclusive range \ syntax `start..end` or the inclusive range syntax `start..=end`" ), label="`{Self}` is not an iterator", message="`{Self}` is not an iterator" )] #[doc(spotlight)] pub trait Iterator { /// The type of the elements being iterated over. #[stable(feature = "rust1", since = "1.0.0")] type Item; /// Advances the iterator and returns the next value. 
/// /// Returns [`None`] when iteration is finished. Individual iterator /// implementations may choose to resume iteration, and so calling `next()` /// again may or may not eventually start returning [`Some(Item)`] again at some /// point. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`Some(Item)`]: ../../std/option/enum.Option.html#variant.Some /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// // A call to next() returns the next value... /// assert_eq!(Some(&1), iter.next()); /// assert_eq!(Some(&2), iter.next()); /// assert_eq!(Some(&3), iter.next()); /// /// // ... and then None once it's over. /// assert_eq!(None, iter.next()); /// /// // More calls may or may not return None. Here, they always will. /// assert_eq!(None, iter.next()); /// assert_eq!(None, iter.next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn next(&mut self) -> Option<Self::Item>; /// Returns the bounds on the remaining length of the iterator. /// /// Specifically, `size_hint()` returns a tuple where the first element /// is the lower bound, and the second element is the upper bound. /// /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`. /// A [`None`] here means that either there is no known upper bound, or the /// upper bound is larger than [`usize`]. /// /// # Implementation notes /// /// It is not enforced that an iterator implementation yields the declared /// number of elements. A buggy iterator may yield less than the lower bound /// or more than the upper bound of elements. /// /// `size_hint()` is primarily intended to be used for optimizations such as /// reserving space for the elements of the iterator, but must not be /// trusted to e.g. omit bounds checks in unsafe code. An incorrect /// implementation of `size_hint()` should not lead to memory safety /// violations. 
/// /// That said, the implementation should provide a correct estimation, /// because otherwise it would be a violation of the trait's protocol. /// /// The default implementation returns `(0, `[`None`]`)` which is correct for any /// iterator. /// /// [`usize`]: ../../std/primitive.usize.html /// [`Option`]: ../../std/option/enum.Option.html /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let iter = a.iter(); /// /// assert_eq!((3, Some(3)), iter.size_hint()); /// ``` /// /// A more complex example: /// /// ``` /// // The even numbers from zero to ten. /// let iter = (0..10).filter(|x| x % 2 == 0); /// /// // We might iterate from zero to ten times. Knowing that it's five /// // exactly wouldn't be possible without executing filter(). /// assert_eq!((0, Some(10)), iter.size_hint()); /// /// // Let's add five more numbers with chain() /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20); /// /// // now both bounds are increased by five /// assert_eq!((5, Some(15)), iter.size_hint()); /// ``` /// /// Returning `None` for an upper bound: /// /// ``` /// // an infinite iterator has no upper bound /// // and the maximum possible lower bound /// let iter = 0..; /// /// assert_eq!((usize::max_value(), None), iter.size_hint()); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn size_hint(&self) -> (usize, Option<usize>) { (0, None) } /// Consumes the iterator, counting the number of iterations and returning it. /// /// This method will evaluate the iterator until its [`next`] returns /// [`None`]. Once [`None`] is encountered, `count()` returns the number of /// times it called [`next`]. 
/// /// [`next`]: #tymethod.next /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so counting elements of /// an iterator with more than [`usize::MAX`] elements either produces the /// wrong result or panics. If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than [`usize::MAX`] /// elements. /// /// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().count(), 3); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().count(), 5); /// ``` #[inline] #[rustc_inherit_overflow_checks] #[stable(feature = "rust1", since = "1.0.0")] fn count(self) -> usize where Self: Sized { // Might overflow. self.fold(0, |cnt, _| cnt + 1) } /// Consumes the iterator, returning the last element. /// /// This method will evaluate the iterator until it returns [`None`]. While /// doing so, it keeps track of the current element. After [`None`] is /// returned, `last()` will then return the last element it saw. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().last(), Some(&3)); /// /// let a = [1, 2, 3, 4, 5]; /// assert_eq!(a.iter().last(), Some(&5)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn last(self) -> Option<Self::Item> where Self: Sized { let mut last = None; for x in self { last = Some(x); } last } /// Returns the `n`th element of the iterator. /// /// Like most indexing operations, the count starts from zero, so `nth(0)` /// returns the first value, `nth(1)` the second, and so on. /// /// Note that all preceding elements, as well as the returned element, will be /// consumed from the iterator. 
That means that the preceding elements will be /// discarded, and also that calling `nth(0)` multiple times on the same iterator /// will return different elements. /// /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the /// iterator. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().nth(1), Some(&2)); /// ``` /// /// Calling `nth()` multiple times doesn't rewind the iterator: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.nth(1), Some(&2)); /// assert_eq!(iter.nth(1), None); /// ``` /// /// Returning `None` if there are less than `n + 1` elements: /// /// ``` /// let a = [1, 2, 3]; /// assert_eq!(a.iter().nth(10), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn nth(&mut self, mut n: usize) -> Option<Self::Item> { for x in self { if n == 0 { return Some(x) } n -= 1; } None } /// Creates an iterator starting at the same point, but stepping by /// the given amount at each iteration. /// /// Note 1: The first element of the iterator will always be returned, /// regardless of the step given. /// /// Note 2: The time at which ignored elements are pulled is not fixed. /// `StepBy` behaves like the sequence `next(), nth(step-1), nth(step-1), …`, /// but is also free to behave like the sequence /// `advance_n_and_return_first(step), advance_n_and_return_first(step), …` /// Which way is used may change for some iterators for performance reasons. /// The second way will advance the iterator earlier and may consume more items. 
/// /// `advance_n_and_return_first` is the equivalent of: /// ``` /// fn advance_n_and_return_first<I>(iter: &mut I, total_step: usize) -> Option<I::Item> /// where /// I: Iterator, /// { /// let next = iter.next(); /// if total_step > 1 { /// iter.nth(total_step-2); /// } /// next /// } /// ``` /// /// # Panics /// /// The method will panic if the given step is `0`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [0, 1, 2, 3, 4, 5]; /// let mut iter = a.into_iter().step_by(2); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] fn step_by(self, step: usize) -> StepBy<Self> where Self: Sized { assert!(step != 0); StepBy{iter: self, step: step - 1, first_take: true} } /// Takes two iterators and creates a new iterator over both in sequence. /// /// `chain()` will return a new iterator which will first iterate over /// values from the first iterator and then over values from the second /// iterator. /// /// In other words, it links two iterators together, in a chain. 🔗 /// /// # Examples /// /// Basic usage: /// /// ``` /// let a1 = [1, 2, 3]; /// let a2 = [4, 5, 6]; /// /// let mut iter = a1.iter().chain(a2.iter()); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), Some(&5)); /// assert_eq!(iter.next(), Some(&6)); /// assert_eq!(iter.next(), None); /// ``` /// /// Since the argument to `chain()` uses [`IntoIterator`], we can pass /// anything that can be converted into an [`Iterator`], not just an /// [`Iterator`] itself. 
For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `chain()` directly: /// /// [`IntoIterator`]: trait.IntoIterator.html /// [`Iterator`]: trait.Iterator.html /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().chain(s2); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&4)); /// assert_eq!(iter.next(), Some(&5)); /// assert_eq!(iter.next(), Some(&6)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter> where Self: Sized, U: IntoIterator<Item=Self::Item>, { Chain{a: self, b: other.into_iter(), state: ChainState::Both} } /// 'Zips up' two iterators into a single iterator of pairs. /// /// `zip()` returns a new iterator that will iterate over two other /// iterators, returning a tuple where the first element comes from the /// first iterator, and the second element comes from the second iterator. /// /// In other words, it zips two iterators together, into a single one. /// /// If either iterator returns [`None`], [`next`] from the zipped iterator /// will return [`None`]. If the first iterator returns [`None`], `zip` will /// short-circuit and `next` will not be called on the second iterator. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a1 = [1, 2, 3]; /// let a2 = [4, 5, 6]; /// /// let mut iter = a1.iter().zip(a2.iter()); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// Since the argument to `zip()` uses [`IntoIterator`], we can pass /// anything that can be converted into an [`Iterator`], not just an /// [`Iterator`] itself. 
For example, slices (`&[T]`) implement /// [`IntoIterator`], and so can be passed to `zip()` directly: /// /// [`IntoIterator`]: trait.IntoIterator.html /// [`Iterator`]: trait.Iterator.html /// /// ``` /// let s1 = &[1, 2, 3]; /// let s2 = &[4, 5, 6]; /// /// let mut iter = s1.iter().zip(s2); /// /// assert_eq!(iter.next(), Some((&1, &4))); /// assert_eq!(iter.next(), Some((&2, &5))); /// assert_eq!(iter.next(), Some((&3, &6))); /// assert_eq!(iter.next(), None); /// ``` /// /// `zip()` is often used to zip an infinite iterator to a finite one. /// This works because the finite iterator will eventually return [`None`], /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate`]: /// /// ``` /// let enumerate: Vec<_> = "foo".chars().enumerate().collect(); /// /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect(); /// /// assert_eq!((0, 'f'), enumerate[0]); /// assert_eq!((0, 'f'), zipper[0]); /// /// assert_eq!((1, 'o'), enumerate[1]); /// assert_eq!((1, 'o'), zipper[1]); /// /// assert_eq!((2, 'o'), enumerate[2]); /// assert_eq!((2, 'o'), zipper[2]); /// ``` /// /// [`enumerate`]: trait.Iterator.html#method.enumerate /// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next /// [`None`]: ../../std/option/enum.Option.html#variant.None #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter> where Self: Sized, U: IntoIterator { Zip::new(self, other.into_iter()) } /// Takes a closure and creates an iterator which calls that closure on each /// element. /// /// `map()` transforms one iterator into another, by means of its argument: /// something that implements `FnMut`. It produces a new iterator which /// calls this closure on each element of the original iterator. 
///
/// If you are good at thinking in types, you can think of `map()` like this:
/// If you have an iterator that gives you elements of some type `A`, and
/// you want an iterator of some other type `B`, you can use `map()`,
/// passing a closure that takes an `A` and returns a `B`.
///
/// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is
/// lazy, it is best used when you're already working with other iterators.
/// If you're doing some sort of looping for a side effect, it's considered
/// more idiomatic to use [`for`] than `map()`.
///
/// [`for`]: ../../book/first-edition/loops.html#for
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.into_iter().map(|x| 2 * x);
///
/// assert_eq!(iter.next(), Some(2));
/// assert_eq!(iter.next(), Some(4));
/// assert_eq!(iter.next(), Some(6));
/// assert_eq!(iter.next(), None);
/// ```
///
/// If you're doing some sort of side effect, prefer [`for`] to `map()`:
///
/// ```
/// # #![allow(unused_must_use)]
/// // don't do this:
/// (0..5).map(|x| println!("{}", x));
///
/// // it won't even execute, as it is lazy. Rust will warn you about this.
///
/// // Instead, use for:
/// for x in 0..5 {
///     println!("{}", x);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn map<B, F>(self, f: F) -> Map<Self, F>
    where Self: Sized, F: FnMut(Self::Item) -> B,
{
    // The adaptor is lazy (see the docs above): `f` is stored here and
    // only applied as elements are pulled from the resulting `Map`.
    Map { iter: self, f }
}

/// Calls a closure on each element of an iterator.
///
/// This is equivalent to using a [`for`] loop on the iterator, although
/// `break` and `continue` are not possible from a closure. It's generally
/// more idiomatic to use a `for` loop, but `for_each` may be more legible
/// when processing items at the end of longer iterator chains. In some
/// cases `for_each` may also be faster than a loop, because it will use
/// internal iteration on adaptors like `Chain`.
/// /// [`for`]: ../../book/first-edition/loops.html#for /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::sync::mpsc::channel; /// /// let (tx, rx) = channel(); /// (0..5).map(|x| x * 2 + 1) /// .for_each(move |x| tx.send(x).unwrap()); /// /// let v: Vec<_> = rx.iter().collect(); /// assert_eq!(v, vec![1, 3, 5, 7, 9]); /// ``` /// /// For such a small example, a `for` loop may be cleaner, but `for_each` /// might be preferable to keep a functional style with longer iterators: /// /// ``` /// (0..5).flat_map(|x| x * 100 .. x * 110) /// .enumerate() /// .filter(|&(i, x)| (i + x) % 3 == 0) /// .for_each(|(i, x)| println!("{}:{}", i, x)); /// ``` #[inline] #[stable(feature = "iterator_for_each", since = "1.21.0")] fn for_each<F>(self, mut f: F) where Self: Sized, F: FnMut(Self::Item), { self.fold((), move |(), item| f(item)); } /// Creates an iterator which uses a closure to determine if an element /// should be yielded. /// /// The closure must return `true` or `false`. `filter()` creates an /// iterator which calls this closure on each element. If the closure /// returns `true`, then the element is returned. If the closure returns /// `false`, it will try again, and call the closure on the next element, /// seeing if it passes the test. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [0i32, 1, 2]; /// /// let mut iter = a.into_iter().filter(|x| x.is_positive()); /// /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `filter()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.into_iter().filter(|x| **x > 1); // need two *s! 
/// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// It's common to instead use destructuring on the argument to strip away /// one: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.into_iter().filter(|&x| *x > 1); // both & and * /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// or both: /// /// ``` /// let a = [0, 1, 2]; /// /// let mut iter = a.into_iter().filter(|&&x| x > 1); // two &s /// /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), None); /// ``` /// /// of these layers. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn filter<P>(self, predicate: P) -> Filter<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { Filter {iter: self, predicate } } /// Creates an iterator that both filters and maps. /// /// The closure must return an [`Option<T>`]. `filter_map` creates an /// iterator which calls this closure on each element. If the closure /// returns [`Some(element)`][`Some`], then that element is returned. If the /// closure returns [`None`], it will try again, and call the closure on the /// next element, seeing if it will return [`Some`]. /// /// Why `filter_map` and not just [`filter`] and [`map`]? The key is in this /// part: /// /// [`filter`]: #method.filter /// [`map`]: #method.map /// /// > If the closure returns [`Some(element)`][`Some`], then that element is returned. /// /// In other words, it removes the [`Option<T>`] layer automatically. If your /// mapping is already returning an [`Option<T>`] and you want to skip over /// [`None`]s, then `filter_map` is much, much nicer to use. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = ["1", "lol", "3", "NaN", "5"]; /// /// let mut iter = a.iter().filter_map(|s| s.parse().ok()); /// /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(3)); /// assert_eq!(iter.next(), Some(5)); /// assert_eq!(iter.next(), None); /// ``` /// /// Here's the same example, but with [`filter`] and [`map`]: /// /// ``` /// let a = ["1", "lol", "3", "NaN", "5"]; /// let mut iter = a.iter().map(|s| s.parse()).filter(|s| s.is_ok()).map(|s| s.unwrap()); /// assert_eq!(iter.next(), Some(1)); /// assert_eq!(iter.next(), Some(3)); /// assert_eq!(iter.next(), Some(5)); /// assert_eq!(iter.next(), None); /// ``` /// /// [`Option<T>`]: ../../std/option/enum.Option.html /// [`Some`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn filter_map<B, F>(self, f: F) -> FilterMap<Self, F> where Self: Sized, F: FnMut(Self::Item) -> Option<B>, { FilterMap { iter: self, f } } /// Creates an iterator which gives the current iteration count as well as /// the next value. /// /// The iterator returned yields pairs `(i, val)`, where `i` is the /// current index of iteration and `val` is the value returned by the /// iterator. /// /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a /// different sized integer, the [`zip`] function provides similar /// functionality. /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so enumerating more than /// [`usize::MAX`] elements either produces the wrong result or panics. If /// debug assertions are enabled, a panic is guaranteed. /// /// # Panics /// /// The returned iterator might panic if the to-be-returned index would /// overflow a [`usize`]. 
///
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html
/// [`usize`]: ../../std/primitive.usize.html
/// [`zip`]: #method.zip
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = ['a', 'b', 'c'];
///
/// let mut iter = a.iter().enumerate();
///
/// assert_eq!(iter.next(), Some((0, &'a')));
/// assert_eq!(iter.next(), Some((1, &'b')));
/// assert_eq!(iter.next(), Some((2, &'c')));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn enumerate(self) -> Enumerate<Self> where Self: Sized {
    // The count is seeded with zero, matching the zero-based indexing
    // documented above.
    Enumerate { iter: self, count: 0 }
}

/// Creates an iterator which can use `peek` to look at the next element of
/// the iterator without consuming it.
///
/// Adds a [`peek`] method to an iterator. See its documentation for
/// more information.
///
/// Note that the underlying iterator is still advanced when [`peek`] is
/// called for the first time: In order to retrieve the next element,
/// [`next`] is called on the underlying iterator, hence any side effects (i.e.
/// anything other than fetching the next value) of the [`next`] method
/// will occur.
/// /// [`peek`]: struct.Peekable.html#method.peek /// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next /// /// # Examples /// /// Basic usage: /// /// ``` /// let xs = [1, 2, 3]; /// /// let mut iter = xs.iter().peekable(); /// /// // peek() lets us see into the future /// assert_eq!(iter.peek(), Some(&&1)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), Some(&2)); /// /// // we can peek() multiple times, the iterator won't advance /// assert_eq!(iter.peek(), Some(&&3)); /// assert_eq!(iter.peek(), Some(&&3)); /// /// assert_eq!(iter.next(), Some(&3)); /// /// // after the iterator is finished, so is peek() /// assert_eq!(iter.peek(), None); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn peekable(self) -> Peekable<Self> where Self: Sized { Peekable{iter: self, peeked: None} } /// Creates an iterator that [`skip`]s elements based on a predicate. /// /// [`skip`]: #method.skip /// /// `skip_while()` takes a closure as an argument. It will call this /// closure on each element of the iterator, and ignore elements /// until it returns `false`. /// /// After `false` is returned, `skip_while()`'s job is over, and the /// rest of the elements are yielded. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [-1i32, 0, 1]; /// /// let mut iter = a.into_iter().skip_while(|x| x.is_negative()); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `skip_while()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [-1, 0, 1]; /// /// let mut iter = a.into_iter().skip_while(|x| **x < 0); // need two *s! 
/// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Stopping after an initial `false`: /// /// ``` /// let a = [-1, 0, 1, -2]; /// /// let mut iter = a.into_iter().skip_while(|x| **x < 0); /// /// assert_eq!(iter.next(), Some(&0)); /// assert_eq!(iter.next(), Some(&1)); /// /// // while this would have been false, since we already got a false, /// // skip_while() isn't used any more /// assert_eq!(iter.next(), Some(&-2)); /// /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn skip_while<P>(self, predicate: P) -> SkipWhile<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { SkipWhile { iter: self, flag: false, predicate } } /// Creates an iterator that yields elements based on a predicate. /// /// `take_while()` takes a closure as an argument. It will call this /// closure on each element of the iterator, and yield elements /// while it returns `true`. /// /// After `false` is returned, `take_while()`'s job is over, and the /// rest of the elements are ignored. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [-1i32, 0, 1]; /// /// let mut iter = a.into_iter().take_while(|x| x.is_negative()); /// /// assert_eq!(iter.next(), Some(&-1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Because the closure passed to `take_while()` takes a reference, and many /// iterators iterate over references, this leads to a possibly confusing /// situation, where the type of the closure is a double reference: /// /// ``` /// let a = [-1, 0, 1]; /// /// let mut iter = a.into_iter().take_while(|x| **x < 0); // need two *s! 
/// /// assert_eq!(iter.next(), Some(&-1)); /// assert_eq!(iter.next(), None); /// ``` /// /// Stopping after an initial `false`: /// /// ``` /// let a = [-1, 0, 1, -2]; /// /// let mut iter = a.into_iter().take_while(|x| **x < 0); /// /// assert_eq!(iter.next(), Some(&-1)); /// /// // We have more elements that are less than zero, but since we already /// // got a false, take_while() isn't used any more /// assert_eq!(iter.next(), None); /// ``` /// /// Because `take_while()` needs to look at the value in order to see if it /// should be included or not, consuming iterators will see that it is /// removed: /// /// ``` /// let a = [1, 2, 3, 4]; /// let mut iter = a.into_iter(); /// /// let result: Vec<i32> = iter.by_ref() /// .take_while(|n| **n != 3) /// .cloned() /// .collect(); /// /// assert_eq!(result, &[1, 2]); /// /// let result: Vec<i32> = iter.cloned().collect(); /// /// assert_eq!(result, &[4]); /// ``` /// /// The `3` is no longer there, because it was consumed in order to see if /// the iteration should stop, but wasn't placed back into the iterator or /// some similar thing. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P> where Self: Sized, P: FnMut(&Self::Item) -> bool, { TakeWhile { iter: self, flag: false, predicate } } /// Creates an iterator that skips the first `n` elements. /// /// After they have been consumed, the rest of the elements are yielded. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().skip(2); /// /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn skip(self, n: usize) -> Skip<Self> where Self: Sized { Skip { iter: self, n } } /// Creates an iterator that yields its first `n` elements. 
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let a = [1, 2, 3];
///
/// let mut iter = a.iter().take(2);
///
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), None);
/// ```
///
/// `take()` is often used with an infinite iterator, to make it finite:
///
/// ```
/// let mut iter = (0..).take(3);
///
/// assert_eq!(iter.next(), Some(0));
/// assert_eq!(iter.next(), Some(1));
/// assert_eq!(iter.next(), Some(2));
/// assert_eq!(iter.next(), None);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn take(self, n: usize) -> Take<Self>
    where Self: Sized,
{
    // NOTE(review): when the underlying iterator holds fewer than `n`
    // elements, `Take` presumably just yields all of them — the adaptor's
    // `next` is defined elsewhere; confirm against the `Take` impl.
    Take { iter: self, n }
}

/// An iterator adaptor similar to [`fold`] that holds internal state and
/// produces a new iterator.
///
/// [`fold`]: #method.fold
///
/// `scan()` takes two arguments: an initial value which seeds the internal
/// state, and a closure with two arguments, the first being a mutable
/// reference to the internal state and the second an iterator element.
/// The closure can assign to the internal state to share state between
/// iterations.
///
/// On iteration, the closure will be applied to each element of the
/// iterator and the return value from the closure, an [`Option`], is
/// yielded by the iterator.
/// /// [`Option`]: ../../std/option/enum.Option.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().scan(1, |state, &x| { /// // each iteration, we'll multiply the state by the element /// *state = *state * x; /// /// // then, we'll yield the negation of the state /// Some(-*state) /// }); /// /// assert_eq!(iter.next(), Some(-1)); /// assert_eq!(iter.next(), Some(-2)); /// assert_eq!(iter.next(), Some(-6)); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn scan<St, B, F>(self, initial_state: St, f: F) -> Scan<Self, St, F> where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option<B>, { Scan { iter: self, f, state: initial_state } } /// Creates an iterator that works like map, but flattens nested structure. /// /// The [`map`] adapter is very useful, but only when the closure /// argument produces values. If it produces an iterator instead, there's /// an extra layer of indirection. `flat_map()` will remove this extra layer /// on its own. /// /// You can think of `flat_map(f)` as the semantic equivalent /// of [`map`]ping, and then [`flatten`]ing as in `map(f).flatten()`. /// /// Another way of thinking about `flat_map()`: [`map`]'s closure returns /// one item for each element, and `flat_map()`'s closure returns an /// iterator for each element. /// /// [`map`]: #method.map /// [`flatten`]: #method.flatten /// /// # Examples /// /// Basic usage: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .flat_map(|s| s.chars()) /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F> where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, { FlatMap { inner: flatten_compat(self.map(f)) } } /// Creates an iterator that flattens nested structure. 
/// /// This is useful when you have an iterator of iterators or an iterator of /// things that can be turned into iterators and you want to remove one /// level of indirection. /// /// # Examples /// /// Basic usage: /// /// ``` /// let data = vec![vec![1, 2, 3, 4], vec![5, 6]]; /// let flattened = data.into_iter().flatten().collect::<Vec<u8>>(); /// assert_eq!(flattened, &[1, 2, 3, 4, 5, 6]); /// ``` /// /// Mapping and then flattening: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .map(|s| s.chars()) /// .flatten() /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` /// /// You can also rewrite this in terms of [`flat_map()`], which is preferable /// in this case since it conveys intent more clearly: /// /// ``` /// let words = ["alpha", "beta", "gamma"]; /// /// // chars() returns an iterator /// let merged: String = words.iter() /// .flat_map(|s| s.chars()) /// .collect(); /// assert_eq!(merged, "alphabetagamma"); /// ``` /// /// Flattening once only removes one level of nesting: /// /// ``` /// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]; /// /// let d2 = d3.iter().flatten().collect::<Vec<_>>(); /// assert_eq!(d2, [&[1, 2], &[3, 4], &[5, 6], &[7, 8]]); /// /// let d1 = d3.iter().flatten().flatten().collect::<Vec<_>>(); /// assert_eq!(d1, [&1, &2, &3, &4, &5, &6, &7, &8]); /// ``` /// /// Here we see that `flatten()` does not perform a "deep" flatten. /// Instead, only one level of nesting is removed. That is, if you /// `flatten()` a three-dimensional array the result will be /// two-dimensional and not one-dimensional. To get a one-dimensional /// structure, you have to `flatten()` again. 
/// /// [`flat_map()`]: #method.flat_map #[inline] #[stable(feature = "iterator_flatten", since = "1.29.0")] fn flatten(self) -> Flatten<Self> where Self: Sized, Self::Item: IntoIterator { Flatten { inner: flatten_compat(self) } } /// Creates an iterator which ends after the first [`None`]. /// /// After an iterator returns [`None`], future calls may or may not yield /// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a /// [`None`] is given, it will always return [`None`] forever. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`Some(T)`]: ../../std/option/enum.Option.html#variant.Some /// /// # Examples /// /// Basic usage: /// /// ``` /// // an iterator which alternates between Some and None /// struct Alternate { /// state: i32, /// } /// /// impl Iterator for Alternate { /// type Item = i32; /// /// fn next(&mut self) -> Option<i32> { /// let val = self.state; /// self.state = self.state + 1; /// /// // if it's even, Some(i32), else None /// if val % 2 == 0 { /// Some(val) /// } else { /// None /// } /// } /// } /// /// let mut iter = Alternate { state: 0 }; /// /// // we can see our iterator going back and forth /// assert_eq!(iter.next(), Some(0)); /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), Some(2)); /// assert_eq!(iter.next(), None); /// /// // however, once we fuse it... /// let mut iter = iter.fuse(); /// /// assert_eq!(iter.next(), Some(4)); /// assert_eq!(iter.next(), None); /// /// // it will always return None after the first time. /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fuse(self) -> Fuse<Self> where Self: Sized { Fuse{iter: self, done: false} } /// Do something with each element of an iterator, passing the value on. /// /// When using iterators, you'll often chain several of them together. 
/// While working on such code, you might want to check out what's /// happening at various parts in the pipeline. To do that, insert /// a call to `inspect()`. /// /// It's more common for `inspect()` to be used as a debugging tool than to /// exist in your final code, but applications may find it useful in certain /// situations when errors need to be logged before being discarded. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 4, 2, 3]; /// /// // this iterator sequence is complex. /// let sum = a.iter() /// .cloned() /// .filter(|x| x % 2 == 0) /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// /// // let's add some inspect() calls to investigate what's happening /// let sum = a.iter() /// .cloned() /// .inspect(|x| println!("about to filter: {}", x)) /// .filter(|x| x % 2 == 0) /// .inspect(|x| println!("made it through filter: {}", x)) /// .fold(0, |sum, i| sum + i); /// /// println!("{}", sum); /// ``` /// /// This will print: /// /// ```text /// 6 /// about to filter: 1 /// about to filter: 4 /// made it through filter: 4 /// about to filter: 2 /// made it through filter: 2 /// about to filter: 3 /// 6 /// ``` /// /// Logging errors before discarding them: /// /// ``` /// let lines = ["1", "2", "a"]; /// /// let sum: i32 = lines /// .iter() /// .map(|line| line.parse::<i32>()) /// .inspect(|num| { /// if let Err(ref e) = *num { /// println!("Parsing error: {}", e); /// } /// }) /// .filter_map(Result::ok) /// .sum(); /// /// println!("Sum: {}", sum); /// ``` /// /// This will print: /// /// ```text /// Parsing error: invalid digit found in string /// Sum: 3 /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn inspect<F>(self, f: F) -> Inspect<Self, F> where Self: Sized, F: FnMut(&Self::Item), { Inspect { iter: self, f } } /// Borrows an iterator, rather than consuming it. /// /// This is useful to allow applying iterator adaptors while still /// retaining ownership of the original iterator. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let iter = a.into_iter(); /// /// let sum: i32 = iter.take(5).fold(0, |acc, i| acc + i ); /// /// assert_eq!(sum, 6); /// /// // if we try to use iter again, it won't work. The following line /// // gives "error: use of moved value: `iter` /// // assert_eq!(iter.next(), None); /// /// // let's try that again /// let a = [1, 2, 3]; /// /// let mut iter = a.into_iter(); /// /// // instead, we add in a .by_ref() /// let sum: i32 = iter.by_ref().take(2).fold(0, |acc, i| acc + i ); /// /// assert_eq!(sum, 3); /// /// // now this is just fine: /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self where Self: Sized { self } /// Transforms an iterator into a collection. /// /// `collect()` can take anything iterable, and turn it into a relevant /// collection. This is one of the more powerful methods in the standard /// library, used in a variety of contexts. /// /// The most basic pattern in which `collect()` is used is to turn one /// collection into another. You take a collection, call [`iter`] on it, /// do a bunch of transformations, and then `collect()` at the end. /// /// One of the keys to `collect()`'s power is that many things you might /// not think of as 'collections' actually are. For example, a [`String`] /// is a collection of [`char`]s. And a collection of /// [`Result<T, E>`][`Result`] can be thought of as single /// [`Result`]`<Collection<T>, E>`. See the examples below for more. /// /// Because `collect()` is so general, it can cause problems with type /// inference. As such, `collect()` is one of the few times you'll see /// the syntax affectionately known as the 'turbofish': `::<>`. This /// helps the inference algorithm understand specifically which collection /// you're trying to collect into. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled: Vec<i32> = a.iter() /// .map(|&x| x * 2) /// .collect(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Note that we needed the `: Vec<i32>` on the left-hand side. This is because /// we could collect into, for example, a [`VecDeque<T>`] instead: /// /// [`VecDeque<T>`]: ../../std/collections/struct.VecDeque.html /// /// ``` /// use std::collections::VecDeque; /// /// let a = [1, 2, 3]; /// /// let doubled: VecDeque<i32> = a.iter().map(|&x| x * 2).collect(); /// /// assert_eq!(2, doubled[0]); /// assert_eq!(4, doubled[1]); /// assert_eq!(6, doubled[2]); /// ``` /// /// Using the 'turbofish' instead of annotating `doubled`: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<i32>>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Because `collect()` only cares about what you're collecting into, you can /// still use a partial type hint, `_`, with the turbofish: /// /// ``` /// let a = [1, 2, 3]; /// /// let doubled = a.iter().map(|x| x * 2).collect::<Vec<_>>(); /// /// assert_eq!(vec![2, 4, 6], doubled); /// ``` /// /// Using `collect()` to make a [`String`]: /// /// ``` /// let chars = ['g', 'd', 'k', 'k', 'n']; /// /// let hello: String = chars.iter() /// .map(|&x| x as u8) /// .map(|x| (x + 1) as char) /// .collect(); /// /// assert_eq!("hello", hello); /// ``` /// /// If you have a list of [`Result<T, E>`][`Result`]s, you can use `collect()` to /// see if any of them failed: /// /// ``` /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")]; /// /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect(); /// /// // gives us the first error /// assert_eq!(Err("nope"), result); /// /// let results = [Ok(1), Ok(3)]; /// /// let result: Result<Vec<_>, &str> = results.iter().cloned().collect(); /// /// // gives us the list of answers /// assert_eq!(Ok(vec![1, 3]), result); /// ``` /// /// 
[`iter`]: ../../std/iter/trait.Iterator.html#tymethod.next /// [`String`]: ../../std/string/struct.String.html /// [`char`]: ../../std/primitive.char.html /// [`Result`]: ../../std/result/enum.Result.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"] fn collect<B: FromIterator<Self::Item>>(self) -> B where Self: Sized { FromIterator::from_iter(self) } /// Consumes an iterator, creating two collections from it. /// /// The predicate passed to `partition()` can return `true`, or `false`. /// `partition()` returns a pair, all of the elements for which it returned /// `true`, and all of the elements for which it returned `false`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let (even, odd): (Vec<i32>, Vec<i32>) = a /// .into_iter() /// .partition(|&n| n % 2 == 0); /// /// assert_eq!(even, vec![2]); /// assert_eq!(odd, vec![1, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn partition<B, F>(self, mut f: F) -> (B, B) where Self: Sized, B: Default + Extend<Self::Item>, F: FnMut(&Self::Item) -> bool { let mut left: B = Default::default(); let mut right: B = Default::default(); for x in self { if f(&x) { left.extend(Some(x)) } else { right.extend(Some(x)) } } (left, right) } /// An iterator method that applies a function as long as it returns /// successfully, producing a single, final value. /// /// `try_fold()` takes two arguments: an initial value, and a closure with /// two arguments: an 'accumulator', and an element. The closure either /// returns successfully, with the value that the accumulator should have /// for the next iteration, or it returns failure, with an error value that /// is propagated back to the caller immediately (short-circuiting). /// /// The initial value is the value the accumulator will have on the first /// call. 
If applying the closure succeeded against every element of the /// iterator, `try_fold()` returns the final accumulator as success. /// /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// /// # Note to Implementors /// /// Most of the other (forward) methods have default implementations in /// terms of this one, so try to implement this explicitly if it can /// do something better than the default `for` loop implementation. /// /// In particular, try to have this call `try_fold()` on the internal parts /// from which this iterator is composed. If multiple calls are needed, /// the `?` operator may be convenient for chaining the accumulator value /// along, but beware any invariants that need to be upheld before those /// early returns. This is a `&mut self` method, so iteration needs to be /// resumable after hitting an error here. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// // the checked sum of all of the elements of the array /// let sum = a.iter().try_fold(0i8, |acc, &x| acc.checked_add(x)); /// /// assert_eq!(sum, Some(6)); /// ``` /// /// Short-circuiting: /// /// ``` /// let a = [10, 20, 30, 100, 40, 50]; /// let mut it = a.iter(); /// /// // This sum overflows when adding the 100 element /// let sum = it.try_fold(0i8, |acc, &x| acc.checked_add(x)); /// assert_eq!(sum, None); /// /// // Because it short-circuited, the remaining elements are still /// // available through the iterator. 
/// assert_eq!(it.len(), 2); /// assert_eq!(it.next(), Some(&40)); /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R where Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B> { let mut accum = init; while let Some(x) = self.next() { accum = f(accum, x)?; } Try::from_ok(accum) } /// An iterator method that applies a fallible function to each item in the /// iterator, stopping at the first error and returning that error. /// /// This can also be thought of as the fallible form of [`for_each()`] /// or as the stateless version of [`try_fold()`]. /// /// [`for_each()`]: #method.for_each /// [`try_fold()`]: #method.try_fold /// /// # Examples /// /// ``` /// use std::fs::rename; /// use std::io::{stdout, Write}; /// use std::path::Path; /// /// let data = ["no_tea.txt", "stale_bread.json", "torrential_rain.png"]; /// /// let res = data.iter().try_for_each(|x| writeln!(stdout(), "{}", x)); /// assert!(res.is_ok()); /// /// let mut it = data.iter().cloned(); /// let res = it.try_for_each(|x| rename(x, Path::new(x).with_extension("old"))); /// assert!(res.is_err()); /// // It short-circuited, so the remaining items are still in the iterator: /// assert_eq!(it.next(), Some("stale_bread.json")); /// ``` #[inline] #[stable(feature = "iterator_try_fold", since = "1.27.0")] fn try_for_each<F, R>(&mut self, mut f: F) -> R where Self: Sized, F: FnMut(Self::Item) -> R, R: Try<Ok=()> { self.try_fold((), move |(), x| f(x)) } /// An iterator method that applies a function, producing a single, final value. /// /// `fold()` takes two arguments: an initial value, and a closure with two /// arguments: an 'accumulator', and an element. The closure returns the value that /// the accumulator should have for the next iteration. /// /// The initial value is the value the accumulator will have on the first /// call. 
/// /// After applying this closure to every element of the iterator, `fold()` /// returns the accumulator. /// /// This operation is sometimes called 'reduce' or 'inject'. /// /// Folding is useful whenever you have a collection of something, and want /// to produce a single value from it. /// /// Note: `fold()`, and similar methods that traverse the entire iterator, /// may not terminate for infinite iterators, even on traits for which a /// result is determinable in finite time. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// // the sum of all of the elements of the array /// let sum = a.iter().fold(0, |acc, x| acc + x); /// /// assert_eq!(sum, 6); /// ``` /// /// Let's walk through each step of the iteration here: /// /// | element | acc | x | result | /// |---------|-----|---|--------| /// | | 0 | | | /// | 1 | 0 | 1 | 1 | /// | 2 | 1 | 2 | 3 | /// | 3 | 3 | 3 | 6 | /// /// And so, our final result, `6`. /// /// It's common for people who haven't used iterators a lot to /// use a `for` loop with a list of things to build up a result. Those /// can be turned into `fold()`s: /// /// [`for`]: ../../book/first-edition/loops.html#for /// /// ``` /// let numbers = [1, 2, 3, 4, 5]; /// /// let mut result = 0; /// /// // for loop: /// for i in &numbers { /// result = result + i; /// } /// /// // fold: /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x); /// /// // they're the same /// assert_eq!(result, result2); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fold<B, F>(mut self, init: B, mut f: F) -> B where Self: Sized, F: FnMut(B, Self::Item) -> B, { self.try_fold(init, move |acc, x| Ok::<B, !>(f(acc, x))).unwrap() } /// Tests if every element of the iterator matches a predicate. /// /// `all()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if they all return /// `true`, then so does `all()`. 
If any of them return `false`, it /// returns `false`. /// /// `all()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `false`, given that no matter what else happens, /// the result will also be `false`. /// /// An empty iterator returns `true`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().all(|&x| x > 0)); /// /// assert!(!a.iter().all(|&x| x > 2)); /// ``` /// /// Stopping at the first `false`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(!iter.all(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn all<F>(&mut self, mut f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool { self.try_for_each(move |x| { if f(x) { LoopState::Continue(()) } else { LoopState::Break(()) } }) == LoopState::Continue(()) } /// Tests if any element of the iterator matches a predicate. /// /// `any()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then so does `any()`. If they all return `false`, it /// returns `false`. /// /// `any()` is short-circuiting; in other words, it will stop processing /// as soon as it finds a `true`, given that no matter what else happens, /// the result will also be `true`. /// /// An empty iterator returns `false`. /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert!(a.iter().any(|&x| x > 0)); /// /// assert!(!a.iter().any(|&x| x > 5)); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert!(iter.any(|&x| x != 2)); /// /// // we can still use `iter`, as there are more elements. 
/// assert_eq!(iter.next(), Some(&2)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn any<F>(&mut self, mut f: F) -> bool where Self: Sized, F: FnMut(Self::Item) -> bool { self.try_for_each(move |x| { if f(x) { LoopState::Break(()) } else { LoopState::Continue(()) } }) == LoopState::Break(()) } /// Searches for an element of an iterator that satisfies a predicate. /// /// `find()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if any of them return /// `true`, then `find()` returns [`Some(element)`]. If they all return /// `false`, it returns [`None`]. /// /// `find()` is short-circuiting; in other words, it will stop processing /// as soon as the closure returns `true`. /// /// Because `find()` takes a reference, and many iterators iterate over /// references, this leads to a possibly confusing situation where the /// argument is a double reference. You can see this effect in the /// examples below, with `&&x`. /// /// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2)); /// /// assert_eq!(a.iter().find(|&&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.find(|&&x| x == 2), Some(&2)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item> where Self: Sized, P: FnMut(&Self::Item) -> bool, { self.try_for_each(move |x| { if predicate(&x) { LoopState::Break(x) } else { LoopState::Continue(()) } }).break_value() } /// Applies function to the elements of iterator and returns /// the first non-none result. 
/// /// `iter.find_map(f)` is equivalent to `iter.filter_map(f).next()`. /// /// /// # Examples /// /// ``` /// let a = ["lol", "NaN", "2", "5"]; /// /// let first_number = a.iter().find_map(|s| s.parse().ok()); /// /// assert_eq!(first_number, Some(2)); /// ``` #[inline] #[stable(feature = "iterator_find_map", since = "1.30.0")] fn find_map<B, F>(&mut self, mut f: F) -> Option<B> where Self: Sized, F: FnMut(Self::Item) -> Option<B>, { self.try_for_each(move |x| { match f(x) { Some(x) => LoopState::Break(x), None => LoopState::Continue(()), } }).break_value() } /// Searches for an element in an iterator, returning its index. /// /// `position()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, and if one of them /// returns `true`, then `position()` returns [`Some(index)`]. If all of /// them return `false`, it returns [`None`]. /// /// `position()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. /// /// # Overflow Behavior /// /// The method does no guarding against overflows, so if there are more /// than [`usize::MAX`] non-matching elements, it either produces the wrong /// result or panics. If debug assertions are enabled, a panic is /// guaranteed. /// /// # Panics /// /// This function might panic if the iterator has more than `usize::MAX` /// non-matching elements. 
/// /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [`usize::MAX`]: ../../std/usize/constant.MAX.html /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().position(|&x| x == 2), Some(1)); /// /// assert_eq!(a.iter().position(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3, 4]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.position(|&x| x >= 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&3)); /// /// // The returned index depends on iterator state /// assert_eq!(iter.position(|&x| x == 4), Some(0)); /// /// ``` #[inline] #[rustc_inherit_overflow_checks] #[stable(feature = "rust1", since = "1.0.0")] fn position<P>(&mut self, mut predicate: P) -> Option<usize> where Self: Sized, P: FnMut(Self::Item) -> bool, { // The addition might panic on overflow self.try_fold(0, move |i, x| { if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(i + 1) } }).break_value() } /// Searches for an element in an iterator from the right, returning its /// index. /// /// `rposition()` takes a closure that returns `true` or `false`. It applies /// this closure to each element of the iterator, starting from the end, /// and if one of them returns `true`, then `rposition()` returns /// [`Some(index)`]. If all of them return `false`, it returns [`None`]. /// /// `rposition()` is short-circuiting; in other words, it will stop /// processing as soon as it finds a `true`. 
/// /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2)); /// /// assert_eq!(a.iter().rposition(|&x| x == 5), None); /// ``` /// /// Stopping at the first `true`: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter(); /// /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); /// /// // we can still use `iter`, as there are more elements. /// assert_eq!(iter.next(), Some(&1)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where P: FnMut(Self::Item) -> bool, Self: Sized + ExactSizeIterator + DoubleEndedIterator { // No need for an overflow check here, because `ExactSizeIterator` // implies that the number of elements fits into a `usize`. let n = self.len(); self.try_rfold(n, move |i, x| { let i = i - 1; if predicate(x) { LoopState::Break(i) } else { LoopState::Continue(i) } }).break_value() } /// Returns the maximum element of an iterator. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().max(), Some(&3)); /// assert_eq!(b.iter().max(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn max(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord { select_fold1(self, |_| (), // switch to y even if it is only equal, to preserve // stability. |_, x, _, y| *x <= *y) .map(|(_, x)| x) } /// Returns the minimum element of an iterator. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. 
/// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let b: Vec<u32> = Vec::new(); /// /// assert_eq!(a.iter().min(), Some(&1)); /// assert_eq!(b.iter().min(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn min(self) -> Option<Self::Item> where Self: Sized, Self::Item: Ord { select_fold1(self, |_| (), // only switch to y if it is strictly smaller, to // preserve stability. |_, x, _, y| *x > *y) .map(|(_, x)| x) } /// Returns the element that gives the maximum value from the /// specified function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn max_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { select_fold1(self, f, // switch to y even if it is only equal, to preserve // stability. |x_p, _, y_p, _| x_p <= y_p) .map(|(_, x)| x) } /// Returns the element that gives the maximum value with respect to the /// specified comparison function. /// /// If several elements are equally maximum, the last element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5); /// ``` #[inline] #[stable(feature = "iter_max_by", since = "1.15.0")] fn max_by<F>(self, mut compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { select_fold1(self, |_| (), // switch to y even if it is only equal, to preserve // stability. 
|_, x, _, y| Ordering::Greater != compare(x, y)) .map(|(_, x)| x) } /// Returns the element that gives the minimum value from the /// specified function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); /// ``` #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] fn min_by_key<B: Ord, F>(self, f: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item) -> B, { select_fold1(self, f, // only switch to y if it is strictly smaller, to // preserve stability. |x_p, _, y_p, _| x_p > y_p) .map(|(_, x)| x) } /// Returns the element that gives the minimum value with respect to the /// specified comparison function. /// /// If several elements are equally minimum, the first element is /// returned. If the iterator is empty, [`None`] is returned. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// ``` /// let a = [-3_i32, 0, 1, 5, -10]; /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10); /// ``` #[inline] #[stable(feature = "iter_min_by", since = "1.15.0")] fn min_by<F>(self, mut compare: F) -> Option<Self::Item> where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { select_fold1(self, |_| (), // switch to y even if it is strictly smaller, to // preserve stability. |_, x, _, y| Ordering::Greater == compare(x, y)) .map(|(_, x)| x) } /// Reverses an iterator's direction. /// /// Usually, iterators iterate from left to right. After using `rev()`, /// an iterator will instead iterate from right to left. /// /// This is only possible if the iterator has an end, so `rev()` only /// works on [`DoubleEndedIterator`]s. 
/// /// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html /// /// # Examples /// /// ``` /// let a = [1, 2, 3]; /// /// let mut iter = a.iter().rev(); /// /// assert_eq!(iter.next(), Some(&3)); /// assert_eq!(iter.next(), Some(&2)); /// assert_eq!(iter.next(), Some(&1)); /// /// assert_eq!(iter.next(), None); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rev(self) -> Rev<Self> where Self: Sized + DoubleEndedIterator { Rev{iter: self} } /// Converts an iterator of pairs into a pair of containers. /// /// `unzip()` consumes an entire iterator of pairs, producing two /// collections: one from the left elements of the pairs, and one /// from the right elements. /// /// This function is, in some sense, the opposite of [`zip`]. /// /// [`zip`]: #method.zip /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [(1, 2), (3, 4)]; /// /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip(); /// /// assert_eq!(left, [1, 3]); /// assert_eq!(right, [2, 4]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn unzip<A, B, FromA, FromB>(self) -> (FromA, FromB) where FromA: Default + Extend<A>, FromB: Default + Extend<B>, Self: Sized + Iterator<Item=(A, B)>, { let mut ts: FromA = Default::default(); let mut us: FromB = Default::default(); self.for_each(|(t, u)| { ts.extend(Some(t)); us.extend(Some(u)); }); (ts, us) } /// Creates an iterator which [`clone`]s all of its elements. /// /// This is useful when you have an iterator over `&T`, but you need an /// iterator over `T`. 
/// /// [`clone`]: ../../std/clone/trait.Clone.html#tymethod.clone /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let v_cloned: Vec<_> = a.iter().cloned().collect(); /// /// // cloned is the same as .map(|&x| x), for integers /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); /// /// assert_eq!(v_cloned, vec![1, 2, 3]); /// assert_eq!(v_map, vec![1, 2, 3]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn cloned<'a, T: 'a>(self) -> Cloned<Self> where Self: Sized + Iterator<Item=&'a T>, T: Clone { Cloned { it: self } } /// Repeats an iterator endlessly. /// /// Instead of stopping at [`None`], the iterator will instead start again, /// from the beginning. After iterating again, it will start at the /// beginning again. And again. And again. Forever. /// /// [`None`]: ../../std/option/enum.Option.html#variant.None /// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// /// let mut it = a.iter().cycle(); /// /// assert_eq!(it.next(), Some(&1)); /// assert_eq!(it.next(), Some(&2)); /// assert_eq!(it.next(), Some(&3)); /// assert_eq!(it.next(), Some(&1)); /// assert_eq!(it.next(), Some(&2)); /// assert_eq!(it.next(), Some(&3)); /// assert_eq!(it.next(), Some(&1)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] fn cycle(self) -> Cycle<Self> where Self: Sized + Clone { Cycle{orig: self.clone(), iter: self} } /// Sums the elements of an iterator. /// /// Takes each element, adds them together, and returns the result. /// /// An empty iterator returns the zero value of the type. /// /// # Panics /// /// When calling `sum()` and a primitive integer type is being returned, this /// method will panic if the computation overflows and debug assertions are /// enabled. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// let a = [1, 2, 3]; /// let sum: i32 = a.iter().sum(); /// /// assert_eq!(sum, 6); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] fn sum<S>(self) -> S where Self: Sized, S: Sum<Self::Item>, { Sum::sum(self) } /// Iterates over the entire iterator, multiplying all the elements /// /// An empty iterator returns the one value of the type. /// /// # Panics /// /// When calling `product()` and a primitive integer type is being returned, /// method will panic if the computation overflows and debug assertions are /// enabled. /// /// # Examples /// /// ``` /// fn factorial(n: u32) -> u32 { /// (1..).take_while(|&i| i <= n).product() /// } /// assert_eq!(factorial(0), 1); /// assert_eq!(factorial(1), 1); /// assert_eq!(factorial(5), 120); /// ``` #[stable(feature = "iter_arith", since = "1.11.0")] fn product<P>(self) -> P where Self: Sized, P: Product<Self::Item>, { Product::product(self) } /// Lexicographically compares the elements of this `Iterator` with those /// of another. #[stable(feature = "iter_order", since = "1.5.0")] fn cmp<I>(mut self, other: I) -> Ordering where I: IntoIterator<Item = Self::Item>, Self::Item: Ord, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => if other.next().is_none() { return Ordering::Equal } else { return Ordering::Less }, Some(val) => val, }; let y = match other.next() { None => return Ordering::Greater, Some(val) => val, }; match x.cmp(&y) { Ordering::Equal => (), non_eq => return non_eq, } } } /// Lexicographically compares the elements of this `Iterator` with those /// of another. 
#[stable(feature = "iter_order", since = "1.5.0")] fn partial_cmp<I>(mut self, other: I) -> Option<Ordering> where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => if other.next().is_none() { return Some(Ordering::Equal) } else { return Some(Ordering::Less) }, Some(val) => val, }; let y = match other.next() { None => return Some(Ordering::Greater), Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Equal) => (), non_eq => return non_eq, } } } /// Determines if the elements of this `Iterator` are equal to those of /// another. #[stable(feature = "iter_order", since = "1.5.0")] fn eq<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_none(), Some(val) => val, }; let y = match other.next() { None => return false, Some(val) => val, }; if x != y { return false } } } /// Determines if the elements of this `Iterator` are unequal to those of /// another. #[stable(feature = "iter_order", since = "1.5.0")] fn ne<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_some(), Some(val) => val, }; let y = match other.next() { None => return true, Some(val) => val, }; if x != y { return true } } } /// Determines if the elements of this `Iterator` are lexicographically /// less than those of another. 
#[stable(feature = "iter_order", since = "1.5.0")] fn lt<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => return other.next().is_some(), Some(val) => val, }; let y = match other.next() { None => return false, Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Less) => return true, Some(Ordering::Equal) => (), Some(Ordering::Greater) => return false, None => return false, } } } /// Determines if the elements of this `Iterator` are lexicographically /// less or equal to those of another. #[stable(feature = "iter_order", since = "1.5.0")] fn le<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => { other.next(); return true; }, Some(val) => val, }; let y = match other.next() { None => return false, Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Less) => return true, Some(Ordering::Equal) => (), Some(Ordering::Greater) => return false, None => return false, } } } /// Determines if the elements of this `Iterator` are lexicographically /// greater than those of another. #[stable(feature = "iter_order", since = "1.5.0")] fn gt<I>(mut self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd<I::Item>, Self: Sized, { let mut other = other.into_iter(); loop { let x = match self.next() { None => { other.next(); return false; }, Some(val) => val, }; let y = match other.next() { None => return true, Some(val) => val, }; match x.partial_cmp(&y) { Some(Ordering::Less) => return false, Some(Ordering::Equal) => (), Some(Ordering::Greater) => return true, None => return false, } } } /// Determines if the elements of this `Iterator` are lexicographically /// greater than or equal to those of another. 
    #[stable(feature = "iter_order", since = "1.5.0")]
    fn ge<I>(mut self, other: I) -> bool where
        I: IntoIterator,
        Self::Item: PartialOrd<I::Item>,
        Self: Sized,
    {
        let mut other = other.into_iter();

        loop {
            let x = match self.next() {
                // `self` exhausted: `self >= other` holds only if `other` is
                // exhausted too (equal sequences).
                None => return other.next().is_none(),
                Some(val) => val,
            };

            let y = match other.next() {
                // `other` is a strict prefix of `self`, so `self` is greater.
                None => return true,
                Some(val) => val,
            };

            match x.partial_cmp(&y) {
                Some(Ordering::Less) => return false,
                // Equal so far: keep comparing the remaining elements.
                Some(Ordering::Equal) => (),
                Some(Ordering::Greater) => return true,
                // Incomparable elements (e.g. NaN): the whole comparison
                // fails, so `>=` is false.
                None => return false,
            }
        }
    }
}

/// Select an element from an iterator based on the given "projection"
/// and "comparison" function.
///
/// This is an idiosyncratic helper to try to factor out the
/// commonalities of {max,min}{,_by}. In particular, this avoids
/// having to implement optimizations several times.
#[inline]
fn select_fold1<I, B, FProj, FCmp>(mut it: I,
                                   mut f_proj: FProj,
                                   mut f_cmp: FCmp) -> Option<(B, I::Item)>
    where I: Iterator,
          FProj: FnMut(&I::Item) -> B,
          FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool
{
    // start with the first element as our selection. This avoids
    // having to use `Option`s inside the loop, translating to a
    // sizeable performance gain (6x in one case).
    //
    // NOTE: `f_cmp` receives (current projection, current element,
    // candidate projection, candidate element) and returns `true` to
    // switch the selection to the candidate. The callers' choice of
    // `<=` vs `>` in `f_cmp` is what gives max/min their documented
    // stability guarantees (max keeps the last of equals, min the
    // first) — do not "simplify" those comparisons.
    it.next().map(|first| {
        let first_p = f_proj(&first);

        it.fold((first_p, first), |(sel_p, sel), x| {
            let x_p = f_proj(&x);
            if f_cmp(&sel_p, &sel, &x_p, &x) {
                (x_p, x)
            } else {
                (sel_p, sel)
            }
        })
    })
}

// Blanket impl: a `&mut I` is itself an iterator, which is what makes
// `by_ref()` (which just returns `self`) usable with all the adaptors.
// `?Sized` allows `&mut dyn Iterator` as well.
#[stable(feature = "rust1", since = "1.0.0")]
impl<I: Iterator + ?Sized> Iterator for &mut I {
    type Item = I::Item;
    // All three methods simply delegate through the mutable reference;
    // `nth` and `size_hint` are forwarded so the underlying iterator's
    // optimized implementations are not lost behind the reference.
    fn next(&mut self) -> Option<I::Item> { (**self).next() }
    fn size_hint(&self) -> (usize, Option<usize>) { (**self).size_hint() }
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        (**self).nth(n)
    }
}
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Runtime calls emitted by the compiler.
//
// NOTE(review): this is pre-1.0 Rust (`do` blocks, `~str`, bare `*T`
// pointers, `pub extern` blocks); it is kept here as a historical snapshot
// and will not compile with any modern toolchain.

use cast::transmute;
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int};
use managed::raw::BoxRepr;
use str;
use sys;
use unstable::exchange_alloc;
// NOTE(review): duplicate of the `use cast::transmute;` above — harmless
// but should be removed.
use cast::transmute;

#[allow(non_camel_case_types)]
pub type rust_task = c_void;

// Top bit of a box's refcount word doubles as the "frozen" flag consulted
// by the borrow-check lang items below.
#[cfg(target_word_size = "32")]
pub static FROZEN_BIT: uint = 0x80000000;
#[cfg(target_word_size = "64")]
pub static FROZEN_BIT: uint = 0x8000000000000000;

pub mod rustrt {
    use libc::{c_char, uintptr_t};

    pub extern {
        #[rust_stack]
        unsafe fn rust_upcall_malloc(td: *c_char, size: uintptr_t) -> *c_char;

        #[rust_stack]
        unsafe fn rust_upcall_free(ptr: *c_char);
    }
}

// Entry point for `fail!()`: hands off to the unwinding machinery and
// never returns.
#[lang="fail_"]
pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! {
    sys::begin_unwind_(expr, file, line);
}

// Called by compiler-emitted bounds checks on out-of-range indexing.
#[lang="fail_bounds_check"]
pub unsafe fn fail_bounds_check(file: *c_char, line: size_t,
                                index: size_t, len: size_t) {
    let msg = fmt!("index out of bounds: the len is %d but the index is %d",
                   len as int, index as int);
    do str::as_buf(msg) |p, _len| {
        fail_(p as *c_char, file, line);
    }
}

// Aborts the task when a frozen (borrowed) box is mutated; file/line info
// is unavailable here, hence the "???" placeholder.
pub unsafe fn fail_borrowed() {
    let msg = "borrowed";
    do str::as_buf(msg) |msg_p, _| {
        do str::as_buf("???") |file_p, _| {
            fail_(msg_p as *c_char, file_p as *c_char, 0);
        }
    }
}

// FIXME #4942: Make these signatures agree with exchange_alloc's signatures
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char {
    transmute(exchange_alloc::malloc(transmute(td), transmute(size)))
}

// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from
// inside a landing pad may corrupt the state of the exception handler. If a
// problem occurs, call exit instead.
#[lang="exchange_free"]
#[inline(always)]
pub unsafe fn exchange_free(ptr: *c_char) {
    exchange_alloc::free(transmute(ptr))
}

// Task-local allocation goes through a runtime upcall.
#[lang="malloc"]
#[inline(always)]
pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
    return rustrt::rust_upcall_malloc(td, size);
}

// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from
// inside a landing pad may corrupt the state of the exception handler. If a
// problem occurs, call exit instead.
#[lang="free"]
#[inline(always)]
pub unsafe fn local_free(ptr: *c_char) {
    rustrt::rust_upcall_free(ptr);
}

// Marks a box as immutably borrowed by setting the frozen bit.
#[lang="borrow_as_imm"]
#[inline(always)]
pub unsafe fn borrow_as_imm(a: *u8) {
    let a: *mut BoxRepr = transmute(a);
    (*a).header.ref_count |= FROZEN_BIT;
}

// Clears the frozen bit when a mutable borrow ends.
#[lang="return_to_mut"]
#[inline(always)]
pub unsafe fn return_to_mut(a: *u8) {
    // Sometimes the box is null, if it is conditionally frozen.
    // See e.g. #4904.
    if !a.is_null() {
        let a: *mut BoxRepr = transmute(a);
        (*a).header.ref_count &= !FROZEN_BIT;
    }
}

// Fails the task if the box is currently frozen (immutably borrowed).
#[lang="check_not_borrowed"]
#[inline(always)]
pub unsafe fn check_not_borrowed(a: *u8) {
    let a: *mut BoxRepr = transmute(a);
    if ((*a).header.ref_count & FROZEN_BIT) != 0 {
        fail_borrowed();
    }
}

#[lang="strdup_uniq"]
#[inline(always)]
pub unsafe fn strdup_uniq(ptr: *c_uchar, len: uint) -> ~str {
    str::raw::from_buf_len(ptr, len)
}

#[lang="start"]
#[cfg(stage0)]
pub fn start(main: *u8, argc: int, argv: *c_char,
             crate_map: *u8) -> int {
    use libc::getenv;
    use rt::start;

    unsafe {
        // NOTE(review): the name is misleading — this is true when
        // RUST_NEWRT is *unset*, and that path calls the C `rust_start`
        // (the old runtime). The commit message embedded later in this
        // stream confirms it was renamed to `use_old_rt`.
        let use_new_rt = do str::as_c_str("RUST_NEWRT") |s| {
            getenv(s).is_null()
        };
        if use_new_rt {
            return rust_start(main as *c_void, argc as c_int, argv,
                              crate_map as *c_void) as int;
        } else {
            return start(main, argc, argv, crate_map);
        }
    }

    extern {
        fn rust_start(main: *c_void, argc: c_int, argv: *c_char,
                      crate_map: *c_void) -> c_int;
    }
}

#[lang="start"]
#[cfg(not(stage0))]
pub fn start(main: *u8, argc: int, argv: **c_char,
             crate_map: *u8) -> int {
    use libc::getenv;
    use rt::start;

    unsafe {
        // NOTE(review): same misleading name as in the stage0 variant above;
        // only the `argv` type differs between the two stages.
        let use_new_rt = do str::as_c_str("RUST_NEWRT") |s| {
            getenv(s).is_null()
        };
        if use_new_rt {
            return rust_start(main as *c_void, argc as c_int, argv,
                              crate_map as *c_void) as int;
        } else {
            return start(main, argc, argv, crate_map);
        }
    }

    extern {
        fn rust_start(main: *c_void, argc: c_int, argv: **c_char,
                      crate_map: *c_void) -> c_int;
    }
}

// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:

// (non-code residue in the source stream — a commit message separating two
// revisions of this file:)
Rename confusing var, use_new_rt -> use_old_rt.

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option.
// This file may not be copied, modified, or distributed
// except according to those terms.

//! Runtime calls emitted by the compiler.
//
// NOTE(review): second revision of the same pre-1.0 file; identical to the
// copy above except that the misnamed `use_new_rt` variable has been
// renamed to `use_old_rt`.

use cast::transmute;
use libc::{c_char, c_uchar, c_void, size_t, uintptr_t, c_int};
use managed::raw::BoxRepr;
use str;
use sys;
use unstable::exchange_alloc;
// NOTE(review): duplicate import, carried over from the first revision.
use cast::transmute;

#[allow(non_camel_case_types)]
pub type rust_task = c_void;

// Top bit of a box's refcount word doubles as the "frozen" flag.
#[cfg(target_word_size = "32")]
pub static FROZEN_BIT: uint = 0x80000000;
#[cfg(target_word_size = "64")]
pub static FROZEN_BIT: uint = 0x8000000000000000;

pub mod rustrt {
    use libc::{c_char, uintptr_t};

    pub extern {
        #[rust_stack]
        unsafe fn rust_upcall_malloc(td: *c_char, size: uintptr_t) -> *c_char;

        #[rust_stack]
        unsafe fn rust_upcall_free(ptr: *c_char);
    }
}

// Entry point for `fail!()`: never returns.
#[lang="fail_"]
pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! {
    sys::begin_unwind_(expr, file, line);
}

#[lang="fail_bounds_check"]
pub unsafe fn fail_bounds_check(file: *c_char, line: size_t,
                                index: size_t, len: size_t) {
    let msg = fmt!("index out of bounds: the len is %d but the index is %d",
                   len as int, index as int);
    do str::as_buf(msg) |p, _len| {
        fail_(p as *c_char, file, line);
    }
}

pub unsafe fn fail_borrowed() {
    let msg = "borrowed";
    do str::as_buf(msg) |msg_p, _| {
        do str::as_buf("???") |file_p, _| {
            fail_(msg_p as *c_char, file_p as *c_char, 0);
        }
    }
}

// FIXME #4942: Make these signatures agree with exchange_alloc's signatures
#[lang="exchange_malloc"]
#[inline(always)]
pub unsafe fn exchange_malloc(td: *c_char, size: uintptr_t) -> *c_char {
    transmute(exchange_alloc::malloc(transmute(td), transmute(size)))
}

// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from
// inside a landing pad may corrupt the state of the exception handler. If a
// problem occurs, call exit instead.
#[lang="exchange_free"]
#[inline(always)]
pub unsafe fn exchange_free(ptr: *c_char) {
    exchange_alloc::free(transmute(ptr))
}

#[lang="malloc"]
#[inline(always)]
pub unsafe fn local_malloc(td: *c_char, size: uintptr_t) -> *c_char {
    return rustrt::rust_upcall_malloc(td, size);
}

// NB: Calls to free CANNOT be allowed to fail, as throwing an exception from
// inside a landing pad may corrupt the state of the exception handler. If a
// problem occurs, call exit instead.
#[lang="free"]
#[inline(always)]
pub unsafe fn local_free(ptr: *c_char) {
    rustrt::rust_upcall_free(ptr);
}

#[lang="borrow_as_imm"]
#[inline(always)]
pub unsafe fn borrow_as_imm(a: *u8) {
    let a: *mut BoxRepr = transmute(a);
    (*a).header.ref_count |= FROZEN_BIT;
}

#[lang="return_to_mut"]
#[inline(always)]
pub unsafe fn return_to_mut(a: *u8) {
    // Sometimes the box is null, if it is conditionally frozen.
    // See e.g. #4904.
    if !a.is_null() {
        let a: *mut BoxRepr = transmute(a);
        (*a).header.ref_count &= !FROZEN_BIT;
    }
}

#[lang="check_not_borrowed"]
#[inline(always)]
pub unsafe fn check_not_borrowed(a: *u8) {
    let a: *mut BoxRepr = transmute(a);
    if ((*a).header.ref_count & FROZEN_BIT) != 0 {
        fail_borrowed();
    }
}

#[lang="strdup_uniq"]
#[inline(always)]
pub unsafe fn strdup_uniq(ptr: *c_uchar, len: uint) -> ~str {
    str::raw::from_buf_len(ptr, len)
}

#[lang="start"]
#[cfg(stage0)]
pub fn start(main: *u8, argc: int, argv: *c_char,
             crate_map: *u8) -> int {
    use libc::getenv;
    use rt::start;

    unsafe {
        // True when RUST_NEWRT is unset: take the old C runtime path.
        let use_old_rt = do str::as_c_str("RUST_NEWRT") |s| {
            getenv(s).is_null()
        };
        if use_old_rt {
            return rust_start(main as *c_void, argc as c_int, argv,
                              crate_map as *c_void) as int;
        } else {
            return start(main, argc, argv, crate_map);
        }
    }

    extern {
        fn rust_start(main: *c_void, argc: c_int, argv: *c_char,
                      crate_map: *c_void) -> c_int;
    }
}

#[lang="start"]
#[cfg(not(stage0))]
pub fn start(main: *u8, argc: int, argv: **c_char,
             crate_map: *u8) -> int {
    use libc::getenv;
    use rt::start;

    unsafe {
        // True when RUST_NEWRT is unset: take the old C runtime path.
        let use_old_rt = do str::as_c_str("RUST_NEWRT") |s| {
            getenv(s).is_null()
        };
        if use_old_rt {
            return rust_start(main as *c_void, argc as c_int, argv,
                              crate_map as *c_void) as int;
        } else {
            return start(main, argc, argv, crate_map);
        }
    }

    extern {
        fn rust_start(main: *c_void, argc: c_int, argv: **c_char,
                      crate_map: *c_void) -> c_int;
    }
}

// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Type substitutions.
//
// NOTE(review): this is ~2014-era rustc internals (`#[deriving]`, `uint`,
// `|args| -> T` closure types); preserved as a historical snapshot.

pub use self::ParamSpace::*;
pub use self::RegionSubsts::*;

use middle::ty::{mod, Ty};
use middle::ty_fold::{mod, TypeFoldable, TypeFolder};
use util::ppaux::Repr;

use std::fmt;
use std::slice::Items;
use std::vec::Vec;
use syntax::codemap::{Span, DUMMY_SP};

///////////////////////////////////////////////////////////////////////////

/// A substitution mapping type/region parameters to new values. We
/// identify each in-scope parameter by an *index* and a *parameter
/// space* (which indicates where the parameter is defined; see
/// `ParamSpace`).
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct Substs<'tcx> {
    pub types: VecPerParamSpace<Ty<'tcx>>,
    pub regions: RegionSubsts,
}

/// Represents the values to use when substituting lifetime parameters.
/// If the value is `ErasedRegions`, then this subst is occurring during
/// trans, and all region parameters will be replaced with `ty::ReStatic`.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub enum RegionSubsts {
    ErasedRegions,
    NonerasedRegions(VecPerParamSpace<ty::Region>)
}

impl<'tcx> Substs<'tcx> {
    // Constructor with explicit (non-erased) region substitutions.
    pub fn new(t: VecPerParamSpace<Ty<'tcx>>,
               r: VecPerParamSpace<ty::Region>)
               -> Substs<'tcx>
    {
        Substs { types: t, regions: NonerasedRegions(r) }
    }

    // Convenience constructor: only the type space is populated.
    pub fn new_type(t: Vec<Ty<'tcx>>,
                    r: Vec<ty::Region>)
                    -> Substs<'tcx>
    {
        Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new(), Vec::new()),
                    VecPerParamSpace::new(r, Vec::new(), Vec::new(), Vec::new()))
    }

    // Convenience constructor for trait references: type space `t`,
    // self type `s`, assoc types `a`.
    pub fn new_trait(t: Vec<Ty<'tcx>>,
                     r: Vec<ty::Region>,
                     a: Vec<Ty<'tcx>>,
                     s: Ty<'tcx>)
                     -> Substs<'tcx>
    {
        Substs::new(VecPerParamSpace::new(t, vec!(s), a, Vec::new()),
                    VecPerParamSpace::new(r, Vec::new(), Vec::new(), Vec::new()))
    }

    // Constructor used during trans, where regions are erased.
    pub fn erased(t: VecPerParamSpace<Ty<'tcx>>) -> Substs<'tcx>
    {
        Substs { types: t, regions: ErasedRegions }
    }

    pub fn empty() -> Substs<'tcx> {
        Substs {
            types: VecPerParamSpace::empty(),
            regions: NonerasedRegions(VecPerParamSpace::empty()),
        }
    }

    pub fn trans_empty() -> Substs<'tcx> {
        Substs {
            types: VecPerParamSpace::empty(),
            regions: ErasedRegions
        }
    }

    // True when applying this substitution would change nothing.
    pub fn is_noop(&self) -> bool {
        let regions_is_noop = match self.regions {
            ErasedRegions => false, // may be used to canonicalize
            NonerasedRegions(ref regions) => regions.is_empty(),
        };
        regions_is_noop && self.types.is_empty()
    }

    pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
        *self.types.get(ty_param_def.space, ty_param_def.index)
    }

    pub fn has_regions_escaping_depth(&self, depth: uint) -> bool {
        self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
            match self.regions {
                ErasedRegions =>
                    false,
                NonerasedRegions(ref regions) =>
                    regions.iter().any(|r| r.escapes_depth(depth)),
            }
        }
    }

    pub fn self_ty(&self) -> Option<Ty<'tcx>> {
        self.types.get_self().map(|&t| t)
    }

    // Clones `self` and fills in the (previously empty) self space.
    pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> {
        assert!(self.self_ty().is_none());
        let mut s = (*self).clone();
        s.types.push(SelfSpace, self_ty);
        s
    }

    // Clones `self` and fills in the (previously empty) assoc space.
    pub fn with_assoc_tys(&self,
                          assoc_tys: Vec<Ty<'tcx>>)
                          -> Substs<'tcx> {
        assert!(self.types.is_empty_in(AssocSpace));
        let mut s = (*self).clone();
        s.types.replace(AssocSpace, assoc_tys);
        s
    }

    pub fn erase_regions(self) -> Substs<'tcx> {
        let Substs { types, regions: _ } = self;
        Substs { types: types, regions: ErasedRegions }
    }

    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
    /// to easily access the set of region substitutions.
    pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
        match self.regions {
            ErasedRegions => panic!("Erased regions only expected in trans"),
            NonerasedRegions(ref r) => r
        }
    }

    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
    /// to easily access the set of region substitutions.
    pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
        match self.regions {
            ErasedRegions => panic!("Erased regions only expected in trans"),
            NonerasedRegions(ref mut r) => r
        }
    }

    // Consumes `self`, appending method type/region parameters to the
    // fn space.
    pub fn with_method(self,
                       m_types: Vec<Ty<'tcx>>,
                       m_regions: Vec<ty::Region>)
                       -> Substs<'tcx>
    {
        let Substs { types, regions } = self;
        let types = types.with_vec(FnSpace, m_types);
        let regions = regions.map(m_regions,
                                  |r, m_regions| r.with_vec(FnSpace, m_regions));
        Substs { types: types, regions: regions }
    }
}

impl RegionSubsts {
    // Applies `op` to the region vector, passing `ErasedRegions` through
    // unchanged.
    fn map<A>(self,
              a: A,
              op: |VecPerParamSpace<ty::Region>, A| -> VecPerParamSpace<ty::Region>)
              -> RegionSubsts {
        match self {
            ErasedRegions => ErasedRegions,
            NonerasedRegions(r) => NonerasedRegions(op(r, a))
        }
    }

    pub fn is_erased(&self) -> bool {
        match *self {
            ErasedRegions => true,
            NonerasedRegions(_) => false,
        }
    }
}

///////////////////////////////////////////////////////////////////////////
// ParamSpace

#[deriving(PartialOrd, Ord, PartialEq, Eq,
           Clone, Hash, Encodable, Decodable, Show)]
pub enum ParamSpace {
    TypeSpace,  // Type parameters attached to a type definition, trait, or impl
    SelfSpace,  // Self parameter on a trait
    AssocSpace, // Assoc types defined in a trait/impl
    FnSpace,    // Type parameters attached to a method or fn
}

impl Copy for ParamSpace {}

impl ParamSpace {
    pub fn all() -> [ParamSpace, ..4] {
        [TypeSpace, SelfSpace, AssocSpace, FnSpace]
    }

    pub fn to_uint(self) -> uint {
        match self {
            TypeSpace => 0,
            SelfSpace => 1,
            AssocSpace => 2,
            FnSpace => 3,
        }
    }

    pub fn from_uint(u: uint) -> ParamSpace {
        match u {
            0 => TypeSpace,
            1 => SelfSpace,
            2 => AssocSpace,
            3 => FnSpace,
            _ => panic!("Invalid ParamSpace: {}", u)
        }
    }
}

/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[deriving(PartialEq, Eq, Clone, Hash, Encodable, Decodable)]
pub struct VecPerParamSpace<T> {
    // This was originally represented as a tuple with one Vec<T> for
    // each variant of ParamSpace, and that remains the abstraction
    // that it provides to its clients.
    //
    // Here is how the representation corresponds to the abstraction
    // i.e. the "abstraction function" AF:
    //
    // AF(self) = (self.content[..self.type_limit],
    //             self.content[self.type_limit..self.self_limit],
    //             self.content[self.self_limit..self.assoc_limit],
    //             self.content[self.assoc_limit..])
    type_limit: uint,
    self_limit: uint,
    assoc_limit: uint,
    content: Vec<T>,
}

/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
    pub types: Vec<T>,
    pub selfs: Vec<T>,
    pub assocs: Vec<T>,
    pub fns: Vec<T>,
}

impl<T:fmt::Show> fmt::Show for VecPerParamSpace<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(fmt, "VecPerParamSpace {{"));
        for space in ParamSpace::all().iter() {
            try!(write!(fmt, "{}: {}, ", *space, self.get_slice(*space)));
        }
        try!(write!(fmt, "}}"));
        Ok(())
    }
}

impl<T> VecPerParamSpace<T> {
    // Half-open [start, limit) range of `content` covered by `space`.
    fn limits(&self, space: ParamSpace) -> (uint, uint) {
        match space {
            TypeSpace => (0, self.type_limit),
            SelfSpace => (self.type_limit, self.self_limit),
            AssocSpace => (self.self_limit, self.assoc_limit),
            FnSpace => (self.assoc_limit, self.content.len()),
        }
    }

    pub fn empty() -> VecPerParamSpace<T> {
        VecPerParamSpace {
            type_limit: 0,
            self_limit: 0,
            assoc_limit: 0,
            content: Vec::new()
        }
    }

    pub fn params_from_type(types: Vec<T>) -> VecPerParamSpace<T> {
        VecPerParamSpace::empty().with_vec(TypeSpace, types)
    }

    /// `t` is the type space.
    /// `s` is the self space.
    /// `a` is the assoc space.
    /// `f` is the fn space.
    pub fn new(t: Vec<T>, s: Vec<T>, a: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
        let type_limit = t.len();
        let self_limit = type_limit + s.len();
        let assoc_limit = self_limit + a.len();

        let mut content = t;
        content.extend(s.into_iter());
        content.extend(a.into_iter());
        content.extend(f.into_iter());

        VecPerParamSpace {
            type_limit: type_limit,
            self_limit: self_limit,
            assoc_limit: assoc_limit,
            content: content,
        }
    }

    // Internal constructor: caller supplies a pre-flattened `content` and
    // the matching limits; no validation is performed.
    fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint,
                    assoc_limit: uint)
                    -> VecPerParamSpace<T> {
        VecPerParamSpace {
            type_limit: type_limit,
            self_limit: self_limit,
            assoc_limit: assoc_limit,
            content: content,
        }
    }

    /// Appends `value` to the vector associated with `space`.
    ///
    /// Unlike the `push` method in `Vec`, this should not be assumed
    /// to be a cheap operation (even when amortized over many calls).
    pub fn push(&mut self, space: ParamSpace, value: T) {
        let (_, limit) = self.limits(space);
        // Bump every limit at or after `space` so later spaces shift right.
        match space {
            TypeSpace => { self.type_limit += 1; self.self_limit += 1; self.assoc_limit += 1; }
            SelfSpace => { self.self_limit += 1; self.assoc_limit += 1; }
            AssocSpace => { self.assoc_limit += 1; }
            FnSpace => { }
        }
        self.content.insert(limit, value);
    }

    pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
        let (start, limit) = self.limits(space);
        if start == limit {
            None
        } else {
            // Shrink the limits of `space` and all later spaces.
            match space {
                TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; self.assoc_limit -= 1; }
                SelfSpace => { self.self_limit -= 1; self.assoc_limit -= 1; }
                AssocSpace => { self.assoc_limit -= 1; }
                FnSpace => {}
            }
            self.content.remove(limit - 1)
        }
    }

    pub fn truncate(&mut self, space: ParamSpace, len: uint) {
        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
        while self.len(space) > len {
            self.pop(space);
        }
    }

    pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
        self.truncate(space, 0);
        for t in elems.into_iter() {
            self.push(space, t);
        }
    }

    pub fn get_self<'a>(&'a self) -> Option<&'a T> {
        let v = self.get_slice(SelfSpace);
        assert!(v.len() <= 1);
        if v.len() == 0 { None } else { Some(&v[0]) }
    }

    pub fn len(&self, space: ParamSpace) -> uint {
        self.get_slice(space).len()
    }

    pub fn is_empty_in(&self, space: ParamSpace) -> bool {
        self.len(space) == 0
    }

    pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
        let (start, limit) = self.limits(space);
        self.content.slice(start, limit)
    }

    pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
        let (start, limit) = self.limits(space);
        self.content.slice_mut(start, limit)
    }

    pub fn opt_get<'a>(&'a self,
                       space: ParamSpace,
                       index: uint)
                       -> Option<&'a T> {
        let v = self.get_slice(space);
        if index < v.len() { Some(&v[index]) } else { None }
    }

    pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
        &self.get_slice(space)[index]
    }

    pub fn iter<'a>(&'a self) -> Items<'a,T> {
        self.content.iter()
    }

    pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
        EnumeratedItems::new(self)
    }

    pub fn as_slice(&self) -> &[T] {
        self.content.as_slice()
    }

    pub fn all_vecs(&self, pred: |&[T]| -> bool) -> bool {
        // NOTE(review): `AssocSpace` is missing from this list, so
        // `all_vecs` — and therefore `is_empty` below — never inspects the
        // assoc slice. Looks like a bug; confirm against later revisions.
        let spaces = [TypeSpace, SelfSpace, FnSpace];
        spaces.iter().all(|&space| { pred(self.get_slice(space)) })
    }

    pub fn all(&self, pred: |&T| -> bool) -> bool {
        self.iter().all(pred)
    }

    pub fn any(&self, pred: |&T| -> bool) -> bool {
        self.iter().any(pred)
    }

    pub fn is_empty(&self) -> bool {
        self.all_vecs(|v| v.is_empty())
    }

    // Maps each element, keeping the per-space partition intact.
    pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
        let result = self.iter().map(pred).collect();
        VecPerParamSpace::new_internal(result,
                                       self.type_limit,
                                       self.self_limit,
                                       self.assoc_limit)
    }

    pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
        P: FnMut((ParamSpace, uint, &T)) -> U,
    {
        let result = self.iter_enumerated().map(pred).collect();
        VecPerParamSpace::new_internal(result,
                                       self.type_limit,
                                       self.self_limit,
                                       self.assoc_limit)
    }

    pub fn map_move<U>(self, pred: |T| -> U) -> VecPerParamSpace<U> {
        let SeparateVecsPerParamSpace {
            types: t,
            selfs: s,
            assocs: a,
            fns: f
        } = self.split();

        VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
                              s.into_iter().map(|p| pred(p)).collect(),
                              a.into_iter().map(|p| pred(p)).collect(),
                              f.into_iter().map(|p| pred(p)).collect())
    }

    // Splits the flat `content` back into one Vec per space, consuming
    // `self`.
    pub fn split(self) -> SeparateVecsPerParamSpace<T> {
        let VecPerParamSpace { type_limit, self_limit, assoc_limit, content } = self;

        let mut content_iter = content.into_iter();

        SeparateVecsPerParamSpace {
            types: content_iter.by_ref().take(type_limit).collect(),
            selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
            assocs: content_iter.by_ref().take(assoc_limit - self_limit).collect(),
            fns: content_iter.collect()
        }
    }

    pub fn with_vec(mut self, space: ParamSpace, vec: Vec<T>)
                    -> VecPerParamSpace<T>
    {
        assert!(self.is_empty_in(space));
        self.replace(space, vec);
        self
    }
}

// Iterator over (space, index-within-space, element) triples.
pub struct EnumeratedItems<'a,T:'a> {
    vec: &'a VecPerParamSpace<T>,
    space_index: uint,
    elem_index: uint
}

impl<'a,T> EnumeratedItems<'a,T> {
    fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
        let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
        result.adjust_space();
        result
    }

    // Skips forward past exhausted (or empty) spaces.
    fn adjust_space(&mut self) {
        let spaces = ParamSpace::all();
        while
            self.space_index < spaces.len() &&
            self.elem_index >= self.vec.len(spaces[self.space_index])
        {
            self.space_index += 1;
            self.elem_index = 0;
        }
    }
}

impl<'a,T> Iterator<(ParamSpace, uint, &'a T)> for EnumeratedItems<'a,T> {
    fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
        let spaces = ParamSpace::all();
        if self.space_index < spaces.len() {
            let space = spaces[self.space_index];
            let index = self.elem_index;
            let item = self.vec.get(space, index);

            self.elem_index += 1;
            self.adjust_space();

            Some((space, index, item))
        } else {
            None
        }
    }
}

///////////////////////////////////////////////////////////////////////////
// Public trait `Subst`
//
// Just call `foo.subst(tcx, substs)` to perform a substitution across
// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when
// there is more information available (for better errors).

pub trait Subst<'tcx> {
    fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self {
        self.subst_spanned(tcx, substs, None)
    }

    fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>,
                     substs: &Substs<'tcx>,
                     span: Option<Span>)
                     -> Self;
}

impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T {
    fn subst_spanned(&self,
                     tcx: &ty::ctxt<'tcx>,
                     substs: &Substs<'tcx>,
                     span: Option<Span>)
                     -> T
    {
        let mut folder = SubstFolder { tcx: tcx,
                                       substs: substs,
                                       span: span,
                                       root_ty: None,
                                       ty_stack_depth: 0,
                                       region_binders_passed: 0 };
        (*self).fold_with(&mut folder)
    }
}

///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.

struct SubstFolder<'a, 'tcx: 'a> {
    tcx: &'a ty::ctxt<'tcx>,
    substs: &'a Substs<'tcx>,

    // The location for which the substitution is performed, if available.
    span: Option<Span>,

    // The root type that is being substituted, if available.
    root_ty: Option<Ty<'tcx>>,

    // Depth of type stack
    ty_stack_depth: uint,

    // Number of region binders we have passed through while doing the substitution
    region_binders_passed: uint,
}

impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
    fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }

    fn enter_region_binder(&mut self) {
        self.region_binders_passed += 1;
    }

    fn exit_region_binder(&mut self) {
        self.region_binders_passed -= 1;
    }

    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
        // Note: This routine only handles regions that are bound on
        // type declarations and other outer declarations, not those
        // bound in *fn types*. Region substitution of the bound
        // regions that appear in a function signature is done using
        // the specialized routine `ty::replace_late_regions()`.
        match r {
            ty::ReEarlyBound(_, space, i, region_name) => {
                match self.substs.regions {
                    ErasedRegions => ty::ReStatic,
                    NonerasedRegions(ref regions) =>
                        match regions.opt_get(space, i) {
                            Some(&r) => {
                                self.shift_region_through_binders(r)
                            }
                            None => {
                                let span = self.span.unwrap_or(DUMMY_SP);
                                self.tcx().sess.span_bug(
                                    span,
                                    format!("Type parameter out of range \
                                     when substituting in region {} (root type={}) \
                                     (space={}, index={})",
                                            region_name.as_str(),
                                            self.root_ty.repr(self.tcx()),
                                            space, i).as_slice());
                            }
                        }
                }
            }
            _ => r
        }
    }

    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        // Fast path: nothing to substitute in this type.
        if !ty::type_needs_subst(t) {
            return t;
        }

        // track the root type we were asked to substitute
        let depth = self.ty_stack_depth;
        if depth == 0 {
            self.root_ty = Some(t);
        }
        self.ty_stack_depth += 1;

        let t1 = match t.sty {
            ty::ty_param(p) => {
                self.ty_for_param(p, t)
            }
            _ => {
                ty_fold::super_fold_ty(self, t)
            }
        };

        assert_eq!(depth + 1, self.ty_stack_depth);
        self.ty_stack_depth -= 1;
        if depth == 0 {
            self.root_ty = None;
        }

        return t1;
    }
}

impl<'a,'tcx> SubstFolder<'a,'tcx> {
    fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
        // Look up the type in the substitutions. It really should be in there.
        let opt_ty = self.substs.types.opt_get(p.space, p.idx);
        let ty = match opt_ty {
            Some(t) => *t,
            None => {
                let span = self.span.unwrap_or(DUMMY_SP);
                self.tcx().sess.span_bug(
                    span,
                    format!("Type parameter `{}` ({}/{}/{}) out of range \
                             when substituting (root type={}) substs={}",
                            p.repr(self.tcx()),
                            source_ty.repr(self.tcx()),
                            p.space,
                            p.idx,
                            self.root_ty.repr(self.tcx()),
                            self.substs.repr(self.tcx())).as_slice());
            }
        };

        self.shift_regions_through_binders(ty)
    }

    /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs
    /// when we are substituting a type with escaping regions into a context where we have passed
    /// through region binders. That's quite a mouthful. Let's see an example:
    ///
    /// ```
    /// type Func<A> = fn(A);
    /// type MetaFunc = for<'a> fn(Func<&'a int>)
    /// ```
    ///
    /// The type `MetaFunc`, when fully expanded, will be
    ///
    ///     for<'a> fn(fn(&'a int))
    ///             ^~ ^~ ^~~
    ///             |  |  |
    ///             |  |  DebruijnIndex of 2
    ///             Binders
    ///
    /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
    /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
    /// over the inner binder (remember that we count Debruijn indices from 1). However, in the
    /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a
    /// debruijn index of 1. It's only during the substitution that we can see we must increase the
    /// depth by 1 to account for the binder that we passed through.
    ///
    /// As a second example, consider this twist:
    ///
    /// ```
    /// type FuncTuple<A> = (A,fn(A));
    /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>)
    /// ```
    ///
    /// Here the final type will be:
    ///
    ///     for<'a> fn((&'a int, fn(&'a int)))
    ///                 ^~~         ^~~
    ///                 |           |
    ///          DebruijnIndex of 1 |
    ///                      DebruijnIndex of 2
    ///
    /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the
    /// first case we do not increase the Debruijn index and in the second case we do. The reason
    /// is that only in the second case have we passed through a fn binder.
fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { debug!("shift_regions(ty={}, region_binders_passed={}, type_has_escaping_regions={})", ty.repr(self.tcx()), self.region_binders_passed, ty::type_has_escaping_regions(ty)); if self.region_binders_passed == 0 || !ty::type_has_escaping_regions(ty) { return ty; } let result = ty_fold::shift_regions(self.tcx(), self.region_binders_passed, &ty); debug!("shift_regions: shifted result = {}", result.repr(self.tcx())); result } fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region { ty_fold::shift_region(region, self.region_binders_passed) } } librustc: fix fallout // Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Type substitutions. pub use self::ParamSpace::*; pub use self::RegionSubsts::*; use middle::ty::{mod, Ty}; use middle::ty_fold::{mod, TypeFoldable, TypeFolder}; use util::ppaux::Repr; use std::fmt; use std::slice::Items; use std::vec::Vec; use syntax::codemap::{Span, DUMMY_SP}; /////////////////////////////////////////////////////////////////////////// /// A substitution mapping type/region parameters to new values. We /// identify each in-scope parameter by an *index* and a *parameter /// space* (which indices where the parameter is defined; see /// `ParamSpace`). #[deriving(Clone, PartialEq, Eq, Hash, Show)] pub struct Substs<'tcx> { pub types: VecPerParamSpace<Ty<'tcx>>, pub regions: RegionSubsts, } /// Represents the values to use when substituting lifetime parameters. 
/// If the value is `ErasedRegions`, then this subst is occurring during
/// trans, and all region parameters will be replaced with `ty::ReStatic`.
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub enum RegionSubsts {
    ErasedRegions,
    NonerasedRegions(VecPerParamSpace<ty::Region>)
}

impl<'tcx> Substs<'tcx> {
    /// Builds a substitution with non-erased regions from per-space vectors.
    pub fn new(t: VecPerParamSpace<Ty<'tcx>>,
               r: VecPerParamSpace<ty::Region>)
               -> Substs<'tcx>
    {
        Substs { types: t, regions: NonerasedRegions(r) }
    }

    /// Substitution for an item whose parameters live only in `TypeSpace`.
    pub fn new_type(t: Vec<Ty<'tcx>>,
                    r: Vec<ty::Region>)
                    -> Substs<'tcx>
    {
        Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new(), Vec::new()),
                    VecPerParamSpace::new(r, Vec::new(), Vec::new(), Vec::new()))
    }

    /// Substitution for a trait reference: `t` type params, `s` the self
    /// type (goes into `SelfSpace`), `a` the associated types.
    pub fn new_trait(t: Vec<Ty<'tcx>>,
                     r: Vec<ty::Region>,
                     a: Vec<Ty<'tcx>>,
                     s: Ty<'tcx>)
                     -> Substs<'tcx>
    {
        Substs::new(VecPerParamSpace::new(t, vec!(s), a, Vec::new()),
                    VecPerParamSpace::new(r, Vec::new(), Vec::new(), Vec::new()))
    }

    /// Builds a substitution whose regions are erased (trans only).
    pub fn erased(t: VecPerParamSpace<Ty<'tcx>>) -> Substs<'tcx>
    {
        Substs { types: t, regions: ErasedRegions }
    }

    pub fn empty() -> Substs<'tcx> {
        Substs {
            types: VecPerParamSpace::empty(),
            regions: NonerasedRegions(VecPerParamSpace::empty()),
        }
    }

    /// Empty substitution with erased regions, for use in trans.
    pub fn trans_empty() -> Substs<'tcx> {
        Substs {
            types: VecPerParamSpace::empty(),
            regions: ErasedRegions
        }
    }

    /// True when applying this substitution would change nothing.
    pub fn is_noop(&self) -> bool {
        let regions_is_noop = match self.regions {
            ErasedRegions => false, // may be used to canonicalize
            NonerasedRegions(ref regions) => regions.is_empty(),
        };

        regions_is_noop && self.types.is_empty()
    }

    pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
        *self.types.get(ty_param_def.space, ty_param_def.index)
    }

    /// True if any type or region in the substitution escapes `depth` binders.
    pub fn has_regions_escaping_depth(&self, depth: uint) -> bool {
        self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
            match self.regions {
                ErasedRegions =>
                    false,
                NonerasedRegions(ref regions) =>
                    regions.iter().any(|r| r.escapes_depth(depth)),
            }
        }
    }

    pub fn self_ty(&self) -> Option<Ty<'tcx>> {
        self.types.get_self().map(|&t| t)
    }

    /// Returns a copy with `self_ty` installed in `SelfSpace`;
    /// panics (via assert) if a self type is already present.
    pub fn with_self_ty(&self, self_ty: Ty<'tcx>)
                        -> Substs<'tcx>
    {
        assert!(self.self_ty().is_none());
        let mut s = (*self).clone();
        s.types.push(SelfSpace, self_ty);
        s
    }

    /// Returns a copy with `assoc_tys` installed in `AssocSpace`
    /// (which must previously be empty).
    pub fn with_assoc_tys(&self, assoc_tys: Vec<Ty<'tcx>>) -> Substs<'tcx> {
        assert!(self.types.is_empty_in(AssocSpace));
        let mut s = (*self).clone();
        s.types.replace(AssocSpace, assoc_tys);
        s
    }

    pub fn erase_regions(self) -> Substs<'tcx> {
        let Substs { types, regions: _ } = self;
        Substs { types: types, regions: ErasedRegions }
    }

    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
    /// to easily access the set of region substitutions.
    pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
        match self.regions {
            ErasedRegions => panic!("Erased regions only expected in trans"),
            NonerasedRegions(ref r) => r
        }
    }

    /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method
    /// to easily access the set of region substitutions.
    pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace<ty::Region> {
        match self.regions {
            ErasedRegions => panic!("Erased regions only expected in trans"),
            NonerasedRegions(ref mut r) => r
        }
    }

    /// Extends `FnSpace` with a method's own type/region parameters.
    pub fn with_method(self,
                       m_types: Vec<Ty<'tcx>>,
                       m_regions: Vec<ty::Region>)
                       -> Substs<'tcx>
    {
        let Substs { types, regions } = self;
        let types = types.with_vec(FnSpace, m_types);
        let regions = regions.map(m_regions,
                                  |r, m_regions| r.with_vec(FnSpace, m_regions));
        Substs { types: types, regions: regions }
    }
}

impl RegionSubsts {
    /// Applies `op` to the region vector when regions are non-erased;
    /// the extra argument `a` threads caller state through the closure.
    fn map<A>(self,
              a: A,
              op: |VecPerParamSpace<ty::Region>, A| -> VecPerParamSpace<ty::Region>)
              -> RegionSubsts {
        match self {
            ErasedRegions => ErasedRegions,
            NonerasedRegions(r) => NonerasedRegions(op(r, a))
        }
    }

    pub fn is_erased(&self) -> bool {
        match *self {
            ErasedRegions => true,
            NonerasedRegions(_) => false,
        }
    }
}

///////////////////////////////////////////////////////////////////////////
// ParamSpace

#[deriving(PartialOrd, Ord, PartialEq, Eq,
           Clone, Hash, Encodable, Decodable, Show)]
pub enum ParamSpace {
    TypeSpace,  // Type parameters attached to a type definition, trait, or impl
    SelfSpace,  // Self parameter on a trait
    AssocSpace, // Assoc types defined in a trait/impl
    FnSpace,    // Type parameters attached to a method or fn
}

impl Copy for ParamSpace {}

impl ParamSpace {
    pub fn all() -> [ParamSpace, ..4] {
        [TypeSpace, SelfSpace, AssocSpace, FnSpace]
    }

    pub fn to_uint(self) -> uint {
        match self {
            TypeSpace => 0,
            SelfSpace => 1,
            AssocSpace => 2,
            FnSpace => 3,
        }
    }

    pub fn from_uint(u: uint) -> ParamSpace {
        match u {
            0 => TypeSpace,
            1 => SelfSpace,
            2 => AssocSpace,
            3 => FnSpace,
            _ => panic!("Invalid ParamSpace: {}", u)
        }
    }
}

/// Vector of things sorted by param space. Used to keep
/// the set of things declared on the type, self, or method
/// distinct.
#[deriving(PartialEq, Eq, Clone, Hash, Encodable, Decodable)]
pub struct VecPerParamSpace<T> {
    // This was originally represented as a tuple with one Vec<T> for
    // each variant of ParamSpace, and that remains the abstraction
    // that it provides to its clients.
    //
    // Here is how the representation corresponds to the abstraction
    // i.e. the "abstraction function" AF:
    //
    // AF(self) = (self.content[..self.type_limit],
    //             self.content[self.type_limit..self.self_limit],
    //             self.content[self.self_limit..self.assoc_limit],
    //             self.content[self.assoc_limit..])
    type_limit: uint,
    self_limit: uint,
    assoc_limit: uint,
    content: Vec<T>,
}

/// The `split` function converts one `VecPerParamSpace` into this
/// `SeparateVecsPerParamSpace` structure.
pub struct SeparateVecsPerParamSpace<T> {
    pub types: Vec<T>,
    pub selfs: Vec<T>,
    pub assocs: Vec<T>,
    pub fns: Vec<T>,
}

impl<T:fmt::Show> fmt::Show for VecPerParamSpace<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        try!(write!(fmt, "VecPerParamSpace {{"));
        for space in ParamSpace::all().iter() {
            try!(write!(fmt, "{}: {}, ", *space, self.get_slice(*space)));
        }
        try!(write!(fmt, "}}"));
        Ok(())
    }
}

impl<T> VecPerParamSpace<T> {
    // Half-open range [start, limit) of `space` within `content`.
    fn limits(&self, space: ParamSpace) -> (uint, uint) {
        match space {
            TypeSpace => (0, self.type_limit),
            SelfSpace => (self.type_limit, self.self_limit),
            AssocSpace => (self.self_limit, self.assoc_limit),
            FnSpace => (self.assoc_limit, self.content.len()),
        }
    }

    pub fn empty() -> VecPerParamSpace<T> {
        VecPerParamSpace {
            type_limit: 0,
            self_limit: 0,
            assoc_limit: 0,
            content: Vec::new()
        }
    }

    pub fn params_from_type(types: Vec<T>) -> VecPerParamSpace<T> {
        VecPerParamSpace::empty().with_vec(TypeSpace, types)
    }

    /// `t` is the type space.
    /// `s` is the self space.
    /// `a` is the assoc space.
    /// `f` is the fn space.
    pub fn new(t: Vec<T>, s: Vec<T>, a: Vec<T>, f: Vec<T>) -> VecPerParamSpace<T> {
        let type_limit = t.len();
        let self_limit = type_limit + s.len();
        let assoc_limit = self_limit + a.len();

        let mut content = t;
        content.extend(s.into_iter());
        content.extend(a.into_iter());
        content.extend(f.into_iter());

        VecPerParamSpace {
            type_limit: type_limit,
            self_limit: self_limit,
            assoc_limit: assoc_limit,
            content: content,
        }
    }

    // Rebuilds from an already-flattened `content`; limits must be
    // consistent with it (callers pass limits copied from an existing
    // VecPerParamSpace of the same shape).
    fn new_internal(content: Vec<T>,
                    type_limit: uint,
                    self_limit: uint,
                    assoc_limit: uint)
                    -> VecPerParamSpace<T>
    {
        VecPerParamSpace {
            type_limit: type_limit,
            self_limit: self_limit,
            assoc_limit: assoc_limit,
            content: content,
        }
    }

    /// Appends `value` to the vector associated with `space`.
    ///
    /// Unlike the `push` method in `Vec`, this should not be assumed
    /// to be a cheap operation (even when amortized over many calls).
    pub fn push(&mut self, space: ParamSpace, value: T) {
        // Insert at the end of `space`'s segment and bump every later limit.
        let (_, limit) = self.limits(space);
        match space {
            TypeSpace => { self.type_limit += 1; self.self_limit += 1; self.assoc_limit += 1; }
            SelfSpace => { self.self_limit += 1; self.assoc_limit += 1; }
            AssocSpace => { self.assoc_limit += 1; }
            FnSpace => { }
        }
        self.content.insert(limit, value);
    }

    /// Removes and returns the last element of `space`'s segment,
    /// or `None` when that segment is empty.
    pub fn pop(&mut self, space: ParamSpace) -> Option<T> {
        let (start, limit) = self.limits(space);
        if start == limit {
            None
        } else {
            match space {
                TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; self.assoc_limit -= 1; }
                SelfSpace => { self.self_limit -= 1; self.assoc_limit -= 1; }
                AssocSpace => { self.assoc_limit -= 1; }
                FnSpace => {}
            }
            self.content.remove(limit - 1)
        }
    }

    pub fn truncate(&mut self, space: ParamSpace, len: uint) {
        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
        while self.len(space) > len {
            self.pop(space);
        }
    }

    /// Replaces the entire contents of `space` with `elems`.
    pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
        // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
        self.truncate(space, 0);
        for t in elems.into_iter() {
            self.push(space, t);
        }
    }

    /// Returns the (at most one) element of `SelfSpace`.
    pub fn get_self<'a>(&'a self) -> Option<&'a T> {
        let v = self.get_slice(SelfSpace);
        assert!(v.len() <= 1);
        if v.len() == 0 { None } else { Some(&v[0]) }
    }

    pub fn len(&self, space: ParamSpace) -> uint {
        self.get_slice(space).len()
    }

    pub fn is_empty_in(&self, space: ParamSpace) -> bool {
        self.len(space) == 0
    }

    pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] {
        let (start, limit) = self.limits(space);
        self.content.slice(start, limit)
    }

    pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
        let (start, limit) = self.limits(space);
        self.content.slice_mut(start, limit)
    }

    /// Checked indexed lookup within one space.
    pub fn opt_get<'a>(&'a self,
                       space: ParamSpace,
                       index: uint)
                       -> Option<&'a T> {
        let v = self.get_slice(space);
        if index < v.len() { Some(&v[index]) } else { None }
    }

    /// Panicking indexed lookup within one space.
    pub fn get<'a>(&'a self, space: ParamSpace, index: uint) -> &'a T {
        &self.get_slice(space)[index]
    }

    /// Iterates over all elements, in space order.
    pub fn iter<'a>(&'a self) -> Items<'a,T> {
        self.content.iter()
    }

    pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
        EnumeratedItems::new(self)
    }

    pub fn as_slice(&self) -> &[T] {
        self.content.as_slice()
    }

    // NOTE(review): this list omits AssocSpace, so `all_vecs` (and hence
    // `is_empty`) never inspects assoc entries — confirm intentional.
    pub fn all_vecs(&self, pred: |&[T]| -> bool) -> bool {
        let spaces = [TypeSpace, SelfSpace, FnSpace];
        spaces.iter().all(|&space| { pred(self.get_slice(space)) })
    }

    pub fn all<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
        self.iter().all(pred)
    }

    pub fn any<P>(&self, pred: P) -> bool where P: FnMut(&T) -> bool {
        self.iter().any(pred)
    }

    pub fn is_empty(&self) -> bool {
        self.all_vecs(|v| v.is_empty())
    }

    /// Maps each element by reference, preserving the space layout.
    pub fn map<U, P>(&self, pred: P) -> VecPerParamSpace<U> where P: FnMut(&T) -> U {
        let result = self.iter().map(pred).collect();
        VecPerParamSpace::new_internal(result,
                                       self.type_limit,
                                       self.self_limit,
                                       self.assoc_limit)
    }

    /// Like `map`, but the closure also receives `(space, index)`.
    pub fn map_enumerated<U, P>(&self, pred: P) -> VecPerParamSpace<U> where
        P: FnMut((ParamSpace, uint, &T)) -> U,
    {
        let result = self.iter_enumerated().map(pred).collect();
        VecPerParamSpace::new_internal(result,
                                       self.type_limit,
                                       self.self_limit,
                                       self.assoc_limit)
    }

    /// Maps by value, consuming `self`.
    pub fn map_move<U>(self, pred: |T| -> U) -> VecPerParamSpace<U> {
        let SeparateVecsPerParamSpace {
            types: t,
            selfs: s,
            assocs: a,
            fns: f
        } = self.split();

        VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
                              s.into_iter().map(|p| pred(p)).collect(),
                              a.into_iter().map(|p| pred(p)).collect(),
                              f.into_iter().map(|p| pred(p)).collect())
    }

    /// Splits the flat representation back into one Vec per space.
    pub fn split(self) -> SeparateVecsPerParamSpace<T> {
        let VecPerParamSpace { type_limit, self_limit, assoc_limit, content } = self;

        let mut content_iter = content.into_iter();

        SeparateVecsPerParamSpace {
            types: content_iter.by_ref().take(type_limit).collect(),
            selfs: content_iter.by_ref().take(self_limit - type_limit).collect(),
            assocs: content_iter.by_ref().take(assoc_limit - self_limit).collect(),
            fns: content_iter.collect()
        }
    }

    /// Installs `vec` as the contents of `space` (which must be empty).
    pub fn with_vec(mut self, space: ParamSpace, vec: Vec<T>)
                    -> VecPerParamSpace<T>
    {
        assert!(self.is_empty_in(space));
        self.replace(space, vec);
        self
    }
}

/// Iterator yielding `(space, index-within-space, &elem)` triples.
pub struct EnumeratedItems<'a,T:'a> {
    vec: &'a VecPerParamSpace<T>,
    space_index: uint,
    elem_index: uint
}

impl<'a,T> EnumeratedItems<'a,T> {
    fn new(v: &'a VecPerParamSpace<T>) -> EnumeratedItems<'a,T> {
        let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 };
        result.adjust_space();
        result
    }

    // Skips forward over empty (or exhausted) spaces.
    fn adjust_space(&mut self) {
        let spaces = ParamSpace::all();
        while
            self.space_index < spaces.len() &&
            self.elem_index >= self.vec.len(spaces[self.space_index])
        {
            self.space_index += 1;
            self.elem_index = 0;
        }
    }
}

impl<'a,T> Iterator<(ParamSpace, uint, &'a T)> for EnumeratedItems<'a,T> {
    fn next(&mut self) -> Option<(ParamSpace, uint, &'a T)> {
        let spaces = ParamSpace::all();
        if self.space_index < spaces.len() {
            let space = spaces[self.space_index];
            let index = self.elem_index;
            let item = self.vec.get(space, index);

            self.elem_index += 1;
            self.adjust_space();

            Some((space, index, item))
        } else {
            None
        }
    }
}
/////////////////////////////////////////////////////////////////////////// // Public trait `Subst` // // Just call `foo.subst(tcx, substs)` to perform a substitution across // `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when // there is more information available (for better errors). pub trait Subst<'tcx> { fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self { self.subst_spanned(tcx, substs, None) } fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>, span: Option<Span>) -> Self; } impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T { fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>, span: Option<Span>) -> T { let mut folder = SubstFolder { tcx: tcx, substs: substs, span: span, root_ty: None, ty_stack_depth: 0, region_binders_passed: 0 }; (*self).fold_with(&mut folder) } } /////////////////////////////////////////////////////////////////////////// // The actual substitution engine itself is a type folder. struct SubstFolder<'a, 'tcx: 'a> { tcx: &'a ty::ctxt<'tcx>, substs: &'a Substs<'tcx>, // The location for which the substitution is performed, if available. span: Option<Span>, // The root type that is being substituted, if available. root_ty: Option<Ty<'tcx>>, // Depth of type stack ty_stack_depth: uint, // Number of region binders we have passed through while doing the substitution region_binders_passed: uint, } impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx } fn enter_region_binder(&mut self) { self.region_binders_passed += 1; } fn exit_region_binder(&mut self) { self.region_binders_passed -= 1; } fn fold_region(&mut self, r: ty::Region) -> ty::Region { // Note: This routine only handles regions that are bound on // type declarations and other outer declarations, not those // bound in *fn types*. 
Region substitution of the bound // regions that appear in a function signature is done using // the specialized routine `ty::replace_late_regions()`. match r { ty::ReEarlyBound(_, space, i, region_name) => { match self.substs.regions { ErasedRegions => ty::ReStatic, NonerasedRegions(ref regions) => match regions.opt_get(space, i) { Some(&r) => { self.shift_region_through_binders(r) } None => { let span = self.span.unwrap_or(DUMMY_SP); self.tcx().sess.span_bug( span, format!("Type parameter out of range \ when substituting in region {} (root type={}) \ (space={}, index={})", region_name.as_str(), self.root_ty.repr(self.tcx()), space, i).as_slice()); } } } } _ => r } } fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { if !ty::type_needs_subst(t) { return t; } // track the root type we were asked to substitute let depth = self.ty_stack_depth; if depth == 0 { self.root_ty = Some(t); } self.ty_stack_depth += 1; let t1 = match t.sty { ty::ty_param(p) => { self.ty_for_param(p, t) } _ => { ty_fold::super_fold_ty(self, t) } }; assert_eq!(depth + 1, self.ty_stack_depth); self.ty_stack_depth -= 1; if depth == 0 { self.root_ty = None; } return t1; } } impl<'a,'tcx> SubstFolder<'a,'tcx> { fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> { // Look up the type in the substitutions. It really should be in there. let opt_ty = self.substs.types.opt_get(p.space, p.idx); let ty = match opt_ty { Some(t) => *t, None => { let span = self.span.unwrap_or(DUMMY_SP); self.tcx().sess.span_bug( span, format!("Type parameter `{}` ({}/{}/{}) out of range \ when substituting (root type={}) substs={}", p.repr(self.tcx()), source_ty.repr(self.tcx()), p.space, p.idx, self.root_ty.repr(self.tcx()), self.substs.repr(self.tcx())).as_slice()); } }; self.shift_regions_through_binders(ty) } /// It is sometimes necessary to adjust the debruijn indices during substitution. 
This occurs /// when we are substituting a type with escaping regions into a context where we have passed /// through region binders. That's quite a mouthful. Let's see an example: /// /// ``` /// type Func<A> = fn(A); /// type MetaFunc = for<'a> fn(Func<&'a int>) /// ``` /// /// The type `MetaFunc`, when fully expanded, will be /// /// for<'a> fn(fn(&'a int)) /// ^~ ^~ ^~~ /// | | | /// | | DebruijnIndex of 2 /// Binders /// /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip /// over the inner binder (remember that we count Debruijn indices from 1). However, in the /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a /// debruijn index of 1. It's only during the substitution that we can see we must increase the /// depth by 1 to account for the binder that we passed through. /// /// As a second example, consider this twist: /// /// ``` /// type FuncTuple<A> = (A,fn(A)); /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>) /// ``` /// /// Here the final type will be: /// /// for<'a> fn((&'a int, fn(&'a int))) /// ^~~ ^~~ /// | | /// DebruijnIndex of 1 | /// DebruijnIndex of 2 /// /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the /// first case we do not increase the Debruijn index and in the second case we do. The reason /// is that only in the second case have we passed through a fn binder. 
fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { debug!("shift_regions(ty={}, region_binders_passed={}, type_has_escaping_regions={})", ty.repr(self.tcx()), self.region_binders_passed, ty::type_has_escaping_regions(ty)); if self.region_binders_passed == 0 || !ty::type_has_escaping_regions(ty) { return ty; } let result = ty_fold::shift_regions(self.tcx(), self.region_binders_passed, &ty); debug!("shift_regions: shifted result = {}", result.repr(self.tcx())); result } fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region { ty_fold::shift_region(region, self.region_binders_passed) } }
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Abstraction of a thread pool for basic parallelism. #![deprecated(since = "1.0.0", reason = "This kind of API needs some time to bake in \ crates.io. Consider trying \ https://github.com/carllerche/syncbox")] #![unstable(feature = "std_misc")] use core::prelude::*; use sync::{Arc, Mutex}; use sync::mpsc::{channel, Sender, Receiver}; use thread::Thread; use thunk::Thunk; struct Sentinel<'a> { jobs: &'a Arc<Mutex<Receiver<Thunk>>>, active: bool } impl<'a> Sentinel<'a> { fn new(jobs: &Arc<Mutex<Receiver<Thunk>>>) -> Sentinel { Sentinel { jobs: jobs, active: true } } // Cancel and destroy this sentinel. fn cancel(mut self) { self.active = false; } } #[unsafe_destructor] impl<'a> Drop for Sentinel<'a> { fn drop(&mut self) { if self.active { spawn_in_pool(self.jobs.clone()) } } } /// A thread pool used to execute functions in parallel. /// /// Spawns `n` worker threads and replenishes the pool if any worker threads /// panic. /// /// # Example /// /// ```rust /// use std::sync::TaskPool; /// use std::iter::AdditiveIterator; /// use std::sync::mpsc::channel; /// /// let pool = TaskPool::new(4u); /// /// let (tx, rx) = channel(); /// for _ in 0..8u { /// let tx = tx.clone(); /// pool.execute(move|| { /// tx.send(1u).unwrap(); /// }); /// } /// /// assert_eq!(rx.iter().take(8u).sum(), 8u); /// ``` pub struct TaskPool { // How the threadpool communicates with subthreads. // // This is the only such Sender, so when it is dropped all subthreads will // quit. 
jobs: Sender<Thunk> } impl TaskPool { /// Spawns a new thread pool with `threads` threads. /// /// # Panics /// /// This function will panic if `threads` is 0. pub fn new(threads: uint) -> TaskPool { assert!(threads >= 1); let (tx, rx) = channel::<Thunk>(); let rx = Arc::new(Mutex::new(rx)); // Threadpool threads for _ in 0..threads { spawn_in_pool(rx.clone()); } TaskPool { jobs: tx } } /// Executes the function `job` on a thread in the pool. pub fn execute<F>(&self, job: F) where F : FnOnce(), F : Send { self.jobs.send(Thunk::new(job)).unwrap(); } } fn spawn_in_pool(jobs: Arc<Mutex<Receiver<Thunk>>>) { Thread::spawn(move || { // Will spawn a new thread on panic unless it is cancelled. let sentinel = Sentinel::new(&jobs); loop { let message = { // Only lock jobs for the time it takes // to get a job, not run it. let lock = jobs.lock().unwrap(); lock.recv() }; match message { Ok(job) => job.invoke(()), // The Taskpool was dropped. Err(..) => break } } sentinel.cancel(); }); } #[cfg(test)] mod test { use prelude::v1::*; use super::*; use sync::mpsc::channel; const TEST_TASKS: uint = 4u; #[test] fn test_works() { use iter::AdditiveIterator; let pool = TaskPool::new(TEST_TASKS); let (tx, rx) = channel(); for _ in 0..TEST_TASKS { let tx = tx.clone(); pool.execute(move|| { tx.send(1u).unwrap(); }); } assert_eq!(rx.iter().take(TEST_TASKS).sum(), TEST_TASKS); } #[test] #[should_fail] fn test_zero_tasks_panic() { TaskPool::new(0); } #[test] fn test_recovery_from_subtask_panic() { use iter::AdditiveIterator; let pool = TaskPool::new(TEST_TASKS); // Panic all the existing threads. for _ in 0..TEST_TASKS { pool.execute(move|| -> () { panic!() }); } // Ensure new threads were spawned to compensate. 
let (tx, rx) = channel(); for _ in 0..TEST_TASKS { let tx = tx.clone(); pool.execute(move|| { tx.send(1u).unwrap(); }); } assert_eq!(rx.iter().take(TEST_TASKS).sum(), TEST_TASKS); } #[test] fn test_should_not_panic_on_drop_if_subtasks_panic_after_drop() { use sync::{Arc, Barrier}; let pool = TaskPool::new(TEST_TASKS); let waiter = Arc::new(Barrier::new(TEST_TASKS + 1)); // Panic all the existing threads in a bit. for _ in 0..TEST_TASKS { let waiter = waiter.clone(); pool.execute(move|| { waiter.wait(); panic!(); }); } drop(pool); // Kick off the failure. waiter.wait(); } } std: Recomend threadpool on crates.io for TaskPool // Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Abstraction of a thread pool for basic parallelism. #![deprecated(since = "1.0.0", reason = "This kind of API needs some time to bake in \ crates.io. This functionality is available through \ https://crates.io/crates/threadpool")] #![unstable(feature = "std_misc")] use core::prelude::*; use sync::{Arc, Mutex}; use sync::mpsc::{channel, Sender, Receiver}; use thread::Thread; use thunk::Thunk; struct Sentinel<'a> { jobs: &'a Arc<Mutex<Receiver<Thunk>>>, active: bool } impl<'a> Sentinel<'a> { fn new(jobs: &Arc<Mutex<Receiver<Thunk>>>) -> Sentinel { Sentinel { jobs: jobs, active: true } } // Cancel and destroy this sentinel. fn cancel(mut self) { self.active = false; } } #[unsafe_destructor] impl<'a> Drop for Sentinel<'a> { fn drop(&mut self) { if self.active { spawn_in_pool(self.jobs.clone()) } } } /// A thread pool used to execute functions in parallel. 
/// /// Spawns `n` worker threads and replenishes the pool if any worker threads /// panic. /// /// # Example /// /// ```rust /// use std::sync::TaskPool; /// use std::iter::AdditiveIterator; /// use std::sync::mpsc::channel; /// /// let pool = TaskPool::new(4u); /// /// let (tx, rx) = channel(); /// for _ in 0..8u { /// let tx = tx.clone(); /// pool.execute(move|| { /// tx.send(1u).unwrap(); /// }); /// } /// /// assert_eq!(rx.iter().take(8u).sum(), 8u); /// ``` pub struct TaskPool { // How the threadpool communicates with subthreads. // // This is the only such Sender, so when it is dropped all subthreads will // quit. jobs: Sender<Thunk> } impl TaskPool { /// Spawns a new thread pool with `threads` threads. /// /// # Panics /// /// This function will panic if `threads` is 0. pub fn new(threads: uint) -> TaskPool { assert!(threads >= 1); let (tx, rx) = channel::<Thunk>(); let rx = Arc::new(Mutex::new(rx)); // Threadpool threads for _ in 0..threads { spawn_in_pool(rx.clone()); } TaskPool { jobs: tx } } /// Executes the function `job` on a thread in the pool. pub fn execute<F>(&self, job: F) where F : FnOnce(), F : Send { self.jobs.send(Thunk::new(job)).unwrap(); } } fn spawn_in_pool(jobs: Arc<Mutex<Receiver<Thunk>>>) { Thread::spawn(move || { // Will spawn a new thread on panic unless it is cancelled. let sentinel = Sentinel::new(&jobs); loop { let message = { // Only lock jobs for the time it takes // to get a job, not run it. let lock = jobs.lock().unwrap(); lock.recv() }; match message { Ok(job) => job.invoke(()), // The Taskpool was dropped. Err(..) 
=> break } } sentinel.cancel(); }); } #[cfg(test)] mod test { use prelude::v1::*; use super::*; use sync::mpsc::channel; const TEST_TASKS: uint = 4u; #[test] fn test_works() { use iter::AdditiveIterator; let pool = TaskPool::new(TEST_TASKS); let (tx, rx) = channel(); for _ in 0..TEST_TASKS { let tx = tx.clone(); pool.execute(move|| { tx.send(1u).unwrap(); }); } assert_eq!(rx.iter().take(TEST_TASKS).sum(), TEST_TASKS); } #[test] #[should_fail] fn test_zero_tasks_panic() { TaskPool::new(0); } #[test] fn test_recovery_from_subtask_panic() { use iter::AdditiveIterator; let pool = TaskPool::new(TEST_TASKS); // Panic all the existing threads. for _ in 0..TEST_TASKS { pool.execute(move|| -> () { panic!() }); } // Ensure new threads were spawned to compensate. let (tx, rx) = channel(); for _ in 0..TEST_TASKS { let tx = tx.clone(); pool.execute(move|| { tx.send(1u).unwrap(); }); } assert_eq!(rx.iter().take(TEST_TASKS).sum(), TEST_TASKS); } #[test] fn test_should_not_panic_on_drop_if_subtasks_panic_after_drop() { use sync::{Arc, Barrier}; let pool = TaskPool::new(TEST_TASKS); let waiter = Arc::new(Barrier::new(TEST_TASKS + 1)); // Panic all the existing threads in a bit. for _ in 0..TEST_TASKS { let waiter = waiter.clone(); pool.execute(move|| { waiter.wait(); panic!(); }); } drop(pool); // Kick off the failure. waiter.wait(); } }
use util::interner; use util::interner::interner; use std::map::HashMap; #[auto_serialize2] #[auto_deserialize2] enum binop { PLUS, MINUS, STAR, SLASH, PERCENT, CARET, AND, OR, SHL, SHR, } #[auto_serialize2] #[auto_deserialize2] enum token { /* Expression-operator symbols. */ EQ, LT, LE, EQEQ, NE, GE, GT, ANDAND, OROR, NOT, TILDE, BINOP(binop), BINOPEQ(binop), /* Structural symbols */ AT, DOT, DOTDOT, ELLIPSIS, COMMA, SEMI, COLON, MOD_SEP, RARROW, LARROW, DARROW, FAT_ARROW, LPAREN, RPAREN, LBRACKET, RBRACKET, LBRACE, RBRACE, POUND, DOLLAR, /* Literals */ LIT_INT(i64, ast::int_ty), LIT_UINT(u64, ast::uint_ty), LIT_INT_UNSUFFIXED(i64), LIT_FLOAT(ast::ident, ast::float_ty), LIT_STR(ast::ident), /* Name components */ IDENT(ast::ident, bool), UNDERSCORE, /* For interpolation */ INTERPOLATED(nonterminal), DOC_COMMENT(ast::ident), EOF, } #[auto_serialize2] #[auto_deserialize2] /// For interpolation during macro expansion. enum nonterminal { nt_item(@ast::item), nt_block(ast::blk), nt_stmt(@ast::stmt), nt_pat( @ast::pat), nt_expr(@ast::expr), nt_ty( @ast::ty), nt_ident(ast::ident, bool), nt_path(@ast::path), nt_tt( @ast::token_tree), //needs @ed to break a circularity nt_matchers(~[ast::matcher]) } fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", STAR => ~"*", SLASH => ~"/", PERCENT => ~"%", CARET => ~"^", AND => ~"&", OR => ~"|", SHL => ~"<<", SHR => ~">>" } } fn to_str(in: @ident_interner, t: token) -> ~str { match t { EQ => ~"=", LT => ~"<", LE => ~"<=", EQEQ => ~"==", NE => ~"!=", GE => ~">=", GT => ~">", NOT => ~"!", TILDE => ~"~", OROR => ~"||", ANDAND => ~"&&", BINOP(op) => binop_to_str(op), BINOPEQ(op) => binop_to_str(op) + ~"=", /* Structural symbols */ AT => ~"@", DOT => ~".", DOTDOT => ~"..", ELLIPSIS => ~"...", COMMA => ~",", SEMI => ~";", COLON => ~":", MOD_SEP => ~"::", RARROW => ~"->", LARROW => ~"<-", DARROW => ~"<->", FAT_ARROW => ~"=>", LPAREN => ~"(", RPAREN => ~")", LBRACKET => ~"[", RBRACKET => ~"]", LBRACE => ~"{", RBRACE => 
~"}", POUND => ~"#", DOLLAR => ~"$", /* Literals */ LIT_INT(c, ast::ty_char) => { ~"'" + char::escape_default(c as char) + ~"'" } LIT_INT(i, t) => { int::to_str(i as int, 10u) + ast_util::int_ty_to_str(t) } LIT_UINT(u, t) => { uint::to_str(u as uint, 10u) + ast_util::uint_ty_to_str(t) } LIT_INT_UNSUFFIXED(i) => { int::to_str(i as int, 10u) } LIT_FLOAT(s, t) => { let mut body = *in.get(s); if body.ends_with(~".") { body = body + ~"0"; // `10.f` is not a float literal } body + ast_util::float_ty_to_str(t) } LIT_STR(s) => { ~"\"" + str::escape_default(*in.get(s)) + ~"\"" } /* Name components */ IDENT(s, _) => *in.get(s), UNDERSCORE => ~"_", /* Other */ DOC_COMMENT(s) => *in.get(s), EOF => ~"<eof>", INTERPOLATED(nt) => { ~"an interpolated " + match nt { nt_item(*) => ~"item", nt_block(*) => ~"block", nt_stmt(*) => ~"statement", nt_pat(*) => ~"pattern", nt_expr(*) => ~"expression", nt_ty(*) => ~"type", nt_ident(*) => ~"identifier", nt_path(*) => ~"path", nt_tt(*) => ~"tt", nt_matchers(*) => ~"matcher sequence" } } } } pure fn can_begin_expr(t: token) -> bool { match t { LPAREN => true, LBRACE => true, LBRACKET => true, IDENT(_, _) => true, UNDERSCORE => true, TILDE => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_STR(_) => true, POUND => true, AT => true, NOT => true, BINOP(MINUS) => true, BINOP(STAR) => true, BINOP(AND) => true, BINOP(OR) => true, // in lambda syntax OROR => true, // in lambda syntax MOD_SEP => true, INTERPOLATED(nt_expr(*)) | INTERPOLATED(nt_ident(*)) | INTERPOLATED(nt_block(*)) | INTERPOLATED(nt_path(*)) => true, _ => false } } /// what's the opposite delimiter? 
// Maps an opening delimiter token to its closing counterpart and vice
// versa; any non-delimiter token is a caller bug and aborts (`fail`).
fn flip_delimiter(t: token::token) -> token::token {
    match t {
        token::LPAREN => token::RPAREN,
        token::LBRACE => token::RBRACE,
        token::LBRACKET => token::RBRACKET,
        token::RPAREN => token::LPAREN,
        token::RBRACE => token::LBRACE,
        token::RBRACKET => token::LBRACKET,
        _ => fail
    }
}

// True for every literal token kind (int, uint, unsuffixed int, float,
// string).
fn is_lit(t: token) -> bool {
    match t {
        LIT_INT(_, _) => true,
        LIT_UINT(_, _) => true,
        LIT_INT_UNSUFFIXED(_) => true,
        LIT_FLOAT(_, _) => true,
        LIT_STR(_) => true,
        _ => false
    }
}

// True iff the token is an identifier (plain or mod-name flavored).
pure fn is_ident(t: token) -> bool {
    match t { IDENT(_, _) => true, _ => false }
}

// True for identifiers and for interpolated path nonterminals, i.e.
// anything the parser can treat as the start of a path.
pure fn is_ident_or_path(t: token) -> bool {
    match t {
        IDENT(_, _) | INTERPOLATED(nt_path(*)) => true,
        _ => false
    }
}

// True only for a "plain" identifier: the bool payload (is_mod_name) must
// be false.
pure fn is_plain_ident(t: token) -> bool {
    match t { IDENT(_, false) => true, _ => false }
}

// True for `|` and `||` — both can open a lambda's parameter list.
pure fn is_bar(t: token) -> bool {
    match t { BINOP(OR) | OROR => true, _ => false }
}

// Pre-interned identifiers with fixed indices.  The `repr` numbers here
// must stay in sync with the prefill vector in mk_ident_interner.
mod special_idents {
    #[legacy_exports];
    use ast::ident;
    const underscore : ident = ident { repr: 0u };
    const anon : ident = ident { repr: 1u };
    const dtor : ident = ident { repr: 2u }; // 'drop', but that's reserved
    const invalid : ident = ident { repr: 3u }; // ''
    const unary : ident = ident { repr: 4u };
    const not_fn : ident = ident { repr: 5u };
    const idx_fn : ident = ident { repr: 6u };
    const unary_minus_fn : ident = ident { repr: 7u };
    const clownshoes_extensions : ident = ident { repr: 8u };
    const self_ : ident = ident { repr: 9u }; // 'self'

    /* for matcher NTs */
    const item : ident = ident { repr: 10u };
    const block : ident = ident { repr: 11u };
    const stmt : ident = ident { repr: 12u };
    const pat : ident = ident { repr: 13u };
    const expr : ident = ident { repr: 14u };
    const ty : ident = ident { repr: 15u };
    const ident : ident = ident { repr: 16u };
    const path : ident = ident { repr: 17u };
    const tt : ident = ident { repr: 18u };
    const matchers : ident = ident { repr: 19u };
    const str : ident = ident { repr: 20u }; // for the type

    /* outside of libsyntax */
    const ty_visitor : ident = ident { repr: 21u };
    const arg : ident = ident { repr:
22u }; const descrim : ident = ident { repr: 23u }; const clownshoe_abi : ident = ident { repr: 24u }; const clownshoe_stack_shim : ident = ident { repr: 25u }; const tydesc : ident = ident { repr: 26u }; const literally_dtor : ident = ident { repr: 27u }; const main : ident = ident { repr: 28u }; const opaque : ident = ident { repr: 29u }; const blk : ident = ident { repr: 30u }; const static : ident = ident { repr: 31u }; const intrinsic : ident = ident { repr: 32u }; const clownshoes_foreign_mod: ident = ident { repr: 33 }; } struct ident_interner { priv interner: util::interner::interner<@~str>, } impl ident_interner { fn intern(val: @~str) -> ast::ident { ast::ident { repr: self.interner.intern(val) } } fn gensym(val: @~str) -> ast::ident { ast::ident { repr: self.interner.gensym(val) } } pure fn get(idx: ast::ident) -> @~str { self.interner.get(idx.repr) } fn len() -> uint { self.interner.len() } } /** Key for thread-local data for sneaking interner information to the * serializer/deserializer. It sounds like a hack because it is one. * Bonus ultra-hack: functions as keys don't work across crates, * so we have to use a unique number. See taskgroup_key! in task.rs * for another case of this. */ macro_rules! 
// Completion of the `macro_rules!` header on the previous line: the
// `interner_key!()` macro expands to a unique task-local-data key.  It is a
// magic number transmuted to a function-pointer type because real functions
// cannot serve as TLS keys across crates (see taskgroup_key! in task.rs).
interner_key ( () => (cast::transmute::<(uint, uint), &fn(+v: @@token::ident_interner)>( (-3 as uint, 0u))) )

/// Returns the task-local identifier interner, creating it and caching it
/// in task-local data on first use.
///
/// Previously this unconditionally built a fresh interner and `assert`ed
/// that none was cached yet, so calling it twice in the same task failed;
/// it now reuses the cached copy, matching the later revision of this file
/// ("allow interner cache to be shared across parsers").
fn mk_ident_interner() -> @ident_interner {
    unsafe {
        match task::local_data::local_data_get(interner_key!()) {
            Some(interner) => *interner,
            None => {
                // The indices here must correspond to the numbers in
                // special_idents.
                let init_vec = ~[
                    @~"_", @~"anon", @~"drop", @~"", @~"unary", @~"!",
                    @~"[]", @~"unary-", @~"__extensions__", @~"self",
                    @~"item", @~"block", @~"stmt", @~"pat", @~"expr",
                    @~"ty", @~"ident", @~"path", @~"tt", @~"matchers",
                    @~"str", @~"TyVisitor", @~"arg", @~"descrim",
                    @~"__rust_abi", @~"__rust_stack_shim", @~"TyDesc",
                    @~"dtor", @~"main", @~"<opaque>", @~"blk", @~"static",
                    @~"intrinsic", @~"__foreign_mod__"
                ];
                let rv = @ident_interner {
                    interner: interner::mk_prefill::<@~str>(init_vec)
                };
                // Cache it so every parser in this task shares one
                // interner (multiple interners would confuse the
                // serializer).
                task::local_data::local_data_set(interner_key!(), @rv);
                rv
            }
        }
    }
}

/* for when we don't care about the contents; doesn't interact with TLD or
   serialization */
fn mk_fake_ident_interner() -> @ident_interner {
    @ident_interner { interner: interner::mk::<@~str>() }
}

/**
 * All the valid words that have meaning in the Rust language.
 *
 * Rust keywords are either 'temporary', 'strict' or 'reserved'. Temporary
 * keywords are contextual and may be used as identifiers anywhere. They are
 * expected to disappear from the grammar soon. Strict keywords may not
 * appear as identifiers at all. Reserved keywords are not used anywhere in
 * the language and may not appear as identifiers.
*/ fn keyword_table() -> HashMap<~str, ()> { let keywords = HashMap(); for temporary_keyword_table().each_key |word| { keywords.insert(word, ()); } for strict_keyword_table().each_key |word| { keywords.insert(word, ()); } for reserved_keyword_table().each_key |word| { keywords.insert(word, ()); } keywords } /// Keywords that may be used as identifiers fn temporary_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"self", ~"static", ]; for keys.each |word| { words.insert(*word, ()); } words } /// Full keywords. May not appear anywhere else. fn strict_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"as", ~"assert", ~"break", ~"const", ~"copy", ~"do", ~"drop", ~"else", ~"enum", ~"export", ~"extern", ~"fail", ~"false", ~"fn", ~"for", ~"if", ~"impl", ~"let", ~"log", ~"loop", ~"match", ~"mod", ~"move", ~"mut", ~"priv", ~"pub", ~"pure", ~"ref", ~"return", ~"struct", ~"true", ~"trait", ~"type", ~"unsafe", ~"use", ~"while" ]; for keys.each |word| { words.insert(*word, ()); } words } fn reserved_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"be" ]; for keys.each |word| { words.insert(*word, ()); } words } impl binop : cmp::Eq { pure fn eq(other: &binop) -> bool { (self as uint) == ((*other) as uint) } pure fn ne(other: &binop) -> bool { !self.eq(other) } } impl token : cmp::Eq { pure fn eq(other: &token) -> bool { match self { EQ => { match (*other) { EQ => true, _ => false } } LT => { match (*other) { LT => true, _ => false } } LE => { match (*other) { LE => true, _ => false } } EQEQ => { match (*other) { EQEQ => true, _ => false } } NE => { match (*other) { NE => true, _ => false } } GE => { match (*other) { GE => true, _ => false } } GT => { match (*other) { GT => true, _ => false } } ANDAND => { match (*other) { ANDAND => true, _ => false } } OROR => { match (*other) { OROR => true, _ => false } } NOT => { match (*other) { NOT => true, _ => false } } TILDE => { match (*other) { TILDE 
=> true, _ => false } } BINOP(e0a) => { match (*other) { BINOP(e0b) => e0a == e0b, _ => false } } BINOPEQ(e0a) => { match (*other) { BINOPEQ(e0b) => e0a == e0b, _ => false } } AT => { match (*other) { AT => true, _ => false } } DOT => { match (*other) { DOT => true, _ => false } } DOTDOT => { match (*other) { DOTDOT => true, _ => false } } ELLIPSIS => { match (*other) { ELLIPSIS => true, _ => false } } COMMA => { match (*other) { COMMA => true, _ => false } } SEMI => { match (*other) { SEMI => true, _ => false } } COLON => { match (*other) { COLON => true, _ => false } } MOD_SEP => { match (*other) { MOD_SEP => true, _ => false } } RARROW => { match (*other) { RARROW => true, _ => false } } LARROW => { match (*other) { LARROW => true, _ => false } } DARROW => { match (*other) { DARROW => true, _ => false } } FAT_ARROW => { match (*other) { FAT_ARROW => true, _ => false } } LPAREN => { match (*other) { LPAREN => true, _ => false } } RPAREN => { match (*other) { RPAREN => true, _ => false } } LBRACKET => { match (*other) { LBRACKET => true, _ => false } } RBRACKET => { match (*other) { RBRACKET => true, _ => false } } LBRACE => { match (*other) { LBRACE => true, _ => false } } RBRACE => { match (*other) { RBRACE => true, _ => false } } POUND => { match (*other) { POUND => true, _ => false } } DOLLAR => { match (*other) { DOLLAR => true, _ => false } } LIT_INT(e0a, e1a) => { match (*other) { LIT_INT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_UINT(e0a, e1a) => { match (*other) { LIT_UINT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_INT_UNSUFFIXED(e0a) => { match (*other) { LIT_INT_UNSUFFIXED(e0b) => e0a == e0b, _ => false } } LIT_FLOAT(e0a, e1a) => { match (*other) { LIT_FLOAT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_STR(e0a) => { match (*other) { LIT_STR(e0b) => e0a == e0b, _ => false } } IDENT(e0a, e1a) => { match (*other) { IDENT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } UNDERSCORE => { match (*other) { 
UNDERSCORE => true, _ => false } } INTERPOLATED(_) => { match (*other) { INTERPOLATED(_) => true, _ => false } } DOC_COMMENT(e0a) => { match (*other) { DOC_COMMENT(e0b) => e0a == e0b, _ => false } } EOF => { match (*other) { EOF => true, _ => false } } } } pure fn ne(other: &token) -> bool { !self.eq(other) } } // Local Variables: // fill-column: 78; // indent-tabs-mode: nil // c-basic-offset: 4 // buffer-file-coding-system: utf-8-unix // End: allow interner cache to be shared across parsers (#3699) use util::interner; use util::interner::interner; use std::map::HashMap; #[auto_serialize2] #[auto_deserialize2] enum binop { PLUS, MINUS, STAR, SLASH, PERCENT, CARET, AND, OR, SHL, SHR, } #[auto_serialize2] #[auto_deserialize2] enum token { /* Expression-operator symbols. */ EQ, LT, LE, EQEQ, NE, GE, GT, ANDAND, OROR, NOT, TILDE, BINOP(binop), BINOPEQ(binop), /* Structural symbols */ AT, DOT, DOTDOT, ELLIPSIS, COMMA, SEMI, COLON, MOD_SEP, RARROW, LARROW, DARROW, FAT_ARROW, LPAREN, RPAREN, LBRACKET, RBRACKET, LBRACE, RBRACE, POUND, DOLLAR, /* Literals */ LIT_INT(i64, ast::int_ty), LIT_UINT(u64, ast::uint_ty), LIT_INT_UNSUFFIXED(i64), LIT_FLOAT(ast::ident, ast::float_ty), LIT_STR(ast::ident), /* Name components */ IDENT(ast::ident, bool), UNDERSCORE, /* For interpolation */ INTERPOLATED(nonterminal), DOC_COMMENT(ast::ident), EOF, } #[auto_serialize2] #[auto_deserialize2] /// For interpolation during macro expansion. 
enum nonterminal { nt_item(@ast::item), nt_block(ast::blk), nt_stmt(@ast::stmt), nt_pat( @ast::pat), nt_expr(@ast::expr), nt_ty( @ast::ty), nt_ident(ast::ident, bool), nt_path(@ast::path), nt_tt( @ast::token_tree), //needs @ed to break a circularity nt_matchers(~[ast::matcher]) } fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", STAR => ~"*", SLASH => ~"/", PERCENT => ~"%", CARET => ~"^", AND => ~"&", OR => ~"|", SHL => ~"<<", SHR => ~">>" } } fn to_str(in: @ident_interner, t: token) -> ~str { match t { EQ => ~"=", LT => ~"<", LE => ~"<=", EQEQ => ~"==", NE => ~"!=", GE => ~">=", GT => ~">", NOT => ~"!", TILDE => ~"~", OROR => ~"||", ANDAND => ~"&&", BINOP(op) => binop_to_str(op), BINOPEQ(op) => binop_to_str(op) + ~"=", /* Structural symbols */ AT => ~"@", DOT => ~".", DOTDOT => ~"..", ELLIPSIS => ~"...", COMMA => ~",", SEMI => ~";", COLON => ~":", MOD_SEP => ~"::", RARROW => ~"->", LARROW => ~"<-", DARROW => ~"<->", FAT_ARROW => ~"=>", LPAREN => ~"(", RPAREN => ~")", LBRACKET => ~"[", RBRACKET => ~"]", LBRACE => ~"{", RBRACE => ~"}", POUND => ~"#", DOLLAR => ~"$", /* Literals */ LIT_INT(c, ast::ty_char) => { ~"'" + char::escape_default(c as char) + ~"'" } LIT_INT(i, t) => { int::to_str(i as int, 10u) + ast_util::int_ty_to_str(t) } LIT_UINT(u, t) => { uint::to_str(u as uint, 10u) + ast_util::uint_ty_to_str(t) } LIT_INT_UNSUFFIXED(i) => { int::to_str(i as int, 10u) } LIT_FLOAT(s, t) => { let mut body = *in.get(s); if body.ends_with(~".") { body = body + ~"0"; // `10.f` is not a float literal } body + ast_util::float_ty_to_str(t) } LIT_STR(s) => { ~"\"" + str::escape_default(*in.get(s)) + ~"\"" } /* Name components */ IDENT(s, _) => *in.get(s), UNDERSCORE => ~"_", /* Other */ DOC_COMMENT(s) => *in.get(s), EOF => ~"<eof>", INTERPOLATED(nt) => { ~"an interpolated " + match nt { nt_item(*) => ~"item", nt_block(*) => ~"block", nt_stmt(*) => ~"statement", nt_pat(*) => ~"pattern", nt_expr(*) => ~"expression", nt_ty(*) => ~"type", nt_ident(*) => 
~"identifier", nt_path(*) => ~"path", nt_tt(*) => ~"tt", nt_matchers(*) => ~"matcher sequence" } } } } pure fn can_begin_expr(t: token) -> bool { match t { LPAREN => true, LBRACE => true, LBRACKET => true, IDENT(_, _) => true, UNDERSCORE => true, TILDE => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_STR(_) => true, POUND => true, AT => true, NOT => true, BINOP(MINUS) => true, BINOP(STAR) => true, BINOP(AND) => true, BINOP(OR) => true, // in lambda syntax OROR => true, // in lambda syntax MOD_SEP => true, INTERPOLATED(nt_expr(*)) | INTERPOLATED(nt_ident(*)) | INTERPOLATED(nt_block(*)) | INTERPOLATED(nt_path(*)) => true, _ => false } } /// what's the opposite delimiter? fn flip_delimiter(t: token::token) -> token::token { match t { token::LPAREN => token::RPAREN, token::LBRACE => token::RBRACE, token::LBRACKET => token::RBRACKET, token::RPAREN => token::LPAREN, token::RBRACE => token::LBRACE, token::RBRACKET => token::LBRACKET, _ => fail } } fn is_lit(t: token) -> bool { match t { LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_STR(_) => true, _ => false } } pure fn is_ident(t: token) -> bool { match t { IDENT(_, _) => true, _ => false } } pure fn is_ident_or_path(t: token) -> bool { match t { IDENT(_, _) | INTERPOLATED(nt_path(*)) => true, _ => false } } pure fn is_plain_ident(t: token) -> bool { match t { IDENT(_, false) => true, _ => false } } pure fn is_bar(t: token) -> bool { match t { BINOP(OR) | OROR => true, _ => false } } mod special_idents { #[legacy_exports]; use ast::ident; const underscore : ident = ident { repr: 0u }; const anon : ident = ident { repr: 1u }; const dtor : ident = ident { repr: 2u }; // 'drop', but that's reserved const invalid : ident = ident { repr: 3u }; // '' const unary : ident = ident { repr: 4u }; const not_fn : ident = ident { repr: 5u }; const idx_fn : ident = ident { repr: 6u }; const unary_minus_fn : 
ident = ident { repr: 7u }; const clownshoes_extensions : ident = ident { repr: 8u }; const self_ : ident = ident { repr: 9u }; // 'self' /* for matcher NTs */ const item : ident = ident { repr: 10u }; const block : ident = ident { repr: 11u }; const stmt : ident = ident { repr: 12u }; const pat : ident = ident { repr: 13u }; const expr : ident = ident { repr: 14u }; const ty : ident = ident { repr: 15u }; const ident : ident = ident { repr: 16u }; const path : ident = ident { repr: 17u }; const tt : ident = ident { repr: 18u }; const matchers : ident = ident { repr: 19u }; const str : ident = ident { repr: 20u }; // for the type /* outside of libsyntax */ const ty_visitor : ident = ident { repr: 21u }; const arg : ident = ident { repr: 22u }; const descrim : ident = ident { repr: 23u }; const clownshoe_abi : ident = ident { repr: 24u }; const clownshoe_stack_shim : ident = ident { repr: 25u }; const tydesc : ident = ident { repr: 26u }; const literally_dtor : ident = ident { repr: 27u }; const main : ident = ident { repr: 28u }; const opaque : ident = ident { repr: 29u }; const blk : ident = ident { repr: 30u }; const static : ident = ident { repr: 31u }; const intrinsic : ident = ident { repr: 32u }; const clownshoes_foreign_mod: ident = ident { repr: 33 }; } struct ident_interner { priv interner: util::interner::interner<@~str>, } impl ident_interner { fn intern(val: @~str) -> ast::ident { ast::ident { repr: self.interner.intern(val) } } fn gensym(val: @~str) -> ast::ident { ast::ident { repr: self.interner.gensym(val) } } pure fn get(idx: ast::ident) -> @~str { self.interner.get(idx.repr) } fn len() -> uint { self.interner.len() } } /** Key for thread-local data for sneaking interner information to the * serializer/deserializer. It sounds like a hack because it is one. * Bonus ultra-hack: functions as keys don't work across crates, * so we have to use a unique number. See taskgroup_key! in task.rs * for another case of this. */ macro_rules! 
interner_key ( () => (cast::transmute::<(uint, uint), &fn(+v: @@token::ident_interner)>( (-3 as uint, 0u))) ) fn mk_ident_interner() -> @ident_interner { unsafe { match task::local_data::local_data_get(interner_key!()) { Some(interner) => *interner, None => { // the indices here must correspond to the numbers in // special_idents. let init_vec = ~[ @~"_", @~"anon", @~"drop", @~"", @~"unary", @~"!", @~"[]", @~"unary-", @~"__extensions__", @~"self", @~"item", @~"block", @~"stmt", @~"pat", @~"expr", @~"ty", @~"ident", @~"path", @~"tt", @~"matchers", @~"str", @~"TyVisitor", @~"arg", @~"descrim", @~"__rust_abi", @~"__rust_stack_shim", @~"TyDesc", @~"dtor", @~"main", @~"<opaque>", @~"blk", @~"static", @~"intrinsic", @~"__foreign_mod__" ]; let rv = @ident_interner { interner: interner::mk_prefill(init_vec) }; task::local_data::local_data_set(interner_key!(), @rv); rv } } } } /* for when we don't care about the contents; doesn't interact with TLD or serialization */ fn mk_fake_ident_interner() -> @ident_interner { @ident_interner { interner: interner::mk::<@~str>() } } /** * All the valid words that have meaning in the Rust language. * * Rust keywords are either 'temporary', 'strict' or 'reserved'. Temporary * keywords are contextual and may be used as identifiers anywhere. They are * expected to disappear from the grammar soon. Strict keywords may not * appear as identifiers at all. Reserved keywords are not used anywhere in * the language and may not appear as identifiers. 
*/ fn keyword_table() -> HashMap<~str, ()> { let keywords = HashMap(); for temporary_keyword_table().each_key |word| { keywords.insert(word, ()); } for strict_keyword_table().each_key |word| { keywords.insert(word, ()); } for reserved_keyword_table().each_key |word| { keywords.insert(word, ()); } keywords } /// Keywords that may be used as identifiers fn temporary_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"self", ~"static", ]; for keys.each |word| { words.insert(*word, ()); } words } /// Full keywords. May not appear anywhere else. fn strict_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"as", ~"assert", ~"break", ~"const", ~"copy", ~"do", ~"drop", ~"else", ~"enum", ~"export", ~"extern", ~"fail", ~"false", ~"fn", ~"for", ~"if", ~"impl", ~"let", ~"log", ~"loop", ~"match", ~"mod", ~"move", ~"mut", ~"priv", ~"pub", ~"pure", ~"ref", ~"return", ~"struct", ~"true", ~"trait", ~"type", ~"unsafe", ~"use", ~"while" ]; for keys.each |word| { words.insert(*word, ()); } words } fn reserved_keyword_table() -> HashMap<~str, ()> { let words = HashMap(); let keys = ~[ ~"be" ]; for keys.each |word| { words.insert(*word, ()); } words } impl binop : cmp::Eq { pure fn eq(other: &binop) -> bool { (self as uint) == ((*other) as uint) } pure fn ne(other: &binop) -> bool { !self.eq(other) } } impl token : cmp::Eq { pure fn eq(other: &token) -> bool { match self { EQ => { match (*other) { EQ => true, _ => false } } LT => { match (*other) { LT => true, _ => false } } LE => { match (*other) { LE => true, _ => false } } EQEQ => { match (*other) { EQEQ => true, _ => false } } NE => { match (*other) { NE => true, _ => false } } GE => { match (*other) { GE => true, _ => false } } GT => { match (*other) { GT => true, _ => false } } ANDAND => { match (*other) { ANDAND => true, _ => false } } OROR => { match (*other) { OROR => true, _ => false } } NOT => { match (*other) { NOT => true, _ => false } } TILDE => { match (*other) { TILDE 
=> true, _ => false } } BINOP(e0a) => { match (*other) { BINOP(e0b) => e0a == e0b, _ => false } } BINOPEQ(e0a) => { match (*other) { BINOPEQ(e0b) => e0a == e0b, _ => false } } AT => { match (*other) { AT => true, _ => false } } DOT => { match (*other) { DOT => true, _ => false } } DOTDOT => { match (*other) { DOTDOT => true, _ => false } } ELLIPSIS => { match (*other) { ELLIPSIS => true, _ => false } } COMMA => { match (*other) { COMMA => true, _ => false } } SEMI => { match (*other) { SEMI => true, _ => false } } COLON => { match (*other) { COLON => true, _ => false } } MOD_SEP => { match (*other) { MOD_SEP => true, _ => false } } RARROW => { match (*other) { RARROW => true, _ => false } } LARROW => { match (*other) { LARROW => true, _ => false } } DARROW => { match (*other) { DARROW => true, _ => false } } FAT_ARROW => { match (*other) { FAT_ARROW => true, _ => false } } LPAREN => { match (*other) { LPAREN => true, _ => false } } RPAREN => { match (*other) { RPAREN => true, _ => false } } LBRACKET => { match (*other) { LBRACKET => true, _ => false } } RBRACKET => { match (*other) { RBRACKET => true, _ => false } } LBRACE => { match (*other) { LBRACE => true, _ => false } } RBRACE => { match (*other) { RBRACE => true, _ => false } } POUND => { match (*other) { POUND => true, _ => false } } DOLLAR => { match (*other) { DOLLAR => true, _ => false } } LIT_INT(e0a, e1a) => { match (*other) { LIT_INT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_UINT(e0a, e1a) => { match (*other) { LIT_UINT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_INT_UNSUFFIXED(e0a) => { match (*other) { LIT_INT_UNSUFFIXED(e0b) => e0a == e0b, _ => false } } LIT_FLOAT(e0a, e1a) => { match (*other) { LIT_FLOAT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } LIT_STR(e0a) => { match (*other) { LIT_STR(e0b) => e0a == e0b, _ => false } } IDENT(e0a, e1a) => { match (*other) { IDENT(e0b, e1b) => e0a == e0b && e1a == e1b, _ => false } } UNDERSCORE => { match (*other) { 
UNDERSCORE => true, _ => false } } INTERPOLATED(_) => { match (*other) { INTERPOLATED(_) => true, _ => false } } DOC_COMMENT(e0a) => { match (*other) { DOC_COMMENT(e0b) => e0a == e0b, _ => false } } EOF => { match (*other) { EOF => true, _ => false } } } } pure fn ne(other: &token) -> bool { !self.eq(other) } } // Local Variables: // fill-column: 78; // indent-tabs-mode: nil // c-basic-offset: 4 // buffer-file-coding-system: utf-8-unix // End:
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::prelude::*; use ast; use ast::Name; use ast_util; use parse::token; use util::interner::StrInterner; use util::interner; use core::cast; use core::char; use core::cmp::Equiv; use core::local_data; use core::rand; use core::rand::RngUtil; #[deriving(Encodable, Decodable, Eq)] pub enum binop { PLUS, MINUS, STAR, SLASH, PERCENT, CARET, AND, OR, SHL, SHR, } #[deriving(Encodable, Decodable, Eq)] pub enum Token { /* Expression-operator symbols. */ EQ, LT, LE, EQEQ, NE, GE, GT, ANDAND, OROR, NOT, TILDE, BINOP(binop), BINOPEQ(binop), /* Structural symbols */ AT, DOT, DOTDOT, COMMA, SEMI, COLON, MOD_SEP, RARROW, LARROW, DARROW, FAT_ARROW, LPAREN, RPAREN, LBRACKET, RBRACKET, LBRACE, RBRACE, POUND, DOLLAR, /* Literals */ LIT_INT(i64, ast::int_ty), LIT_UINT(u64, ast::uint_ty), LIT_INT_UNSUFFIXED(i64), LIT_FLOAT(ast::ident, ast::float_ty), LIT_FLOAT_UNSUFFIXED(ast::ident), LIT_STR(ast::ident), /* Name components */ // an identifier contains an "is_mod_name" boolean, // indicating whether :: follows this token with no // whitespace in between. IDENT(ast::ident, bool), UNDERSCORE, LIFETIME(ast::ident), /* For interpolation */ INTERPOLATED(nonterminal), DOC_COMMENT(ast::ident), EOF, } #[deriving(Encodable, Decodable, Eq)] /// For interpolation during macro expansion. 
pub enum nonterminal { nt_item(@ast::item), nt_block(ast::blk), nt_stmt(@ast::stmt), nt_pat( @ast::pat), nt_expr(@ast::expr), nt_ty( @ast::Ty), nt_ident(ast::ident, bool), nt_path(@ast::Path), nt_tt( @ast::token_tree), //needs @ed to break a circularity nt_matchers(~[ast::matcher]) } pub fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", STAR => ~"*", SLASH => ~"/", PERCENT => ~"%", CARET => ~"^", AND => ~"&", OR => ~"|", SHL => ~"<<", SHR => ~">>" } } pub fn to_str(in: @ident_interner, t: &Token) -> ~str { match *t { EQ => ~"=", LT => ~"<", LE => ~"<=", EQEQ => ~"==", NE => ~"!=", GE => ~">=", GT => ~">", NOT => ~"!", TILDE => ~"~", OROR => ~"||", ANDAND => ~"&&", BINOP(op) => binop_to_str(op), BINOPEQ(op) => binop_to_str(op) + "=", /* Structural symbols */ AT => ~"@", DOT => ~".", DOTDOT => ~"..", COMMA => ~",", SEMI => ~";", COLON => ~":", MOD_SEP => ~"::", RARROW => ~"->", LARROW => ~"<-", DARROW => ~"<->", FAT_ARROW => ~"=>", LPAREN => ~"(", RPAREN => ~")", LBRACKET => ~"[", RBRACKET => ~"]", LBRACE => ~"{", RBRACE => ~"}", POUND => ~"#", DOLLAR => ~"$", /* Literals */ LIT_INT(c, ast::ty_char) => { ~"'" + char::escape_default(c as char) + "'" } LIT_INT(i, t) => { i.to_str() + ast_util::int_ty_to_str(t) } LIT_UINT(u, t) => { u.to_str() + ast_util::uint_ty_to_str(t) } LIT_INT_UNSUFFIXED(i) => { i.to_str() } LIT_FLOAT(ref s, t) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body += "0"; // `10.f` is not a float literal } body + ast_util::float_ty_to_str(t) } LIT_FLOAT_UNSUFFIXED(ref s) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body += "0"; // `10.f` is not a float literal } body } LIT_STR(ref s) => { fmt!("\"%s\"", ident_to_str(s).escape_default()) } /* Name components */ IDENT(s, _) => in.get(s.name).to_owned(), LIFETIME(s) => fmt!("'%s", in.get(s.name)), UNDERSCORE => ~"_", /* Other */ DOC_COMMENT(ref s) => ident_to_str(s).to_owned(), EOF => ~"<eof>", INTERPOLATED(ref nt) => { 
match nt { &nt_expr(e) => ::print::pprust::expr_to_str(e, in), _ => { ~"an interpolated " + match (*nt) { nt_item(*) => ~"item", nt_block(*) => ~"block", nt_stmt(*) => ~"statement", nt_pat(*) => ~"pattern", nt_expr(*) => fail!("should have been handled above"), nt_ty(*) => ~"type", nt_ident(*) => ~"identifier", nt_path(*) => ~"path", nt_tt(*) => ~"tt", nt_matchers(*) => ~"matcher sequence" } } } } } } pub fn can_begin_expr(t: &Token) -> bool { match *t { LPAREN => true, LBRACE => true, LBRACKET => true, IDENT(_, _) => true, UNDERSCORE => true, TILDE => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_FLOAT_UNSUFFIXED(_) => true, LIT_STR(_) => true, POUND => true, AT => true, NOT => true, BINOP(MINUS) => true, BINOP(STAR) => true, BINOP(AND) => true, BINOP(OR) => true, // in lambda syntax OROR => true, // in lambda syntax MOD_SEP => true, INTERPOLATED(nt_expr(*)) | INTERPOLATED(nt_ident(*)) | INTERPOLATED(nt_block(*)) | INTERPOLATED(nt_path(*)) => true, _ => false } } /// what's the opposite delimiter? 
// Maps an opening delimiter to its closing counterpart and vice versa;
// any non-delimiter token aborts via `fail!()`.
pub fn flip_delimiter(t: &token::Token) -> token::Token {
    match *t {
        LPAREN => RPAREN,
        LBRACE => RBRACE,
        LBRACKET => RBRACKET,
        RPAREN => LPAREN,
        RBRACE => LBRACE,
        RBRACKET => LBRACKET,
        _ => fail!()
    }
}

// True for every literal token kind (including the unsuffixed float
// variant this revision added).
pub fn is_lit(t: &Token) -> bool {
    match *t {
        LIT_INT(_, _) => true,
        LIT_UINT(_, _) => true,
        LIT_INT_UNSUFFIXED(_) => true,
        LIT_FLOAT(_, _) => true,
        LIT_FLOAT_UNSUFFIXED(_) => true,
        LIT_STR(_) => true,
        _ => false
    }
}

// True iff the token is an identifier (plain or mod-name flavored).
pub fn is_ident(t: &Token) -> bool {
    match *t { IDENT(_, _) => true, _ => false }
}

// True for identifiers and interpolated path nonterminals — anything the
// parser can treat as the start of a path.
pub fn is_ident_or_path(t: &Token) -> bool {
    match *t {
        IDENT(_, _) | INTERPOLATED(nt_path(*)) => true,
        _ => false
    }
}

// True only for a "plain" identifier: the bool payload (is_mod_name) must
// be false, i.e. no `::` immediately follows.
pub fn is_plain_ident(t: &Token) -> bool {
    match *t { IDENT(_, false) => true, _ => false }
}

// True for `|` and `||` — both can open a lambda's parameter list.
pub fn is_bar(t: &Token) -> bool {
    match *t { BINOP(OR) | OROR => true, _ => false }
}

// Pre-interned identifiers with fixed name indices.  These numbers must
// stay in sync with the prefill vector in mk_fresh_ident_interner.
pub mod special_idents {
    use ast::ident;
    pub static underscore : ident = ident { name: 0, ctxt: 0};
    pub static anon : ident = ident { name: 1, ctxt: 0};
    pub static invalid : ident = ident { name: 2, ctxt: 0}; // ''
    pub static unary : ident = ident { name: 3, ctxt: 0};
    pub static not_fn : ident = ident { name: 4, ctxt: 0};
    pub static idx_fn : ident = ident { name: 5, ctxt: 0};
    pub static unary_minus_fn : ident = ident { name: 6, ctxt: 0};
    pub static clownshoes_extensions : ident = ident { name: 7, ctxt: 0};
    pub static self_ : ident = ident { name: 8, ctxt: 0}; // 'self'

    /* for matcher NTs */
    pub static item : ident = ident { name: 9, ctxt: 0};
    pub static block : ident = ident { name: 10, ctxt: 0};
    pub static stmt : ident = ident { name: 11, ctxt: 0};
    pub static pat : ident = ident { name: 12, ctxt: 0};
    pub static expr : ident = ident { name: 13, ctxt: 0};
    pub static ty : ident = ident { name: 14, ctxt: 0};
    pub static ident : ident = ident { name: 15, ctxt: 0};
    pub static path : ident = ident { name: 16, ctxt: 0};
    pub static tt : ident = ident { name: 17, ctxt: 0};
    pub static matchers : ident = ident { name: 18, ctxt: 0};
    pub static str : ident = ident { name: 19, ctxt: 0}; //
for the type /* outside of libsyntax */ pub static ty_visitor : ident = ident { name: 20, ctxt: 0}; pub static arg : ident = ident { name: 21, ctxt: 0}; pub static descrim : ident = ident { name: 22, ctxt: 0}; pub static clownshoe_abi : ident = ident { name: 23, ctxt: 0}; pub static clownshoe_stack_shim : ident = ident { name: 24, ctxt: 0}; pub static tydesc : ident = ident { name: 25, ctxt: 0}; pub static main : ident = ident { name: 26, ctxt: 0}; pub static opaque : ident = ident { name: 27, ctxt: 0}; pub static blk : ident = ident { name: 28, ctxt: 0}; pub static statik : ident = ident { name: 29, ctxt: 0}; pub static intrinsic : ident = ident { name: 30, ctxt: 0}; pub static clownshoes_foreign_mod: ident = ident { name: 31, ctxt: 0}; pub static unnamed_field: ident = ident { name: 32, ctxt: 0}; pub static c_abi: ident = ident { name: 33, ctxt: 0}; pub static type_self: ident = ident { name: 34, ctxt: 0}; // `Self` } /** * Maps a token to a record specifying the corresponding binary * operator */ pub fn token_to_binop(tok: Token) -> Option<ast::binop> { match tok { BINOP(STAR) => Some(ast::mul), BINOP(SLASH) => Some(ast::div), BINOP(PERCENT) => Some(ast::rem), BINOP(PLUS) => Some(ast::add), BINOP(MINUS) => Some(ast::subtract), BINOP(SHL) => Some(ast::shl), BINOP(SHR) => Some(ast::shr), BINOP(AND) => Some(ast::bitand), BINOP(CARET) => Some(ast::bitxor), BINOP(OR) => Some(ast::bitor), LT => Some(ast::lt), LE => Some(ast::le), GE => Some(ast::ge), GT => Some(ast::gt), EQEQ => Some(ast::eq), NE => Some(ast::ne), ANDAND => Some(ast::and), OROR => Some(ast::or), _ => None } } pub struct ident_interner { priv interner: StrInterner, } impl ident_interner { pub fn intern(&self, val: &str) -> Name { self.interner.intern(val) } pub fn gensym(&self, val: &str) -> Name { self.interner.gensym(val) } pub fn get(&self, idx: Name) -> @str { self.interner.get(idx) } // is this really something that should be exposed? 
pub fn len(&self) -> uint { self.interner.len() } pub fn find_equiv<Q:Hash + IterBytes + Equiv<@str>>(&self, val: &Q) -> Option<Name> { self.interner.find_equiv(val) } } // return a fresh interner, preloaded with special identifiers. fn mk_fresh_ident_interner() -> @ident_interner { // the indices here must correspond to the numbers in // special_idents. let init_vec = ~[ "_", // 0 "anon", // 1 "", // 2 "unary", // 3 "!", // 4 "[]", // 5 "unary-", // 6 "__extensions__", // 7 "self", // 8 "item", // 9 "block", // 10 "stmt", // 11 "pat", // 12 "expr", // 13 "ty", // 14 "ident", // 15 "path", // 16 "tt", // 17 "matchers", // 18 "str", // 19 "TyVisitor", // 20 "arg", // 21 "descrim", // 22 "__rust_abi", // 23 "__rust_stack_shim", // 24 "TyDesc", // 25 "main", // 26 "<opaque>", // 27 "blk", // 28 "static", // 29 "intrinsic", // 30 "__foreign_mod__", // 31 "__field__", // 32 "C", // 33 "Self", // 34 "as", // 35 "break", // 36 "const", // 37 "copy", // 38 "do", // 39 "else", // 40 "enum", // 41 "extern", // 42 "false", // 43 "fn", // 44 "for", // 45 "if", // 46 "impl", // 47 "let", // 48 "__log", // 49 "loop", // 50 "match", // 51 "mod", // 52 "mut", // 53 "once", // 54 "priv", // 55 "pub", // 56 "pure", // 57 "ref", // 58 "return", // 59 "static", // 29 -- also a special ident "self", // 8 -- also a special ident "struct", // 60 "super", // 61 "true", // 62 "trait", // 63 "type", // 64 "unsafe", // 65 "use", // 66 "while", // 67 "be", // 68 ]; @ident_interner { interner: interner::StrInterner::prefill(init_vec) } } // if an interner exists in TLS, return it. Otherwise, prepare a // fresh one. 
pub fn get_ident_interner() -> @ident_interner { unsafe { let key = (cast::transmute::<(uint, uint), &fn(v: @@::parse::token::ident_interner)>( (-3 as uint, 0u))); match local_data::local_data_get(key) { Some(interner) => *interner, None => { let interner = mk_fresh_ident_interner(); unsafe { local_data::local_data_set(key, @interner); } interner } } } } /* for when we don't care about the contents; doesn't interact with TLD or serialization */ pub fn mk_fake_ident_interner() -> @ident_interner { @ident_interner { interner: interner::StrInterner::new() } } // maps a string to its interned representation pub fn intern(str : &str) -> Name { let interner = get_ident_interner(); interner.intern(str) } // gensyms a new uint, using the current interner pub fn gensym(str : &str) -> Name { let interner = get_ident_interner(); interner.gensym(str) } // map an interned representation back to a string pub fn interner_get(name : Name) -> @str { get_ident_interner().get(name) } // maps an identifier to the string that it corresponds to pub fn ident_to_str(id : &ast::ident) -> @str { interner_get(id.name) } // maps a string to an identifier with an empty syntax context pub fn str_to_ident(str : &str) -> ast::ident { ast::new_ident(intern(str)) } // maps a string to a gensym'ed identifier pub fn gensym_ident(str : &str) -> ast::ident { ast::new_ident(gensym(str)) } // create a fresh name. In principle, this is just a // gensym, but for debugging purposes, you'd like the // resulting name to have a suggestive stringify, without // paying the cost of guaranteeing that the name is // truly unique. I'm going to try to strike a balance // by using a gensym with a name that has a random number // at the end. So, the gensym guarantees the uniqueness, // and the int helps to avoid confusion. pub fn fresh_name(src_name : &str) -> Name { let num = rand::rng().gen_uint_range(0,0xffff); gensym(fmt!("%s_%u",src_name,num)) } /** * All the valid words that have meaning in the Rust language. 
* * Rust keywords are either 'strict' or 'reserved'. Strict keywords may not * appear as identifiers at all. Reserved keywords are not used anywhere in * the language and may not appear as identifiers. */ pub mod keywords { use ast::ident; pub enum Keyword { // Strict keywords As, Break, Const, Copy, Do, Else, Enum, Extern, False, Fn, For, If, Impl, Let, __Log, Loop, Match, Mod, Mut, Once, Priv, Pub, Pure, Ref, Return, Static, Self, Struct, Super, True, Trait, Type, Unsafe, Use, While, // Reserved keywords Be, } impl Keyword { pub fn to_ident(&self) -> ident { match *self { As => ident { name: 35, ctxt: 0 }, Break => ident { name: 36, ctxt: 0 }, Const => ident { name: 37, ctxt: 0 }, Copy => ident { name: 38, ctxt: 0 }, Do => ident { name: 39, ctxt: 0 }, Else => ident { name: 40, ctxt: 0 }, Enum => ident { name: 41, ctxt: 0 }, Extern => ident { name: 42, ctxt: 0 }, False => ident { name: 43, ctxt: 0 }, Fn => ident { name: 44, ctxt: 0 }, For => ident { name: 45, ctxt: 0 }, If => ident { name: 46, ctxt: 0 }, Impl => ident { name: 47, ctxt: 0 }, Let => ident { name: 48, ctxt: 0 }, __Log => ident { name: 49, ctxt: 0 }, Loop => ident { name: 50, ctxt: 0 }, Match => ident { name: 51, ctxt: 0 }, Mod => ident { name: 52, ctxt: 0 }, Mut => ident { name: 53, ctxt: 0 }, Once => ident { name: 54, ctxt: 0 }, Priv => ident { name: 55, ctxt: 0 }, Pub => ident { name: 56, ctxt: 0 }, Pure => ident { name: 57, ctxt: 0 }, Ref => ident { name: 58, ctxt: 0 }, Return => ident { name: 59, ctxt: 0 }, Static => ident { name: 29, ctxt: 0 }, Self => ident { name: 8, ctxt: 0 }, Struct => ident { name: 60, ctxt: 0 }, Super => ident { name: 61, ctxt: 0 }, True => ident { name: 62, ctxt: 0 }, Trait => ident { name: 63, ctxt: 0 }, Type => ident { name: 64, ctxt: 0 }, Unsafe => ident { name: 65, ctxt: 0 }, Use => ident { name: 66, ctxt: 0 }, While => ident { name: 67, ctxt: 0 }, Be => ident { name: 68, ctxt: 0 }, } } } } pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool { match *tok { 
token::IDENT(sid, false) => { kw.to_ident().name == sid.name } _ => { false } } } pub fn is_any_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 8 | 29 | 35 .. 68 => true, _ => false, }, _ => false } } pub fn is_strict_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 8 | 29 | 35 .. 67 => true, _ => false, }, _ => false, } } pub fn is_reserved_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 68 => true, _ => false, }, _ => false, } } #[cfg(test)] mod test { use super::*; use std::io; #[test] fn t1() { let a = fresh_name("ghi"); io::println(fmt!("interned name: %u,\ntextual name: %s\n", a,interner_get(a))); } } libsyntax: cleanup warnings // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::prelude::*; use ast; use ast::Name; use ast_util; use parse::token; use util::interner::StrInterner; use util::interner; use core::cast; use core::char; use core::cmp::Equiv; use core::local_data; use core::rand; use core::rand::RngUtil; #[deriving(Encodable, Decodable, Eq)] pub enum binop { PLUS, MINUS, STAR, SLASH, PERCENT, CARET, AND, OR, SHL, SHR, } #[deriving(Encodable, Decodable, Eq)] pub enum Token { /* Expression-operator symbols. 
*/ EQ, LT, LE, EQEQ, NE, GE, GT, ANDAND, OROR, NOT, TILDE, BINOP(binop), BINOPEQ(binop), /* Structural symbols */ AT, DOT, DOTDOT, COMMA, SEMI, COLON, MOD_SEP, RARROW, LARROW, DARROW, FAT_ARROW, LPAREN, RPAREN, LBRACKET, RBRACKET, LBRACE, RBRACE, POUND, DOLLAR, /* Literals */ LIT_INT(i64, ast::int_ty), LIT_UINT(u64, ast::uint_ty), LIT_INT_UNSUFFIXED(i64), LIT_FLOAT(ast::ident, ast::float_ty), LIT_FLOAT_UNSUFFIXED(ast::ident), LIT_STR(ast::ident), /* Name components */ // an identifier contains an "is_mod_name" boolean, // indicating whether :: follows this token with no // whitespace in between. IDENT(ast::ident, bool), UNDERSCORE, LIFETIME(ast::ident), /* For interpolation */ INTERPOLATED(nonterminal), DOC_COMMENT(ast::ident), EOF, } #[deriving(Encodable, Decodable, Eq)] /// For interpolation during macro expansion. pub enum nonterminal { nt_item(@ast::item), nt_block(ast::blk), nt_stmt(@ast::stmt), nt_pat( @ast::pat), nt_expr(@ast::expr), nt_ty( @ast::Ty), nt_ident(ast::ident, bool), nt_path(@ast::Path), nt_tt( @ast::token_tree), //needs @ed to break a circularity nt_matchers(~[ast::matcher]) } pub fn binop_to_str(o: binop) -> ~str { match o { PLUS => ~"+", MINUS => ~"-", STAR => ~"*", SLASH => ~"/", PERCENT => ~"%", CARET => ~"^", AND => ~"&", OR => ~"|", SHL => ~"<<", SHR => ~">>" } } pub fn to_str(in: @ident_interner, t: &Token) -> ~str { match *t { EQ => ~"=", LT => ~"<", LE => ~"<=", EQEQ => ~"==", NE => ~"!=", GE => ~">=", GT => ~">", NOT => ~"!", TILDE => ~"~", OROR => ~"||", ANDAND => ~"&&", BINOP(op) => binop_to_str(op), BINOPEQ(op) => binop_to_str(op) + "=", /* Structural symbols */ AT => ~"@", DOT => ~".", DOTDOT => ~"..", COMMA => ~",", SEMI => ~";", COLON => ~":", MOD_SEP => ~"::", RARROW => ~"->", LARROW => ~"<-", DARROW => ~"<->", FAT_ARROW => ~"=>", LPAREN => ~"(", RPAREN => ~")", LBRACKET => ~"[", RBRACKET => ~"]", LBRACE => ~"{", RBRACE => ~"}", POUND => ~"#", DOLLAR => ~"$", /* Literals */ LIT_INT(c, ast::ty_char) => { ~"'" + 
char::escape_default(c as char) + "'" } LIT_INT(i, t) => { i.to_str() + ast_util::int_ty_to_str(t) } LIT_UINT(u, t) => { u.to_str() + ast_util::uint_ty_to_str(t) } LIT_INT_UNSUFFIXED(i) => { i.to_str() } LIT_FLOAT(ref s, t) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body += "0"; // `10.f` is not a float literal } body + ast_util::float_ty_to_str(t) } LIT_FLOAT_UNSUFFIXED(ref s) => { let mut body = ident_to_str(s).to_owned(); if body.ends_with(".") { body += "0"; // `10.f` is not a float literal } body } LIT_STR(ref s) => { fmt!("\"%s\"", ident_to_str(s).escape_default()) } /* Name components */ IDENT(s, _) => in.get(s.name).to_owned(), LIFETIME(s) => fmt!("'%s", in.get(s.name)), UNDERSCORE => ~"_", /* Other */ DOC_COMMENT(ref s) => ident_to_str(s).to_owned(), EOF => ~"<eof>", INTERPOLATED(ref nt) => { match nt { &nt_expr(e) => ::print::pprust::expr_to_str(e, in), _ => { ~"an interpolated " + match (*nt) { nt_item(*) => ~"item", nt_block(*) => ~"block", nt_stmt(*) => ~"statement", nt_pat(*) => ~"pattern", nt_expr(*) => fail!("should have been handled above"), nt_ty(*) => ~"type", nt_ident(*) => ~"identifier", nt_path(*) => ~"path", nt_tt(*) => ~"tt", nt_matchers(*) => ~"matcher sequence" } } } } } } pub fn can_begin_expr(t: &Token) -> bool { match *t { LPAREN => true, LBRACE => true, LBRACKET => true, IDENT(_, _) => true, UNDERSCORE => true, TILDE => true, LIT_INT(_, _) => true, LIT_UINT(_, _) => true, LIT_INT_UNSUFFIXED(_) => true, LIT_FLOAT(_, _) => true, LIT_FLOAT_UNSUFFIXED(_) => true, LIT_STR(_) => true, POUND => true, AT => true, NOT => true, BINOP(MINUS) => true, BINOP(STAR) => true, BINOP(AND) => true, BINOP(OR) => true, // in lambda syntax OROR => true, // in lambda syntax MOD_SEP => true, INTERPOLATED(nt_expr(*)) | INTERPOLATED(nt_ident(*)) | INTERPOLATED(nt_block(*)) | INTERPOLATED(nt_path(*)) => true, _ => false } } /// what's the opposite delimiter? 
// Maps an opening delimiter token to its closing partner and vice versa.
// Calling this with any non-delimiter token is a bug (fail!).
pub fn flip_delimiter(t: &token::Token) -> token::Token {
    match *t {
        LPAREN => RPAREN,
        LBRACE => RBRACE,
        LBRACKET => RBRACKET,
        RPAREN => LPAREN,
        RBRACE => LBRACE,
        RBRACKET => LBRACKET,
        _ => fail!()
    }
}

// True for every literal token kind: int, uint, unsuffixed int, float,
// unsuffixed float, and string.
pub fn is_lit(t: &Token) -> bool {
    match *t {
        LIT_INT(_, _) => true,
        LIT_UINT(_, _) => true,
        LIT_INT_UNSUFFIXED(_) => true,
        LIT_FLOAT(_, _) => true,
        LIT_FLOAT_UNSUFFIXED(_) => true,
        LIT_STR(_) => true,
        _ => false
    }
}

// True for any identifier token, mod-name flag ignored.
pub fn is_ident(t: &Token) -> bool {
    match *t {
        IDENT(_, _) => true,
        _ => false
    }
}

// True for an identifier or an interpolated path nonterminal.
pub fn is_ident_or_path(t: &Token) -> bool {
    match *t {
        IDENT(_, _) | INTERPOLATED(nt_path(*)) => true,
        _ => false
    }
}

// True only for an identifier NOT followed by `::` (is_mod_name == false).
pub fn is_plain_ident(t: &Token) -> bool {
    match *t {
        IDENT(_, false) => true,
        _ => false
    }
}

// True for `|` or `||` — both open a lambda argument list.
pub fn is_bar(t: &Token) -> bool {
    match *t {
        BINOP(OR) | OROR => true,
        _ => false
    }
}

// Identifiers with fixed interner indices. The `name` numbers here must
// stay in sync with the prefill order in mk_fresh_ident_interner.
pub mod special_idents {
    use ast::ident;

    pub static underscore : ident = ident { name: 0, ctxt: 0};
    pub static anon : ident = ident { name: 1, ctxt: 0};
    pub static invalid : ident = ident { name: 2, ctxt: 0}; // ''
    pub static unary : ident = ident { name: 3, ctxt: 0};
    pub static not_fn : ident = ident { name: 4, ctxt: 0};
    pub static idx_fn : ident = ident { name: 5, ctxt: 0};
    pub static unary_minus_fn : ident = ident { name: 6, ctxt: 0};
    pub static clownshoes_extensions : ident = ident { name: 7, ctxt: 0};
    pub static self_ : ident = ident { name: 8, ctxt: 0}; // 'self'

    /* for matcher NTs */
    pub static item : ident = ident { name: 9, ctxt: 0};
    pub static block : ident = ident { name: 10, ctxt: 0};
    pub static stmt : ident = ident { name: 11, ctxt: 0};
    pub static pat : ident = ident { name: 12, ctxt: 0};
    pub static expr : ident = ident { name: 13, ctxt: 0};
    pub static ty : ident = ident { name: 14, ctxt: 0};
    pub static ident : ident = ident { name: 15, ctxt: 0};
    pub static path : ident = ident { name: 16, ctxt: 0};
    pub static tt : ident = ident { name: 17, ctxt: 0};
    pub static matchers : ident = ident { name: 18, ctxt: 0};
    pub static str : ident = ident { name: 19, ctxt: 0}; //
for the type /* outside of libsyntax */ pub static ty_visitor : ident = ident { name: 20, ctxt: 0}; pub static arg : ident = ident { name: 21, ctxt: 0}; pub static descrim : ident = ident { name: 22, ctxt: 0}; pub static clownshoe_abi : ident = ident { name: 23, ctxt: 0}; pub static clownshoe_stack_shim : ident = ident { name: 24, ctxt: 0}; pub static tydesc : ident = ident { name: 25, ctxt: 0}; pub static main : ident = ident { name: 26, ctxt: 0}; pub static opaque : ident = ident { name: 27, ctxt: 0}; pub static blk : ident = ident { name: 28, ctxt: 0}; pub static statik : ident = ident { name: 29, ctxt: 0}; pub static intrinsic : ident = ident { name: 30, ctxt: 0}; pub static clownshoes_foreign_mod: ident = ident { name: 31, ctxt: 0}; pub static unnamed_field: ident = ident { name: 32, ctxt: 0}; pub static c_abi: ident = ident { name: 33, ctxt: 0}; pub static type_self: ident = ident { name: 34, ctxt: 0}; // `Self` } /** * Maps a token to a record specifying the corresponding binary * operator */ pub fn token_to_binop(tok: Token) -> Option<ast::binop> { match tok { BINOP(STAR) => Some(ast::mul), BINOP(SLASH) => Some(ast::div), BINOP(PERCENT) => Some(ast::rem), BINOP(PLUS) => Some(ast::add), BINOP(MINUS) => Some(ast::subtract), BINOP(SHL) => Some(ast::shl), BINOP(SHR) => Some(ast::shr), BINOP(AND) => Some(ast::bitand), BINOP(CARET) => Some(ast::bitxor), BINOP(OR) => Some(ast::bitor), LT => Some(ast::lt), LE => Some(ast::le), GE => Some(ast::ge), GT => Some(ast::gt), EQEQ => Some(ast::eq), NE => Some(ast::ne), ANDAND => Some(ast::and), OROR => Some(ast::or), _ => None } } pub struct ident_interner { priv interner: StrInterner, } impl ident_interner { pub fn intern(&self, val: &str) -> Name { self.interner.intern(val) } pub fn gensym(&self, val: &str) -> Name { self.interner.gensym(val) } pub fn get(&self, idx: Name) -> @str { self.interner.get(idx) } // is this really something that should be exposed? 
pub fn len(&self) -> uint { self.interner.len() } pub fn find_equiv<Q:Hash + IterBytes + Equiv<@str>>(&self, val: &Q) -> Option<Name> { self.interner.find_equiv(val) } } // return a fresh interner, preloaded with special identifiers. fn mk_fresh_ident_interner() -> @ident_interner { // the indices here must correspond to the numbers in // special_idents. let init_vec = ~[ "_", // 0 "anon", // 1 "", // 2 "unary", // 3 "!", // 4 "[]", // 5 "unary-", // 6 "__extensions__", // 7 "self", // 8 "item", // 9 "block", // 10 "stmt", // 11 "pat", // 12 "expr", // 13 "ty", // 14 "ident", // 15 "path", // 16 "tt", // 17 "matchers", // 18 "str", // 19 "TyVisitor", // 20 "arg", // 21 "descrim", // 22 "__rust_abi", // 23 "__rust_stack_shim", // 24 "TyDesc", // 25 "main", // 26 "<opaque>", // 27 "blk", // 28 "static", // 29 "intrinsic", // 30 "__foreign_mod__", // 31 "__field__", // 32 "C", // 33 "Self", // 34 "as", // 35 "break", // 36 "const", // 37 "copy", // 38 "do", // 39 "else", // 40 "enum", // 41 "extern", // 42 "false", // 43 "fn", // 44 "for", // 45 "if", // 46 "impl", // 47 "let", // 48 "__log", // 49 "loop", // 50 "match", // 51 "mod", // 52 "mut", // 53 "once", // 54 "priv", // 55 "pub", // 56 "pure", // 57 "ref", // 58 "return", // 59 "static", // 29 -- also a special ident "self", // 8 -- also a special ident "struct", // 60 "super", // 61 "true", // 62 "trait", // 63 "type", // 64 "unsafe", // 65 "use", // 66 "while", // 67 "be", // 68 ]; @ident_interner { interner: interner::StrInterner::prefill(init_vec) } } // if an interner exists in TLS, return it. Otherwise, prepare a // fresh one. 
pub fn get_ident_interner() -> @ident_interner { unsafe { let key = (cast::transmute::<(uint, uint), &fn(v: @@::parse::token::ident_interner)>( (-3 as uint, 0u))); match local_data::local_data_get(key) { Some(interner) => *interner, None => { let interner = mk_fresh_ident_interner(); local_data::local_data_set(key, @interner); interner } } } } /* for when we don't care about the contents; doesn't interact with TLD or serialization */ pub fn mk_fake_ident_interner() -> @ident_interner { @ident_interner { interner: interner::StrInterner::new() } } // maps a string to its interned representation pub fn intern(str : &str) -> Name { let interner = get_ident_interner(); interner.intern(str) } // gensyms a new uint, using the current interner pub fn gensym(str : &str) -> Name { let interner = get_ident_interner(); interner.gensym(str) } // map an interned representation back to a string pub fn interner_get(name : Name) -> @str { get_ident_interner().get(name) } // maps an identifier to the string that it corresponds to pub fn ident_to_str(id : &ast::ident) -> @str { interner_get(id.name) } // maps a string to an identifier with an empty syntax context pub fn str_to_ident(str : &str) -> ast::ident { ast::new_ident(intern(str)) } // maps a string to a gensym'ed identifier pub fn gensym_ident(str : &str) -> ast::ident { ast::new_ident(gensym(str)) } // create a fresh name. In principle, this is just a // gensym, but for debugging purposes, you'd like the // resulting name to have a suggestive stringify, without // paying the cost of guaranteeing that the name is // truly unique. I'm going to try to strike a balance // by using a gensym with a name that has a random number // at the end. So, the gensym guarantees the uniqueness, // and the int helps to avoid confusion. pub fn fresh_name(src_name : &str) -> Name { let num = rand::rng().gen_uint_range(0,0xffff); gensym(fmt!("%s_%u",src_name,num)) } /** * All the valid words that have meaning in the Rust language. 
* * Rust keywords are either 'strict' or 'reserved'. Strict keywords may not * appear as identifiers at all. Reserved keywords are not used anywhere in * the language and may not appear as identifiers. */ pub mod keywords { use ast::ident; pub enum Keyword { // Strict keywords As, Break, Const, Copy, Do, Else, Enum, Extern, False, Fn, For, If, Impl, Let, __Log, Loop, Match, Mod, Mut, Once, Priv, Pub, Pure, Ref, Return, Static, Self, Struct, Super, True, Trait, Type, Unsafe, Use, While, // Reserved keywords Be, } impl Keyword { pub fn to_ident(&self) -> ident { match *self { As => ident { name: 35, ctxt: 0 }, Break => ident { name: 36, ctxt: 0 }, Const => ident { name: 37, ctxt: 0 }, Copy => ident { name: 38, ctxt: 0 }, Do => ident { name: 39, ctxt: 0 }, Else => ident { name: 40, ctxt: 0 }, Enum => ident { name: 41, ctxt: 0 }, Extern => ident { name: 42, ctxt: 0 }, False => ident { name: 43, ctxt: 0 }, Fn => ident { name: 44, ctxt: 0 }, For => ident { name: 45, ctxt: 0 }, If => ident { name: 46, ctxt: 0 }, Impl => ident { name: 47, ctxt: 0 }, Let => ident { name: 48, ctxt: 0 }, __Log => ident { name: 49, ctxt: 0 }, Loop => ident { name: 50, ctxt: 0 }, Match => ident { name: 51, ctxt: 0 }, Mod => ident { name: 52, ctxt: 0 }, Mut => ident { name: 53, ctxt: 0 }, Once => ident { name: 54, ctxt: 0 }, Priv => ident { name: 55, ctxt: 0 }, Pub => ident { name: 56, ctxt: 0 }, Pure => ident { name: 57, ctxt: 0 }, Ref => ident { name: 58, ctxt: 0 }, Return => ident { name: 59, ctxt: 0 }, Static => ident { name: 29, ctxt: 0 }, Self => ident { name: 8, ctxt: 0 }, Struct => ident { name: 60, ctxt: 0 }, Super => ident { name: 61, ctxt: 0 }, True => ident { name: 62, ctxt: 0 }, Trait => ident { name: 63, ctxt: 0 }, Type => ident { name: 64, ctxt: 0 }, Unsafe => ident { name: 65, ctxt: 0 }, Use => ident { name: 66, ctxt: 0 }, While => ident { name: 67, ctxt: 0 }, Be => ident { name: 68, ctxt: 0 }, } } } } pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool { match *tok { 
token::IDENT(sid, false) => { kw.to_ident().name == sid.name } _ => { false } } } pub fn is_any_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 8 | 29 | 35 .. 68 => true, _ => false, }, _ => false } } pub fn is_strict_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 8 | 29 | 35 .. 67 => true, _ => false, }, _ => false, } } pub fn is_reserved_keyword(tok: &Token) -> bool { match *tok { token::IDENT(sid, false) => match sid.name { 68 => true, _ => false, }, _ => false, } } #[cfg(test)] mod test { use super::*; use std::io; #[test] fn t1() { let a = fresh_name("ghi"); io::println(fmt!("interned name: %u,\ntextual name: %s\n", a,interner_get(a))); } }
// Hashline classification for the LaTeX preprocessor: an input line is
// either a hashline (`# name opts: args % comment`) that opens an
// environment or expands to a one-line command, or a plain line that is
// passed through unchanged.
use nom;

/// Result of classifying one input line.
#[derive(Debug, PartialEq)]
pub enum Hashline {
    // `# env opts:` with empty args — opens a LaTeX environment.
    OpenEnv(Environment),
    // Anything else — the (possibly rewritten) line text to emit.
    PlainLine(String),
}

/// A `\begin{...}` / `\end{...}` pair parsed from a hashline.
#[derive(Debug, PartialEq)]
pub struct Environment {
    indent_depth: usize, // number of leading spaces on the opening hashline
    name: String,        // environment name
    opts: String,        // raw option text between name and `:`
    comment: String,     // trailing comment, emitted verbatim after \begin
    is_list_like: bool,  // name parsed as itemize/enumerate/description
}

impl Environment {
    /// Renders `\begin{name}opts`, indented by `indent_depth` spaces, with
    /// the comment appended after a single separating space (no space when
    /// the comment is empty).
    pub fn latex_begin(&self) -> String {
        format!(r"{:ind$}\begin{{{}}}{}{:comment_sep$}{}",
                "",
                self.name,
                self.opts,
                "",
                self.comment,
                ind = self.indent_depth,
                comment_sep = if self.comment.is_empty() { 0 } else { 1 })
    }

    /// Renders `\end{name}` at the same indentation as the opening line.
    pub fn latex_end(&self) -> String {
        format!(r"{:ind$}\end{{{}}}", "", self.name, ind = self.indent_depth)
    }

    /// Indentation (in spaces) of the opening hashline.
    pub fn indent_depth(&self) -> usize {
        self.indent_depth
    }

    /// Whether itemlines (`* ...`) are meaningful inside this environment.
    pub fn is_list_like(&self) -> bool {
        self.is_list_like
    }
}

// Hashline parsers

// Recognizes the three list-like environment names (surrounding whitespace
// tolerated); used to populate Environment::is_list_like.
named!(
    list_env_parser<&[u8]>,
    ws!(alt!(tag!("itemize") | tag!("enumerate") | tag!("description")))
);
// `\:` — an escaped colon, yielding the bare `:` byte.
// (specific_byte!/none_of_bytes_as_bytes! are project-local macros —
// presumably byte-level single-char parsers; confirm against their defs.)
named!(escaped_colon<u8>,
       preceded!(specific_byte!('\\' as u8), specific_byte!(':' as u8)));
// `\%` — an escaped percent sign, yielding the bare `%` byte.
named!(escaped_percent<u8>,
       preceded!(specific_byte!('\\' as u8), specific_byte!('%' as u8)));
// One byte of the environment/command name: escaped colon, or anything
// except `:`, `%`, opening brackets, space, and tab.
named!(name_parser<u8>,
       alt!(escaped_colon | none_of_bytes_as_bytes!(b":%([{ \t")));
// One byte of the option text: escaped colon/percent, or anything but `:`/`%`.
named!(opts_parser<u8>,
       alt!(escaped_colon | escaped_percent | none_of_bytes_as_bytes!(b":%")));
// One byte of the argument text: escaped percent, or anything but `%`.
named!(args_parser<u8>,
       alt!(escaped_percent | none_of_bytes_as_bytes!(b"%")));
// Full hashline: `[spaces]# name opts : args rest-as-comment`.
named!(
    hashline_parser<Hashline>,
    do_parse!(
        ws: opt!(is_a!(" ")) >>
        tag!("# ") >>
        name: many1!(name_parser) >>
        opts: many0!(opts_parser) >>
        tag!(":") >>
        args: many0!(args_parser) >>
        comment: call!(nom::rest) >>
        (hashline_helper(ws.unwrap_or(&b""[..]), &name, &opts, &args, &comment))
    )
);

// Builds the Hashline from the raw parsed byte slices: empty args means an
// environment (OpenEnv), non-empty args means a one-line `\name opts{args}`
// command (PlainLine). Unescaped `%` in opts/args is re-escaped for LaTeX.
#[inline]
fn hashline_helper(ws: &[u8], name: &[u8], opts: &[u8], args: &[u8], comment: &[u8]) -> Hashline {
    use std::str::from_utf8;
    use self::Hashline::{PlainLine, OpenEnv};

    // It is ok to unwrap here, since we have checked for UTF-8 when we read the file
    let name_utf8 = from_utf8(name).unwrap().trim();
    let opts_utf8 = from_utf8(opts).unwrap().trim().replace("%", r"\%");
    let args_utf8 =
from_utf8(args).unwrap().trim().replace("%", r"\%"); let comment_utf8 = from_utf8(comment).unwrap().trim(); if args_utf8.is_empty() { // If no args are given, it's an environment let env = Environment { indent_depth: ws.len(), name: name_utf8.to_string(), opts: opts_utf8.to_string(), comment: comment_utf8.to_string(), is_list_like: list_env_parser(name).is_done(), }; OpenEnv(env) } else { // If there are some args, it's a single-line command let ws_utf8 = from_utf8(ws).unwrap(); PlainLine(format!(r"{}\{}{}{{{}}}{:comment_sep$}{}", ws_utf8, name_utf8, opts_utf8, args_utf8, "", comment_utf8, comment_sep = if comment_utf8.is_empty() { 0 } else { 1 })) } } // Hashline processing #[inline] fn process_hashline<T: AsRef<str>>(line: T) -> Option<Hashline> { use nom::IResult::{Done, Error, Incomplete}; match hashline_parser(line.as_ref().as_bytes()) { Done(_, r) => Some(r), Error(_) | Incomplete(_) => None, } } // Itemline parsers named!( itemline_parser<Hashline>, do_parse!( ws: opt!(is_a!(" ")) >> tag!("*") >> item: call!(nom::rest) >> (itemline_helper(ws.unwrap_or(&b""[..]), item)) ) ); #[inline] fn itemline_helper(ws: &[u8], item: &[u8]) -> Hashline { use std::str::from_utf8; use self::Hashline::PlainLine; let ws_utf8 = from_utf8(ws).unwrap(); let item_utf8 = from_utf8(item).unwrap().trim(); PlainLine(format!(r"{}\item{:item_sep$}{}", ws_utf8, "", item_utf8, item_sep = if item_utf8.is_empty() { 0 } else { 1 })) } // Itemline processing #[inline] fn process_itemline<T: AsRef<str>>(line: T) -> Option<Hashline> { use nom::IResult::{Done, Error, Incomplete}; match itemline_parser(line.as_ref().as_bytes()) { Done(_, r) => Some(r), Error(_) | Incomplete(_) => None, } } // Fully process line pub fn process_line<T>(line: T, list_like_active: bool) -> Hashline where T: AsRef<str> { use self::Hashline::PlainLine; match process_hashline(&line) { Some(r) => r, None => { if list_like_active { match process_itemline(&line) { Some(r) => r, None => PlainLine(line.as_ref().to_string()), 
} } else { PlainLine(line.as_ref().to_string()) } } } } #[cfg(test)] mod tests { use nom::IResult::{Done, Error, Incomplete}; use nom::{ErrorKind, Needed}; macro_rules! nil { () => ("".as_bytes()); } macro_rules! ws_1 { () => (" ".as_bytes()); } macro_rules! ws_2 { () => (" ".as_bytes()); } macro_rules! ws_4 { () => (" ".as_bytes()); } macro_rules! foo { () => ("foo".as_bytes()); } macro_rules! bar { () => ("bar".as_bytes()); } macro_rules! qux { () => ("qux".as_bytes()); } #[test] fn hashline_helper_plain_lines() { use super::{Hashline, hashline_helper}; assert_eq!(hashline_helper(nil!(), foo!(), nil!(), bar!(), nil!()), Hashline::PlainLine("\\foo{bar}".to_string())); assert_eq!(hashline_helper(ws_2!(), foo!(), nil!(), bar!(), qux!()), Hashline::PlainLine(" \\foo{bar} qux".to_string())); assert_eq!(hashline_helper(ws_4!(), foo!(), bar!(), qux!(), nil!()), Hashline::PlainLine(" \\foobar{qux}".to_string())); } #[test] fn hashline_helper_environments() { use super::{Hashline, Environment, hashline_helper}; let env_ref_1 = Environment { indent_depth: 0, name: "foo".to_string(), opts: "bar".to_string(), comment: "".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(nil!(), foo!(), bar!(), nil!(), nil!()), Hashline::OpenEnv(env_ref_1)); let env_ref_2 = Environment { indent_depth: 2, name: "foo".to_string(), opts: "".to_string(), comment: "bar".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(ws_2!(), foo!(), nil!(), nil!(), bar!()), Hashline::OpenEnv(env_ref_2)); let env_ref_3 = Environment { indent_depth: 4, name: "foo".to_string(), opts: "bar".to_string(), comment: "qux".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(ws_4!(), foo!(), bar!(), nil!(), qux!()), Hashline::OpenEnv(env_ref_3)); } #[test] fn itemline_helper() { use super::{Hashline, itemline_helper}; assert_eq!(itemline_helper(ws_2!(), foo!()), Hashline::PlainLine(" \\item foo".to_string())); // Test that no whitespace is put after `\item` if no item is given 
assert_eq!(itemline_helper(ws_1!(), nil!()), Hashline::PlainLine(" \\item".to_string())); } #[test] fn process_itemline() { use super::{Hashline, process_itemline}; // Valid itemlines assert_eq!(process_itemline("*"), Some(Hashline::PlainLine("\\item".to_string()))); assert_eq!(process_itemline("* "), Some(Hashline::PlainLine("\\item".to_string()))); assert_eq!(process_itemline(" *"), Some(Hashline::PlainLine(" \\item".to_string()))); assert_eq!(process_itemline(" * "), Some(Hashline::PlainLine(" \\item".to_string()))); assert_eq!(process_itemline("* foo"), Some(Hashline::PlainLine("\\item foo".to_string()))); assert_eq!(process_itemline(" * bar"), Some(Hashline::PlainLine(" \\item bar".to_string()))); assert_eq!(process_itemline("****"), Some(Hashline::PlainLine("\\item ***".to_string()))); // Not an itemline assert_eq!(process_itemline(" baz"), None); assert_eq!(process_itemline("qux *"), None); assert_eq!(process_itemline(" abc * def"), None); assert_eq!(process_itemline(" \\* "), None); assert_eq!(process_itemline("\\* "), None); } #[test] fn environment_methods() { use super::Environment; let env_1 = Environment { indent_depth: 0, name: "foo".to_string(), opts: "bar".to_string(), comment: "% baz".to_string(), is_list_like: true, }; assert_eq!(env_1.latex_begin(), "\\begin{foo}bar % baz"); assert_eq!(env_1.latex_end(), "\\end{foo}"); assert_eq!(env_1.is_list_like(), true); assert_eq!(env_1.indent_depth(), 0); let env_2 = Environment { indent_depth: 2, name: "abc".to_string(), opts: "def".to_string(), comment: "".to_string(), is_list_like: false, }; assert_eq!(env_2.latex_begin(), " \\begin{abc}def"); assert_eq!(env_2.latex_end(), " \\end{abc}"); assert_eq!(env_2.is_list_like(), false); assert_eq!(env_2.indent_depth(), 2); } #[test] fn list_env_parser() { use super::list_env_parser; let a = b"itemize"; let b = b"enumerate*"; let c = b" description *"; let d = b"item"; let e = b"foobar"; assert_eq!(list_env_parser(&a[..]), Done(&b""[..], &a[..])); 
assert_eq!(list_env_parser(&b[..]), Done(&b"*"[..], &b"enumerate"[..])); assert_eq!(list_env_parser(&c[..]), Done(&b"*"[..], &b"description"[..])); assert_eq!(list_env_parser(&d[..]), Incomplete(Needed::Size(7))); assert_eq!(list_env_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } #[test] fn escaped_colon() { use super::escaped_colon; let a = br"\:"; let c = b"ab"; assert_eq!(escaped_colon(&a[..]), Done(&b""[..], ':' as u8)); assert_eq!(escaped_colon(nil!()), Incomplete(Needed::Size(1))); assert_eq!(escaped_colon(&c[..]), Error(error_position!(ErrorKind::Char, &c[..]))); } #[test] fn escaped_percent() { use super::escaped_percent; let a = br"\%"; let c = b"ab"; assert_eq!(escaped_percent(&a[..]), Done(&b""[..], '%' as u8)); assert_eq!(escaped_percent(nil!()), Incomplete(Needed::Size(1))); assert_eq!(escaped_percent(&c[..]), Error(error_position!(ErrorKind::Char, &c[..]))); } #[test] fn name_parser() { use super::name_parser; assert_eq!(name_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(name_parser(&br"\:abc"[..]), Done(&b"abc"[..], ':' as u8)); assert_eq!(name_parser(&b""[..]), Incomplete(Needed::Size(1))); for e in vec![b":E", b"%E", b"(E", b"[E", b"{E", b" E", b"\tE"] { assert_eq!(name_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } } #[test] fn opts_parser() { use super::opts_parser; assert_eq!(opts_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(opts_parser(&br"\:abc"[..]), Done(&b"abc"[..], ':' as u8)); assert_eq!(opts_parser(&br"\%abc"[..]), Done(&b"abc"[..], '%' as u8)); assert_eq!(opts_parser(&br"(abc"[..]), Done(&b"abc"[..], '(' as u8)); assert_eq!(opts_parser(&br"[abc"[..]), Done(&b"abc"[..], '[' as u8)); assert_eq!(opts_parser(&br" abc"[..]), Done(&b"abc"[..], ' ' as u8)); assert_eq!(opts_parser(&b""[..]), Incomplete(Needed::Size(1))); for e in vec![b":E", b"%E"] { assert_eq!(opts_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } } #[test] fn args_parser() { use 
super::args_parser; assert_eq!(args_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(args_parser(&br"\:abc"[..]), Done(&b":abc"[..], '\\' as u8)); assert_eq!(args_parser(&br"\%abc"[..]), Done(&b"abc"[..], '%' as u8)); assert_eq!(args_parser(&br"(abc"[..]), Done(&b"abc"[..], '(' as u8)); assert_eq!(args_parser(&br"[abc"[..]), Done(&b"abc"[..], '[' as u8)); assert_eq!(args_parser(&br" abc"[..]), Done(&b"abc"[..], ' ' as u8)); assert_eq!(args_parser(&b""[..]), Incomplete(Needed::Size(1))); assert_eq!(args_parser(&b"%E"[..]), Error(error_position!(ErrorKind::Alt, &b"%E"[..]))); } } Added an extra case to test the is_list_like field use nom; #[derive(Debug, PartialEq)] pub enum Hashline { OpenEnv(Environment), PlainLine(String), } #[derive(Debug, PartialEq)] pub struct Environment { indent_depth: usize, name: String, opts: String, comment: String, is_list_like: bool, } impl Environment { pub fn latex_begin(&self) -> String { format!(r"{:ind$}\begin{{{}}}{}{:comment_sep$}{}", "", self.name, self.opts, "", self.comment, ind = self.indent_depth, comment_sep = if self.comment.is_empty() { 0 } else { 1 }) } pub fn latex_end(&self) -> String { format!(r"{:ind$}\end{{{}}}", "", self.name, ind = self.indent_depth) } pub fn indent_depth(&self) -> usize { self.indent_depth } pub fn is_list_like(&self) -> bool { self.is_list_like } } // Hashline parsers named!( list_env_parser<&[u8]>, ws!(alt!(tag!("itemize") | tag!("enumerate") | tag!("description"))) ); named!(escaped_colon<u8>, preceded!(specific_byte!('\\' as u8), specific_byte!(':' as u8))); named!(escaped_percent<u8>, preceded!(specific_byte!('\\' as u8), specific_byte!('%' as u8))); named!(name_parser<u8>, alt!(escaped_colon | none_of_bytes_as_bytes!(b":%([{ \t"))); named!(opts_parser<u8>, alt!(escaped_colon | escaped_percent | none_of_bytes_as_bytes!(b":%"))); named!(args_parser<u8>, alt!(escaped_percent | none_of_bytes_as_bytes!(b"%"))); named!( hashline_parser<Hashline>, do_parse!( ws: opt!(is_a!(" ")) >> 
tag!("# ") >> name: many1!(name_parser) >> opts: many0!(opts_parser) >> tag!(":") >> args: many0!(args_parser) >> comment: call!(nom::rest) >> (hashline_helper(ws.unwrap_or(&b""[..]), &name, &opts, &args, &comment)) ) ); #[inline] fn hashline_helper(ws: &[u8], name: &[u8], opts: &[u8], args: &[u8], comment: &[u8]) -> Hashline { use std::str::from_utf8; use self::Hashline::{PlainLine, OpenEnv}; // It is ok to unwrap here, since we have checked for UTF-8 when we read the file let name_utf8 = from_utf8(name).unwrap().trim(); let opts_utf8 = from_utf8(opts).unwrap().trim().replace("%", r"\%"); let args_utf8 = from_utf8(args).unwrap().trim().replace("%", r"\%"); let comment_utf8 = from_utf8(comment).unwrap().trim(); if args_utf8.is_empty() { // If no args are given, it's an environment let env = Environment { indent_depth: ws.len(), name: name_utf8.to_string(), opts: opts_utf8.to_string(), comment: comment_utf8.to_string(), is_list_like: list_env_parser(name).is_done(), }; OpenEnv(env) } else { // If there are some args, it's a single-line command let ws_utf8 = from_utf8(ws).unwrap(); PlainLine(format!(r"{}\{}{}{{{}}}{:comment_sep$}{}", ws_utf8, name_utf8, opts_utf8, args_utf8, "", comment_utf8, comment_sep = if comment_utf8.is_empty() { 0 } else { 1 })) } } // Hashline processing #[inline] fn process_hashline<T: AsRef<str>>(line: T) -> Option<Hashline> { use nom::IResult::{Done, Error, Incomplete}; match hashline_parser(line.as_ref().as_bytes()) { Done(_, r) => Some(r), Error(_) | Incomplete(_) => None, } } // Itemline parsers named!( itemline_parser<Hashline>, do_parse!( ws: opt!(is_a!(" ")) >> tag!("*") >> item: call!(nom::rest) >> (itemline_helper(ws.unwrap_or(&b""[..]), item)) ) ); #[inline] fn itemline_helper(ws: &[u8], item: &[u8]) -> Hashline { use std::str::from_utf8; use self::Hashline::PlainLine; let ws_utf8 = from_utf8(ws).unwrap(); let item_utf8 = from_utf8(item).unwrap().trim(); PlainLine(format!(r"{}\item{:item_sep$}{}", ws_utf8, "", item_utf8, item_sep = 
if item_utf8.is_empty() { 0 } else { 1 })) } // Itemline processing #[inline] fn process_itemline<T: AsRef<str>>(line: T) -> Option<Hashline> { use nom::IResult::{Done, Error, Incomplete}; match itemline_parser(line.as_ref().as_bytes()) { Done(_, r) => Some(r), Error(_) | Incomplete(_) => None, } } // Fully process line pub fn process_line<T>(line: T, list_like_active: bool) -> Hashline where T: AsRef<str> { use self::Hashline::PlainLine; match process_hashline(&line) { Some(r) => r, None => { if list_like_active { match process_itemline(&line) { Some(r) => r, None => PlainLine(line.as_ref().to_string()), } } else { PlainLine(line.as_ref().to_string()) } } } } #[cfg(test)] mod tests { use nom::IResult::{Done, Error, Incomplete}; use nom::{ErrorKind, Needed}; macro_rules! nil { () => ("".as_bytes()); } macro_rules! ws_1 { () => (" ".as_bytes()); } macro_rules! ws_2 { () => (" ".as_bytes()); } macro_rules! ws_4 { () => (" ".as_bytes()); } macro_rules! foo { () => ("foo".as_bytes()); } macro_rules! bar { () => ("bar".as_bytes()); } macro_rules! qux { () => ("qux".as_bytes()); } macro_rules! 
itemize { () => ("itemize".as_bytes()); } #[test] fn hashline_helper_plain_lines() { use super::{Hashline, hashline_helper}; assert_eq!(hashline_helper(nil!(), foo!(), nil!(), bar!(), nil!()), Hashline::PlainLine("\\foo{bar}".to_string())); assert_eq!(hashline_helper(ws_2!(), foo!(), nil!(), bar!(), qux!()), Hashline::PlainLine(" \\foo{bar} qux".to_string())); assert_eq!(hashline_helper(ws_4!(), foo!(), bar!(), qux!(), nil!()), Hashline::PlainLine(" \\foobar{qux}".to_string())); } #[test] fn hashline_helper_environments() { use super::{Hashline, Environment, hashline_helper}; let env_ref_1 = Environment { indent_depth: 0, name: "foo".to_string(), opts: "bar".to_string(), comment: "".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(nil!(), foo!(), bar!(), nil!(), nil!()), Hashline::OpenEnv(env_ref_1)); let env_ref_2 = Environment { indent_depth: 2, name: "foo".to_string(), opts: "".to_string(), comment: "bar".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(ws_2!(), foo!(), nil!(), nil!(), bar!()), Hashline::OpenEnv(env_ref_2)); let env_ref_3 = Environment { indent_depth: 4, name: "foo".to_string(), opts: "bar".to_string(), comment: "qux".to_string(), is_list_like: false, }; assert_eq!(hashline_helper(ws_4!(), foo!(), bar!(), nil!(), qux!()), Hashline::OpenEnv(env_ref_3)); let env_ref_4 = Environment { indent_depth: 0, name: "itemize".to_string(), opts: "bar".to_string(), comment: "qux".to_string(), is_list_like: true, }; assert_eq!(hashline_helper(nil!(), itemize!(), bar!(), nil!(), qux!()), Hashline::OpenEnv(env_ref_4)); } #[test] fn itemline_helper() { use super::{Hashline, itemline_helper}; assert_eq!(itemline_helper(ws_2!(), foo!()), Hashline::PlainLine(" \\item foo".to_string())); // Test that no whitespace is put after `\item` if no item is given assert_eq!(itemline_helper(ws_1!(), nil!()), Hashline::PlainLine(" \\item".to_string())); } #[test] fn process_itemline() { use super::{Hashline, process_itemline}; // Valid itemlines 
assert_eq!(process_itemline("*"), Some(Hashline::PlainLine("\\item".to_string()))); assert_eq!(process_itemline("* "), Some(Hashline::PlainLine("\\item".to_string()))); assert_eq!(process_itemline(" *"), Some(Hashline::PlainLine(" \\item".to_string()))); assert_eq!(process_itemline(" * "), Some(Hashline::PlainLine(" \\item".to_string()))); assert_eq!(process_itemline("* foo"), Some(Hashline::PlainLine("\\item foo".to_string()))); assert_eq!(process_itemline(" * bar"), Some(Hashline::PlainLine(" \\item bar".to_string()))); assert_eq!(process_itemline("****"), Some(Hashline::PlainLine("\\item ***".to_string()))); // Not an itemline assert_eq!(process_itemline(" baz"), None); assert_eq!(process_itemline("qux *"), None); assert_eq!(process_itemline(" abc * def"), None); assert_eq!(process_itemline(" \\* "), None); assert_eq!(process_itemline("\\* "), None); } #[test] fn environment_methods() { use super::Environment; let env_1 = Environment { indent_depth: 0, name: "foo".to_string(), opts: "bar".to_string(), comment: "% baz".to_string(), is_list_like: true, }; assert_eq!(env_1.latex_begin(), "\\begin{foo}bar % baz"); assert_eq!(env_1.latex_end(), "\\end{foo}"); assert_eq!(env_1.is_list_like(), true); assert_eq!(env_1.indent_depth(), 0); let env_2 = Environment { indent_depth: 2, name: "abc".to_string(), opts: "def".to_string(), comment: "".to_string(), is_list_like: false, }; assert_eq!(env_2.latex_begin(), " \\begin{abc}def"); assert_eq!(env_2.latex_end(), " \\end{abc}"); assert_eq!(env_2.is_list_like(), false); assert_eq!(env_2.indent_depth(), 2); } #[test] fn list_env_parser() { use super::list_env_parser; let a = b"itemize"; let b = b"enumerate*"; let c = b" description *"; let d = b"item"; let e = b"foobar"; assert_eq!(list_env_parser(&a[..]), Done(&b""[..], &a[..])); assert_eq!(list_env_parser(&b[..]), Done(&b"*"[..], &b"enumerate"[..])); assert_eq!(list_env_parser(&c[..]), Done(&b"*"[..], &b"description"[..])); assert_eq!(list_env_parser(&d[..]), 
Incomplete(Needed::Size(7))); assert_eq!(list_env_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } #[test] fn escaped_colon() { use super::escaped_colon; let a = br"\:"; let c = b"ab"; assert_eq!(escaped_colon(&a[..]), Done(&b""[..], ':' as u8)); assert_eq!(escaped_colon(nil!()), Incomplete(Needed::Size(1))); assert_eq!(escaped_colon(&c[..]), Error(error_position!(ErrorKind::Char, &c[..]))); } #[test] fn escaped_percent() { use super::escaped_percent; let a = br"\%"; let c = b"ab"; assert_eq!(escaped_percent(&a[..]), Done(&b""[..], '%' as u8)); assert_eq!(escaped_percent(nil!()), Incomplete(Needed::Size(1))); assert_eq!(escaped_percent(&c[..]), Error(error_position!(ErrorKind::Char, &c[..]))); } #[test] fn name_parser() { use super::name_parser; assert_eq!(name_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(name_parser(&br"\:abc"[..]), Done(&b"abc"[..], ':' as u8)); assert_eq!(name_parser(&b""[..]), Incomplete(Needed::Size(1))); for e in vec![b":E", b"%E", b"(E", b"[E", b"{E", b" E", b"\tE"] { assert_eq!(name_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } } #[test] fn opts_parser() { use super::opts_parser; assert_eq!(opts_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(opts_parser(&br"\:abc"[..]), Done(&b"abc"[..], ':' as u8)); assert_eq!(opts_parser(&br"\%abc"[..]), Done(&b"abc"[..], '%' as u8)); assert_eq!(opts_parser(&br"(abc"[..]), Done(&b"abc"[..], '(' as u8)); assert_eq!(opts_parser(&br"[abc"[..]), Done(&b"abc"[..], '[' as u8)); assert_eq!(opts_parser(&br" abc"[..]), Done(&b"abc"[..], ' ' as u8)); assert_eq!(opts_parser(&b""[..]), Incomplete(Needed::Size(1))); for e in vec![b":E", b"%E"] { assert_eq!(opts_parser(&e[..]), Error(error_position!(ErrorKind::Alt, &e[..]))); } } #[test] fn args_parser() { use super::args_parser; assert_eq!(args_parser(&br"abc"[..]), Done(&b"bc"[..], 'a' as u8)); assert_eq!(args_parser(&br"\:abc"[..]), Done(&b":abc"[..], '\\' as u8)); 
assert_eq!(args_parser(&br"\%abc"[..]), Done(&b"abc"[..], '%' as u8)); assert_eq!(args_parser(&br"(abc"[..]), Done(&b"abc"[..], '(' as u8)); assert_eq!(args_parser(&br"[abc"[..]), Done(&b"abc"[..], '[' as u8)); assert_eq!(args_parser(&br" abc"[..]), Done(&b"abc"[..], ' ' as u8)); assert_eq!(args_parser(&b""[..]), Incomplete(Needed::Size(1))); assert_eq!(args_parser(&b"%E"[..]), Error(error_position!(ErrorKind::Alt, &b"%E"[..]))); } }
//! Compiler plugin for Rust-PHF //! //! See the documentation for the `phf` crate for more details. #![crate_id="github.com/sfackler/rust-phf/phf_mac"] #![crate_type="dylib"] #![doc(html_root_url="http://www.rust-ci.org/sfackler/rust-phf/doc")] #![feature(managed_boxes, macro_registrar, quote)] extern crate collections; extern crate rand; extern crate syntax; extern crate time; extern crate phf; use collections::HashMap; use std::os; use syntax::ast; use syntax::ast::{Name, TokenTree, LitStr, Expr, ExprVec, ExprLit}; use syntax::codemap::Span; use syntax::ext::base::{SyntaxExtension, ExtCtxt, MacResult, MRExpr, NormalTT, BasicMacroExpander}; use syntax::parse; use syntax::parse::token; use syntax::parse::token::{InternedString, COMMA, EOF, FAT_ARROW}; use rand::{Rng, SeedableRng, XorShiftRng}; static DEFAULT_LAMBDA: uint = 5; static FIXED_SEED: [u32, ..4] = [3141592653, 589793238, 462643383, 2795028841]; #[macro_registrar] #[doc(hidden)] pub fn macro_registrar(register: |Name, SyntaxExtension|) { register(token::intern("phf_map"), NormalTT(~BasicMacroExpander { expander: expand_phf_map, span: None }, None)); } struct Entry { key_str: InternedString, key: @Expr, value: @Expr } fn expand_phf_map(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> MacResult { let entries = match parse_entries(cx, tts) { Some(entries) => entries, None => return MacResult::dummy_expr(sp) }; if has_duplicates(cx, sp, entries.as_slice()) { return MacResult::dummy_expr(sp); } let mut rng: XorShiftRng = SeedableRng::from_seed(FIXED_SEED); let start = time::precise_time_s(); let state; loop { match generate_hash(entries.as_slice(), &mut rng) { Some(s) => { state = s; break; } None => {} } } let time = time::precise_time_s() - start; if os::getenv("PHF_STATS").is_some() { cx.span_note(sp, format!("PHF generation took {} seconds", time)); } create_map(cx, sp, entries, state) } fn parse_entries(cx: &mut ExtCtxt, tts: &[TokenTree]) -> Option<Vec<Entry>> { let mut parser = 
parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(), Vec::from_slice(tts)); let mut entries = Vec::new(); let mut bad = false; while parser.token != EOF { let key = cx.expand_expr(parser.parse_expr()); let key_str = match key.node { ExprLit(lit) => { match lit.node { LitStr(ref s, _) => s.clone(), _ => { cx.span_err(key.span, "expected string literal"); bad = true; InternedString::new("") } } } _ => { cx.span_err(key.span, "expected string literal"); bad = true; InternedString::new("") } }; if !parser.eat(&FAT_ARROW) { cx.span_err(parser.span, "expected `=>`"); return None; } let value = parser.parse_expr(); entries.push(Entry { key_str: key_str, key: key, value: value }); if !parser.eat(&COMMA) && parser.token != EOF { cx.span_err(parser.span, "expected `,`"); return None; } } if entries.len() > phf::MAX_SIZE { cx.span_err(parser.span, format!("maps with more than {} entries are not supported", phf::MAX_SIZE)); return None; } if bad { return None; } Some(entries) } fn has_duplicates(cx: &mut ExtCtxt, sp: Span, entries: &[Entry]) -> bool { let mut dups = false; let mut strings = HashMap::new(); for entry in entries.iter() { strings.insert_or_update_with(entry.key_str.clone(), (entry, true), |_, &(orig, ref mut first)| { if *first { cx.span_err(sp, format!("duplicate key \"{}\"", entry.key_str)); cx.span_note(orig.key.span, "one occurrence here"); *first = false; } cx.span_note(entry.key.span, "one occurrence here"); dups = true; }); } dups } struct HashState { k1: u64, k2: u64, disps: Vec<(uint, uint)>, map: Vec<uint>, } fn generate_hash(entries: &[Entry], rng: &mut XorShiftRng) -> Option<HashState> { struct Bucket { idx: uint, keys: Vec<uint>, } struct Hashes { g: uint, f1: uint, f2: uint, } let k1 = rng.gen(); let k2 = rng.gen(); let hashes: Vec<Hashes> = entries.iter().map(|entry| { let (g, f1, f2) = phf::hash(entry.key_str.get(), k1, k2); Hashes { g: g, f1: f1, f2: f2 } }).collect(); let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA; let 
mut buckets = Vec::from_fn(buckets_len, |i| Bucket { idx: i, keys: Vec::new() }); for (i, hash) in hashes.iter().enumerate() { buckets.get_mut(hash.g % buckets_len).keys.push(i); } // Sort descending buckets.sort_by(|a, b| b.keys.len().cmp(&a.keys.len())); let table_len = entries.len(); let mut map = Vec::from_elem(table_len, None); let mut disps = Vec::from_elem(buckets_len, (0u, 0u)); let mut try_map = HashMap::new(); 'buckets: for bucket in buckets.iter() { for d1 in range(0, table_len) { 'disps_l: for d2 in range(0, table_len) { try_map.clear(); for &key in bucket.keys.iter() { let idx = phf::displace(hashes.get(key).f1, hashes.get(key).f2, d1, d2) % table_len; if map.get(idx).is_some() || try_map.find(&idx).is_some() { continue 'disps_l; } try_map.insert(idx, key); } // We've picked a good set of disps *disps.get_mut(bucket.idx) = (d1, d2); for (&idx, &key) in try_map.iter() { *map.get_mut(idx) = Some(key); } continue 'buckets; } } // Unable to find displacements for a bucket return None; } let map = map.move_iter().map(|i| i.expect("should have a value")) .collect(); Some(HashState { k1: k1, k2: k2, disps: disps, map: map, }) } fn create_map(cx: &mut ExtCtxt, sp: Span, entries: Vec<Entry>, state: HashState) -> MacResult { let disps = state.disps.iter().map(|&(d1, d2)| { quote_expr!(&*cx, ($d1, $d2)) }).collect(); let disps = @Expr { id: ast::DUMMY_NODE_ID, node: ExprVec(disps), span: sp, }; let entries = state.map.iter().map(|&idx| { let &Entry { key, value, .. } = entries.get(idx); quote_expr!(&*cx, ($key, $value)) }).collect(); let entries = @Expr { id: ast::DUMMY_NODE_ID, node: ExprVec(entries), span: sp, }; let k1 = state.k1; let k2 = state.k2; MRExpr(quote_expr!(cx, PhfMap { k1: $k1, k2: $k2, disps: &'static $disps, entries: &'static $entries, })) } Moar cleanup //! Compiler plugin for Rust-PHF //! //! See the documentation for the `phf` crate for more details. 
#![crate_id="github.com/sfackler/rust-phf/phf_mac"] #![crate_type="dylib"] #![doc(html_root_url="http://www.rust-ci.org/sfackler/rust-phf/doc")] #![feature(managed_boxes, macro_registrar, quote)] extern crate collections; extern crate rand; extern crate syntax; extern crate time; extern crate phf; use collections::HashMap; use std::os; use syntax::ast; use syntax::ast::{Name, TokenTree, LitStr, Expr, ExprVec, ExprLit}; use syntax::codemap::Span; use syntax::ext::base::{SyntaxExtension, ExtCtxt, MacResult, MRExpr, NormalTT, BasicMacroExpander}; use syntax::parse; use syntax::parse::token; use syntax::parse::token::{InternedString, COMMA, EOF, FAT_ARROW}; use rand::{Rng, SeedableRng, XorShiftRng}; static DEFAULT_LAMBDA: uint = 5; static FIXED_SEED: [u32, ..4] = [3141592653, 589793238, 462643383, 2795028841]; #[macro_registrar] #[doc(hidden)] pub fn macro_registrar(register: |Name, SyntaxExtension|) { register(token::intern("phf_map"), NormalTT(~BasicMacroExpander { expander: expand_phf_map, span: None }, None)); } struct Entry { key_str: InternedString, key: @Expr, value: @Expr } fn expand_phf_map(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> MacResult { let entries = match parse_entries(cx, tts) { Some(entries) => entries, None => return MacResult::dummy_expr(sp) }; if has_duplicates(cx, sp, entries.as_slice()) { return MacResult::dummy_expr(sp); } let mut rng: XorShiftRng = SeedableRng::from_seed(FIXED_SEED); let start = time::precise_time_s(); let state; loop { match generate_hash(entries.as_slice(), &mut rng) { Some(s) => { state = s; break; } None => {} } } let time = time::precise_time_s() - start; if os::getenv("PHF_STATS").is_some() { cx.span_note(sp, format!("PHF generation took {} seconds", time)); } create_map(cx, sp, entries, state) } fn parse_entries(cx: &mut ExtCtxt, tts: &[TokenTree]) -> Option<Vec<Entry>> { let mut parser = parse::new_parser_from_tts(cx.parse_sess(), cx.cfg(), Vec::from_slice(tts)); let mut entries = Vec::new(); let mut bad = 
false; while parser.token != EOF { let key = cx.expand_expr(parser.parse_expr()); let key_str = match key.node { ExprLit(lit) => { match lit.node { LitStr(ref s, _) => s.clone(), _ => { cx.span_err(key.span, "expected string literal"); bad = true; InternedString::new("") } } } _ => { cx.span_err(key.span, "expected string literal"); bad = true; InternedString::new("") } }; if !parser.eat(&FAT_ARROW) { cx.span_err(parser.span, "expected `=>`"); return None; } let value = parser.parse_expr(); entries.push(Entry { key_str: key_str, key: key, value: value }); if !parser.eat(&COMMA) && parser.token != EOF { cx.span_err(parser.span, "expected `,`"); return None; } } if entries.len() > phf::MAX_SIZE { cx.span_err(parser.span, format!("maps with more than {} entries are not supported", phf::MAX_SIZE)); return None; } if bad { return None; } Some(entries) } fn has_duplicates(cx: &mut ExtCtxt, sp: Span, entries: &[Entry]) -> bool { let mut dups = false; let mut strings = HashMap::new(); for entry in entries.iter() { strings.insert_or_update_with(entry.key_str.clone(), (entry, true), |_, &(orig, ref mut first)| { if *first { cx.span_err(sp, format!("duplicate key \"{}\"", entry.key_str)); cx.span_note(orig.key.span, "one occurrence here"); *first = false; } cx.span_note(entry.key.span, "one occurrence here"); dups = true; }); } dups } struct HashState { k1: u64, k2: u64, disps: Vec<(uint, uint)>, map: Vec<uint>, } fn generate_hash(entries: &[Entry], rng: &mut XorShiftRng) -> Option<HashState> { struct Bucket { idx: uint, keys: Vec<uint>, } struct Hashes { g: uint, f1: uint, f2: uint, } let k1 = rng.gen(); let k2 = rng.gen(); let hashes: Vec<Hashes> = entries.iter().map(|entry| { let (g, f1, f2) = phf::hash(entry.key_str.get(), k1, k2); Hashes { g: g, f1: f1, f2: f2 } }).collect(); let buckets_len = (entries.len() + DEFAULT_LAMBDA - 1) / DEFAULT_LAMBDA; let mut buckets = Vec::from_fn(buckets_len, |i| Bucket { idx: i, keys: Vec::new() }); for (i, hash) in 
hashes.iter().enumerate() { buckets.get_mut(hash.g % buckets_len).keys.push(i); } // Sort descending buckets.sort_by(|a, b| b.keys.len().cmp(&a.keys.len())); let table_len = entries.len(); let mut map = Vec::from_elem(table_len, None); let mut disps = Vec::from_elem(buckets_len, (0u, 0u)); let mut try_map = HashMap::new(); 'buckets: for bucket in buckets.iter() { for d1 in range(0, table_len) { 'disps_l: for d2 in range(0, table_len) { try_map.clear(); for &key in bucket.keys.iter() { let idx = phf::displace(hashes.get(key).f1, hashes.get(key).f2, d1, d2) % table_len; if map.get(idx).is_some() || try_map.find(&idx).is_some() { continue 'disps_l; } try_map.insert(idx, key); } // We've picked a good set of disps *disps.get_mut(bucket.idx) = (d1, d2); for (&idx, &key) in try_map.iter() { *map.get_mut(idx) = Some(key); } continue 'buckets; } } // Unable to find displacements for a bucket return None; } Some(HashState { k1: k1, k2: k2, disps: disps, map: map.move_iter().map(|i| i.unwrap()).collect(), }) } fn create_map(cx: &mut ExtCtxt, sp: Span, entries: Vec<Entry>, state: HashState) -> MacResult { let disps = state.disps.iter().map(|&(d1, d2)| { quote_expr!(&*cx, ($d1, $d2)) }).collect(); let disps = @Expr { id: ast::DUMMY_NODE_ID, node: ExprVec(disps), span: sp, }; let entries = state.map.iter().map(|&idx| { let &Entry { key, value, .. } = entries.get(idx); quote_expr!(&*cx, ($key, $value)) }).collect(); let entries = @Expr { id: ast::DUMMY_NODE_ID, node: ExprVec(entries), span: sp, }; let k1 = state.k1; let k2 = state.k2; MRExpr(quote_expr!(cx, PhfMap { k1: $k1, k2: $k2, disps: &'static $disps, entries: &'static $entries, })) }
use ffi::*; use pipeline::Pipeline; use pipeline::PipelineT; use element::Element; use element::ElementT; use std::ffi::CString; use ::Transfer; unsafe impl Sync for PlayBin {} unsafe impl Send for PlayBin {} pub struct PlayBin{ playbin: Pipeline } impl PlayBin{ pub fn new(name: &str) -> Option<PlayBin>{ let pipeline = Element::new("playbin",name); match pipeline{ Some(p) => { match unsafe{ Pipeline::new_from_gst_pipeline( p.transfer() as *mut GstPipeline) }{ Some(p) => Some(PlayBin{ playbin: p }), None => None } } None => None } } pub fn set_audio_sink(&self, audio_sink: &ElementT){ self.set("audio-sink", unsafe{ audio_sink.gst_element() }); } /*pub fn frame(&self) -> GBuffer{ GBuffer::new(playbin.get<GstBuffer*>("frame")) }*/ pub fn set_subtitle_font_desc(&self, font: &str){ let cfont = CString::new(font).unwrap(); self.set("subtitle-font-desc", cfont); } pub fn set_video_sink(&self, video_sink: &ElementT){ self.set("video-sink", unsafe{ video_sink.gst_element() }); } pub fn set_vis_plugin(&self, vis_plugin: &ElementT){ self.set("vis-plugin", vis_plugin); } pub fn set_volume(&self, volume: f64){ self.set("volume", volume); } pub fn set_connection_speed(&self, connection_speed: u64){ self.set("connection-speed",connection_speed); } pub fn set_av_offset(&self, av_offset: i64){ self.set("av-offset", av_offset); } pub fn set_buffer_duration(&self, buffer_duration: i64){ self.set("buffer-duration",buffer_duration); } pub fn set_current_audio(&self, current_audio: i32){ self.set("current-audio",current_audio); } pub fn set_current_text(&self, current_text: i32){ self.set("current-text", current_text); } /*pub fn set_flags(&self, flags: GstPlayFlags){ self.set("flags", flags); }*/ pub fn mute(&self){ self.set("mute", 1 as gboolean); } pub fn unmute(&self){ self.set("mute", 0 as gboolean); } pub fn set_ring_buffer_max_size(&self, ring_buffer_max_size: u64){ self.set("ring-buffer-max-size", ring_buffer_max_size); } pub fn set_source(&self, source: &ElementT){ 
self.set("source", unsafe{ source.gst_element() }); } pub fn set_subtitle_encoding(&self, encoding: &str){ let cencoding = CString::new(encoding).unwrap(); self.set("subtitle-encoding", cencoding); } pub fn set_suburi(&self, suburi: &str){ let csuburi = CString::new(suburi).unwrap(); self.set("suburi", csuburi); } pub fn set_text_sink(&self, textsink: &ElementT){ self.set("text-sink", unsafe{ textsink.gst_element() }); } pub fn set_uri(&self, uri: &str){ let curi = CString::new(uri).unwrap(); self.set("uri", curi); } pub fn set_force_aspect_ratio(&self, force_aspect_ratio: bool){ self.set("force-aspect-ratio", force_aspect_ratio as gboolean); } pub fn set_audio_stream_combiner(&self, audio_stream_combiner: &ElementT){ self.set("audio-stream-combiner", unsafe{ audio_stream_combiner.gst_element() }); } pub fn set_video_stream_combiner(&self, video_stream_combiner: &ElementT){ self.set("vide-stream-combiner", unsafe{ video_stream_combiner.gst_element() }); } pub fn set_flags(&self, flags: i32){ self.set("flags", flags); } } impl PipelineT for PlayBin{ fn as_pipeline(&self) -> &Pipeline{ &self.playbin } fn as_pipeline_mut(&mut self) -> &mut Pipeline{ &mut self.playbin } } impl ::Transfer for PlayBin{ unsafe fn transfer(self) -> *mut GstElement{ self.playbin.transfer() } } playbin: use as_ptr for c strings use ffi::*; use pipeline::Pipeline; use pipeline::PipelineT; use element::Element; use element::ElementT; use std::ffi::CString; use ::Transfer; unsafe impl Sync for PlayBin {} unsafe impl Send for PlayBin {} pub struct PlayBin{ playbin: Pipeline } impl PlayBin{ pub fn new(name: &str) -> Option<PlayBin>{ let pipeline = Element::new("playbin",name); match pipeline{ Some(p) => { match unsafe{ Pipeline::new_from_gst_pipeline( p.transfer() as *mut GstPipeline) }{ Some(p) => Some(PlayBin{ playbin: p }), None => None } } None => None } } pub fn set_audio_sink(&self, audio_sink: &ElementT){ self.set("audio-sink", unsafe{ audio_sink.gst_element() }); } /*pub fn frame(&self) 
-> GBuffer{ GBuffer::new(playbin.get<GstBuffer*>("frame")) }*/ pub fn set_subtitle_font_desc(&self, font: &str){ let cfont = CString::new(font).unwrap(); self.set("subtitle-font-desc", cfont.as_ptr()); } pub fn set_video_sink(&self, video_sink: &ElementT){ self.set("video-sink", unsafe{ video_sink.gst_element() }); } pub fn set_vis_plugin(&self, vis_plugin: &ElementT){ self.set("vis-plugin", vis_plugin); } pub fn set_volume(&self, volume: f64){ self.set("volume", volume); } pub fn set_connection_speed(&self, connection_speed: u64){ self.set("connection-speed",connection_speed); } pub fn set_av_offset(&self, av_offset: i64){ self.set("av-offset", av_offset); } pub fn set_buffer_duration(&self, buffer_duration: i64){ self.set("buffer-duration",buffer_duration); } pub fn set_current_audio(&self, current_audio: i32){ self.set("current-audio",current_audio); } pub fn set_current_text(&self, current_text: i32){ self.set("current-text", current_text); } /*pub fn set_flags(&self, flags: GstPlayFlags){ self.set("flags", flags); }*/ pub fn mute(&self){ self.set("mute", 1 as gboolean); } pub fn unmute(&self){ self.set("mute", 0 as gboolean); } pub fn set_ring_buffer_max_size(&self, ring_buffer_max_size: u64){ self.set("ring-buffer-max-size", ring_buffer_max_size); } pub fn set_source(&self, source: &ElementT){ self.set("source", unsafe{ source.gst_element() }); } pub fn set_subtitle_encoding(&self, encoding: &str){ let cencoding = CString::new(encoding).unwrap(); self.set("subtitle-encoding", cencoding.as_ptr()); } pub fn set_suburi(&self, suburi: &str){ let csuburi = CString::new(suburi).unwrap(); self.set("suburi", csuburi.as_ptr()); } pub fn set_text_sink(&self, textsink: &ElementT){ self.set("text-sink", unsafe{ textsink.gst_element() }); } pub fn set_uri(&self, uri: &str){ let curi = CString::new(uri).unwrap(); self.set("uri", curi.as_ptr()); } pub fn set_force_aspect_ratio(&self, force_aspect_ratio: bool){ self.set("force-aspect-ratio", force_aspect_ratio as gboolean); 
} pub fn set_audio_stream_combiner(&self, audio_stream_combiner: &ElementT){ self.set("audio-stream-combiner", unsafe{ audio_stream_combiner.gst_element() }); } pub fn set_video_stream_combiner(&self, video_stream_combiner: &ElementT){ self.set("vide-stream-combiner", unsafe{ video_stream_combiner.gst_element() }); } pub fn set_flags(&self, flags: i32){ self.set("flags", flags); } } impl PipelineT for PlayBin{ fn as_pipeline(&self) -> &Pipeline{ &self.playbin } fn as_pipeline_mut(&mut self) -> &mut Pipeline{ &mut self.playbin } } impl ::Transfer for PlayBin{ unsafe fn transfer(self) -> *mut GstElement{ self.playbin.transfer() } }
use internal::prelude::*; use serde::de::{ self, Deserialize, Deserializer, MapAccess, Visitor }; use serde::ser::{ Serialize, Serializer }; use super::super::prelude::*; use std::{ collections::HashMap, mem::transmute, fmt }; /// Determines to what entity an action was used on. #[derive(Debug)] #[repr(u8)] pub enum Target { Guild = 10, Channel = 20, User = 30, Role = 40, Invite = 50, Webhook = 60, Emoji = 70, } /// Determines the action that was done on a target. #[derive(Debug)] pub enum Action { GuildUpdate, Channel(ActionChannel), ChannelOverwrite(ActionChannelOverwrite), Member(ActionMember), Role(ActionRole), Invite(ActionInvite), Webhook(ActionWebhook), Emoji(ActionEmoji), MessageDelete, } impl Action { pub fn num(&self) -> u8 { use self::Action::*; match *self { GuildUpdate => 1, Action::Channel(ref x) => x.num(), Action::ChannelOverwrite(ref x) => x.num(), Action::Member(ref x) => x.num(), Action::Role(ref x) => x.num(), Action::Invite(ref x) => x.num(), Action::Webhook(ref x) => x.num(), Action::Emoji(ref x) => x.num(), Action::MessageDelete => 72, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionChannel { Create = 10, Update = 11, Delete = 12, } impl ActionChannel { pub fn num(&self) -> u8 { match *self { ActionChannel::Create => 10, ActionChannel::Update => 11, ActionChannel::Delete => 12, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionChannelOverwrite { Create = 13, Update = 14, Delete = 15, } impl ActionChannelOverwrite { pub fn num(&self) -> u8 { match *self { ActionChannelOverwrite::Create => 13, ActionChannelOverwrite::Update => 14, ActionChannelOverwrite::Delete => 15, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionMember { Kick = 20, Prune = 21, BanAdd = 22, BanRemove = 23, Update = 24, RoleUpdate = 25, } impl ActionMember { pub fn num(&self) -> u8 { match *self { ActionMember::Kick => 20, ActionMember::Prune => 21, ActionMember::BanAdd => 22, ActionMember::BanRemove => 23, ActionMember::Update => 24, ActionMember::RoleUpdate => 25, } } } 
#[derive(Debug)] #[repr(u8)] pub enum ActionRole { Create = 30, Update = 31, Delete = 32, } impl ActionRole { pub fn num(&self) -> u8 { match *self { ActionRole::Create => 30, ActionRole::Update => 31, ActionRole::Delete => 32, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionInvite { Create = 40, Update = 41, Delete = 42, } impl ActionInvite { pub fn num(&self) -> u8 { match *self { ActionInvite::Create => 40, ActionInvite::Update => 41, ActionInvite::Delete => 42, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionWebhook { Create = 50, Update = 51, Delete = 52, } impl ActionWebhook { pub fn num(&self) -> u8 { match *self { ActionWebhook::Create => 50, ActionWebhook::Update => 51, ActionWebhook::Delete => 52, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionEmoji { Create = 60, Delete = 61, Update = 62, } impl ActionEmoji { pub fn num(&self) -> u8 { match *self { ActionEmoji::Create => 60, ActionEmoji::Update => 61, ActionEmoji::Delete => 62, } } } #[derive(Debug, Deserialize, Serialize)] pub struct Change { #[serde(rename = "key")] pub name: String, // TODO: Change these to an actual type. #[serde(rename = "old_value")] pub old: String, #[serde(rename = "new_value")] pub new: String, } #[derive(Debug)] pub struct AuditLogs { pub entries: HashMap<AuditLogEntryId, AuditLogEntry>, pub webhooks: Vec<Webhook>, pub users: Vec<User>, } #[derive(Debug, Deserialize, Serialize)] pub struct AuditLogEntry { /// Determines to what entity an [`action`] was used on. /// /// [`action`]: #structfield.action #[serde(with = "u64_handler")] pub target_id: u64, /// Determines what action was done on a [`target`] /// /// [`target`]: #structfield.target #[serde( with = "action_handler", rename = "action_type" )] pub action: Action, /// What was the reasoning by doing an action on a target? If there was one. pub reason: Option<String>, /// The user that did this action on a target. pub user_id: UserId, /// What changes were made. 
pub changes: Option<Vec<Change>>, /// The id of this entry. pub id: AuditLogEntryId, /// Some optional data assosiated with this entry. pub options: Option<Options>, } #[derive(Debug, Deserialize, Serialize)] pub struct Options { /// Number of days after which inactive members were kicked. #[serde(default, with = "option_u64_handler")] pub delete_member_days: Option<u64>, /// Number of members removed by the prune #[serde(default, with = "option_u64_handler")] pub members_removed: Option<u64>, /// Channel in which the messages were deleted #[serde(default)] pub channel_id: Option<ChannelId>, /// Number of deleted messages. #[serde(default, with = "option_u64_handler")] pub count: Option<u64>, /// Id of the overwritten entity #[serde(default, with = "option_u64_handler")] pub id: Option<u64>, /// Type of overwritten entity ("member" or "role"). #[serde(default, rename = "type")] pub kind: Option<String>, /// Name of the role if type is "role" #[serde(default)] pub role_name: Option<String>, } mod u64_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(des: D) -> StdResult<u64, D::Error> { struct U64Visitor; impl<'de> Visitor<'de> for U64Visitor { type Value = u64; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an integer or a string with a valid number inside") } // NOTE: Serde internally delegates number types below `u64` to it. 
fn visit_u64<E: de::Error>(self, val: u64) -> StdResult<u64, E> { Ok(val) } fn visit_str<E: de::Error>(self, string: &str) -> StdResult<u64, E> { string.parse().map_err(|e| de::Error::custom(e)) } } des.deserialize_any(U64Visitor) } pub fn serialize<S: Serializer>(num: &u64, s: S) -> StdResult<S::Ok, S::Error> { s.serialize_u64(*num) } } mod option_u64_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(des: D) -> StdResult<Option<u64>, D::Error> { struct OptionU64Visitor; impl<'de> Visitor<'de> for OptionU64Visitor { type Value = Option<u64>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an optional integer or a string with a valid number inside") } fn visit_some<D: Deserializer<'de>>(self, deserializer: D) -> StdResult<Self::Value, D::Error> { deserializer.deserialize_any(OptionU64Visitor) } fn visit_none<E: de::Error>(self) -> StdResult<Self::Value, E> { Ok(None) } fn visit_u64<E: de::Error>(self, val: u64) -> StdResult<Option<u64>, E> { Ok(Some(val)) } fn visit_str<E: de::Error>(self, string: &str) -> StdResult<Option<u64>, E> { string.parse().map(Some).map_err(|e| de::Error::custom(e)) } } des.deserialize_option(OptionU64Visitor) } pub fn serialize<S: Serializer>(num: &Option<u64>, s: S) -> StdResult<S::Ok, S::Error> { Option::serialize(num, s) } } mod action_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> StdResult<Action, D::Error> { struct ActionVisitor; impl<'de> Visitor<'de> for ActionVisitor { type Value = Action; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an integer between 1 to 72") } // NOTE: Serde internally delegates number types below `u64` to it. 
fn visit_u64<E: de::Error>(self, value: u64) -> StdResult<Action, E> { let value = value as u8; Ok(match value { 1 => Action::GuildUpdate, 10...12 => Action::Channel(unsafe { transmute(value) }), 13...15 => Action::ChannelOverwrite(unsafe { transmute(value) }), 20...25 => Action::Member(unsafe { transmute(value) }), 30...32 => Action::Role(unsafe { transmute(value) }), 40...42 => Action::Invite(unsafe { transmute(value) }), 50...52 => Action::Webhook(unsafe { transmute(value) }), 60...62 => Action::Emoji(unsafe { transmute(value) }), 72 => Action::MessageDelete, _ => return Err(E::custom(format!("Unexpected action number: {}", value))), }) } } de.deserialize_any(ActionVisitor) } pub fn serialize<S: Serializer>( action: &Action, serializer: S, ) -> StdResult<S::Ok, S::Error> { serializer.serialize_u8(action.num()) } } impl<'de> Deserialize<'de> for AuditLogs { fn deserialize<D: Deserializer<'de>>(de: D) -> StdResult<Self, D::Error> { #[derive(Deserialize)] #[serde(field_identifier)] enum Field { #[serde(rename = "audit_log_entries")] Entries, #[serde(rename = "webhooks")] Webhooks, #[serde(rename = "users")] Users, } struct EntriesVisitor; impl<'de> Visitor<'de> for EntriesVisitor { type Value = AuditLogs; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("audit log entries") } fn visit_map<V: MapAccess<'de>>(self, mut map: V) -> StdResult<AuditLogs, V::Error> { let mut audit_log_entries = None; let mut users = None; let mut webhooks = None; while let Some(field) = map.next_key()? 
{ match field { Field::Entries => { if audit_log_entries.is_some() { return Err(de::Error::duplicate_field("entries")); } audit_log_entries = Some(map.next_value::<Vec<AuditLogEntry>>()?); }, Field::Webhooks => { if webhooks.is_some() { return Err(de::Error::duplicate_field("webhooks")); } webhooks = Some(map.next_value::<Vec<Webhook>>()?); }, Field::Users => { if users.is_some() { return Err(de::Error::duplicate_field("users")); } users = Some(map.next_value::<Vec<User>>()?); }, } } Ok(AuditLogs { entries: audit_log_entries .unwrap() .into_iter() .map(|entry| (entry.id, entry)) .collect(), webhooks: webhooks.unwrap(), users: users.unwrap(), }) } } const FIELD: &[&str] = &["audit_log_entries"]; de.deserialize_struct("AuditLogs", FIELD, EntriesVisitor) } } Get rid of a spurious import warning use internal::prelude::*; use serde::de::{ self, Deserialize, Deserializer, MapAccess, Visitor }; use serde::ser::Serializer; use super::super::prelude::*; use std::{ collections::HashMap, mem::transmute, fmt }; /// Determines to what entity an action was used on. #[derive(Debug)] #[repr(u8)] pub enum Target { Guild = 10, Channel = 20, User = 30, Role = 40, Invite = 50, Webhook = 60, Emoji = 70, } /// Determines the action that was done on a target. 
#[derive(Debug)] pub enum Action { GuildUpdate, Channel(ActionChannel), ChannelOverwrite(ActionChannelOverwrite), Member(ActionMember), Role(ActionRole), Invite(ActionInvite), Webhook(ActionWebhook), Emoji(ActionEmoji), MessageDelete, } impl Action { pub fn num(&self) -> u8 { use self::Action::*; match *self { GuildUpdate => 1, Action::Channel(ref x) => x.num(), Action::ChannelOverwrite(ref x) => x.num(), Action::Member(ref x) => x.num(), Action::Role(ref x) => x.num(), Action::Invite(ref x) => x.num(), Action::Webhook(ref x) => x.num(), Action::Emoji(ref x) => x.num(), Action::MessageDelete => 72, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionChannel { Create = 10, Update = 11, Delete = 12, } impl ActionChannel { pub fn num(&self) -> u8 { match *self { ActionChannel::Create => 10, ActionChannel::Update => 11, ActionChannel::Delete => 12, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionChannelOverwrite { Create = 13, Update = 14, Delete = 15, } impl ActionChannelOverwrite { pub fn num(&self) -> u8 { match *self { ActionChannelOverwrite::Create => 13, ActionChannelOverwrite::Update => 14, ActionChannelOverwrite::Delete => 15, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionMember { Kick = 20, Prune = 21, BanAdd = 22, BanRemove = 23, Update = 24, RoleUpdate = 25, } impl ActionMember { pub fn num(&self) -> u8 { match *self { ActionMember::Kick => 20, ActionMember::Prune => 21, ActionMember::BanAdd => 22, ActionMember::BanRemove => 23, ActionMember::Update => 24, ActionMember::RoleUpdate => 25, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionRole { Create = 30, Update = 31, Delete = 32, } impl ActionRole { pub fn num(&self) -> u8 { match *self { ActionRole::Create => 30, ActionRole::Update => 31, ActionRole::Delete => 32, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionInvite { Create = 40, Update = 41, Delete = 42, } impl ActionInvite { pub fn num(&self) -> u8 { match *self { ActionInvite::Create => 40, ActionInvite::Update => 41, ActionInvite::Delete => 
42, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionWebhook { Create = 50, Update = 51, Delete = 52, } impl ActionWebhook { pub fn num(&self) -> u8 { match *self { ActionWebhook::Create => 50, ActionWebhook::Update => 51, ActionWebhook::Delete => 52, } } } #[derive(Debug)] #[repr(u8)] pub enum ActionEmoji { Create = 60, Delete = 61, Update = 62, } impl ActionEmoji { pub fn num(&self) -> u8 { match *self { ActionEmoji::Create => 60, ActionEmoji::Update => 61, ActionEmoji::Delete => 62, } } } #[derive(Debug, Deserialize, Serialize)] pub struct Change { #[serde(rename = "key")] pub name: String, // TODO: Change these to an actual type. #[serde(rename = "old_value")] pub old: String, #[serde(rename = "new_value")] pub new: String, } #[derive(Debug)] pub struct AuditLogs { pub entries: HashMap<AuditLogEntryId, AuditLogEntry>, pub webhooks: Vec<Webhook>, pub users: Vec<User>, } #[derive(Debug, Deserialize, Serialize)] pub struct AuditLogEntry { /// Determines to what entity an [`action`] was used on. /// /// [`action`]: #structfield.action #[serde(with = "u64_handler")] pub target_id: u64, /// Determines what action was done on a [`target`] /// /// [`target`]: #structfield.target #[serde( with = "action_handler", rename = "action_type" )] pub action: Action, /// What was the reasoning by doing an action on a target? If there was one. pub reason: Option<String>, /// The user that did this action on a target. pub user_id: UserId, /// What changes were made. pub changes: Option<Vec<Change>>, /// The id of this entry. pub id: AuditLogEntryId, /// Some optional data assosiated with this entry. pub options: Option<Options>, } #[derive(Debug, Deserialize, Serialize)] pub struct Options { /// Number of days after which inactive members were kicked. 
#[serde(default, with = "option_u64_handler")] pub delete_member_days: Option<u64>, /// Number of members removed by the prune #[serde(default, with = "option_u64_handler")] pub members_removed: Option<u64>, /// Channel in which the messages were deleted #[serde(default)] pub channel_id: Option<ChannelId>, /// Number of deleted messages. #[serde(default, with = "option_u64_handler")] pub count: Option<u64>, /// Id of the overwritten entity #[serde(default, with = "option_u64_handler")] pub id: Option<u64>, /// Type of overwritten entity ("member" or "role"). #[serde(default, rename = "type")] pub kind: Option<String>, /// Name of the role if type is "role" #[serde(default)] pub role_name: Option<String>, } mod u64_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(des: D) -> StdResult<u64, D::Error> { struct U64Visitor; impl<'de> Visitor<'de> for U64Visitor { type Value = u64; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an integer or a string with a valid number inside") } // NOTE: Serde internally delegates number types below `u64` to it. 
fn visit_u64<E: de::Error>(self, val: u64) -> StdResult<u64, E> { Ok(val) } fn visit_str<E: de::Error>(self, string: &str) -> StdResult<u64, E> { string.parse().map_err(|e| de::Error::custom(e)) } } des.deserialize_any(U64Visitor) } pub fn serialize<S: Serializer>(num: &u64, s: S) -> StdResult<S::Ok, S::Error> { s.serialize_u64(*num) } } mod option_u64_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(des: D) -> StdResult<Option<u64>, D::Error> { struct OptionU64Visitor; impl<'de> Visitor<'de> for OptionU64Visitor { type Value = Option<u64>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an optional integer or a string with a valid number inside") } fn visit_some<D: Deserializer<'de>>(self, deserializer: D) -> StdResult<Self::Value, D::Error> { deserializer.deserialize_any(OptionU64Visitor) } fn visit_none<E: de::Error>(self) -> StdResult<Self::Value, E> { Ok(None) } fn visit_u64<E: de::Error>(self, val: u64) -> StdResult<Option<u64>, E> { Ok(Some(val)) } fn visit_str<E: de::Error>(self, string: &str) -> StdResult<Option<u64>, E> { string.parse().map(Some).map_err(|e| de::Error::custom(e)) } } des.deserialize_option(OptionU64Visitor) } pub fn serialize<S: Serializer>(num: &Option<u64>, s: S) -> StdResult<S::Ok, S::Error> { use serde::Serialize; Option::serialize(num, s) } } mod action_handler { use super::*; pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> StdResult<Action, D::Error> { struct ActionVisitor; impl<'de> Visitor<'de> for ActionVisitor { type Value = Action; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("an integer between 1 to 72") } // NOTE: Serde internally delegates number types below `u64` to it. 
fn visit_u64<E: de::Error>(self, value: u64) -> StdResult<Action, E> { let value = value as u8; Ok(match value { 1 => Action::GuildUpdate, 10...12 => Action::Channel(unsafe { transmute(value) }), 13...15 => Action::ChannelOverwrite(unsafe { transmute(value) }), 20...25 => Action::Member(unsafe { transmute(value) }), 30...32 => Action::Role(unsafe { transmute(value) }), 40...42 => Action::Invite(unsafe { transmute(value) }), 50...52 => Action::Webhook(unsafe { transmute(value) }), 60...62 => Action::Emoji(unsafe { transmute(value) }), 72 => Action::MessageDelete, _ => return Err(E::custom(format!("Unexpected action number: {}", value))), }) } } de.deserialize_any(ActionVisitor) } pub fn serialize<S: Serializer>( action: &Action, serializer: S, ) -> StdResult<S::Ok, S::Error> { serializer.serialize_u8(action.num()) } } impl<'de> Deserialize<'de> for AuditLogs { fn deserialize<D: Deserializer<'de>>(de: D) -> StdResult<Self, D::Error> { #[derive(Deserialize)] #[serde(field_identifier)] enum Field { #[serde(rename = "audit_log_entries")] Entries, #[serde(rename = "webhooks")] Webhooks, #[serde(rename = "users")] Users, } struct EntriesVisitor; impl<'de> Visitor<'de> for EntriesVisitor { type Value = AuditLogs; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("audit log entries") } fn visit_map<V: MapAccess<'de>>(self, mut map: V) -> StdResult<AuditLogs, V::Error> { let mut audit_log_entries = None; let mut users = None; let mut webhooks = None; while let Some(field) = map.next_key()? 
{ match field { Field::Entries => { if audit_log_entries.is_some() { return Err(de::Error::duplicate_field("entries")); } audit_log_entries = Some(map.next_value::<Vec<AuditLogEntry>>()?); }, Field::Webhooks => { if webhooks.is_some() { return Err(de::Error::duplicate_field("webhooks")); } webhooks = Some(map.next_value::<Vec<Webhook>>()?); }, Field::Users => { if users.is_some() { return Err(de::Error::duplicate_field("users")); } users = Some(map.next_value::<Vec<User>>()?); }, } } Ok(AuditLogs { entries: audit_log_entries .unwrap() .into_iter() .map(|entry| (entry.id, entry)) .collect(), webhooks: webhooks.unwrap(), users: users.unwrap(), }) } } const FIELD: &[&str] = &["audit_log_entries"]; de.deserialize_struct("AuditLogs", FIELD, EntriesVisitor) } }
//! Not public API. Used as `$crate::export` by macros. pub use core::marker::{Send, Sync}; pub use core::result::Result; pub use serde; Fix module name in private module's doc //! Not public API. Used as `$crate::private` by macros. pub use core::marker::{Send, Sync}; pub use core::result::Result; pub use serde;
//! //! A request represents information about a filesystem operation the //! kernel driver wants us to perform. //! use std::mem; use libc::consts::os::posix88::{EIO, ENOSYS, EPROTO}; use time::Timespec; use argument::ArgumentIterator; use channel::ChannelSender; use Filesystem; use fuse::*; use fuse::consts::*; use reply::{Reply, ReplyRaw, ReplyEmpty, ReplyDirectory}; use session::{MAX_WRITE_SIZE, Session}; /// We generally support async reads, lookups of . and .. and writes larger than 4k #[cfg(not(target_os = "macos"))] const INIT_FLAGS: u32 = FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES; /// On OS X, we additionally support case insensitiveness, volume renames and xtimes /// TODO: we should eventually let the filesystem implementation decide which flags to set #[cfg(target_os = "macos")] const INIT_FLAGS: u32 = FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_CASE_INSENSITIVE | FUSE_VOL_RENAME | FUSE_XTIMES; /// Create a new request from the given buffer pub fn request<'a> (ch: ChannelSender, buffer: &'a [u8]) -> Option<Request<'a>> { Request::new(ch, buffer) } /// Dispatch request to the given filesystem pub fn dispatch<FS: Filesystem> (req: &Request, se: &mut Session<FS>) { req.dispatch(se); } /// Request data structure pub struct Request<'a> { /// Channel sender for sending the reply ch: ChannelSender, /// Header of the FUSE request header: &'a fuse_in_header, /// Operation-specific data payload data: &'a [u8], } impl<'a> Request<'a> { /// Create a new request from the given buffer fn new (ch: ChannelSender, buffer: &'a [u8]) -> Option<Request<'a>> { // Every request always begins with a fuse_in_header struct // followed by arbitrary data depending on which opcode it contains if buffer.len() < mem::size_of::<fuse_in_header>() { error!("Short read of FUSE request ({} < {})", buffer.len(), mem::size_of::<fuse_in_header>()); return None; } let mut data = ArgumentIterator::new(buffer); let req = Request { ch: ch, header: data.fetch(), 
data: data.fetch_data(), }; if buffer.len() < req.header.len as uint { error!("Short read of FUSE request ({} < {})", buffer.len(), req.header.len); return None; } Some(req) } /// Dispatch request to the given filesystem. /// This calls the appropriate filesystem operation method for the /// request and sends back the returned reply to the kernel fn dispatch<FS: Filesystem> (&self, se: &mut Session<FS>) { let opcode: fuse_opcode = match FromPrimitive::from_u32(self.header.opcode) { Some(op) => op, None => { warn!("Ignoring unknown FUSE operation {}", self.header.opcode); self.reply::<ReplyEmpty>().error(ENOSYS); return; }, }; let mut data = ArgumentIterator::new(self.data); match opcode { // Filesystem initialization FUSE_INIT => { let reply: ReplyRaw<fuse_init_out> = self.reply(); let arg: &fuse_init_in = data.fetch(); debug!("INIT({}) kernel: ABI {}.{}, flags {:#x}, max readahead {}", self.header.unique, arg.major, arg.minor, arg.flags, arg.max_readahead); // We don't support ABI versions before 7.6 if arg.major < 7 || (arg.major == 7 && arg.minor < 6) { error!("Unsupported FUSE ABI version {}.{}", arg.major, arg.minor); reply.error(EPROTO); return; } // Remember ABI version supported by kernel se.proto_major = arg.major as uint; se.proto_minor = arg.minor as uint; // Call filesystem init method and give it a chance to return an error let res = se.filesystem.init(self); if res.is_err() { reply.error(res.unwrap_err()); return; } // Reply with our desired version and settings. If the kernel supports a // larger major version, it'll re-send a matching init message. If it // supports only lower major versions, we replied with an error above. 
let init = fuse_init_out { major: FUSE_KERNEL_VERSION, minor: FUSE_KERNEL_MINOR_VERSION, max_readahead: arg.max_readahead, // accept any readahead size flags: arg.flags & INIT_FLAGS, // use features given in INIT_FLAGS and reported as capable unused: 0, max_write: MAX_WRITE_SIZE as u32, // use a max write size that fits into the session's buffer }; debug!("INIT({}) response: ABI {}.{}, flags {:#x}, max readahead {}, max write {}", self.header.unique, init.major, init.minor, init.flags, init.max_readahead, init.max_write); se.initialized = true; reply.ok(&init); }, // Any operation is invalid before initialization _ if !se.initialized => { warn!("Ignoring FUSE operation {} before init", self.header.opcode); self.reply::<ReplyEmpty>().error(EIO); }, // Filesystem destroyed FUSE_DESTROY => { debug!("DESTROY({})", self.header.unique); se.filesystem.destroy(self); se.destroyed = true; self.reply::<ReplyEmpty>().ok(); } // Any operation is invalid after destroy _ if se.destroyed => { warn!("Ignoring FUSE operation {} after destroy", self.header.opcode); self.reply::<ReplyEmpty>().error(EIO); } FUSE_INTERRUPT => { let arg: &fuse_interrupt_in = data.fetch(); debug!("INTERRUPT({}) unique {}", self.header.unique, arg.unique); // TODO: handle FUSE_INTERRUPT self.reply::<ReplyEmpty>().error(ENOSYS); }, FUSE_LOOKUP => { let name = data.fetch_path(); debug!("LOOKUP({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.lookup(self, self.header.nodeid, &name, self.reply()); }, FUSE_FORGET => { let arg: &fuse_forget_in = data.fetch(); debug!("FORGET({}) ino {:#018x}, nlookup {}", self.header.unique, self.header.nodeid, arg.nlookup); se.filesystem.forget(self, self.header.nodeid, arg.nlookup as uint); // no reply }, FUSE_GETATTR => { debug!("GETATTR({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.getattr(self, self.header.nodeid, self.reply()); }, FUSE_SETATTR => { let arg: &fuse_setattr_in = data.fetch(); 
debug!("SETATTR({}) ino {:#018x}, valid {:#x}", self.header.unique, self.header.nodeid, arg.valid); let mode = match arg.valid & FATTR_MODE { 0 => None, _ => Some(arg.mode) }; let uid = match arg.valid & FATTR_UID { 0 => None, _ => Some(arg.uid) }; let gid = match arg.valid & FATTR_GID { 0 => None, _ => Some(arg.gid) }; let size = match arg.valid & FATTR_SIZE { 0 => None, _ => Some(arg.size) }; let atime = match arg.valid & FATTR_ATIME { 0 => None, _ => Some(Timespec::new(arg.atime, arg.atimensec)) }; let mtime = match arg.valid & FATTR_MTIME { 0 => None, _ => Some(Timespec::new(arg.mtime, arg.mtimensec)) }; let fh = match arg.valid & FATTR_FH { 0 => None, _ => Some(arg.fh) }; #[cfg(target_os = "macos")] #[inline] fn get_macos_setattr (arg: &fuse_setattr_in) -> (Option<Timespec>, Option<Timespec>, Option<Timespec>, Option<u32>) { let crtime = match arg.valid & FATTR_CRTIME { 0 => None, _ => Some(Timespec::new(arg.crtime, arg.crtimensec)) }; let chgtime = match arg.valid & FATTR_CHGTIME { 0 => None, _ => Some(Timespec::new(arg.chgtime, arg.chgtimensec)) }; let bkuptime = match arg.valid & FATTR_BKUPTIME { 0 => None, _ => Some(Timespec::new(arg.bkuptime, arg.bkuptimensec)) }; let flags = match arg.valid & FATTR_FLAGS { 0 => None, _ => Some(arg.flags) }; (crtime, chgtime, bkuptime, flags) } #[cfg(not(target_os = "macos"))] #[inline] fn get_macos_setattr (_arg: &fuse_setattr_in) -> (Option<Timespec>, Option<Timespec>, Option<Timespec>, Option<u32>) { (None, None, None, None) } let (crtime, chgtime, bkuptime, flags) = get_macos_setattr(arg); se.filesystem.setattr(self, self.header.nodeid, mode, uid, gid, size, atime, mtime, fh, crtime, chgtime, bkuptime, flags, self.reply()); }, FUSE_READLINK => { debug!("READLINK({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.readlink(self, self.header.nodeid, self.reply()); }, FUSE_MKNOD => { let arg: &fuse_mknod_in = data.fetch(); let name = data.fetch_path(); debug!("MKNOD({}) parent {:#018x}, name {}, 
mode {:#05o}, rdev {}", self.header.unique, self.header.nodeid, name.display(), arg.mode, arg.rdev); se.filesystem.mknod(self, self.header.nodeid, &name, arg.mode, arg.rdev, self.reply()); }, FUSE_MKDIR => { let arg: &fuse_mkdir_in = data.fetch(); let name = data.fetch_path(); debug!("MKDIR({}) parent {:#018x}, name {}, mode {:#05o}", self.header.unique, self.header.nodeid, name.display(), arg.mode); se.filesystem.mkdir(self, self.header.nodeid, &name, arg.mode, self.reply()); }, FUSE_UNLINK => { let name = data.fetch_path(); debug!("UNLINK({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.unlink(self, self.header.nodeid, &name, self.reply()); }, FUSE_RMDIR => { let name = data.fetch_path(); debug!("RMDIR({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.rmdir(self, self.header.nodeid, &name, self.reply()); }, FUSE_SYMLINK => { let name = data.fetch_path(); let link = data.fetch_path(); debug!("SYMLINK({}) parent {:#018x}, name {}, link {}", self.header.unique, self.header.nodeid, name.display(), link.display()); se.filesystem.symlink(self, self.header.nodeid, &name, &link, self.reply()); }, FUSE_RENAME => { let arg: &fuse_rename_in = data.fetch(); let name = data.fetch_path(); let newname = data.fetch_path(); debug!("RENAME({}) parent {:#018x}, name {}, newparent {:#018x}, newname {}", self.header.unique, self.header.nodeid, name.display(), arg.newdir, newname.display()); se.filesystem.rename(self, self.header.nodeid, &name, arg.newdir, &newname, self.reply()); }, FUSE_LINK => { let arg: &fuse_link_in = data.fetch(); let newname = data.fetch_path(); debug!("LINK({}) ino {:#018x}, newparent {:#018x}, newname {}", self.header.unique, arg.oldnodeid, self.header.nodeid, newname.display()); se.filesystem.link(self, arg.oldnodeid, self.header.nodeid, &newname, self.reply()); }, FUSE_OPEN => { let arg: &fuse_open_in = data.fetch(); debug!("OPEN({}) ino {:#018x}, flags 
{:#x}", self.header.unique, self.header.nodeid, arg.flags); se.filesystem.open(self, self.header.nodeid, arg.flags as uint, self.reply()); }, FUSE_READ => { let arg: &fuse_read_in = data.fetch(); debug!("READ({}) ino {:#018x}, fh {}, offset {}, size {}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size); se.filesystem.read(self, self.header.nodeid, arg.fh, arg.offset, arg.size as uint, self.reply()); }, FUSE_WRITE => { let arg: &fuse_write_in = data.fetch(); let data = data.fetch_data(); assert!(data.len() == arg.size as uint); debug!("WRITE({}) ino {:#018x}, fh {}, offset {}, size {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size, arg.write_flags); se.filesystem.write(self, self.header.nodeid, arg.fh, arg.offset, data, arg.write_flags as uint, self.reply()); }, FUSE_FLUSH => { let arg: &fuse_flush_in = data.fetch(); debug!("FLUSH({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.lock_owner); se.filesystem.flush(self, self.header.nodeid, arg.fh, arg.lock_owner, self.reply()); }, FUSE_RELEASE => { let arg: &fuse_release_in = data.fetch(); let flush = match arg.release_flags & FUSE_RELEASE_FLUSH { 0 => false, _ => true }; debug!("RELEASE({}) ino {:#018x}, fh {}, flags {:#x}, release flags {:#x}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.flags, arg.release_flags, arg.lock_owner); se.filesystem.release(self, self.header.nodeid, arg.fh, arg.flags as uint, arg.lock_owner, flush, self.reply()); }, FUSE_FSYNC => { let arg: &fuse_fsync_in = data.fetch(); let datasync = match arg.fsync_flags & 1 { 0 => false, _ => true }; debug!("FSYNC({}) ino {:#018x}, fh {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.fsync_flags); se.filesystem.fsync(self, self.header.nodeid, arg.fh, datasync, self.reply()); }, FUSE_OPENDIR => { let arg: &fuse_open_in = data.fetch(); debug!("OPENDIR({}) ino {:#018x}, flags {:#x}", self.header.unique, 
self.header.nodeid, arg.flags); se.filesystem.opendir(self, self.header.nodeid, arg.flags as uint, self.reply()); }, FUSE_READDIR => { let arg: &fuse_read_in = data.fetch(); debug!("READDIR({}) ino {:#018x}, fh {}, offset {}, size {}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size); se.filesystem.readdir(self, self.header.nodeid, arg.fh, arg.offset, self.reply::<ReplyDirectory>().sized(arg.size as uint)); }, FUSE_RELEASEDIR => { let arg: &fuse_release_in = data.fetch(); debug!("RELEASEDIR({}) ino {:#018x}, fh {}, flags {:#x}, release flags {:#x}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.flags, arg.release_flags, arg.lock_owner); se.filesystem.releasedir(self, self.header.nodeid, arg.fh, arg.flags as uint, self.reply()); }, FUSE_FSYNCDIR => { let arg: &fuse_fsync_in = data.fetch(); let datasync = match arg.fsync_flags & 1 { 0 => false, _ => true }; debug!("FSYNCDIR({}) ino {:#018x}, fh {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.fsync_flags); se.filesystem.fsyncdir(self, self.header.nodeid, arg.fh, datasync, self.reply()); }, FUSE_STATFS => { debug!("STATFS({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.statfs(self, self.header.nodeid, self.reply()); }, FUSE_SETXATTR => { let arg: &fuse_setxattr_in = data.fetch(); let name = data.fetch_str(); let value = data.fetch_data(); assert!(value.len() == arg.size as uint); debug!("SETXATTR({}) ino {:#018x}, name {}, size {}, flags {:#x}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name), arg.size, arg.flags); #[cfg(target_os = "macos")] #[inline] fn get_position (arg: &fuse_setxattr_in) -> u32 { arg.position } #[cfg(not(target_os = "macos"))] #[inline] fn get_position (_arg: &fuse_setxattr_in) -> u32 { 0 } se.filesystem.setxattr(self, self.header.nodeid, name, value, arg.flags as uint, get_position(arg), self.reply()); }, FUSE_GETXATTR => { let arg: &fuse_getxattr_in = data.fetch(); let name = 
data.fetch_str(); debug!("GETXATTR({}) ino {:#018x}, name {}, size {}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name), arg.size); se.filesystem.getxattr(self, self.header.nodeid, name, self.reply()); }, FUSE_LISTXATTR => { let arg: &fuse_getxattr_in = data.fetch(); debug!("LISTXATTR({}) ino {:#018x}, size {}", self.header.unique, self.header.nodeid, arg.size); se.filesystem.listxattr(self, self.header.nodeid, self.reply()); }, FUSE_REMOVEXATTR => { let name = data.fetch_str(); debug!("REMOVEXATTR({}) ino {:#018x}, name {}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name)); se.filesystem.removexattr(self, self.header.nodeid, name, self.reply()); }, FUSE_ACCESS => { let arg: &fuse_access_in = data.fetch(); debug!("ACCESS({}) ino {:#018x}, mask {:#05o}", self.header.unique, self.header.nodeid, arg.mask); se.filesystem.access(self, self.header.nodeid, arg.mask as uint, self.reply()); }, FUSE_CREATE => { let arg: &fuse_open_in = data.fetch(); let name = data.fetch_path(); debug!("CREATE({}) parent {:#018x}, name {}, mode {:#05o}, flags {:#x}", self.header.unique, self.header.nodeid, name.display(), arg.mode, arg.flags); se.filesystem.create(self, self.header.nodeid, &name, arg.mode, arg.flags as uint, self.reply()); }, FUSE_GETLK => { let arg: &fuse_lk_in = data.fetch(); debug!("GETLK({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.owner); se.filesystem.getlk(self, self.header.nodeid, arg.fh, arg.owner, arg.lk.start, arg.lk.end, arg.lk.typ, arg.lk.pid, self.reply()); }, FUSE_SETLK | FUSE_SETLKW => { let arg: &fuse_lk_in = data.fetch(); let sleep = match opcode { FUSE_SETLKW => true, _ => false }; debug!("SETLK({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.owner); se.filesystem.setlk(self, self.header.nodeid, arg.fh, arg.owner, arg.lk.start, arg.lk.end, arg.lk.typ, arg.lk.pid, sleep, self.reply()); }, FUSE_BMAP => { let arg: 
&fuse_bmap_in = data.fetch(); debug!("BMAP({}) ino {:#018x}, blocksize {}, ids {}", self.header.unique, self.header.nodeid, arg.blocksize, arg.block); se.filesystem.bmap(self, self.header.nodeid, arg.blocksize as uint, arg.block, self.reply()); }, #[cfg(target_os = "macos")] FUSE_SETVOLNAME => { // OS X only let name = data.fetch_str(); debug!("SETVOLNAME({}) name {}", self.header.unique, String::from_utf8_lossy(name)); se.filesystem.setvolname(self, name, self.reply()); }, #[cfg(target_os = "macos")] FUSE_EXCHANGE => { // OS X only let arg: &fuse_exchange_in = data.fetch(); let oldname = data.fetch_path(); let newname = data.fetch_path(); debug!("EXCHANGE({}) parent {:#018x}, name {}, newparent {:#018x}, newname {}, options {:#x}", self.header.unique, arg.olddir, oldname.display(), arg.newdir, newname.display(), arg.options); se.filesystem.exchange(self, arg.olddir, &oldname, arg.newdir, &newname, arg.options as uint, self.reply()); }, #[cfg(target_os = "macos")] FUSE_GETXTIMES => { // OS X only debug!("GETXTIMES({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.getxtimes(self, self.header.nodeid, self.reply()); }, } } /// Create a reply object for this request that can be passed to the filesystem /// implementation and makes sure that a request is replied eventually fn reply<T: Reply> (&self) -> T { let ch = self.ch; Reply::new(self.header.unique, proc(buffer) { match ch.send(buffer) { Ok(()) => (), Err(err) => error!("Failed to send FUSE reply: {}", err), } }) } /// Returns the unique identifier of this request #[inline] #[allow(dead_code)] pub fn unique (&self) -> u64 { self.header.unique } /// Returns the uid of this request #[inline] #[allow(dead_code)] pub fn uid (&self) -> u32 { self.header.uid } /// Returns the gid of this request #[inline] #[allow(dead_code)] pub fn gid (&self) -> u32 { self.header.gid } /// Returns the pid of this request #[inline] #[allow(dead_code)] pub fn pid (&self) -> u32 { self.header.pid } } Imported enum 
namespace, since they are now explicit See: https://github.com/rust-lang/rust/commit/3dcd2157403163789aaf21a9ab3c4d30a7c6494d //! //! A request represents information about a filesystem operation the //! kernel driver wants us to perform. //! use std::mem; use libc::consts::os::posix88::{EIO, ENOSYS, EPROTO}; use time::Timespec; use argument::ArgumentIterator; use channel::ChannelSender; use Filesystem; use fuse::*; use fuse::consts::*; use fuse::fuse_opcode::*; use reply::{Reply, ReplyRaw, ReplyEmpty, ReplyDirectory}; use session::{MAX_WRITE_SIZE, Session}; /// We generally support async reads, lookups of . and .. and writes larger than 4k #[cfg(not(target_os = "macos"))] const INIT_FLAGS: u32 = FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES; /// On OS X, we additionally support case insensitiveness, volume renames and xtimes /// TODO: we should eventually let the filesystem implementation decide which flags to set #[cfg(target_os = "macos")] const INIT_FLAGS: u32 = FUSE_ASYNC_READ | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_CASE_INSENSITIVE | FUSE_VOL_RENAME | FUSE_XTIMES; /// Create a new request from the given buffer pub fn request<'a> (ch: ChannelSender, buffer: &'a [u8]) -> Option<Request<'a>> { Request::new(ch, buffer) } /// Dispatch request to the given filesystem pub fn dispatch<FS: Filesystem> (req: &Request, se: &mut Session<FS>) { req.dispatch(se); } /// Request data structure pub struct Request<'a> { /// Channel sender for sending the reply ch: ChannelSender, /// Header of the FUSE request header: &'a fuse_in_header, /// Operation-specific data payload data: &'a [u8], } impl<'a> Request<'a> { /// Create a new request from the given buffer fn new (ch: ChannelSender, buffer: &'a [u8]) -> Option<Request<'a>> { // Every request always begins with a fuse_in_header struct // followed by arbitrary data depending on which opcode it contains if buffer.len() < mem::size_of::<fuse_in_header>() { error!("Short read of FUSE request ({} < {})", 
buffer.len(), mem::size_of::<fuse_in_header>()); return None; } let mut data = ArgumentIterator::new(buffer); let req = Request { ch: ch, header: data.fetch(), data: data.fetch_data(), }; if buffer.len() < req.header.len as uint { error!("Short read of FUSE request ({} < {})", buffer.len(), req.header.len); return None; } Some(req) } /// Dispatch request to the given filesystem. /// This calls the appropriate filesystem operation method for the /// request and sends back the returned reply to the kernel fn dispatch<FS: Filesystem> (&self, se: &mut Session<FS>) { let opcode: fuse_opcode = match FromPrimitive::from_u32(self.header.opcode) { Some(op) => op, None => { warn!("Ignoring unknown FUSE operation {}", self.header.opcode); self.reply::<ReplyEmpty>().error(ENOSYS); return; }, }; let mut data = ArgumentIterator::new(self.data); match opcode { // Filesystem initialization FUSE_INIT => { let reply: ReplyRaw<fuse_init_out> = self.reply(); let arg: &fuse_init_in = data.fetch(); debug!("INIT({}) kernel: ABI {}.{}, flags {:#x}, max readahead {}", self.header.unique, arg.major, arg.minor, arg.flags, arg.max_readahead); // We don't support ABI versions before 7.6 if arg.major < 7 || (arg.major == 7 && arg.minor < 6) { error!("Unsupported FUSE ABI version {}.{}", arg.major, arg.minor); reply.error(EPROTO); return; } // Remember ABI version supported by kernel se.proto_major = arg.major as uint; se.proto_minor = arg.minor as uint; // Call filesystem init method and give it a chance to return an error let res = se.filesystem.init(self); if res.is_err() { reply.error(res.unwrap_err()); return; } // Reply with our desired version and settings. If the kernel supports a // larger major version, it'll re-send a matching init message. If it // supports only lower major versions, we replied with an error above. 
let init = fuse_init_out { major: FUSE_KERNEL_VERSION, minor: FUSE_KERNEL_MINOR_VERSION, max_readahead: arg.max_readahead, // accept any readahead size flags: arg.flags & INIT_FLAGS, // use features given in INIT_FLAGS and reported as capable unused: 0, max_write: MAX_WRITE_SIZE as u32, // use a max write size that fits into the session's buffer }; debug!("INIT({}) response: ABI {}.{}, flags {:#x}, max readahead {}, max write {}", self.header.unique, init.major, init.minor, init.flags, init.max_readahead, init.max_write); se.initialized = true; reply.ok(&init); }, // Any operation is invalid before initialization _ if !se.initialized => { warn!("Ignoring FUSE operation {} before init", self.header.opcode); self.reply::<ReplyEmpty>().error(EIO); }, // Filesystem destroyed FUSE_DESTROY => { debug!("DESTROY({})", self.header.unique); se.filesystem.destroy(self); se.destroyed = true; self.reply::<ReplyEmpty>().ok(); } // Any operation is invalid after destroy _ if se.destroyed => { warn!("Ignoring FUSE operation {} after destroy", self.header.opcode); self.reply::<ReplyEmpty>().error(EIO); } FUSE_INTERRUPT => { let arg: &fuse_interrupt_in = data.fetch(); debug!("INTERRUPT({}) unique {}", self.header.unique, arg.unique); // TODO: handle FUSE_INTERRUPT self.reply::<ReplyEmpty>().error(ENOSYS); }, FUSE_LOOKUP => { let name = data.fetch_path(); debug!("LOOKUP({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.lookup(self, self.header.nodeid, &name, self.reply()); }, FUSE_FORGET => { let arg: &fuse_forget_in = data.fetch(); debug!("FORGET({}) ino {:#018x}, nlookup {}", self.header.unique, self.header.nodeid, arg.nlookup); se.filesystem.forget(self, self.header.nodeid, arg.nlookup as uint); // no reply }, FUSE_GETATTR => { debug!("GETATTR({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.getattr(self, self.header.nodeid, self.reply()); }, FUSE_SETATTR => { let arg: &fuse_setattr_in = data.fetch(); 
debug!("SETATTR({}) ino {:#018x}, valid {:#x}", self.header.unique, self.header.nodeid, arg.valid); let mode = match arg.valid & FATTR_MODE { 0 => None, _ => Some(arg.mode) }; let uid = match arg.valid & FATTR_UID { 0 => None, _ => Some(arg.uid) }; let gid = match arg.valid & FATTR_GID { 0 => None, _ => Some(arg.gid) }; let size = match arg.valid & FATTR_SIZE { 0 => None, _ => Some(arg.size) }; let atime = match arg.valid & FATTR_ATIME { 0 => None, _ => Some(Timespec::new(arg.atime, arg.atimensec)) }; let mtime = match arg.valid & FATTR_MTIME { 0 => None, _ => Some(Timespec::new(arg.mtime, arg.mtimensec)) }; let fh = match arg.valid & FATTR_FH { 0 => None, _ => Some(arg.fh) }; #[cfg(target_os = "macos")] #[inline] fn get_macos_setattr (arg: &fuse_setattr_in) -> (Option<Timespec>, Option<Timespec>, Option<Timespec>, Option<u32>) { let crtime = match arg.valid & FATTR_CRTIME { 0 => None, _ => Some(Timespec::new(arg.crtime, arg.crtimensec)) }; let chgtime = match arg.valid & FATTR_CHGTIME { 0 => None, _ => Some(Timespec::new(arg.chgtime, arg.chgtimensec)) }; let bkuptime = match arg.valid & FATTR_BKUPTIME { 0 => None, _ => Some(Timespec::new(arg.bkuptime, arg.bkuptimensec)) }; let flags = match arg.valid & FATTR_FLAGS { 0 => None, _ => Some(arg.flags) }; (crtime, chgtime, bkuptime, flags) } #[cfg(not(target_os = "macos"))] #[inline] fn get_macos_setattr (_arg: &fuse_setattr_in) -> (Option<Timespec>, Option<Timespec>, Option<Timespec>, Option<u32>) { (None, None, None, None) } let (crtime, chgtime, bkuptime, flags) = get_macos_setattr(arg); se.filesystem.setattr(self, self.header.nodeid, mode, uid, gid, size, atime, mtime, fh, crtime, chgtime, bkuptime, flags, self.reply()); }, FUSE_READLINK => { debug!("READLINK({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.readlink(self, self.header.nodeid, self.reply()); }, FUSE_MKNOD => { let arg: &fuse_mknod_in = data.fetch(); let name = data.fetch_path(); debug!("MKNOD({}) parent {:#018x}, name {}, 
mode {:#05o}, rdev {}", self.header.unique, self.header.nodeid, name.display(), arg.mode, arg.rdev); se.filesystem.mknod(self, self.header.nodeid, &name, arg.mode, arg.rdev, self.reply()); }, FUSE_MKDIR => { let arg: &fuse_mkdir_in = data.fetch(); let name = data.fetch_path(); debug!("MKDIR({}) parent {:#018x}, name {}, mode {:#05o}", self.header.unique, self.header.nodeid, name.display(), arg.mode); se.filesystem.mkdir(self, self.header.nodeid, &name, arg.mode, self.reply()); }, FUSE_UNLINK => { let name = data.fetch_path(); debug!("UNLINK({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.unlink(self, self.header.nodeid, &name, self.reply()); }, FUSE_RMDIR => { let name = data.fetch_path(); debug!("RMDIR({}) parent {:#018x}, name {}", self.header.unique, self.header.nodeid, name.display()); se.filesystem.rmdir(self, self.header.nodeid, &name, self.reply()); }, FUSE_SYMLINK => { let name = data.fetch_path(); let link = data.fetch_path(); debug!("SYMLINK({}) parent {:#018x}, name {}, link {}", self.header.unique, self.header.nodeid, name.display(), link.display()); se.filesystem.symlink(self, self.header.nodeid, &name, &link, self.reply()); }, FUSE_RENAME => { let arg: &fuse_rename_in = data.fetch(); let name = data.fetch_path(); let newname = data.fetch_path(); debug!("RENAME({}) parent {:#018x}, name {}, newparent {:#018x}, newname {}", self.header.unique, self.header.nodeid, name.display(), arg.newdir, newname.display()); se.filesystem.rename(self, self.header.nodeid, &name, arg.newdir, &newname, self.reply()); }, FUSE_LINK => { let arg: &fuse_link_in = data.fetch(); let newname = data.fetch_path(); debug!("LINK({}) ino {:#018x}, newparent {:#018x}, newname {}", self.header.unique, arg.oldnodeid, self.header.nodeid, newname.display()); se.filesystem.link(self, arg.oldnodeid, self.header.nodeid, &newname, self.reply()); }, FUSE_OPEN => { let arg: &fuse_open_in = data.fetch(); debug!("OPEN({}) ino {:#018x}, flags 
{:#x}", self.header.unique, self.header.nodeid, arg.flags); se.filesystem.open(self, self.header.nodeid, arg.flags as uint, self.reply()); }, FUSE_READ => { let arg: &fuse_read_in = data.fetch(); debug!("READ({}) ino {:#018x}, fh {}, offset {}, size {}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size); se.filesystem.read(self, self.header.nodeid, arg.fh, arg.offset, arg.size as uint, self.reply()); }, FUSE_WRITE => { let arg: &fuse_write_in = data.fetch(); let data = data.fetch_data(); assert!(data.len() == arg.size as uint); debug!("WRITE({}) ino {:#018x}, fh {}, offset {}, size {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size, arg.write_flags); se.filesystem.write(self, self.header.nodeid, arg.fh, arg.offset, data, arg.write_flags as uint, self.reply()); }, FUSE_FLUSH => { let arg: &fuse_flush_in = data.fetch(); debug!("FLUSH({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.lock_owner); se.filesystem.flush(self, self.header.nodeid, arg.fh, arg.lock_owner, self.reply()); }, FUSE_RELEASE => { let arg: &fuse_release_in = data.fetch(); let flush = match arg.release_flags & FUSE_RELEASE_FLUSH { 0 => false, _ => true }; debug!("RELEASE({}) ino {:#018x}, fh {}, flags {:#x}, release flags {:#x}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.flags, arg.release_flags, arg.lock_owner); se.filesystem.release(self, self.header.nodeid, arg.fh, arg.flags as uint, arg.lock_owner, flush, self.reply()); }, FUSE_FSYNC => { let arg: &fuse_fsync_in = data.fetch(); let datasync = match arg.fsync_flags & 1 { 0 => false, _ => true }; debug!("FSYNC({}) ino {:#018x}, fh {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.fsync_flags); se.filesystem.fsync(self, self.header.nodeid, arg.fh, datasync, self.reply()); }, FUSE_OPENDIR => { let arg: &fuse_open_in = data.fetch(); debug!("OPENDIR({}) ino {:#018x}, flags {:#x}", self.header.unique, 
self.header.nodeid, arg.flags); se.filesystem.opendir(self, self.header.nodeid, arg.flags as uint, self.reply()); }, FUSE_READDIR => { let arg: &fuse_read_in = data.fetch(); debug!("READDIR({}) ino {:#018x}, fh {}, offset {}, size {}", self.header.unique, self.header.nodeid, arg.fh, arg.offset, arg.size); se.filesystem.readdir(self, self.header.nodeid, arg.fh, arg.offset, self.reply::<ReplyDirectory>().sized(arg.size as uint)); }, FUSE_RELEASEDIR => { let arg: &fuse_release_in = data.fetch(); debug!("RELEASEDIR({}) ino {:#018x}, fh {}, flags {:#x}, release flags {:#x}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.flags, arg.release_flags, arg.lock_owner); se.filesystem.releasedir(self, self.header.nodeid, arg.fh, arg.flags as uint, self.reply()); }, FUSE_FSYNCDIR => { let arg: &fuse_fsync_in = data.fetch(); let datasync = match arg.fsync_flags & 1 { 0 => false, _ => true }; debug!("FSYNCDIR({}) ino {:#018x}, fh {}, flags {:#x}", self.header.unique, self.header.nodeid, arg.fh, arg.fsync_flags); se.filesystem.fsyncdir(self, self.header.nodeid, arg.fh, datasync, self.reply()); }, FUSE_STATFS => { debug!("STATFS({}) ino {:#018x}", self.header.unique, self.header.nodeid); se.filesystem.statfs(self, self.header.nodeid, self.reply()); }, FUSE_SETXATTR => { let arg: &fuse_setxattr_in = data.fetch(); let name = data.fetch_str(); let value = data.fetch_data(); assert!(value.len() == arg.size as uint); debug!("SETXATTR({}) ino {:#018x}, name {}, size {}, flags {:#x}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name), arg.size, arg.flags); #[cfg(target_os = "macos")] #[inline] fn get_position (arg: &fuse_setxattr_in) -> u32 { arg.position } #[cfg(not(target_os = "macos"))] #[inline] fn get_position (_arg: &fuse_setxattr_in) -> u32 { 0 } se.filesystem.setxattr(self, self.header.nodeid, name, value, arg.flags as uint, get_position(arg), self.reply()); }, FUSE_GETXATTR => { let arg: &fuse_getxattr_in = data.fetch(); let name = 
data.fetch_str(); debug!("GETXATTR({}) ino {:#018x}, name {}, size {}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name), arg.size); se.filesystem.getxattr(self, self.header.nodeid, name, self.reply()); }, FUSE_LISTXATTR => { let arg: &fuse_getxattr_in = data.fetch(); debug!("LISTXATTR({}) ino {:#018x}, size {}", self.header.unique, self.header.nodeid, arg.size); se.filesystem.listxattr(self, self.header.nodeid, self.reply()); }, FUSE_REMOVEXATTR => { let name = data.fetch_str(); debug!("REMOVEXATTR({}) ino {:#018x}, name {}", self.header.unique, self.header.nodeid, String::from_utf8_lossy(name)); se.filesystem.removexattr(self, self.header.nodeid, name, self.reply()); }, FUSE_ACCESS => { let arg: &fuse_access_in = data.fetch(); debug!("ACCESS({}) ino {:#018x}, mask {:#05o}", self.header.unique, self.header.nodeid, arg.mask); se.filesystem.access(self, self.header.nodeid, arg.mask as uint, self.reply()); }, FUSE_CREATE => { let arg: &fuse_open_in = data.fetch(); let name = data.fetch_path(); debug!("CREATE({}) parent {:#018x}, name {}, mode {:#05o}, flags {:#x}", self.header.unique, self.header.nodeid, name.display(), arg.mode, arg.flags); se.filesystem.create(self, self.header.nodeid, &name, arg.mode, arg.flags as uint, self.reply()); }, FUSE_GETLK => { let arg: &fuse_lk_in = data.fetch(); debug!("GETLK({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.owner); se.filesystem.getlk(self, self.header.nodeid, arg.fh, arg.owner, arg.lk.start, arg.lk.end, arg.lk.typ, arg.lk.pid, self.reply()); }, FUSE_SETLK | FUSE_SETLKW => { let arg: &fuse_lk_in = data.fetch(); let sleep = match opcode { FUSE_SETLKW => true, _ => false }; debug!("SETLK({}) ino {:#018x}, fh {}, lock owner {}", self.header.unique, self.header.nodeid, arg.fh, arg.owner); se.filesystem.setlk(self, self.header.nodeid, arg.fh, arg.owner, arg.lk.start, arg.lk.end, arg.lk.typ, arg.lk.pid, sleep, self.reply()); }, FUSE_BMAP => { let arg: 
&fuse_bmap_in = data.fetch();
            debug!("BMAP({}) ino {:#018x}, blocksize {}, ids {}", self.header.unique, self.header.nodeid, arg.blocksize, arg.block);
            se.filesystem.bmap(self, self.header.nodeid, arg.blocksize as uint, arg.block, self.reply());
        },
        // The remaining opcodes are OS X (fuse4x/osxfuse) extensions and are
        // compiled in only on macOS.
        #[cfg(target_os = "macos")]
        FUSE_SETVOLNAME => { // OS X only
            let name = data.fetch_str();
            debug!("SETVOLNAME({}) name {}", self.header.unique, String::from_utf8_lossy(name));
            se.filesystem.setvolname(self, name, self.reply());
        },
        #[cfg(target_os = "macos")]
        FUSE_EXCHANGE => { // OS X only
            // Atomically exchanges two directory entries (see exchangedata(2)).
            let arg: &fuse_exchange_in = data.fetch();
            let oldname = data.fetch_path();
            let newname = data.fetch_path();
            debug!("EXCHANGE({}) parent {:#018x}, name {}, newparent {:#018x}, newname {}, options {:#x}", self.header.unique, arg.olddir, oldname.display(), arg.newdir, newname.display(), arg.options);
            se.filesystem.exchange(self, arg.olddir, &oldname, arg.newdir, &newname, arg.options as uint, self.reply());
        },
        #[cfg(target_os = "macos")]
        FUSE_GETXTIMES => { // OS X only
            debug!("GETXTIMES({}) ino {:#018x}", self.header.unique, self.header.nodeid);
            se.filesystem.getxtimes(self, self.header.nodeid, self.reply());
        },
    }
}

/// Create a reply object for this request that can be passed to the filesystem
/// implementation and makes sure that a request is replied eventually
fn reply<T: Reply> (&self) -> T {
    // Copy the channel sender so the reply callback owns its own handle.
    let ch = self.ch;
    // NOTE: `proc` is the pre-1.0 one-shot closure; the sender is moved into
    // it and invoked exactly once when the reply is serialized.
    Reply::new(self.header.unique, proc(buffer) {
        match ch.send(buffer) {
            Ok(()) => (),
            Err(err) => error!("Failed to send FUSE reply: {}", err),
        }
    })
}

// Accessors below expose the kernel-supplied fuse_in_header fields of the
// request to filesystem implementations.

/// Returns the unique identifier of this request
#[inline]
#[allow(dead_code)]
pub fn unique (&self) -> u64 { self.header.unique }

/// Returns the uid of this request
#[inline]
#[allow(dead_code)]
pub fn uid (&self) -> u32 { self.header.uid }

/// Returns the gid of this request
#[inline]
#[allow(dead_code)]
pub fn gid (&self) -> u32 { self.header.gid }

/// Returns the pid of this request
#[inline]
#[allow(dead_code)]
pub fn pid (&self) -> u32 { self.header.pid }
}
extern crate gl;
extern crate cgmath;

use super::super::*;
use std::mem;
use std::ptr;
use std::ffi::CString;
use std::str;
use self::gl::types::*;
use self::cgmath::*;

// Raw OpenGL object name (as returned by glGen*).
type GLHandle = u32;
// Index into one of the renderer's resource vectors below.
type Handle = usize;
type VBOHandle = Handle;
type VAOHandle = Handle;
type IBOHandle = Handle;
type ProgramHandle = Handle;

// Vertex buffer object wrapper.
struct GLVbo {
    id: GLHandle,
}

// Index buffer object wrapper; `count` is the number of indices stored.
struct GLIbo {
    id: GLHandle,
    itype: IndexType,
    count: usize,
}

// One uniform inside a uniform block, as reported by program introspection.
struct GLUniform {
    name: String,
    index: GLuint,
    offset: GLint,  // byte offset inside the block's buffer
    itype: GLenum,  // GL type enum (gl::FLOAT, gl::FLOAT_VEC4, ...)
    size: GLsizei,
}

// A uniform block plus its backing UBO and a CPU-side shadow copy of its data.
struct GLUniformBlock {
    name: String,
    size: usize,
    buffer: GLuint,          // GL uniform buffer object
    buffer_data: BufferData, // CPU shadow, flushed via glBufferSubData
    uniforms: Vec<GLUniform>,
}

impl PartialEq for GLUniformBlock {
    // Blocks are identified by name only.
    fn eq(&self, other: &Self) -> bool {
        self.name == other.name
    }
}

// Linked program together with its shader stage ids and introspected blocks.
struct GLProg {
    id: GLHandle,
    vsid: GLHandle,
    fsid: GLHandle,
    uniform_blocks: Vec<GLUniformBlock>,
}

struct GLVertexArrayObject {
    id: GLHandle,
}

/// OpenGL backend; owns all GL resources, addressed by index handles.
pub struct OpenGLRenderer {
    vaos: Vec<GLVertexArrayObject>,
    vbos: Vec<GLVbo>,
    ibos: Vec<GLIbo>,
    progs: Vec<GLProg>,
}

/// A renderable: handles to its VBO/IBO/VAO/program plus layout and params.
pub struct OpenGLGeometry {
    vbo: VBOHandle,
    ibo: IBOHandle,
    vao: VAOHandle,
    program: ProgramHandle,
    layout_desc: VertexLayoutDescription,
    params: ShaderParams,
}

impl Geometry for OpenGLGeometry {
    fn get_vertex_layout_description(&self) -> &VertexLayoutDescription {
        &self.layout_desc
    }
    fn get_params(&self) -> &ShaderParams {
        &self.params
    }
    fn get_mut_params(&mut self) -> &mut ShaderParams {
        &mut self.params
    }
}

impl OpenGLRenderer {
    /// Creates an empty renderer; assumes a GL context is already current.
    pub fn new() -> Box<OpenGLRenderer> {
        Box::new(OpenGLRenderer {
            vaos: Vec::new(),
            vbos: Vec::new(),
            ibos: Vec::new(),
            progs: Vec::new(),
        })
    }

    // The bind_* helpers translate an index handle into the GL bind call.
    fn bind_vertex_buffer(&self, vboh: VBOHandle) {
        unsafe { gl::BindBuffer(gl::ARRAY_BUFFER, self.vbos[vboh].id); }
    }
    fn bind_index_buffer(&self, iboh: IBOHandle) {
        unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ibos[iboh].id); }
    }
    fn bind_program(&self, progh: ProgramHandle) {
        unsafe { gl::UseProgram(self.progs[progh].id); }
    }
    fn bind_vertex_array(&self, vaoh: VAOHandle) {
        unsafe {
            gl::BindVertexArray(self.vaos[vaoh].id);
} }

    /// Compiles one GLSL shader stage and returns its GL object id.
    /// Panics with the driver's info log when compilation fails.
    fn compile_shader(&self, src: &str, shader_type: GLenum) -> GLuint {
        unsafe {
            let shader = gl::CreateShader(shader_type);
            let c_str = CString::new(src.as_bytes()).unwrap();
            gl::ShaderSource(shader, 1, &c_str.as_ptr(), ptr::null());
            gl::CompileShader(shader);
            let mut status = gl::FALSE as GLint;
            gl::GetShaderiv(shader, gl::COMPILE_STATUS, &mut status);
            if status != (gl::TRUE as GLint) {
                let mut len = 0;
                gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len);
                // BUGFIX: the log buffer must be `len` zero-initialized bytes.
                // The previous `Vec<&[u8]>` + `set_len` handed GL a buffer of
                // uninitialized fat pointers (undefined behavior) and then
                // formatted garbage instead of the log. `saturating_sub(1)`
                // drops the trailing NUL and tolerates a zero-length log.
                let mut buf = vec![0u8; (len as usize).saturating_sub(1)];
                gl::GetShaderInfoLog(shader, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
                panic!("{}", str::from_utf8(&buf).ok().expect("ShaderInfoLog not valid utf8."));
            }
            shader
        }
    }

    /// Builds a VAO describing `desc`'s vertex layout for the given
    /// VBO/program pair and returns its handle (index into `self.vaos`).
    fn create_vertex_array_object(&mut self, desc: &VertexLayoutDescription, vbo: VBOHandle, progh: ProgramHandle) -> Result<VAOHandle, String> {
        let mut vao = 0;
        unsafe {
            gl::GenVertexArrays(1, &mut vao);
            gl::BindVertexArray(vao);
            let mut vertex_size: i32 = 0;
            for elem in desc.elements.iter() {
                vertex_size += elem.vtype.get_size_of() as i32;
            }
            self.bind_vertex_buffer(vbo);
            self.bind_program(progh);
            let progid = self.progs[progh].id;
            for (i, elem) in desc.elements.iter().enumerate() {
                let index = i as u32;
                let num_components = elem.vtype.get_num_components();
                let elem_type = match elem.vtype {
                    VertexElementType::F32 |
                    VertexElementType::F32F32 |
                    VertexElementType::F32F32F32 |
                    VertexElementType::F32F32F32F32 => gl::FLOAT,
                };
                // BUGFIX: keep the CString alive for the duration of the GL
                // call. `CString::new(..).unwrap().as_ptr()` took a pointer
                // into a temporary dropped at the end of the statement,
                // handing GL a dangling pointer.
                let attr_name = CString::new(elem.name.clone()).unwrap();
                //let index = gl::GetAttribLocation(progid, attr_name.as_ptr());
                gl::BindAttribLocation(progid, index, attr_name.as_ptr());
                gl::EnableVertexAttribArray(index as u32);
                gl::VertexAttribPointer(index as u32, num_components, elem_type, gl::FALSE, 0, mem::transmute(elem.offset));
            }
        }
        self.vaos.push(GLVertexArrayObject { id: vao });
        Ok(self.vaos.len() - 1)
    }

    /// Uploads `data` into a new GL_ARRAY_BUFFER and returns its handle
    /// (index into `self.vbos`).
    fn create_vertex_buffer_object(&mut self, data: BufferData) -> Result<VBOHandle, String> {
        let mut buf_id = 0;
        unsafe {
            gl::GenBuffers(1, &mut buf_id);
            gl::BindBuffer(gl::ARRAY_BUFFER, buf_id);
            gl::BufferData(gl::ARRAY_BUFFER, data.bytes.len() as isize, mem::transmute(&data.bytes[0]), gl::STATIC_DRAW);
        }
        self.vbos.push(GLVbo { id: buf_id });
        Ok(self.vbos.len() - 1)
    }

    /// Uploads `data` into a new GL_ELEMENT_ARRAY_BUFFER and returns its
    /// handle (index into `self.ibos`).
    fn create_index_buffer_object(&mut self, itype: IndexType, data: BufferData) -> Result<IBOHandle, String> {
        let mut buf_id = 0;
        unsafe {
            gl::GenBuffers(1, &mut buf_id);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buf_id);
            gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, data.bytes.len() as isize, mem::transmute(&data.bytes[0]), gl::DYNAMIC_DRAW);
        }
        // Number of indices contained in the byte buffer.
        let count = match itype {
            IndexType::U32 => data.bytes.len() / mem::size_of::<u32>(),
            IndexType::U16 => data.bytes.len() / mem::size_of::<u16>(),
        };
        self.ibos.push(GLIbo { id: buf_id, itype: itype, count: count });
        // BUGFIX: return the index of the IBO just pushed. The old code
        // returned `self.vbos.len() - 1` — the wrong collection — which
        // yields a bogus handle whenever the VBO and IBO counts diverge.
        Ok(self.ibos.len() - 1)
    }

    /// Compiles and links a vertex/fragment shader pair, introspects its
    /// uniform blocks, and returns the program handle (index into
    /// `self.progs`). Panics with the driver's info log when linking fails.
    fn create_program(&mut self, vert_src: &str, frag_src: &str) -> Result<ProgramHandle, String> {
        let vs = self.compile_shader(vert_src, gl::VERTEX_SHADER);
        let fs = self.compile_shader(frag_src, gl::FRAGMENT_SHADER);
        let program;
        unsafe {
            program = gl::CreateProgram();
            gl::AttachShader(program, vs);
            gl::AttachShader(program, fs);
            gl::LinkProgram(program);
            let mut status = gl::FALSE as GLint;
            gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
            if status != (gl::TRUE as GLint) {
                let mut len: GLint = 0;
                gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut len);
                // BUGFIX: same uninitialized `Vec<&[u8]>` misuse as in
                // compile_shader; use a zero-initialized byte buffer instead.
                let mut buf = vec![0u8; (len as usize).saturating_sub(1)];
                gl::GetProgramInfoLog(program, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
                panic!("{}", str::from_utf8(&buf).ok().expect("programinfolog not valid utf8"));
            }
        }
        let uniform_blocks = self.get_program_uniform_blocks(program);
        self.progs.push(GLProg { vsid: vs, fsid: fs, id: program, uniform_blocks: uniform_blocks });
        Ok(self.progs.len() - 1)
    }

    fn get_shader_params_from_uniforms(&self, uniform_blocks: &Vec<GLUniformBlock>) -> ShaderParams {
        let mut
param_groups: Vec<ParamGroup> = Vec::with_capacity(uniform_blocks.len()); for block in uniform_blocks.iter() { let group_name = block.name.clone(); let mut params: Vec<Param> = Vec::with_capacity(block.uniforms.len()); for uniform in block.uniforms.iter() { let param_value: ParamValue = match uniform.itype { gl::FLOAT => ParamValue::F32(0.0), gl::FLOAT_VEC4 => ParamValue::Vec4(vec4(0.0, 0.0, 0.0, 0.0)), gl::FLOAT_MAT3 => ParamValue::Mat3(Matrix3::identity()), gl::FLOAT_MAT4 => ParamValue::Mat4(Matrix4::identity()), _ => panic!("Unsupported shader uniform type!"), }; params.push(Param { name: uniform.name.clone(), value: param_value }); } param_groups.push(ParamGroup { name: group_name, params: params, }); } ShaderParams::new(param_groups) } fn get_program_uniform_blocks(&self, progid: GLuint) -> Vec<GLUniformBlock> { //let progid = program.id; let mut num_blocks: GLint = 0; unsafe { gl::GetProgramiv(progid, gl::ACTIVE_UNIFORM_BLOCKS, &mut num_blocks); } let mut max_uniform_name_len: GLint = 0; unsafe { gl::GetProgramiv(progid, gl::ACTIVE_UNIFORM_MAX_LENGTH, &mut max_uniform_name_len); } let mut uniform_blocks: Vec<GLUniformBlock> = Vec::with_capacity(num_blocks as usize); unsafe { for i in 0..num_blocks { let mut name_len: GLint = 0; gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_NAME_LENGTH, &mut name_len); let mut name_bytes = Vec::with_capacity(name_len as usize); name_bytes.set_len((name_len as usize) - 1); gl::GetActiveUniformBlockName(progid, i as u32, name_len, ptr::null_mut(), name_bytes.as_mut_ptr() as *mut GLchar); let block_name: String = str::from_utf8(&name_bytes).unwrap().to_string(); let mut block_size: GLint = 0; gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_DATA_SIZE, &mut block_size); let mut num_uniforms: GLint = 0; gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_ACTIVE_UNIFORMS, &mut num_uniforms); let mut uniform_indices = Vec::with_capacity(num_uniforms as usize); 
uniform_indices.set_len(num_uniforms as usize); gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES, uniform_indices.as_mut_ptr() as *mut GLint); let mut uniforms: Vec<GLUniform> = Vec::with_capacity(num_uniforms as usize); for uniform_index in uniform_indices { let mut name_len: GLsizei = 0; let mut uniform_name_bytes = Vec::with_capacity(max_uniform_name_len as usize); uniform_name_bytes.set_len(max_uniform_name_len as usize); let mut uniform_type: GLenum = 0; let mut uniform_size: GLint = 0; gl::GetActiveUniform(progid, uniform_index as u32, max_uniform_name_len, &mut name_len, &mut uniform_size, &mut uniform_type, uniform_name_bytes.as_mut_ptr() as *mut GLchar); for _ in 0..(max_uniform_name_len - name_len) { uniform_name_bytes.pop(); } let uniform_name: String = str::from_utf8(&uniform_name_bytes).unwrap().to_string(); let mut uniform_offset: GLint = 0; gl::GetActiveUniformsiv(progid, 1, &uniform_index, gl::UNIFORM_OFFSET, &mut uniform_offset); uniforms.push(GLUniform { name: uniform_name, index: uniform_index, offset: uniform_offset, itype: uniform_type, size: uniform_size, }); } let buffer_data = BufferData::new_zero_initialized(block_size as usize); let mut ubo: GLuint = 0; gl::GenBuffers(1, &mut ubo); gl::BindBuffer(gl::UNIFORM_BUFFER, ubo); gl::BufferData(gl::UNIFORM_BUFFER, buffer_data.bytes.len() as isize, mem::transmute(&buffer_data.bytes[0]), gl::STATIC_DRAW); gl::BindBufferBase(gl::UNIFORM_BUFFER, i as u32, ubo); uniform_blocks.push(GLUniformBlock { name: block_name, size: block_size as usize, buffer: ubo, buffer_data: BufferData::new_zero_initialized(block_size as usize), uniforms: uniforms, }); } uniform_blocks } } fn draw_vertex_arrays(&mut self, vboh: VBOHandle, vaoh: VAOHandle, iboh: IBOHandle, progh: ProgramHandle) { let ibo = &self.ibos[iboh]; self.bind_program(progh); self.bind_vertex_buffer(vboh); self.bind_index_buffer(iboh); self.bind_vertex_array(vaoh); unsafe { gl::DrawArrays(gl::TRIANGLES, 0, ibo.count 
as i32); } } fn apply_shader_params(&mut self, geom: &mut Box<OpenGLGeometry>) { let changes; { let mut params = geom.get_mut_params(); changes = params.flush_changes(); } let mut prog: &mut GLProg = self.progs.get_mut(geom.program).unwrap(); let mut uniform_blocks: &mut Vec<GLUniformBlock> = &mut prog.uniform_blocks; let mut affected_blocks: Vec<usize> = Vec::new(); let params = geom.get_mut_params(); // This is O(scary) // should probably be optimized some time // ShaderParams::flush_changes should return which blocks // are affected as well as the parameters within that where // affected, so that we can avoid this for name in changes.iter() { 'outer: for (block_idx, block) in uniform_blocks.iter_mut().enumerate() { for uniform in block.uniforms.iter() { if uniform.name == *name { if !affected_blocks.contains(&block_idx) { affected_blocks.push(block_idx); } let mut param_value = params.get(name); match *param_value { ParamValue::F32(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]), ParamValue::Vec4(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]), ParamValue::Mat3(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]), ParamValue::Mat4(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]), } break 'outer; } } } } for block_idx in affected_blocks { let block = uniform_blocks.get_mut(block_idx).unwrap(); unsafe { gl::BindBuffer(gl::UNIFORM_BUFFER, block.buffer); gl::BufferSubData(gl::UNIFORM_BUFFER, 0, block.buffer_data.bytes.len() as isize, mem::transmute(&block.buffer_data.bytes[0])); } } } } impl Renderer for OpenGLRenderer { fn clear(&mut self, r: f32, g: f32, b: f32, a: f32) { unsafe { gl::ClearColor(r, g, b, a); gl::Clear(gl::COLOR_BUFFER_BIT); } } fn create_geometry(&mut self, vertex_data: BufferData, index_data: BufferData, layout: VertexLayoutDescription, index_type: IndexType, vert_src: &str, frag_src: &str) -> Box<Geometry> { let vbo = 
self.create_vertex_buffer_object(vertex_data).unwrap(); let prog = self.create_program(vert_src, frag_src).unwrap(); let vao = self.create_vertex_array_object(&layout, vbo, prog).unwrap(); let ibo = self.create_index_buffer_object(index_type, index_data).unwrap(); //let params = self.get_program_params(prog); let params = self.get_shader_params_from_uniforms(&self.progs[prog].uniform_blocks); let geom = OpenGLGeometry { vbo: vbo, vao: vao, ibo: ibo, program: prog, layout_desc: layout, params: params }; Box::new(geom) } fn draw_geometry(&mut self, geom: &mut Box<Geometry>) { // This is pretty lame. There should be a better way to convert Box<Geometry> to Box<OpenGLGeometry> // Perhaps this is just an unsafe design by nature however. let glgeom: &mut Box<OpenGLGeometry> = unsafe { mem::transmute(geom) }; self.apply_shader_params(glgeom); self.draw_vertex_arrays(glgeom.vbo, glgeom.vao, glgeom.ibo, glgeom.program); } } Fixed erroneous shader error reporting code. extern crate gl; extern crate cgmath; use super::super::*; use std::mem; use std::ptr; use std::ffi::CString; use std::str; use self::gl::types::*; use self::cgmath::*; type GLHandle = u32; type Handle = usize; type VBOHandle = Handle; type VAOHandle = Handle; type IBOHandle = Handle; type ProgramHandle = Handle; struct GLVbo { id: GLHandle, } struct GLIbo { id: GLHandle, itype: IndexType, count: usize, } struct GLUniform { name: String, index: GLuint, offset: GLint, itype: GLenum, size: GLsizei, } struct GLUniformBlock { name: String, size: usize, buffer: GLuint, buffer_data: BufferData, uniforms: Vec<GLUniform>, } impl PartialEq for GLUniformBlock { fn eq(&self, other: &Self) -> bool { self.name == other.name } } struct GLProg { id: GLHandle, vsid: GLHandle, fsid: GLHandle, uniform_blocks: Vec<GLUniformBlock>, } struct GLVertexArrayObject { id: GLHandle, } pub struct OpenGLRenderer { vaos: Vec<GLVertexArrayObject>, vbos: Vec<GLVbo>, ibos: Vec<GLIbo>, progs: Vec<GLProg>, } pub struct OpenGLGeometry { vbo: 
VBOHandle, ibo: IBOHandle, vao: VAOHandle, program: ProgramHandle, layout_desc: VertexLayoutDescription, params: ShaderParams, } impl Geometry for OpenGLGeometry { fn get_vertex_layout_description(&self) -> &VertexLayoutDescription { &self.layout_desc } fn get_params(&self) -> &ShaderParams { &self.params } fn get_mut_params(&mut self) -> &mut ShaderParams { &mut self.params } } impl OpenGLRenderer { pub fn new() -> Box<OpenGLRenderer> { Box::new(OpenGLRenderer { vaos: Vec::new(), vbos: Vec::new(), ibos: Vec::new(), progs: Vec::new(), }) } fn bind_vertex_buffer(&self, vboh: VBOHandle) { unsafe { gl::BindBuffer(gl::ARRAY_BUFFER, self.vbos[vboh].id); } } fn bind_index_buffer(&self, iboh: IBOHandle) { unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ibos[iboh].id); } } fn bind_program(&self, progh: ProgramHandle) { unsafe { gl::UseProgram(self.progs[progh].id); } } fn bind_vertex_array(&self, vaoh: VAOHandle) { unsafe { gl::BindVertexArray(self.vaos[vaoh].id); } } fn compile_shader(&self, src: &str, shader_type: GLenum) -> GLuint { unsafe { let shader = gl::CreateShader(shader_type); let c_str = CString::new(src.as_bytes()).unwrap(); gl::ShaderSource(shader, 1, &c_str.as_ptr(), ptr::null()); gl::CompileShader(shader); let mut status = gl::FALSE as GLint; gl::GetShaderiv(shader, gl::COMPILE_STATUS, &mut status); if status != (gl::TRUE as GLint) { let mut len = 0; gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len); let mut buf = Vec::with_capacity(len as usize); buf.set_len((len as usize) - 1); gl::GetShaderInfoLog(shader, len, ptr::null_mut(), buf.as_mut_ptr() as * mut GLchar); panic!("{}", str::from_utf8(&buf).ok().expect("ShaderInfoLog not valid utf8.")); } shader } } fn create_vertex_array_object(&mut self, desc: &VertexLayoutDescription, vbo: VBOHandle, progh: ProgramHandle) -> Result<VAOHandle, String> { let mut vao = 0; unsafe { gl::GenVertexArrays(1, &mut vao); gl::BindVertexArray(vao); let mut vertex_size: i32 = 0; for elem in desc.elements.iter() { 
vertex_size += elem.vtype.get_size_of() as i32;
            }
            self.bind_vertex_buffer(vbo);
            self.bind_program(progh);
            let progid = self.progs[progh].id;
            for (i, elem) in desc.elements.iter().enumerate() {
                let index = i as u32;
                let num_components = elem.vtype.get_num_components();
                let elem_type = match elem.vtype {
                    VertexElementType::F32 |
                    VertexElementType::F32F32 |
                    VertexElementType::F32F32F32 |
                    VertexElementType::F32F32F32F32 => gl::FLOAT,
                };
                // BUGFIX: keep the CString alive across the GL call; taking
                // `.as_ptr()` of the temporary produced a dangling pointer.
                let attr_name = CString::new(elem.name.clone()).unwrap();
                //let index = gl::GetAttribLocation(progid, attr_name.as_ptr());
                gl::BindAttribLocation(progid, index, attr_name.as_ptr());
                gl::EnableVertexAttribArray(index as u32);
                gl::VertexAttribPointer(index as u32, num_components, elem_type, gl::FALSE, 0, mem::transmute(elem.offset));
            }
        }
        self.vaos.push(GLVertexArrayObject { id: vao });
        Ok(self.vaos.len() - 1)
    }

    /// Uploads `data` into a new GL_ARRAY_BUFFER and returns its handle
    /// (index into `self.vbos`).
    fn create_vertex_buffer_object(&mut self, data: BufferData) -> Result<VBOHandle, String> {
        let mut buf_id = 0;
        unsafe {
            gl::GenBuffers(1, &mut buf_id);
            gl::BindBuffer(gl::ARRAY_BUFFER, buf_id);
            gl::BufferData(gl::ARRAY_BUFFER, data.bytes.len() as isize, mem::transmute(&data.bytes[0]), gl::STATIC_DRAW);
        }
        self.vbos.push(GLVbo { id: buf_id });
        Ok(self.vbos.len() - 1)
    }

    /// Uploads `data` into a new GL_ELEMENT_ARRAY_BUFFER and returns its
    /// handle (index into `self.ibos`).
    fn create_index_buffer_object(&mut self, itype: IndexType, data: BufferData) -> Result<IBOHandle, String> {
        let mut buf_id = 0;
        unsafe {
            gl::GenBuffers(1, &mut buf_id);
            gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buf_id);
            gl::BufferData(gl::ELEMENT_ARRAY_BUFFER, data.bytes.len() as isize, mem::transmute(&data.bytes[0]), gl::DYNAMIC_DRAW);
        }
        // Number of indices contained in the byte buffer.
        let count = match itype {
            IndexType::U32 => data.bytes.len() / mem::size_of::<u32>(),
            IndexType::U16 => data.bytes.len() / mem::size_of::<u16>(),
        };
        self.ibos.push(GLIbo { id: buf_id, itype: itype, count: count });
        // BUGFIX: return the index into `self.ibos`, not `self.vbos`; the old
        // code handed back a vertex-buffer count as an index-buffer handle.
        Ok(self.ibos.len() - 1)
    }

    fn create_program(&mut self, vert_src: &str, frag_src: &str) -> Result<ProgramHandle, String> {
        let vs = self.compile_shader(vert_src,
// NOTE(review): this chunk opens mid-way through `create_program`; the head of
// that method (which compiled `vert_src` into `vs`) lies before the visible
// span, so only the tail is shown and documented here.
gl::VERTEX_SHADER);
        // Compile the fragment stage, then link both stages into a GL program.
        let fs = self.compile_shader(frag_src, gl::FRAGMENT_SHADER);
        let program;
        unsafe {
            program = gl::CreateProgram();
            gl::AttachShader(program, vs);
            gl::AttachShader(program, fs);
            gl::LinkProgram(program);
            // Check GL_LINK_STATUS; on failure fetch the info log and panic with it.
            let mut status = gl::FALSE as GLint;
            gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
            if status != (gl::TRUE as GLint) {
                let mut len: GLint = 0;
                gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut len);
                // NOTE(review): `set_len` exposes uninitialized bytes until
                // GetProgramInfoLog overwrites them, and `len - 1` underflows
                // if the reported log length is 0 — worth hardening.
                let mut buf = Vec::with_capacity(len as usize);
                buf.set_len((len as usize) - 1);
                gl::GetProgramInfoLog(program, len, ptr::null_mut(), buf.as_mut_ptr() as *mut GLchar);
                panic!("{}", str::from_utf8(&buf).ok().expect("programinfolog not valid utf8"));
            }
        }
        // Introspect the program's uniform blocks and register the program;
        // the handle handed back to callers is the index into `self.progs`.
        let uniform_blocks = self.get_program_uniform_blocks(program);
        let prog = GLProg {
            vsid: vs,
            fsid: fs,
            id: program,
            uniform_blocks: uniform_blocks,
        };
        self.progs.push(prog);
        Ok(self.progs.len() - 1)
    }

    /// Builds a default-initialized `ShaderParams` mirroring the program's
    /// uniform-block layout: one `ParamGroup` per block, one `Param` per
    /// uniform, with zero/identity starting values per GL type.
    fn get_shader_params_from_uniforms(&self, uniform_blocks: &Vec<GLUniformBlock>) -> ShaderParams {
        let mut param_groups: Vec<ParamGroup> = Vec::with_capacity(uniform_blocks.len());
        for block in uniform_blocks.iter() {
            let group_name = block.name.clone();
            let mut params: Vec<Param> = Vec::with_capacity(block.uniforms.len());
            for uniform in block.uniforms.iter() {
                // Map the introspected GL type to a zeroed default value;
                // unsupported types are a hard error.
                let param_value: ParamValue = match uniform.itype {
                    gl::FLOAT => ParamValue::F32(0.0),
                    gl::FLOAT_VEC4 => ParamValue::Vec4(vec4(0.0, 0.0, 0.0, 0.0)),
                    gl::FLOAT_MAT3 => ParamValue::Mat3(Matrix3::identity()),
                    gl::FLOAT_MAT4 => ParamValue::Mat4(Matrix4::identity()),
                    _ => panic!("Unsupported shader uniform type!"),
                };
                params.push(Param { name: uniform.name.clone(), value: param_value });
            }
            param_groups.push(ParamGroup { name: group_name, params: params, });
        }
        ShaderParams::new(param_groups)
    }

    /// Queries GL for every active uniform block of `progid`, records each
    /// block's uniforms (name/index/offset/type/size), and creates + binds a
    /// zero-initialized uniform buffer object (UBO) per block.
    fn get_program_uniform_blocks(&self, progid: GLuint) -> Vec<GLUniformBlock> {
        //let progid = program.id;
        let mut num_blocks: GLint = 0;
        unsafe { gl::GetProgramiv(progid, gl::ACTIVE_UNIFORM_BLOCKS, &mut num_blocks); }
        let mut max_uniform_name_len: GLint = 0;
        unsafe { gl::GetProgramiv(progid, gl::ACTIVE_UNIFORM_MAX_LENGTH, &mut max_uniform_name_len); }
        let mut uniform_blocks: Vec<GLUniformBlock> = Vec::with_capacity(num_blocks as usize);
        unsafe {
            for i in 0..num_blocks {
                // Fetch the block's name into a staging buffer.
                // NOTE(review): same `with_capacity` + `set_len(len - 1)`
                // uninitialized-read pattern as in create_program — verify.
                let mut name_len: GLint = 0;
                gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_NAME_LENGTH, &mut name_len);
                let mut name_bytes = Vec::with_capacity(name_len as usize);
                name_bytes.set_len((name_len as usize) - 1);
                gl::GetActiveUniformBlockName(progid, i as u32, name_len, ptr::null_mut(), name_bytes.as_mut_ptr() as *mut GLchar);
                let block_name: String = str::from_utf8(&name_bytes).unwrap().to_string();
                // Block byte size and the indices of its member uniforms.
                let mut block_size: GLint = 0;
                gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_DATA_SIZE, &mut block_size);
                let mut num_uniforms: GLint = 0;
                gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_ACTIVE_UNIFORMS, &mut num_uniforms);
                let mut uniform_indices = Vec::with_capacity(num_uniforms as usize);
                uniform_indices.set_len(num_uniforms as usize);
                gl::GetActiveUniformBlockiv(progid, i as u32, gl::UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES, uniform_indices.as_mut_ptr() as *mut GLint);
                let mut uniforms: Vec<GLUniform> = Vec::with_capacity(num_uniforms as usize);
                for uniform_index in uniform_indices {
                    // Per-uniform introspection: name, type, size, byte offset.
                    let mut name_len: GLsizei = 0;
                    let mut uniform_name_bytes = Vec::with_capacity(max_uniform_name_len as usize);
                    uniform_name_bytes.set_len(max_uniform_name_len as usize);
                    let mut uniform_type: GLenum = 0;
                    let mut uniform_size: GLint = 0;
                    gl::GetActiveUniform(progid, uniform_index as u32, max_uniform_name_len, &mut name_len, &mut uniform_size, &mut uniform_type, uniform_name_bytes.as_mut_ptr() as *mut GLchar);
                    // Trim the staging buffer down to the actual name length.
                    for _ in 0..(max_uniform_name_len - name_len) { uniform_name_bytes.pop(); }
                    let uniform_name: String = str::from_utf8(&uniform_name_bytes).unwrap().to_string();
                    let mut uniform_offset: GLint = 0;
                    gl::GetActiveUniformsiv(progid, 1, &uniform_index, gl::UNIFORM_OFFSET, &mut uniform_offset);
                    uniforms.push(GLUniform { name: uniform_name, index: uniform_index, offset: uniform_offset, itype: uniform_type, size: uniform_size, });
                }
                // Allocate a zero-filled UBO of the block's size and bind it
                // to binding point `i`.
                let buffer_data = BufferData::new_zero_initialized(block_size as usize);
                let mut ubo: GLuint = 0;
                gl::GenBuffers(1, &mut ubo);
                gl::BindBuffer(gl::UNIFORM_BUFFER, ubo);
                gl::BufferData(gl::UNIFORM_BUFFER, buffer_data.bytes.len() as isize, mem::transmute(&buffer_data.bytes[0]), gl::STATIC_DRAW);
                gl::BindBufferBase(gl::UNIFORM_BUFFER, i as u32, ubo);
                uniform_blocks.push(GLUniformBlock { name: block_name, size: block_size as usize, buffer: ubo, buffer_data: BufferData::new_zero_initialized(block_size as usize), uniforms: uniforms, });
            }
            uniform_blocks
        }
    }

    /// Binds program, vertex buffer, index buffer and VAO, then issues a
    /// triangle draw of `ibo.count` vertices.
    // NOTE(review): uses `gl::DrawArrays` with the *index* buffer's count even
    // though an IBO is bound — presumably only correct for meshes whose
    // indices are 0..n; confirm whether `gl::DrawElements` was intended.
    fn draw_vertex_arrays(&mut self, vboh: VBOHandle, vaoh: VAOHandle, iboh: IBOHandle, progh: ProgramHandle) {
        let ibo = &self.ibos[iboh];
        self.bind_program(progh);
        self.bind_vertex_buffer(vboh);
        self.bind_index_buffer(iboh);
        self.bind_vertex_array(vaoh);
        unsafe { gl::DrawArrays(gl::TRIANGLES, 0, ibo.count as i32); }
    }

    /// Flushes changed shader parameters into the CPU-side copies of the
    /// affected uniform blocks, then uploads each touched block's bytes to its
    /// UBO via glBufferSubData.
    fn apply_shader_params(&mut self, geom: &mut Box<OpenGLGeometry>) {
        let changes;
        {
            let mut params = geom.get_mut_params();
            changes = params.flush_changes();
        }
        let mut prog: &mut GLProg = self.progs.get_mut(geom.program).unwrap();
        let mut uniform_blocks: &mut Vec<GLUniformBlock> = &mut prog.uniform_blocks;
        let mut affected_blocks: Vec<usize> = Vec::new();
        let params = geom.get_mut_params();
        // This is O(scary)
        // should probably be optimized some time
        // ShaderParams::flush_changes should return which blocks
        // are affected as well as the parameters within that where
        // affected, so that we can avoid this
        for name in changes.iter() {
            'outer: for (block_idx, block) in uniform_blocks.iter_mut().enumerate() {
                for uniform in block.uniforms.iter() {
                    if uniform.name == *name {
                        // Remember this block so it gets re-uploaded below.
                        if !affected_blocks.contains(&block_idx) { affected_blocks.push(block_idx); }
                        // Write the new value at the uniform's byte offset in
                        // the CPU-side shadow buffer.
                        let mut param_value = params.get(name);
                        match *param_value {
                            ParamValue::F32(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]),
                            ParamValue::Vec4(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]),
                            ParamValue::Mat3(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]),
                            ParamValue::Mat4(x) => block.buffer_data.update_region(uniform.offset as usize, vec![x]),
                        }
                        break 'outer;
                    }
                }
            }
        }
        // Upload the full byte image of every block that changed.
        for block_idx in affected_blocks {
            let block = uniform_blocks.get_mut(block_idx).unwrap();
            unsafe {
                gl::BindBuffer(gl::UNIFORM_BUFFER, block.buffer);
                gl::BufferSubData(gl::UNIFORM_BUFFER, 0, block.buffer_data.bytes.len() as isize, mem::transmute(&block.buffer_data.bytes[0]));
            }
        }
    }
}

/// Public `Renderer` trait implementation: clear, geometry creation, drawing.
impl Renderer for OpenGLRenderer {
    /// Clears the color buffer to the given RGBA color.
    fn clear(&mut self, r: f32, g: f32, b: f32, a: f32) {
        unsafe {
            gl::ClearColor(r, g, b, a);
            gl::Clear(gl::COLOR_BUFFER_BIT);
        }
    }

    /// Creates VBO, program, VAO and IBO for the supplied data and shader
    /// sources, derives default shader params from the program's uniform
    /// blocks, and boxes everything into an `OpenGLGeometry`.
    fn create_geometry(&mut self, vertex_data: BufferData, index_data: BufferData, layout: VertexLayoutDescription, index_type: IndexType, vert_src: &str, frag_src: &str) -> Box<Geometry> {
        let vbo = self.create_vertex_buffer_object(vertex_data).unwrap();
        let prog = self.create_program(vert_src, frag_src).unwrap();
        let vao = self.create_vertex_array_object(&layout, vbo, prog).unwrap();
        let ibo = self.create_index_buffer_object(index_type, index_data).unwrap();
        //let params = self.get_program_params(prog);
        let params = self.get_shader_params_from_uniforms(&self.progs[prog].uniform_blocks);
        let geom = OpenGLGeometry { vbo: vbo, vao: vao, ibo: ibo, program: prog, layout_desc: layout, params: params };
        Box::new(geom)
    }

    /// Applies pending shader-parameter changes, then draws the geometry.
    fn draw_geometry(&mut self, geom: &mut Box<Geometry>) {
        // This is pretty lame. There should be a better way to convert Box<Geometry> to Box<OpenGLGeometry>
        // Perhaps this is just an unsafe design by nature however.
        let glgeom: &mut Box<OpenGLGeometry> = unsafe { mem::transmute(geom) };
        self.apply_shader_params(glgeom);
        self.draw_vertex_arrays(glgeom.vbo, glgeom.vao, glgeom.ibo, glgeom.program);
    }
}
//! Infrastructure for compiler plugins. //! //! Plugins are Rust libraries which extend the behavior of `rustc` //! in various ways. //! //! Plugin authors will use the `Registry` type re-exported by //! this module, along with its methods. The rest of the module //! is for use by `rustc` itself. //! //! To define a plugin, build a dylib crate with a //! `#[plugin_registrar]` function: //! //! ```no_run //! #![crate_name = "myplugin"] //! #![crate_type = "dylib"] //! #![feature(plugin_registrar)] //! #![feature(rustc_private)] //! //! extern crate rustc_plugin; //! extern crate syntax; //! extern crate syntax_pos; //! //! use rustc_plugin::Registry; //! use syntax::ext::base::{ExtCtxt, MacResult}; //! use syntax_pos::Span; //! use syntax::tokenstream::TokenTree; //! //! #[plugin_registrar] //! pub fn plugin_registrar(reg: &mut Registry) { //! reg.register_macro("mymacro", expand_mymacro); //! } //! //! fn expand_mymacro(cx: &mut ExtCtxt, span: Span, tt: &[TokenTree]) -> Box<MacResult> { //! unimplemented!() //! } //! //! # fn main() {} //! ``` //! //! WARNING: We currently don't check that the registrar function //! has the appropriate type! //! //! To use a plugin while compiling another crate: //! //! ```rust //! #![feature(plugin)] //! #![plugin(myplugin)] //! ``` //! //! See the [`plugin` feature](../unstable-book/language-features/plugin.html) of //! the Unstable Book for more examples. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(nll)] #![feature(rustc_diagnostic_macros)] #![recursion_limit="256"] #![deny(rust_2018_idioms)] pub use registry::Registry; mod error_codes; pub mod registry; pub mod load; pub mod build; __build_diagnostic_array! { librustc_plugin, DIAGNOSTICS } Rollup merge of #60131 - agnxy:doc-link, r=ehuss Fix broken link in rustc_plugin doc fix #57489 r? @steveklabnik //! Infrastructure for compiler plugins. //! //! Plugins are Rust libraries which extend the behavior of `rustc` //! in various ways. //! //! 
Plugin authors will use the `Registry` type re-exported by //! this module, along with its methods. The rest of the module //! is for use by `rustc` itself. //! //! To define a plugin, build a dylib crate with a //! `#[plugin_registrar]` function: //! //! ```no_run //! #![crate_name = "myplugin"] //! #![crate_type = "dylib"] //! #![feature(plugin_registrar)] //! #![feature(rustc_private)] //! //! extern crate rustc_plugin; //! extern crate syntax; //! extern crate syntax_pos; //! //! use rustc_plugin::Registry; //! use syntax::ext::base::{ExtCtxt, MacResult}; //! use syntax_pos::Span; //! use syntax::tokenstream::TokenTree; //! //! #[plugin_registrar] //! pub fn plugin_registrar(reg: &mut Registry) { //! reg.register_macro("mymacro", expand_mymacro); //! } //! //! fn expand_mymacro(cx: &mut ExtCtxt, span: Span, tt: &[TokenTree]) -> Box<MacResult> { //! unimplemented!() //! } //! //! # fn main() {} //! ``` //! //! WARNING: We currently don't check that the registrar function //! has the appropriate type! //! //! To use a plugin while compiling another crate: //! //! ```rust //! #![feature(plugin)] //! #![plugin(myplugin)] //! ``` //! //! See the [`plugin` //! feature](https://doc.rust-lang.org/nightly/unstable-book/language-features/plugin.html) //! of the Unstable Book for more examples. #![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(nll)] #![feature(rustc_diagnostic_macros)] #![recursion_limit="256"] #![deny(rust_2018_idioms)] pub use registry::Registry; mod error_codes; pub mod registry; pub mod load; pub mod build; __build_diagnostic_array! { librustc_plugin, DIAGNOSTICS }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use ast::{self, Arg, Arm, Block, Expr, Item, Pat, Stmt, TokenTree, Ty};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::base;
use ext::build::AstBuilder;
use parse::parser::{Parser, PathParsingMode};
use parse::token::*;
use parse::token;
use ptr::P;

/// Quasiquoting works via token trees.
///
/// This is registered as a set of expression syntax extension called quote!
/// that lifts its argument token-tree to an AST representing the
/// construction of the same token tree, with token::SubstNt interpreted
/// as antiquotes (splices).
pub mod rt {
    use ast;
    use codemap::Spanned;
    use ext::base::ExtCtxt;
    use parse::{self, token, classify};
    use ptr::P;
    use std::rc::Rc;

    use ast::TokenTree;

    pub use parse::new_parser_from_tts;
    pub use codemap::{BytePos, Span, dummy_spanned, DUMMY_SP};

    /// Conversion of an AST node (or fragment) into the token stream that the
    /// quote! expansion splices in; `_cx` gives access to the expansion context.
    pub trait ToTokens {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree>;
    }

    // A token tree converts to itself.
    impl ToTokens for TokenTree {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec!(self.clone())
        }
    }

    // Sequences concatenate the tokens of their elements.
    impl<T: ToTokens> ToTokens for Vec<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            self.iter().flat_map(|t| t.to_tokens(cx)).collect()
        }
    }

    impl<T: ToTokens> ToTokens for Spanned<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            // FIXME: use the span?
            self.node.to_tokens(cx)
        }
    }

    // `None` contributes no tokens at all.
    impl<T: ToTokens> ToTokens for Option<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            match *self {
                Some(ref t) => t.to_tokens(cx),
                None => Vec::new(),
            }
        }
    }

    impl ToTokens for ast::Ident {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Ident(*self, token::Plain))]
        }
    }

    // The impls below wrap whole AST nodes as single interpolated
    // (`token::Interpolated` / `Nt*`) tokens rather than re-lexing them.
    impl ToTokens for ast::Path {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtPath(Box::new(self.clone()))))]
        }
    }

    impl ToTokens for ast::Ty {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtTy(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Block {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtBlock(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Generics {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtGenerics(self.clone())))]
        }
    }

    impl ToTokens for ast::WhereClause {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtWhereClause(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Item> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtItem(self.clone())))]
        }
    }

    impl ToTokens for ast::ImplItem {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtImplItem(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::TraitItem {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtTraitItem(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Stmt {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            let mut tts = vec![
                TokenTree::Token(self.span, token::Interpolated(token::NtStmt(P(self.clone()))))
            ];

            // Some statements require a trailing semicolon.
            if classify::stmt_ends_with_semi(&self.node) {
                tts.push(TokenTree::Token(self.span, token::Semi));
            }

            tts
        }
    }

    impl ToTokens for P<ast::Expr> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtExpr(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Pat> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtPat(self.clone())))]
        }
    }

    impl ToTokens for ast::Arm {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtArm(self.clone())))]
        }
    }

    impl ToTokens for ast::Arg {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtArg(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Block> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtBlock(self.clone())))]
        }
    }

    // Implements ToTokens for a slice type, interleaving `$sep` tokens
    // between successive elements.
    macro_rules! impl_to_tokens_slice {
        ($t: ty, $sep: expr) => {
            impl ToTokens for [$t] {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let mut v = vec![];
                    for (i, x) in self.iter().enumerate() {
                        if i > 0 {
                            v.extend_from_slice(&$sep);
                        }
                        v.extend(x.to_tokens(cx));
                    }
                    v
                }
            }
        };
    }

    impl_to_tokens_slice! { ast::Ty, [TokenTree::Token(DUMMY_SP, token::Comma)] }
    impl_to_tokens_slice! { P<ast::Item>, [] }
    impl_to_tokens_slice! { ast::Arg, [TokenTree::Token(DUMMY_SP, token::Comma)] }

    impl ToTokens for P<ast::MetaItem> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtMeta(self.clone())))]
        }
    }

    // Attributes are rebuilt structurally: `#`, optional `!`, then the
    // bracket-delimited meta-item tokens.
    impl ToTokens for ast::Attribute {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            let mut r = vec![];
            // FIXME: The spans could be better
            r.push(TokenTree::Token(self.span, token::Pound));
            if self.node.style == ast::AttrStyle::Inner {
                r.push(TokenTree::Token(self.span, token::Not));
            }
            r.push(TokenTree::Delimited(self.span, Rc::new(ast::Delimited {
                delim: token::Bracket,
                open_span: self.span,
                tts: self.node.value.to_tokens(cx),
                close_span: self.span,
            })));
            r
        }
    }

    // Literal-like conversions: values are lifted to literal expressions.
    impl ToTokens for str {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            let lit = ast::LitKind::Str(
                token::intern_and_get_ident(self), ast::StrStyle::Cooked);
            dummy_spanned(lit).to_tokens(cx)
        }
    }

    // Unit becomes an empty parenthesized group.
    impl ToTokens for () {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Delimited(DUMMY_SP, Rc::new(ast::Delimited {
                delim: token::Paren,
                open_span: DUMMY_SP,
                tts: vec![],
                close_span: DUMMY_SP,
            }))]
        }
    }

    impl ToTokens for ast::Lit {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            // FIXME: This is wrong
            P(ast::Expr {
                id: ast::DUMMY_NODE_ID,
                node: ast::ExprKind::Lit(P(self.clone())),
                span: DUMMY_SP,
                attrs: None,
            }).to_tokens(cx)
        }
    }

    impl ToTokens for bool {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            dummy_spanned(ast::LitKind::Bool(*self)).to_tokens(cx)
        }
    }

    impl ToTokens for char {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            dummy_spanned(ast::LitKind::Char(*self)).to_tokens(cx)
        }
    }

    // Implements ToTokens for an integer type. Signed values are emitted as a
    // non-negative literal, wrapped in a unary-negation expression when the
    // value was negative.
    macro_rules! impl_to_tokens_int {
        (signed, $t:ty, $tag:expr) => (
            impl ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let val = if *self < 0 { -self } else { *self };
                    let lit = ast::LitKind::Int(val as u64, ast::LitIntType::Signed($tag));
                    let lit = P(ast::Expr {
                        id: ast::DUMMY_NODE_ID,
                        node: ast::ExprKind::Lit(P(dummy_spanned(lit))),
                        span: DUMMY_SP,
                        attrs: None,
                    });
                    if *self >= 0 {
                        return lit.to_tokens(cx);
                    }
                    P(ast::Expr {
                        id: ast::DUMMY_NODE_ID,
                        node: ast::ExprKind::Unary(ast::UnOp::Neg, lit),
                        span: DUMMY_SP,
                        attrs: None,
                    }).to_tokens(cx)
                }
            }
        );
        (unsigned, $t:ty, $tag:expr) => (
            impl ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let lit = ast::LitKind::Int(*self as u64, ast::LitIntType::Unsigned($tag));
                    dummy_spanned(lit).to_tokens(cx)
                }
            }
        );
    }

    impl_to_tokens_int! { signed, isize, ast::IntTy::Is }
    impl_to_tokens_int! { signed, i8, ast::IntTy::I8 }
    impl_to_tokens_int! { signed, i16, ast::IntTy::I16 }
    impl_to_tokens_int! { signed, i32, ast::IntTy::I32 }
    impl_to_tokens_int! { signed, i64, ast::IntTy::I64 }

    impl_to_tokens_int! { unsigned, usize, ast::UintTy::Us }
    impl_to_tokens_int! { unsigned, u8, ast::UintTy::U8 }
    impl_to_tokens_int! { unsigned, u16, ast::UintTy::U16 }
    impl_to_tokens_int! { unsigned, u32, ast::UintTy::U32 }
    impl_to_tokens_int! { unsigned, u64, ast::UintTy::U64 }

    /// Convenience parsing helpers for expansion contexts; each parses `s` as
    /// the named AST fragment, panicking (via panictry!) on parse errors.
    pub trait ExtParseUtils {
        fn parse_item(&self, s: String) -> P<ast::Item>;
        fn parse_expr(&self, s: String) -> P<ast::Expr>;
        fn parse_stmt(&self, s: String) -> ast::Stmt;
        fn parse_tts(&self, s: String) -> Vec<TokenTree>;
    }

    impl<'a> ExtParseUtils for ExtCtxt<'a> {
        fn parse_item(&self, s: String) -> P<ast::Item> {
            panictry!(parse::parse_item_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess())).expect("parse error")
        }

        fn parse_stmt(&self, s: String) -> ast::Stmt {
            panictry!(parse::parse_stmt_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess())).expect("parse error")
        }

        fn parse_expr(&self, s: String) -> P<ast::Expr> {
            panictry!(parse::parse_expr_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess()))
        }

        fn parse_tts(&self, s: String) -> Vec<TokenTree> {
            panictry!(parse::parse_tts_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess()))
        }
    }
}

// These panicking parsing functions are used by the quote_*!() syntax extensions,
// but shouldn't be used otherwise.
// Thin panicking wrappers around the parser, one per AST fragment kind; the
// generated quote_*! expansions call these by name.
pub fn parse_expr_panic(parser: &mut Parser) -> P<Expr> {
    panictry!(parser.parse_expr())
}

pub fn parse_item_panic(parser: &mut Parser) -> Option<P<Item>> {
    panictry!(parser.parse_item())
}

pub fn parse_pat_panic(parser: &mut Parser) -> P<Pat> {
    panictry!(parser.parse_pat())
}

pub fn parse_arm_panic(parser: &mut Parser) -> Arm {
    panictry!(parser.parse_arm())
}

pub fn parse_ty_panic(parser: &mut Parser) -> P<Ty> {
    panictry!(parser.parse_ty())
}

pub fn parse_stmt_panic(parser: &mut Parser) -> Option<Stmt> {
    panictry!(parser.parse_stmt())
}

pub fn parse_attribute_panic(parser: &mut Parser, permit_inner: bool) -> ast::Attribute {
    panictry!(parser.parse_attribute(permit_inner))
}

pub fn parse_arg_panic(parser: &mut Parser) -> Arg {
    panictry!(parser.parse_arg())
}

pub fn parse_block_panic(parser: &mut Parser) -> P<Block> {
    panictry!(parser.parse_block())
}

pub fn parse_meta_item_panic(parser: &mut Parser) -> P<ast::MetaItem> {
    panictry!(parser.parse_meta_item())
}

pub fn parse_path_panic(parser: &mut Parser, mode: PathParsingMode) -> ast::Path {
    panictry!(parser.parse_path(mode))
}

// quote_tokens! lowers its argument to an expression that rebuilds the raw
// token trees, without re-parsing them into an AST fragment.
pub fn expand_quote_tokens<'cx>(cx: &'cx mut ExtCtxt,
                                sp: Span,
                                tts: &[TokenTree])
                                -> Box<base::MacResult+'cx> {
    let (cx_expr, expr) = expand_tts(cx, sp, tts);
    let expanded = expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]]);
    base::MacEager::expr(expanded)
}

// Each expand_quote_* below generates code that rebuilds the quoted tokens and
// feeds them through the matching parse_*_panic wrapper above.
pub fn expand_quote_expr<'cx>(cx: &'cx mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_expr_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_item<'cx>(cx: &mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_item_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_pat<'cx>(cx: &'cx mut ExtCtxt,
                             sp: Span,
                             tts: &[TokenTree])
                             -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_pat_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_arm(cx: &mut ExtCtxt,
                        sp: Span,
                        tts: &[TokenTree])
                        -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_arm_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_ty(cx: &mut ExtCtxt,
                       sp: Span,
                       tts: &[TokenTree])
                       -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_ty_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_stmt(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_stmt_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_attr(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    // `true` = permit inner attributes (`#![...]`) in the quoted text.
    let expanded = expand_parse_call(cx, sp, "parse_attribute_panic",
                                    vec!(cx.expr_bool(sp, true)), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_arg(cx: &mut ExtCtxt,
                        sp: Span,
                        tts: &[TokenTree])
                        -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_arg_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_block(cx: &mut ExtCtxt,
                          sp: Span,
                          tts: &[TokenTree])
                          -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_block_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_meta_item(cx: &mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_meta_item_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_path(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    let mode = mk_parser_path(cx, sp, "LifetimeAndTypesWithoutColons");
    let expanded = expand_parse_call(cx, sp, "parse_path_panic", vec!(mode), tts);
    base::MacEager::expr(expanded)
}

// quote_matcher! variant: builds the token trees in "matcher" mode (sequences
// and match-nonterminals allowed) and yields them directly.
pub fn expand_quote_matcher(cx: &mut ExtCtxt,
                            sp: Span,
                            tts: &[TokenTree])
                            -> Box<base::MacResult+'static> {
    let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);
    let mut vector = mk_stmts_let(cx, sp);
    vector.extend(statements_mk_tts(cx, &tts[..], true));
    let block = cx.expr_block(
        cx.block_all(sp, vector, Some(cx.expr_ident(sp, id_ext("tt")))));

    let expanded = expand_wrapper(cx, sp, cx_expr, block, &[&["syntax", "ext", "quote", "rt"]]);
    base::MacEager::expr(expanded)
}

// Helpers for naming identifiers in generated code.
fn ids_ext(strs: Vec<String> ) -> Vec<ast::Ident> {
    strs.iter().map(|str| str_to_ident(&(*str))).collect()
}

fn id_ext(str: &str) -> ast::Ident {
    str_to_ident(str)
}

// Lift an ident to the expr that evaluates to that ident.
fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let e_str = cx.expr_str(sp, ident.name.as_str());
    cx.expr_method_call(sp,
                        cx.expr_ident(sp, id_ext("ext_cx")),
                        id_ext("ident_of"),
                        vec!(e_str))
}

// Lift a name to the expr that evaluates to that name
fn mk_name(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let e_str = cx.expr_str(sp, ident.name.as_str());
    cx.expr_method_call(sp,
                        cx.expr_ident(sp, id_ext("ext_cx")),
                        id_ext("name_of"),
                        vec!(e_str))
}

// Path-expression builders for the generated code's references into
// syntax::ast::TokenTree, syntax::parse::token and syntax::parse::parser.
fn mk_tt_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("ast"), id_ext("TokenTree"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("token"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

fn mk_parser_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("parser"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

// Lift a binary-operator token to the path naming its variant.
fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
    let name = match bop {
        token::Plus => "Plus",
        token::Minus => "Minus",
        token::Star => "Star",
        token::Slash => "Slash",
        token::Percent => "Percent",
        token::Caret => "Caret",
        token::And => "And",
        token::Or => "Or",
        token::Shl => "Shl",
        token::Shr => "Shr"
    };
    mk_token_path(cx, sp, name)
}

// Lift a delimiter token to the path naming its variant.
fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
    let name = match delim {
        token::Paren => "Paren",
        token::Bracket => "Bracket",
        token::Brace => "Brace",
    };
    mk_token_path(cx, sp, name)
}

// Lift an arbitrary token to an expression that reconstructs it at expansion
// time; payload-carrying tokens are handled case-by-case, simple punctuation
// falls through to the name table at the bottom.
#[allow(non_upper_case_globals)]
fn expr_mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
    macro_rules! mk_lit {
        ($name: expr, $suffix: expr, $($args: expr),*) => {{
            let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
            let suffix = match $suffix {
                Some(name) => cx.expr_some(sp, mk_name(cx, sp, ast::Ident::with_empty_ctxt(name))),
                None => cx.expr_none(sp)
            };
            cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner, suffix])
        }}
    }
    match *tok {
        token::BinOp(binop) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
        }
        token::BinOpEq(binop) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"),
                                vec!(mk_binop(cx, sp, binop)));
        }

        token::OpenDelim(delim) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "OpenDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }
        token::CloseDelim(delim) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "CloseDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }

        token::Literal(token::Byte(i), suf) => {
            let e_byte = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Byte", suf, e_byte);
        }

        token::Literal(token::Char(i), suf) => {
            let e_char = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Char", suf, e_char);
        }

        token::Literal(token::Integer(i), suf) => {
            let e_int = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Integer", suf, e_int);
        }

        token::Literal(token::Float(fident), suf) => {
            let e_fident = mk_name(cx, sp, ast::Ident::with_empty_ctxt(fident));
            return mk_lit!("Float", suf, e_fident);
        }

        token::Literal(token::Str_(ident), suf) => {
            return mk_lit!("Str_", suf, mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident)))
        }

        token::Literal(token::StrRaw(ident, n), suf) => {
            return mk_lit!("StrRaw", suf, mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident)),
                           cx.expr_usize(sp, n))
        }

        token::Ident(ident, style) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Ident"),
                                vec![mk_ident(cx, sp, ident),
                                     match style {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     }]);
        }

        token::Lifetime(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Lifetime"),
                                vec!(mk_ident(cx, sp, ident)));
        }

        token::DocComment(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "DocComment"),
                                vec!(mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident))));
        }

        token::MatchNt(name, kind, namep, kindp) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "MatchNt"),
                                vec!(mk_ident(cx, sp, name),
                                     mk_ident(cx, sp, kind),
                                     match namep {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     },
                                     match kindp {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     }));
        }

        // Interpolated AST fragments cannot be reconstructed token-by-token.
        token::Interpolated(_) => panic!("quote! with interpolated token"),

        _ => ()
    }

    // Payload-free tokens: reconstructed purely from the variant name.
    let name = match *tok {
        token::Eq => "Eq",
        token::Lt => "Lt",
        token::Le => "Le",
        token::EqEq => "EqEq",
        token::Ne => "Ne",
        token::Ge => "Ge",
        token::Gt => "Gt",
        token::AndAnd => "AndAnd",
        token::OrOr => "OrOr",
        token::Not => "Not",
        token::Tilde => "Tilde",
        token::At => "At",
        token::Dot => "Dot",
        token::DotDot => "DotDot",
        token::Comma => "Comma",
        token::Semi => "Semi",
        token::Colon => "Colon",
        token::ModSep => "ModSep",
        token::RArrow => "RArrow",
        token::LArrow => "LArrow",
        token::FatArrow => "FatArrow",
        token::Pound => "Pound",
        token::Dollar => "Dollar",
        token::Question => "Question",
        token::Underscore => "Underscore",
        token::Eof => "Eof",
        _ => panic!("unhandled token in quote!"),
    };
    mk_token_path(cx, sp, name)
}

// Emits statements that push the runtime reconstruction of `tt` onto the
// generated `tt` vector; `matcher` selects matcher-mode handling of
// sequences and match-nonterminals.
fn statements_mk_tt(cx: &ExtCtxt, tt: &TokenTree, matcher: bool) -> Vec<ast::Stmt> {
    match *tt {
        TokenTree::Token(sp, SubstNt(ident, _)) => {
            // tt.extend($ident.to_tokens(ext_cx))

            let e_to_toks =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, ident),
                                    id_ext("to_tokens"),
                                    vec!(cx.expr_ident(sp, id_ext("ext_cx"))));
            let e_to_toks =
                cx.expr_method_call(sp, e_to_toks, id_ext("into_iter"), vec![]);

            let e_push =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, id_ext("tt")),
                                    id_ext("extend"),
                                    vec!(e_to_toks));

            vec!(cx.stmt_expr(e_push))
        }
        ref tt @ TokenTree::Token(_, MatchNt(..)) if !matcher => {
            // Outside matcher mode, a MatchNt is flattened into its
            // constituent sub-token-trees.
            let mut seq = vec![];
            for i in 0..tt.len() {
                seq.push(tt.get_tt(i));
            }
            statements_mk_tts(cx, &seq[..], matcher)
        }
        TokenTree::Token(sp, ref tok) => {
            let e_sp = cx.expr_ident(sp, id_ext("_sp"));
            let e_tok = cx.expr_call(sp,
                                     mk_tt_path(cx, sp, "Token"),
                                     vec!(e_sp, expr_mk_token(cx, sp, tok)));
            let e_push =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, id_ext("tt")),
                                    id_ext("push"),
                                    vec!(e_tok));
            vec!(cx.stmt_expr(e_push))
        },
        TokenTree::Delimited(_, ref delimed) => {
            // open delimiter, interior trees, close delimiter — in order.
            statements_mk_tt(cx, &delimed.open_tt(), matcher).into_iter()
                .chain(delimed.tts.iter()
                                  .flat_map(|tt| statements_mk_tt(cx, tt, matcher)))
                .chain(statements_mk_tt(cx, &delimed.close_tt(), matcher))
                .collect()
        },
        TokenTree::Sequence(sp, ref seq) => {
            if !matcher {
                panic!("TokenTree::Sequence in quote!");
            }

            let e_sp = cx.expr_ident(sp, id_ext("_sp"));

            // Build the inner token list in its own block, then construct a
            // SequenceRepetition with separator, Kleene op and capture count.
            let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));
            let mut tts_stmts = vec![stmt_let_tt];
            tts_stmts.extend(statements_mk_tts(cx, &seq.tts[..], matcher));
            let e_tts = cx.expr_block(cx.block(sp, tts_stmts,
                                                   Some(cx.expr_ident(sp, id_ext("tt")))));
            let e_separator = match seq.separator {
                Some(ref sep) => cx.expr_some(sp, expr_mk_token(cx, sp, sep)),
                None => cx.expr_none(sp),
            };
            let e_op = match seq.op {
                ast::KleeneOp::ZeroOrMore => "ZeroOrMore",
                ast::KleeneOp::OneOrMore => "OneOrMore",
            };
            let e_op_idents = vec![
                id_ext("syntax"),
                id_ext("ast"),
                id_ext("KleeneOp"),
                id_ext(e_op),
            ];
            let e_op = cx.expr_path(cx.path_global(sp, e_op_idents));
            let fields = vec![cx.field_imm(sp, id_ext("tts"), e_tts),
                              cx.field_imm(sp, id_ext("separator"), e_separator),
                              cx.field_imm(sp, id_ext("op"), e_op),
                              cx.field_imm(sp, id_ext("num_captures"),
                                               cx.expr_usize(sp, seq.num_captures))];
            let seq_path = vec![id_ext("syntax"), id_ext("ast"), id_ext("SequenceRepetition")];
            let e_seq_struct = cx.expr_struct(sp, cx.path_global(sp, seq_path), fields);
            let e_rc_new = cx.expr_call_global(sp, vec![id_ext("std"),
                                                        id_ext("rc"),
                                                        id_ext("Rc"),
                                                        id_ext("new")],
                                                   vec![e_seq_struct]);
            let e_tok = cx.expr_call(sp,
                                     mk_tt_path(cx, sp, "Sequence"),
                                     vec!(e_sp, e_rc_new));
            let e_push =
                cx.expr_method_call(sp,
                                    cx.expr_ident(sp, id_ext("tt")),
                                    id_ext("push"),
                                    vec!(e_tok));
            vec!(cx.stmt_expr(e_push))
        }
    }
}

// Splits a quote! invocation into the expansion-context expression (before
// the first comma) and the quoted token trees (after it).
fn parse_arguments_to_quote(cx: &ExtCtxt, tts: &[TokenTree])
                            -> (P<ast::Expr>, Vec<TokenTree>) {
    // NB: It appears that the main parser loses its mind if we consider
    // $foo as a SubstNt during the main parse, so we have to re-parse
    // under quote_depth > 0. This is silly and should go away; the _guess_ is
    // it has to do with transition away from supporting old-style macros, so
    // try removing it when enough of them are gone.

    let mut p = cx.new_parser_from_tts(tts);
    p.quote_depth += 1;

    let cx_expr = panictry!(p.parse_expr());
    if !p.eat(&token::Comma) {
        let _ = p.diagnostic().fatal("expected token `,`");
    }

    let tts = panictry!(p.parse_all_token_trees());
    p.abort_if_errors();

    (cx_expr, tts)
}

// Emits the prologue of every quote expansion: `let _sp = ext_cx.call_site();`
// and `let mut tt = Vec::new();`.
fn mk_stmts_let(cx: &ExtCtxt, sp: Span) -> Vec<ast::Stmt> {
    // We also bind a single value, sp, to ext_cx.call_site()
    //
    // This causes every span in a token-tree quote to be attributed to the
    // call site of the extension using the quote. We can't really do much
    // better since the source of the quote may well be in a library that
    // was not even parsed by this compilation run, that the user has no
    // source code for (eg. in libsyntax, which they're just _using_).
    //
    // The old quasiquoter had an elaborate mechanism for denoting input
    // file locations from which quotes originated; unfortunately this
    // relied on feeding the source string of the quote back into the
    // compiler (which we don't really want to do) and, in any case, only
    // pushed the problem a very small step further back: an error
    // resulting from a parse of the resulting quote is still attributed to
    // the site the string literal occurred, which was in a source file
    // _other_ than the one the user has control over. For example, an
    // error in a quote from the protocol compiler, invoked in user code
    // using macro_rules! for example, will be attributed to the macro_rules.rs
    // file in libsyntax, which the user might not even have source to (unless
    // they happen to have a compiler on hand). Over all, the phase distinction
    // just makes quotes "hard to attribute". Possibly this could be fixed
    // by recreating some of the original qq machinery in the tt regime
    // (pushing fake FileMaps onto the parser to account for original sites
    // of quotes, for example) but at this point it seems not likely to be
    // worth the hassle.

    let e_sp = cx.expr_method_call(sp,
                                   cx.expr_ident(sp, id_ext("ext_cx")),
                                   id_ext("call_site"),
                                   Vec::new());

    let stmt_let_sp = cx.stmt_let(sp, false,
                                  id_ext("_sp"),
                                  e_sp);

    let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));

    vec!(stmt_let_sp, stmt_let_tt)
}

// Concatenates the reconstruction statements for a whole token-tree slice.
fn statements_mk_tts(cx: &ExtCtxt, tts: &[TokenTree], matcher: bool) -> Vec<ast::Stmt> {
    let mut ss = Vec::new();
    for tt in tts {
        ss.extend(statements_mk_tt(cx, tt, matcher));
    }
    ss
}

// Produces (context expression, block expression that rebuilds the quoted
// tokens into a Vec<TokenTree> named `tt`).
fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[TokenTree])
              -> (P<ast::Expr>, P<ast::Expr>) {
    let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);

    let mut vector = mk_stmts_let(cx, sp);
    vector.extend(statements_mk_tts(cx, &tts[..], false));
    let block = cx.expr_block(
        cx.block_all(sp, vector, Some(cx.expr_ident(sp, id_ext("tt")))));

    (cx_expr, block)
}

// Wraps the generated expression in a block that glob-imports `imports` and
// binds `ext_cx` to a reborrow of the caller-supplied context.
fn expand_wrapper(cx: &ExtCtxt,
                  sp: Span,
                  cx_expr: P<ast::Expr>,
                  expr: P<ast::Expr>,
                  imports: &[&[&str]]) -> P<ast::Expr> {
    // Explicitly borrow to avoid moving from the invoker (#16992)
    let cx_expr_borrow = cx.expr_addr_of(sp, cx.expr_deref(sp, cx_expr));
    let stmt_let_ext_cx = cx.stmt_let(sp, false, id_ext("ext_cx"), cx_expr_borrow);

    let stmts = imports.iter().map(|path| {
        // make item: `use ...;`
        let path = path.iter().map(|s| s.to_string()).collect();
        cx.stmt_item(sp, cx.item_use_glob(sp, ast::Visibility::Inherited, ids_ext(path)))
    }).chain(Some(stmt_let_ext_cx)).collect();

    cx.expr_block(cx.block_all(sp, stmts, Some(expr)))
}

// Builds the final expansion: a call to
// syntax::ext::quote::<parse_method>(&mut new_parser_from_tts(...), args...),
// wrapped with the rt imports (plus parse::attr for attribute parsing).
fn expand_parse_call(cx: &ExtCtxt,
                     sp: Span,
                     parse_method: &str,
                     arg_exprs: Vec<P<ast::Expr>> ,
                     tts: &[TokenTree]) -> P<ast::Expr> {
    let (cx_expr, tts_expr) = expand_tts(cx, sp, tts);

    let cfg_call = || cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("cfg"), Vec::new());

    let parse_sess_call = || cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("parse_sess"), Vec::new());

    let new_parser_call =
        cx.expr_call(sp,
                     cx.expr_ident(sp, id_ext("new_parser_from_tts")),
                     vec!(parse_sess_call(), cfg_call(), tts_expr));

    let path = vec![id_ext("syntax"), id_ext("ext"), id_ext("quote"), id_ext(parse_method)];
    let mut args = vec![cx.expr_mut_addr_of(sp, new_parser_call)];
    args.extend(arg_exprs);
    let expr = cx.expr_call_global(sp, path, args);

    if parse_method == "parse_attribute" {
        expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"],
                                                &["syntax", "parse", "attr"]])
    } else {
        expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]])
    }
}

// NOTE(review): everything below is extraction residue — a VCS commit title
// followed by a duplicated copy of this file's header, cut off mid-way. It is
// not valid Rust; preserved verbatim from the source chunk.
syntax: impl ToTokens for P<ast::ImplItem>
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ast::{self, Arg, Arm, Block, Expr, Item, Pat, Stmt, TokenTree, Ty};
use codemap::Span;
use ext::base::ExtCtxt;
use ext::base;
use ext::build::AstBuilder;
use parse::parser::{Parser, PathParsingMode};
use parse::token::*;
use parse::token;
use ptr::P;
/// Quasiquoting works via token trees.
///
/// This is registered as a set of expression syntax extension called quote!
/// that lifts its argument token-tree to an AST representing the
/// construction of the same token tree, with token::SubstNt interpreted
/// as antiquotes (splices).
/// Runtime support module for expanded `quote_*!` invocations.
///
/// The code generated by the quasiquoter refers to items in this module
/// (it is glob-imported by `expand_wrapper` below), so everything here is
/// part of the de-facto interface of the generated code.
pub mod rt {
    use ast;
    use codemap::Spanned;
    use ext::base::ExtCtxt;
    use parse::{self, token, classify};
    use ptr::P;
    use std::rc::Rc;

    use ast::TokenTree;

    pub use parse::new_parser_from_tts;
    pub use codemap::{BytePos, Span, dummy_spanned, DUMMY_SP};

    /// Conversion of a value into the token trees that `quote!` splices in
    /// place of a `$var` antiquote. Most AST impls below wrap the value in a
    /// `token::Interpolated` nonterminal rather than re-lexing it.
    pub trait ToTokens {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree>;
    }

    impl ToTokens for TokenTree {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec!(self.clone())
        }
    }

    impl<T: ToTokens> ToTokens for Vec<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            self.iter().flat_map(|t| t.to_tokens(cx)).collect()
        }
    }

    impl<T: ToTokens> ToTokens for Spanned<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            // FIXME: use the span?
            self.node.to_tokens(cx)
        }
    }

    impl<T: ToTokens> ToTokens for Option<T> {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            match *self {
                Some(ref t) => t.to_tokens(cx),
                None => Vec::new(),
            }
        }
    }

    impl ToTokens for ast::Ident {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Ident(*self, token::Plain))]
        }
    }

    impl ToTokens for ast::Path {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP,
                                  token::Interpolated(token::NtPath(Box::new(self.clone()))))]
        }
    }

    impl ToTokens for ast::Ty {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtTy(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Block {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtBlock(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Generics {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtGenerics(self.clone())))]
        }
    }

    impl ToTokens for ast::WhereClause {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP,
                                  token::Interpolated(token::NtWhereClause(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Item> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtItem(self.clone())))]
        }
    }

    impl ToTokens for ast::ImplItem {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span,
                                  token::Interpolated(token::NtImplItem(P(self.clone()))))]
        }
    }

    // Same as the impl above, but avoids re-wrapping a value that is
    // already behind a `P`.
    impl ToTokens for P<ast::ImplItem> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtImplItem(self.clone())))]
        }
    }

    impl ToTokens for ast::TraitItem {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span,
                                  token::Interpolated(token::NtTraitItem(P(self.clone()))))]
        }
    }

    impl ToTokens for ast::Stmt {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            let mut tts = vec![
                TokenTree::Token(self.span, token::Interpolated(token::NtStmt(P(self.clone()))))
            ];

            // Some statements require a trailing semicolon.
            if classify::stmt_ends_with_semi(&self.node) {
                tts.push(TokenTree::Token(self.span, token::Semi));
            }

            tts
        }
    }

    impl ToTokens for P<ast::Expr> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtExpr(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Pat> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(self.span, token::Interpolated(token::NtPat(self.clone())))]
        }
    }

    impl ToTokens for ast::Arm {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtArm(self.clone())))]
        }
    }

    impl ToTokens for ast::Arg {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtArg(self.clone())))]
        }
    }

    impl ToTokens for P<ast::Block> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtBlock(self.clone())))]
        }
    }

    // Generates a `ToTokens` impl for a slice type, interposing `$sep`
    // (a token-tree array, possibly empty) between consecutive elements.
    macro_rules! impl_to_tokens_slice {
        ($t: ty, $sep: expr) => {
            impl ToTokens for [$t] {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let mut v = vec![];
                    for (i, x) in self.iter().enumerate() {
                        if i > 0 {
                            v.extend_from_slice(&$sep);
                        }
                        v.extend(x.to_tokens(cx));
                    }
                    v
                }
            }
        };
    }

    impl_to_tokens_slice! { ast::Ty, [TokenTree::Token(DUMMY_SP, token::Comma)] }
    impl_to_tokens_slice! { P<ast::Item>, [] }
    impl_to_tokens_slice! { ast::Arg, [TokenTree::Token(DUMMY_SP, token::Comma)] }

    impl ToTokens for P<ast::MetaItem> {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            vec![TokenTree::Token(DUMMY_SP, token::Interpolated(token::NtMeta(self.clone())))]
        }
    }

    impl ToTokens for ast::Attribute {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            let mut r = vec![];
            // FIXME: The spans could be better
            r.push(TokenTree::Token(self.span, token::Pound));
            if self.node.style == ast::AttrStyle::Inner {
                r.push(TokenTree::Token(self.span, token::Not));
            }
            r.push(TokenTree::Delimited(self.span, Rc::new(ast::Delimited {
                delim: token::Bracket,
                open_span: self.span,
                tts: self.node.value.to_tokens(cx),
                close_span: self.span,
            })));
            r
        }
    }

    impl ToTokens for str {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            let lit = ast::LitKind::Str(
                token::intern_and_get_ident(self), ast::StrStyle::Cooked);
            dummy_spanned(lit).to_tokens(cx)
        }
    }

    impl ToTokens for () {
        fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
            // Unit is lifted as an empty parenthesized group: `()`.
            vec![TokenTree::Delimited(DUMMY_SP, Rc::new(ast::Delimited {
                delim: token::Paren,
                open_span: DUMMY_SP,
                tts: vec![],
                close_span: DUMMY_SP,
            }))]
        }
    }

    impl ToTokens for ast::Lit {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            // FIXME: This is wrong
            P(ast::Expr {
                id: ast::DUMMY_NODE_ID,
                node: ast::ExprKind::Lit(P(self.clone())),
                span: DUMMY_SP,
                attrs: None,
            }).to_tokens(cx)
        }
    }

    impl ToTokens for bool {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            dummy_spanned(ast::LitKind::Bool(*self)).to_tokens(cx)
        }
    }

    impl ToTokens for char {
        fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
            dummy_spanned(ast::LitKind::Char(*self)).to_tokens(cx)
        }
    }

    // Generates `ToTokens` impls for the primitive integer types. The
    // `signed` arm emits a non-negative literal, wrapped in a unary-negation
    // expression when the value is negative.
    macro_rules! impl_to_tokens_int {
        (signed, $t:ty, $tag:expr) => (
            impl ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let val = if *self < 0 {
                        -self
                    } else {
                        *self
                    };
                    let lit = ast::LitKind::Int(val as u64, ast::LitIntType::Signed($tag));
                    let lit = P(ast::Expr {
                        id: ast::DUMMY_NODE_ID,
                        node: ast::ExprKind::Lit(P(dummy_spanned(lit))),
                        span: DUMMY_SP,
                        attrs: None,
                    });
                    if *self >= 0 {
                        return lit.to_tokens(cx);
                    }
                    P(ast::Expr {
                        id: ast::DUMMY_NODE_ID,
                        node: ast::ExprKind::Unary(ast::UnOp::Neg, lit),
                        span: DUMMY_SP,
                        attrs: None,
                    }).to_tokens(cx)
                }
            }
        );
        (unsigned, $t:ty, $tag:expr) => (
            impl ToTokens for $t {
                fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
                    let lit = ast::LitKind::Int(*self as u64, ast::LitIntType::Unsigned($tag));
                    dummy_spanned(lit).to_tokens(cx)
                }
            }
        );
    }

    impl_to_tokens_int! { signed, isize, ast::IntTy::Is }
    impl_to_tokens_int! { signed, i8, ast::IntTy::I8 }
    impl_to_tokens_int! { signed, i16, ast::IntTy::I16 }
    impl_to_tokens_int! { signed, i32, ast::IntTy::I32 }
    impl_to_tokens_int! { signed, i64, ast::IntTy::I64 }
    impl_to_tokens_int! { unsigned, usize, ast::UintTy::Us }
    impl_to_tokens_int! { unsigned, u8, ast::UintTy::U8 }
    impl_to_tokens_int! { unsigned, u16, ast::UintTy::U16 }
    impl_to_tokens_int! { unsigned, u32, ast::UintTy::U32 }
    impl_to_tokens_int! { unsigned, u64, ast::UintTy::U64 }

    /// Convenience helpers for parsing source strings inside quote
    /// expansions; all of them panic on parse errors.
    pub trait ExtParseUtils {
        fn parse_item(&self, s: String) -> P<ast::Item>;
        fn parse_expr(&self, s: String) -> P<ast::Expr>;
        fn parse_stmt(&self, s: String) -> ast::Stmt;
        fn parse_tts(&self, s: String) -> Vec<TokenTree>;
    }

    impl<'a> ExtParseUtils for ExtCtxt<'a> {
        fn parse_item(&self, s: String) -> P<ast::Item> {
            panictry!(parse::parse_item_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess())).expect("parse error")
        }

        fn parse_stmt(&self, s: String) -> ast::Stmt {
            panictry!(parse::parse_stmt_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess())).expect("parse error")
        }

        fn parse_expr(&self, s: String) -> P<ast::Expr> {
            panictry!(parse::parse_expr_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess()))
        }

        fn parse_tts(&self, s: String) -> Vec<TokenTree> {
            panictry!(parse::parse_tts_from_source_str(
                "<quote expansion>".to_string(),
                s,
                self.cfg(),
                self.parse_sess()))
        }
    }
}

// These panicking parsing functions are used by the quote_*!() syntax extensions,
// but shouldn't be used otherwise.
// Thin wrappers that turn the parser's fallible entry points into panicking
// ones; the expressions generated by `expand_parse_call` below call these
// through their `syntax::ext::quote::*` paths.
pub fn parse_expr_panic(parser: &mut Parser) -> P<Expr> {
    panictry!(parser.parse_expr())
}

pub fn parse_item_panic(parser: &mut Parser) -> Option<P<Item>> {
    panictry!(parser.parse_item())
}

pub fn parse_pat_panic(parser: &mut Parser) -> P<Pat> {
    panictry!(parser.parse_pat())
}

pub fn parse_arm_panic(parser: &mut Parser) -> Arm {
    panictry!(parser.parse_arm())
}

pub fn parse_ty_panic(parser: &mut Parser) -> P<Ty> {
    panictry!(parser.parse_ty())
}

pub fn parse_stmt_panic(parser: &mut Parser) -> Option<Stmt> {
    panictry!(parser.parse_stmt())
}

pub fn parse_attribute_panic(parser: &mut Parser, permit_inner: bool) -> ast::Attribute {
    panictry!(parser.parse_attribute(permit_inner))
}

pub fn parse_arg_panic(parser: &mut Parser) -> Arg {
    panictry!(parser.parse_arg())
}

pub fn parse_block_panic(parser: &mut Parser) -> P<Block> {
    panictry!(parser.parse_block())
}

pub fn parse_meta_item_panic(parser: &mut Parser) -> P<ast::MetaItem> {
    panictry!(parser.parse_meta_item())
}

pub fn parse_path_panic(parser: &mut Parser, mode: PathParsingMode) -> ast::Path {
    panictry!(parser.parse_path(mode))
}

/// `quote_tokens!`: expands to an expression producing the quoted token
/// trees themselves (no re-parse at the call site).
pub fn expand_quote_tokens<'cx>(cx: &'cx mut ExtCtxt,
                                sp: Span,
                                tts: &[TokenTree])
                                -> Box<base::MacResult+'cx> {
    let (cx_expr, expr) = expand_tts(cx, sp, tts);
    let expanded = expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]]);
    base::MacEager::expr(expanded)
}

// The remaining quote_*! expanders all follow the same shape: build the
// token-tree expression, then wrap it in a call that re-parses it at the
// call site via the matching *_panic function above.
pub fn expand_quote_expr<'cx>(cx: &'cx mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_expr_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_item<'cx>(cx: &mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_item_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_pat<'cx>(cx: &'cx mut ExtCtxt,
                             sp: Span,
                             tts: &[TokenTree])
                             -> Box<base::MacResult+'cx> {
    let expanded = expand_parse_call(cx, sp, "parse_pat_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_arm(cx: &mut ExtCtxt,
                        sp: Span,
                        tts: &[TokenTree])
                        -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_arm_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_ty(cx: &mut ExtCtxt,
                       sp: Span,
                       tts: &[TokenTree])
                       -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_ty_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_stmt(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_stmt_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_attr(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    // The extra `true` argument permits inner (`#![...]`) attributes.
    let expanded = expand_parse_call(cx, sp, "parse_attribute_panic",
                                     vec!(cx.expr_bool(sp, true)), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_arg(cx: &mut ExtCtxt,
                        sp: Span,
                        tts: &[TokenTree])
                        -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_arg_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_block(cx: &mut ExtCtxt,
                          sp: Span,
                          tts: &[TokenTree])
                          -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_block_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_meta_item(cx: &mut ExtCtxt,
                              sp: Span,
                              tts: &[TokenTree])
                              -> Box<base::MacResult+'static> {
    let expanded = expand_parse_call(cx, sp, "parse_meta_item_panic", vec!(), tts);
    base::MacEager::expr(expanded)
}

pub fn expand_quote_path(cx: &mut ExtCtxt,
                         sp: Span,
                         tts: &[TokenTree])
                         -> Box<base::MacResult+'static> {
    let mode = mk_parser_path(cx, sp, "LifetimeAndTypesWithoutColons");
    let expanded = expand_parse_call(cx, sp, "parse_path_panic", vec!(mode), tts);
    base::MacEager::expr(expanded)
}

/// `quote_matcher!`: like `quote_tokens!` but keeps `$name:kind` match
/// nonterminals and `$(...)` sequences intact (matcher position).
pub fn expand_quote_matcher(cx: &mut ExtCtxt,
                            sp: Span,
                            tts: &[TokenTree])
                            -> Box<base::MacResult+'static> {
    let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);
    let mut vector = mk_stmts_let(cx, sp);
    vector.extend(statements_mk_tts(cx, &tts[..], true));
    let block = cx.expr_block(cx.block_all(sp,
                                           vector,
                                           Some(cx.expr_ident(sp, id_ext("tt")))));

    let expanded = expand_wrapper(cx, sp, cx_expr, block,
                                  &[&["syntax", "ext", "quote", "rt"]]);
    base::MacEager::expr(expanded)
}

fn ids_ext(strs: Vec<String>) -> Vec<ast::Ident> {
    strs.iter().map(|str| str_to_ident(&(*str))).collect()
}

fn id_ext(str: &str) -> ast::Ident {
    str_to_ident(str)
}

// Lift an ident to the expr that evaluates to that ident.
fn mk_ident(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let e_str = cx.expr_str(sp, ident.name.as_str());
    cx.expr_method_call(sp,
                        cx.expr_ident(sp, id_ext("ext_cx")),
                        id_ext("ident_of"),
                        vec!(e_str))
}

// Lift a name to the expr that evaluates to that name
fn mk_name(cx: &ExtCtxt, sp: Span, ident: ast::Ident) -> P<ast::Expr> {
    let e_str = cx.expr_str(sp, ident.name.as_str());
    cx.expr_method_call(sp,
                        cx.expr_ident(sp, id_ext("ext_cx")),
                        id_ext("name_of"),
                        vec!(e_str))
}

// Path expression for the `syntax::ast::TokenTree::<name>` variant.
fn mk_tt_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("ast"), id_ext("TokenTree"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

// Path expression for `syntax::parse::token::<name>`.
fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("token"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

// Path expression for `syntax::parse::parser::<name>`.
fn mk_parser_path(cx: &ExtCtxt, sp: Span, name: &str) -> P<ast::Expr> {
    let idents = vec!(id_ext("syntax"), id_ext("parse"), id_ext("parser"), id_ext(name));
    cx.expr_path(cx.path_global(sp, idents))
}

fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P<ast::Expr> {
    let name = match bop {
        token::Plus => "Plus",
        token::Minus => "Minus",
        token::Star => "Star",
        token::Slash => "Slash",
        token::Percent => "Percent",
        token::Caret => "Caret",
        token::And => "And",
        token::Or => "Or",
        token::Shl => "Shl",
        token::Shr => "Shr"
    };
    mk_token_path(cx, sp, name)
}

fn mk_delim(cx: &ExtCtxt, sp: Span, delim: token::DelimToken) -> P<ast::Expr> {
    let name = match delim {
        token::Paren => "Paren",
        token::Bracket => "Bracket",
        token::Brace => "Brace",
    };
    mk_token_path(cx, sp, name)
}

// Lifts a single token to an expression that rebuilds that token. Tokens
// carrying payloads (operators, literals, idents, ...) are handled in the
// first `match` (each arm returns); plain punctuation falls through to the
// name table at the bottom.
#[allow(non_upper_case_globals)]
fn expr_mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P<ast::Expr> {
    // Builds `token::Literal(token::$name(..), suffix)`.
    macro_rules! mk_lit {
        ($name: expr, $suffix: expr, $($args: expr),*) => {{
            let inner = cx.expr_call(sp, mk_token_path(cx, sp, $name), vec![$($args),*]);
            let suffix = match $suffix {
                Some(name) => cx.expr_some(sp, mk_name(cx, sp, ast::Ident::with_empty_ctxt(name))),
                None => cx.expr_none(sp)
            };
            cx.expr_call(sp, mk_token_path(cx, sp, "Literal"), vec![inner, suffix])
        }}
    }
    match *tok {
        token::BinOp(binop) => {
            return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop)));
        }
        token::BinOpEq(binop) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "BinOpEq"),
                                vec!(mk_binop(cx, sp, binop)));
        }
        token::OpenDelim(delim) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "OpenDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }
        token::CloseDelim(delim) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "CloseDelim"),
                                vec![mk_delim(cx, sp, delim)]);
        }
        token::Literal(token::Byte(i), suf) => {
            let e_byte = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Byte", suf, e_byte);
        }
        token::Literal(token::Char(i), suf) => {
            let e_char = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Char", suf, e_char);
        }
        token::Literal(token::Integer(i), suf) => {
            let e_int = mk_name(cx, sp, ast::Ident::with_empty_ctxt(i));
            return mk_lit!("Integer", suf, e_int);
        }
        token::Literal(token::Float(fident), suf) => {
            let e_fident = mk_name(cx, sp, ast::Ident::with_empty_ctxt(fident));
            return mk_lit!("Float", suf, e_fident);
        }
        token::Literal(token::Str_(ident), suf) => {
            return mk_lit!("Str_", suf, mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident)))
        }
        token::Literal(token::StrRaw(ident, n), suf) => {
            return mk_lit!("StrRaw", suf,
                           mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident)),
                           cx.expr_usize(sp, n))
        }
        token::Ident(ident, style) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Ident"),
                                vec![mk_ident(cx, sp, ident),
                                     match style {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     }]);
        }
        token::Lifetime(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "Lifetime"),
                                vec!(mk_ident(cx, sp, ident)));
        }
        token::DocComment(ident) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "DocComment"),
                                vec!(mk_name(cx, sp, ast::Ident::with_empty_ctxt(ident))));
        }
        token::MatchNt(name, kind, namep, kindp) => {
            return cx.expr_call(sp,
                                mk_token_path(cx, sp, "MatchNt"),
                                vec!(mk_ident(cx, sp, name),
                                     mk_ident(cx, sp, kind),
                                     match namep {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     },
                                     match kindp {
                                         ModName => mk_token_path(cx, sp, "ModName"),
                                         Plain => mk_token_path(cx, sp, "Plain"),
                                     }));
        }
        // Interpolated AST fragments cannot be faithfully re-lifted here.
        token::Interpolated(_) => panic!("quote! with interpolated token"),
        _ => ()
    }

    // Payload-free tokens: just name the unit variant.
    let name = match *tok {
        token::Eq => "Eq",
        token::Lt => "Lt",
        token::Le => "Le",
        token::EqEq => "EqEq",
        token::Ne => "Ne",
        token::Ge => "Ge",
        token::Gt => "Gt",
        token::AndAnd => "AndAnd",
        token::OrOr => "OrOr",
        token::Not => "Not",
        token::Tilde => "Tilde",
        token::At => "At",
        token::Dot => "Dot",
        token::DotDot => "DotDot",
        token::Comma => "Comma",
        token::Semi => "Semi",
        token::Colon => "Colon",
        token::ModSep => "ModSep",
        token::RArrow => "RArrow",
        token::LArrow => "LArrow",
        token::FatArrow => "FatArrow",
        token::Pound => "Pound",
        token::Dollar => "Dollar",
        token::Question => "Question",
        token::Underscore => "Underscore",
        token::Eof => "Eof",
        _ => panic!("unhandled token in quote!"),
    };
    mk_token_path(cx, sp, name)
}

// Emits the statements that push the lifted form of one token tree onto the
// generated `tt` vector. `matcher` is true inside `quote_matcher!`, where
// `$name:kind` nonterminals and `$(...)` sequences are preserved rather
// than spliced.
fn statements_mk_tt(cx: &ExtCtxt, tt: &TokenTree, matcher: bool) -> Vec<ast::Stmt> {
    match *tt {
        TokenTree::Token(sp, SubstNt(ident, _)) => {
            // tt.extend($ident.to_tokens(ext_cx))
            let e_to_toks = cx.expr_method_call(sp,
                                                cx.expr_ident(sp, ident),
                                                id_ext("to_tokens"),
                                                vec!(cx.expr_ident(sp, id_ext("ext_cx"))));
            let e_to_toks = cx.expr_method_call(sp, e_to_toks, id_ext("into_iter"), vec![]);
            let e_push = cx.expr_method_call(sp,
                                             cx.expr_ident(sp, id_ext("tt")),
                                             id_ext("extend"),
                                             vec!(e_to_toks));
            vec!(cx.stmt_expr(e_push))
        }
        ref tt @ TokenTree::Token(_, MatchNt(..)) if !matcher => {
            // Outside matcher position a MatchNt is decomposed into its
            // constituent token trees and lifted piecewise.
            let mut seq = vec![];
            for i in 0..tt.len() {
                seq.push(tt.get_tt(i));
            }
            statements_mk_tts(cx, &seq[..], matcher)
        }
        TokenTree::Token(sp, ref tok) => {
            let e_sp = cx.expr_ident(sp, id_ext("_sp"));
            let e_tok = cx.expr_call(sp,
                                     mk_tt_path(cx, sp, "Token"),
                                     vec!(e_sp, expr_mk_token(cx, sp, tok)));
            let e_push = cx.expr_method_call(sp,
                                             cx.expr_ident(sp, id_ext("tt")),
                                             id_ext("push"),
                                             vec!(e_tok));
            vec!(cx.stmt_expr(e_push))
        },
        TokenTree::Delimited(_, ref delimed) => {
            // Open delimiter, contents, close delimiter.
            statements_mk_tt(cx, &delimed.open_tt(), matcher).into_iter()
                .chain(delimed.tts.iter()
                                  .flat_map(|tt| statements_mk_tt(cx, tt, matcher)))
                .chain(statements_mk_tt(cx, &delimed.close_tt(), matcher))
                .collect()
        },
        TokenTree::Sequence(sp, ref seq) => {
            if !matcher {
                panic!("TokenTree::Sequence in quote!");
            }
            let e_sp = cx.expr_ident(sp, id_ext("_sp"));

            // Build the sequence body in its own `tt` vector (inner block
            // shadows the outer binding), then construct the
            // `SequenceRepetition` struct literal around it.
            let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));
            let mut tts_stmts = vec![stmt_let_tt];
            tts_stmts.extend(statements_mk_tts(cx, &seq.tts[..], matcher));
            let e_tts = cx.expr_block(cx.block(sp,
                                               tts_stmts,
                                               Some(cx.expr_ident(sp, id_ext("tt")))));
            let e_separator = match seq.separator {
                Some(ref sep) => cx.expr_some(sp, expr_mk_token(cx, sp, sep)),
                None => cx.expr_none(sp),
            };
            let e_op = match seq.op {
                ast::KleeneOp::ZeroOrMore => "ZeroOrMore",
                ast::KleeneOp::OneOrMore => "OneOrMore",
            };
            let e_op_idents = vec![
                id_ext("syntax"),
                id_ext("ast"),
                id_ext("KleeneOp"),
                id_ext(e_op),
            ];
            let e_op = cx.expr_path(cx.path_global(sp, e_op_idents));
            let fields = vec![cx.field_imm(sp, id_ext("tts"), e_tts),
                              cx.field_imm(sp, id_ext("separator"), e_separator),
                              cx.field_imm(sp, id_ext("op"), e_op),
                              cx.field_imm(sp, id_ext("num_captures"),
                                           cx.expr_usize(sp, seq.num_captures))];
            let seq_path = vec![id_ext("syntax"), id_ext("ast"), id_ext("SequenceRepetition")];
            let e_seq_struct = cx.expr_struct(sp, cx.path_global(sp, seq_path), fields);
            let e_rc_new = cx.expr_call_global(sp,
                                               vec![id_ext("std"),
                                                    id_ext("rc"),
                                                    id_ext("Rc"),
                                                    id_ext("new")],
                                               vec![e_seq_struct]);
            let e_tok = cx.expr_call(sp, mk_tt_path(cx, sp, "Sequence"), vec!(e_sp, e_rc_new));
            let e_push = cx.expr_method_call(sp,
                                             cx.expr_ident(sp, id_ext("tt")),
                                             id_ext("push"),
                                             vec!(e_tok));
            vec!(cx.stmt_expr(e_push))
        }
    }
}

// Splits the quote_*! argument list into the leading context expression
// (conventionally `cx`) and the token trees to be quoted.
fn parse_arguments_to_quote(cx: &ExtCtxt, tts: &[TokenTree])
                            -> (P<ast::Expr>, Vec<TokenTree>) {
    // NB: It appears that the main parser loses its mind if we consider
    // $foo as a SubstNt during the main parse, so we have to re-parse
    // under quote_depth > 0. This is silly and should go away; the _guess_ is
    // it has to do with transition away from supporting old-style macros, so
    // try removing it when enough of them are gone.

    let mut p = cx.new_parser_from_tts(tts);
    p.quote_depth += 1;

    let cx_expr = panictry!(p.parse_expr());
    if !p.eat(&token::Comma) {
        let _ = p.diagnostic().fatal("expected token `,`");
    }

    let tts = panictry!(p.parse_all_token_trees());
    p.abort_if_errors();

    (cx_expr, tts)
}

// Emits `let _sp = ext_cx.call_site();` and `let mut tt = Vec::new();`,
// the two bindings every generated quote body starts with.
fn mk_stmts_let(cx: &ExtCtxt, sp: Span) -> Vec<ast::Stmt> {
    // We also bind a single value, sp, to ext_cx.call_site()
    //
    // This causes every span in a token-tree quote to be attributed to the
    // call site of the extension using the quote. We can't really do much
    // better since the source of the quote may well be in a library that
    // was not even parsed by this compilation run, that the user has no
    // source code for (eg. in libsyntax, which they're just _using_).
    //
    // The old quasiquoter had an elaborate mechanism for denoting input
    // file locations from which quotes originated; unfortunately this
    // relied on feeding the source string of the quote back into the
    // compiler (which we don't really want to do) and, in any case, only
    // pushed the problem a very small step further back: an error
    // resulting from a parse of the resulting quote is still attributed to
    // the site the string literal occurred, which was in a source file
    // _other_ than the one the user has control over. For example, an
    // error in a quote from the protocol compiler, invoked in user code
    // using macro_rules! for example, will be attributed to the macro_rules.rs
    // file in libsyntax, which the user might not even have source to (unless
    // they happen to have a compiler on hand). Over all, the phase distinction
    // just makes quotes "hard to attribute". Possibly this could be fixed
    // by recreating some of the original qq machinery in the tt regime
    // (pushing fake FileMaps onto the parser to account for original sites
    // of quotes, for example) but at this point it seems not likely to be
    // worth the hassle.

    let e_sp = cx.expr_method_call(sp,
                                   cx.expr_ident(sp, id_ext("ext_cx")),
                                   id_ext("call_site"),
                                   Vec::new());

    let stmt_let_sp = cx.stmt_let(sp, false,
                                  id_ext("_sp"),
                                  e_sp);

    let stmt_let_tt = cx.stmt_let(sp, true, id_ext("tt"), cx.expr_vec_ng(sp));

    vec!(stmt_let_sp, stmt_let_tt)
}

// Lifts a whole slice of token trees; see `statements_mk_tt`.
fn statements_mk_tts(cx: &ExtCtxt, tts: &[TokenTree], matcher: bool) -> Vec<ast::Stmt> {
    let mut ss = Vec::new();
    for tt in tts {
        ss.extend(statements_mk_tt(cx, tt, matcher));
    }
    ss
}

// Returns the user's context expression plus a block expression that
// evaluates to the lifted token-tree vector.
fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[TokenTree])
              -> (P<ast::Expr>, P<ast::Expr>) {
    let (cx_expr, tts) = parse_arguments_to_quote(cx, tts);

    let mut vector = mk_stmts_let(cx, sp);
    vector.extend(statements_mk_tts(cx, &tts[..], false));
    let block = cx.expr_block(
        cx.block_all(sp,
                     vector,
                     Some(cx.expr_ident(sp, id_ext("tt")))));

    (cx_expr, block)
}

// Wraps `expr` in a block that glob-imports `imports` and binds `ext_cx`
// to (a reborrow of) the user's context expression.
fn expand_wrapper(cx: &ExtCtxt,
                  sp: Span,
                  cx_expr: P<ast::Expr>,
                  expr: P<ast::Expr>,
                  imports: &[&[&str]]) -> P<ast::Expr> {
    // Explicitly borrow to avoid moving from the invoker (#16992)
    let cx_expr_borrow = cx.expr_addr_of(sp, cx.expr_deref(sp, cx_expr));
    let stmt_let_ext_cx = cx.stmt_let(sp, false, id_ext("ext_cx"), cx_expr_borrow);

    let stmts = imports.iter().map(|path| {
        // make item: `use ...;`
        let path = path.iter().map(|s| s.to_string()).collect();
        cx.stmt_item(sp, cx.item_use_glob(sp, ast::Visibility::Inherited, ids_ext(path)))
    }).chain(Some(stmt_let_ext_cx)).collect();

    cx.expr_block(cx.block_all(sp, stmts, Some(expr)))
}

// Builds the call-site expression
// `syntax::ext::quote::<parse_method>(&mut new_parser_from_tts(...), args...)`
// wrapped via `expand_wrapper`.
fn expand_parse_call(cx: &ExtCtxt,
                     sp: Span,
                     parse_method: &str,
                     arg_exprs: Vec<P<ast::Expr>>,
                     tts: &[TokenTree]) -> P<ast::Expr> {
    let (cx_expr, tts_expr) = expand_tts(cx, sp, tts);

    let cfg_call = || cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("cfg"), Vec::new());

    let parse_sess_call = || cx.expr_method_call(
        sp, cx.expr_ident(sp, id_ext("ext_cx")),
        id_ext("parse_sess"), Vec::new());

    let new_parser_call =
        cx.expr_call(sp,
                     cx.expr_ident(sp, id_ext("new_parser_from_tts")),
                     vec!(parse_sess_call(), cfg_call(), tts_expr));

    let path = vec![id_ext("syntax"), id_ext("ext"), id_ext("quote"), id_ext(parse_method)];
    let mut args = vec![cx.expr_mut_addr_of(sp, new_parser_call)];
    args.extend(arg_exprs);
    let expr = cx.expr_call_global(sp, path, args);

    if parse_method == "parse_attribute" {
        // Attribute parsing additionally needs `syntax::parse::attr` in scope.
        expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"],
                                                &["syntax", "parse", "attr"]])
    } else {
        expand_wrapper(cx, sp, cx_expr, expr, &[&["syntax", "ext", "quote", "rt"]])
    }
}
// NOTE(review): this chunk contains TWO revisions of the same libtest module,
// concatenated around the commit message "Simplify if else as suggested in PR
// feedback", which appears as raw prose mid-chunk. Duplicated items mean the
// chunk is not one compilable file; bytes are preserved as-is, annotations only.
// (The doc-comment typo "assotiated" appears in both revisions; left verbatim
// to keep each revision byte-exact.)
//
// calc_result: maps a test's panic outcome against its ShouldPanic expectation
//   to a TestResult (including substring-matching the panic payload for
//   YesWithMessage), then — when the interim result is TrOk — upgrades it to
//   TrTimedFail if time options mark the run as critically over time.
// get_result_from_exit_code: same timeout upgrade, but driven by the secondary
//   process's exit code via the TR_OK / TR_FAILED sentinels defined above.
// NOTE(review): `unsafe impl Send for TestResult {}` is suspicious — the field
// types visible here (String, BenchSamples from super::bench) would let the
// compiler auto-derive Send if it is actually safe; verify why the manual
// unsafe impl exists before changing it.
use std::any::Any; use super::bench::BenchSamples; use super::time; use super::types::TestDesc; use super::options::ShouldPanic; pub use self::TestResult::*; // Return codes for secondary process. // Start somewhere other than 0 so we know the return code means what we think // it means. pub const TR_OK: i32 = 50; pub const TR_FAILED: i32 = 51; #[derive(Debug, Clone, PartialEq)] pub enum TestResult { TrOk, TrFailed, TrFailedMsg(String), TrIgnored, TrAllowedFail, TrBench(BenchSamples), TrTimedFail, } unsafe impl Send for TestResult {} /// Creates a `TestResult` depending on the raw result of test execution /// and assotiated data. pub fn calc_result<'a>( desc: &TestDesc, task_result: Result<(), &'a (dyn Any + 'static + Send)>, time_opts: &Option<time::TestTimeOptions>, exec_time: &Option<time::TestExecTime> ) -> TestResult { let result = match (&desc.should_panic, task_result) { (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk, (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => { let maybe_panic_str = err .downcast_ref::<String>() .map(|e| &**e) .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e)); if maybe_panic_str .map(|e| e.contains(msg)) .unwrap_or(false) { TestResult::TrOk } else { if desc.allow_fail { TestResult::TrAllowedFail } else { if let Some(panic_str) = maybe_panic_str{ TestResult::TrFailedMsg( format!(r#"panic did not contain expected string panic message: `{:?}`, expected substring: `{:?}`"#, panic_str, &*msg) ) } else { TestResult::TrFailedMsg( format!(r#"expected panic with string value, found non-string value: `{:?}` expected substring: `{:?}`"#, (**err).type_id(), &*msg) ) } } } } (&ShouldPanic::Yes, Ok(())) => { TestResult::TrFailedMsg("test did not panic as expected".to_string()) } _ if desc.allow_fail => TestResult::TrAllowedFail, _ => TestResult::TrFailed, }; // If test is already failed (or allowed to fail), do not change the result.
if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result } /// Creates a `TestResult` depending on the exit code of test subprocess. pub fn get_result_from_exit_code( desc: &TestDesc, code: i32, time_opts: &Option<time::TestTimeOptions>, exec_time: &Option<time::TestExecTime>, ) -> TestResult { let result = match (desc.allow_fail, code) { (_, TR_OK) => TestResult::TrOk, (true, TR_FAILED) => TestResult::TrAllowedFail, (false, TR_FAILED) => TestResult::TrFailed, (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)), }; // If test is already failed (or allowed to fail), do not change the result. if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result } Simplify if else as suggested in PR feedback use std::any::Any; use super::bench::BenchSamples; use super::time; use super::types::TestDesc; use super::options::ShouldPanic; pub use self::TestResult::*; // Return codes for secondary process. // Start somewhere other than 0 so we know the return code means what we think // it means. pub const TR_OK: i32 = 50; pub const TR_FAILED: i32 = 51; #[derive(Debug, Clone, PartialEq)] pub enum TestResult { TrOk, TrFailed, TrFailedMsg(String), TrIgnored, TrAllowedFail, TrBench(BenchSamples), TrTimedFail, } unsafe impl Send for TestResult {} /// Creates a `TestResult` depending on the raw result of test execution /// and assotiated data.
pub fn calc_result<'a>( desc: &TestDesc, task_result: Result<(), &'a (dyn Any + 'static + Send)>, time_opts: &Option<time::TestTimeOptions>, exec_time: &Option<time::TestExecTime> ) -> TestResult { let result = match (&desc.should_panic, task_result) { (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk, (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => { let maybe_panic_str = err .downcast_ref::<String>() .map(|e| &**e) .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e)); if maybe_panic_str.map(|e| e.contains(msg)).unwrap_or(false) { TestResult::TrOk } else if desc.allow_fail { TestResult::TrAllowedFail } else if let Some(panic_str) = maybe_panic_str { TestResult::TrFailedMsg(format!( r#"panic did not contain expected string panic message: `{:?}`, expected substring: `{:?}`"#, panic_str, msg )) } else { TestResult::TrFailedMsg(format!( r#"expected panic with string value, found non-string value: `{:?}` expected substring: `{:?}`"#, (**err).type_id(), msg )) } } (&ShouldPanic::Yes, Ok(())) => { TestResult::TrFailedMsg("test did not panic as expected".to_string()) } _ if desc.allow_fail => TestResult::TrAllowedFail, _ => TestResult::TrFailed, }; // If test is already failed (or allowed to fail), do not change the result. if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result } /// Creates a `TestResult` depending on the exit code of test subprocess.
pub fn get_result_from_exit_code( desc: &TestDesc, code: i32, time_opts: &Option<time::TestTimeOptions>, exec_time: &Option<time::TestExecTime>, ) -> TestResult { let result = match (desc.allow_fail, code) { (_, TR_OK) => TestResult::TrOk, (true, TR_FAILED) => TestResult::TrAllowedFail, (false, TR_FAILED) => TestResult::TrFailed, (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)), }; // If test is already failed (or allowed to fail), do not change the result. if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result }
// NOTE(review): legacy pre-1.0 Rust (~0.6-era) PCRE bindings: `~str`, `@` boxes,
// `extern mod`, `do`/`for` lambda-block loops, and `drop {}` destructors — none
// of this parses on a modern rustc. The chunk again holds TWO revisions of the
// file, concatenated around the commit message "Convert str slice to owned str":
// the second revision appends `.to_owned()` to `str::slice(...)` results so the
// `~str`-returning methods no longer hand back borrowed slices. Bytes preserved
// as-is; annotations only.
//
// PcreRes: frees the compiled pattern with C `free` in its destructor.
// exec: allocates the ovector with 3*(capture_count+1) slots as pcre_exec
//   requires, then truncates to the first 2*(capture_count+1) entries before
//   copying them into `captures`.
// NOTE(review): `postmatch` slices up to `str::char_len(subject)` while
//   `begin()`/`end()` come straight from PCRE's ovector — presumably byte
//   offsets, so the units may disagree for non-ASCII subjects. TODO confirm
//   against the PCRE ovector documentation.
extern mod std; use core::libc::{c_char, c_int, c_void}; use core::option::{Some, None}; use core::result::{Ok, Err}; use core::result::Result; use consts::*; enum Pcre {} enum PcreExtra {} struct PcreRes { p: *Pcre, drop { unsafe { c::free(self.p as *c_void); } } } /// The result type of `compile` pub type CompileResult = Result<Pattern, CompileErr>; /// The result type of `exec` pub type ExecResult = Result<Match, ExecErr>; /// The result type of `search` pub type SearchResult = Result<Match, RegexErr>; // The result type of `replace` pub type ReplaceResult = Result<@~str, RegexErr>; /// The type that represents compile error pub struct CompileErr { code: int, reason: @~str, offset: uint, } /// The type that represents exec error pub type ExecErr = int; /// Either compile or exec error pub enum RegexErr { CompileErr(CompileErr), ExecErr(ExecErr), } /// Compiled regular expression pub struct Pattern { str: @~str, priv pcre_res: @PcreRes, } /// Match pub struct Match { subject: @~str, pattern: Pattern, priv captures: @~[int], } #[nolink] #[abi = "cdecl"] extern mod c { fn free(p: *c_void); } extern mod pcre { fn pcre_compile2(pattern: *c_char, options: c_int, errorcodeptr: *c_int, errptr: **c_char, erroffset: *c_int, tableptr: *c_char) -> *Pcre; fn pcre_exec(code: *Pcre, extra: *PcreExtra, subject: *c_char, length: c_int, startoffset: c_int, options: c_int, ovector: * c_int, ovecsize: c_int) -> c_int; fn pcre_fullinfo(code: *Pcre, extra: *PcreExtra, what: c_int, where: *c_void) -> c_int; fn pcre_get_stringnumber(code: *Pcre, name: *c_char) -> c_int; } pub trait PatternUtil { fn info_capture_count(self) -> uint; fn info_name_count(self) -> uint; fn info_name_entry_size(self) -> uint; fn with_name_table(self, blk: &fn(*u8)); fn group_count(self) -> uint; fn group_names(self) -> ~[~str]; } impl PatternUtil for Pattern { fn info_capture_count(self) -> uint { let count = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_CAPTURECOUNT as
c_int, ptr::addr_of(&count) as *c_void); } assert!(count >= 0 as c_int); return count as uint; } fn info_name_count(self) -> uint { let count = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_NAMECOUNT as c_int, ptr::addr_of(&count) as *c_void); } assert!(count >= 0 as c_int); return count as uint; } fn info_name_entry_size(self) -> uint { let size = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_NAMEENTRYSIZE as c_int, ptr::addr_of(&size) as *c_void); } assert!(size >= 0 as c_int); return size as uint; } fn with_name_table(self, blk: &fn(*u8)) { let table = ptr::null::<u8>(); unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_NAMETABLE as c_int, ptr::addr_of(&table) as *c_void); } assert!(table != ptr::null::<u8>()); blk(table); } fn group_count(self) -> uint { return self.info_capture_count(); } fn group_names(self) -> ~[~str] { let count = self.info_name_count(); if count == 0u { return ~[]; } let size = self.info_name_entry_size(); let mut names: ~[~str] = ~[]; unsafe { do self.with_name_table |table| { for uint::range(0u, count) |i| { let p = ptr::offset(table, size * i + 2u); let s = str::raw::from_c_str(p as *c_char); vec::push(&mut names, s); } } } return names; } } pub trait PatternLike { fn compile(&self, options: int) -> CompileResult; } impl<'self> PatternLike for &'self str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for ~str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for @str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for Pattern { fn compile(&self, _options: int) -> CompileResult { Ok(*self) } } impl PatternLike for CompileResult { fn compile(&self, _options: int) -> CompileResult { *self } } pub trait MatchExtensions { fn matched(self) -> ~str; fn prematch(self) -> ~str; fn postmatch(self) -> ~str; fn
begin(self) -> uint; fn end(self) -> uint; fn group(self, i: uint) -> Option<@~str>; fn named_group(self, name: &str) -> Option<@~str>; fn subgroups(self) -> ~[~str]; fn subgroups_iter(self, blk: &fn(&str)); fn group_count(self) -> uint; fn group_names(self) -> ~[~str]; } impl MatchExtensions for Match { fn matched(self) -> ~str { return str::slice(*self.subject, self.begin(), self.end()); } fn prematch(self) -> ~str { return str::slice(*self.subject, 0u, self.begin()); } fn postmatch(self) -> ~str { return str::slice(*self.subject ,self.end(), str::char_len(*self.subject)); } fn begin(self) -> uint { return self.captures[0] as uint; } fn end(self) -> uint { return self.captures[1] as uint; } fn group(self, i: uint) -> Option<@~str> { if i > self.group_count() { return None; } let i1 = self.captures[i * 2u]; let i2 = self.captures[i * 2u + 1u]; if(i1 < 0 || i2 < 0) { return None; } return Some(@str::slice(*self.subject, i1 as uint, i2 as uint)); } fn named_group(self, name: &str) -> Option<@~str> { let i = unsafe { str::as_buf(name, |s, _n| { pcre::pcre_get_stringnumber(self.pattern.pcre_res.p, s as *c_char) }) }; if i <= 0 as c_int { return None; } return self.group(i as uint); } fn subgroups(self) -> ~[~str] { let mut v = ~[]; unsafe { vec::reserve(&mut v, self.group_count()); do self.subgroups_iter |subgroup| { vec::push(&mut v, str::from_slice(subgroup)); } } return v; } fn subgroups_iter(self, blk: &fn(&str)) { for uint::range(1u, self.group_count() + 1u) |i| { match self.group(i) { Some(s) => blk(*s), None => fail!(), } } } fn group_count(self) -> uint { return vec::len(*self.captures) / 2u - 1u; } fn group_names(self) -> ~[~str] { return self.pattern.group_names(); } } pub fn compile(pattern: &str, options: int) -> CompileResult { if options | COMPILE_OPTIONS != COMPILE_OPTIONS { warn!("unrecognized option bit(s) are set"); } let options = options | PCRE_NO_UTF8_CHECK; // str is always valid let errcode = 0 as c_int; let errreason: *c_char = ptr::null(); let
erroffset = 0 as c_int; let p = unsafe { str::as_buf(pattern, |pat, _n| { pcre::pcre_compile2(pat as *c_char, options as c_int, ptr::addr_of(&errcode), ptr::addr_of(&errreason), ptr::addr_of(&erroffset), ptr::null()) }) }; if p == ptr::null() { let reason = unsafe { @str::raw::from_c_str(errreason) }; return Err(CompileErr {code: errcode as int, reason: reason, offset: erroffset as uint}); } return Ok(Pattern {str: @str::from_slice(pattern), pcre_res: @PcreRes {p: p}}); } pub fn exec(pattern: Pattern, subject: &str, offset: uint, options: int) -> ExecResult { if (options | EXEC_OPTIONS) != EXEC_OPTIONS { warn!("unrecognized option bit(s) are set"); } let options = options | PCRE_NO_UTF8_CHECK; // str is always valid let count = (pattern.info_capture_count() + 1u) as c_int; let mut ovec = vec::from_elem((count as uint) * 3u, 0u as c_int); let ret_code = unsafe { str::as_buf(subject, |s, _n| { pcre::pcre_exec(pattern.pcre_res.p, ptr::null(), s as *c_char, str::len(subject) as c_int, offset as c_int, options as c_int, vec::raw::to_ptr(ovec) as *c_int, count * (3 as c_int)) as int }) }; if ret_code < 0 { return Err(ret_code as ExecErr); } // Truncate the working space.
unsafe { vec::raw::set_len(&mut ovec, count as uint * 2u) } let mut captures: ~[int] = ~[]; unsafe { vec::reserve(&mut captures, vec::len(ovec)); } for ovec.each |o| { unsafe { vec::push(&mut captures, *o as int); } } assert!(vec::len(captures) % 2u == 0u); return Ok(Match {subject: @str::from_slice(subject), pattern: pattern, captures: @captures}); } pub fn search<T: PatternLike>(pattern: T, subject: &str, options: int) -> SearchResult { return search_from(pattern, subject, 0u, options); } pub fn search_from<T: PatternLike>(pattern: T, subject: &str, offset: uint, options: int) -> SearchResult { assert!(offset <= str::len(subject)); let c_opts = options & COMPILE_OPTIONS; let e_opts = options & EXEC_OPTIONS; let c = pattern.compile(c_opts); match c { Ok(pattern) => { let e = exec(pattern, subject, offset, e_opts); match e { Ok(m) => { return Ok(m); } Err(e_err) => { return Err(ExecErr(e_err)); } } } Err(c_err) => { return Err(CompileErr(c_err)); } } } pub fn replace<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, 0u, options); } pub fn replace_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, offset: uint, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, offset, options); } pub fn replace_fn<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, repl_fn, 0u, options); } pub fn replace_fn_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, offset: uint, options: int) -> ReplaceResult { let r = search_from(pattern, subject, offset, options); match r { Ok(m) => { return Ok(@(m.prematch() + repl_fn(m) + m.postmatch())); } Err(e) => { return Err(e); } } } pub fn replace_all<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, options: int) ->
ReplaceResult { return replace_all_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, 0u, options); } pub fn replace_all_fn<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, options: int) -> ReplaceResult { return replace_all_fn_from(pattern, subject, repl_fn, 0u, options); } pub fn replace_all_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, offset: uint, options: int) -> ReplaceResult { return replace_all_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, offset, options); } pub fn replace_all_fn_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, offset: uint, options: int) -> ReplaceResult { let mut offset = offset; let subject_len = str::len(subject); assert!(offset <= subject_len); let mut s = str::slice(subject, 0, offset); loop { let r = search_from(pattern, subject, offset, options); match r { Ok(m) => { s += str::slice(subject, offset, m.begin()); s += repl_fn(m); offset = m.end(); } Err(ExecErr(e)) if e == PCRE_ERROR_NOMATCH => { if offset != subject_len { s += str::slice(subject, offset, subject_len); } break; } Err(e) => { return Err(copy e); } } } return Ok(@s); } pub fn fmt_compile_err(e: CompileErr) -> ~str { return fmt!("error %d: %s at offset %u", e.code, *e.reason, e.offset); } /// Return true iff `sr` indicates that the subject did not match the pattern pub fn is_nomatch(sr: SearchResult) -> bool { match sr { Err(ExecErr(e)) if e == PCRE_ERROR_NOMATCH => true, _ => false, } } Convert str slice to owned str extern mod std; use core::libc::{c_char, c_int, c_void}; use core::option::{Some, None}; use core::result::{Ok, Err}; use core::result::Result; use consts::*; enum Pcre {} enum PcreExtra {} struct PcreRes { p: *Pcre, drop { unsafe { c::free(self.p as *c_void); } } } /// The result type of `compile` pub type CompileResult = Result<Pattern, CompileErr>; /// The result type of `exec` pub type ExecResult = Result<Match, ExecErr>; /// The result type of
`search` pub type SearchResult = Result<Match, RegexErr>; // The result type of `replace` pub type ReplaceResult = Result<@~str, RegexErr>; /// The type that represents compile error pub struct CompileErr { code: int, reason: @~str, offset: uint, } /// The type that represents exec error pub type ExecErr = int; /// Either compile or exec error pub enum RegexErr { CompileErr(CompileErr), ExecErr(ExecErr), } /// Compiled regular expression pub struct Pattern { str: @~str, priv pcre_res: @PcreRes, } /// Match pub struct Match { subject: @~str, pattern: Pattern, priv captures: @~[int], } #[nolink] #[abi = "cdecl"] extern mod c { fn free(p: *c_void); } extern mod pcre { fn pcre_compile2(pattern: *c_char, options: c_int, errorcodeptr: *c_int, errptr: **c_char, erroffset: *c_int, tableptr: *c_char) -> *Pcre; fn pcre_exec(code: *Pcre, extra: *PcreExtra, subject: *c_char, length: c_int, startoffset: c_int, options: c_int, ovector: * c_int, ovecsize: c_int) -> c_int; fn pcre_fullinfo(code: *Pcre, extra: *PcreExtra, what: c_int, where: *c_void) -> c_int; fn pcre_get_stringnumber(code: *Pcre, name: *c_char) -> c_int; } pub trait PatternUtil { fn info_capture_count(self) -> uint; fn info_name_count(self) -> uint; fn info_name_entry_size(self) -> uint; fn with_name_table(self, blk: &fn(*u8)); fn group_count(self) -> uint; fn group_names(self) -> ~[~str]; } impl PatternUtil for Pattern { fn info_capture_count(self) -> uint { let count = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_CAPTURECOUNT as c_int, ptr::addr_of(&count) as *c_void); } assert!(count >= 0 as c_int); return count as uint; } fn info_name_count(self) -> uint { let count = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_NAMECOUNT as c_int, ptr::addr_of(&count) as *c_void); } assert!(count >= 0 as c_int); return count as uint; } fn info_name_entry_size(self) -> uint { let size = -1 as c_int; unsafe { pcre::pcre_fullinfo(self.pcre_res.p,
ptr::null(), PCRE_INFO_NAMEENTRYSIZE as c_int, ptr::addr_of(&size) as *c_void); } assert!(size >= 0 as c_int); return size as uint; } fn with_name_table(self, blk: &fn(*u8)) { let table = ptr::null::<u8>(); unsafe { pcre::pcre_fullinfo(self.pcre_res.p, ptr::null(), PCRE_INFO_NAMETABLE as c_int, ptr::addr_of(&table) as *c_void); } assert!(table != ptr::null::<u8>()); blk(table); } fn group_count(self) -> uint { return self.info_capture_count(); } fn group_names(self) -> ~[~str] { let count = self.info_name_count(); if count == 0u { return ~[]; } let size = self.info_name_entry_size(); let mut names: ~[~str] = ~[]; unsafe { do self.with_name_table |table| { for uint::range(0u, count) |i| { let p = ptr::offset(table, size * i + 2u); let s = str::raw::from_c_str(p as *c_char); vec::push(&mut names, s); } } } return names; } } pub trait PatternLike { fn compile(&self, options: int) -> CompileResult; } impl<'self> PatternLike for &'self str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for ~str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for @str { fn compile(&self, options: int) -> CompileResult { compile(*self, options) } } impl PatternLike for Pattern { fn compile(&self, _options: int) -> CompileResult { Ok(*self) } } impl PatternLike for CompileResult { fn compile(&self, _options: int) -> CompileResult { *self } } pub trait MatchExtensions { fn matched(self) -> ~str; fn prematch(self) -> ~str; fn postmatch(self) -> ~str; fn begin(self) -> uint; fn end(self) -> uint; fn group(self, i: uint) -> Option<@~str>; fn named_group(self, name: &str) -> Option<@~str>; fn subgroups(self) -> ~[~str]; fn subgroups_iter(self, blk: &fn(&str)); fn group_count(self) -> uint; fn group_names(self) -> ~[~str]; } impl MatchExtensions for Match { fn matched(self) -> ~str { return str::slice(*self.subject, self.begin(), self.end()).to_owned(); } fn prematch(self) -> ~str { return
str::slice(*self.subject, 0u, self.begin()).to_owned(); } fn postmatch(self) -> ~str { return str::slice(*self.subject ,self.end(), str::char_len(*self.subject)).to_owned(); } fn begin(self) -> uint { return self.captures[0] as uint; } fn end(self) -> uint { return self.captures[1] as uint; } fn group(self, i: uint) -> Option<@~str> { if i > self.group_count() { return None; } let i1 = self.captures[i * 2u]; let i2 = self.captures[i * 2u + 1u]; if(i1 < 0 || i2 < 0) { return None; } return Some(@str::slice(*self.subject, i1 as uint, i2 as uint).to_owned()); } fn named_group(self, name: &str) -> Option<@~str> { let i = unsafe { str::as_buf(name, |s, _n| { pcre::pcre_get_stringnumber(self.pattern.pcre_res.p, s as *c_char) }) }; if i <= 0 as c_int { return None; } return self.group(i as uint); } fn subgroups(self) -> ~[~str] { let mut v = ~[]; unsafe { vec::reserve(&mut v, self.group_count()); do self.subgroups_iter |subgroup| { vec::push(&mut v, str::from_slice(subgroup)); } } return v; } fn subgroups_iter(self, blk: &fn(&str)) { for uint::range(1u, self.group_count() + 1u) |i| { match self.group(i) { Some(s) => blk(*s), None => fail!(), } } } fn group_count(self) -> uint { return vec::len(*self.captures) / 2u - 1u; } fn group_names(self) -> ~[~str] { return self.pattern.group_names(); } } pub fn compile(pattern: &str, options: int) -> CompileResult { if options | COMPILE_OPTIONS != COMPILE_OPTIONS { warn!("unrecognized option bit(s) are set"); } let options = options | PCRE_NO_UTF8_CHECK; // str is always valid let errcode = 0 as c_int; let errreason: *c_char = ptr::null(); let erroffset = 0 as c_int; let p = unsafe { str::as_buf(pattern, |pat, _n| { pcre::pcre_compile2(pat as *c_char, options as c_int, ptr::addr_of(&errcode), ptr::addr_of(&errreason), ptr::addr_of(&erroffset), ptr::null()) }) }; if p == ptr::null() { let reason = unsafe { @str::raw::from_c_str(errreason) }; return Err(CompileErr {code: errcode as int, reason: reason, offset: erroffset as uint}); }
return Ok(Pattern {str: @str::from_slice(pattern), pcre_res: @PcreRes {p: p}}); } pub fn exec(pattern: Pattern, subject: &str, offset: uint, options: int) -> ExecResult { if (options | EXEC_OPTIONS) != EXEC_OPTIONS { warn!("unrecognized option bit(s) are set"); } let options = options | PCRE_NO_UTF8_CHECK; // str is always valid let count = (pattern.info_capture_count() + 1u) as c_int; let mut ovec = vec::from_elem((count as uint) * 3u, 0u as c_int); let ret_code = unsafe { str::as_buf(subject, |s, _n| { pcre::pcre_exec(pattern.pcre_res.p, ptr::null(), s as *c_char, str::len(subject) as c_int, offset as c_int, options as c_int, vec::raw::to_ptr(ovec) as *c_int, count * (3 as c_int)) as int }) }; if ret_code < 0 { return Err(ret_code as ExecErr); } // Truncate the working space. unsafe { vec::raw::set_len(&mut ovec, count as uint * 2u) } let mut captures: ~[int] = ~[]; unsafe { vec::reserve(&mut captures, vec::len(ovec)); } for ovec.each |o| { unsafe { vec::push(&mut captures, *o as int); } } assert!(vec::len(captures) % 2u == 0u); return Ok(Match {subject: @str::from_slice(subject), pattern: pattern, captures: @captures}); } pub fn search<T: PatternLike>(pattern: T, subject: &str, options: int) -> SearchResult { return search_from(pattern, subject, 0u, options); } pub fn search_from<T: PatternLike>(pattern: T, subject: &str, offset: uint, options: int) -> SearchResult { assert!(offset <= str::len(subject)); let c_opts = options & COMPILE_OPTIONS; let e_opts = options & EXEC_OPTIONS; let c = pattern.compile(c_opts); match c { Ok(pattern) => { let e = exec(pattern, subject, offset, e_opts); match e { Ok(m) => { return Ok(m); } Err(e_err) => { return Err(ExecErr(e_err)); } } } Err(c_err) => { return Err(CompileErr(c_err)); } } } pub fn replace<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, 0u, options); } pub fn replace_from<T: PatternLike +
Copy>(pattern: T, subject: &str, repl: &str, offset: uint, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, offset, options); } pub fn replace_fn<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, options: int) -> ReplaceResult { return replace_fn_from(pattern, subject, repl_fn, 0u, options); } pub fn replace_fn_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, offset: uint, options: int) -> ReplaceResult { let r = search_from(pattern, subject, offset, options); match r { Ok(m) => { return Ok(@(m.prematch() + repl_fn(m) + m.postmatch())); } Err(e) => { return Err(e); } } } pub fn replace_all<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, options: int) -> ReplaceResult { return replace_all_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, 0u, options); } pub fn replace_all_fn<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, options: int) -> ReplaceResult { return replace_all_fn_from(pattern, subject, repl_fn, 0u, options); } pub fn replace_all_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl: &str, offset: uint, options: int) -> ReplaceResult { return replace_all_fn_from(pattern, subject, |_m| { str::from_slice(repl) }, offset, options); } pub fn replace_all_fn_from<T: PatternLike + Copy>(pattern: T, subject: &str, repl_fn: &fn(Match) -> ~str, offset: uint, options: int) -> ReplaceResult { let mut offset = offset; let subject_len = str::len(subject); assert!(offset <= subject_len); let mut s = str::slice(subject, 0, offset).to_owned(); loop { let r = search_from(pattern, subject, offset, options); match r { Ok(m) => { s += str::slice(subject, offset, m.begin()).to_owned(); s += repl_fn(m); offset = m.end(); } Err(ExecErr(e)) if e == PCRE_ERROR_NOMATCH => { if offset != subject_len { s += str::slice(subject, offset, subject_len).to_owned(); } break; } Err(e) => { return Err(copy e); } }
} return Ok(@s); } pub fn fmt_compile_err(e: CompileErr) -> ~str { return fmt!("error %d: %s at offset %u", e.code, *e.reason, e.offset); } /// Return true iff `sr` indicates that the subject did not match the pattern pub fn is_nomatch(sr: SearchResult) -> bool { match sr { Err(ExecErr(e)) if e == PCRE_ERROR_NOMATCH => true, _ => false, } }
// NOTE(review): two revisions of a GPX-heatmap binary ("derivers"), concatenated
// around the commit message "Ultra hacky support for video files". The first
// revision renders into an RGBA buffer and saves one PNG at the end; the second
// switches to RGB and re-saves a PPM frame into the SAME `fout` handle after
// every activity — consistent with the "ultra hacky" video intent (frames
// appended to one stream). Bytes preserved as-is; annotations only.
//
// ImageFrame::from: derives the buffer height from the requested width and the
//   bounding box's lat/lng aspect ratio, ignoring `arg_height`.
// project_to_screen: plain equirectangular projection from (lng, lat) to pixel
//   coordinates; returns None for points outside the frame.
// parse_gpx: takes only the first track of a GPX file (warns if more), collects
//   all segment waypoints, and returns None for empty tracks.
// NOTE(review): in the second revision `cloned_buf` and `dyn_image` are created
// but never used afterwards, and `arg_height` is parsed but ignored —
// candidates for cleanup upstream. Pixel intensity ramps 0 -> 25 -> +5/hit,
// saturating at 255.
#[macro_use] extern crate serde_derive; extern crate docopt; extern crate image; extern crate gpx; extern crate geo; extern crate chrono; extern crate rayon; use std::fs; use std::fs::File; use std::io::BufReader; use std::path; use docopt::Docopt; use gpx::read; use gpx::{Gpx, Track}; use geo::Point; use image::ImageBuffer; use rayon::prelude::*; const USAGE: &'static str = " Generate video from GPX files. Usage: derivers <top-lat> <left-lng> <bottom-lat> <right-lng> <width> <height> <directory> "; #[derive(Debug, Deserialize)] struct CommandArgs { arg_top_lat: f64, arg_left_lng: f64, arg_bottom_lat: f64, arg_right_lng: f64, arg_width: u32, arg_height: u32, arg_directory: String, } #[derive(Debug)] struct ImageFrame { top_left: Point<f64>, bottom_right: Point<f64>, buf: image::RgbaImage } impl ImageFrame { pub fn from(args: &CommandArgs) -> ImageFrame { // h == w * (top - bottom) / (right - left) let height = (args.arg_width as f64) * ((args.arg_top_lat - args.arg_bottom_lat) / (args.arg_right_lng - args.arg_left_lng)); println!("Computed height: {:?}", height); ImageFrame { top_left: Point::new(args.arg_left_lng, args.arg_top_lat), bottom_right: Point::new(args.arg_right_lng, args.arg_bottom_lat), buf: ImageBuffer::from_pixel(args.arg_width, height as u32, image::Rgba([0, 0, 0, 255])) } } // Using simple equirectangular projection for now pub fn project_to_screen(&self, coord: &Point<f64>) -> Option<(u32, u32)> { // lng is x pos let x_pos = self.top_left.lng() - coord.lng(); let y_pos = self.top_left.lat() - coord.lat(); let x_offset = x_pos / (self.top_left.lng() - self.bottom_right.lng()); let y_offset = y_pos / (self.top_left.lat() - self.bottom_right.lat()); let (x, y) = ((x_offset * self.buf.width() as f64), (y_offset * self.buf.height() as f64)); if (x < 0.0 || x as u32 >= self.buf.width()) || (y < 0.0 || y as u32 >= self.buf.height()) { None } else { Some((x as u32, y as u32)) } } } #[derive(Debug)] struct Activity { name: String, date:
chrono::DateTime<chrono::Utc>, track_points: Vec<Point<f64>>, } fn parse_gpx(path: &path::PathBuf) -> Option<Activity> { let file = File::open(path).unwrap(); let reader = BufReader::new(file); let gpx: Gpx = read(reader).unwrap(); // Nothing to do if there are no tracks if gpx.tracks.len() == 0 { return None; } else if gpx.tracks.len() > 1 { println!("Warning! more than 1 track, just taking first"); } let track: &Track = &gpx.tracks[0]; let mut activity = Activity { name: track.name.clone().unwrap_or(String::from("Untitled")), date: chrono::Utc::now(), track_points: vec![], }; if let Some(metadata) = gpx.metadata { if let Some(time) = metadata.time { activity.date = time; } } // Append all the waypoints. for seg in track.segments.iter() { let points = seg.points.iter().map(|ref wpt| wpt.point()); activity.track_points.extend(points); } if activity.track_points.len() == 0 { None } else { Some(activity) } } fn main() { let args: CommandArgs = Docopt::new(USAGE) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); println!("{:?}", args); let mut img = ImageFrame::from(&args); let paths: Vec<path::PathBuf> = fs::read_dir(args.arg_directory) .unwrap() .into_iter() .map(|p| p.unwrap().path()) .collect(); let activities: Vec<Activity> = paths .into_par_iter() .filter_map(|ref p| parse_gpx(p)) .collect(); for act in activities { println!("Activity: {:?}", act.name); for pt in act.track_points.iter() { if let Some((x, y)) = img.project_to_screen(pt) { let pixel = img.buf.get_pixel_mut(x, y); let c = if pixel[0] == 255 { pixel[0] } else if pixel[0] == 0{ 25 } else { pixel[0] + 5 }; *pixel = image::Rgba([c, c, c, 255]); } } } let fout = &mut File::create("heatmap.png").unwrap(); image::ImageRgba8(img.buf).save(fout, image::PNG).unwrap(); } Ultra hacky support for video files #[macro_use] extern crate serde_derive; extern crate docopt; extern crate image; extern crate gpx; extern crate geo; extern crate chrono; extern crate rayon; use std::fs; use std::fs::File; use
std::io::BufReader; use std::path; use docopt::Docopt; use gpx::read; use gpx::{Gpx, Track}; use geo::Point; use image::ImageBuffer; use rayon::prelude::*; const USAGE: &'static str = " Generate video from GPX files. Usage: derivers <top-lat> <left-lng> <bottom-lat> <right-lng> <width> <height> <directory> "; #[derive(Debug, Deserialize)] struct CommandArgs { arg_top_lat: f64, arg_left_lng: f64, arg_bottom_lat: f64, arg_right_lng: f64, arg_width: u32, arg_height: u32, arg_directory: String, } #[derive(Debug)] struct ImageFrame { top_left: Point<f64>, bottom_right: Point<f64>, buf: image::RgbImage } impl ImageFrame { pub fn from(args: &CommandArgs) -> ImageFrame { // h == w * (top - bottom) / (right - left) let height = (args.arg_width as f64) * ((args.arg_top_lat - args.arg_bottom_lat) / (args.arg_right_lng - args.arg_left_lng)); println!("Computed height: {:?}", height); ImageFrame { top_left: Point::new(args.arg_left_lng, args.arg_top_lat), bottom_right: Point::new(args.arg_right_lng, args.arg_bottom_lat), buf: ImageBuffer::from_pixel(args.arg_width, height as u32, image::Rgb([0, 0, 0])) } } // Using simple equirectangular projection for now pub fn project_to_screen(&self, coord: &Point<f64>) -> Option<(u32, u32)> { // lng is x pos let x_pos = self.top_left.lng() - coord.lng(); let y_pos = self.top_left.lat() - coord.lat(); let x_offset = x_pos / (self.top_left.lng() - self.bottom_right.lng()); let y_offset = y_pos / (self.top_left.lat() - self.bottom_right.lat()); let (x, y) = ((x_offset * self.buf.width() as f64), (y_offset * self.buf.height() as f64)); if (x < 0.0 || x as u32 >= self.buf.width()) || (y < 0.0 || y as u32 >= self.buf.height()) { None } else { Some((x as u32, y as u32)) } } } #[derive(Debug)] struct Activity { name: String, date: chrono::DateTime<chrono::Utc>, track_points: Vec<Point<f64>>, } fn parse_gpx(path: &path::PathBuf) -> Option<Activity> { let file = File::open(path).unwrap(); let reader = BufReader::new(file); let gpx: Gpx =
read(reader).unwrap(); // Nothing to do if there are no tracks if gpx.tracks.len() == 0 { return None; } else if gpx.tracks.len() > 1 { println!("Warning! more than 1 track, just taking first"); } let track: &Track = &gpx.tracks[0]; let mut activity = Activity { name: track.name.clone().unwrap_or(String::from("Untitled")), date: chrono::Utc::now(), track_points: vec![], }; if let Some(metadata) = gpx.metadata { if let Some(time) = metadata.time { activity.date = time; } } // Append all the waypoints. for seg in track.segments.iter() { let points = seg.points.iter().map(|ref wpt| wpt.point()); activity.track_points.extend(points); } if activity.track_points.len() == 0 { None } else { Some(activity) } } fn main() { let args: CommandArgs = Docopt::new(USAGE) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); println!("{:?}", args); let mut img = ImageFrame::from(&args); let paths: Vec<path::PathBuf> = fs::read_dir(args.arg_directory) .unwrap() .into_iter() .map(|p| p.unwrap().path()) .collect(); let activities: Vec<Activity> = paths .into_par_iter() .filter_map(|ref p| parse_gpx(p)) .collect(); let fout = &mut File::create("heatmap.ppm").unwrap(); let cloned_buf = img.buf.clone(); let dyn_image = image::ImageRgb8(img.buf.clone()); for act in activities { println!("Activity: {:?}", act.name); for pt in act.track_points.iter() { if let Some((x, y)) = img.project_to_screen(pt) { let pixel = img.buf.get_pixel_mut(x, y); let c = if pixel[0] == 255 { pixel[0] } else if pixel[0] == 0{ 25 } else { pixel[0] + 5 }; *pixel = image::Rgb([c, c, c]); } } image::ImageRgb8(img.buf.clone()).save(fout, image::PPM).unwrap(); } }
use std::fmt;
use std::i64;
use std::time::Duration;

// Number of seconds in a day is a constant.
// We do not support leap seconds here.
const SECONDS_IN_DAY: u64 = 86400;

// Gregorian calendar has 400 years cycles, this is a procedure
// for computing if a year is a leap year.
fn is_leap_year(year: i64) -> bool {
    // Divisible by 4, except centuries, which must be divisible by 400.
    year % 4 == 0 && (year % 100 != 0 || year % 400 == 0)
}

// Length of the given year in days: 366 for leap years, 365 otherwise.
fn days_in_year(year: i64) -> u32 {
    match is_leap_year(year) {
        true => 366,
        false => 365,
    }
}

// Number of leap years among 400 consecutive years.
const CYCLE_LEAP_YEARS: u32 = 400 / 4 - 400 / 100 + 400 / 400;

// Number of days in 400 years cycle.
const CYCLE_DAYS: u32 = 400 * 365 + CYCLE_LEAP_YEARS;

// Number of seconds in 400 years cycle.
const CYCLE_SECONDS: u64 = CYCLE_DAYS as u64 * SECONDS_IN_DAY;

// Number of seconds between 1 Jan 1970 and 1 Jan 2000.
// Check with:
// `TZ=UTC gdate --rfc-3339=seconds --date @946684800`
const YEARS_1970_2000_SECONDS: u64 = 946684800;

// Number of seconds between 1 Jan 1600 and 1 Jan 1970.
const YEARS_1600_1970_SECONDS: u64 = CYCLE_SECONDS - YEARS_1970_2000_SECONDS;

// For each year in the cycle, number of leap years before in the cycle.
#[cfg_attr(rustfmt, rustfmt_skip)] static YEAR_DELTAS: [u8; 401] = [ 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, // 100 25, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 49, 49, 49, // 200 49, 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 53, 53, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 57, 57, 57, 57, 58, 58, 58, 58, 59, 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 67, 67, 67, 67, 68, 68, 68, 68, 69, 69, 69, 69, 70, 70, 70, 70, 71, 71, 71, 71, 72, 72, 72, 72, 73, 73, 73, // 300 73, 73, 73, 73, 73, 74, 74, 74, 74, 75, 75, 75, 75, 76, 76, 76, 76, 77, 77, 77, 77, 78, 78, 78, 78, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82, 82, 82, 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, 86, 86, 86, 86, 87, 87, 87, 87, 88, 88, 88, 88, 89, 89, 89, 89, 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, 92, 93, 93, 93, 93, 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97, ]; /// UTC time pub struct TmUtc { /// Year year: i64, /// 1..=12 month: u32, /// 1-based day of month day: u32, /// 0..=23 hour: u32, /// 0..=59 minute: u32, /// 0..=59; no leap seconds second: u32, /// 0..=999_999_999 nanos: u32, } #[derive(Debug)] pub enum Rfc3339ParseError { UnexpectedEof, TrailngCharacters, ExpectingDigits, 
ExpectingChar(char),
    ExpectingTimezone,
    NoDigitsAfterDot,
    DateTimeFieldOutOfRange,
    ExpectingDateTimeSeparator,
}

// Human-readable messages for each parse error.
// NOTE(review): `TrailngCharacters` is a typo of `TrailingCharacters`; the
// variant is public, so renaming would break downstream matches — flagged only.
impl fmt::Display for Rfc3339ParseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Rfc3339ParseError::UnexpectedEof => write!(f, "Unexpected EOF"),
            Rfc3339ParseError::TrailngCharacters => write!(f, "Trailing characters"),
            Rfc3339ParseError::ExpectingDigits => write!(f, "Expecting digits"),
            Rfc3339ParseError::ExpectingChar(c) => write!(f, "Expecting char: {}", c),
            Rfc3339ParseError::ExpectingTimezone => write!(f, "Expecting timezone"),
            Rfc3339ParseError::NoDigitsAfterDot => write!(f, "No digits after dot"),
            Rfc3339ParseError::DateTimeFieldOutOfRange => {
                write!(f, "Date-time field is out of range")
            }
            Rfc3339ParseError::ExpectingDateTimeSeparator => {
                write!(f, "Expecting date-time separator")
            }
        }
    }
}

impl std::error::Error for Rfc3339ParseError {}

// Convenience alias for results produced by the RFC 3339 parser.
pub type Rfc3339ParseResult<A> = Result<A, Rfc3339ParseError>;

impl TmUtc {
    // Map a 0-based day within the 400-year cycle to
    // (year within the cycle, 0-based day of year).
    fn day_of_cycle_to_year_day_of_year(day_of_cycle: u32) -> (i64, u32) {
        debug_assert!(day_of_cycle < CYCLE_DAYS);

        // First guess assuming every year has 365 days, then correct the
        // estimate with the precomputed leap-day table.
        let mut year_mod_400 = (day_of_cycle / 365) as i64;
        let mut day_or_year = (day_of_cycle % 365) as u32;
        let delta = YEAR_DELTAS[year_mod_400 as usize] as u32;
        if day_or_year < delta {
            year_mod_400 -= 1;
            day_or_year += 365 - YEAR_DELTAS[year_mod_400 as usize] as u32;
        } else {
            day_or_year -= delta;
        }
        (year_mod_400, day_or_year)
    }

    // Inverse of the above: (year in cycle, 0-based day of year) -> day of cycle.
    fn year_day_of_year_to_day_of_cycle(year_mod_400: u32, day_of_year: u32) -> u32 {
        debug_assert!(year_mod_400 < 400);
        debug_assert!(day_of_year < days_in_year(year_mod_400 as i64));

        year_mod_400 * 365 + YEAR_DELTAS[year_mod_400 as usize] as u32 + day_of_year
    }

    // Convert seconds of the day of hour, minute and second
    fn second_of_day_to_h_m_s(seconds: u32) -> (u32, u32, u32) {
        debug_assert!(seconds < 86400);

        let hour = seconds / 3600;
        let minute = seconds % 3600 / 60;
        let second = seconds % 60;
        (hour, minute, second)
    }

    // Inverse: pack hour/minute/second into the second within the day.
    fn h_m_s_to_second_of_day(hour: u32, minute: u32, second:
u32) -> u32 {
        debug_assert!(hour < 24);
        debug_assert!(minute < 60);
        debug_assert!(second < 60);

        hour * 3600 + minute * 60 + second
    }

    // Per-month day counts for the given year (index 0 is January).
    fn days_in_months(year: i64) -> &'static [u32] {
        if is_leap_year(year) {
            &[31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        } else {
            &[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        }
    }

    // Convert day of year (0-based) to month and day
    fn day_of_year_to_month_day(year: i64, day_of_year: u32) -> (u32, u32) {
        debug_assert!(day_of_year < days_in_year(year));

        let days_in_months = TmUtc::days_in_months(year);

        let mut rem_days = day_of_year;
        let mut month = 1;
        while rem_days >= days_in_months[month - 1] {
            rem_days -= days_in_months[month - 1];
            month += 1;
        }

        debug_assert!(rem_days + 1 <= days_in_months[month - 1]);

        // Day of month is 1-based.
        (month as u32, rem_days + 1)
    }

    // Inverse of `day_of_year_to_month_day`: 1-based month/day -> 0-based day of year.
    fn month_day_to_day_of_year(year: i64, month: u32, day: u32) -> u32 {
        debug_assert!(month >= 1);
        debug_assert!(month <= 12);
        debug_assert!(day >= 1);

        let days_in_months = TmUtc::days_in_months(year);

        // TODO: replace loop with precomputed table
        let mut day_of_year = 0;
        for next_month in 1..month {
            day_of_year += days_in_months[next_month as usize - 1];
        }

        debug_assert!(day <= days_in_months[month as usize - 1]);

        day_of_year + day - 1
    }

    // Construct from duration added to cycle start year
    fn from_cycle_start_add_duration(mut cycle_start: i64, add: Duration) -> TmUtc {
        debug_assert!(cycle_start % 400 == 0);

        // Split duration to days and duration within day
        let days = add.as_secs() / SECONDS_IN_DAY;
        let duration_of_day = add - Duration::from_secs(days * SECONDS_IN_DAY);

        let cycles = days / CYCLE_DAYS as u64;
        cycle_start += cycles as i64 * 400;
        let day_of_cycle = days % CYCLE_DAYS as u64;

        let (year_mod_400, day_of_year) =
            TmUtc::day_of_cycle_to_year_day_of_year(day_of_cycle as u32);

        // Was `let (year,) = (cycle_start + year_mod_400,);` — a needless
        // one-element tuple destructuring; a plain binding is equivalent.
        let year = cycle_start + year_mod_400;

        let (month, day) = TmUtc::day_of_year_to_month_day(year, day_of_year);
        let (hour, minute, second) =
            TmUtc::second_of_day_to_h_m_s(duration_of_day.as_secs() as u32);

        TmUtc {
            year,
month, day, hour, minute, second, nanos: duration_of_day.subsec_nanos(), } } // Protobuf timestamp: seconds from epoch, and nanos 0..=999_999_999 counting forward. pub fn from_protobuf_timestamp(seconds: i64, nanos: u32) -> TmUtc { assert!(nanos <= 999_999_999); let (mut year, mut seconds) = if seconds >= 0 { (1970, seconds as u64) } else { let minus_seconds = if seconds == i64::min_value() { i64::min_value() as u64 } else { -seconds as u64 }; let cycles = (minus_seconds + CYCLE_SECONDS) / CYCLE_SECONDS; ( 1970 - 400 * cycles as i64, cycles * CYCLE_SECONDS - minus_seconds, ) }; year -= 370; seconds += YEARS_1600_1970_SECONDS; TmUtc::from_cycle_start_add_duration(year, Duration::new(seconds, nanos)) } pub fn to_protobuf_timestamp(&self) -> (i64, u32) { assert!(self.year >= 0); assert!(self.year <= 9999); let year_mod_400 = ((self.year % 400 + 400) % 400) as u32; let cycle_start = self.year - year_mod_400 as i64; let day_of_year = TmUtc::month_day_to_day_of_year(self.year, self.month, self.day); let day_of_cycle = TmUtc::year_day_of_year_to_day_of_cycle(year_mod_400, day_of_year); let second_of_day = TmUtc::h_m_s_to_second_of_day(self.hour, self.minute, self.second); let second_of_cycle = day_of_cycle as u64 * SECONDS_IN_DAY + second_of_day as u64; let epoch_seconds = (cycle_start - 1600) / 400 * CYCLE_SECONDS as i64 - YEARS_1600_1970_SECONDS as i64 + second_of_cycle as i64; (epoch_seconds, self.nanos) } pub fn parse_rfc_3339(s: &str) -> Rfc3339ParseResult<(i64, u32)> { struct Parser<'a> { s: &'a [u8], pos: usize, } impl<'a> Parser<'a> { fn next_number(&mut self, len: usize) -> Rfc3339ParseResult<u32> { let end_pos = self.pos + len; if end_pos > self.s.len() { return Err(Rfc3339ParseError::UnexpectedEof); } let mut r = 0; for i in 0..len { let c = self.s[self.pos + i]; if c >= b'0' && c <= b'9' { r = r * 10 + (c - b'0') as u32; } else { return Err(Rfc3339ParseError::ExpectingDigits); } } self.pos += len; Ok(r) } fn lookahead_char(&self) -> Rfc3339ParseResult<u8> { if 
self.pos == self.s.len() { return Err(Rfc3339ParseError::UnexpectedEof); } Ok(self.s[self.pos]) } fn next_char(&mut self, expect: u8) -> Rfc3339ParseResult<()> { assert!(expect < 0x80); let c = self.lookahead_char()?; if c != expect { return Err(Rfc3339ParseError::ExpectingChar(expect as char)); } self.pos += 1; Ok(()) } } let mut parser = Parser { s: s.as_bytes(), pos: 0, }; let year = parser.next_number(4)? as i64; parser.next_char(b'-')?; let month = parser.next_number(2)?; parser.next_char(b'-')?; let day = parser.next_number(2)?; if month < 1 || month > 12 { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } if day < 1 || day > TmUtc::days_in_months(year as i64)[month as usize - 1] { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } match parser.lookahead_char()? { b'T' | b't' | b' ' => parser.pos += 1, _ => return Err(Rfc3339ParseError::ExpectingDateTimeSeparator), } let hour = parser.next_number(2)?; parser.next_char(b':')?; let minute = parser.next_number(2)?; parser.next_char(b':')?; let second = parser.next_number(2)?; if hour > 23 || minute > 59 || second > 60 { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } // round down leap second let second = if second == 60 { 59 } else { second }; let nanos = if parser.lookahead_char()? == b'.' { parser.pos += 1; let mut digits = 0; let mut nanos = 0; while parser.lookahead_char()? >= b'0' && parser.lookahead_char()? <= b'9' { let digit = (parser.lookahead_char()? - b'0') as u32; parser.pos += 1; if digits == 9 { continue; } digits += 1; nanos = nanos * 10 + digit; } if digits == 0 { return Err(Rfc3339ParseError::NoDigitsAfterDot); } for _ in digits..9 { nanos *= 10; } nanos } else { 0 }; let offset_seconds = if parser.lookahead_char()? == b'Z' || parser.lookahead_char()? == b'z' { parser.pos += 1; 0 } else { let sign = if parser.lookahead_char()? == b'+' { 1 } else if parser.lookahead_char()? 
== b'-' { -1 } else { return Err(Rfc3339ParseError::ExpectingTimezone); }; parser.pos += 1; let hour_offset = parser.next_number(2)?; parser.next_char(b':')?; let minute_offset = parser.next_number(2)?; (hour_offset * 3600 + 60 * minute_offset) as i64 * sign }; if parser.pos != parser.s.len() { return Err(Rfc3339ParseError::TrailngCharacters); } let (seconds, nanos) = TmUtc { year, month, day, hour, minute, second, nanos, } .to_protobuf_timestamp(); Ok((seconds - offset_seconds, nanos)) } } impl fmt::Display for TmUtc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.year > 9999 { write!(f, "+{}", self.year)?; } else if self.year < 0 { write!(f, "{:05}", self.year)?; } else { write!(f, "{:04}", self.year)?; } write!( f, "-{:02}-{:02}T{:02}:{:02}:{:02}", self.month, self.day, self.hour, self.minute, self.second )?; // if precision is not specified, print nanoseconds let subsec_digits = f.precision().unwrap_or(9); if subsec_digits != 0 { let mut subsec_digits = subsec_digits; let width = if subsec_digits > 9 { 9 } else { subsec_digits }; // "Truncated" nanonseconds. let mut subsec = self.nanos; // Performs 8 iterations when precision=1, // but that's probably not a issue compared to other computations. for _ in width..9 { subsec /= 10; } write!(f, ".{:0width$}", subsec, width = width as usize)?; // Adding more than 9 digits is meaningless, // but if user requests it, we should print zeros. 
for _ in 9..subsec_digits { write!(f, "0")?; subsec_digits -= 1; } } write!(f, "Z") } } #[cfg(test)] mod test { use std::i64; use super::*; #[test] fn test_fmt() { fn test_impl(expected: &str, secs: i64, nanos: u32, subsec_digits: u32) { let tm_utc = TmUtc::from_protobuf_timestamp(secs, nanos); assert_eq!( expected, format!("{:.prec$}", tm_utc, prec = subsec_digits as usize) ); } // Tests can be validated with with GNU date: // `TZ=UTC gdate --date @1535585179 --iso-8601=seconds` test_impl("1970-01-01T00:00:00Z", 0, 0, 0); test_impl("2018-08-29T23:26:19Z", 1535585179, 0, 0); test_impl("2018-08-29T23:26:19.123Z", 1535585179, 123456789, 3); test_impl("1646-04-01T03:45:44Z", -10216613656, 0, 0); test_impl("1970-01-01T00:00:00.000000001000Z", 0, 1, 12); test_impl("5138-11-16T09:46:40Z", 100000000000, 0, 0); test_impl("+33658-09-27T01:46:41Z", 1000000000001, 0, 0); // Leading zero test_impl("0000-12-31T00:00:00Z", -62135683200, 0, 0); // Minus zero test_impl("-0003-10-30T14:13:20Z", -62235683200, 0, 0); // More than 4 digits // Largest value GNU date can handle test_impl("+2147485547-12-31T23:59:59Z", 67768036191676799, 0, 0); // Negative dates test_impl("1969-12-31T23:59:59Z", -1, 0, 0); test_impl("1969-12-31T23:59:00Z", -60, 0, 0); test_impl("1969-12-31T23:59:58.900Z", -2, 900_000_000, 3); test_impl("1966-10-31T14:13:20Z", -100000000, 0, 0); test_impl("-29719-04-05T22:13:19Z", -1000000000001, 0, 0); // Smallest value GNU date can handle test_impl("-2147481748-01-01T00:00:00Z", -67768040609740800, 0, 0); } #[test] fn test_parse_fmt() { fn test_impl(s: &str, width: usize) { let (seconds, nanos) = TmUtc::parse_rfc_3339(s).unwrap(); let formatted = format!( "{:.width$}", TmUtc::from_protobuf_timestamp(seconds, nanos), width = width ); assert_eq!(formatted, s); } test_impl("1970-01-01T00:00:00Z", 0); test_impl("1970-01-01T00:00:00.000Z", 3); test_impl("1970-01-01T00:00:00.000000000Z", 9); test_impl("1970-01-02T00:00:00Z", 0); test_impl("1970-03-01T00:00:00Z", 0); 
test_impl("1974-01-01T00:00:00Z", 0); test_impl("2018-01-01T00:00:00Z", 0); test_impl("2018-09-02T05:49:10.123456789Z", 9); test_impl("0001-01-01T00:00:00.000000000Z", 9); test_impl("9999-12-31T23:59:59.999999999Z", 9); } #[test] fn test_parse_alt() { fn test_impl(alt: &str, parse: &str) { let reference = TmUtc::parse_rfc_3339(alt).unwrap(); let parsed = TmUtc::parse_rfc_3339(parse).unwrap(); assert_eq!(reference, parsed, "{} - {}", alt, parse); } // alternative spelling test_impl("1970-01-01 00:00:00Z", "1970-01-01T00:00:00Z"); test_impl("1970-01-01 00:00:00Z", "1970-01-01t00:00:00Z"); test_impl("1970-01-01 00:00:00Z", "1970-01-01 00:00:00z"); // leap second is rounded down test_impl("2016-12-31 23:59:59Z", "2016-12-31 23:59:60Z"); // TZ offset test_impl("1970-01-01 00:00:00Z", "1970-01-01T03:00:00+03:00"); test_impl("1970-01-01 00:00:00Z", "1969-12-31 22:15:00-01:45"); } #[test] fn test_parse_incorrect_inputs() { fn test_impl(s: &str) { assert!(TmUtc::parse_rfc_3339(s).is_err(), "{}", s); } test_impl("1970-01-01T00:00:61Z"); test_impl("1970-01-01T00:60:61Z"); test_impl("1970-01-01T24:00:61Z"); test_impl("1970-01-01T00:00:00.Z"); test_impl("1970-01-32T00:00:00Z"); test_impl("1970-02-29T00:00:00Z"); test_impl("1980-02-30T00:00:00Z"); test_impl("1980-13-01T00:00:00Z"); test_impl("1970-01-01T00:00:00"); test_impl("1970-01-01T00:00Z"); } #[test] fn test_fmt_max_duration() { // Simply check that there are no integer overflows. // I didn't check that resulting strings are correct. assert_eq!( "-292277022657-01-27T08:29:52.000000000Z", format!("{}", TmUtc::from_protobuf_timestamp(i64::min_value(), 0)) ); assert_eq!( "+292277026596-12-04T15:30:07.999999999Z", format!( "{}", TmUtc::from_protobuf_timestamp(i64::max_value(), 999_999_999) ) ); } } Use thiserror for Rfc3339ParseError use std::fmt; use std::i64; use std::time::Duration; // Number of seconds in a day is a constant. // We do not support leap seconds here. 
const SECONDS_IN_DAY: u64 = 86400;

// Gregorian calendar has 400 years cycles, this is a procedure
// for computing if a year is a leap year.
fn is_leap_year(year: i64) -> bool {
    // Quadricentennial years are leap; other centuries are not;
    // otherwise every fourth year is leap.
    if year % 400 == 0 {
        return true;
    }
    if year % 100 == 0 {
        return false;
    }
    year % 4 == 0
}

// Length of the given year in days: 366 for leap years, 365 otherwise.
fn days_in_year(year: i64) -> u32 {
    if is_leap_year(year) {
        366
    } else {
        365
    }
}

// Number of leap years among 400 consecutive years.
const CYCLE_LEAP_YEARS: u32 = 400 / 4 - 400 / 100 + 400 / 400;

// Number of days in 400 years cycle.
const CYCLE_DAYS: u32 = 400 * 365 + CYCLE_LEAP_YEARS;

// Number of seconds in 400 years cycle.
const CYCLE_SECONDS: u64 = CYCLE_DAYS as u64 * SECONDS_IN_DAY;

// Number of seconds between 1 Jan 1970 and 1 Jan 2000.
// Check with:
// `TZ=UTC gdate --rfc-3339=seconds --date @946684800`
const YEARS_1970_2000_SECONDS: u64 = 946684800;

// Number of seconds between 1 Jan 1600 and 1 Jan 1970.
const YEARS_1600_1970_SECONDS: u64 = CYCLE_SECONDS - YEARS_1970_2000_SECONDS;

// For each year in the cycle, number of leap years before in the cycle.
#[cfg_attr(rustfmt, rustfmt_skip)] static YEAR_DELTAS: [u8; 401] = [ 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, // 100 25, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 39, 39, 39, 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43, 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 48, 48, 48, 48, 49, 49, 49, // 200 49, 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51, 52, 52, 52, 52, 53, 53, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 56, 56, 56, 56, 57, 57, 57, 57, 58, 58, 58, 58, 59, 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64, 64, 64, 65, 65, 65, 65, 66, 66, 66, 66, 67, 67, 67, 67, 68, 68, 68, 68, 69, 69, 69, 69, 70, 70, 70, 70, 71, 71, 71, 71, 72, 72, 72, 72, 73, 73, 73, // 300 73, 73, 73, 73, 73, 74, 74, 74, 74, 75, 75, 75, 75, 76, 76, 76, 76, 77, 77, 77, 77, 78, 78, 78, 78, 79, 79, 79, 79, 80, 80, 80, 80, 81, 81, 81, 81, 82, 82, 82, 82, 83, 83, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, 86, 86, 86, 86, 87, 87, 87, 87, 88, 88, 88, 88, 89, 89, 89, 89, 90, 90, 90, 90, 91, 91, 91, 91, 92, 92, 92, 92, 93, 93, 93, 93, 94, 94, 94, 94, 95, 95, 95, 95, 96, 96, 96, 96, 97, 97, 97, 97, ]; /// UTC time pub struct TmUtc { /// Year year: i64, /// 1..=12 month: u32, /// 1-based day of month day: u32, /// 0..=23 hour: u32, /// 0..=59 minute: u32, /// 0..=59; no leap seconds second: u32, /// 0..=999_999_999 nanos: u32, } #[derive(Debug, thiserror::Error)] pub enum Rfc3339ParseError { #[error("Unexpected EOF")] UnexpectedEof, 
#[error("Trailing characters")]
    TrailngCharacters,
    #[error("Expecting digits")]
    ExpectingDigits,
    #[error("Expecting character: {:?}", .0)]
    ExpectingChar(char),
    #[error("Expecting timezone")]
    ExpectingTimezone,
    #[error("No digits after dot")]
    NoDigitsAfterDot,
    #[error("Date-time field is out of range")]
    DateTimeFieldOutOfRange,
    #[error("Expecting date-time separator")]
    ExpectingDateTimeSeparator,
    // NOTE(review): `TrailngCharacters` is a typo of `TrailingCharacters`;
    // the variant is public, so renaming would break callers — flagged only.
}

// Convenience alias for results produced by the RFC 3339 parser.
pub type Rfc3339ParseResult<A> = Result<A, Rfc3339ParseError>;

impl TmUtc {
    // Map a 0-based day within the 400-year cycle to
    // (year within the cycle, 0-based day of year).
    fn day_of_cycle_to_year_day_of_year(day_of_cycle: u32) -> (i64, u32) {
        debug_assert!(day_of_cycle < CYCLE_DAYS);

        // First guess assuming every year has 365 days, then correct the
        // estimate with the precomputed leap-day table.
        let mut year_mod_400 = (day_of_cycle / 365) as i64;
        let mut day_or_year = (day_of_cycle % 365) as u32;
        let delta = YEAR_DELTAS[year_mod_400 as usize] as u32;
        if day_or_year < delta {
            year_mod_400 -= 1;
            day_or_year += 365 - YEAR_DELTAS[year_mod_400 as usize] as u32;
        } else {
            day_or_year -= delta;
        }
        (year_mod_400, day_or_year)
    }

    // Inverse of the above: (year in cycle, 0-based day of year) -> day of cycle.
    fn year_day_of_year_to_day_of_cycle(year_mod_400: u32, day_of_year: u32) -> u32 {
        debug_assert!(year_mod_400 < 400);
        debug_assert!(day_of_year < days_in_year(year_mod_400 as i64));

        year_mod_400 * 365 + YEAR_DELTAS[year_mod_400 as usize] as u32 + day_of_year
    }

    // Convert seconds of the day of hour, minute and second
    fn second_of_day_to_h_m_s(seconds: u32) -> (u32, u32, u32) {
        debug_assert!(seconds < 86400);

        let hour = seconds / 3600;
        let minute = seconds % 3600 / 60;
        let second = seconds % 60;
        (hour, minute, second)
    }

    // Inverse: pack hour/minute/second into the second within the day.
    fn h_m_s_to_second_of_day(hour: u32, minute: u32, second: u32) -> u32 {
        debug_assert!(hour < 24);
        debug_assert!(minute < 60);
        debug_assert!(second < 60);

        hour * 3600 + minute * 60 + second
    }

    // Per-month day counts for the given year (index 0 is January).
    fn days_in_months(year: i64) -> &'static [u32] {
        if is_leap_year(year) {
            &[31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        } else {
            &[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        }
    }

    // Convert day of year (0-based) to month and day
    fn day_of_year_to_month_day(year: i64, day_of_year: u32) -> (u32, u32) {
        debug_assert!(day_of_year < days_in_year(year));

        let
days_in_months = TmUtc::days_in_months(year);

        let mut rem_days = day_of_year;
        let mut month = 1;
        while rem_days >= days_in_months[month - 1] {
            rem_days -= days_in_months[month - 1];
            month += 1;
        }

        debug_assert!(rem_days + 1 <= days_in_months[month - 1]);

        // Day of month is 1-based.
        (month as u32, rem_days + 1)
    }

    // Inverse of `day_of_year_to_month_day`: 1-based month/day -> 0-based day of year.
    fn month_day_to_day_of_year(year: i64, month: u32, day: u32) -> u32 {
        debug_assert!(month >= 1);
        debug_assert!(month <= 12);
        debug_assert!(day >= 1);

        let days_in_months = TmUtc::days_in_months(year);

        // TODO: replace loop with precomputed table
        let mut day_of_year = 0;
        for next_month in 1..month {
            day_of_year += days_in_months[next_month as usize - 1];
        }

        debug_assert!(day <= days_in_months[month as usize - 1]);

        day_of_year + day - 1
    }

    // Construct from duration added to cycle start year
    fn from_cycle_start_add_duration(mut cycle_start: i64, add: Duration) -> TmUtc {
        debug_assert!(cycle_start % 400 == 0);

        // Split duration to days and duration within day
        let days = add.as_secs() / SECONDS_IN_DAY;
        let duration_of_day = add - Duration::from_secs(days * SECONDS_IN_DAY);

        let cycles = days / CYCLE_DAYS as u64;
        cycle_start += cycles as i64 * 400;
        let day_of_cycle = days % CYCLE_DAYS as u64;

        let (year_mod_400, day_of_year) =
            TmUtc::day_of_cycle_to_year_day_of_year(day_of_cycle as u32);

        // Was `let (year,) = (cycle_start + year_mod_400,);` — a needless
        // one-element tuple destructuring; a plain binding is equivalent.
        let year = cycle_start + year_mod_400;

        let (month, day) = TmUtc::day_of_year_to_month_day(year, day_of_year);
        let (hour, minute, second) =
            TmUtc::second_of_day_to_h_m_s(duration_of_day.as_secs() as u32);

        TmUtc {
            year,
            month,
            day,
            hour,
            minute,
            second,
            nanos: duration_of_day.subsec_nanos(),
        }
    }

    // Protobuf timestamp: seconds from epoch, and nanos 0..=999_999_999 counting forward.
pub fn from_protobuf_timestamp(seconds: i64, nanos: u32) -> TmUtc { assert!(nanos <= 999_999_999); let (mut year, mut seconds) = if seconds >= 0 { (1970, seconds as u64) } else { let minus_seconds = if seconds == i64::min_value() { i64::min_value() as u64 } else { -seconds as u64 }; let cycles = (minus_seconds + CYCLE_SECONDS) / CYCLE_SECONDS; ( 1970 - 400 * cycles as i64, cycles * CYCLE_SECONDS - minus_seconds, ) }; year -= 370; seconds += YEARS_1600_1970_SECONDS; TmUtc::from_cycle_start_add_duration(year, Duration::new(seconds, nanos)) } pub fn to_protobuf_timestamp(&self) -> (i64, u32) { assert!(self.year >= 0); assert!(self.year <= 9999); let year_mod_400 = ((self.year % 400 + 400) % 400) as u32; let cycle_start = self.year - year_mod_400 as i64; let day_of_year = TmUtc::month_day_to_day_of_year(self.year, self.month, self.day); let day_of_cycle = TmUtc::year_day_of_year_to_day_of_cycle(year_mod_400, day_of_year); let second_of_day = TmUtc::h_m_s_to_second_of_day(self.hour, self.minute, self.second); let second_of_cycle = day_of_cycle as u64 * SECONDS_IN_DAY + second_of_day as u64; let epoch_seconds = (cycle_start - 1600) / 400 * CYCLE_SECONDS as i64 - YEARS_1600_1970_SECONDS as i64 + second_of_cycle as i64; (epoch_seconds, self.nanos) } pub fn parse_rfc_3339(s: &str) -> Rfc3339ParseResult<(i64, u32)> { struct Parser<'a> { s: &'a [u8], pos: usize, } impl<'a> Parser<'a> { fn next_number(&mut self, len: usize) -> Rfc3339ParseResult<u32> { let end_pos = self.pos + len; if end_pos > self.s.len() { return Err(Rfc3339ParseError::UnexpectedEof); } let mut r = 0; for i in 0..len { let c = self.s[self.pos + i]; if c >= b'0' && c <= b'9' { r = r * 10 + (c - b'0') as u32; } else { return Err(Rfc3339ParseError::ExpectingDigits); } } self.pos += len; Ok(r) } fn lookahead_char(&self) -> Rfc3339ParseResult<u8> { if self.pos == self.s.len() { return Err(Rfc3339ParseError::UnexpectedEof); } Ok(self.s[self.pos]) } fn next_char(&mut self, expect: u8) -> Rfc3339ParseResult<()> { 
assert!(expect < 0x80); let c = self.lookahead_char()?; if c != expect { return Err(Rfc3339ParseError::ExpectingChar(expect as char)); } self.pos += 1; Ok(()) } } let mut parser = Parser { s: s.as_bytes(), pos: 0, }; let year = parser.next_number(4)? as i64; parser.next_char(b'-')?; let month = parser.next_number(2)?; parser.next_char(b'-')?; let day = parser.next_number(2)?; if month < 1 || month > 12 { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } if day < 1 || day > TmUtc::days_in_months(year as i64)[month as usize - 1] { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } match parser.lookahead_char()? { b'T' | b't' | b' ' => parser.pos += 1, _ => return Err(Rfc3339ParseError::ExpectingDateTimeSeparator), } let hour = parser.next_number(2)?; parser.next_char(b':')?; let minute = parser.next_number(2)?; parser.next_char(b':')?; let second = parser.next_number(2)?; if hour > 23 || minute > 59 || second > 60 { return Err(Rfc3339ParseError::DateTimeFieldOutOfRange); } // round down leap second let second = if second == 60 { 59 } else { second }; let nanos = if parser.lookahead_char()? == b'.' { parser.pos += 1; let mut digits = 0; let mut nanos = 0; while parser.lookahead_char()? >= b'0' && parser.lookahead_char()? <= b'9' { let digit = (parser.lookahead_char()? - b'0') as u32; parser.pos += 1; if digits == 9 { continue; } digits += 1; nanos = nanos * 10 + digit; } if digits == 0 { return Err(Rfc3339ParseError::NoDigitsAfterDot); } for _ in digits..9 { nanos *= 10; } nanos } else { 0 }; let offset_seconds = if parser.lookahead_char()? == b'Z' || parser.lookahead_char()? == b'z' { parser.pos += 1; 0 } else { let sign = if parser.lookahead_char()? == b'+' { 1 } else if parser.lookahead_char()? 
== b'-' { -1 } else { return Err(Rfc3339ParseError::ExpectingTimezone); }; parser.pos += 1; let hour_offset = parser.next_number(2)?; parser.next_char(b':')?; let minute_offset = parser.next_number(2)?; (hour_offset * 3600 + 60 * minute_offset) as i64 * sign }; if parser.pos != parser.s.len() { return Err(Rfc3339ParseError::TrailngCharacters); } let (seconds, nanos) = TmUtc { year, month, day, hour, minute, second, nanos, } .to_protobuf_timestamp(); Ok((seconds - offset_seconds, nanos)) } } impl fmt::Display for TmUtc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.year > 9999 { write!(f, "+{}", self.year)?; } else if self.year < 0 { write!(f, "{:05}", self.year)?; } else { write!(f, "{:04}", self.year)?; } write!( f, "-{:02}-{:02}T{:02}:{:02}:{:02}", self.month, self.day, self.hour, self.minute, self.second )?; // if precision is not specified, print nanoseconds let subsec_digits = f.precision().unwrap_or(9); if subsec_digits != 0 { let mut subsec_digits = subsec_digits; let width = if subsec_digits > 9 { 9 } else { subsec_digits }; // "Truncated" nanonseconds. let mut subsec = self.nanos; // Performs 8 iterations when precision=1, // but that's probably not a issue compared to other computations. for _ in width..9 { subsec /= 10; } write!(f, ".{:0width$}", subsec, width = width as usize)?; // Adding more than 9 digits is meaningless, // but if user requests it, we should print zeros. 
for _ in 9..subsec_digits { write!(f, "0")?; subsec_digits -= 1; } } write!(f, "Z") } } #[cfg(test)] mod test { use std::i64; use super::*; #[test] fn test_fmt() { fn test_impl(expected: &str, secs: i64, nanos: u32, subsec_digits: u32) { let tm_utc = TmUtc::from_protobuf_timestamp(secs, nanos); assert_eq!( expected, format!("{:.prec$}", tm_utc, prec = subsec_digits as usize) ); } // Tests can be validated with with GNU date: // `TZ=UTC gdate --date @1535585179 --iso-8601=seconds` test_impl("1970-01-01T00:00:00Z", 0, 0, 0); test_impl("2018-08-29T23:26:19Z", 1535585179, 0, 0); test_impl("2018-08-29T23:26:19.123Z", 1535585179, 123456789, 3); test_impl("1646-04-01T03:45:44Z", -10216613656, 0, 0); test_impl("1970-01-01T00:00:00.000000001000Z", 0, 1, 12); test_impl("5138-11-16T09:46:40Z", 100000000000, 0, 0); test_impl("+33658-09-27T01:46:41Z", 1000000000001, 0, 0); // Leading zero test_impl("0000-12-31T00:00:00Z", -62135683200, 0, 0); // Minus zero test_impl("-0003-10-30T14:13:20Z", -62235683200, 0, 0); // More than 4 digits // Largest value GNU date can handle test_impl("+2147485547-12-31T23:59:59Z", 67768036191676799, 0, 0); // Negative dates test_impl("1969-12-31T23:59:59Z", -1, 0, 0); test_impl("1969-12-31T23:59:00Z", -60, 0, 0); test_impl("1969-12-31T23:59:58.900Z", -2, 900_000_000, 3); test_impl("1966-10-31T14:13:20Z", -100000000, 0, 0); test_impl("-29719-04-05T22:13:19Z", -1000000000001, 0, 0); // Smallest value GNU date can handle test_impl("-2147481748-01-01T00:00:00Z", -67768040609740800, 0, 0); } #[test] fn test_parse_fmt() { fn test_impl(s: &str, width: usize) { let (seconds, nanos) = TmUtc::parse_rfc_3339(s).unwrap(); let formatted = format!( "{:.width$}", TmUtc::from_protobuf_timestamp(seconds, nanos), width = width ); assert_eq!(formatted, s); } test_impl("1970-01-01T00:00:00Z", 0); test_impl("1970-01-01T00:00:00.000Z", 3); test_impl("1970-01-01T00:00:00.000000000Z", 9); test_impl("1970-01-02T00:00:00Z", 0); test_impl("1970-03-01T00:00:00Z", 0); 
test_impl("1974-01-01T00:00:00Z", 0); test_impl("2018-01-01T00:00:00Z", 0); test_impl("2018-09-02T05:49:10.123456789Z", 9); test_impl("0001-01-01T00:00:00.000000000Z", 9); test_impl("9999-12-31T23:59:59.999999999Z", 9); } #[test] fn test_parse_alt() { fn test_impl(alt: &str, parse: &str) { let reference = TmUtc::parse_rfc_3339(alt).unwrap(); let parsed = TmUtc::parse_rfc_3339(parse).unwrap(); assert_eq!(reference, parsed, "{} - {}", alt, parse); } // alternative spelling test_impl("1970-01-01 00:00:00Z", "1970-01-01T00:00:00Z"); test_impl("1970-01-01 00:00:00Z", "1970-01-01t00:00:00Z"); test_impl("1970-01-01 00:00:00Z", "1970-01-01 00:00:00z"); // leap second is rounded down test_impl("2016-12-31 23:59:59Z", "2016-12-31 23:59:60Z"); // TZ offset test_impl("1970-01-01 00:00:00Z", "1970-01-01T03:00:00+03:00"); test_impl("1970-01-01 00:00:00Z", "1969-12-31 22:15:00-01:45"); } #[test] fn test_parse_incorrect_inputs() { fn test_impl(s: &str) { assert!(TmUtc::parse_rfc_3339(s).is_err(), "{}", s); } test_impl("1970-01-01T00:00:61Z"); test_impl("1970-01-01T00:60:61Z"); test_impl("1970-01-01T24:00:61Z"); test_impl("1970-01-01T00:00:00.Z"); test_impl("1970-01-32T00:00:00Z"); test_impl("1970-02-29T00:00:00Z"); test_impl("1980-02-30T00:00:00Z"); test_impl("1980-13-01T00:00:00Z"); test_impl("1970-01-01T00:00:00"); test_impl("1970-01-01T00:00Z"); } #[test] fn test_fmt_max_duration() { // Simply check that there are no integer overflows. // I didn't check that resulting strings are correct. assert_eq!( "-292277022657-01-27T08:29:52.000000000Z", format!("{}", TmUtc::from_protobuf_timestamp(i64::min_value(), 0)) ); assert_eq!( "+292277026596-12-04T15:30:07.999999999Z", format!( "{}", TmUtc::from_protobuf_timestamp(i64::max_value(), 999_999_999) ) ); } }
use std::process::Command; use std::process::Child; use std::process::Stdio; use std::rc::Rc; use std::sync::RwLock; use std::ffi::OsStr; use std::convert::AsRef; use core::workspaces::Workspaces; use config::Config; use window_system::*; #[macro_export] macro_rules! add_key_handler_str( ($config: expr, $w:expr, $key:expr, $modkey:expr, $inp:expr) => ( $config.add_key_handler($w.get_keycode_from_string($key), $modkey, box $inp); ) ); #[macro_export] macro_rules! add_key_handler_code( ($config: expr, $key:expr, $modkey:expr, $inp:expr) => ( $config.add_key_handler($key, $modkey, box $inp); ) ); #[macro_export] macro_rules! add_mouse_handler( ($config: expr, $button:expr, $modkey:expr, $inp:expr) => ( $config.add_mouse_handler($button, $modkey, box $inp); ) ); #[macro_export] macro_rules! send_layout_message( ($message: expr) => ( |m, w, c| m.send_layout_message($message, w.deref(), c).windows(w.deref(), c, &|x| x.clone()) ) ); #[macro_export] macro_rules! run( ($command: expr, $options: expr) => ( |w, _, _| { run($command, String::from($options).split(' ').map(String::from).collect()); w } ) ); pub fn run<S: AsRef<OsStr>>(program: S, args: Vec<String>) { match Command::new(program).args(&args).spawn() { _ => () } } pub fn spawn_pipe<S: AsRef<OsStr>>(config: &mut Config, program: S, args: Vec<String>) -> Rc<RwLock<Child>> { let result = Command::new(program) .args(&args).stdin(Stdio::piped()).spawn().unwrap(); let rc = Rc::new(RwLock::new(result)); config.general.pipes.push(rc.clone()); rc } pub fn spawn_on(workspaces: Workspaces, _: &WindowSystem, window: Window, workspace_id: u32) -> Workspaces { workspaces.focus_window(window).shift(workspace_id) } Remove box syntax requirement from macros use std::process::Command; use std::process::Child; use std::process::Stdio; use std::rc::Rc; use std::sync::RwLock; use std::ffi::OsStr; use std::convert::AsRef; use core::workspaces::Workspaces; use config::Config; use window_system::*; #[macro_export] macro_rules! 
add_key_handler_str( ($config: expr, $w:expr, $key:expr, $modkey:expr, $inp:expr) => ( $config.add_key_handler($w.get_keycode_from_string($key), $modkey, Box::new($inp)); ) ); #[macro_export] macro_rules! add_key_handler_code( ($config: expr, $key:expr, $modkey:expr, $inp:expr) => ( $config.add_key_handler($key, $modkey, Box::new($inp)); ) ); #[macro_export] macro_rules! add_mouse_handler( ($config: expr, $button:expr, $modkey:expr, $inp:expr) => ( $config.add_mouse_handler($button, $modkey, Box::new($inp)); ) ); #[macro_export] macro_rules! send_layout_message( ($message: expr) => ( |m, w, c| m.send_layout_message($message, w.deref(), c).windows(w.deref(), c, &|x| x.clone()) ) ); #[macro_export] macro_rules! run( ($command: expr, $options: expr) => ( |w, _, _| { run($command, String::from($options).split(' ').map(String::from).collect()); w } ) ); pub fn run<S: AsRef<OsStr>>(program: S, args: Vec<String>) { match Command::new(program).args(&args).spawn() { _ => () } } pub fn spawn_pipe<S: AsRef<OsStr>>(config: &mut Config, program: S, args: Vec<String>) -> Rc<RwLock<Child>> { let result = Command::new(program) .args(&args).stdin(Stdio::piped()).spawn().unwrap(); let rc = Rc::new(RwLock::new(result)); config.general.pipes.push(rc.clone()); rc } pub fn spawn_on(workspaces: Workspaces, _: &WindowSystem, window: Window, workspace_id: u32) -> Workspaces { workspaces.focus_window(window).shift(workspace_id) }
// Implements http://rosettacode.org/wiki/Metered_concurrency // Rust has a perfectly good Semaphore type already. It lacks count(), though, so we can't use it // directly. #![feature(unsafe_destructor)] extern crate sync; use std::io::timer; use std::sync::Arc; use std::sync::atomic::AtomicUint; use std::sync::atomics; use std::time::duration::Duration; pub struct CountingSemaphore { count: AtomicUint, // Remaining resource count backoff: Duration, // How long to sleep if a resource is being contended } pub struct CountingSemaphoreGuard<'a> { sem: &'a CountingSemaphore, // A reference to the owning semaphore. } impl CountingSemaphore { // Create a semaphore with `max` available resources and a linearly increasing backoff of // `backoff` (used during spinlock contention). pub fn new(max: uint, backoff: Duration) -> CountingSemaphore { CountingSemaphore { count: AtomicUint::new(max), backoff: backoff } } // Acquire a resource, returning a RAII CountingSemaphoreGuard. pub fn acquire(&self) -> CountingSemaphoreGuard { // Spinlock until remaining resource count is at least 1 let mut backoff: Duration = self.backoff; loop { // Probably don't need SeqCst here, but it doesn't hurt. let count = self.count.load(atomics::SeqCst); // The check for 0 is necessary to make sure we don't go negative, which is why this // must be a compare-and-swap rather than a straight decrement. if count == 0 || self.count.compare_and_swap(count, count - 1, atomics::SeqCst) != count { // Linear backoff a la Servo's spinlock contention. timer::sleep(backoff); backoff = backoff + self.backoff; } else { // We successfully acquired the resource. break } } CountingSemaphoreGuard { sem: self } } // Return remaining resource count pub fn count(&self) -> uint { self.count.load(atomics::SeqCst) } } #[unsafe_destructor] impl<'a> Drop for CountingSemaphoreGuard<'a> { // When the guard is dropped, a resource is released back to the pool. 
fn drop(&mut self) { self.sem.count.fetch_add(1, atomics::SeqCst); } } fn metered(duration: Duration, backoff: Duration) { static MAX_COUNT: uint = 4; // Total available resources static NUM_WORKERS: u8 = 10; // Number of workers contending for the resources let sem = Arc::new(CountingSemaphore::new(MAX_COUNT, backoff)); let (tx, rx) = channel(); for i in range(0, NUM_WORKERS) { let sem = sem.clone(); let tx = tx.clone(); spawn(proc() { let guard = sem.acquire(); let count = sem.count(); assert!(count < MAX_COUNT); println!("Worker {} after acquire: count = {}", i, count); timer::sleep(duration); drop(guard); let count = sem.count(); assert!(count <= MAX_COUNT); println!("Worker {} after release: count = {}", i, count); tx.send(()); }) } drop(tx); for _ in range(0, NUM_WORKERS) { rx.recv(); } } #[test] fn test_metered_concurrency() { metered(Duration::seconds(1) / 20, Duration::seconds(1) / 20); } #[cfg(not(test))] fn main() { metered(Duration::seconds(2), Duration::seconds(1) / 10); } More comments for metered concurrency, and shorter backoff. // Implements http://rosettacode.org/wiki/Metered_concurrency // Rust has a perfectly good Semaphore type already. It lacks count(), though, so we can't use it // directly. #![feature(unsafe_destructor)] extern crate sync; use std::io::timer; use std::sync::Arc; use std::sync::atomic::AtomicUint; use std::sync::atomics; use std::time::duration::Duration; pub struct CountingSemaphore { count: AtomicUint, // Remaining resource count backoff: Duration, // How long to sleep if a resource is being contended } pub struct CountingSemaphoreGuard<'a> { sem: &'a CountingSemaphore, // A reference to the owning semaphore. } impl CountingSemaphore { // Create a semaphore with `max` available resources and a linearly increasing backoff of // `backoff` (used during spinlock contention). 
pub fn new(max: uint, backoff: Duration) -> CountingSemaphore { CountingSemaphore { count: AtomicUint::new(max), backoff: backoff } } // Acquire a resource, returning a RAII CountingSemaphoreGuard. pub fn acquire(&self) -> CountingSemaphoreGuard { // Spinlock until remaining resource count is at least 1 let mut backoff: Duration = self.backoff; loop { // Probably don't need SeqCst here, but it doesn't hurt. let count = self.count.load(atomics::SeqCst); // The check for 0 is necessary to make sure we don't go negative, which is why this // must be a compare-and-swap rather than a straight decrement. if count == 0 || self.count.compare_and_swap(count, count - 1, atomics::SeqCst) != count { // Linear backoff a la Servo's spinlock contention. timer::sleep(backoff); backoff = backoff + self.backoff; } else { // We successfully acquired the resource. break } } CountingSemaphoreGuard { sem: self } } // Return remaining resource count pub fn count(&self) -> uint { self.count.load(atomics::SeqCst) } } #[unsafe_destructor] impl<'a> Drop for CountingSemaphoreGuard<'a> { // When the guard is dropped, a resource is released back to the pool. 
fn drop(&mut self) { self.sem.count.fetch_add(1, atomics::SeqCst); } } fn metered(duration: Duration) { static MAX_COUNT: uint = 4; // Total available resources static NUM_WORKERS: u8 = 10; // Number of workers contending for the resources let backoff = Duration::milliseconds(1); // Linear backoff time // Create a shared reference to the semaphore let sem = Arc::new(CountingSemaphore::new(MAX_COUNT, backoff)); // Create a channel for notifying the main task that the workers are done let (tx, rx) = channel(); for i in range(0, NUM_WORKERS) { let sem = sem.clone(); let tx = tx.clone(); spawn(proc() { // Acquire the resource let guard = sem.acquire(); let count = sem.count(); // Make sure the count is legal assert!(count < MAX_COUNT); println!("Worker {} after acquire: count = {}", i, count); // Sleep for `duration` timer::sleep(duration); // Release the resource drop(guard); // Make sure the count is legal let count = sem.count(); assert!(count <= MAX_COUNT); println!("Worker {} after release: count = {}", i, count); // Notify the main task of completion tx.send(()); }) } drop(tx); // Wait for all the subtasks to finish for _ in range(0, NUM_WORKERS) { rx.recv(); } } #[test] fn test_metered_concurrency() { // Hold each resource for 1/20 of a second per worker metered(Duration::seconds(1) / 20); } #[cfg(not(test))] fn main() { // Hold each resource for 2 seconds per worker metered(Duration::seconds(2)); }
extern crate html5ever; extern crate regex; extern crate hyper; extern crate string_cache; use self::html5ever::sink::common::{Document, Doctype, Text, Comment, Element}; use self::html5ever::sink::rcdom::{RcDom, Handle}; use self::html5ever::{parse, one_input, Attribute}; use std::default::Default; use std::io::Read; use self::regex::Regex; use self::hyper::Client; use self::hyper::header::Connection; use self::hyper::header::ConnectionOption; use self::string_cache::namespace::{QualName, Namespace}; use self::string_cache::atom::Atom; use Provider; use Track; pub fn extract_tracks(url: &str) -> Vec<Track> { let mut client = Client::new(); let mut res = client.get(url) .header(Connection(vec![ConnectionOption::Close])) .send().unwrap(); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); let dom: RcDom = parse(one_input(body), Default::default()); let mut tracks = Vec::new(); walk(0, dom.document, &mut tracks); return tracks } // This is not proper HTML serialization, of course. 
fn walk(indent: usize, handle: Handle, tracks: &mut Vec<Track>) { let node = handle.borrow(); match node.node { Document => (), Doctype(_, _, _) => (), Text(_) => (), Comment(_) => (), Element(ref name, ref attrs) => { let tag_name = name.local.as_slice(); match extract_track(tag_name, attrs) { Some(track) => (*tracks).push(track), None => {} } } } for child in node.children.iter() { walk(indent+4, child.clone(), tracks); } } pub fn extract_track(tag_name: &str, attrs: &Vec<Attribute>) -> Option<Track> { if tag_name == "iframe" { for attr in attrs.iter() { match Regex::new(r"www.youtube.com/embed") { Ok(re) => if re.is_match(&attr.value) { match Regex::new(r"www.youtube.com/embed/(.+)") { Ok(re) => { let cap = re.captures(&attr.value).unwrap(); let strs: Vec<&str> = cap.at(1).unwrap().split_str('?').collect(); return Some(Track { id: 0, provider: Provider::YouTube, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, Err(_) => return None } }, Err(_) => return None }; match Regex::new(r"api.soundcloud.com/tracks/") { Ok(re) => if re.is_match(&attr.value) { /* println!("SoundCloud {}=\"{}\"", attr.name.local.as_slice(), attr.value);*/ match Regex::new(r"api.soundcloud.com/tracks/(.+)") { Ok(re) => { let cap = re.captures(&attr.value).unwrap(); let strs: Vec<&str> = cap.at(1).unwrap().split_str('&').collect(); // println!("id: {} ", strs[0]); return Some(Track { id: 0, provider: Provider::SoundCloud, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, Err(_) => return None } }, Err(_) => return None }; } } else if tag_name == "a" { for attr in attrs.iter() { let href = QualName { ns: Namespace(string_cache::atom::Atom::from_slice("")), local: string_cache::atom::Atom::from_slice("href") }; if attr.name == href { match Regex::new(r"www.youtube.com/watch\?v=(.+)") { Ok(re) => match re.captures(&attr.value) { Some(cap) => match cap.at(1) { Some(str) => { let strs: Vec<&str> = 
str.split_str('?').collect(); return Some(Track { id: 0, provider: Provider::YouTube, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, None => () }, None => () }, Err(_) => () } } } } return None } Refactoring: Remove unused regex match extern crate html5ever; extern crate regex; extern crate hyper; extern crate string_cache; use self::html5ever::sink::common::{Document, Doctype, Text, Comment, Element}; use self::html5ever::sink::rcdom::{RcDom, Handle}; use self::html5ever::{parse, one_input, Attribute}; use std::default::Default; use std::io::Read; use self::regex::Regex; use self::hyper::Client; use self::hyper::header::Connection; use self::hyper::header::ConnectionOption; use self::string_cache::namespace::{QualName, Namespace}; use self::string_cache::atom::Atom; use Provider; use Track; pub fn extract_tracks(url: &str) -> Vec<Track> { let mut client = Client::new(); let mut res = client.get(url) .header(Connection(vec![ConnectionOption::Close])) .send().unwrap(); let mut body = String::new(); res.read_to_string(&mut body).unwrap(); let dom: RcDom = parse(one_input(body), Default::default()); let mut tracks = Vec::new(); walk(0, dom.document, &mut tracks); return tracks } // This is not proper HTML serialization, of course. 
fn walk(indent: usize, handle: Handle, tracks: &mut Vec<Track>) { let node = handle.borrow(); match node.node { Document => (), Doctype(_, _, _) => (), Text(_) => (), Comment(_) => (), Element(ref name, ref attrs) => { let tag_name = name.local.as_slice(); match extract_track(tag_name, attrs) { Some(track) => (*tracks).push(track), None => {} } } } for child in node.children.iter() { walk(indent+4, child.clone(), tracks); } } pub fn extract_track(tag_name: &str, attrs: &Vec<Attribute>) -> Option<Track> { if tag_name == "iframe" { for attr in attrs.iter() { match Regex::new(r"www.youtube.com/embed/(.+)") { Ok(re) => match re.captures(&attr.value) { Some(cap) => match (cap.at(1)) { Some(str) => { let strs: Vec<&str> = str.split_str('?').collect(); return Some(Track { id: 0, provider: Provider::YouTube, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, None => () }, None => () }, Err(_) => () }; match Regex::new(r"api.soundcloud.com/tracks/(.+)") { Ok(re) => match re.captures(&attr.value) { Some(cap) => match cap.at(1) { Some(str) => { let strs: Vec<&str> = str.split_str('&').collect(); return Some(Track { id: 0, provider: Provider::SoundCloud, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, None => () } }, Err(_) => () }; } } else if tag_name == "a" { for attr in attrs.iter() { let href = QualName { ns: Namespace(string_cache::atom::Atom::from_slice("")), local: string_cache::atom::Atom::from_slice("href") }; if attr.name == href { match Regex::new(r"www.youtube.com/watch\?v=(.+)") { Ok(re) => match re.captures(&attr.value) { Some(cap) => match cap.at(1) { Some(str) => { let strs: Vec<&str> = str.split_str('?').collect(); return Some(Track { id: 0, provider: Provider::YouTube, title: strs[0].to_string(), url: attr.value.to_string(), identifier: strs[0].to_string() }) }, None => () }, None => () }, Err(_) => () } } } } return None }
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Comments for configuration + injection into output .toml use std::collections::HashMap; /// maps entries to Comments that should precede them fn comments() -> HashMap<String, String> { let mut retval = HashMap::new(); retval.insert( "[server]".to_string(), " # Generated Server Configuration File for Grin # # When running the grin executable without specifying any command line # arguments, it will look for this file in two places, in the following # order: # # -The working directory # -[user home]/.grin # ######################################### ### SERVER CONFIGURATION ### ######################################### #Server connection details " .to_string(), ); retval.insert( "api_http_addr".to_string(), " #path of TLS certificate file, self-signed certificates are not supported #tls_certificate_file = \"\" #private key for the TLS certificate #tls_certificate_key = \"\" #the address on which services will listen, e.g. 
Transaction Pool " .to_string(), ); retval.insert( "api_secret_path".to_string(), " #path of the secret token used by the API to authenticate the calls #comment the it to disable basic auth " .to_string(), ); retval.insert( "db_root".to_string(), " #the directory, relative to current, in which the grin blockchain #is stored " .to_string(), ); retval.insert( "chain_type".to_string(), " #The chain type, which defines the genesis block and the set of cuckoo #parameters used for mining as well as wallet output coinbase maturity. Can be: #AutomatedTesting - For CI builds and instant blockchain creation #UserTesting - For regular user testing (cuckoo 16) #Floonet - For the long term floonet test network #Mainnet - For mainnet " .to_string(), ); retval.insert( "chain_validation_mode".to_string(), " #the chain validation mode, defines how often (if at all) we #want to run a full chain validation. Can be: #\"EveryBlock\" - run full chain validation when processing each block (except during sync) #\"Disabled\" - disable full chain validation (just run regular block validation) " .to_string(), ); retval.insert( "archive_mode".to_string(), " #run the node in \"full archive\" mode (default is fast-sync, pruned node) " .to_string(), ); retval.insert( "skip_sync_wait".to_string(), " #skip waiting for sync on startup, (optional param, mostly for testing) " .to_string(), ); retval.insert( "run_tui".to_string(), " #whether to run the ncurses TUI. Ncurses must be installed and this #will also disable logging to stdout " .to_string(), ); retval.insert( "run_test_miner".to_string(), " #Whether to run a test miner. This is only for developer testing (chaintype #usertesting) at cuckoo 16, and will only mine into the default wallet port. 
#real mining should use the standalone grin-miner " .to_string(), ); retval.insert( "[server.dandelion_config]".to_string(), " ######################################### ### DANDELION CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "epoch_secs".to_string(), " #dandelion epoch duration " .to_string(), ); retval.insert( "aggregation_secs".to_string(), " #dandelion aggregation period in secs " .to_string(), ); retval.insert( "embargo_secs".to_string(), " #fluff and broadcast after embargo expires if tx not seen on network " .to_string(), ); retval.insert( "stem_probability".to_string(), " #dandelion stem probability (stem 90% of the time, fluff 10% of the time) " .to_string(), ); retval.insert( "[server.p2p_config]".to_string(), "#test miner wallet URL (burns if this doesn't exist) #test_miner_wallet_url = \"http://127.0.0.1:3415\" ######################################### ### SERVER P2P CONFIGURATION ### ######################################### #The P2P server details (i.e. the server that communicates with other " .to_string(), ); retval.insert( "host".to_string(), " #The interface on which to listen. #0.0.0.0 will listen on all interfaces, allowing others to interact #127.0.0.1 will listen on the local machine only " .to_string(), ); retval.insert( "port".to_string(), " #The port on which to listen. 
" .to_string(), ); retval.insert( "seeding_type".to_string(), " #how to seed this server, can be None, List or DNSSeed " .to_string(), ); retval.insert( "[server.p2p_config.capabilities]".to_string(), "#If the seeding type is List, the list of peers to connect to can #be specified as follows: #seeds = [\"192.168.0.1:3414\",\"192.168.0.2:3414\"] #hardcoded peer lists for allow/deny #will *only* connect to peers in allow list #peers_allow = [\"192.168.0.1:3414\", \"192.168.0.2:3414\"] #will *never* connect to peers in deny list #peers_deny = [\"192.168.0.3:3414\", \"192.168.0.4:3414\"] #a list of preferred peers to connect to #peers_preferred = [\"192.168.0.1:3414\",\"192.168.0.2:3414\"] #how long a banned peer should stay banned #ban_window = 10800 #maximum number of peers #peer_max_count = 125 #preferred minimum number of peers (we'll actively keep trying to add peers #until we get to at least this number #peer_min_preferred_count = 8 # 15 = Bit flags for FULL_NODE #This structure needs to be changed internally, to make it more configurable # A preferred dandelion_peer, mainly used for testing dandelion # dandelion_peer = \"10.0.0.1:13144\" " .to_string(), ); retval.insert( "[server.pool_config]".to_string(), " ######################################### ### MEMPOOL CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "accept_fee_base".to_string(), " #base fee that's accepted into the pool " .to_string(), ); retval.insert( "max_pool_size".to_string(), " #maximum number of transactions allowed in the pool " .to_string(), ); retval.insert( "max_stempool_size".to_string(), " #maximum number of transactions allowed in the stempool " .to_string(), ); retval.insert( "mineable_max_weight".to_string(), " #maximum total weight of transactions that can get selected to build a block " .to_string(), ); retval.insert( "[server.stratum_mining_config]".to_string(), " ################################################ ### STRATUM MINING SERVER 
CONFIGURATION ### ################################################ " .to_string(), ); retval.insert( "enable_stratum_server".to_string(), " #whether stratum server is enabled " .to_string(), ); retval.insert( "stratum_server_addr".to_string(), " #what port and address for the stratum server to listen on " .to_string(), ); retval.insert( "attempt_time_per_block".to_string(), " #the amount of time, in seconds, to attempt to mine on a particular #header before stopping and re-collecting transactions from the pool " .to_string(), ); retval.insert( "minimum_share_difficulty".to_string(), " #the minimum acceptable share difficulty to request from miners " .to_string(), ); retval.insert( "wallet_listener_url".to_string(), " #the wallet receiver to which coinbase rewards will be sent " .to_string(), ); retval.insert( "burn_reward".to_string(), " #whether to ignore the reward (mostly for testing) " .to_string(), ); retval.insert( "[logging]".to_string(), " ######################################### ### LOGGING CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "log_to_stdout".to_string(), " #whether to log to stdout " .to_string(), ); retval.insert( "stdout_log_level".to_string(), " #log level for stdout: Error, Warning, Info, Debug, Trace " .to_string(), ); retval.insert( "log_to_file".to_string(), " #whether to log to a file " .to_string(), ); retval.insert( "file_log_level".to_string(), " #log level for file: Error, Warning, Info, Debug, Trace " .to_string(), ); retval.insert( "log_file_path".to_string(), " #log file path " .to_string(), ); retval.insert( "log_file_append".to_string(), " #whether to append to the log file (true), or replace it on every run (false) " .to_string(), ); retval.insert( "log_max_size".to_string(), " #maximum log file size in bytes before performing log rotation #comment it to disable log rotation " .to_string(), ); retval.insert( "log_max_files".to_string(), " #maximum count of the log files to rotate 
over " .to_string(), ); retval } fn get_key(line: &str) -> String { if line.contains("[") && line.contains("]") { return line.to_owned(); } else if line.contains("=") { return line.split("=").collect::<Vec<&str>>()[0].trim().to_owned(); } else { return "NOT_FOUND".to_owned(); } } pub fn insert_comments(orig: String) -> String { let comments = comments(); let lines: Vec<&str> = orig.split("\n").collect(); let mut out_lines = vec![]; for l in lines { let key = get_key(l); if let Some(v) = comments.get(&key) { out_lines.push(v.to_owned()); } out_lines.push(l.to_owned()); out_lines.push("\n".to_owned()); } let mut ret_val = String::from(""); for l in out_lines { ret_val.push_str(&l); } ret_val.to_owned() } Add commented webhook config (#2849) * Add commented webhook config * Remove redundant config // Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! 
Comments for configuration + injection into output .toml use std::collections::HashMap; /// maps entries to Comments that should precede them fn comments() -> HashMap<String, String> { let mut retval = HashMap::new(); retval.insert( "[server]".to_string(), " # Generated Server Configuration File for Grin # # When running the grin executable without specifying any command line # arguments, it will look for this file in two places, in the following # order: # # -The working directory # -[user home]/.grin # ######################################### ### SERVER CONFIGURATION ### ######################################### #Server connection details " .to_string(), ); retval.insert( "api_http_addr".to_string(), " #path of TLS certificate file, self-signed certificates are not supported #tls_certificate_file = \"\" #private key for the TLS certificate #tls_certificate_key = \"\" #the address on which services will listen, e.g. Transaction Pool " .to_string(), ); retval.insert( "api_secret_path".to_string(), " #path of the secret token used by the API to authenticate the calls #comment the it to disable basic auth " .to_string(), ); retval.insert( "db_root".to_string(), " #the directory, relative to current, in which the grin blockchain #is stored " .to_string(), ); retval.insert( "chain_type".to_string(), " #The chain type, which defines the genesis block and the set of cuckoo #parameters used for mining as well as wallet output coinbase maturity. Can be: #AutomatedTesting - For CI builds and instant blockchain creation #UserTesting - For regular user testing (cuckoo 16) #Floonet - For the long term floonet test network #Mainnet - For mainnet " .to_string(), ); retval.insert( "chain_validation_mode".to_string(), " #the chain validation mode, defines how often (if at all) we #want to run a full chain validation. 
Can be: #\"EveryBlock\" - run full chain validation when processing each block (except during sync) #\"Disabled\" - disable full chain validation (just run regular block validation) " .to_string(), ); retval.insert( "archive_mode".to_string(), " #run the node in \"full archive\" mode (default is fast-sync, pruned node) " .to_string(), ); retval.insert( "skip_sync_wait".to_string(), " #skip waiting for sync on startup, (optional param, mostly for testing) " .to_string(), ); retval.insert( "run_tui".to_string(), " #whether to run the ncurses TUI. Ncurses must be installed and this #will also disable logging to stdout " .to_string(), ); retval.insert( "run_test_miner".to_string(), " #Whether to run a test miner. This is only for developer testing (chaintype #usertesting) at cuckoo 16, and will only mine into the default wallet port. #real mining should use the standalone grin-miner " .to_string(), ); retval.insert( "[server.webhook_config]".to_string(), " ######################################### ### WEBHOOK CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "nthreads".to_string(), " #The url where a POST request will be sent when a new block is accepted by our node. #block_accepted_url = \"http://127.0.0.1:8080/acceptedblock\" #The url where a POST request will be sent when a new transaction is received by a peer. #tx_received_url = \"http://127.0.0.1:8080/tx\" #The url where a POST request will be sent when a new header is received by a peer. #header_received_url = \"http://127.0.0.1:8080/header\" #The url where a POST request will be sent when a new block is received by a peer. #block_received_url = \"http://127.0.0.1:8080/block\" #The number of worker threads that will be assigned to making the http requests. " .to_string(), ); retval.insert( "timeout".to_string(), " #The timeout of the http request in seconds. 
" .to_string(), ); retval.insert( "[server.dandelion_config]".to_string(), " ######################################### ### DANDELION CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "epoch_secs".to_string(), " #dandelion epoch duration " .to_string(), ); retval.insert( "aggregation_secs".to_string(), " #dandelion aggregation period in secs " .to_string(), ); retval.insert( "embargo_secs".to_string(), " #fluff and broadcast after embargo expires if tx not seen on network " .to_string(), ); retval.insert( "stem_probability".to_string(), " #dandelion stem probability (stem 90% of the time, fluff 10% of the time) " .to_string(), ); retval.insert( "[server.p2p_config]".to_string(), "#test miner wallet URL (burns if this doesn't exist) #test_miner_wallet_url = \"http://127.0.0.1:3415\" ######################################### ### SERVER P2P CONFIGURATION ### ######################################### #The P2P server details (i.e. the server that communicates with other " .to_string(), ); retval.insert( "host".to_string(), " #The interface on which to listen. #0.0.0.0 will listen on all interfaces, allowing others to interact #127.0.0.1 will listen on the local machine only " .to_string(), ); retval.insert( "port".to_string(), " #The port on which to listen. 
" .to_string(), ); retval.insert( "seeding_type".to_string(), " #how to seed this server, can be None, List or DNSSeed " .to_string(), ); retval.insert( "[server.p2p_config.capabilities]".to_string(), "#If the seeding type is List, the list of peers to connect to can #be specified as follows: #seeds = [\"192.168.0.1:3414\",\"192.168.0.2:3414\"] #hardcoded peer lists for allow/deny #will *only* connect to peers in allow list #peers_allow = [\"192.168.0.1:3414\", \"192.168.0.2:3414\"] #will *never* connect to peers in deny list #peers_deny = [\"192.168.0.3:3414\", \"192.168.0.4:3414\"] #a list of preferred peers to connect to #peers_preferred = [\"192.168.0.1:3414\",\"192.168.0.2:3414\"] #how long a banned peer should stay banned #ban_window = 10800 #maximum number of peers #peer_max_count = 125 #preferred minimum number of peers (we'll actively keep trying to add peers #until we get to at least this number #peer_min_preferred_count = 8 # 15 = Bit flags for FULL_NODE #This structure needs to be changed internally, to make it more configurable # A preferred dandelion_peer, mainly used for testing dandelion # dandelion_peer = \"10.0.0.1:13144\" " .to_string(), ); retval.insert( "[server.pool_config]".to_string(), " ######################################### ### MEMPOOL CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "accept_fee_base".to_string(), " #base fee that's accepted into the pool " .to_string(), ); retval.insert( "max_pool_size".to_string(), " #maximum number of transactions allowed in the pool " .to_string(), ); retval.insert( "max_stempool_size".to_string(), " #maximum number of transactions allowed in the stempool " .to_string(), ); retval.insert( "mineable_max_weight".to_string(), " #maximum total weight of transactions that can get selected to build a block " .to_string(), ); retval.insert( "[server.stratum_mining_config]".to_string(), " ################################################ ### STRATUM MINING SERVER 
CONFIGURATION ### ################################################ " .to_string(), ); retval.insert( "enable_stratum_server".to_string(), " #whether stratum server is enabled " .to_string(), ); retval.insert( "stratum_server_addr".to_string(), " #what port and address for the stratum server to listen on " .to_string(), ); retval.insert( "attempt_time_per_block".to_string(), " #the amount of time, in seconds, to attempt to mine on a particular #header before stopping and re-collecting transactions from the pool " .to_string(), ); retval.insert( "minimum_share_difficulty".to_string(), " #the minimum acceptable share difficulty to request from miners " .to_string(), ); retval.insert( "wallet_listener_url".to_string(), " #the wallet receiver to which coinbase rewards will be sent " .to_string(), ); retval.insert( "burn_reward".to_string(), " #whether to ignore the reward (mostly for testing) " .to_string(), ); retval.insert( "[logging]".to_string(), " ######################################### ### LOGGING CONFIGURATION ### ######################################### " .to_string(), ); retval.insert( "log_to_stdout".to_string(), " #whether to log to stdout " .to_string(), ); retval.insert( "stdout_log_level".to_string(), " #log level for stdout: Error, Warning, Info, Debug, Trace " .to_string(), ); retval.insert( "log_to_file".to_string(), " #whether to log to a file " .to_string(), ); retval.insert( "file_log_level".to_string(), " #log level for file: Error, Warning, Info, Debug, Trace " .to_string(), ); retval.insert( "log_file_path".to_string(), " #log file path " .to_string(), ); retval.insert( "log_file_append".to_string(), " #whether to append to the log file (true), or replace it on every run (false) " .to_string(), ); retval.insert( "log_max_size".to_string(), " #maximum log file size in bytes before performing log rotation #comment it to disable log rotation " .to_string(), ); retval.insert( "log_max_files".to_string(), " #maximum count of the log files to rotate 
over " .to_string(), ); retval } fn get_key(line: &str) -> String { if line.contains("[") && line.contains("]") { return line.to_owned(); } else if line.contains("=") { return line.split("=").collect::<Vec<&str>>()[0].trim().to_owned(); } else { return "NOT_FOUND".to_owned(); } } pub fn insert_comments(orig: String) -> String { let comments = comments(); let lines: Vec<&str> = orig.split("\n").collect(); let mut out_lines = vec![]; for l in lines { let key = get_key(l); if let Some(v) = comments.get(&key) { out_lines.push(v.to_owned()); } out_lines.push(l.to_owned()); out_lines.push("\n".to_owned()); } let mut ret_val = String::from(""); for l in out_lines { ret_val.push_str(&l); } ret_val.to_owned() }
// Copyright 2015 Nicholas Bishop // // Closest-point method adapted from "Real-Time Collision Detection" // by Christer Ericson, published by Morgan Kaufmann Publishers, // Copyright 2005 Elsevier Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use vector::{distance3, dot3, Vec3f}; /// Line segment between two points #[derive(Clone, Copy, Debug, PartialEq)] pub struct Segment3f { pub start: Vec3f, pub end: Vec3f } impl Segment3f { /// Create a segment between two points pub fn new(start: &Vec3f, end: &Vec3f) -> Segment3f { Segment3f { start: *start, end: *end } } /// Length of the line segment pub fn length(&self) -> f32 { distance3(self.start, self.end) } /// Convert a distance in coordinate space to a distance in the /// line segment's parametric space. The sign of the input is /// kept. pub fn distance_to_parametric_delta(&self, distance: f32) -> f32 { distance / self.length() } /// Find the point on the segment closest to the input point. The /// return value contains both the parametric and actual location /// of the closest point. 
/// /// Adapted from "Real-Time Collision Detection" by Christer /// Ericson, published by Morgan Kaufmann Publishers, Copyright /// 2005 Elsevier Inc pub fn closest_point_to_point(&self, point: &Vec3f) -> (f32, Vec3f) { let a = self.start; let b = self.end; let ab = b - a; // Project point onto ab, but deferring divide by dot3(ab, ab) let t = dot3(*point - a, ab); if t <= 0.0f32 { // point projects outside the [a,b] interval, on the a // side; clamp to a (0.0f32, a) } else { // Always nonnegative since denom = ||ab|| ∧ 2 let denom = dot3(ab, ab); if t >= denom { // point projects outside the [a,b] interval, on the b // side; clamp to b (1.0f32, b) } else { // point projects inside the [a,b] interval; must do // deferred divide now (t / denom, a + ab * t) } } } } #[test] fn test_segment_length() { use vector::vec3f; let s = Segment3f::new(&vec3f(0, 0, 0), &vec3f(0, 0, 9)); assert!(s.length() == 9.0); } #[test] fn test_segment_distance_to_parametric_delta() { use vector::vec3f; let s = Segment3f::new(&vec3f(0, 1, 0), &vec3f(0, 7, 0)); assert!(s.distance_to_parametric_delta(0.0) == 0.0); assert!(s.distance_to_parametric_delta(6.0) == 1.0); assert!(s.distance_to_parametric_delta(12.0) == 2.0); assert!(s.distance_to_parametric_delta(-3.0) == -0.5); } #[test] fn test_segment_closest_point_to_point() { use vector::vec3f; let s = Segment3f::new(&vec3f(2, 0, 0), &vec3f(3, 0, 0)); assert!(s.closest_point_to_point(&vec3f(1, 0, 0)) == (0.0, vec3f(2, 0, 0))); assert!(s.closest_point_to_point(&vec3f(4, 0, 0)) == (1.0, vec3f(3, 0, 0))); assert!(s.closest_point_to_point(&vec3f(2.5, 1, 0)) == (0.5, vec3f(2.5, 0, 0))); } Add segment distance_from_parametric_delta // Copyright 2015 Nicholas Bishop // // Closest-point method adapted from "Real-Time Collision Detection" // by Christer Ericson, published by Morgan Kaufmann Publishers, // Copyright 2005 Elsevier Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance 
with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use vector::{distance3, dot3, Vec3f}; /// Line segment between two points #[derive(Clone, Copy, Debug, PartialEq)] pub struct Segment3f { pub start: Vec3f, pub end: Vec3f } impl Segment3f { /// Create a segment between two points pub fn new(start: &Vec3f, end: &Vec3f) -> Segment3f { Segment3f { start: *start, end: *end } } /// Length of the line segment pub fn length(&self) -> f32 { distance3(self.start, self.end) } /// Convert a distance in coordinate space to a distance in the /// line segment's parametric space. The sign of the input is /// kept. pub fn distance_to_parametric_delta(&self, distance: f32) -> f32 { distance / self.length() } /// Convert a parametric delta to coordinate space. The sign of /// the input is kept. pub fn distance_from_parametric_delta(&self, delta: f32) -> f32 { delta * self.length() } /// Find the point on the segment closest to the input point. The /// return value contains both the parametric and actual location /// of the closest point. 
/// /// Adapted from "Real-Time Collision Detection" by Christer /// Ericson, published by Morgan Kaufmann Publishers, Copyright /// 2005 Elsevier Inc pub fn closest_point_to_point(&self, point: &Vec3f) -> (f32, Vec3f) { let a = self.start; let b = self.end; let ab = b - a; // Project point onto ab, but deferring divide by dot3(ab, ab) let t = dot3(*point - a, ab); if t <= 0.0f32 { // point projects outside the [a,b] interval, on the a // side; clamp to a (0.0f32, a) } else { // Always nonnegative since denom = ||ab|| ∧ 2 let denom = dot3(ab, ab); if t >= denom { // point projects outside the [a,b] interval, on the b // side; clamp to b (1.0f32, b) } else { // point projects inside the [a,b] interval; must do // deferred divide now (t / denom, a + ab * t) } } } } #[test] fn test_segment_length() { use vector::vec3f; let s = Segment3f::new(&vec3f(0, 0, 0), &vec3f(0, 0, 9)); assert!(s.length() == 9.0); } #[test] fn test_segment_distance_to_parametric_delta() { use vector::vec3f; let s = Segment3f::new(&vec3f(0, 1, 0), &vec3f(0, 7, 0)); assert!(s.distance_to_parametric_delta(0.0) == 0.0); assert!(s.distance_to_parametric_delta(6.0) == 1.0); assert!(s.distance_to_parametric_delta(12.0) == 2.0); assert!(s.distance_to_parametric_delta(-3.0) == -0.5); } #[test] fn test_segment_closest_point_to_point() { use vector::vec3f; let s = Segment3f::new(&vec3f(2, 0, 0), &vec3f(3, 0, 0)); assert!(s.closest_point_to_point(&vec3f(1, 0, 0)) == (0.0, vec3f(2, 0, 0))); assert!(s.closest_point_to_point(&vec3f(4, 0, 0)) == (1.0, vec3f(3, 0, 0))); assert!(s.closest_point_to_point(&vec3f(2.5, 1, 0)) == (0.5, vec3f(2.5, 0, 0))); }
use std::fmt;
use std::ops;
use std::ops::{Range, RangeFrom, RangeTo, RangeFull};

use super::util;

// This is useful til the RangeArgument is made stable
trait FromRange {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize);
}

impl FromRange for RangeFull {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize) {
        return (0, seg.len());
    }
}

impl FromRange for Range<usize> {
    #[inline(always)]
    fn from_range(&self, _: &Segment) -> (usize, usize) {
        return (self.start, self.end);
    }
}

impl FromRange for RangeFrom<usize> {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize) {
        return (self.start, seg.len());
    }
}

impl FromRange for RangeTo<usize> {
    #[inline(always)]
    fn from_range(&self, _: &Segment) -> (usize, usize) {
        return (0, self.end);
    }
}

// A growable byte sequence stored as a list of blocks (rope-like), so
// inserts/removals in the middle only shuffle one block instead of the
// whole buffer.
pub struct Segment {
    vecs: Vec<Vec<u8>>,
    // Cached total length across all blocks; refreshed via calc_len().
    length: usize,
}

// Cursor into a Segment: the element at vecs[outer][inner].
#[derive(Copy, Clone)]
struct Index {
    outer: usize,
    inner: usize,
}

// Shared-reference byte iterator; num_elem limits the iteration length.
pub struct Items<'a> {
    seg: &'a Segment,
    index: Index,
    num_elem: Option<usize>,
}

// Mutable-reference byte iterator.
pub struct MutItems<'a> {
    seg: &'a mut Segment,
    index: Index,
    num_elem: Option<usize>,
}

// Iterator over the underlying blocks as slices.
pub struct Slices<'a> {
    seg: &'a Segment,
    outer: usize,
}

// Blocks larger than MAX_BLOCK_SIZE get split on MIN_BLOCK_SIZE-aligned
// boundaries before an insert; see prepare_insert().
static MIN_BLOCK_SIZE: usize = 1024 * 1024;
static MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024;

impl Segment {
    pub fn new() -> Segment {
        Segment {
            vecs: Vec::new(),
            length: 0,
        }
    }

    // Take ownership of an existing Vec as the single initial block.
    pub fn from_vec(values: Vec<u8>) -> Segment {
        let len = values.len();
        Segment {
            vecs: vec!(values),
            length: len,
        }
    }

    pub fn from_slice(values: &[u8]) -> Segment {
        Segment {
            vecs: vec!(values.into()),
            length: values.len(),
        }
    }

    pub fn len(&self) -> usize {
        self.length
    }

    // Recompute the cached total length from the block lengths.
    fn calc_len(&mut self) {
        self.length = 0;
        for len in self.vecs.iter().map(|v| v.len()) {
            self.length += len
        }
    }

    // Map an absolute byte position to a (block, offset) cursor.
    // With for_insert=true a position exactly at the end of a block is
    // valid (insertion point); otherwise it falls through to the next
    // block. Panics when pos is past the end.
    fn pos_to_index(&self, pos: usize, for_insert: bool) -> Index {
        if pos == 0 {
            return Index { outer: 0, inner: 0 };
        }
        let mut cur_pos = pos;
        for (i, vec) in self.vecs.iter().enumerate() {
            if cur_pos < vec.len() || (for_insert && cur_pos == vec.len()) {
                return Index {
                    outer: i,
                    inner: cur_pos,
                }
            }
            cur_pos -= vec.len();
        }
        panic!("Position {} is out of bounds", pos);
    }

    // Iterate bytes in `range` (panics if the range is inverted).
    pub fn iter_range<'a, R: FromRange>(&'a self, range: R) -> Items<'a> {
        let (from, to) = range.from_range(self);
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        Items {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    pub fn mut_iter_range<'a, R: FromRange>(&'a mut self, range: R) -> MutItems<'a> {
        let (from, to) = range.from_range(self);
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        MutItems {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    pub fn iter_slices<'a>(&'a self) -> Slices<'a> {
        Slices {
            seg: self,
            outer: 0,
        }
    }

    // Ensure the block targeted by `index` is small enough to insert into
    // (below MAX_BLOCK_SIZE), splitting it on a MIN_BLOCK_SIZE-aligned
    // boundary when necessary, and return the (possibly relocated) cursor.
    fn prepare_insert(&mut self, index: Index) -> Index {
        // TODO: Get self.vecs.get(index.outer) into a local variable without ruining lifetimes?
        if index.outer >= self.vecs.len() {
            self.vecs.push(Vec::new());
        }
        if self.vecs[index.outer].len() < MAX_BLOCK_SIZE {
            return index;
        }
        let page_start_idx = (index.inner / MIN_BLOCK_SIZE) * MIN_BLOCK_SIZE;
        if page_start_idx == 0 {
            if self.vecs[index.outer].len() > MAX_BLOCK_SIZE {
                // NB: the odd `Vec < _ >=` spacing still parses as
                // `Vec<_> =` (the parser splits the `>=` token here).
                let insert_vec: Vec < _ >= self.vecs[index.outer][MIN_BLOCK_SIZE..].into();
                self.vecs.insert(index.outer + 1, insert_vec);
                self.vecs[index.outer].truncate(MIN_BLOCK_SIZE);
            }
            return index;
        } else {
            // Split the tail off at the page boundary and recurse into
            // the new block that now holds the insertion point.
            let insert_vec: Vec<_> = self.vecs[index.outer][page_start_idx..].into();
            self.vecs.insert(index.outer + 1, insert_vec);
            self.vecs[index.outer].truncate(page_start_idx);
            return self.prepare_insert(Index { outer: index.outer + 1, inner: index.inner - page_start_idx })
        }
    }

    // Insert `values` at absolute byte offset `offset`.
    pub fn insert(&mut self, offset: usize, values: &[u8]) {
        let mut index = self.pos_to_index(offset, true);
        index = self.prepare_insert(index);
        // This is needed for the mut borrow vec
        {
            let vec = &mut self.vecs[index.outer];
            // TODO: There has to be a better way for this range
            // Inserting in reverse keeps the bytes in original order.
            for val in values.into_iter().rev() {
                vec.insert(index.inner, *val);
            }
        }
        self.calc_len();
    }

    // TODO: Convert to drain when that settles
    // Remove the bytes in [start_offset, end_offset) and return them.
    pub fn move_out_slice(&mut self, start_offset: usize, end_offset: usize) -> Vec<u8> {
        assert!(start_offset <= end_offset);
        let mut res = Vec::new();
        let mut index = self.pos_to_index(start_offset, false);
        let num_elem = end_offset - start_offset;
        for _ in 0..num_elem {
            let c = self.vecs[index.outer].remove(index.inner);
            res.push(c);
            if index.inner >= self.vecs[index.outer].len() {
                if self.vecs[index.outer].len() == 0 {
                    // Block emptied: drop it; the next block slides into
                    // this outer slot, so the cursor stays valid.
                    self.vecs.remove(index.outer);
                } else {
                    index.inner = 0;
                    index.outer += 1;
                }
            }
        }
        self.calc_len();
        res
    }

    pub fn find_slice(&self, needle: &[u8]) -> Option<usize> {
        self.find_slice_from(0, needle)
    }

    // Naive O(len * needle.len()) search for `needle` starting at `from`.
    pub fn find_slice_from(&self, from: usize, needle: &[u8]) -> Option<usize> {
        for i in from..self.len() {
            if util::iter_equals(self.iter_range(i..i+needle.len()), needle.iter()) {
                return Some(i);
            }
        }
        None
    }

    #[cfg(test)]
    fn get_lengths(&self) -> Vec<usize> {
        self.vecs.iter().map(|v| v.len()).collect::<Vec<usize>>()
    }
}

impl ops::Index<usize> for Segment {
    type Output = u8;
    fn index<'a>(&'a self, _index: usize) -> &'a u8 {
        let idx = self.pos_to_index(_index, false);
        &self.vecs[idx.outer][idx.inner]
    }
}

impl ops::IndexMut<usize> for Segment {
    fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut u8 {
        let idx = self.pos_to_index(_index, false);
        &mut self.vecs[idx.outer][idx.inner]
    }
}

impl fmt::Debug for Segment {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.vecs.fmt(f)
    }
}

impl<'a> Iterator for Items<'a> {
    type Item = &'a u8;
    fn next(&mut self) -> Option<&'a u8> {
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        if let Some(ref mut num_elem) = self.num_elem {
            if *num_elem <= 0 {
                return None;
            }
            *num_elem -= 1;
        }
        let elem = {
            let vv = &self.seg.vecs[self.index.outer];
            &vv[self.index.inner]
        };
        self.index.inner += 1;
        // Step over block boundaries.
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(elem)
    }
}

impl<'a> Iterator for MutItems<'a>
{
    type Item = &'a mut u8;

    fn next(&mut self) -> Option<&'a mut u8> {
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        if let Some(ref mut num_elem) = self.num_elem {
            if *num_elem <= 0 {
                return None;
            }
            *num_elem -= 1;
        }
        // Grab a raw pointer so the mutable borrow of self.seg ends before
        // the cursor is advanced; re-borrowed as &'a mut below. Each call
        // yields a distinct element, so aliasing is not possible.
        let elem_raw: *mut u8 = {
            let vv = &mut self.seg.vecs[self.index.outer];
            &mut vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(unsafe { &mut *elem_raw })
    }
}

impl<'a> Iterator for Slices<'a> {
    type Item = &'a [u8];

    // Yield each underlying block as a slice, in order.
    fn next(&mut self) -> Option<&'a [u8]> {
        if self.outer >= self.seg.vecs.len() {
            None
        } else {
            let i = self.outer;
            self.outer += 1;
            Some(&self.seg.vecs[i])
        }
    }
}

#[test]
fn test_segment() {
    let mut s = Segment::from_slice(&[1, 2, 3, 4]);
    // BUG FIX: `Segment` has no `insert_slice` method — the slice-insert
    // entry point is `insert`, so this test did not compile as written.
    s.insert(0, &[7, 7, 7, 7, 7]);
}

Add some initial really basic tests to the segment

use std::fmt;
use std::ops;
use std::ops::{Range, RangeFrom, RangeTo, RangeFull};

use super::util;

// This is useful til the RangeArgument is made stable
trait FromRange {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize);
}

impl FromRange for RangeFull {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize) {
        return (0, seg.len());
    }
}

impl FromRange for Range<usize> {
    #[inline(always)]
    fn from_range(&self, _: &Segment) -> (usize, usize) {
        return (self.start, self.end);
    }
}

impl FromRange for RangeFrom<usize> {
    #[inline(always)]
    fn from_range(&self, seg: &Segment) -> (usize, usize) {
        return (self.start, seg.len());
    }
}

impl FromRange for RangeTo<usize> {
    #[inline(always)]
    fn from_range(&self, _: &Segment) -> (usize, usize) {
        return (0, self.end);
    }
}

pub struct Segment {
    vecs: Vec<Vec<u8>>,
    length: usize,
}

#[derive(Copy, Clone)]
struct Index {
    outer: usize,
    inner: usize,
}

pub struct Items<'a> {
    seg: &'a Segment,
    index: Index,
    num_elem: Option<usize>,
}

pub struct MutItems<'a> {
    seg: &'a mut Segment,
    index: Index,
    num_elem: Option<usize>,
}

pub struct
Slices<'a> {
    seg: &'a Segment,
    outer: usize,
}

// Blocks larger than MAX_BLOCK_SIZE get split on MIN_BLOCK_SIZE-aligned
// boundaries before an insert; see prepare_insert().
static MIN_BLOCK_SIZE: usize = 1024 * 1024;
static MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024;

impl Segment {
    pub fn new() -> Segment {
        Segment {
            vecs: Vec::new(),
            length: 0,
        }
    }

    // Take ownership of an existing Vec as the single initial block.
    pub fn from_vec(values: Vec<u8>) -> Segment {
        let len = values.len();
        Segment {
            vecs: vec!(values),
            length: len,
        }
    }

    pub fn from_slice(values: &[u8]) -> Segment {
        Segment {
            vecs: vec!(values.into()),
            length: values.len(),
        }
    }

    pub fn len(&self) -> usize {
        self.length
    }

    // Recompute the cached total length from the block lengths.
    fn calc_len(&mut self) {
        self.length = 0;
        for len in self.vecs.iter().map(|v| v.len()) {
            self.length += len
        }
    }

    // Map an absolute byte position to a (block, offset) cursor; with
    // for_insert=true a position at a block's end is a valid insertion
    // point. Panics when pos is past the end.
    fn pos_to_index(&self, pos: usize, for_insert: bool) -> Index {
        if pos == 0 {
            return Index { outer: 0, inner: 0 };
        }
        let mut cur_pos = pos;
        for (i, vec) in self.vecs.iter().enumerate() {
            if cur_pos < vec.len() || (for_insert && cur_pos == vec.len()) {
                return Index {
                    outer: i,
                    inner: cur_pos,
                }
            }
            cur_pos -= vec.len();
        }
        panic!("Position {} is out of bounds", pos);
    }

    pub fn iter_range<'a, R: FromRange>(&'a self, range: R) -> Items<'a> {
        let (from, to) = range.from_range(self);
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        Items {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    pub fn mut_iter_range<'a, R: FromRange>(&'a mut self, range: R) -> MutItems<'a> {
        let (from, to) = range.from_range(self);
        if to < from {
            panic!("to ({}) is smaller than from ({})!", to, from);
        }
        let idx = self.pos_to_index(from, false);
        MutItems {
            seg: self,
            index: idx,
            num_elem: Some(to - from),
        }
    }

    pub fn iter_slices<'a>(&'a self) -> Slices<'a> {
        Slices {
            seg: self,
            outer: 0,
        }
    }

    // Ensure the block targeted by `index` can take an insert (len below
    // MAX_BLOCK_SIZE), splitting on a MIN_BLOCK_SIZE-aligned boundary when
    // needed, and return the possibly relocated cursor.
    fn prepare_insert(&mut self, index: Index) -> Index {
        // TODO: Get self.vecs.get(index.outer) into a local variable without ruining lifetimes?
        if index.outer >= self.vecs.len() {
            self.vecs.push(Vec::new());
        }
        if self.vecs[index.outer].len() < MAX_BLOCK_SIZE {
            return index;
        }
        let page_start_idx = (index.inner / MIN_BLOCK_SIZE) * MIN_BLOCK_SIZE;
        if page_start_idx == 0 {
            if self.vecs[index.outer].len() > MAX_BLOCK_SIZE {
                // NB: `Vec < _ >=` still parses as `Vec<_> =` (the parser
                // splits the `>=` token after a generic argument list).
                let insert_vec: Vec < _ >= self.vecs[index.outer][MIN_BLOCK_SIZE..].into();
                self.vecs.insert(index.outer + 1, insert_vec);
                self.vecs[index.outer].truncate(MIN_BLOCK_SIZE);
            }
            return index;
        } else {
            // Split the tail off at the page boundary and recurse into the
            // new block holding the insertion point.
            let insert_vec: Vec<_> = self.vecs[index.outer][page_start_idx..].into();
            self.vecs.insert(index.outer + 1, insert_vec);
            self.vecs[index.outer].truncate(page_start_idx);
            return self.prepare_insert(Index { outer: index.outer + 1, inner: index.inner - page_start_idx })
        }
    }

    // Insert `values` at absolute byte offset `offset`.
    pub fn insert(&mut self, offset: usize, values: &[u8]) {
        let mut index = self.pos_to_index(offset, true);
        index = self.prepare_insert(index);
        // This is needed for the mut borrow vec
        {
            let vec = &mut self.vecs[index.outer];
            // TODO: There has to be a better way for this range
            // Inserting in reverse keeps the bytes in original order.
            for val in values.into_iter().rev() {
                vec.insert(index.inner, *val);
            }
        }
        self.calc_len();
    }

    // TODO: Convert to drain when that settles
    // Remove the bytes in [start_offset, end_offset) and return them.
    pub fn move_out_slice(&mut self, start_offset: usize, end_offset: usize) -> Vec<u8> {
        assert!(start_offset <= end_offset);
        let mut res = Vec::new();
        let mut index = self.pos_to_index(start_offset, false);
        let num_elem = end_offset - start_offset;
        for _ in 0..num_elem {
            let c = self.vecs[index.outer].remove(index.inner);
            res.push(c);
            if index.inner >= self.vecs[index.outer].len() {
                if self.vecs[index.outer].len() == 0 {
                    // Block emptied: drop it; the next block slides into
                    // this outer slot, keeping the cursor valid.
                    self.vecs.remove(index.outer);
                } else {
                    index.inner = 0;
                    index.outer += 1;
                }
            }
        }
        self.calc_len();
        res
    }

    pub fn find_slice(&self, needle: &[u8]) -> Option<usize> {
        self.find_slice_from(0, needle)
    }

    // Naive O(len * needle.len()) search for `needle` starting at `from`.
    pub fn find_slice_from(&self, from: usize, needle: &[u8]) -> Option<usize> {
        for i in from..self.len() {
            if util::iter_equals(self.iter_range(i..i+needle.len()), needle.iter()) {
                return Some(i);
            }
        }
        None
    }

    #[cfg(test)]
    fn get_lengths(&self) -> Vec<usize> {
        self.vecs.iter().map(|v| v.len()).collect::<Vec<usize>>()
    }
}

impl ops::Index<usize> for Segment {
    type Output = u8;
    fn index<'a>(&'a self, _index: usize) -> &'a u8 {
        let idx = self.pos_to_index(_index, false);
        &self.vecs[idx.outer][idx.inner]
    }
}

impl ops::IndexMut<usize> for Segment {
    fn index_mut<'a>(&'a mut self, _index: usize) -> &'a mut u8 {
        let idx = self.pos_to_index(_index, false);
        &mut self.vecs[idx.outer][idx.inner]
    }
}

impl fmt::Debug for Segment {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.vecs.fmt(f)
    }
}

impl<'a> Iterator for Items<'a> {
    type Item = &'a u8;
    fn next(&mut self) -> Option<&'a u8> {
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        if let Some(ref mut num_elem) = self.num_elem {
            if *num_elem <= 0 {
                return None;
            }
            *num_elem -= 1;
        }
        let elem = {
            let vv = &self.seg.vecs[self.index.outer];
            &vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(elem)
    }
}

impl<'a> Iterator for MutItems<'a> {
    type Item = &'a mut u8;
    fn next(&mut self) -> Option<&'a mut u8> {
        if self.index.outer >= self.seg.vecs.len() {
            return None;
        }
        if let Some(ref mut num_elem) = self.num_elem {
            if *num_elem <= 0 {
                return None;
            }
            *num_elem -= 1;
        }
        // Raw pointer ends the borrow of self.seg before the cursor moves;
        // re-borrowed as &'a mut below (each call yields a distinct element).
        let elem_raw: *mut u8 = {
            let vv = &mut self.seg.vecs[self.index.outer];
            &mut vv[self.index.inner]
        };
        self.index.inner += 1;
        if self.index.inner >= self.seg.vecs[self.index.outer].len() {
            self.index.inner = 0;
            self.index.outer += 1;
        }
        Some(unsafe { &mut *elem_raw })
    }
}

impl<'a> Iterator for Slices<'a> {
    type Item = &'a [u8];
    fn next(&mut self) -> Option<&'a [u8]> {
        if self.outer >= self.seg.vecs.len() {
            None
        } else {
            let i = self.outer;
            self.outer += 1;
            Some(&self.seg.vecs[i])
        }
    }
}

#[test]
fn test_small_segment() {
    let size = 1024;
    let mut seg = Segment::from_vec(vec![1, 2, 3, 4, 5]);
    assert_eq!(Some(4), seg.find_slice(&[5]));
    let seg_len = seg.len();
    seg.insert(seg_len/2, &vec![1 as u8; size]);
    // The trailing 5 shifted right by the inserted run.
    assert_eq!(Some(size + 4), seg.find_slice(&[5]));
}

#[test]
fn test_large_segment() {
    let big_size = 4*1024*1024;
    let small_size = 1024;
    let mut seg = Segment::from_vec(vec![0; big_size]);
    seg.insert(big_size/2, &vec![1 as u8; small_size]);
    assert_eq!(Some(big_size/2 -1), seg.find_slice(&[0, 1]));
    // Make sure we actually tested a "split" version
    let seg_lengths = seg.get_lengths();
    assert_eq!(2, seg_lengths.len());
    let index = seg_lengths[0];
    let sentinal = 100;
    seg[index] = sentinal;
    seg[index+1] = sentinal +1;
    assert_eq!(Some(index), seg.find_slice(&[sentinal, sentinal+1]));
}
use std::sync::Arc;
use std::arch::x86_64::*;
use std::convert::TryInto;

use num_integer::{Integer, div_ceil};
use num_complex::Complex;
use num_traits::Zero;

use strength_reduce::StrengthReducedUsize;
use primal_check::miller_rabin;

use crate::math_utils;
use crate::{Length, IsInverse, Fft};

use super::{AvxNum, avx_vector::{AvxVector, AvxVector256, AvxVector128, AvxArray, AvxArrayMut}};
use super::avx_vector;

// This struct wraps the necessary data to compute (a * b) % divisor, where b and divisor are determined at runtime but rarely change, and a changes on every call.
// It's written using AVX2 instructions and assumes the input a are 64-bit integers, and has a restriction that each a, b, and divisor must be 31-bit numbers or smaller.
#[derive(Clone)]
struct VectorizedMultiplyMod {
    b: __m256i,          // broadcast of b % divisor, one copy per 64-bit lane
    divisor: __m256i,    // broadcast of the divisor
    intermediate: __m256i, // broadcast of floor((b << 32) / divisor), used for strength-reduced division
}

impl VectorizedMultiplyMod {
    // Precompute the broadcast vectors for a fixed (b, divisor) pair.
    // Panics if divisor does not fit in 31 bits.
    #[target_feature(enable = "avx")]
    unsafe fn new(b: u32, divisor: u32) -> Self {
        assert!(divisor.leading_zeros() > 0, "divisor must be less than {}, got {}", 1 << 31, divisor);

        let b = b % divisor;
        let intermediate = ((b as i64) << 32) / divisor as i64;

        Self {
            b: _mm256_set1_epi64x(b as i64),
            divisor: _mm256_set1_epi64x(divisor as i64),
            intermediate: _mm256_set1_epi64x(intermediate),
        }
    }

    // Input: 4 unsigned 64-bit numbers, each less than 2^30
    // Output: (x * multiplier) % divisor for each x in input
    #[allow(unused)]
    #[target_feature(enable = "avx2")]
    unsafe fn mul_rem(&self, a: __m256i) -> __m256i {
        // Pretty hacky, but we need to prove to the compiler that each entry of the divisor is a 32-bit number, by blending the divisor vector with zeroes in the upper bits of each number.
        // If we don't do this manually, the compiler will do it anyways, but only for _mm256_mul_epu32, not for the _mm256_sub_epi64 correction step at the end
        // That inconsistency results in sub-optimal codegen where the compiler inserts extra code to handle the case where divisor is 64-bit. It also results in using one more register than necessary.
        // Since we know that can't happen, we can placate the compiler by explicitly zeroing the upper 32 bit of each divisor and relying on the compiler to lift it out of the loop.
        let masked_divisor = _mm256_blend_epi32(self.divisor, _mm256_setzero_si256(), 0xAA);

        // compute the integer quotient of (a * b) / divisor. Our precomputed intermediate value lets us skip the expensive division via arithmetic strength reduction
        let quotient = _mm256_srli_epi64(_mm256_mul_epu32(a, self.intermediate), 32);

        // Now we can compute numerator - quotient * divisor to get the remainder
        let numerator = _mm256_mul_epu32(a, self.b);
        let quotient_product = _mm256_mul_epu32(quotient, masked_divisor);

        // Standard remainder formula: remainder = numerator - quotient * divisor
        let remainder = _mm256_sub_epi64(numerator, quotient_product);

        // it's possible for the "remainder" to end up between divisor and 2 * divisor. so we'll subtract divisor from remainder, which will make some of the result negative
        // We can then use the subtracted result as the input to a blendv. Sadly avx doesn't have a blendv_epi32 or blendv_epi64, so we're gonna do blendv_pd instead
        // this works because blendv looks at the uppermost bit to decide which variable to use, and for a two's complement i64, the upper most bit is 1 when the number is negative!
        // So when the subtraction result is negative, the uppermost bit is 1, which means the blend will choose the second param, which is the unsubtracted remainder
        let casted_remainder = _mm256_castsi256_pd(remainder);
        let subtracted_remainder = _mm256_castsi256_pd(_mm256_sub_epi64(remainder, masked_divisor));
        let wrapped_remainder = _mm256_castpd_si256(_mm256_blendv_pd(subtracted_remainder, casted_remainder, subtracted_remainder));
        wrapped_remainder
    }
}

/// Implementation of Rader's Algorithm
///
/// This algorithm computes a prime-sized FFT in O(nlogn) time.
/// It does this by converting this size n FFT into a
/// size (n - 1) which is guaranteed to be composite.
///
/// The worst case for this algorithm is when (n - 1) is 2 * prime, resulting in a
/// [Cunningham Chain](https://en.wikipedia.org/wiki/Cunningham_chain)
///
/// ~~~
/// // Computes a forward FFT of size 1201 (prime number), using Rader's Algorithm
/// use rustfft::algorithm::avx::RadersAvx2;
/// use rustfft::{Fft, FFTplanner};
/// use rustfft::num_complex::Complex;
/// use rustfft::num_traits::Zero;
///
/// let mut input: Vec<Complex<f32>> = vec![Zero::zero(); 1201];
/// let mut output: Vec<Complex<f32>> = vec![Zero::zero(); 1201];
///
/// // plan a FFT of size n - 1 = 1200
/// let mut planner = FFTplanner::new(false);
/// let inner_fft = planner.plan_fft(1200);
///
/// let fft = RadersAvx2::new(inner_fft).unwrap();
/// fft.process(&mut input, &mut output);
/// ~~~
///
/// Rader's Algorithm is relatively expensive compared to other FFT algorithms. Benchmarking shows that it is up to
/// an order of magnitude slower than similar composite sizes. In the example size above of 1201, benchmarking shows
/// that it takes 2.5x more time to compute than a FFT of size 1200.
pub struct RadersAvx2<T: AvxNum> {
    // Steps the input gather indexes by a fixed power of the primitive root, mod len
    input_index_multiplier: VectorizedMultiplyMod,
    // First vector of input gather indexes
    input_index_init: __m256i,
    // Precomputed gather indexes used to un-permute the output
    output_index_mapping: Box<[__m128i]>,
    // FFT of the scaled, conjugated twiddle factors
    twiddles: Box<[T::VectorType]>,

    inner_fft: Arc<dyn Fft<T>>,

    len: usize,

    inplace_scratch_len: usize,
    outofplace_scratch_len: usize,

    inverse: bool,
}

impl<T: AvxNum> RadersAvx2<T> {
    /// Preallocates necessary arrays and precomputes necessary data to efficiently compute the FFT
    /// Returns Ok(instance) if this machine has the required instruction sets ("avx", "fma", and "avx2"), Err() if some instruction sets are missing
    ///
    /// # Panics
    /// Panics if `inner_fft_len() + 1` is not a prime number.
    #[inline]
    pub fn new(inner_fft: Arc<dyn Fft<T>>) -> Result<Self, ()> {
        let has_avx = is_x86_feature_detected!("avx");
        let has_avx2 = is_x86_feature_detected!("avx2");
        let has_fma = is_x86_feature_detected!("fma");
        if has_avx && has_avx2 && has_fma {
            // Safety: new_with_avx2 requires the "avx" feature set. Since we know it's present, we're safe
            Ok(unsafe { Self::new_with_avx(inner_fft) })
        } else {
            Err(())
        }
    }

    // Precompute all Rader's-algorithm data: twiddle FFT, input/output index
    // remappings, and scratch sizes. Caller must guarantee AVX is available.
    #[target_feature(enable = "avx")]
    unsafe fn new_with_avx(inner_fft: Arc<dyn Fft<T>>) -> Self {
        let inner_fft_len = inner_fft.len();
        let len = inner_fft_len + 1;
        assert!(miller_rabin(len as u64), "For raders algorithm, inner_fft.len() + 1 must be prime. Expected prime number, got {} + 1 = {}", inner_fft_len, len);

        let inverse = inner_fft.is_inverse();
        let reduced_len = StrengthReducedUsize::new(len);

        // compute the primitive root and its inverse for this size
        let primitive_root = math_utils::primitive_root(len as u64).unwrap() as usize;

        // compute the multiplicative inverse of primitive_root mod len and vice versa.
        // i64::extended_gcd will compute both the inverse of left mod right, and the inverse of right mod left, but we're only going to use one of them
        // the primitive root inverse might be negative, if so make it positive by wrapping
        let gcd_data = i64::extended_gcd(&(primitive_root as i64), &(len as i64));
        let primitive_root_inverse = if gcd_data.x >= 0 {
            gcd_data.x
        } else {
            gcd_data.x + len as i64
        } as usize;

        // precompute the coefficients to use inside the process method
        let unity_scale = T::from_f64(1f64 / inner_fft_len as f64).unwrap();
        let mut inner_fft_input = vec![Complex::zero(); inner_fft_len];
        let mut twiddle_input = 1;
        for input_cell in &mut inner_fft_input {
            let twiddle = T::generate_twiddle_factor(twiddle_input, len, inverse);
            *input_cell = twiddle * unity_scale;

            // walk the twiddle indexes in primitive-root-inverse order
            twiddle_input = (twiddle_input * primitive_root_inverse) % reduced_len;
        }

        let required_inner_scratch = inner_fft.get_inplace_scratch_len();
        let extra_inner_scratch = if required_inner_scratch <= inner_fft_len {
            0
        } else {
            required_inner_scratch
        };

        //precompute a FFT of our reordered twiddle factors
        let mut inner_fft_scratch = vec![Zero::zero(); required_inner_scratch];
        inner_fft.process_inplace_with_scratch(&mut inner_fft_input, &mut inner_fft_scratch);

        // When computing the FFT, we'll want this array to be pre-conjugated, so conjugate it. at the same time, convert it to vectors for convenient use later.
        let conjugation_mask = AvxVector256::broadcast_complex_elements(Complex::new(T::zero(), -T::zero()));
        let inner_fft_multiplier : Box<[_]> = inner_fft_input.chunks(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| {
            let chunk_vector = match chunk.len() {
                1 => chunk.load_partial1_complex(0).zero_extend(),
                2 => if chunk.len() == T::VectorType::COMPLEX_PER_VECTOR { chunk.load_complex(0) } else {chunk.load_partial2_complex(0).zero_extend()},
                3 => chunk.load_partial3_complex(0),
                4 => chunk.load_complex(0),
                _ => unreachable!()
            };
            AvxVector::xor(chunk_vector, conjugation_mask) // compute our conjugation by xoring our data with a precomputed mask
        }).collect();

        // Set up the data for our input index remapping computation
        const NUM_POWERS : usize = 5;
        let mut root_powers = [0; NUM_POWERS];
        let mut current_power = 1;
        for i in 0..NUM_POWERS {
            root_powers[i] = current_power;
            current_power = (current_power * primitive_root) % reduced_len;
        }

        // f32 packs 4 complex numbers per vector, f64 packs 2; the index
        // stride and initial index layout differ accordingly.
        let (input_index_multiplier, input_index_init) = if T::VectorType::COMPLEX_PER_VECTOR == 4 {
            (VectorizedMultiplyMod::new(root_powers[4] as u32, len as u32), _mm256_loadu_si256(root_powers.as_ptr().add(1) as *const __m256i))
        } else {
            let duplicated_powers = [root_powers[1],root_powers[1],root_powers[2],root_powers[2],];
            (VectorizedMultiplyMod::new(root_powers[2] as u32, len as u32), _mm256_loadu_si256(duplicated_powers.as_ptr() as *const __m256i))
        };

        // Set up our output index remapping. Ideally we could compute the output indexes on the fly, but the output reindexing requires scatter, which doesn't exist until avx-512
        // Instead, we can invert the scatter indexes to be gather indexes. But if there's an algorithmic way to compute this, I don't know what it is. So instead, we're going to precompute the mapping
        // We want enough elements in our array to fill out an entire set of vectors so that we don't have to deal with any partial indexes etc.
        let mapping_size = 1 + div_ceil(len, T::VectorType::COMPLEX_PER_VECTOR) * T::VectorType::COMPLEX_PER_VECTOR;
        let mut output_mapping_inverse: Vec<i32> = vec![0; mapping_size];
        let mut output_index = 1;
        for i in 1..len {
            output_index = (output_index * primitive_root_inverse) % reduced_len;
            output_mapping_inverse[output_index] = i.try_into().unwrap();
        }

        // the actual vector of indexes depends on whether we're f32 or f64
        let output_index_mapping = if T::VectorType::COMPLEX_PER_VECTOR == 4 {
            (&output_mapping_inverse[1..]).chunks_exact(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| _mm_loadu_si128(chunk.as_ptr() as *const __m128i)).collect::<Box<[__m128i]>>()
        } else {
            (&output_mapping_inverse[1..]).chunks_exact(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| {
                let duplicated_indexes = [chunk[0], chunk[0], chunk[1], chunk[1]];
                _mm_loadu_si128(duplicated_indexes.as_ptr() as *const __m128i)
            }).collect::<Box<[__m128i]>>()
        };

        Self {
            input_index_multiplier,
            input_index_init,
            output_index_mapping,
            inner_fft: inner_fft,
            twiddles: inner_fft_multiplier,
            len,
            inplace_scratch_len: len + extra_inner_scratch,
            outofplace_scratch_len: extra_inner_scratch,
            inverse,
        }
    }

    // Do the necessary setup for rader's algorithm: Reorder the inputs into the output buffer, gather a sum of all inputs.
Return the first input, and the sum of all inputs #[target_feature(enable = "avx2", enable = "avx", enable = "fma")] unsafe fn prepare_raders(&self, input: &[Complex<T>], output: &mut [Complex<T>]) -> (Complex<T>, Complex<T>) { let mut vector_sum = T::VectorType::zero(); let mut indexes = self.input_index_init; let first_element = input[0]; let index_multiplier = self.input_index_multiplier.clone(); // loop over the output array and use AVX gathers to reorder data from the input let mut chunks_iter = (&mut output[1..]).chunks_exact_mut(T::VectorType::COMPLEX_PER_VECTOR); for chunk in chunks_iter.by_ref() { let gathered_elements = T::VectorType::gather64_complex_avx2(input.as_ptr(), indexes); // advance our indexes indexes = index_multiplier.mul_rem(indexes); // keep the sum of data updated vector_sum = AvxVector::add(vector_sum, gathered_elements); // Store this chunk chunk.store_complex(gathered_elements, 0); } // at this point, we either have 0 or 2 remaining elements to gather. because we know our length ends in 1 or 3.
so when we subtract 1 for the inner FFT, that gives us 0 or 2 let output_remainder = chunks_iter.into_remainder(); if output_remainder.len() == 2 { let half_data = AvxVector128::gather64_complex_avx2(input.as_ptr(), _mm256_castsi256_si128(indexes)); // add this last chunk to our sum vector_sum = AvxVector::add(vector_sum, AvxVector128::zero_extend(half_data)); // store the remainder in the last chunk output_remainder.store_partial2_complex(half_data, 0); } (first_element, vector_sum.hadd_complex() + first_element) } // Do the necessary finalization for rader's algorithm: Reorder the inputs into the output buffer, conjugating the input as we go, and add the first input value to every output value #[target_feature(enable = "avx2", enable = "avx", enable = "fma")] unsafe fn finalize_raders(&self, input: &[Complex<T>], output: &mut [Complex<T>], first_input: Complex<T>) { let output_add = AvxVector256::broadcast_complex_elements(first_input); // We need to conjugate elements as a part of the finalization step, and sadly we can't roll it into any other instructions. So we'll do it via an xor. let conjugation_mask = AvxVector256::broadcast_complex_elements(Complex::new(T::zero(), -T::zero())); let mut chunks_iter = (&mut output[1..]).chunks_exact_mut(T::VectorType::COMPLEX_PER_VECTOR); for (i, chunk) in chunks_iter.by_ref().enumerate() { let index_chunk = *self.output_index_mapping.get_unchecked(i); let gathered_elements = T::VectorType::gather32_complex_avx2(input.as_ptr(), index_chunk); let conjugated_elements = AvxVector::xor(gathered_elements, conjugation_mask); // Add the first input value to each output value, then store let added_elements = AvxVector::add(output_add, conjugated_elements); chunk.store_complex(added_elements, 0); } // at this point, we either have 0 or 2 remaining elements to gather. because we know our length ends in 1 or 3. 
so when we subtract 1 for the inner FFT, that gives us 0 or 2 let output_remainder = chunks_iter.into_remainder(); if output_remainder.len() == 2 { let index_chunk = *self.output_index_mapping.get_unchecked(self.output_index_mapping.len() - 1); let half_data = AvxVector128::gather32_complex_avx2(input.as_ptr(), index_chunk); let conjugated_elements = AvxVector::xor(half_data, conjugation_mask.lo()); // Add the first input value to each output value, then store let added_elements = AvxVector::add(output_add.lo(), conjugated_elements); output_remainder.store_partial2_complex(added_elements, 0); } } fn perform_fft_out_of_place(&self, input: &mut [Complex<T>], output: &mut [Complex<T>], scratch: &mut [Complex<T>]) { let (first_input, first_output) = unsafe { self.prepare_raders(input, output) }; let inner_input = &mut input[1..]; let inner_output = &mut output[1..]; // perform the first of two inner FFTs let inner_scratch = if scratch.len() > 0 { &mut scratch[..] } else { &mut inner_input[..] }; self.inner_fft.process_inplace_with_scratch(inner_output, inner_scratch); // multiply the inner result with our cached setup data // also conjugate every entry. this sets us up to do an inverse FFT // (because an inverse FFT is equivalent to a normal FFT where you conjugate both the inputs and outputs) unsafe { avx_vector::pairwise_complex_mul_conjugated(&mut inner_output[..], &mut inner_input[..], &self.twiddles) }; // execute the second FFT let inner_scratch = if scratch.len() > 0 { scratch } else { &mut inner_output[..] 
}; self.inner_fft.process_inplace_with_scratch(inner_input, inner_scratch); // copy the final values into the output, reordering as we go output[0] = first_output; unsafe { self.finalize_raders(input, output, first_input); } } fn perform_fft_inplace(&self, buffer: &mut [Complex<T>], scratch: &mut [Complex<T>]) { let (first_input, first_output) = unsafe { self.prepare_raders(buffer, scratch) }; let (scratch, extra_scratch) = scratch.split_at_mut(self.len()); let truncated_scratch = &mut scratch[1..]; // perform the first of two inner FFTs let inner_scratch = if extra_scratch.len() > 0 { extra_scratch } else { &mut buffer[..] }; self.inner_fft.process_inplace_with_scratch(truncated_scratch, inner_scratch); // multiply the inner result with our cached setup data // also conjugate every entry. this sets us up to do an inverse FFT // (because an inverse FFT is equivalent to a normal FFT where you conjugate both the inputs and outputs) unsafe { avx_vector::pairwise_complex_mul_assign_conjugated(truncated_scratch, &self.twiddles) }; // execute the second FFT self.inner_fft.process_inplace_with_scratch(truncated_scratch, inner_scratch); // copy the final values into the output, reordering as we go buffer[0] = first_output; unsafe { self.finalize_raders(scratch, buffer, first_input); } } } boilerplate_avx_fft!(RadersAvx2, |this: &RadersAvx2<_>| this.len, |this: &RadersAvx2<_>| this.inplace_scratch_len, |this: &RadersAvx2<_>| this.outofplace_scratch_len ); #[cfg(test)] mod unit_tests { use super::*; use std::sync::Arc; use crate::test_utils::check_fft_algorithm; use crate::algorithm::DFT; #[test] fn test_raders_avx_f32() { for len in 3..100 { if miller_rabin(len as u64) { test_raders_with_length::<f32>(len, false); test_raders_with_length::<f32>(len, true); } } } #[test] fn test_raders_avx_f64() { for len in 3..100 { if miller_rabin(len as u64) { test_raders_with_length::<f64>(len, false); test_raders_with_length::<f64>(len, true); } } } fn test_raders_with_length<T: AvxNum 
+ num_traits::Float>(len: usize, inverse: bool) { let inner_fft = Arc::new(DFT::new(len - 1, inverse)); let fft = RadersAvx2::new(inner_fft).unwrap(); check_fft_algorithm::<T>(&fft, len, inverse); } } Split off the scratch before passing it to prepare_raders use std::sync::Arc; use std::arch::x86_64::*; use std::convert::TryInto; use num_integer::{Integer, div_ceil}; use num_complex::Complex; use num_traits::Zero; use strength_reduce::StrengthReducedUsize; use primal_check::miller_rabin; use crate::math_utils; use crate::{Length, IsInverse, Fft}; use super::{AvxNum, avx_vector::{AvxVector, AvxVector256, AvxVector128, AvxArray, AvxArrayMut}}; use super::avx_vector; // This struct wraps the necessary data to compute (a * b) % divisor, where b and divisor are determined at runtime but rarely change, and a changes on every call. // It's written using AVX2 instructions and assumes the input a are 64-bit integers, and has a restriction that each a, b, and divisor must be 31-bit numbers or smaller. #[derive(Clone)] struct VectorizedMultiplyMod { b: __m256i, divisor: __m256i, intermediate: __m256i, } impl VectorizedMultiplyMod { #[target_feature(enable = "avx")] unsafe fn new(b: u32, divisor: u32) -> Self { assert!(divisor.leading_zeros() > 0, "divisor must be less than {}, got {}", 1 << 31, divisor); let b = b % divisor; let intermediate = ((b as i64) << 32) / divisor as i64; Self { b: _mm256_set1_epi64x(b as i64), divisor: _mm256_set1_epi64x(divisor as i64), intermediate: _mm256_set1_epi64x(intermediate), } } // Input: 4 unsigned 64-bit numbers, each less than 2^30 // Output: (x * multiplier) % divisor for each x in input #[allow(unused)] #[target_feature(enable = "avx2")] unsafe fn mul_rem(&self, a: __m256i) -> __m256i { // Pretty hacky, but we need to prove to the compiler that each entry of the divisor is a 32-bit number, by blending the divisor vector with zeroes in the upper bits of each number. 
// If we don't do this manually, the compiler will do it anyways, but only for _mm256_mul_epu32, not for the _mm256_sub_epi64 correction step at the end // That inconsistency results in sub-optimal codegen where the compiler inserts extra code to handle the case where divisor is 64-bit. It also results in using one more register than necessary. // Since we know that can't happen, we can placate the compiler by explicitly zeroing the upper 32 bit of each divisor and relying on the compiler to lift it out of the loop. let masked_divisor = _mm256_blend_epi32(self.divisor, _mm256_setzero_si256(), 0xAA); // compute the integer quotient of (a * b) / divisor. Our precomputed intermediate value lets us skip the expensive division via arithmetic strength reduction let quotient = _mm256_srli_epi64(_mm256_mul_epu32(a, self.intermediate), 32); // Now we can compute numerator - quotient * divisor to get the remainder let numerator = _mm256_mul_epu32(a, self.b); let quotient_product = _mm256_mul_epu32(quotient, masked_divisor); // Standard remainder formula: remainder = numerator - quotient * divisor let remainder = _mm256_sub_epi64(numerator, quotient_product); // it's possible for the "remainder" to end up between divisor and 2 * divisor. so we'll subtract divisor from remainder, which will make some of the result negative // We can then use the subtracted result as the input to a blendv. Sadly avx doesn't have a blendv_epi32 or blendv_epi64, so we're gonna do blendv_pd instead // this works because blendv looks at the uppermost bit to decide which variable to use, and for a two's complement i64, the upper most bit is 1 when the number is negative!
// So when the subtraction result is negative, the uppermost bit is 1, which means the blend will choose the second param, which is the unsubtracted remainder let casted_remainder = _mm256_castsi256_pd(remainder); let subtracted_remainder = _mm256_castsi256_pd(_mm256_sub_epi64(remainder, masked_divisor)); let wrapped_remainder = _mm256_castpd_si256(_mm256_blendv_pd(subtracted_remainder, casted_remainder, subtracted_remainder)); wrapped_remainder } } /// Implementation of Rader's Algorithm /// /// This algorithm computes a prime-sized FFT in O(nlogn) time. It does this by converting this size n FFT into a /// size (n - 1) which is guaranteed to be composite. /// /// The worst case for this algorithm is when (n - 1) is 2 * prime, resulting in a /// [Cunningham Chain](https://en.wikipedia.org/wiki/Cunningham_chain) /// /// ~~~ /// // Computes a forward FFT of size 1201 (prime number), using Rader's Algorithm /// use rustfft::algorithm::avx::RadersAvx2; /// use rustfft::{Fft, FFTplanner}; /// use rustfft::num_complex::Complex; /// use rustfft::num_traits::Zero; /// /// let mut input: Vec<Complex<f32>> = vec![Zero::zero(); 1201]; /// let mut output: Vec<Complex<f32>> = vec![Zero::zero(); 1201]; /// /// // plan a FFT of size n - 1 = 1200 /// let mut planner = FFTplanner::new(false); /// let inner_fft = planner.plan_fft(1200); /// /// let fft = RadersAvx2::new(inner_fft).unwrap(); /// fft.process(&mut input, &mut output); /// ~~~ /// /// Rader's Algorithm is relatively expensive compared to other FFT algorithms. Benchmarking shows that it is up to /// an order of magnitude slower than similar composite sizes. In the example size above of 1201, benchmarking shows /// that it takes 2.5x more time to compute than a FFT of size 1200. 
pub struct RadersAvx2<T: AvxNum> { input_index_multiplier: VectorizedMultiplyMod, input_index_init: __m256i, output_index_mapping: Box<[__m128i]>, twiddles: Box<[T::VectorType]>, inner_fft: Arc<dyn Fft<T>>, len: usize, inplace_scratch_len: usize, outofplace_scratch_len: usize, inverse: bool, } impl<T: AvxNum> RadersAvx2<T> { /// Preallocates necessary arrays and precomputes necessary data to efficiently compute the FFT /// Returns Ok(instance) if this machine has the required instruction sets ("avx", "fma", and "avx2"), Err() if some instruction sets are missing /// /// # Panics /// Panics if `inner_fft_len() + 1` is not a prime number. #[inline] pub fn new(inner_fft: Arc<dyn Fft<T>>) -> Result<Self, ()> { let has_avx = is_x86_feature_detected!("avx"); let has_avx2 = is_x86_feature_detected!("avx2"); let has_fma = is_x86_feature_detected!("fma"); if has_avx && has_avx2 && has_fma { // Safety: new_with_avx2 requires the "avx" feature set. Since we know it's present, we're safe Ok(unsafe { Self::new_with_avx(inner_fft) }) } else { Err(()) } } #[target_feature(enable = "avx")] unsafe fn new_with_avx(inner_fft: Arc<dyn Fft<T>>) -> Self { let inner_fft_len = inner_fft.len(); let len = inner_fft_len + 1; assert!(miller_rabin(len as u64), "For raders algorithm, inner_fft.len() + 1 must be prime. Expected prime number, got {} + 1 = {}", inner_fft_len, len); let inverse = inner_fft.is_inverse(); let reduced_len = StrengthReducedUsize::new(len); // compute the primitive root and its inverse for this size let primitive_root = math_utils::primitive_root(len as u64).unwrap() as usize; // compute the multiplicative inverse of primative_root mod len and vice versa. 
// i64::extended_gcd will compute both the inverse of left mod right, and the inverse of right mod left, but we're only going to use one of them // the primitive root inverse might be negative, if so make it positive by wrapping let gcd_data = i64::extended_gcd(&(primitive_root as i64), &(len as i64)); let primitive_root_inverse = if gcd_data.x >= 0 { gcd_data.x } else { gcd_data.x + len as i64 } as usize; // precompute the coefficients to use inside the process method let unity_scale = T::from_f64(1f64 / inner_fft_len as f64).unwrap(); let mut inner_fft_input = vec![Complex::zero(); inner_fft_len]; let mut twiddle_input = 1; for input_cell in &mut inner_fft_input { let twiddle = T::generate_twiddle_factor(twiddle_input, len, inverse); *input_cell = twiddle * unity_scale; twiddle_input = (twiddle_input * primitive_root_inverse) % reduced_len; } let required_inner_scratch = inner_fft.get_inplace_scratch_len(); let extra_inner_scratch = if required_inner_scratch <= inner_fft_len { 0 } else { required_inner_scratch }; //precompute a FFT of our reordered twiddle factors let mut inner_fft_scratch = vec![Zero::zero(); required_inner_scratch]; inner_fft.process_inplace_with_scratch(&mut inner_fft_input, &mut inner_fft_scratch); // When computing the FFT, we'll want this array to be pre-conjugated, so conjugate it. at the same time, convert it to vectors for convenient use later.
let conjugation_mask = AvxVector256::broadcast_complex_elements(Complex::new(T::zero(), -T::zero())); let inner_fft_multiplier : Box<[_]> = inner_fft_input.chunks(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| { let chunk_vector = match chunk.len() { 1 => chunk.load_partial1_complex(0).zero_extend(), 2 => if chunk.len() == T::VectorType::COMPLEX_PER_VECTOR { chunk.load_complex(0) } else {chunk.load_partial2_complex(0).zero_extend()}, 3 => chunk.load_partial3_complex(0), 4 => chunk.load_complex(0), _ => unreachable!() }; AvxVector::xor(chunk_vector, conjugation_mask) // compute our conjugation by xoring our data with a precomputed mask }).collect(); // Set up the data for our input index remapping computation const NUM_POWERS : usize = 5; let mut root_powers = [0; NUM_POWERS]; let mut current_power = 1; for i in 0..NUM_POWERS { root_powers[i] = current_power; current_power = (current_power * primitive_root) % reduced_len; } let (input_index_multiplier, input_index_init) = if T::VectorType::COMPLEX_PER_VECTOR == 4 { (VectorizedMultiplyMod::new(root_powers[4] as u32, len as u32), _mm256_loadu_si256(root_powers.as_ptr().add(1) as *const __m256i)) } else { let duplicated_powers = [root_powers[1],root_powers[1],root_powers[2],root_powers[2],]; (VectorizedMultiplyMod::new(root_powers[2] as u32, len as u32), _mm256_loadu_si256(duplicated_powers.as_ptr() as *const __m256i)) }; // Set up our output index remapping. Ideally we could compute the output indexes on the fly, but the output reindexing requires scatter, which doesn't exist until avx-512 // Instead, we can invert the scatter indexes to be gather indexes. But if there's an algorithmic way to compute this, I don't know what it is. So instead, we're going to precompute the mapping // We want enough elements in our array to fill out an entire set of vectors so that we don't have to deal with any partial indexes etc. 
let mapping_size = 1 + div_ceil(len, T::VectorType::COMPLEX_PER_VECTOR) * T::VectorType::COMPLEX_PER_VECTOR; let mut output_mapping_inverse: Vec<i32> = vec![0; mapping_size]; let mut output_index = 1; for i in 1..len { output_index = (output_index * primitive_root_inverse) % reduced_len; output_mapping_inverse[output_index] = i.try_into().unwrap(); } // the actual vector of indexes depends on whether we're f32 or f64 let output_index_mapping = if T::VectorType::COMPLEX_PER_VECTOR == 4 { (&output_mapping_inverse[1..]).chunks_exact(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| _mm_loadu_si128(chunk.as_ptr() as *const __m128i)).collect::<Box<[__m128i]>>() } else { (&output_mapping_inverse[1..]).chunks_exact(T::VectorType::COMPLEX_PER_VECTOR).map(|chunk| { let duplicated_indexes = [chunk[0], chunk[0], chunk[1], chunk[1]]; _mm_loadu_si128(duplicated_indexes.as_ptr() as *const __m128i) }).collect::<Box<[__m128i]>>() }; Self { input_index_multiplier, input_index_init, output_index_mapping, inner_fft: inner_fft, twiddles: inner_fft_multiplier, len, inplace_scratch_len: len + extra_inner_scratch, outofplace_scratch_len: extra_inner_scratch, inverse, } } // Do the necessary setup for rader's algorithm: Reorder the inputs into the output buffer, gather a sum of all inputs. 
Return the first input, and the sum of all inputs #[target_feature(enable = "avx2", enable = "avx", enable = "fma")] unsafe fn prepare_raders(&self, input: &[Complex<T>], output: &mut [Complex<T>]) -> (Complex<T>, Complex<T>) { let mut vector_sum = T::VectorType::zero(); let mut indexes = self.input_index_init; let first_element = input[0]; let index_multiplier = self.input_index_multiplier.clone(); // loop over the output array and use AVX gathers to reorder data from the input let mut chunks_iter = (&mut output[1..]).chunks_exact_mut(T::VectorType::COMPLEX_PER_VECTOR); for chunk in chunks_iter.by_ref() { let gathered_elements = T::VectorType::gather64_complex_avx2(input.as_ptr(), indexes); // advance our indexes indexes = index_multiplier.mul_rem(indexes); // keep the sum of data updated vector_sum = AvxVector::add(vector_sum, gathered_elements); // Store this chunk chunk.store_complex(gathered_elements, 0); } // at this point, we either have 0 or 2 remaining elements to gather. because we know our length ends in 1 or 3.
so when we subtract 1 for the inner FFT, that gives us 0 or 2 let output_remainder = chunks_iter.into_remainder(); if output_remainder.len() == 2 { let half_data = AvxVector128::gather64_complex_avx2(input.as_ptr(), _mm256_castsi256_si128(indexes)); // add this last chunk to our sum vector_sum = AvxVector::add(vector_sum, AvxVector128::zero_extend(half_data)); // store the remainder in the last chunk output_remainder.store_partial2_complex(half_data, 0); } (first_element, vector_sum.hadd_complex() + first_element) } // Do the necessary finalization for rader's algorithm: Reorder the inputs into the output buffer, conjugating the input as we go, and add the first input value to every output value #[target_feature(enable = "avx2", enable = "avx", enable = "fma")] unsafe fn finalize_raders(&self, input: &[Complex<T>], output: &mut [Complex<T>], first_input: Complex<T>) { let output_add = AvxVector256::broadcast_complex_elements(first_input); // We need to conjugate elements as a part of the finalization step, and sadly we can't roll it into any other instructions. So we'll do it via an xor. let conjugation_mask = AvxVector256::broadcast_complex_elements(Complex::new(T::zero(), -T::zero())); let mut chunks_iter = (&mut output[1..]).chunks_exact_mut(T::VectorType::COMPLEX_PER_VECTOR); for (i, chunk) in chunks_iter.by_ref().enumerate() { let index_chunk = *self.output_index_mapping.get_unchecked(i); let gathered_elements = T::VectorType::gather32_complex_avx2(input.as_ptr(), index_chunk); let conjugated_elements = AvxVector::xor(gathered_elements, conjugation_mask); // Add the first input value to each output value, then store let added_elements = AvxVector::add(output_add, conjugated_elements); chunk.store_complex(added_elements, 0); } // at this point, we either have 0 or 2 remaining elements to gather. because we know our length ends in 1 or 3. 
so when we subtract 1 for the inner FFT, that gives us 0 or 2 let output_remainder = chunks_iter.into_remainder(); if output_remainder.len() == 2 { let index_chunk = *self.output_index_mapping.get_unchecked(self.output_index_mapping.len() - 1); let half_data = AvxVector128::gather32_complex_avx2(input.as_ptr(), index_chunk); let conjugated_elements = AvxVector::xor(half_data, conjugation_mask.lo()); // Add the first input value to each output value, then store let added_elements = AvxVector::add(output_add.lo(), conjugated_elements); output_remainder.store_partial2_complex(added_elements, 0); } } fn perform_fft_out_of_place(&self, input: &mut [Complex<T>], output: &mut [Complex<T>], scratch: &mut [Complex<T>]) { let (first_input, first_output) = unsafe { self.prepare_raders(input, output) }; let inner_input = &mut input[1..]; let inner_output = &mut output[1..]; // perform the first of two inner FFTs let inner_scratch = if scratch.len() > 0 { &mut scratch[..] } else { &mut inner_input[..] }; self.inner_fft.process_inplace_with_scratch(inner_output, inner_scratch); // multiply the inner result with our cached setup data // also conjugate every entry. this sets us up to do an inverse FFT // (because an inverse FFT is equivalent to a normal FFT where you conjugate both the inputs and outputs) unsafe { avx_vector::pairwise_complex_mul_conjugated(&mut inner_output[..], &mut inner_input[..], &self.twiddles) }; // execute the second FFT let inner_scratch = if scratch.len() > 0 { scratch } else { &mut inner_output[..] 
}; self.inner_fft.process_inplace_with_scratch(inner_input, inner_scratch); // copy the final values into the output, reordering as we go output[0] = first_output; unsafe { self.finalize_raders(input, output, first_input); } } fn perform_fft_inplace(&self, buffer: &mut [Complex<T>], scratch: &mut [Complex<T>]) { let (scratch, extra_scratch) = scratch.split_at_mut(self.len()); let (first_input, first_output) = unsafe { self.prepare_raders(buffer, scratch) }; let truncated_scratch = &mut scratch[1..]; // perform the first of two inner FFTs let inner_scratch = if extra_scratch.len() > 0 { extra_scratch } else { &mut buffer[..] }; self.inner_fft.process_inplace_with_scratch(truncated_scratch, inner_scratch); // multiply the inner result with our cached setup data // also conjugate every entry. this sets us up to do an inverse FFT // (because an inverse FFT is equivalent to a normal FFT where you conjugate both the inputs and outputs) unsafe { avx_vector::pairwise_complex_mul_assign_conjugated(truncated_scratch, &self.twiddles) }; // execute the second FFT self.inner_fft.process_inplace_with_scratch(truncated_scratch, inner_scratch); // copy the final values into the output, reordering as we go buffer[0] = first_output; unsafe { self.finalize_raders(scratch, buffer, first_input); } } } boilerplate_avx_fft!(RadersAvx2, |this: &RadersAvx2<_>| this.len, |this: &RadersAvx2<_>| this.inplace_scratch_len, |this: &RadersAvx2<_>| this.outofplace_scratch_len ); #[cfg(test)] mod unit_tests { use super::*; use std::sync::Arc; use crate::test_utils::check_fft_algorithm; use crate::algorithm::DFT; #[test] fn test_raders_avx_f32() { for len in 3..100 { if miller_rabin(len as u64) { test_raders_with_length::<f32>(len, false); test_raders_with_length::<f32>(len, true); } } } #[test] fn test_raders_avx_f64() { for len in 3..100 { if miller_rabin(len as u64) { test_raders_with_length::<f64>(len, false); test_raders_with_length::<f64>(len, true); } } } fn test_raders_with_length<T: AvxNum 
+ num_traits::Float>(len: usize, inverse: bool) { let inner_fft = Arc::new(DFT::new(len - 1, inverse)); let fft = RadersAvx2::new(inner_fft).unwrap(); check_fft_algorithm::<T>(&fft, len, inverse); } }
//! `i686`'s Streaming SIMD Extensions 4a (SSE4a) use core::mem; use v128::*; #[cfg(test)] use stdsimd_test::assert_instr; #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.sse4a.extrq"] fn extrq(x: i64x2, y: i8x16) -> i64x2; #[link_name = "llvm.x86.sse4a.insertq"] fn insertq(x: i64x2, y: i64x2) -> i64x2; #[link_name = "llvm.x86.sse4a.movnt.sd"] fn movntsd(x: *mut f64, y: f64x2); #[link_name = "llvm.x86.sse4a.movnt.ss"] fn movntss(x: *mut f32, y: f32x4); } // FIXME(blocked on #248): _mm_extracti_si64(x, len, idx) // EXTRQ // FIXME(blocked on #248): _mm_inserti_si64(x, y, len, idx) // INSERTQ /// Extracts the bit range specified by `y` from the lower 64 bits of `x`. /// /// The [13:8] bits of `y` specify the index of the bit-range to extract. The /// [5:0] bits of `y` specify the length of the bit-range to extract. All other /// bits are ignored. /// /// If the length is zero, it is interpreted as `64`. If the length and index /// are zero, the lower 64 bits of `x` are extracted. /// /// If `length == 0 && index > 0` or `lenght + index > 64` the result is /// undefined. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(extrq))] pub unsafe fn _mm_extract_si64(x: i64x2, y: i64x2) -> i64x2 { extrq(x, mem::transmute(y)) } /// Inserts the `[length:0]` bits of `y` into `x` at `index`. /// /// The bits of `y`: /// /// - `[69:64]` specify the `length`, /// - `[77:72]` specify the index. /// /// If the `length` is zero it is interpreted as `64`. If `index + length > 64` /// or `index > 0 && length == 0` the result is undefined. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(insertq))] pub unsafe fn _mm_insert_si64(x: i64x2, y: i64x2) -> i64x2 { insertq(x, mem::transmute(y)) } /// Non-temporal store of `a.1` into `p`. 
#[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(movntsd))] pub unsafe fn _mm_stream_sd(p: *mut f64, a: f64x2) { movntsd(p, a); } /// Non-temporal store of `a.3` into `p`. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(movntss))] pub unsafe fn _mm_stream_ss(p: *mut f32, a: f32x4) { movntss(p, a); } #[cfg(test)] mod tests { use stdsimd_test::simd_test; use x86::i686::sse4a; use v128::*; #[simd_test = "sse4a"] unsafe fn _mm_extract_si64() { let b = 0b0110_0000_0000_i64; // ^^^^ bit range extracted let x = i64x2::new(b, 0); let v = 0b001000___00___000100_i64; // ^idx: 2^3 = 8 ^length = 2^2 = 4 let y = i64x2::new(v, 0); let e = i64x2::new(0b0110_i64, 0); let r = sse4a::_mm_extract_si64(x, y); assert_eq!(r, e); } #[simd_test = "sse4a"] unsafe fn _mm_insert_si64() { let i = 0b0110_i64; // ^^^^ bit range inserted let z = 0b1010_1010_1010i64; // ^^^^ bit range replaced let e = 0b0110_1010_1010i64; // ^^^^ replaced 1010 with 0110 let x = i64x2::new(z, 0); let expected = i64x2::new(e, 0); let v = 0b001000___00___000100_i64; // ^idx: 2^3 = 8 ^length = 2^2 = 4 let y = i64x2::new(i, v); let r = sse4a::_mm_insert_si64(x, y); assert_eq!(r, expected); } #[repr(align(16))] struct MemoryF64 { data: [f64; 2], } #[simd_test = "sse4a"] unsafe fn _mm_stream_sd() { let mut mem = MemoryF64 { data: [1.0_f64, 2.0], }; { let vals = &mut mem.data; let d = vals.as_mut_ptr(); let x = f64x2::new(3.0, 4.0); sse4a::_mm_stream_sd(d, x); } assert_eq!(mem.data[0], 4.0); assert_eq!(mem.data[1], 2.0); } #[repr(align(16))] struct MemoryF32 { data: [f32; 4], } #[simd_test = "sse4a"] unsafe fn _mm_stream_ss() { let mut mem = MemoryF32 { data: [1.0_f32, 2.0, 3.0, 4.0], }; { let vals = &mut mem.data; let d = vals.as_mut_ptr(); let x = f32x4::new(5.0, 6.0, 7.0, 8.0); sse4a::_mm_stream_ss(d, x); } assert_eq!(mem.data[0], 8.0); assert_eq!(mem.data[1], 2.0); assert_eq!(mem.data[2], 3.0); assert_eq!(mem.data[3], 4.0); } } fix sse4a _mm_stream_{ss, sd} 
tests and docs //! `i686`'s Streaming SIMD Extensions 4a (SSE4a) use core::mem; use v128::*; #[cfg(test)] use stdsimd_test::assert_instr; #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.sse4a.extrq"] fn extrq(x: i64x2, y: i8x16) -> i64x2; #[link_name = "llvm.x86.sse4a.insertq"] fn insertq(x: i64x2, y: i64x2) -> i64x2; #[link_name = "llvm.x86.sse4a.movnt.sd"] fn movntsd(x: *mut f64, y: f64x2); #[link_name = "llvm.x86.sse4a.movnt.ss"] fn movntss(x: *mut f32, y: f32x4); } // FIXME(blocked on #248): _mm_extracti_si64(x, len, idx) // EXTRQ // FIXME(blocked on #248): _mm_inserti_si64(x, y, len, idx) // INSERTQ /// Extracts the bit range specified by `y` from the lower 64 bits of `x`. /// /// The [13:8] bits of `y` specify the index of the bit-range to extract. The /// [5:0] bits of `y` specify the length of the bit-range to extract. All other /// bits are ignored. /// /// If the length is zero, it is interpreted as `64`. If the length and index /// are zero, the lower 64 bits of `x` are extracted. /// /// If `length == 0 && index > 0` or `lenght + index > 64` the result is /// undefined. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(extrq))] pub unsafe fn _mm_extract_si64(x: i64x2, y: i64x2) -> i64x2 { extrq(x, mem::transmute(y)) } /// Inserts the `[length:0]` bits of `y` into `x` at `index`. /// /// The bits of `y`: /// /// - `[69:64]` specify the `length`, /// - `[77:72]` specify the index. /// /// If the `length` is zero it is interpreted as `64`. If `index + length > 64` /// or `index > 0 && length == 0` the result is undefined. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(insertq))] pub unsafe fn _mm_insert_si64(x: i64x2, y: i64x2) -> i64x2 { insertq(x, mem::transmute(y)) } /// Non-temporal store of `a.0` into `p`. 
#[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(movntsd))] pub unsafe fn _mm_stream_sd(p: *mut f64, a: f64x2) { movntsd(p, a); } /// Non-temporal store of `a.0` into `p`. #[inline(always)] #[target_feature = "+sse4a"] #[cfg_attr(test, assert_instr(movntss))] pub unsafe fn _mm_stream_ss(p: *mut f32, a: f32x4) { movntss(p, a); } #[cfg(test)] mod tests { use stdsimd_test::simd_test; use x86::i686::sse4a; use v128::*; #[simd_test = "sse4a"] unsafe fn _mm_extract_si64() { let b = 0b0110_0000_0000_i64; // ^^^^ bit range extracted let x = i64x2::new(b, 0); let v = 0b001000___00___000100_i64; // ^idx: 2^3 = 8 ^length = 2^2 = 4 let y = i64x2::new(v, 0); let e = i64x2::new(0b0110_i64, 0); let r = sse4a::_mm_extract_si64(x, y); assert_eq!(r, e); } #[simd_test = "sse4a"] unsafe fn _mm_insert_si64() { let i = 0b0110_i64; // ^^^^ bit range inserted let z = 0b1010_1010_1010i64; // ^^^^ bit range replaced let e = 0b0110_1010_1010i64; // ^^^^ replaced 1010 with 0110 let x = i64x2::new(z, 0); let expected = i64x2::new(e, 0); let v = 0b001000___00___000100_i64; // ^idx: 2^3 = 8 ^length = 2^2 = 4 let y = i64x2::new(i, v); let r = sse4a::_mm_insert_si64(x, y); assert_eq!(r, expected); } #[repr(align(16))] struct MemoryF64 { data: [f64; 2], } #[simd_test = "sse4a"] unsafe fn _mm_stream_sd() { let mut mem = MemoryF64 { data: [1.0_f64, 2.0], }; { let vals = &mut mem.data; let d = vals.as_mut_ptr(); let x = f64x2::new(3.0, 4.0); sse4a::_mm_stream_sd(d, x); } assert_eq!(mem.data[0], 3.0); assert_eq!(mem.data[1], 2.0); } #[repr(align(16))] struct MemoryF32 { data: [f32; 4], } #[simd_test = "sse4a"] unsafe fn _mm_stream_ss() { let mut mem = MemoryF32 { data: [1.0_f32, 2.0, 3.0, 4.0], }; { let vals = &mut mem.data; let d = vals.as_mut_ptr(); let x = f32x4::new(5.0, 6.0, 7.0, 8.0); sse4a::_mm_stream_ss(d, x); } assert_eq!(mem.data[0], 5.0); assert_eq!(mem.data[1], 2.0); assert_eq!(mem.data[2], 3.0); assert_eq!(mem.data[3], 4.0); } }
use stdweb::{ js, unstable::TryInto, web::{document, html_element::CanvasElement, window, CanvasRenderingContext2d, FillRule}, }; // pub use crate::image::Image as InnerImage; use crate::{utils::*, FontConfig, RenderPipeline, RenderTarget, TextMetrics}; pub use self::image::*; mod image; /// The RenderContext2D trait, provides the rendering context. It is used for drawing shapes, text, images, and other objects. pub struct RenderContext2D { canvas_render_context_2_d: CanvasRenderingContext2d, font_config: FontConfig, } impl RenderContext2D { /// Creates a new render context with the given width and height. pub fn new(width: f64, height: f64) -> Self { let canvas: CanvasElement = document() .create_element("canvas") .unwrap() .try_into() .unwrap(); canvas.set_width(width as u32); canvas.set_height(height as u32); let context: CanvasRenderingContext2d = canvas.get_context().unwrap(); let device_pixel_ratio = window().device_pixel_ratio(); let backing_store_ratio = js! { var context = @{&context}; return context.webkitBackingStorePixelRatio || context.mozBackingStorePixelRatio || context.msBackingStorePixelRatio || context.oBackingStorePixelRatio || context.backingStorePixelRatio || 1; }; let ratio: f64 = js! { return @{&device_pixel_ratio} / @{&backing_store_ratio}; } .try_into() .unwrap(); if device_pixel_ratio != backing_store_ratio { let old_width = canvas.width(); let old_height = canvas.height(); canvas.set_width((old_width as f64 * ratio) as u32); canvas.set_height((old_height as f64 * ratio) as u32); js! { @{&canvas}.style.width = @{&old_width} + "px"; @{&canvas}.style.height = @{&old_height} + "px"; } context.scale(ratio, ratio); } Self::from_context(context) } /// Creates a new render context 2d. 
pub fn from_context(canvas_render_context_2_d: CanvasRenderingContext2d) -> Self { canvas_render_context_2_d.set_text_baseline(stdweb::web::TextBaseline::Middle); RenderContext2D { canvas_render_context_2_d, font_config: FontConfig::default(), } } // Rectangles /// Draws a filled rectangle whose starting point is at the coordinates {x, y} with the specified width and height and whose style is determined by the fillStyle attribute. pub fn fill_rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d .fill_rect(x, y, width, height); } /// Draws a rectangle that is stroked (outlined) according to the current strokeStyle and other context settings. pub fn stroke_rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d .stroke_rect(x, y, width, height); } // Text /// Draws (fills) a given text at the given (x, y) position. pub fn fill_text(&mut self, text: &str, x: f64, y: f64) { self.canvas_render_context_2_d .set_text_baseline(stdweb::web::TextBaseline::Middle); self.canvas_render_context_2_d.fill_text( text, x, y + self.font_config.font_size.ceil() / 2.0, None, ); } pub fn measure( &mut self, text: &str, font_size: f64, family: impl Into<String>, ) -> TextMetrics { self.set_font_family(family); self.set_font_size(font_size); self.measure_text(text) } /// Returns a TextMetrics object. pub fn measure_text(&mut self, text: &str) -> TextMetrics { TextMetrics { width: self .canvas_render_context_2_d .measure_text(text) .unwrap() .get_width(), height: self.font_config.font_size.ceil(), } } /// Fills the current or given path with the current file style. pub fn fill(&mut self) { self.canvas_render_context_2_d.fill(FillRule::default()); } /// Strokes {outlines} the current or given path with the current stroke style. pub fn stroke(&mut self) { self.canvas_render_context_2_d.stroke(); } /// Starts a new path by emptying the list of sub-paths. Call this when you want to create a new path. 
pub fn begin_path(&mut self) { self.canvas_render_context_2_d.begin_path(); } /// Attempts to add a straight line from the current point to the start of the current sub-path. If the shape has already been closed or has only one point, this function does nothing. pub fn close_path(&mut self) { self.canvas_render_context_2_d.close_path(); } /// Adds a rectangle to the current path. pub fn rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d.rect(x, y, width, height); } /// Creates a circular arc centered at (x, y) with a radius of radius. The path starts at startAngle and ends at endAngle. pub fn arc(&mut self, x: f64, y: f64, radius: f64, start_angle: f64, end_angle: f64) { self.canvas_render_context_2_d .arc(x, y, radius, start_angle, end_angle, false); } /// Begins a new sub-path at the point specified by the given {x, y} coordinates. pub fn move_to(&mut self, x: f64, y: f64) { self.canvas_render_context_2_d.move_to(x, y); } /// Adds a straight line to the current sub-path by connecting the sub-path's last point to the specified {x, y} coordinates. pub fn line_to(&mut self, x: f64, y: f64) { self.canvas_render_context_2_d.line_to(x, y); } /// Adds a quadratic Bézier curve to the current sub-path. pub fn quadratic_curve_to(&mut self, cpx: f64, cpy: f64, x: f64, y: f64) { self.canvas_render_context_2_d .quadratic_curve_to(cpx, cpy, x, y); } /// Adds a cubic Bézier curve to the current sub-path. It requires three points: the first two are control points and the third one is the end point. The starting point is the latest point in the current path, which can be changed using MoveTo{} before creating the Bézier curve. pub fn bezier_curve_to(&mut self, cp1x: f64, cp1y: f64, cp2x: f64, cp2y: f64, x: f64, y: f64) { self.canvas_render_context_2_d .bezier_curve_to(cp1x, cp1y, cp2x, cp2y, x, y); } // Draw image /// Draws the image. 
pub fn draw_image(&mut self, image: &Image, x: f64, y: f64) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(i, @{&x}, @{&y}); } ) } else { // @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } /// Draws the image with the given size. pub fn draw_image_with_size(&mut self, image: &Image, x: f64, y: f64, width: f64, height: f64) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(i, @{&x}, @{&y}, @{&width}, @{&height}); } ) } else { @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } /// Draws the given part of the image. pub fn draw_image_with_clip_and_size( &mut self, image: &Image, clip_x: f64, clip_y: f64, clip_width: f64, clip_height: f64, x: f64, y: f64, width: f64, height: f64, ) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(img, @{&clip_x}, @{&clip_y}, @{&clip_width}, @{&clip_height}, @{&x}, @{&y}, @{&width}, @{&height}); } ) } else { @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } pub fn draw_pipeline( &mut self, x: f64, y: f64, width: f64, height: f64, pipeline: Box<dyn RenderPipeline>, ) { let mut render_target = RenderTarget::new(width as u32, height as u32); pipeline.draw_pipeline(&mut render_target); let image_data = self .canvas_render_context_2_d .create_image_data(width, height) .unwrap(); for i in 0..(render_target.data.len() - 1) { let pixel = render_target.data.get(i).unwrap(); let r = ((pixel & 0x00FF0000) >> 16) as u8; let g = ((pixel & 0x0000FF00) >> 8) as u8; let b = (pixel & 0x000000FF) as u8; let a = ((pixel & 
0xFF000000) >> 24) as u8; let index = i as u32 * 4; js!( @{&image_data}.data[@{index} + 0] = @{r}; // R value @{&image_data}.data[@{index} + 1] = @{g}; // G value @{&image_data}.data[@{index} + 2] = @{b}; // B value @{&image_data}.data[@{index} + 3] = @{a}; // A value ); } let canvas: CanvasElement = document() .create_element("canvas") .unwrap() .try_into() .unwrap(); canvas.set_width(width as u32); canvas.set_height(height as u32); let context: CanvasRenderingContext2d = canvas.get_context().unwrap(); context .put_image_data(image_data, 0.0, 0.0) .expect("Could no draw pipeline."); // todo: use await after stdweb futures are stable js!( // use the tempCanvas.toDataURL to create an img object var img = new Image(); img.onload = function () { @{&self.canvas_render_context_2_d}.drawImage(img,@{&x},@{&y}); }; img.src = @{&canvas}.toDataURL(); ); } /// Creates a clipping path from the current sub-paths. Everything drawn after clip() is called appears inside the clipping path only. pub fn clip(&mut self) { self.canvas_render_context_2_d.clip(FillRule::EvenOdd); } // Line styles /// Sets the thickness of lines. pub fn set_line_width(&mut self, line_width: f64) { self.canvas_render_context_2_d.set_line_width(line_width); } /// Specific the font family. pub fn set_font_family(&mut self, family: impl Into<String>) { self.font_config.family = family.into().replace(" Regular", ""); self.canvas_render_context_2_d .set_font(&self.font_config.to_string()); } /// Specifies the font size. pub fn set_font_size(&mut self, size: f64) { self.font_config.font_size = size; self.canvas_render_context_2_d .set_font(&self.font_config.to_string()); } // Fill and stroke style /// Specifies the fill color to use inside shapes. pub fn set_fill_style(&mut self, brush: Brush) { match brush { Brush::SolidColor(color) => { self.canvas_render_context_2_d .set_fill_style_color(&color.to_string()); } _ => (), } } /// Specifies the fill stroke to use inside shapes. 
pub fn set_stroke_style(&mut self, brush: Brush) { match brush { Brush::SolidColor(color) => { self.canvas_render_context_2_d .set_stroke_style_color(&color.to_string()); } _ => (), } } // Transformations /// Sets the tranformation. pub fn set_transform(&mut self, a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) { self.canvas_render_context_2_d .set_transform(a, b, c, d, e, f); } // Canvas states /// Saves the entire state of the canvas by pushing the current state onto a stack. pub fn save(&mut self) { self.canvas_render_context_2_d.save(); } /// Restores the most recently saved canvas state by popping the top entry in the drawing state stack. If there is no saved state, this method does nothing. pub fn restore(&mut self) { self.canvas_render_context_2_d.restore(); } pub fn clear(&mut self, brush: &Brush) { let color = match brush { Brush::SolidColor(color) => color.to_string(), _ => Color::rgba(0, 0, 0, 0).to_string(), }; self.save(); self.canvas_render_context_2_d .set_fill_style_color(color.as_str()); let canvas = self.canvas_render_context_2_d.get_canvas(); self.canvas_render_context_2_d.fill_rect( 0.0, 0.0, canvas.width() as f64, canvas.height() as f64, ); self.restore(); } pub fn set_canvas_render_context_2d( &mut self, canvas_render_context_2_d: CanvasRenderingContext2d, ) { self.canvas_render_context_2_d = canvas_render_context_2_d; } pub fn start(&mut self) {} pub fn finish(&mut self) {} } // --- Conversions --- impl From<&str> for Image { fn from(s: &str) -> Image { Image::new(s) } } impl From<String> for Image { fn from(s: String) -> Image { Image::new(s) } } Update web canvas. use stdweb::{ js, unstable::TryInto, web::{document, html_element::CanvasElement, window, CanvasRenderingContext2d, FillRule}, }; // pub use crate::image::Image as InnerImage; use crate::{utils::*, FontConfig, RenderPipeline, RenderTarget, TextMetrics}; pub use self::image::*; mod image; /// The RenderContext2D trait, provides the rendering context. 
It is used for drawing shapes, text, images, and other objects. pub struct RenderContext2D { canvas_render_context_2_d: CanvasRenderingContext2d, font_config: FontConfig, export_data: Vec<u32>, } impl RenderContext2D { /// Creates a new render context with the given width and height. pub fn new(width: f64, height: f64) -> Self { let canvas: CanvasElement = document() .create_element("canvas") .unwrap() .try_into() .unwrap(); canvas.set_width(width as u32); canvas.set_height(height as u32); let context: CanvasRenderingContext2d = canvas.get_context().unwrap(); let device_pixel_ratio = window().device_pixel_ratio(); let backing_store_ratio = js! { var context = @{&context}; return context.webkitBackingStorePixelRatio || context.mozBackingStorePixelRatio || context.msBackingStorePixelRatio || context.oBackingStorePixelRatio || context.backingStorePixelRatio || 1; }; let ratio: f64 = js! { return @{&device_pixel_ratio} / @{&backing_store_ratio}; } .try_into() .unwrap(); if device_pixel_ratio != backing_store_ratio { let old_width = canvas.width(); let old_height = canvas.height(); canvas.set_width((old_width as f64 * ratio) as u32); canvas.set_height((old_height as f64 * ratio) as u32); js! { @{&canvas}.style.width = @{&old_width} + "px"; @{&canvas}.style.height = @{&old_height} + "px"; } context.scale(ratio, ratio); } Self::from_context(context) } /// Creates a new render context 2d. pub fn from_context(canvas_render_context_2_d: CanvasRenderingContext2d) -> Self { let export_data = vec![ 0; (canvas_render_context_2_d.get_canvas().width() * canvas_render_context_2_d.get_canvas().height()) as usize ]; canvas_render_context_2_d.set_text_baseline(stdweb::web::TextBaseline::Middle); RenderContext2D { canvas_render_context_2_d, font_config: FontConfig::default(), export_data, } } // Rectangles /// Draws a filled rectangle whose starting point is at the coordinates {x, y} with the specified width and height and whose style is determined by the fillStyle attribute. 
pub fn fill_rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d .fill_rect(x, y, width, height); } /// Draws a rectangle that is stroked (outlined) according to the current strokeStyle and other context settings. pub fn stroke_rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d .stroke_rect(x, y, width, height); } // Text /// Draws (fills) a given text at the given (x, y) position. pub fn fill_text(&mut self, text: &str, x: f64, y: f64) { self.canvas_render_context_2_d .set_text_baseline(stdweb::web::TextBaseline::Middle); self.canvas_render_context_2_d.fill_text( text, x, y + self.font_config.font_size.ceil() / 2.0, None, ); } pub fn measure( &mut self, text: &str, font_size: f64, family: impl Into<String>, ) -> TextMetrics { self.set_font_family(family); self.set_font_size(font_size); self.measure_text(text) } /// Returns a TextMetrics object. pub fn measure_text(&mut self, text: &str) -> TextMetrics { TextMetrics { width: self .canvas_render_context_2_d .measure_text(text) .unwrap() .get_width(), height: self.font_config.font_size.ceil(), } } /// Fills the current or given path with the current file style. pub fn fill(&mut self) { self.canvas_render_context_2_d.fill(FillRule::default()); } /// Strokes {outlines} the current or given path with the current stroke style. pub fn stroke(&mut self) { self.canvas_render_context_2_d.stroke(); } /// Starts a new path by emptying the list of sub-paths. Call this when you want to create a new path. pub fn begin_path(&mut self) { self.canvas_render_context_2_d.begin_path(); } /// Attempts to add a straight line from the current point to the start of the current sub-path. If the shape has already been closed or has only one point, this function does nothing. pub fn close_path(&mut self) { self.canvas_render_context_2_d.close_path(); } /// Adds a rectangle to the current path. 
pub fn rect(&mut self, x: f64, y: f64, width: f64, height: f64) { self.canvas_render_context_2_d.rect(x, y, width, height); } /// Creates a circular arc centered at (x, y) with a radius of radius. The path starts at startAngle and ends at endAngle. pub fn arc(&mut self, x: f64, y: f64, radius: f64, start_angle: f64, end_angle: f64) { self.canvas_render_context_2_d .arc(x, y, radius, start_angle, end_angle, false); } /// Begins a new sub-path at the point specified by the given {x, y} coordinates. pub fn move_to(&mut self, x: f64, y: f64) { self.canvas_render_context_2_d.move_to(x, y); } /// Adds a straight line to the current sub-path by connecting the sub-path's last point to the specified {x, y} coordinates. pub fn line_to(&mut self, x: f64, y: f64) { self.canvas_render_context_2_d.line_to(x, y); } /// Adds a quadratic Bézier curve to the current sub-path. pub fn quadratic_curve_to(&mut self, cpx: f64, cpy: f64, x: f64, y: f64) { self.canvas_render_context_2_d .quadratic_curve_to(cpx, cpy, x, y); } /// Adds a cubic Bézier curve to the current sub-path. It requires three points: the first two are control points and the third one is the end point. The starting point is the latest point in the current path, which can be changed using MoveTo{} before creating the Bézier curve. pub fn bezier_curve_to(&mut self, cp1x: f64, cp1y: f64, cp2x: f64, cp2y: f64, x: f64, y: f64) { self.canvas_render_context_2_d .bezier_curve_to(cp1x, cp1y, cp2x, cp2y, x, y); } // Draw image /// Draws the image. pub fn draw_image(&mut self, image: &Image, x: f64, y: f64) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(i, @{&x}, @{&y}); } ) } else { // @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } /// Draws the image with the given size. 
pub fn draw_image_with_size(&mut self, image: &Image, x: f64, y: f64, width: f64, height: f64) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(i, @{&x}, @{&y}, @{&width}, @{&height}); } ) } else { @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } /// Draws the given part of the image. pub fn draw_image_with_clip_and_size( &mut self, image: &Image, clip_x: f64, clip_y: f64, clip_width: f64, clip_height: f64, x: f64, y: f64, width: f64, height: f64, ) { js!( var img = document.image_store.image(@{&image.source}); if(img == null) { img = document.image_store.load_image(@{&image.source}); img.then( function(i) { @{&self.canvas_render_context_2_d}.drawImage(img, @{&clip_x}, @{&clip_y}, @{&clip_width}, @{&clip_height}, @{&x}, @{&y}, @{&width}, @{&height}); } ) } else { @{&self.canvas_render_context_2_d}.drawImage(img, @{&x}, @{&y}); } ); } pub fn draw_pipeline( &mut self, x: f64, y: f64, width: f64, height: f64, pipeline: Box<dyn RenderPipeline>, ) { let mut render_target = RenderTarget::new(width as u32, height as u32); pipeline.draw_pipeline(&mut render_target); let image_data = self .canvas_render_context_2_d .create_image_data(width, height) .unwrap(); for i in 0..(render_target.data.len() - 1) { let pixel = render_target.data.get(i).unwrap(); let r = ((pixel & 0x00FF0000) >> 16) as u8; let g = ((pixel & 0x0000FF00) >> 8) as u8; let b = (pixel & 0x000000FF) as u8; let a = ((pixel & 0xFF000000) >> 24) as u8; let index = i as u32 * 4; js!( @{&image_data}.data[@{index} + 0] = @{r}; // R value @{&image_data}.data[@{index} + 1] = @{g}; // G value @{&image_data}.data[@{index} + 2] = @{b}; // B value @{&image_data}.data[@{index} + 3] = @{a}; // A value ); } let canvas: CanvasElement = document() .create_element("canvas") .unwrap() .try_into() .unwrap(); canvas.set_width(width as u32); 
canvas.set_height(height as u32); let context: CanvasRenderingContext2d = canvas.get_context().unwrap(); context .put_image_data(image_data, 0.0, 0.0) .expect("Could no draw pipeline."); // todo: use await after stdweb futures are stable js!( // use the tempCanvas.toDataURL to create an img object var img = new Image(); img.onload = function () { @{&self.canvas_render_context_2_d}.drawImage(img,@{&x},@{&y}); }; img.src = @{&canvas}.toDataURL(); ); } /// Creates a clipping path from the current sub-paths. Everything drawn after clip() is called appears inside the clipping path only. pub fn clip(&mut self) { self.canvas_render_context_2_d.clip(FillRule::EvenOdd); } // Line styles /// Sets the thickness of lines. pub fn set_line_width(&mut self, line_width: f64) { self.canvas_render_context_2_d.set_line_width(line_width); } /// Specific the font family. pub fn set_font_family(&mut self, family: impl Into<String>) { self.font_config.family = family.into().replace(" Regular", ""); self.canvas_render_context_2_d .set_font(&self.font_config.to_string()); } /// Specifies the font size. pub fn set_font_size(&mut self, size: f64) { self.font_config.font_size = size; self.canvas_render_context_2_d .set_font(&self.font_config.to_string()); } // Fill and stroke style /// Specifies the fill color to use inside shapes. pub fn set_fill_style(&mut self, brush: Brush) { match brush { Brush::SolidColor(color) => { self.canvas_render_context_2_d .set_fill_style_color(&color.to_string()); } _ => (), } } /// Specifies the fill stroke to use inside shapes. pub fn set_stroke_style(&mut self, brush: Brush) { match brush { Brush::SolidColor(color) => { self.canvas_render_context_2_d .set_stroke_style_color(&color.to_string()); } _ => (), } } // Transformations /// Sets the tranformation. 
    pub fn set_transform(&mut self, a: f64, b: f64, c: f64, d: f64, e: f64, f: f64) {
        // Forwards the 2D affine matrix (a b c d e f) straight to the
        // underlying canvas context.
        self.canvas_render_context_2_d
            .set_transform(a, b, c, d, e, f);
    }

    // Canvas states

    /// Saves the entire state of the canvas by pushing the current state onto a stack.
    pub fn save(&mut self) {
        self.canvas_render_context_2_d.save();
    }

    /// Restores the most recently saved canvas state by popping the top entry in the drawing state stack. If there is no saved state, this method does nothing.
    pub fn restore(&mut self) {
        self.canvas_render_context_2_d.restore();
    }

    /// Clears the whole canvas by filling it with `brush`.
    ///
    /// Any non-solid brush falls back to fully transparent black. The caller's
    /// current fill style is preserved via the save()/restore() pair.
    pub fn clear(&mut self, brush: &Brush) {
        let color = match brush {
            Brush::SolidColor(color) => color.to_string(),
            _ => Color::rgba(0, 0, 0, 0).to_string(),
        };

        self.save();
        self.canvas_render_context_2_d
            .set_fill_style_color(color.as_str());
        let canvas = self.canvas_render_context_2_d.get_canvas();
        self.canvas_render_context_2_d.fill_rect(
            0.0,
            0.0,
            canvas.width() as f64,
            canvas.height() as f64,
        );
        self.restore();
    }

    /// Replaces the wrapped `CanvasRenderingContext2d` backing this context.
    pub fn set_canvas_render_context_2d(
        &mut self,
        canvas_render_context_2_d: CanvasRenderingContext2d,
    ) {
        self.canvas_render_context_2_d = canvas_render_context_2_d;
    }

    /// Returns the exported pixel buffer of the canvas.
    ///
    /// NOTE(review): this is an unfinished stub — `image_data` is fetched but
    /// the empty `js!()` block never copies it into `export_data`, so the
    /// returned slice still holds the zeros it was initialized with. Confirm
    /// the intended copy before relying on this.
    pub fn data(&self) -> &[u32] {
        let width = self.canvas_render_context_2_d.get_canvas().width();
        let height = self.canvas_render_context_2_d.get_canvas().height();

        // Currently unused; see NOTE above.
        let image_data = self.canvas_render_context_2_d.get_image_data(0.0, 0.0, width as f64, height as f64);

        js!(

        );
        &self.export_data
    }

    /// Frame begin hook; intentionally a no-op for the web backend.
    pub fn start(&mut self) {}
    /// Frame end hook; intentionally a no-op for the web backend.
    pub fn finish(&mut self) {}
}

// --- Conversions ---

impl From<&str> for Image {
    // Builds an image from a source path / URL string slice.
    fn from(s: &str) -> Image {
        Image::new(s)
    }
}

impl From<String> for Image {
    // Builds an image from an owned source path / URL string.
    fn from(s: String) -> Image {
        Image::new(s)
    }
}
mod float;
mod int;
mod uint;

pub use float::*;
pub use int::*;
pub use uint::*;

// Vectors of pointers are not for public use at the current time.
pub(crate) mod ptr;

use crate::simd::intrinsics;
use crate::simd::{LaneCount, Mask, MaskElement, SupportedLaneCount};

/// A SIMD vector of `LANES` elements of type `T`.
// `repr(simd)` makes the compiler treat this newtype over `[T; LANES]` as a
// vector type, enabling the `intrinsics::simd_*` operations used below.
#[repr(simd)]
pub struct Simd<T, const LANES: usize>([T; LANES])
where
    T: SimdElement,
    LaneCount<LANES>: SupportedLaneCount;

impl<T, const LANES: usize> Simd<T, LANES>
where
    LaneCount<LANES>: SupportedLaneCount,
    T: SimdElement,
{
    /// Number of lanes in this vector.
    pub const LANES: usize = LANES;

    /// Get the number of lanes in this vector.
    // Instance-method convenience mirroring the associated `LANES` constant.
    pub const fn lanes(&self) -> usize {
        LANES
    }

    /// Construct a SIMD vector by setting all lanes to the given value.
    pub const fn splat(value: T) -> Self {
        Self([value; LANES])
    }

    /// Returns an array reference containing the entire SIMD vector.
    pub const fn as_array(&self) -> &[T; LANES] {
        &self.0
    }

    /// Returns a mutable array reference containing the entire SIMD vector.
    pub fn as_mut_array(&mut self) -> &mut [T; LANES] {
        &mut self.0
    }

    /// Converts an array to a SIMD vector.
    pub const fn from_array(array: [T; LANES]) -> Self {
        Self(array)
    }

    /// Converts a SIMD vector to an array.
    pub const fn to_array(self) -> [T; LANES] {
        self.0
    }

    /// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices.
    /// If an index is out of bounds, that lane instead selects the value from the "or" vector.
    /// ```
    /// # #![feature(portable_simd)]
    /// # #[cfg(feature = "std")] use core_simd::Simd;
    /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
    /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
    /// let idxs = Simd::from_array([9, 3, 0, 5]);
    /// let alt = Simd::from_array([-5, -4, -3, -2]);
    ///
    /// let result = Simd::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds.
/// assert_eq!(result, Simd::from_array([-5, 13, 10, 15])); /// ``` #[must_use] #[inline] pub fn gather_or(slice: &[T], idxs: Simd<usize, LANES>, or: Self) -> Self { Self::gather_select(slice, Mask::splat(true), idxs, or) } /// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices. /// Out-of-bounds indices instead use the default value for that lane (0). /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::Simd; /// # #[cfg(not(feature = "std"))] use core::simd::Simd; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// /// let result = Simd::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds. /// assert_eq!(result, Simd::from_array([0, 13, 10, 15])); /// ``` #[must_use] #[inline] pub fn gather_or_default(slice: &[T], idxs: Simd<usize, LANES>) -> Self where T: Default, { Self::gather_or(slice, idxs, Self::splat(T::default())) } /// SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices. /// Out-of-bounds or masked indices instead select the value from the "or" vector. /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// let alt = Simd::from_array([-5, -4, -3, -2]); /// let mask = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// /// let result = Simd::gather_select(&vec, mask, idxs, alt); // Note the lane that is out-of-bounds. 
/// assert_eq!(result, Simd::from_array([-5, 13, 10, -2])); /// ``` #[must_use] #[inline] pub fn gather_select( slice: &[T], mask: Mask<isize, LANES>, idxs: Simd<usize, LANES>, or: Self, ) -> Self { let mask: Mask<isize, LANES> = mask & idxs.lanes_lt(Simd::splat(slice.len())); // SAFETY: We have masked-off out-of-bounds lanes. unsafe { Self::gather_select_unchecked(slice, mask, idxs, or) } } /// Unsafe SIMD gather: construct a SIMD vector by reading from a slice, using potentially discontiguous indices. /// Masked indices instead select the value from the "or" vector. /// `gather_select_unchecked` is unsound if any unmasked index is out-of-bounds of the slice. /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// let alt = Simd::from_array([-5, -4, -3, -2]); /// let mask = Mask::from_array([true, true, true, false]); // Note the final mask lane. /// // If this mask was used to gather, it would be unsound. Let's fix that. /// let mask = mask & idxs.lanes_lt(Simd::splat(vec.len())); /// /// // We have masked the OOB lane, so it's safe to gather now. /// let result = unsafe { Simd::gather_select_unchecked(&vec, mask, idxs, alt) }; /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2])); /// ``` #[must_use] #[inline] pub unsafe fn gather_select_unchecked( slice: &[T], mask: Mask<isize, LANES>, idxs: Simd<usize, LANES>, or: Self, ) -> Self { let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr()); // Ferris forgive me, I have done pointer arithmetic here. 
let ptrs = base_ptr.wrapping_add(idxs); // SAFETY: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah unsafe { intrinsics::simd_gather(or, ptrs, mask.to_int()) } } /// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices. /// Out-of-bounds indices are not written. /// `scatter` writes "in order", so if an index receives two writes, only the last is guaranteed. /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::Simd; /// # #[cfg(not(feature = "std"))] use core::simd::Simd; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// /// vals.scatter(&mut vec, idxs); // index 0 receives two writes. /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` #[inline] pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, LANES>) { self.scatter_select(slice, Mask::splat(true), idxs) } /// SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices. /// Out-of-bounds or masked indices are not written. /// `scatter_select` writes "in order", so if an index receives two writes, only the last is guaranteed. /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// let mask = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// /// vals.scatter_select(&mut vec, mask, idxs); // index 0's second write is masked, thus omitted. 
/// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` #[inline] pub fn scatter_select( self, slice: &mut [T], mask: Mask<isize, LANES>, idxs: Simd<usize, LANES>, ) { let mask: Mask<isize, LANES> = mask & idxs.lanes_lt(Simd::splat(slice.len())); // SAFETY: We have masked-off out-of-bounds lanes. unsafe { self.scatter_select_unchecked(slice, mask, idxs) } } /// Unsafe SIMD scatter: write a SIMD vector's values into a slice, using potentially discontiguous indices. /// Out-of-bounds or masked indices are not written. /// `scatter_select_unchecked` is unsound if any unmasked index is out of bounds of the slice. /// `scatter_select_unchecked` writes "in order", so if the same index receives two writes, only the last is guaranteed. /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// let mask = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// // If this mask was used to scatter, it would be unsound. Let's fix that. /// let mask = mask & idxs.lanes_lt(Simd::splat(vec.len())); /// /// // We have masked the OOB lane, so it's safe to scatter now. /// unsafe { vals.scatter_select_unchecked(&mut vec, mask, idxs); } /// // index 0's second write is masked, thus was omitted. /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` #[inline] pub unsafe fn scatter_select_unchecked( self, slice: &mut [T], mask: Mask<isize, LANES>, idxs: Simd<usize, LANES>, ) { // SAFETY: This block works with *mut T derived from &mut 'a [T], // which means it is delicate in Rust's borrowing model, circa 2021: // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts! 
// Even though this block is largely safe methods, it must be exactly this way // to prevent invalidating the raw ptrs while they're live. // Thus, entering this block requires all values to use being already ready: // 0. idxs we want to write to, which are used to construct the mask. // 1. mask, which depends on an initial &'a [T] and the idxs. // 2. actual values to scatter (self). // 3. &mut [T] which will become our base ptr. unsafe { // Now Entering ☢️ *mut T Zone let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr()); // Ferris forgive me, I have done pointer arithmetic here. let ptrs = base_ptr.wrapping_add(idxs); // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah intrinsics::simd_scatter(self, ptrs, mask.to_int()) // Cleared ☢️ *mut T Zone } } } impl<T, const LANES: usize> Copy for Simd<T, LANES> where T: SimdElement, LaneCount<LANES>: SupportedLaneCount, { } impl<T, const LANES: usize> Clone for Simd<T, LANES> where T: SimdElement, LaneCount<LANES>: SupportedLaneCount, { fn clone(&self) -> Self { *self } } impl<T, const LANES: usize> Default for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + Default, { #[inline] fn default() -> Self { Self::splat(T::default()) } } impl<T, const LANES: usize> PartialEq for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + PartialEq, { #[inline] fn eq(&self, other: &Self) -> bool { // TODO use SIMD equality self.to_array() == other.to_array() } } impl<T, const LANES: usize> PartialOrd for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + PartialOrd, { #[inline] fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { // TODO use SIMD equality self.to_array().partial_cmp(other.as_ref()) } } impl<T, const LANES: usize> Eq for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + Eq, { } impl<T, const LANES: usize> Ord for Simd<T, LANES> where LaneCount<LANES>: 
SupportedLaneCount, T: SimdElement + Ord, { #[inline] fn cmp(&self, other: &Self) -> core::cmp::Ordering { // TODO use SIMD equality self.to_array().cmp(other.as_ref()) } } impl<T, const LANES: usize> core::hash::Hash for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + core::hash::Hash, { #[inline] fn hash<H>(&self, state: &mut H) where H: core::hash::Hasher, { self.as_array().hash(state) } } // array references impl<T, const LANES: usize> AsRef<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_ref(&self) -> &[T; LANES] { &self.0 } } impl<T, const LANES: usize> AsMut<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_mut(&mut self) -> &mut [T; LANES] { &mut self.0 } } // slice references impl<T, const LANES: usize> AsRef<[T]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_ref(&self) -> &[T] { &self.0 } } impl<T, const LANES: usize> AsMut<[T]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_mut(&mut self) -> &mut [T] { &mut self.0 } } // vector/array conversion impl<T, const LANES: usize> From<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { fn from(array: [T; LANES]) -> Self { Self(array) } } impl<T, const LANES: usize> From<Simd<T, LANES>> for [T; LANES] where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { fn from(vector: Simd<T, LANES>) -> Self { vector.to_array() } } mod sealed { pub trait Sealed {} } use sealed::Sealed; /// Marker trait for types that may be used as SIMD vector elements. /// SAFETY: This trait, when implemented, asserts the compiler can monomorphize /// `#[repr(simd)]` structs with the marked type as an element. /// Strictly, it is valid to impl if the vector will not be miscompiled. 
/// Practically, it is user-unfriendly to impl it if the vector won't compile, /// even when no soundness guarantees are broken by allowing the user to try. pub unsafe trait SimdElement: Sealed + Copy { /// The mask element type corresponding to this element type. type Mask: MaskElement; } impl Sealed for u8 {} unsafe impl SimdElement for u8 { type Mask = i8; } impl Sealed for u16 {} unsafe impl SimdElement for u16 { type Mask = i16; } impl Sealed for u32 {} unsafe impl SimdElement for u32 { type Mask = i32; } impl Sealed for u64 {} unsafe impl SimdElement for u64 { type Mask = i64; } impl Sealed for usize {} unsafe impl SimdElement for usize { type Mask = isize; } impl Sealed for i8 {} unsafe impl SimdElement for i8 { type Mask = i8; } impl Sealed for i16 {} unsafe impl SimdElement for i16 { type Mask = i16; } impl Sealed for i32 {} unsafe impl SimdElement for i32 { type Mask = i32; } impl Sealed for i64 {} unsafe impl SimdElement for i64 { type Mask = i64; } impl Sealed for isize {} unsafe impl SimdElement for isize { type Mask = isize; } impl Sealed for f32 {} unsafe impl SimdElement for f32 { type Mask = i32; } impl Sealed for f64 {} unsafe impl SimdElement for f64 { type Mask = i64; } Rewrite gather/scatter docs Headings with # Safety and # Examples are more "std style". Use terms like "enable" and "disable", rather than "mask" jargon. mod float; mod int; mod uint; pub use float::*; pub use int::*; pub use uint::*; // Vectors of pointers are not for public use at the current time. pub(crate) mod ptr; use crate::simd::intrinsics; use crate::simd::{LaneCount, Mask, MaskElement, SupportedLaneCount}; /// A SIMD vector of `LANES` elements of type `T`. #[repr(simd)] pub struct Simd<T, const LANES: usize>([T; LANES]) where T: SimdElement, LaneCount<LANES>: SupportedLaneCount; impl<T, const LANES: usize> Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { /// Number of lanes in this vector. 
pub const LANES: usize = LANES; /// Get the number of lanes in this vector. pub const fn lanes(&self) -> usize { LANES } /// Construct a SIMD vector by setting all lanes to the given value. pub const fn splat(value: T) -> Self { Self([value; LANES]) } /// Returns an array reference containing the entire SIMD vector. pub const fn as_array(&self) -> &[T; LANES] { &self.0 } /// Returns a mutable array reference containing the entire SIMD vector. pub fn as_mut_array(&mut self) -> &mut [T; LANES] { &mut self.0 } /// Converts an array to a SIMD vector. pub const fn from_array(array: [T; LANES]) -> Self { Self(array) } /// Converts a SIMD vector to an array. pub const fn to_array(self) -> [T; LANES] { self.0 } /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector. /// Lanes given an out-of-bounds index instead select values from the `or` vector. /// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::Simd; /// # #[cfg(not(feature = "std"))] use core::simd::Simd; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// let alt = Simd::from_array([-5, -4, -3, -2]); /// /// let result = Simd::gather_or(&vec, idxs, alt); // Note the lane that is out-of-bounds. /// assert_eq!(result, Simd::from_array([-5, 13, 10, 15])); /// ``` #[must_use] #[inline] pub fn gather_or(slice: &[T], idxs: Simd<usize, LANES>, or: Self) -> Self { Self::gather_select(slice, Mask::splat(true), idxs, or) } /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector. /// Lanes given an out-of-bounds index instead are set the default value for the type. 
/// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::Simd; /// # #[cfg(not(feature = "std"))] use core::simd::Simd; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// /// let result = Simd::gather_or_default(&vec, idxs); // Note the lane that is out-of-bounds. /// assert_eq!(result, Simd::from_array([0, 13, 10, 15])); /// ``` #[must_use] #[inline] pub fn gather_or_default(slice: &[T], idxs: Simd<usize, LANES>) -> Self where T: Default, { Self::gather_or(slice, idxs, Self::splat(T::default())) } /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector. /// The mask `enable`s all `true` lanes and disables all `false` lanes. /// If an index is disabled or is out-of-bounds, the lane is selected from the `or` vector. /// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// let alt = Simd::from_array([-5, -4, -3, -2]); /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// /// let result = Simd::gather_select(&vec, enable, idxs, alt); // Note the lane that is out-of-bounds. /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2])); /// ``` #[must_use] #[inline] pub fn gather_select( slice: &[T], enable: Mask<isize, LANES>, idxs: Simd<usize, LANES>, or: Self, ) -> Self { let enable: Mask<isize, LANES> = enable & idxs.lanes_lt(Simd::splat(slice.len())); // SAFETY: We have masked-off out-of-bounds lanes. unsafe { Self::gather_select_unchecked(slice, enable, idxs, or) } } /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector. /// The mask `enable`s all `true` lanes and disables all `false` lanes. 
/// If an index is disabled, the lane is selected from the `or` vector. /// /// # Safety /// /// Calling this function with an `enable`d out-of-bounds index is *[undefined behavior]* /// even if the resulting value is not used. /// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 5]); /// let alt = Simd::from_array([-5, -4, -3, -2]); /// let enable = Mask::from_array([true, true, true, false]); // Note the final mask lane. /// // If this mask was used to gather, it would be unsound. Let's fix that. /// let enable = enable & idxs.lanes_lt(Simd::splat(vec.len())); /// /// // We have masked the OOB lane, so it's safe to gather now. /// let result = unsafe { Simd::gather_select_unchecked(&vec, enable, idxs, alt) }; /// assert_eq!(result, Simd::from_array([-5, 13, 10, -2])); /// ``` /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[must_use] #[inline] pub unsafe fn gather_select_unchecked( slice: &[T], enable: Mask<isize, LANES>, idxs: Simd<usize, LANES>, or: Self, ) -> Self { let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr()); // Ferris forgive me, I have done pointer arithmetic here. let ptrs = base_ptr.wrapping_add(idxs); // SAFETY: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah unsafe { intrinsics::simd_gather(or, ptrs, enable.to_int()) } } /// Writes the values in a SIMD vector to potentially discontiguous indices in `slice`. /// If two lanes in the scattered vector would write to the same index /// only the last lane is guaranteed to actually be written. 
/// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::Simd; /// # #[cfg(not(feature = "std"))] use core::simd::Simd; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// /// vals.scatter(&mut vec, idxs); // index 0 receives two writes. /// assert_eq!(vec, vec![124, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` #[inline] pub fn scatter(self, slice: &mut [T], idxs: Simd<usize, LANES>) { self.scatter_select(slice, Mask::splat(true), idxs) } /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`. /// The mask `enable`s all `true` lanes and disables all `false` lanes. /// If an enabled index is out-of-bounds, the lane is not written. /// If two enabled lanes in the scattered vector would write to the same index, /// only the last lane is guaranteed to actually be written. /// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// /// vals.scatter_select(&mut vec, enable, idxs); // index 0's second write is masked, thus omitted. /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` #[inline] pub fn scatter_select( self, slice: &mut [T], enable: Mask<isize, LANES>, idxs: Simd<usize, LANES>, ) { let enable: Mask<isize, LANES> = enable & idxs.lanes_lt(Simd::splat(slice.len())); // SAFETY: We have masked-off out-of-bounds lanes. 
unsafe { self.scatter_select_unchecked(slice, enable, idxs) } } /// Writes the values in a SIMD vector to multiple potentially discontiguous indices in `slice`. /// The mask `enable`s all `true` lanes and disables all `false` lanes. /// If two enabled lanes in the scattered vector would write to the same index, /// only the last lane is guaranteed to actually be written. /// /// # Safety /// /// Calling this function with an enabled out-of-bounds index is *[undefined behavior]*, /// and may lead to memory corruption. /// /// # Examples /// ``` /// # #![feature(portable_simd)] /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask}; /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask}; /// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18]; /// let idxs = Simd::from_array([9, 3, 0, 0]); /// let vals = Simd::from_array([-27, 82, -41, 124]); /// let enable = Mask::from_array([true, true, true, false]); // Note the mask of the last lane. /// // If this mask was used to scatter, it would be unsound. Let's fix that. /// let enable = enable & idxs.lanes_lt(Simd::splat(vec.len())); /// /// // We have masked the OOB lane, so it's safe to scatter now. /// unsafe { vals.scatter_select_unchecked(&mut vec, enable, idxs); } /// // index 0's second write is masked, thus was omitted. /// assert_eq!(vec, vec![-41, 11, 12, 82, 14, 15, 16, 17, 18]); /// ``` /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] pub unsafe fn scatter_select_unchecked( self, slice: &mut [T], enable: Mask<isize, LANES>, idxs: Simd<usize, LANES>, ) { // SAFETY: This block works with *mut T derived from &mut 'a [T], // which means it is delicate in Rust's borrowing model, circa 2021: // &mut 'a [T] asserts uniqueness, so deriving &'a [T] invalidates live *mut Ts! // Even though this block is largely safe methods, it must be exactly this way // to prevent invalidating the raw ptrs while they're live. 
// Thus, entering this block requires all values to use being already ready: // 0. idxs we want to write to, which are used to construct the mask. // 1. enable, which depends on an initial &'a [T] and the idxs. // 2. actual values to scatter (self). // 3. &mut [T] which will become our base ptr. unsafe { // Now Entering ☢️ *mut T Zone let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr()); // Ferris forgive me, I have done pointer arithmetic here. let ptrs = base_ptr.wrapping_add(idxs); // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah intrinsics::simd_scatter(self, ptrs, enable.to_int()) // Cleared ☢️ *mut T Zone } } } impl<T, const LANES: usize> Copy for Simd<T, LANES> where T: SimdElement, LaneCount<LANES>: SupportedLaneCount, { } impl<T, const LANES: usize> Clone for Simd<T, LANES> where T: SimdElement, LaneCount<LANES>: SupportedLaneCount, { fn clone(&self) -> Self { *self } } impl<T, const LANES: usize> Default for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + Default, { #[inline] fn default() -> Self { Self::splat(T::default()) } } impl<T, const LANES: usize> PartialEq for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + PartialEq, { #[inline] fn eq(&self, other: &Self) -> bool { // TODO use SIMD equality self.to_array() == other.to_array() } } impl<T, const LANES: usize> PartialOrd for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + PartialOrd, { #[inline] fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { // TODO use SIMD equality self.to_array().partial_cmp(other.as_ref()) } } impl<T, const LANES: usize> Eq for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + Eq, { } impl<T, const LANES: usize> Ord for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + Ord, { #[inline] fn cmp(&self, other: &Self) -> core::cmp::Ordering { // TODO use SIMD equality 
self.to_array().cmp(other.as_ref()) } } impl<T, const LANES: usize> core::hash::Hash for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement + core::hash::Hash, { #[inline] fn hash<H>(&self, state: &mut H) where H: core::hash::Hasher, { self.as_array().hash(state) } } // array references impl<T, const LANES: usize> AsRef<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_ref(&self) -> &[T; LANES] { &self.0 } } impl<T, const LANES: usize> AsMut<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_mut(&mut self) -> &mut [T; LANES] { &mut self.0 } } // slice references impl<T, const LANES: usize> AsRef<[T]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_ref(&self) -> &[T] { &self.0 } } impl<T, const LANES: usize> AsMut<[T]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { #[inline] fn as_mut(&mut self) -> &mut [T] { &mut self.0 } } // vector/array conversion impl<T, const LANES: usize> From<[T; LANES]> for Simd<T, LANES> where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { fn from(array: [T; LANES]) -> Self { Self(array) } } impl<T, const LANES: usize> From<Simd<T, LANES>> for [T; LANES] where LaneCount<LANES>: SupportedLaneCount, T: SimdElement, { fn from(vector: Simd<T, LANES>) -> Self { vector.to_array() } } mod sealed { pub trait Sealed {} } use sealed::Sealed; /// Marker trait for types that may be used as SIMD vector elements. /// SAFETY: This trait, when implemented, asserts the compiler can monomorphize /// `#[repr(simd)]` structs with the marked type as an element. /// Strictly, it is valid to impl if the vector will not be miscompiled. /// Practically, it is user-unfriendly to impl it if the vector won't compile, /// even when no soundness guarantees are broken by allowing the user to try. 
pub unsafe trait SimdElement: Sealed + Copy { /// The mask element type corresponding to this element type. type Mask: MaskElement; } impl Sealed for u8 {} unsafe impl SimdElement for u8 { type Mask = i8; } impl Sealed for u16 {} unsafe impl SimdElement for u16 { type Mask = i16; } impl Sealed for u32 {} unsafe impl SimdElement for u32 { type Mask = i32; } impl Sealed for u64 {} unsafe impl SimdElement for u64 { type Mask = i64; } impl Sealed for usize {} unsafe impl SimdElement for usize { type Mask = isize; } impl Sealed for i8 {} unsafe impl SimdElement for i8 { type Mask = i8; } impl Sealed for i16 {} unsafe impl SimdElement for i16 { type Mask = i16; } impl Sealed for i32 {} unsafe impl SimdElement for i32 { type Mask = i32; } impl Sealed for i64 {} unsafe impl SimdElement for i64 { type Mask = i64; } impl Sealed for isize {} unsafe impl SimdElement for isize { type Mask = isize; } impl Sealed for f32 {} unsafe impl SimdElement for f32 { type Mask = i32; } impl Sealed for f64 {} unsafe impl SimdElement for f64 { type Mask = i64; }
extern crate path_slash; extern crate url; use crate::env::EnvConfig; use crate::error::{Error, Result}; use crate::page::{DiffOp, Line, Page}; use url::Url; #[cfg(target_os = "windows")] fn to_slash<S: AsRef<str>>(s: &S) -> String { use path_slash::PathExt; use std::path::Path; Path::new(s.as_ref()).to_slash_lossy() } // Do nothing on Windows #[cfg(not(target_os = "windows"))] fn to_slash<S: AsRef<str>>(s: &S) -> &str { s.as_ref() } fn build_github_like_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: &Page, ) -> String { match page { Page::Open => { if let Some(ref b) = branch { format!("https://{}/{}/{}/tree/{}", host, user, repo, b) } else { format!("https://{}/{}/{}", host, user, repo) } } Page::Diff { ref lhs, ref rhs, ref op, } => format!( "https://{}/{}/{}/compare/{}{}{}", host, user, repo, lhs, op, rhs, ), Page::Commit { ref hash } => format!("https://{}/{}/{}/commit/{}", host, user, repo, hash), Page::FileOrDir { ref relative_path, ref hash, line: None, } => format!( "https://{}/{}/{}/blob/{}/{}", host, user, repo, hash, to_slash(relative_path), ), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::At(line)), } => format!( "https://{}/{}/{}/blob/{}/{}#L{}", host, user, repo, hash, to_slash(relative_path), line, ), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::Range(start, end)), } => format!( "https://{}/{}/{}/blob/{}/{}#L{}-L{}", host, user, repo, hash, to_slash(relative_path), start, end, ), Page::Issue { number } => format!("https://{}/{}/{}/issues/{}", host, user, repo, number), } } fn build_custom_github_like_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: &Page, ssh_port: &Option<u16>, ) -> String { match ssh_port { Some(ref p) => build_github_like_url( &format!("{}:{}", host, p).as_str(), user, repo, branch, page, ), _ => build_github_like_url(host, user, repo, branch, page), } } fn build_gitlab_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: 
&Page, ) -> Result<String> { if let Page::Diff { op, .. } = page { if *op == DiffOp::TwoDots { return Err(Error::GitLabDiffNotSupported); } } Ok(build_github_like_url(host, user, repo, branch, page)) } fn build_bitbucket_url( user: &str, repo: &str, branch: &Option<String>, page: &Page, ) -> Result<String> { match page { Page::Open => { if let Some(ref b) = branch { Ok(format!( "https://bitbucket.org/{}/{}/branch/{}", user, repo, b, )) } else { Ok(format!("https://bitbucket.org/{}/{}", user, repo)) } } Page::Diff { .. } => Err(Error::BitbucketDiffNotSupported), Page::Commit { ref hash } => Ok(format!( "https://bitbucket.org/{}/{}/commits/{}", user, repo, hash, )), Page::FileOrDir { ref relative_path, ref hash, line: None, } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}", user, repo, hash, to_slash(relative_path), )), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::At(line)), } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}#lines-{}", user, repo, hash, to_slash(relative_path), line, )), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::Range(start, end)), } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}#lines-{}:{}", user, repo, hash, to_slash(relative_path), start, end, )), Page::Issue { number } => Ok(format!( "https://bitbucket.org/{}/{}/issues/{}", user, repo, number, )), } } // Note: Parse '/user/repo.git' or '/user/repo' or 'user/repo' into 'user' and 'repo' pub fn slug_from_path<'a>(path: &'a str) -> Result<(&'a str, &'a str)> { let mut split = path.split('/').skip_while(|s| s.is_empty()); let user = split.next().ok_or_else(|| Error::NoUserInPath { path: path.to_string(), })?; let mut repo = split.next().ok_or_else(|| Error::NoRepoInPath { path: path.to_string(), })?; if repo.ends_with(".git") { // Slice '.git' from 'repo.git' repo = &repo[0..repo.len() - 4]; } Ok((user, repo)) } // Known URL formats // 1. https://hosting_service.com/user/repo.git // 2. 
git@hosting_service.com:user/repo.git (-> ssh://git@hosting_service.com:22/user/repo.git) pub fn build_page_url( repo: &str, page: &Page, branch: &Option<String>, env: &EnvConfig, ) -> Result<String> { let url = Url::parse(repo).map_err(|e| Error::BrokenUrl { url: repo.to_string(), msg: format!("{}", e), })?; let path = url.path(); let (user, repo_name) = slug_from_path(path)?; let host = url.host_str().ok_or_else(|| Error::BrokenUrl { url: repo.to_string(), msg: "No host in URL".to_string(), })?; match host { "github.com" => Ok(build_github_like_url(host, user, repo_name, branch, page)), "gitlab.com" => build_gitlab_url(host, user, repo_name, branch, page), "bitbucket.org" => build_bitbucket_url(user, repo_name, branch, page), _ => { let port = if host.starts_with("github.") { &env.ghe_ssh_port } else if host.starts_with("gitlab.") { &env.gitlab_ssh_port } else { match env.ghe_url_host { Some(ref v) if v == host => &env.ghe_ssh_port, _ => { return Err(Error::UnknownHostingService { url: repo.to_string(), }); } } }; Ok(build_custom_github_like_url( host, user, repo_name, branch, page, port, )) } } } deal with Option<u16> by copy extern crate path_slash; extern crate url; use crate::env::EnvConfig; use crate::error::{Error, Result}; use crate::page::{DiffOp, Line, Page}; use url::Url; #[cfg(target_os = "windows")] fn to_slash<S: AsRef<str>>(s: &S) -> String { use path_slash::PathExt; use std::path::Path; Path::new(s.as_ref()).to_slash_lossy() } // Do nothing on Windows #[cfg(not(target_os = "windows"))] fn to_slash<S: AsRef<str>>(s: &S) -> &str { s.as_ref() } fn build_github_like_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: &Page, ) -> String { match page { Page::Open => { if let Some(ref b) = branch { format!("https://{}/{}/{}/tree/{}", host, user, repo, b) } else { format!("https://{}/{}/{}", host, user, repo) } } Page::Diff { ref lhs, ref rhs, ref op, } => format!( "https://{}/{}/{}/compare/{}{}{}", host, user, repo, lhs, op, rhs, ), 
Page::Commit { ref hash } => format!("https://{}/{}/{}/commit/{}", host, user, repo, hash), Page::FileOrDir { ref relative_path, ref hash, line: None, } => format!( "https://{}/{}/{}/blob/{}/{}", host, user, repo, hash, to_slash(relative_path), ), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::At(line)), } => format!( "https://{}/{}/{}/blob/{}/{}#L{}", host, user, repo, hash, to_slash(relative_path), line, ), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::Range(start, end)), } => format!( "https://{}/{}/{}/blob/{}/{}#L{}-L{}", host, user, repo, hash, to_slash(relative_path), start, end, ), Page::Issue { number } => format!("https://{}/{}/{}/issues/{}", host, user, repo, number), } } fn build_custom_github_like_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: &Page, ssh_port: Option<u16>, ) -> String { match ssh_port { Some(ref p) => build_github_like_url( &format!("{}:{}", host, p).as_str(), user, repo, branch, page, ), _ => build_github_like_url(host, user, repo, branch, page), } } fn build_gitlab_url( host: &str, user: &str, repo: &str, branch: &Option<String>, page: &Page, ) -> Result<String> { if let Page::Diff { op, .. } = page { if *op == DiffOp::TwoDots { return Err(Error::GitLabDiffNotSupported); } } Ok(build_github_like_url(host, user, repo, branch, page)) } fn build_bitbucket_url( user: &str, repo: &str, branch: &Option<String>, page: &Page, ) -> Result<String> { match page { Page::Open => { if let Some(ref b) = branch { Ok(format!( "https://bitbucket.org/{}/{}/branch/{}", user, repo, b, )) } else { Ok(format!("https://bitbucket.org/{}/{}", user, repo)) } } Page::Diff { .. 
} => Err(Error::BitbucketDiffNotSupported), Page::Commit { ref hash } => Ok(format!( "https://bitbucket.org/{}/{}/commits/{}", user, repo, hash, )), Page::FileOrDir { ref relative_path, ref hash, line: None, } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}", user, repo, hash, to_slash(relative_path), )), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::At(line)), } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}#lines-{}", user, repo, hash, to_slash(relative_path), line, )), Page::FileOrDir { ref relative_path, ref hash, line: Some(Line::Range(start, end)), } => Ok(format!( "https://bitbucket.org/{}/{}/src/{}/{}#lines-{}:{}", user, repo, hash, to_slash(relative_path), start, end, )), Page::Issue { number } => Ok(format!( "https://bitbucket.org/{}/{}/issues/{}", user, repo, number, )), } } // Note: Parse '/user/repo.git' or '/user/repo' or 'user/repo' into 'user' and 'repo' pub fn slug_from_path<'a>(path: &'a str) -> Result<(&'a str, &'a str)> { let mut split = path.split('/').skip_while(|s| s.is_empty()); let user = split.next().ok_or_else(|| Error::NoUserInPath { path: path.to_string(), })?; let mut repo = split.next().ok_or_else(|| Error::NoRepoInPath { path: path.to_string(), })?; if repo.ends_with(".git") { // Slice '.git' from 'repo.git' repo = &repo[0..repo.len() - 4]; } Ok((user, repo)) } // Known URL formats // 1. https://hosting_service.com/user/repo.git // 2. 
git@hosting_service.com:user/repo.git (-> ssh://git@hosting_service.com:22/user/repo.git) pub fn build_page_url( repo: &str, page: &Page, branch: &Option<String>, env: &EnvConfig, ) -> Result<String> { let url = Url::parse(repo).map_err(|e| Error::BrokenUrl { url: repo.to_string(), msg: format!("{}", e), })?; let path = url.path(); let (user, repo_name) = slug_from_path(path)?; let host = url.host_str().ok_or_else(|| Error::BrokenUrl { url: repo.to_string(), msg: "No host in URL".to_string(), })?; match host { "github.com" => Ok(build_github_like_url(host, user, repo_name, branch, page)), "gitlab.com" => build_gitlab_url(host, user, repo_name, branch, page), "bitbucket.org" => build_bitbucket_url(user, repo_name, branch, page), _ => { let port = if host.starts_with("github.") { env.ghe_ssh_port } else if host.starts_with("gitlab.") { env.gitlab_ssh_port } else { match env.ghe_url_host { Some(ref v) if v == host => env.ghe_ssh_port, _ => { return Err(Error::UnknownHostingService { url: repo.to_string(), }); } } }; Ok(build_custom_github_like_url( host, user, repo_name, branch, page, port, )) } } }
/*
 * Copyright (c) 2012, Ben Noordhuis <info@bnoordhuis.nl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): this is pre-1.0 Rust (the code itself dates itself to
// 2012-02-06): `export`/`import`, `native mod`, record types `{ ... }`,
// `ret`, `[mutable T]` vectors. It will not compile with any modern rustc
// and is kept verbatim as historical code.

#[license = "ISC"];
#[link(name = "epoll", vers = "1.0", author = "Ben Noordhuis <info@bnoordhuis.nl>")];

//export EPOLL_NONBLOCK;
export EPOLL_CLOEXEC;
export EPOLL_CTL_ADD;
export EPOLL_CTL_DEL;
export EPOLLIN;
export EPOLLPRI;
export EPOLLOUT;
export EPOLLERR;
export EPOLLHUP;
export EPOLLONESHOT;
export EPOLLET;
export epoll_event;
export epoll_create1;
export epoll_ctl;
export epoll_wait;

use std; // required by the tests
import c_int = ctypes::c_int;

//const EPOLL_NONBLOCK: int = 0x800;
const EPOLL_CLOEXEC: int = 0x80000;
const EPOLL_CTL_ADD: int = 1;
const EPOLL_CTL_DEL: int = 2;
const EPOLL_CTL_MOD: int = 3;
const EPOLLIN: i32 = 0x01i32;
const EPOLLPRI: i32 = 0x02i32;
const EPOLLOUT: i32 = 0x04i32;
const EPOLLERR: i32 = 0x08i32;
const EPOLLHUP: i32 = 0x10i32;
const EPOLLONESHOT: i32 = 0x40000000i32;
const EPOLLET: i32 = 0x80000000i32;

// Unpacked mirror of the kernel's struct epoll_event (see epoll_ctl(2)).
type epoll_event = {
    events: i32,
    data: u64
};

#[nolink]
native mod __glibc {
    fn epoll_create1(flags: c_int) -> c_int;
    /*
    fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: epoll_event) -> c_int;
    fn epoll_wait(epfd: c_int, events: *mutable epoll_event, maxevents: c_int, timeout: c_int) -> c_int;
    */
    // The real prototypes above are commented out; raw byte pointers are
    // used instead because the FFI cannot express the packed struct layout.
    fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *u8) -> c_int;
    fn epoll_wait(epfd: c_int, events: *mutable u8, maxevents: c_int, timeout: c_int) -> c_int;
}

// Thin wrapper over epoll_create1(2); returns the epoll fd or -1 on error.
fn epoll_create1(flags: int) -> int {
    __glibc::epoll_create1(flags as c_int) as int
}

// Serialises `event` into a 12-byte buffer (events: i32 at offset 0,
// data: u64 at offset 4 — the packed struct epoll_event ABI layout) and
// forwards it to epoll_ctl(2). Returns 0 on success, -1 on error.
fn epoll_ctl(epfd: int, op: int, fd: int, event: epoll_event) -> int {
    /*
    __glibc::epoll_ctl(epfd as c_int, op as c_int, fd as c_int, event) as int
    */
    let buf: [mutable u8] = vec::init_elt_mut(12u, 0u8);
    // rust as of 2012-02-06 does not support packed types, hence we have to do
    // the packing and unpacking ourselves
    unsafe {
        let p1: *mutable i32 = unsafe::reinterpret_cast(ptr::mut_addr_of(buf[0]));
        let p2: *mutable u64 = unsafe::reinterpret_cast(ptr::mut_addr_of(buf[4]));
        *p1 = event.events;
        *p2 = event.data;
    }
    ret __glibc::epoll_ctl(epfd as c_int, op as c_int, fd as c_int, ptr::addr_of(buf[0])) as int
}

// Waits on `epfd`, unpacking each 12-byte kernel event back into the
// caller-supplied `events` records. Returns the number of ready events
// or -1 on error.
fn epoll_wait(epfd: int, events: [mutable epoll_event], timeout: int) -> int {
    /*
    let pevents: *mutable epoll_event = ptr::mut_addr_of(events[0]);
    let maxevents: c_int = vec::len(events) as c_int;
    ret __glibc::epoll_wait(epfd as c_int, pevents, maxevents, timeout as c_int) as int;
    */
    let buf: [mutable u8] = vec::init_elt_mut(12u * vec::len(events), 0u8);
    let nevents = __glibc::epoll_wait(epfd as c_int, ptr::mut_addr_of(buf[0]), vec::len(events) as c_int, timeout as c_int) as int;
    if (nevents == -1) {
        ret -1;
    }
    // rust as of 2012-02-06 does not support packed types, hence we have to do
    // the packing and unpacking ourselves
    let i = 0;
    while (i < nevents) {
        unsafe {
            let p1: *i32 = unsafe::reinterpret_cast(ptr::addr_of(buf[i * 12]));
            let p2: *u64 = unsafe::reinterpret_cast(ptr::addr_of(buf[i * 12 + 4]));
            events[i] = {events: *p1, data: *p2};
        }
        i += 1;
    }
    ret nevents;
}

#[test]
fn test_epoll_create1() {
    assert epoll_create1(0) >= 0;
    assert epoll_create1(EPOLL_CLOEXEC) >= 0;
    assert epoll_create1(-1) == -1;
}

#[test]
fn test_epoll_ctl() {
    let epfd = epoll_create1(0);
    assert epfd >= 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 0, {events:EPOLLIN, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 0, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_MOD, 0, {events:EPOLLOUT, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_DEL, 0, {events:EPOLLIN, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, -1, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_MOD, -1, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_DEL, -1, {events:EPOLLIN, data:0u64}) == -1;
}

#[test]
fn test_epoll_wait() {
    // add stdout to epoll set and wait for it to become writable
    // should be immediate, it's an error if we hit the 50 ms timeout
    // NOTE(review): despite the comment, fds 1 (stdout) AND 2 (stderr) are
    // both registered below, and only events[0] is checked afterwards.
    let epfd = epoll_create1(0);
    assert epfd >= 0;

    let magic = 42u64;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 1, {events:EPOLLOUT, data:magic}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 2, {events:EPOLLOUT, data:magic}) == 0;

    let events: [mutable epoll_event] = [
        mutable {events:0i32, data:0u64},
        {events:0i32, data:0u64}];
    let n = epoll_wait(epfd, events, 50);
    assert n == 2;
    assert events[0].data == magic;
    assert events[0].events & EPOLLOUT == EPOLLOUT;
}

// NOTE(review): the next line is a stray VCS commit message, not code; the
// file content then repeats, with epoll_create() added in the second copy.
Add epoll_create().

/*
 * Copyright (c) 2012, Ben Noordhuis <info@bnoordhuis.nl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

// NOTE(review): second revision of the same file — identical to the first
// copy above except for the added `epoll_create` export and wrapper.

#[license = "ISC"];
#[link(name = "epoll", vers = "1.0", author = "Ben Noordhuis <info@bnoordhuis.nl>")];

//export EPOLL_NONBLOCK;
export EPOLL_CLOEXEC;
export EPOLL_CTL_ADD;
export EPOLL_CTL_DEL;
export EPOLLIN;
export EPOLLPRI;
export EPOLLOUT;
export EPOLLERR;
export EPOLLHUP;
export EPOLLONESHOT;
export EPOLLET;
export epoll_event;
export epoll_create;
export epoll_create1;
export epoll_ctl;
export epoll_wait;

use std; // required by the tests
import c_int = ctypes::c_int;

//const EPOLL_NONBLOCK: int = 0x800;
const EPOLL_CLOEXEC: int = 0x80000;
const EPOLL_CTL_ADD: int = 1;
const EPOLL_CTL_DEL: int = 2;
const EPOLL_CTL_MOD: int = 3;
const EPOLLIN: i32 = 0x01i32;
const EPOLLPRI: i32 = 0x02i32;
const EPOLLOUT: i32 = 0x04i32;
const EPOLLERR: i32 = 0x08i32;
const EPOLLHUP: i32 = 0x10i32;
const EPOLLONESHOT: i32 = 0x40000000i32;
const EPOLLET: i32 = 0x80000000i32;

// Unpacked mirror of the kernel's struct epoll_event (see epoll_ctl(2)).
type epoll_event = {
    events: i32,
    data: u64
};

#[nolink]
native mod __glibc {
    fn epoll_create1(flags: c_int) -> c_int;
    /*
    fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: epoll_event) -> c_int;
    fn epoll_wait(epfd: c_int, events: *mutable epoll_event, maxevents: c_int, timeout: c_int) -> c_int;
    */
    fn epoll_ctl(epfd: c_int, op: c_int, fd: c_int, event: *u8) -> c_int;
    fn epoll_wait(epfd: c_int, events: *mutable u8, maxevents: c_int, timeout: c_int) -> c_int;
}

// New in this revision: epoll_create(2) equivalent, implemented on top of
// epoll_create1 with no flags (the size argument of the old syscall is
// ignored by modern kernels).
fn epoll_create() -> int {
    epoll_create1(0)
}

fn epoll_create1(flags: int) -> int {
    __glibc::epoll_create1(flags as c_int) as int
}

// Packs `event` into the 12-byte kernel ABI layout (i32 at 0, u64 at 4)
// before calling epoll_ctl(2).
fn epoll_ctl(epfd: int, op: int, fd: int, event: epoll_event) -> int {
    /*
    __glibc::epoll_ctl(epfd as c_int, op as c_int, fd as c_int, event) as int
    */
    let buf: [mutable u8] = vec::init_elt_mut(12u, 0u8);
    // rust as of 2012-02-06 does not support packed types, hence we have to do
    // the packing and unpacking ourselves
    unsafe {
        let p1: *mutable i32 = unsafe::reinterpret_cast(ptr::mut_addr_of(buf[0]));
        let p2: *mutable u64 = unsafe::reinterpret_cast(ptr::mut_addr_of(buf[4]));
        *p1 = event.events;
        *p2 = event.data;
    }
    ret __glibc::epoll_ctl(epfd as c_int, op as c_int, fd as c_int, ptr::addr_of(buf[0])) as int
}

// Waits on `epfd` and unpacks each returned 12-byte event into `events`.
fn epoll_wait(epfd: int, events: [mutable epoll_event], timeout: int) -> int {
    /*
    let pevents: *mutable epoll_event = ptr::mut_addr_of(events[0]);
    let maxevents: c_int = vec::len(events) as c_int;
    ret __glibc::epoll_wait(epfd as c_int, pevents, maxevents, timeout as c_int) as int;
    */
    let buf: [mutable u8] = vec::init_elt_mut(12u * vec::len(events), 0u8);
    let nevents = __glibc::epoll_wait(epfd as c_int, ptr::mut_addr_of(buf[0]), vec::len(events) as c_int, timeout as c_int) as int;
    if (nevents == -1) {
        ret -1;
    }
    // rust as of 2012-02-06 does not support packed types, hence we have to do
    // the packing and unpacking ourselves
    let i = 0;
    while (i < nevents) {
        unsafe {
            let p1: *i32 = unsafe::reinterpret_cast(ptr::addr_of(buf[i * 12]));
            let p2: *u64 = unsafe::reinterpret_cast(ptr::addr_of(buf[i * 12 + 4]));
            events[i] = {events: *p1, data: *p2};
        }
        i += 1;
    }
    ret nevents;
}

#[test]
fn test_epoll_create1() {
    assert epoll_create1(0) >= 0;
    assert epoll_create1(EPOLL_CLOEXEC) >= 0;
    assert epoll_create1(-1) == -1;
}

#[test]
fn test_epoll_ctl() {
    let epfd = epoll_create1(0);
    assert epfd >= 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 0, {events:EPOLLIN, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 0, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_MOD, 0, {events:EPOLLOUT, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_DEL, 0, {events:EPOLLIN, data:0u64}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, -1, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_MOD, -1, {events:EPOLLIN, data:0u64}) == -1;
    assert epoll_ctl(epfd, EPOLL_CTL_DEL, -1, {events:EPOLLIN, data:0u64}) == -1;
}

#[test]
fn test_epoll_wait() {
    // add stdout to epoll set and wait for it to become writable
    // should be immediate, it's an error if we hit the 50 ms timeout
    let epfd = epoll_create1(0);
    assert epfd >= 0;

    let magic = 42u64;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 1, {events:EPOLLOUT, data:magic}) == 0;
    assert epoll_ctl(epfd, EPOLL_CTL_ADD, 2, {events:EPOLLOUT, data:magic}) == 0;

    let events: [mutable epoll_event] = [
        mutable {events:0i32, data:0u64},
        {events:0i32, data:0u64}];
    let n = epoll_wait(epfd, events, 50);
    assert n == 2;
    assert events[0].data == magic;
    assert events[0].events & EPOLLOUT == EPOLLOUT;
}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use std::any::Any;
use std::borrow::Borrow;
use std::convert::{From, TryFrom};
use std::fmt;
use std::io::Write;
use std::iter::{FromIterator, IntoIterator};
use std::mem;
use std::sync::Arc;

use chrono::prelude::*;
use num::Num;

use super::*;
use crate::array::builder::StringDictionaryBuilder;
use crate::array::equal::JsonEqual;
use crate::buffer::{buffer_bin_or, Buffer, MutableBuffer};
use crate::datatypes::DataType::Struct;
use crate::datatypes::*;
use crate::memory;
use crate::{
    error::{ArrowError, Result},
    util::bit_util,
};

// Unit-conversion constants used by the temporal helpers (`as_datetime`,
// `as_time`) further down this file.

/// Number of seconds in a day
const SECONDS_IN_DAY: i64 = 86_400;
/// Number of milliseconds in a second
const MILLISECONDS: i64 = 1_000;
/// Number of microseconds in a second
const MICROSECONDS: i64 = 1_000_000;
/// Number of nanoseconds in a second
const NANOSECONDS: i64 = 1_000_000_000;

/// Trait for dealing with different types of array at runtime when the type of the
/// array is not known in advance.
pub trait Array: fmt::Debug + Send + Sync + ArrayEqual + JsonEqual {
    /// Returns the array as [`Any`](std::any::Any) so that it can be
    /// downcasted to a specific implementation.
    ///
    /// # Example:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use arrow::array::Int32Array;
    /// use arrow::datatypes::{Schema, Field, DataType};
    /// use arrow::record_batch::RecordBatch;
    ///
    /// # fn main() -> arrow::error::Result<()> {
    /// let id = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// let batch = RecordBatch::try_new(
    ///     Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)])),
    ///     vec![Arc::new(id)]
    /// )?;
    ///
    /// let int32array = batch
    ///     .column(0)
    ///     .as_any()
    ///     .downcast_ref::<Int32Array>()
    ///     .expect("Failed to downcast");
    /// # Ok(())
    /// # }
    /// ```
    fn as_any(&self) -> &Any;

    /// Returns a reference-counted pointer to the underlying data of this array.
    fn data(&self) -> ArrayDataRef;

    /// Returns a borrowed & reference-counted pointer to the underlying data of this array.
    fn data_ref(&self) -> &ArrayDataRef;

    /// Returns a reference to the [`DataType`](crate::datatypes::DataType) of this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::datatypes::DataType;
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(*array.data_type(), DataType::Int32);
    /// ```
    fn data_type(&self) -> &DataType {
        self.data_ref().data_type()
    }

    /// Returns a zero-copy slice of this array with the indicated offset and length.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// // Make slice over the values [2, 3, 4]
    /// let array_slice = array.slice(1, 3);
    ///
    /// assert!(array_slice.equals(&Int32Array::from(vec![2, 3, 4])));
    /// ```
    fn slice(&self, offset: usize, length: usize) -> ArrayRef {
        make_array(slice_data(self.data_ref(), offset, length))
    }

    /// Returns the length (i.e., number of elements) of this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(array.len(), 5);
    /// ```
    fn len(&self) -> usize {
        self.data_ref().len()
    }

    /// Returns whether this array is empty.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(array.is_empty(), false);
    /// ```
    fn is_empty(&self) -> bool {
        self.data_ref().is_empty()
    }

    /// Returns the offset into the underlying data used by this array(-slice).
    /// Note that the underlying data can be shared by many arrays.
    /// This defaults to `0`.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// // Make slice over the values [2, 3, 4]
    /// let array_slice = array.slice(1, 3);
    ///
    /// assert_eq!(array.offset(), 0);
    /// assert_eq!(array_slice.offset(), 1);
    /// ```
    fn offset(&self) -> usize {
        self.data_ref().offset()
    }

    /// Returns whether the element at `index` is null.
    /// When using this function on a slice, the index is relative to the slice.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![Some(1), None]);
    ///
    /// assert_eq!(array.is_null(0), false);
    /// assert_eq!(array.is_null(1), true);
    /// ```
    fn is_null(&self, index: usize) -> bool {
        // Offset must be added: the validity bitmap is shared with the
        // unsliced parent data.
        let data = self.data_ref();
        data.is_null(data.offset() + index)
    }

    /// Returns whether the element at `index` is not null.
    /// When using this function on a slice, the index is relative to the slice.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![Some(1), None]);
    ///
    /// assert_eq!(array.is_valid(0), true);
    /// assert_eq!(array.is_valid(1), false);
    /// ```
    fn is_valid(&self, index: usize) -> bool {
        let data = self.data_ref();
        data.is_valid(data.offset() + index)
    }

    /// Returns the total number of null values in this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// // Construct an array with values [1, NULL, NULL]
    /// let array = Int32Array::from(vec![Some(1), None, None]);
    ///
    /// assert_eq!(array.null_count(), 2);
    /// ```
    fn null_count(&self) -> usize {
        self.data_ref().null_count()
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this array.
    fn get_buffer_memory_size(&self) -> usize;

    /// Returns the total number of bytes of memory occupied physically by this array.
    fn get_array_memory_size(&self) -> usize;
}

/// A reference-counted reference to a generic `Array`.
pub type ArrayRef = Arc<Array>;

/// Constructs an array using the input `data`.
/// Returns a reference-counted `Array` instance.
// Dispatches on the data's `DataType` to the matching concrete array
// wrapper. Panics on `Float16` and on any data type without a concrete
// implementation here.
pub fn make_array(data: ArrayDataRef) -> ArrayRef {
    match data.data_type() {
        DataType::Boolean => Arc::new(BooleanArray::from(data)) as ArrayRef,
        DataType::Int8 => Arc::new(Int8Array::from(data)) as ArrayRef,
        DataType::Int16 => Arc::new(Int16Array::from(data)) as ArrayRef,
        DataType::Int32 => Arc::new(Int32Array::from(data)) as ArrayRef,
        DataType::Int64 => Arc::new(Int64Array::from(data)) as ArrayRef,
        DataType::UInt8 => Arc::new(UInt8Array::from(data)) as ArrayRef,
        DataType::UInt16 => Arc::new(UInt16Array::from(data)) as ArrayRef,
        DataType::UInt32 => Arc::new(UInt32Array::from(data)) as ArrayRef,
        DataType::UInt64 => Arc::new(UInt64Array::from(data)) as ArrayRef,
        DataType::Float16 => panic!("Float16 datatype not supported"),
        DataType::Float32 => Arc::new(Float32Array::from(data)) as ArrayRef,
        DataType::Float64 => Arc::new(Float64Array::from(data)) as ArrayRef,
        DataType::Date32(DateUnit::Day) => Arc::new(Date32Array::from(data)) as ArrayRef,
        DataType::Date64(DateUnit::Millisecond) => {
            Arc::new(Date64Array::from(data)) as ArrayRef
        }
        DataType::Time32(TimeUnit::Second) => {
            Arc::new(Time32SecondArray::from(data)) as ArrayRef
        }
        DataType::Time32(TimeUnit::Millisecond) => {
            Arc::new(Time32MillisecondArray::from(data)) as ArrayRef
        }
        DataType::Time64(TimeUnit::Microsecond) => {
            Arc::new(Time64MicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Time64(TimeUnit::Nanosecond) => {
            Arc::new(Time64NanosecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Second, _) => {
            Arc::new(TimestampSecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Millisecond, _) => {
            Arc::new(TimestampMillisecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Microsecond, _) => {
            Arc::new(TimestampMicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Nanosecond, _) => {
            Arc::new(TimestampNanosecondArray::from(data)) as ArrayRef
        }
        DataType::Interval(IntervalUnit::YearMonth) => {
            Arc::new(IntervalYearMonthArray::from(data)) as ArrayRef
        }
        DataType::Interval(IntervalUnit::DayTime) => {
            Arc::new(IntervalDayTimeArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Second) => {
            Arc::new(DurationSecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Millisecond) => {
            Arc::new(DurationMillisecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Microsecond) => {
            Arc::new(DurationMicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Nanosecond) => {
            Arc::new(DurationNanosecondArray::from(data)) as ArrayRef
        }
        DataType::Binary => Arc::new(BinaryArray::from(data)) as ArrayRef,
        DataType::LargeBinary => Arc::new(LargeBinaryArray::from(data)) as ArrayRef,
        DataType::FixedSizeBinary(_) => {
            Arc::new(FixedSizeBinaryArray::from(data)) as ArrayRef
        }
        DataType::Utf8 => Arc::new(StringArray::from(data)) as ArrayRef,
        DataType::LargeUtf8 => Arc::new(LargeStringArray::from(data)) as ArrayRef,
        DataType::List(_) => Arc::new(ListArray::from(data)) as ArrayRef,
        DataType::LargeList(_) => Arc::new(LargeListArray::from(data)) as ArrayRef,
        DataType::Struct(_) => Arc::new(StructArray::from(data)) as ArrayRef,
        DataType::Union(_) => Arc::new(UnionArray::from(data)) as ArrayRef,
        DataType::FixedSizeList(_, _) => {
            Arc::new(FixedSizeListArray::from(data)) as ArrayRef
        }
        // Dictionary arrays are additionally dispatched on their key type.
        DataType::Dictionary(ref key_type, _) => match key_type.as_ref() {
            DataType::Int8 => {
                Arc::new(DictionaryArray::<Int8Type>::from(data)) as ArrayRef
            }
            DataType::Int16 => {
                Arc::new(DictionaryArray::<Int16Type>::from(data)) as ArrayRef
            }
            DataType::Int32 => {
                Arc::new(DictionaryArray::<Int32Type>::from(data)) as ArrayRef
            }
            DataType::Int64 => {
                Arc::new(DictionaryArray::<Int64Type>::from(data)) as ArrayRef
            }
            DataType::UInt8 => {
                Arc::new(DictionaryArray::<UInt8Type>::from(data)) as ArrayRef
            }
            DataType::UInt16 => {
                Arc::new(DictionaryArray::<UInt16Type>::from(data)) as ArrayRef
            }
            DataType::UInt32 => {
                Arc::new(DictionaryArray::<UInt32Type>::from(data)) as ArrayRef
            }
            DataType::UInt64 => {
                Arc::new(DictionaryArray::<UInt64Type>::from(data)) as ArrayRef
            }
            dt => panic!("Unexpected dictionary key type {:?}", dt),
        },
        DataType::Null => Arc::new(NullArray::from(data)) as ArrayRef,
        dt => panic!("Unexpected data type {:?}", dt),
    }
}

/// Creates a zero-copy slice of the array's data.
///
/// # Panics
///
/// Panics if `offset + length > data.len()`.
fn slice_data(data: &ArrayDataRef, mut offset: usize, length: usize) -> ArrayDataRef {
    assert!((offset + length) <= data.len());

    let mut new_data = data.as_ref().clone();
    // Clamp the slice length to what is actually available past `offset`
    // (the assert above already guarantees `length` fits).
    let len = std::cmp::min(new_data.len - offset, length);

    // The slice's offset is absolute: it is stacked on top of any offset
    // the source data already carried.
    offset += data.offset;
    new_data.len = len;
    new_data.offset = offset;

    // Calculate the new null count based on the offset
    new_data.null_count = if let Some(bitmap) = new_data.null_bitmap() {
        let valid_bits = bitmap.bits.data();
        len.checked_sub(bit_util::count_set_bits_offset(valid_bits, offset, length))
            .unwrap()
    } else {
        0
    };

    Arc::new(new_data)
}

// Creates a new MutableBuffer whose bits are all initialized to false
// (unset); this is useful to populate null bitmaps.
fn make_null_buffer(len: usize) -> MutableBuffer {
    let num_bytes = bit_util::ceil(len, 8);
    MutableBuffer::new(num_bytes).with_bitset(num_bytes, false)
}

/// ----------------------------------------------------------------------------
/// Implementations of different array types

// Thin wrapper around a raw pointer so array types holding one can be
// shared across threads (see the unsafe Send/Sync impls below).
struct RawPtrBox<T> {
    inner: *const T,
}

impl<T> RawPtrBox<T> {
    fn new(inner: *const T) -> Self {
        Self { inner }
    }

    fn get(&self) -> *const T {
        self.inner
    }
}

// SAFETY(review): these impls assume the pointee is immutable and kept
// alive by the owning ArrayData for the wrapper's lifetime — confirm; the
// pointer itself is never written through from this file.
unsafe impl<T> Send for RawPtrBox<T> {}
unsafe impl<T> Sync for RawPtrBox<T> {}

// Reinterprets a byte pointer as `*const T`, panicking if it is not
// aligned for `T`.
fn as_aligned_pointer<T>(p: *const u8) -> *const T {
    assert!(
        memory::is_aligned(p, mem::align_of::<T>()),
        "memory is not aligned"
    );
    p as *const T
}

/// Array whose elements are of primitive types.
pub struct PrimitiveArray<T: ArrowPrimitiveType> {
    data: ArrayDataRef,
    /// Pointer to the value array. The lifetime of this must be <= to the value buffer
    /// stored in `data`, so it's safe to store.
/// Also note that boolean arrays are bit-packed, so although the underlying pointer
    /// is of type bool it should be cast back to u8 before being used.
    /// i.e. `self.raw_values.get() as *const u8`
    raw_values: RawPtrBox<T::Native>,
}

impl<T: ArrowPrimitiveType> PrimitiveArray<T> {
    /// Builds a `PrimitiveArray` directly from a values buffer plus
    /// length/null-count/offset metadata.
    pub fn new(length: usize, values: Buffer, null_count: usize, offset: usize) -> Self {
        let array_data = ArrayData::builder(T::DATA_TYPE)
            .len(length)
            .add_buffer(values)
            .null_count(null_count)
            .offset(offset)
            .build();
        PrimitiveArray::from(array_data)
    }

    /// Returns the length of this array.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether this array is empty.
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a raw pointer to the values of this array.
    pub fn raw_values(&self) -> *const T::Native {
        // Skip past the slice offset so the pointer addresses element 0 of
        // this (possibly sliced) array.
        unsafe { self.raw_values.get().add(self.data.offset()) }
    }

    /// Returns a slice for the given offset and length
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    pub fn value_slice(&self, offset: usize, len: usize) -> &[T::Native] {
        let raw =
            unsafe { std::slice::from_raw_parts(self.raw_values().add(offset), len) };
        &raw[..]
    }

    // Returns a new primitive array builder
    pub fn builder(capacity: usize) -> PrimitiveBuilder<T> {
        PrimitiveBuilder::<T>::new(capacity)
    }

    /// Returns a `Buffer` holding all the values of this array.
    ///
    /// Note this doesn't take the offset of this array into account.
    pub fn values(&self) -> Buffer {
        self.data.buffers()[0].clone()
    }

    /// Returns the primitive value at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    pub fn value(&self, i: usize) -> T::Native {
        // Unlike raw_values(), the base pointer is used here, so the slice
        // offset is folded into the index instead.
        let offset = i + self.offset();
        unsafe { T::index(self.raw_values.get(), offset) }
    }
}

impl<T: ArrowPrimitiveType> Array for PrimitiveArray<T> {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [PrimitiveArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [PrimitiveArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size() + mem::size_of_val(self)
    }
}

// Converts a raw temporal value `v` (interpreted per `T::DATA_TYPE`) to a
// chrono NaiveDateTime; returns None for types without a datetime meaning.
fn as_datetime<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveDateTime> {
    match T::DATA_TYPE {
        DataType::Date32(_) => {
            // convert days into seconds
            Some(NaiveDateTime::from_timestamp(v as i64 * SECONDS_IN_DAY, 0))
        }
        DataType::Date64(_) => Some(NaiveDateTime::from_timestamp(
            // extract seconds from milliseconds
            v / MILLISECONDS,
            // discard extracted seconds and convert milliseconds to nanoseconds
            (v % MILLISECONDS * MICROSECONDS) as u32,
        )),
        DataType::Time32(_) | DataType::Time64(_) => None,
        DataType::Timestamp(unit, _) => match unit {
            TimeUnit::Second => Some(NaiveDateTime::from_timestamp(v, 0)),
            TimeUnit::Millisecond => Some(NaiveDateTime::from_timestamp(
                // extract seconds from milliseconds
                v / MILLISECONDS,
                // discard extracted seconds and convert milliseconds to nanoseconds
                (v % MILLISECONDS * MICROSECONDS) as u32,
            )),
            TimeUnit::Microsecond => Some(NaiveDateTime::from_timestamp(
                // extract seconds from microseconds
                v / MICROSECONDS,
                // discard extracted seconds and convert microseconds to nanoseconds
                (v % MICROSECONDS * MILLISECONDS) as u32,
            )),
            TimeUnit::Nanosecond => Some(NaiveDateTime::from_timestamp(
                // extract seconds from nanoseconds
                v / NANOSECONDS,
                // discard extracted seconds
                (v % NANOSECONDS) as u32,
            )),
        },
        // interval is not yet fully documented
// [ARROW-3097]
        DataType::Interval(_) => None,
        _ => None,
    }
}

// Date component of `as_datetime` for the same raw value.
fn as_date<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveDate> {
    as_datetime::<T>(v).map(|datetime| datetime.date())
}

// Converts a raw temporal value `v` to a chrono NaiveTime per `T::DATA_TYPE`.
fn as_time<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveTime> {
    match T::DATA_TYPE {
        DataType::Time32(unit) => {
            // safe to immediately cast to u32 as `self.value(i)` is positive i32
            let v = v as u32;
            match unit {
                TimeUnit::Second => {
                    Some(NaiveTime::from_num_seconds_from_midnight(v, 0))
                }
                TimeUnit::Millisecond => {
                    Some(NaiveTime::from_num_seconds_from_midnight(
                        // extract seconds from milliseconds
                        v / MILLISECONDS as u32,
                        // discard extracted seconds and convert milliseconds to
                        // nanoseconds
                        // NOTE: `as` binds tighter than `%`/`*`, so this is
                        // (v % 1_000) * 1_000_000 — at most 999_000_000, which
                        // fits in u32.
                        v % MILLISECONDS as u32 * MICROSECONDS as u32,
                    ))
                }
                _ => None,
            }
        }
        DataType::Time64(unit) => {
            match unit {
                TimeUnit::Microsecond => {
                    Some(NaiveTime::from_num_seconds_from_midnight(
                        // extract seconds from microseconds
                        (v / MICROSECONDS) as u32,
                        // discard extracted seconds and convert microseconds to
                        // nanoseconds
                        (v % MICROSECONDS * MILLISECONDS) as u32,
                    ))
                }
                TimeUnit::Nanosecond => {
                    Some(NaiveTime::from_num_seconds_from_midnight(
                        // extract seconds from nanoseconds
                        (v / NANOSECONDS) as u32,
                        // discard extracted seconds
                        (v % NANOSECONDS) as u32,
                    ))
                }
                _ => None,
            }
        }
        DataType::Timestamp(_, _) => as_datetime::<T>(v).map(|datetime| datetime.time()),
        // Dates carry no time-of-day; midnight is returned.
        DataType::Date32(_) | DataType::Date64(_) => Some(NaiveTime::from_hms(0, 0, 0)),
        DataType::Interval(_) => None,
        _ => None,
    }
}

impl<T: ArrowTemporalType + ArrowNumericType> PrimitiveArray<T>
where
    i64: std::convert::From<T::Native>,
{
    /// Returns value as a chrono `NaiveDateTime`, handling time resolution
    ///
    /// If a data type cannot be converted to `NaiveDateTime`, a `None` is returned.
    /// A valid value is expected, thus the user should first check for validity.
    pub fn value_as_datetime(&self, i: usize) -> Option<NaiveDateTime> {
        as_datetime::<T>(i64::from(self.value(i)))
    }

    /// Returns value as a chrono `NaiveDate` by using `Self::datetime()`
    ///
    /// If a data type cannot be converted to `NaiveDate`, a `None` is returned
    pub fn value_as_date(&self, i: usize) -> Option<NaiveDate> {
        self.value_as_datetime(i).map(|datetime| datetime.date())
    }

    /// Returns a value as a chrono `NaiveTime`
    ///
    /// `Date32` and `Date64` return UTC midnight as they do not have time resolution
    pub fn value_as_time(&self, i: usize) -> Option<NaiveTime> {
        as_time::<T>(i64::from(self.value(i)))
    }
}

impl<T: ArrowPrimitiveType> fmt::Debug for PrimitiveArray<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "PrimitiveArray<{:?}>\n[\n", T::DATA_TYPE)?;
        // Temporal types render as human-readable date/time strings; all
        // other primitives fall through to their Debug representation.
        print_long_array(self, f, |array, index, f| match T::DATA_TYPE {
            DataType::Date32(_) | DataType::Date64(_) => {
                let v = self.value(index).to_usize().unwrap() as i64;
                match as_date::<T>(v) {
                    Some(date) => write!(f, "{:?}", date),
                    None => write!(f, "null"),
                }
            }
            DataType::Time32(_) | DataType::Time64(_) => {
                let v = self.value(index).to_usize().unwrap() as i64;
                match as_time::<T>(v) {
                    Some(time) => write!(f, "{:?}", time),
                    None => write!(f, "null"),
                }
            }
            DataType::Timestamp(_, _) => {
                let v = self.value(index).to_usize().unwrap() as i64;
                match as_datetime::<T>(v) {
                    Some(datetime) => write!(f, "{:?}", datetime),
                    None => write!(f, "null"),
                }
            }
            _ => fmt::Debug::fmt(&array.value(index), f),
        })?;
        write!(f, "]")
    }
}

impl<'a, T: ArrowPrimitiveType> IntoIterator for &'a PrimitiveArray<T> {
    type Item = Option<<T as ArrowPrimitiveType>::Native>;
    type IntoIter = PrimitiveIter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        PrimitiveIter::<'a, T>::new(self)
    }
}

impl<'a, T: ArrowPrimitiveType> PrimitiveArray<T> {
    /// constructs a new iterator
    pub fn iter(&'a self) -> PrimitiveIter<'a, T> {
        PrimitiveIter::<'a, T>::new(&self)
    }
}

impl<T: ArrowPrimitiveType, Ptr: Borrow<Option<<T as
ArrowPrimitiveType>::Native>>> FromIterator<Ptr>
    for PrimitiveArray<T>
{
    // Builds the validity bitmap and values buffer in a single pass.
    // Requires a sized iterator (an exact upper bound from size_hint).
    fn from_iter<I: IntoIterator<Item = Ptr>>(iter: I) -> Self {
        let iter = iter.into_iter();
        let (_, data_len) = iter.size_hint();
        let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound.

        let num_bytes = bit_util::ceil(data_len, 8);
        let mut null_buf = MutableBuffer::new(num_bytes).with_bitset(num_bytes, false);
        let mut val_buf = MutableBuffer::new(
            data_len * mem::size_of::<<T as ArrowPrimitiveType>::Native>(),
        );

        // Placeholder bytes written for None entries so every slot stays
        // exactly one native value wide.
        let null = vec![0; mem::size_of::<<T as ArrowPrimitiveType>::Native>()];
        let null_slice = null_buf.data_mut();
        iter.enumerate().for_each(|(i, item)| {
            if let Some(a) = item.borrow() {
                bit_util::set_bit(null_slice, i);
                val_buf.write_all(a.to_byte_slice()).unwrap();
            } else {
                val_buf.write_all(&null).unwrap();
            }
        });

        let data = ArrayData::new(
            T::DATA_TYPE,
            data_len,
            None,
            Some(null_buf.freeze()),
            0,
            vec![val_buf.freeze()],
            vec![],
        );
        PrimitiveArray::from(Arc::new(data))
    }
}

// TODO: the macro is needed here because we'd get "conflicting implementations" error
// otherwise with both `From<Vec<T::Native>>` and `From<Vec<Option<T::Native>>>`.
// We should revisit this in future.
macro_rules! def_numeric_from_vec {
    ( $ty:ident ) => {
        impl From<Vec<<$ty as ArrowPrimitiveType>::Native>> for PrimitiveArray<$ty> {
            fn from(data: Vec<<$ty as ArrowPrimitiveType>::Native>) -> Self {
                let array_data = ArrayData::builder($ty::DATA_TYPE)
                    .len(data.len())
                    .add_buffer(Buffer::from(data.to_byte_slice()))
                    .build();
                PrimitiveArray::from(array_data)
            }
        }

        // Constructs a primitive array from a vector. Should only be used for testing.
        impl From<Vec<Option<<$ty as ArrowPrimitiveType>::Native>>>
            for PrimitiveArray<$ty>
        {
            fn from(data: Vec<Option<<$ty as ArrowPrimitiveType>::Native>>) -> Self {
                PrimitiveArray::from_iter(data.iter())
            }
        }
    };
}

def_numeric_from_vec!(Int8Type);
def_numeric_from_vec!(Int16Type);
def_numeric_from_vec!(Int32Type);
def_numeric_from_vec!(Int64Type);
def_numeric_from_vec!(UInt8Type);
def_numeric_from_vec!(UInt16Type);
def_numeric_from_vec!(UInt32Type);
def_numeric_from_vec!(UInt64Type);
def_numeric_from_vec!(Float32Type);
def_numeric_from_vec!(Float64Type);
def_numeric_from_vec!(Date32Type);
def_numeric_from_vec!(Date64Type);
def_numeric_from_vec!(Time32SecondType);
def_numeric_from_vec!(Time32MillisecondType);
def_numeric_from_vec!(Time64MicrosecondType);
def_numeric_from_vec!(Time64NanosecondType);
def_numeric_from_vec!(IntervalYearMonthType);
def_numeric_from_vec!(IntervalDayTimeType);
def_numeric_from_vec!(DurationSecondType);
def_numeric_from_vec!(DurationMillisecondType);
def_numeric_from_vec!(DurationMicrosecondType);
def_numeric_from_vec!(DurationNanosecondType);
def_numeric_from_vec!(TimestampMillisecondType);
def_numeric_from_vec!(TimestampMicrosecondType);

impl<T: ArrowTimestampType> PrimitiveArray<T> {
    /// Construct a timestamp array from a vec of i64 values and an optional timezone
    pub fn from_vec(data: Vec<i64>, timezone: Option<Arc<String>>) -> Self {
        let array_data =
            ArrayData::builder(DataType::Timestamp(T::get_time_unit(), timezone))
                .len(data.len())
                .add_buffer(Buffer::from(data.to_byte_slice()))
                .build();
        PrimitiveArray::from(array_data)
    }
}

impl<T: ArrowTimestampType> PrimitiveArray<T> {
    /// Construct a timestamp array from a vec of Option<i64> values and an optional timezone
    pub fn from_opt_vec(data: Vec<Option<i64>>, timezone: Option<Arc<String>>) -> Self {
        // TODO: duplicated from def_numeric_from_vec! macro, it looks
        // possible to convert to generic
        let data_len = data.len();
        let mut null_buf = make_null_buffer(data_len);
        let mut val_buf = MutableBuffer::new(data_len * mem::size_of::<i64>());

        {
            // Placeholder bytes for None entries keep slots fixed-width.
            let null = vec![0; mem::size_of::<i64>()];
            let null_slice = null_buf.data_mut();
            for (i, v) in data.iter().enumerate() {
                if let Some(n) = v {
                    bit_util::set_bit(null_slice, i);
                    // unwrap() in the following should be safe here since we've
                    // made sure enough space is allocated for the values.
                    val_buf.write_all(&n.to_byte_slice()).unwrap();
                } else {
                    val_buf.write_all(&null).unwrap();
                }
            }
        }

        let array_data =
            ArrayData::builder(DataType::Timestamp(T::get_time_unit(), timezone))
                .len(data_len)
                .add_buffer(val_buf.freeze())
                .null_bit_buffer(null_buf.freeze())
                .build();
        PrimitiveArray::from(array_data)
    }
}

/// Constructs a boolean array from a vector. Should only be used for testing.
impl From<Vec<bool>> for BooleanArray {
    fn from(data: Vec<bool>) -> Self {
        // Boolean values are bit-packed: one bit per element, no null bitmap.
        let mut mut_buf = make_null_buffer(data.len());
        {
            let mut_slice = mut_buf.data_mut();
            for (i, b) in data.iter().enumerate() {
                if *b {
                    bit_util::set_bit(mut_slice, i);
                }
            }
        }
        let array_data = ArrayData::builder(DataType::Boolean)
            .len(data.len())
            .add_buffer(mut_buf.freeze())
            .build();
        BooleanArray::from(array_data)
    }
}

impl From<Vec<Option<bool>>> for BooleanArray {
    fn from(data: Vec<Option<bool>>) -> Self {
        let data_len = data.len();
        let num_byte = bit_util::ceil(data_len, 8);
        let mut null_buf = make_null_buffer(data.len());
        let mut val_buf = MutableBuffer::new(num_byte).with_bitset(num_byte, false);

        {
            // Both the validity and value bitmaps are filled in one pass.
            let null_slice = null_buf.data_mut();
            let val_slice = val_buf.data_mut();
            for (i, v) in data.iter().enumerate() {
                if let Some(b) = v {
                    bit_util::set_bit(null_slice, i);
                    if *b {
                        bit_util::set_bit(val_slice, i);
                    }
                }
            }
        }
        let array_data = ArrayData::builder(DataType::Boolean)
            .len(data_len)
            .add_buffer(val_buf.freeze())
            .null_bit_buffer(null_buf.freeze())
            .build();
        BooleanArray::from(array_data)
    }
}

/// Constructs a `PrimitiveArray`
from an array data reference. impl<T: ArrowPrimitiveType> From<ArrayDataRef> for PrimitiveArray<T> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 1, "PrimitiveArray data should contain a single buffer only (values buffer)" ); let raw_values = data.buffers()[0].raw_data(); assert!( memory::is_aligned::<u8>(raw_values, mem::align_of::<T::Native>()), "memory is not aligned" ); Self { data, raw_values: RawPtrBox::new(raw_values as *const T::Native), } } } /// Common operations for List types. pub trait ListArrayOps<OffsetSize: OffsetSizeTrait> { fn value_offset_at(&self, i: usize) -> OffsetSize; } /// trait declaring an offset size, relevant for i32 vs i64 array types. pub trait OffsetSizeTrait: ArrowNativeType + Num + Ord { fn prefix() -> &'static str; fn to_isize(&self) -> isize; } impl OffsetSizeTrait for i32 { fn prefix() -> &'static str { "" } fn to_isize(&self) -> isize { num::ToPrimitive::to_isize(self).unwrap() } } impl OffsetSizeTrait for i64 { fn prefix() -> &'static str { "Large" } fn to_isize(&self) -> isize { num::ToPrimitive::to_isize(self).unwrap() } } pub struct GenericListArray<OffsetSize> { data: ArrayDataRef, values: ArrayRef, value_offsets: RawPtrBox<OffsetSize>, } impl<OffsetSize: OffsetSizeTrait> GenericListArray<OffsetSize> { /// Returns a reference to the values of this list. pub fn values(&self) -> ArrayRef { self.values.clone() } /// Returns a clone of the value type of this list. pub fn value_type(&self) -> DataType { self.values.data_ref().data_type().clone() } /// Returns ith value of this list array. pub fn value(&self, i: usize) -> ArrayRef { self.values.slice( self.value_offset(i).to_usize().unwrap(), self.value_length(i).to_usize().unwrap(), ) } /// Returns the offset for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. 
#[inline] pub fn value_offset(&self, i: usize) -> OffsetSize { self.value_offset_at(self.data.offset() + i) } /// Returns the length for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_length(&self, mut i: usize) -> OffsetSize { i += self.data.offset(); self.value_offset_at(i + 1) - self.value_offset_at(i) } #[inline] fn value_offset_at(&self, i: usize) -> OffsetSize { unsafe { *self.value_offsets.get().add(i) } } } impl<OffsetSize: OffsetSizeTrait> From<ArrayDataRef> for GenericListArray<OffsetSize> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 1, "ListArray data should contain a single buffer only (value offsets)" ); assert_eq!( data.child_data().len(), 1, "ListArray should contain a single child array (values array)" ); let values = make_array(data.child_data()[0].clone()); let raw_value_offsets = data.buffers()[0].raw_data(); let value_offsets: *const OffsetSize = as_aligned_pointer(raw_value_offsets); unsafe { assert!( (*value_offsets.offset(0)).is_zero(), "offsets do not start at zero" ); } Self { data, values, value_offsets: RawPtrBox::new(value_offsets), } } } impl<OffsetSize: 'static + OffsetSizeTrait> Array for GenericListArray<OffsetSize> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [ListArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [ListArray]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } // Helper function for printing potentially long arrays. 
fn print_long_array<A, F>(array: &A, f: &mut fmt::Formatter, print_item: F) -> fmt::Result where A: Array, F: Fn(&A, usize, &mut fmt::Formatter) -> fmt::Result, { let head = std::cmp::min(10, array.len()); for i in 0..head { if array.is_null(i) { writeln!(f, " null,")?; } else { write!(f, " ")?; print_item(&array, i, f)?; writeln!(f, ",")?; } } if array.len() > 10 { if array.len() > 20 { writeln!(f, " ...{} elements...,", array.len() - 20)?; } let tail = std::cmp::max(head, array.len() - 10); for i in tail..array.len() { if array.is_null(i) { writeln!(f, " null,")?; } else { write!(f, " ")?; print_item(&array, i, f)?; writeln!(f, ",")?; } } } Ok(()) } impl<OffsetSize: OffsetSizeTrait> fmt::Debug for GenericListArray<OffsetSize> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}ListArray\n[\n", OffsetSize::prefix())?; print_long_array(self, f, |array, index, f| { fmt::Debug::fmt(&array.value(index), f) })?; write!(f, "]") } } impl<OffsetSize: OffsetSizeTrait> ListArrayOps<OffsetSize> for GenericListArray<OffsetSize> { fn value_offset_at(&self, i: usize) -> OffsetSize { self.value_offset_at(i) } } /// A list array where each element is a variable-sized sequence of values with the same /// type whose memory offsets between elements are represented by a i32. pub type ListArray = GenericListArray<i32>; /// A list array where each element is a variable-sized sequence of values with the same /// type whose memory offsets between elements are represented by a i64. pub type LargeListArray = GenericListArray<i64>; /// A list array where each element is a fixed-size sequence of values with the same /// type whose maximum length is represented by a i32. pub struct FixedSizeListArray { data: ArrayDataRef, values: ArrayRef, length: i32, } impl FixedSizeListArray { /// Returns a reference to the values of this list. pub fn values(&self) -> ArrayRef { self.values.clone() } /// Returns a clone of the value type of this list. 
pub fn value_type(&self) -> DataType { self.values.data_ref().data_type().clone() } /// Returns ith value of this list array. pub fn value(&self, i: usize) -> ArrayRef { self.values .slice(self.value_offset(i) as usize, self.value_length() as usize) } /// Returns the offset for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_offset(&self, i: usize) -> i32 { self.value_offset_at(self.data.offset() + i) } /// Returns the length for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub const fn value_length(&self) -> i32 { self.length } #[inline] const fn value_offset_at(&self, i: usize) -> i32 { i as i32 * self.length } } impl From<ArrayDataRef> for FixedSizeListArray { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 0, "FixedSizeListArray data should not contain a buffer for value offsets" ); assert_eq!( data.child_data().len(), 1, "FixedSizeListArray should contain a single child array (values array)" ); let values = make_array(data.child_data()[0].clone()); let length = match data.data_type() { DataType::FixedSizeList(_, len) => { // check that child data is multiple of length assert_eq!( values.len() % *len as usize, 0, "FixedSizeListArray child array length should be a multiple of {}", len ); *len } _ => { panic!("FixedSizeListArray data should contain a FixedSizeList data type") } }; Self { data, values, length, } } } impl Array for FixedSizeListArray { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [FixedSizeListArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() + self.values().get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [FixedSizeListArray]. 
fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + self.values().get_array_memory_size() + mem::size_of_val(self) } } impl fmt::Debug for FixedSizeListArray { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "FixedSizeListArray<{}>\n[\n", self.value_length())?; print_long_array(self, f, |array, index, f| { fmt::Debug::fmt(&array.value(index), f) })?; write!(f, "]") } } /// Like OffsetSizeTrait, but specialized for Binary // This allow us to expose a constant datatype for the GenericBinaryArray pub trait BinaryOffsetSizeTrait: OffsetSizeTrait { const DATA_TYPE: DataType; } impl BinaryOffsetSizeTrait for i32 { const DATA_TYPE: DataType = DataType::Binary; } impl BinaryOffsetSizeTrait for i64 { const DATA_TYPE: DataType = DataType::LargeBinary; } pub struct GenericBinaryArray<OffsetSize: BinaryOffsetSizeTrait> { data: ArrayDataRef, value_offsets: RawPtrBox<OffsetSize>, value_data: RawPtrBox<u8>, } impl<OffsetSize: BinaryOffsetSizeTrait> GenericBinaryArray<OffsetSize> { /// Returns the offset for the element at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_offset(&self, i: usize) -> OffsetSize { self.value_offset_at(self.data.offset() + i) } /// Returns the length for the element at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_length(&self, mut i: usize) -> OffsetSize { i += self.data.offset(); self.value_offset_at(i + 1) - self.value_offset_at(i) } /// Returns a clone of the value offset buffer pub fn value_offsets(&self) -> Buffer { self.data.buffers()[0].clone() } /// Returns a clone of the value data buffer pub fn value_data(&self) -> Buffer { self.data.buffers()[1].clone() } #[inline] fn value_offset_at(&self, i: usize) -> OffsetSize { unsafe { *self.value_offsets.get().add(i) } } /// Returns the element at index `i` as a byte slice. 
pub fn value(&self, i: usize) -> &[u8] { assert!(i < self.data.len(), "BinaryArray out of bounds access"); let offset = i.checked_add(self.data.offset()).unwrap(); unsafe { let pos = self.value_offset_at(offset); std::slice::from_raw_parts( self.value_data.get().offset(pos.to_isize()), (self.value_offset_at(offset + 1) - pos).to_usize().unwrap(), ) } } /// Creates a [GenericBinaryArray] from a vector of byte slices pub fn from_vec(v: Vec<&[u8]>) -> Self { let mut offsets = Vec::with_capacity(v.len() + 1); let mut values = Vec::new(); let mut length_so_far: OffsetSize = OffsetSize::zero(); offsets.push(length_so_far); for s in &v { length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap(); offsets.push(length_so_far); values.extend_from_slice(s); } let array_data = ArrayData::builder(OffsetSize::DATA_TYPE) .len(v.len()) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); GenericBinaryArray::<OffsetSize>::from(array_data) } /// Creates a [GenericBinaryArray] from a vector of Optional (null) byte slices pub fn from_opt_vec(v: Vec<Option<&[u8]>>) -> Self { v.into_iter().collect() } fn from_list(v: GenericListArray<OffsetSize>) -> Self { assert_eq!( v.data_ref().child_data()[0].child_data().len(), 0, "BinaryArray can only be created from list array of u8 values \ (i.e. List<PrimitiveArray<u8>>)." ); assert_eq!( v.data_ref().child_data()[0].data_type(), &DataType::UInt8, "BinaryArray can only be created from List<u8> arrays, mismatched data types." 
); let mut builder = ArrayData::builder(OffsetSize::DATA_TYPE) .len(v.len()) .add_buffer(v.data_ref().buffers()[0].clone()) .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone()); if let Some(bitmap) = v.data_ref().null_bitmap() { builder = builder .null_count(v.data_ref().null_count()) .null_bit_buffer(bitmap.bits.clone()) } let data = builder.build(); Self::from(data) } } impl<'a, T: BinaryOffsetSizeTrait> GenericBinaryArray<T> { /// constructs a new iterator pub fn iter(&'a self) -> GenericBinaryIter<'a, T> { GenericBinaryIter::<'a, T>::new(&self) } } impl<OffsetSize: BinaryOffsetSizeTrait> fmt::Debug for GenericBinaryArray<OffsetSize> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}BinaryArray\n[\n", OffsetSize::prefix())?; print_long_array(self, f, |array, index, f| { fmt::Debug::fmt(&array.value(index), f) })?; write!(f, "]") } } impl<OffsetSize: BinaryOffsetSizeTrait> Array for GenericBinaryArray<OffsetSize> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [$name]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [$name]. 
fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } impl<OffsetSize: BinaryOffsetSizeTrait> ListArrayOps<OffsetSize> for GenericBinaryArray<OffsetSize> { fn value_offset_at(&self, i: usize) -> OffsetSize { self.value_offset_at(i) } } impl<OffsetSize: BinaryOffsetSizeTrait> From<ArrayDataRef> for GenericBinaryArray<OffsetSize> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.data_type(), &<OffsetSize as BinaryOffsetSizeTrait>::DATA_TYPE, "[Large]BinaryArray expects Datatype::[Large]Binary" ); assert_eq!( data.buffers().len(), 2, "BinaryArray data should contain 2 buffers only (offsets and values)" ); let raw_value_offsets = data.buffers()[0].raw_data(); let value_data = data.buffers()[1].raw_data(); Self { data, value_offsets: RawPtrBox::new(as_aligned_pointer::<OffsetSize>( raw_value_offsets, )), value_data: RawPtrBox::new(value_data), } } } impl<Ptr, OffsetSize: BinaryOffsetSizeTrait> FromIterator<Option<Ptr>> for GenericBinaryArray<OffsetSize> where Ptr: AsRef<[u8]>, { fn from_iter<I: IntoIterator<Item = Option<Ptr>>>(iter: I) -> Self { let iter = iter.into_iter(); let (_, data_len) = iter.size_hint(); let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound. 
let mut offsets = Vec::with_capacity(data_len + 1); let mut values = Vec::new(); let mut null_buf = make_null_buffer(data_len); let mut length_so_far: OffsetSize = OffsetSize::zero(); offsets.push(length_so_far); { let null_slice = null_buf.data_mut(); for (i, s) in iter.enumerate() { if let Some(s) = s { let s = s.as_ref(); bit_util::set_bit(null_slice, i); length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap(); values.extend_from_slice(s); } // always add an element in offsets offsets.push(length_so_far); } } let array_data = ArrayData::builder(OffsetSize::DATA_TYPE) .len(data_len) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .null_bit_buffer(null_buf.freeze()) .build(); Self::from(array_data) } } /// An array where each element is a byte whose maximum length is represented by a i32. pub type BinaryArray = GenericBinaryArray<i32>; /// An array where each element is a byte whose maximum length is represented by a i64. pub type LargeBinaryArray = GenericBinaryArray<i64>; impl<'a, T: BinaryOffsetSizeTrait> IntoIterator for &'a GenericBinaryArray<T> { type Item = Option<&'a [u8]>; type IntoIter = GenericBinaryIter<'a, T>; fn into_iter(self) -> Self::IntoIter { GenericBinaryIter::<'a, T>::new(self) } } impl From<Vec<&[u8]>> for BinaryArray { fn from(v: Vec<&[u8]>) -> Self { BinaryArray::from_vec(v) } } impl From<Vec<Option<&[u8]>>> for BinaryArray { fn from(v: Vec<Option<&[u8]>>) -> Self { BinaryArray::from_opt_vec(v) } } impl From<Vec<&[u8]>> for LargeBinaryArray { fn from(v: Vec<&[u8]>) -> Self { LargeBinaryArray::from_vec(v) } } impl From<Vec<Option<&[u8]>>> for LargeBinaryArray { fn from(v: Vec<Option<&[u8]>>) -> Self { LargeBinaryArray::from_opt_vec(v) } } impl From<ListArray> for BinaryArray { fn from(v: ListArray) -> Self { BinaryArray::from_list(v) } } impl From<LargeListArray> for LargeBinaryArray { fn from(v: LargeListArray) -> Self { LargeBinaryArray::from_list(v) } } /// Like OffsetSizeTrait, 
but specialized for Strings // This allow us to expose a constant datatype for the GenericStringArray pub trait StringOffsetSizeTrait: OffsetSizeTrait { const DATA_TYPE: DataType; } impl StringOffsetSizeTrait for i32 { const DATA_TYPE: DataType = DataType::Utf8; } impl StringOffsetSizeTrait for i64 { const DATA_TYPE: DataType = DataType::LargeUtf8; } /// Generic struct for \[Large\]StringArray pub struct GenericStringArray<OffsetSize: StringOffsetSizeTrait> { data: ArrayDataRef, value_offsets: RawPtrBox<OffsetSize>, value_data: RawPtrBox<u8>, } impl<OffsetSize: StringOffsetSizeTrait> GenericStringArray<OffsetSize> { /// Returns the offset for the element at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_offset(&self, i: usize) -> OffsetSize { self.value_offset_at(self.data.offset() + i) } /// Returns the length for the element at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_length(&self, mut i: usize) -> OffsetSize { i += self.data.offset(); self.value_offset_at(i + 1) - self.value_offset_at(i) } /// Returns a clone of the value offset buffer pub fn value_offsets(&self) -> Buffer { self.data.buffers()[0].clone() } /// Returns a clone of the value data buffer pub fn value_data(&self) -> Buffer { self.data.buffers()[1].clone() } #[inline] fn value_offset_at(&self, i: usize) -> OffsetSize { unsafe { *self.value_offsets.get().add(i) } } /// Returns the element at index `i` as &str pub fn value(&self, i: usize) -> &str { assert!(i < self.data.len(), "StringArray out of bounds access"); let offset = i.checked_add(self.data.offset()).unwrap(); unsafe { let pos = self.value_offset_at(offset); let slice = std::slice::from_raw_parts( self.value_data.get().offset(pos.to_isize()), (self.value_offset_at(offset + 1) - pos).to_usize().unwrap(), ); std::str::from_utf8_unchecked(slice) } } fn from_list(v: GenericListArray<OffsetSize>) -> Self { assert_eq!( 
v.data().child_data()[0].child_data().len(), 0, "StringArray can only be created from list array of u8 values \ (i.e. List<PrimitiveArray<u8>>)." ); assert_eq!( v.data_ref().child_data()[0].data_type(), &DataType::UInt8, "StringArray can only be created from List<u8> arrays, mismatched data types." ); let mut builder = ArrayData::builder(OffsetSize::DATA_TYPE) .len(v.len()) .add_buffer(v.data_ref().buffers()[0].clone()) .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone()); if let Some(bitmap) = v.data().null_bitmap() { builder = builder .null_count(v.data_ref().null_count()) .null_bit_buffer(bitmap.bits.clone()) } let data = builder.build(); Self::from(data) } pub(crate) fn from_vec(v: Vec<&str>) -> Self { let mut offsets = Vec::with_capacity(v.len() + 1); let mut values = Vec::new(); let mut length_so_far = OffsetSize::zero(); offsets.push(length_so_far); for s in &v { length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap(); offsets.push(length_so_far); values.extend_from_slice(s.as_bytes()); } let array_data = ArrayData::builder(OffsetSize::DATA_TYPE) .len(v.len()) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); Self::from(array_data) } pub(crate) fn from_opt_vec(v: Vec<Option<&str>>) -> Self { let iter = v.iter().map(|e| e.map(|e| e.to_string())); GenericStringArray::from_iter(iter) } } impl<'a, Ptr, OffsetSize: StringOffsetSizeTrait> FromIterator<Ptr> for GenericStringArray<OffsetSize> where Ptr: Borrow<Option<String>>, { fn from_iter<I: IntoIterator<Item = Ptr>>(iter: I) -> Self { let iter = iter.into_iter(); let (_, data_len) = iter.size_hint(); let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound. 
let mut offsets = Vec::with_capacity(data_len + 1); let mut values = Vec::new(); let mut null_buf = make_null_buffer(data_len); let mut length_so_far = OffsetSize::zero(); offsets.push(length_so_far); for (i, s) in iter.enumerate() { if let Some(s) = s.borrow() { // set null bit let null_slice = null_buf.data_mut(); bit_util::set_bit(null_slice, i); length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap(); offsets.push(length_so_far); values.extend_from_slice(s.as_bytes()); } else { offsets.push(length_so_far); values.extend_from_slice(b""); } } let array_data = ArrayData::builder(OffsetSize::DATA_TYPE) .len(data_len) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .null_bit_buffer(null_buf.freeze()) .build(); Self::from(array_data) } } impl<'a, T: StringOffsetSizeTrait> IntoIterator for &'a GenericStringArray<T> { type Item = Option<&'a str>; type IntoIter = GenericStringIter<'a, T>; fn into_iter(self) -> Self::IntoIter { GenericStringIter::<'a, T>::new(self) } } impl<'a, T: StringOffsetSizeTrait> GenericStringArray<T> { /// constructs a new iterator pub fn iter(&'a self) -> GenericStringIter<'a, T> { GenericStringIter::<'a, T>::new(&self) } } impl<OffsetSize: StringOffsetSizeTrait> fmt::Debug for GenericStringArray<OffsetSize> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}StringArray\n[\n", OffsetSize::prefix())?; print_long_array(self, f, |array, index, f| { fmt::Debug::fmt(&array.value(index), f) })?; write!(f, "]") } } impl<OffsetSize: StringOffsetSizeTrait> Array for GenericStringArray<OffsetSize> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [$name]. 
fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [$name]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } impl<OffsetSize: StringOffsetSizeTrait> From<ArrayDataRef> for GenericStringArray<OffsetSize> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.data_type(), &<OffsetSize as StringOffsetSizeTrait>::DATA_TYPE, "[Large]StringArray expects Datatype::[Large]Utf8" ); assert_eq!( data.buffers().len(), 2, "StringArray data should contain 2 buffers only (offsets and values)" ); let raw_value_offsets = data.buffers()[0].raw_data(); let value_data = data.buffers()[1].raw_data(); Self { data, value_offsets: RawPtrBox::new(as_aligned_pointer::<OffsetSize>( raw_value_offsets, )), value_data: RawPtrBox::new(value_data), } } } impl<OffsetSize: StringOffsetSizeTrait> ListArrayOps<OffsetSize> for GenericStringArray<OffsetSize> { fn value_offset_at(&self, i: usize) -> OffsetSize { self.value_offset_at(i) } } /// An array where each element is a variable-sized sequence of bytes representing a string /// whose maximum length (in bytes) is represented by a i32. pub type StringArray = GenericStringArray<i32>; /// An array where each element is a variable-sized sequence of bytes representing a string /// whose maximum length (in bytes) is represented by a i64. 
pub type LargeStringArray = GenericStringArray<i64>; impl From<ListArray> for StringArray { fn from(v: ListArray) -> Self { StringArray::from_list(v) } } impl From<LargeListArray> for LargeStringArray { fn from(v: LargeListArray) -> Self { LargeStringArray::from_list(v) } } impl From<Vec<&str>> for StringArray { fn from(v: Vec<&str>) -> Self { StringArray::from_vec(v) } } impl From<Vec<&str>> for LargeStringArray { fn from(v: Vec<&str>) -> Self { LargeStringArray::from_vec(v) } } impl From<Vec<Option<&str>>> for StringArray { fn from(v: Vec<Option<&str>>) -> Self { StringArray::from_opt_vec(v) } } impl From<Vec<Option<&str>>> for LargeStringArray { fn from(v: Vec<Option<&str>>) -> Self { LargeStringArray::from_opt_vec(v) } } /// A type of `FixedSizeListArray` whose elements are binaries. pub struct FixedSizeBinaryArray { data: ArrayDataRef, value_data: RawPtrBox<u8>, length: i32, } impl FixedSizeBinaryArray { /// Returns the element at index `i` as a byte slice. pub fn value(&self, i: usize) -> &[u8] { assert!( i < self.data.len(), "FixedSizeBinaryArray out of bounds access" ); let offset = i.checked_add(self.data.offset()).unwrap(); unsafe { let pos = self.value_offset_at(offset); std::slice::from_raw_parts( self.value_data.get().offset(pos as isize), (self.value_offset_at(offset + 1) - pos) as usize, ) } } /// Returns the offset for the element at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_offset(&self, i: usize) -> i32 { self.value_offset_at(self.data.offset() + i) } /// Returns the length for an element. /// /// All elements have the same length as the array is a fixed size. 
#[inline] pub fn value_length(&self) -> i32 { self.length } /// Returns a clone of the value data buffer pub fn value_data(&self) -> Buffer { self.data.buffers()[0].clone() } #[inline] fn value_offset_at(&self, i: usize) -> i32 { self.length * i as i32 } } impl ListArrayOps<i32> for FixedSizeBinaryArray { fn value_offset_at(&self, i: usize) -> i32 { self.value_offset_at(i) } } impl From<ArrayDataRef> for FixedSizeBinaryArray { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 1, "FixedSizeBinaryArray data should contain 1 buffer only (values)" ); let value_data = data.buffers()[0].raw_data(); let length = match data.data_type() { DataType::FixedSizeBinary(len) => *len, _ => panic!("Expected data type to be FixedSizeBinary"), }; Self { data, value_data: RawPtrBox::new(value_data), length, } } } /// Creates a `FixedSizeBinaryArray` from `FixedSizeList<u8>` array impl From<FixedSizeListArray> for FixedSizeBinaryArray { fn from(v: FixedSizeListArray) -> Self { assert_eq!( v.data_ref().child_data()[0].child_data().len(), 0, "FixedSizeBinaryArray can only be created from list array of u8 values \ (i.e. FixedSizeList<PrimitiveArray<u8>>)." ); assert_eq!( v.data_ref().child_data()[0].data_type(), &DataType::UInt8, "FixedSizeBinaryArray can only be created from FixedSizeList<u8> arrays, mismatched data types." 
); let mut builder = ArrayData::builder(DataType::FixedSizeBinary(v.value_length())) .len(v.len()) .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone()); if let Some(bitmap) = v.data_ref().null_bitmap() { builder = builder .null_count(v.data_ref().null_count()) .null_bit_buffer(bitmap.bits.clone()) } let data = builder.build(); Self::from(data) } } impl fmt::Debug for FixedSizeBinaryArray { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "FixedSizeBinaryArray<{}>\n[\n", self.value_length())?; print_long_array(self, f, |array, index, f| { fmt::Debug::fmt(&array.value(index), f) })?; write!(f, "]") } } impl Array for FixedSizeBinaryArray { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [FixedSizeBinaryArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [FixedSizeBinaryArray]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } /// A nested array type where each child (called *field*) is represented by a separate /// array. pub struct StructArray { data: ArrayDataRef, pub(crate) boxed_fields: Vec<ArrayRef>, } impl StructArray { /// Returns the field at `pos`. 
pub fn column(&self, pos: usize) -> &ArrayRef { &self.boxed_fields[pos] } /// Return the number of fields in this struct array pub fn num_columns(&self) -> usize { self.boxed_fields.len() } /// Returns the fields of the struct array pub fn columns(&self) -> Vec<&ArrayRef> { self.boxed_fields.iter().collect() } /// Returns child array refs of the struct array pub fn columns_ref(&self) -> Vec<ArrayRef> { self.boxed_fields.clone() } /// Return field names in this struct array pub fn column_names(&self) -> Vec<&str> { match self.data.data_type() { Struct(fields) => fields .iter() .map(|f| f.name().as_str()) .collect::<Vec<&str>>(), _ => unreachable!("Struct array's data type is not struct!"), } } /// Return child array whose field name equals to column_name pub fn column_by_name(&self, column_name: &str) -> Option<&ArrayRef> { self.column_names() .iter() .position(|c| c == &column_name) .map(|pos| self.column(pos)) } } impl From<ArrayDataRef> for StructArray { fn from(data: ArrayDataRef) -> Self { let mut boxed_fields = vec![]; for cd in data.child_data() { let child_data = if data.offset != 0 || data.len != cd.len { slice_data(&cd, data.offset, data.len) } else { cd.clone() }; boxed_fields.push(make_array(child_data)); } Self { data, boxed_fields } } } impl TryFrom<Vec<(&str, ArrayRef)>> for StructArray { type Error = ArrowError; /// builds a StructArray from a vector of names and arrays. /// This errors if the values have a different length. /// An entry is set to Null when all values are null. fn try_from(values: Vec<(&str, ArrayRef)>) -> Result<Self> { let values_len = values.len(); // these will be populated let mut fields = Vec::with_capacity(values_len); let mut child_data = Vec::with_capacity(values_len); // len: the size of the arrays. let mut len: Option<usize> = None; // null: the null mask of the arrays. 
let mut null: Option<Buffer> = None; for (field_name, array) in values { let child_datum = array.data(); let child_datum_len = child_datum.len(); if let Some(len) = len { if len != child_datum_len { return Err(ArrowError::InvalidArgumentError( format!("Array of field \"{}\" has length {}, but previous elements have length {}. All arrays in every entry in a struct array must have the same length.", field_name, child_datum_len, len) )); } } else { len = Some(child_datum_len) } child_data.push(child_datum.clone()); fields.push(Field::new( field_name, array.data_type().clone(), child_datum.null_buffer().is_some(), )); if let Some(child_null_buffer) = child_datum.null_buffer() { null = Some(if let Some(null_buffer) = &null { buffer_bin_or(null_buffer, 0, child_null_buffer, 0, child_datum_len) } else { child_null_buffer.clone() }); } else if null.is_some() { // when one of the fields has no nulls, them there is no null in the array null = None; } } let len = len.unwrap(); let mut builder = ArrayData::builder(DataType::Struct(fields)) .len(len) .child_data(child_data); if let Some(null_buffer) = null { let null_count = len - bit_util::count_set_bits(null_buffer.data()); builder = builder.null_count(null_count).null_bit_buffer(null_buffer); } Ok(StructArray::from(builder.build())) } } impl Array for StructArray { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the length (i.e., number of elements) of this array fn len(&self) -> usize { self.data_ref().len() } /// Returns the total number of bytes of memory occupied by the buffers owned by this [StructArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [StructArray]. 
fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } impl From<Vec<(Field, ArrayRef)>> for StructArray { fn from(v: Vec<(Field, ArrayRef)>) -> Self { let (field_types, field_values): (Vec<_>, Vec<_>) = v.into_iter().unzip(); // Check the length of the child arrays let length = field_values[0].len(); for i in 1..field_values.len() { assert_eq!( length, field_values[i].len(), "all child arrays of a StructArray must have the same length" ); assert_eq!( field_types[i].data_type(), field_values[i].data().data_type(), "the field data types must match the array data in a StructArray" ) } let data = ArrayData::builder(DataType::Struct(field_types)) .child_data(field_values.into_iter().map(|a| a.data()).collect()) .len(length) .build(); Self::from(data) } } impl fmt::Debug for StructArray { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "StructArray\n[\n")?; for (child_index, name) in self.column_names().iter().enumerate() { let column = self.column(child_index); writeln!( f, "-- child {}: \"{}\" ({:?})", child_index, name, column.data_type() )?; fmt::Debug::fmt(column, f)?; writeln!(f)?; } write!(f, "]") } } impl From<(Vec<(Field, ArrayRef)>, Buffer, usize)> for StructArray { fn from(triple: (Vec<(Field, ArrayRef)>, Buffer, usize)) -> Self { let (field_types, field_values): (Vec<_>, Vec<_>) = triple.0.into_iter().unzip(); // Check the length of the child arrays let length = field_values[0].len(); for i in 1..field_values.len() { assert_eq!( length, field_values[i].len(), "all child arrays of a StructArray must have the same length" ); assert_eq!( field_types[i].data_type(), field_values[i].data().data_type(), "the field data types must match the array data in a StructArray" ) } let data = ArrayData::builder(DataType::Struct(field_types)) .null_bit_buffer(triple.1) .child_data(field_values.into_iter().map(|a| a.data()).collect()) .len(length) .null_count(triple.2) .build(); Self::from(data) } } /// A 
dictionary array where each element is a single value indexed by an integer key. /// This is mostly used to represent strings or a limited set of primitive types as integers, /// for example when doing NLP analysis or representing chromosomes by name. /// /// Example **with nullable** data: /// /// ``` /// use arrow::array::DictionaryArray; /// use arrow::datatypes::Int8Type; /// let test = vec!["a", "a", "b", "c"]; /// let array : DictionaryArray<Int8Type> = test.iter().map(|&x| if x == "b" {None} else {Some(x)}).collect(); /// assert_eq!(array.keys().collect::<Vec<Option<i8>>>(), vec![Some(0), Some(0), None, Some(1)]); /// ``` /// /// Example **without nullable** data: /// /// ``` /// use arrow::array::DictionaryArray; /// use arrow::datatypes::Int8Type; /// let test = vec!["a", "a", "b", "c"]; /// let array : DictionaryArray<Int8Type> = test.into_iter().collect(); /// assert_eq!(array.keys().collect::<Vec<Option<i8>>>(), vec![Some(0), Some(0), Some(1), Some(2)]); /// ``` pub struct DictionaryArray<K: ArrowPrimitiveType> { /// Array of keys, stored as a PrimitiveArray<K>. data: ArrayDataRef, /// Pointer to the key values. raw_values: RawPtrBox<K::Native>, /// Array of dictionary values (can by any DataType). values: ArrayRef, /// Values are ordered. is_ordered: bool, } #[derive(Debug)] enum Draining { Ready, Iterating, Finished, } #[derive(Debug)] pub struct NullableIter<'a, T> { data: &'a ArrayDataRef, // TODO: Use a pointer to the null bitmap. 
    // Raw pointer to the first key value (already adjusted for the array's offset).
    ptr: *const T,
    // Index of the next element to yield.
    i: usize,
    // Total number of elements in the underlying array.
    len: usize,
    // State used only by `next_back` (reverse iteration).
    draining: Draining,
}

impl<'a, T> std::iter::Iterator for NullableIter<'a, T>
where
    T: Clone,
{
    type Item = Option<T>;

    // Yields `None` at the end, `Some(None)` for a null slot and
    // `Some(Some(value))` for a valid slot.
    fn next(&mut self) -> Option<Self::Item> {
        let i = self.i;
        if i >= self.len {
            None
        } else if self.data.is_null(i) {
            self.i += 1;
            Some(None)
        } else {
            self.i += 1;
            // SAFETY-NOTE(review): assumes `ptr` is valid for all indices
            // < `len`; holds when `ptr`/`len` come from the same array data,
            // as in `DictionaryArray::keys` — confirm for any new caller.
            unsafe { Some(Some((&*self.ptr.add(i)).clone())) }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // NOTE(review): always reports the full array length, even after some
        // elements have already been consumed.
        (self.len, Some(self.len))
    }

    // O(1) skip of `n` elements from the current position.
    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        let i = self.i;
        if i + n >= self.len {
            self.i = self.len;
            None
        } else if self.data.is_null(i + n) {
            self.i += n + 1;
            Some(None)
        } else {
            self.i += n + 1;
            unsafe { Some(Some((&*self.ptr.add(i + n)).clone())) }
        }
    }
}

impl<'a, T> std::iter::DoubleEndedIterator for NullableIter<'a, T>
where
    T: Clone,
{
    // Reverse iteration driven by the `Draining` state machine.
    // NOTE(review): this reuses `self.i` (shared with `next`), so mixing
    // forward and backward calls on one iterator does not behave like a
    // standard double-ended iterator; the visible usages only call `rev()`.
    // NOTE(review): `self.len - 1` underflows (panics in debug) for an empty
    // array — confirm callers never reverse an empty iterator.
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.draining {
            Draining::Ready => {
                // First call of a reverse pass: start from the last element.
                self.draining = Draining::Iterating;
                self.i = self.len - 1;
                self.next_back()
            }
            Draining::Iterating => {
                let i = self.i;
                if i >= self.len {
                    None
                } else if self.data.is_null(i) {
                    // Step back one slot; hitting index 0 finishes the pass.
                    self.i = self.i.checked_sub(1).unwrap_or_else(|| {
                        self.draining = Draining::Finished;
                        0_usize
                    });
                    Some(None)
                } else {
                    match i.checked_sub(1) {
                        Some(idx) => {
                            self.i = idx;
                            unsafe { Some(Some((&*self.ptr.add(i)).clone())) }
                        }
                        _ => {
                            // Yielding element 0 completes the pass.
                            self.draining = Draining::Finished;
                            unsafe { Some(Some((&*self.ptr).clone())) }
                        }
                    }
                }
            }
            Draining::Finished => {
                // Reset so a later reverse pass can run again
                // (e.g. `rev().rev()` as exercised in the tests below).
                self.draining = Draining::Ready;
                None
            }
        }
    }
}

impl<'a, K: ArrowPrimitiveType> DictionaryArray<K> {
    /// Return an iterator to the keys of this dictionary.
    pub fn keys(&self) -> NullableIter<'_, K::Native> {
        NullableIter::<'_, K::Native> {
            data: &self.data,
            // Advance past `offset` elements so iterator index 0 is the first
            // logical key.
            ptr: unsafe { self.raw_values.get().add(self.data.offset()) },
            i: 0,
            len: self.data.len(),
            draining: Draining::Ready,
        }
    }

    /// Returns an array view of the keys of this dictionary
    pub fn keys_array(&self) -> PrimitiveArray<K> {
        let data = self.data_ref();
        // Rebuild the key data with the key's primitive type and the same
        // buffers/null info, but without the values child array.
        let keys_data = ArrayData::new(
            K::DATA_TYPE,
            data.len(),
            Some(data.null_count()),
            data.null_buffer().cloned(),
            data.offset(),
            data.buffers().to_vec(),
            vec![],
        );
        PrimitiveArray::<K>::from(Arc::new(keys_data))
    }

    /// Returns the lookup key by doing reverse dictionary lookup
    /// (linear scan over the values; panics if the values are not strings).
    pub fn lookup_key(&self, value: &str) -> Option<K::Native> {
        let rd_buf: &StringArray =
            self.values.as_any().downcast_ref::<StringArray>().unwrap();

        (0..rd_buf.len())
            .position(|i| rd_buf.value(i) == value)
            .map(K::Native::from_usize)
            .flatten()
    }

    /// Returns an `ArrayRef` to the dictionary values.
    pub fn values(&self) -> ArrayRef {
        self.values.clone()
    }

    /// Returns a clone of the value type of this list.
    pub fn value_type(&self) -> DataType {
        self.values.data_ref().data_type().clone()
    }

    /// The length of the dictionary is the length of the keys array.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Whether this dictionary is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    // Currently exists for compatibility purposes with Arrow IPC.
    pub fn is_ordered(&self) -> bool {
        self.is_ordered
    }
}

/// Constructs a `DictionaryArray` from an array data reference.
impl<T: ArrowPrimitiveType> From<ArrayDataRef> for DictionaryArray<T> {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.buffers().len(),
            1,
            "DictionaryArray data should contain a single buffer only (keys)."
        );
        assert_eq!(
            data.child_data().len(),
            1,
            "DictionaryArray should contain a single child array (values)."
        );
        let raw_values = data.buffers()[0].raw_data();
        let dtype: &DataType = data.data_type();
        let values = make_array(data.child_data()[0].clone());
        if let DataType::Dictionary(_, _) = dtype {
            Self {
                data,
                raw_values: RawPtrBox::new(raw_values as *const T::Native),
                values,
                // NOTE(review): ordering information is not carried by the
                // data, so it defaults to unordered here.
                is_ordered: false,
            }
        } else {
            panic!("DictionaryArray must have Dictionary data type.")
        }
    }
}

/// Constructs a `DictionaryArray` from an iterator of optional strings.
impl<'a, T: ArrowPrimitiveType + ArrowDictionaryKeyType> FromIterator<Option<&'a str>>
    for DictionaryArray<T>
{
    fn from_iter<I: IntoIterator<Item = Option<&'a str>>>(iter: I) -> Self {
        let it = iter.into_iter();
        // Pre-size the key builder from the iterator's lower size bound.
        let (lower, _) = it.size_hint();
        let key_builder = PrimitiveBuilder::<T>::new(lower);
        let value_builder = StringBuilder::new(256);
        let mut builder = StringDictionaryBuilder::new(key_builder, value_builder);
        it.for_each(|i| {
            if let Some(i) = i {
                // Note: impl ... for Result<DictionaryArray<T>> fails with
                // error[E0117]: only traits defined in the current crate can be implemented for arbitrary types
                builder
                    .append(i)
                    .expect("Unable to append a value to a dictionary array.");
            } else {
                builder
                    .append_null()
                    .expect("Unable to append a null value to a dictionary array.");
            }
        });

        builder.finish()
    }
}

/// Constructs a `DictionaryArray` from an iterator of strings.
impl<'a, T: ArrowPrimitiveType + ArrowDictionaryKeyType> FromIterator<&'a str>
    for DictionaryArray<T>
{
    // Builds the dictionary via a StringDictionaryBuilder; every item is
    // appended as a non-null value.
    fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> Self {
        let it = iter.into_iter();
        // Pre-size the key builder from the iterator's lower size bound.
        let (lower, _) = it.size_hint();
        let key_builder = PrimitiveBuilder::<T>::new(lower);
        let value_builder = StringBuilder::new(256);
        let mut builder = StringDictionaryBuilder::new(key_builder, value_builder);
        it.for_each(|i| {
            builder
                .append(i)
                .expect("Unable to append a value to a dictionary array.");
        });

        builder.finish()
    }
}

impl<T: ArrowPrimitiveType> Array for DictionaryArray<T> {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [DictionaryArray].
    fn get_buffer_memory_size(&self) -> usize {
        // Keys (self.data) plus the dictionary values array.
        self.data.get_buffer_memory_size() + self.values().get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [DictionaryArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size()
            + self.values().get_array_memory_size()
            + mem::size_of_val(self)
    }
}

impl<T: ArrowPrimitiveType> fmt::Debug for DictionaryArray<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Print at most MAX_LEN keys, followed by "..." when truncated.
        const MAX_LEN: usize = 10;
        let keys: Vec<_> = self.keys().take(MAX_LEN).collect();
        // NOTE(review): `count()` walks all keys a second time; cheap for the
        // sizes printed here, but the collected length could be reused instead.
        let elipsis = if self.keys().count() > MAX_LEN {
            "..."
} else { "" }; writeln!( f, "DictionaryArray {{keys: {:?}{} values: {:?}}}", keys, elipsis, self.values ) } } #[cfg(test)] mod tests { use super::*; use std::sync::Arc; use std::thread; use crate::buffer::Buffer; use crate::datatypes::{DataType, Field}; use crate::{bitmap::Bitmap, memory}; #[test] fn test_primitive_array_from_vec() { let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice()); let buf2 = buf.clone(); let arr = Int32Array::new(5, buf, 0, 0); let slice = unsafe { std::slice::from_raw_parts(arr.raw_values(), 5) }; assert_eq!(buf2, arr.values()); assert_eq!(&[0, 1, 2, 3, 4], slice); assert_eq!(5, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); for i in 0..5 { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(i as i32, arr.value(i)); } assert_eq!(64, arr.get_buffer_memory_size()); let internals_of_primitive_array = 8 + 72; // RawPtrBox & Arc<ArrayData> combined. assert_eq!( arr.get_buffer_memory_size() + internals_of_primitive_array, arr.get_array_memory_size() ); } #[test] fn test_primitive_array_from_vec_option() { // Test building a primitive array with null values let arr = Int32Array::from(vec![Some(0), None, Some(2), None, Some(4)]); assert_eq!(5, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(2, arr.null_count()); for i in 0..5 { if i % 2 == 0 { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(i as i32, arr.value(i)); } else { assert!(arr.is_null(i)); assert!(!arr.is_valid(i)); } } assert_eq!(128, arr.get_buffer_memory_size()); let internals_of_primitive_array = 8 + 72 + 16; // RawPtrBox & Arc<ArrayData> and it's null_bitmap combined. 
assert_eq!( arr.get_buffer_memory_size() + internals_of_primitive_array, arr.get_array_memory_size() ); } #[test] fn test_date64_array_from_vec_option() { // Test building a primitive array with null values // we use Int32 and Int64 as a backing array, so all Int32 and Int64 conventions // work let arr: PrimitiveArray<Date64Type> = vec![Some(1550902545147), None, Some(1550902545147)].into(); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); for i in 0..3 { if i % 2 == 0 { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(1550902545147, arr.value(i)); // roundtrip to and from datetime assert_eq!( 1550902545147, arr.value_as_datetime(i).unwrap().timestamp_millis() ); } else { assert!(arr.is_null(i)); assert!(!arr.is_valid(i)); } } } #[test] fn test_time32_millisecond_array_from_vec() { // 1: 00:00:00.001 // 37800005: 10:30:00.005 // 86399210: 23:59:59.210 let arr: PrimitiveArray<Time32MillisecondType> = vec![1, 37_800_005, 86_399_210].into(); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); let formatted = vec!["00:00:00.001", "10:30:00.005", "23:59:59.210"]; for i in 0..3 { // check that we can't create dates or datetimes from time instances assert_eq!(None, arr.value_as_datetime(i)); assert_eq!(None, arr.value_as_date(i)); let time = arr.value_as_time(i).unwrap(); assert_eq!(formatted[i], time.format("%H:%M:%S%.3f").to_string()); } } #[test] fn test_time64_nanosecond_array_from_vec() { // Test building a primitive array with null values // we use Int32 and Int64 as a backing array, so all Int32 and Int64 conventions // work // 1e6: 00:00:00.001 // 37800005e6: 10:30:00.005 // 86399210e6: 23:59:59.210 let arr: PrimitiveArray<Time64NanosecondType> = vec![1_000_000, 37_800_005_000_000, 86_399_210_000_000].into(); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); let formatted = vec!["00:00:00.001", "10:30:00.005", "23:59:59.210"]; for i in 
0..3 { // check that we can't create dates or datetimes from time instances assert_eq!(None, arr.value_as_datetime(i)); assert_eq!(None, arr.value_as_date(i)); let time = arr.value_as_time(i).unwrap(); assert_eq!(formatted[i], time.format("%H:%M:%S%.3f").to_string()); } } #[test] fn test_interval_array_from_vec() { // intervals are currently not treated specially, but are Int32 and Int64 arrays let arr = IntervalYearMonthArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, arr.value(2)); // a day_time interval contains days and milliseconds, but we do not yet have accessors for the values let arr = IntervalDayTimeArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, arr.value(2)); } #[test] fn test_duration_array_from_vec() { let arr = DurationSecondArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, arr.value(2)); let arr = DurationMillisecondArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, arr.value(2)); let arr = DurationMicrosecondArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, arr.value(2)); let arr = DurationNanosecondArray::from(vec![Some(1), None, Some(-5)]); assert_eq!(3, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); assert_eq!(1, arr.value(0)); assert!(arr.is_null(1)); assert_eq!(-5, 
arr.value(2)); } #[test] fn test_timestamp_array_from_vec() { let arr = TimestampSecondArray::from_vec(vec![1, -5], None); assert_eq!(2, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); assert_eq!(1, arr.value(0)); assert_eq!(-5, arr.value(1)); let arr = TimestampMillisecondArray::from_vec(vec![1, -5], None); assert_eq!(2, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); assert_eq!(1, arr.value(0)); assert_eq!(-5, arr.value(1)); let arr = TimestampMicrosecondArray::from_vec(vec![1, -5], None); assert_eq!(2, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); assert_eq!(1, arr.value(0)); assert_eq!(-5, arr.value(1)); let arr = TimestampNanosecondArray::from_vec(vec![1, -5], None); assert_eq!(2, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); assert_eq!(1, arr.value(0)); assert_eq!(-5, arr.value(1)); } #[test] fn test_primitive_array_slice() { let arr = Int32Array::from(vec![ Some(0), None, Some(2), None, Some(4), Some(5), Some(6), None, None, ]); assert_eq!(9, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(4, arr.null_count()); let arr2 = arr.slice(2, 5); assert_eq!(5, arr2.len()); assert_eq!(2, arr2.offset()); assert_eq!(1, arr2.null_count()); for i in 0..arr2.len() { assert_eq!(i == 1, arr2.is_null(i)); assert_eq!(i != 1, arr2.is_valid(i)); } let arr3 = arr2.slice(2, 3); assert_eq!(3, arr3.len()); assert_eq!(4, arr3.offset()); assert_eq!(0, arr3.null_count()); let int_arr = arr3.as_any().downcast_ref::<Int32Array>().unwrap(); assert_eq!(4, int_arr.value(0)); assert_eq!(5, int_arr.value(1)); assert_eq!(6, int_arr.value(2)); } #[test] fn test_boolean_array_slice() { let arr = BooleanArray::from(vec![ Some(true), None, Some(false), None, Some(true), Some(false), Some(true), Some(false), None, Some(true), ]); assert_eq!(10, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(3, arr.null_count()); let arr2 = arr.slice(3, 5); assert_eq!(5, arr2.len()); assert_eq!(3, 
arr2.offset()); assert_eq!(1, arr2.null_count()); let bool_arr = arr2.as_any().downcast_ref::<BooleanArray>().unwrap(); assert_eq!(false, bool_arr.is_valid(0)); assert_eq!(true, bool_arr.is_valid(1)); assert_eq!(true, bool_arr.value(1)); assert_eq!(true, bool_arr.is_valid(2)); assert_eq!(false, bool_arr.value(2)); assert_eq!(true, bool_arr.is_valid(3)); assert_eq!(true, bool_arr.value(3)); assert_eq!(true, bool_arr.is_valid(4)); assert_eq!(false, bool_arr.value(4)); } #[test] fn test_value_slice_no_bounds_check() { let arr = Int32Array::from(vec![2, 3, 4]); let _slice = arr.value_slice(0, 4); } #[test] fn test_int32_fmt_debug() { let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice()); let arr = Int32Array::new(5, buf, 0, 0); assert_eq!( "PrimitiveArray<Int32>\n[\n 0,\n 1,\n 2,\n 3,\n 4,\n]", format!("{:?}", arr) ); } #[test] fn test_fmt_debug_up_to_20_elements() { (1..=20).for_each(|i| { let values = (0..i).collect::<Vec<i16>>(); let array_expected = format!( "PrimitiveArray<Int16>\n[\n{}\n]", values .iter() .map(|v| { format!(" {},", v) }) .collect::<Vec<String>>() .join("\n") ); let array = Int16Array::from(values); assert_eq!(array_expected, format!("{:?}", array)); }) } #[test] fn test_int32_with_null_fmt_debug() { let mut builder = Int32Array::builder(3); builder.append_slice(&[0, 1]).unwrap(); builder.append_null().unwrap(); builder.append_slice(&[3, 4]).unwrap(); let arr = builder.finish(); assert_eq!( "PrimitiveArray<Int32>\n[\n 0,\n 1,\n null,\n 3,\n 4,\n]", format!("{:?}", arr) ); } #[test] fn test_boolean_fmt_debug() { let buf = Buffer::from(&[true, false, false].to_byte_slice()); let arr = BooleanArray::new(3, buf, 0, 0); assert_eq!( "PrimitiveArray<Boolean>\n[\n true,\n false,\n false,\n]", format!("{:?}", arr) ); } #[test] fn test_boolean_with_null_fmt_debug() { let mut builder = BooleanArray::builder(3); builder.append_value(true).unwrap(); builder.append_null().unwrap(); builder.append_value(false).unwrap(); let arr = builder.finish(); assert_eq!( 
"PrimitiveArray<Boolean>\n[\n true,\n null,\n false,\n]", format!("{:?}", arr) ); } #[test] fn test_timestamp_fmt_debug() { let arr: PrimitiveArray<TimestampMillisecondType> = TimestampMillisecondArray::from_vec(vec![1546214400000, 1546214400000], None); assert_eq!( "PrimitiveArray<Timestamp(Millisecond, None)>\n[\n 2018-12-31T00:00:00,\n 2018-12-31T00:00:00,\n]", format!("{:?}", arr) ); } #[test] fn test_date32_fmt_debug() { let arr: PrimitiveArray<Date32Type> = vec![12356, 13548].into(); assert_eq!( "PrimitiveArray<Date32(Day)>\n[\n 2003-10-31,\n 2007-02-04,\n]", format!("{:?}", arr) ); } #[test] fn test_time32second_fmt_debug() { let arr: PrimitiveArray<Time32SecondType> = vec![7201, 60054].into(); assert_eq!( "PrimitiveArray<Time32(Second)>\n[\n 02:00:01,\n 16:40:54,\n]", format!("{:?}", arr) ); } #[test] fn test_primitive_array_builder() { // Test building a primitive array with ArrayData builder and offset let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice()); let buf2 = buf.clone(); let data = ArrayData::builder(DataType::Int32) .len(5) .offset(2) .add_buffer(buf) .build(); let arr = Int32Array::from(data); assert_eq!(buf2, arr.values()); assert_eq!(5, arr.len()); assert_eq!(0, arr.null_count()); for i in 0..3 { assert_eq!((i + 2) as i32, arr.value(i)); } } #[test] #[should_panic(expected = "PrimitiveArray data should contain a single buffer only \ (values buffer)")] fn test_primitive_array_invalid_buffer_len() { let data = ArrayData::builder(DataType::Int32).len(5).build(); Int32Array::from(data); } #[test] fn test_boolean_array_new() { // 00000010 01001000 let buf = Buffer::from([72_u8, 2_u8]); let buf2 = buf.clone(); let arr = BooleanArray::new(10, buf, 0, 0); assert_eq!(buf2, arr.values()); assert_eq!(10, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); for i in 0..10 { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(i == 3 || i == 6 || i == 9, arr.value(i), "failed at {}", i) } } #[test] fn 
test_boolean_array_from_vec() { let buf = Buffer::from([10_u8]); let arr = BooleanArray::from(vec![false, true, false, true]); assert_eq!(buf, arr.values()); assert_eq!(4, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(0, arr.null_count()); for i in 0..4 { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(i == 1 || i == 3, arr.value(i), "failed at {}", i) } } #[test] fn test_boolean_array_from_vec_option() { let buf = Buffer::from([10_u8]); let arr = BooleanArray::from(vec![Some(false), Some(true), None, Some(true)]); assert_eq!(buf, arr.values()); assert_eq!(4, arr.len()); assert_eq!(0, arr.offset()); assert_eq!(1, arr.null_count()); for i in 0..4 { if i == 2 { assert!(arr.is_null(i)); assert!(!arr.is_valid(i)); } else { assert!(!arr.is_null(i)); assert!(arr.is_valid(i)); assert_eq!(i == 1 || i == 3, arr.value(i), "failed at {}", i) } } } #[test] fn test_boolean_array_builder() { // Test building a boolean array with ArrayData builder and offset // 000011011 let buf = Buffer::from([27_u8]); let buf2 = buf.clone(); let data = ArrayData::builder(DataType::Boolean) .len(5) .offset(2) .add_buffer(buf) .build(); let arr = BooleanArray::from(data); assert_eq!(buf2, arr.values()); assert_eq!(5, arr.len()); assert_eq!(2, arr.offset()); assert_eq!(0, arr.null_count()); for i in 0..3 { assert_eq!(i != 0, arr.value(i), "failed at {}", i); } } #[test] #[should_panic(expected = "PrimitiveArray data should contain a single buffer only \ (values buffer)")] fn test_boolean_array_invalid_buffer_len() { let data = ArrayData::builder(DataType::Boolean).len(5).build(); BooleanArray::from(data); } #[test] fn test_list_array() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(8) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice())) .build(); // Construct a buffer for value offsets, for the nested array: // [[0, 1, 2], [3, 4, 5], [6, 7]] let value_offsets = Buffer::from(&[0, 3, 6, 8].to_byte_slice()); // Construct a 
list array from the above two let list_data_type = DataType::List(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type.clone()) .len(3) .add_buffer(value_offsets.clone()) .add_child_data(value_data.clone()) .build(); let list_array = ListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!(6, list_array.value_offset(2)); assert_eq!(2, list_array.value_length(2)); assert_eq!( 0, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); for i in 0..3 { assert!(list_array.is_valid(i)); assert!(!list_array.is_null(i)); } // Now test with a non-zero offset let list_data = ArrayData::builder(list_data_type) .len(3) .offset(1) .add_buffer(value_offsets) .add_child_data(value_data.clone()) .build(); let list_array = ListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!(6, list_array.value_offset(1)); assert_eq!(2, list_array.value_length(1)); assert_eq!( 3, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); } #[test] fn test_large_list_array() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(8) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice())) .build(); // Construct a buffer for value offsets, for the nested array: // [[0, 1, 2], [3, 4, 5], [6, 7]] let value_offsets = Buffer::from(&[0i64, 3, 6, 8].to_byte_slice()); // Construct a list array from the above two let list_data_type = DataType::LargeList(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type.clone()) .len(3) .add_buffer(value_offsets.clone()) .add_child_data(value_data.clone()) .build(); 
let list_array = LargeListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!(6, list_array.value_offset(2)); assert_eq!(2, list_array.value_length(2)); assert_eq!( 0, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); for i in 0..3 { assert!(list_array.is_valid(i)); assert!(!list_array.is_null(i)); } // Now test with a non-zero offset let list_data = ArrayData::builder(list_data_type) .len(3) .offset(1) .add_buffer(value_offsets) .add_child_data(value_data.clone()) .build(); let list_array = LargeListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!(6, list_array.value_offset(1)); assert_eq!(2, list_array.value_length(1)); assert_eq!( 3, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); } #[test] fn test_dictionary_array() { // Construct a value array let value_data = ArrayData::builder(DataType::Int8) .len(8) .add_buffer(Buffer::from( &[10_i8, 11, 12, 13, 14, 15, 16, 17].to_byte_slice(), )) .build(); // Construct a buffer for value offsets, for the nested array: let keys = Buffer::from(&[2_i16, 3, 4].to_byte_slice()); // Construct a dictionary array from the above two let key_type = DataType::Int16; let value_type = DataType::Int8; let dict_data_type = DataType::Dictionary(Box::new(key_type), Box::new(value_type)); let dict_data = ArrayData::builder(dict_data_type.clone()) .len(3) .add_buffer(keys.clone()) .add_child_data(value_data.clone()) .build(); let dict_array = Int16DictionaryArray::from(dict_data); let values = dict_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int8, dict_array.value_type()); 
assert_eq!(3, dict_array.len()); // Null count only makes sense in terms of the component arrays. assert_eq!(0, dict_array.null_count()); assert_eq!(0, dict_array.values().null_count()); assert_eq!(Some(Some(3)), dict_array.keys().nth(1)); assert_eq!(Some(Some(4)), dict_array.keys().nth(2)); assert_eq!( dict_array.keys().collect::<Vec<Option<i16>>>(), vec![Some(2), Some(3), Some(4)] ); assert_eq!( dict_array.keys().rev().collect::<Vec<Option<i16>>>(), vec![Some(4), Some(3), Some(2)] ); assert_eq!( dict_array.keys().rev().rev().collect::<Vec<Option<i16>>>(), vec![Some(2), Some(3), Some(4)] ); // Now test with a non-zero offset let dict_data = ArrayData::builder(dict_data_type) .len(2) .offset(1) .add_buffer(keys) .add_child_data(value_data.clone()) .build(); let dict_array = Int16DictionaryArray::from(dict_data); let values = dict_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int8, dict_array.value_type()); assert_eq!(2, dict_array.len()); assert_eq!(Some(Some(3)), dict_array.keys().nth(0)); assert_eq!(Some(Some(4)), dict_array.keys().nth(1)); assert_eq!( dict_array.keys().collect::<Vec<Option<i16>>>(), vec![Some(3), Some(4)] ); } #[test] fn test_dictionary_array_key_reverse() { let test = vec!["a", "a", "b", "c"]; let array: DictionaryArray<Int8Type> = test .iter() .map(|&x| if x == "b" { None } else { Some(x) }) .collect(); assert_eq!( array.keys().rev().collect::<Vec<Option<i8>>>(), vec![Some(1), None, Some(0), Some(0)] ); } #[test] fn test_fixed_size_list_array() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(9) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7, 8].to_byte_slice())) .build(); // Construct a list array from the above two let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 3); let list_data = ArrayData::builder(list_data_type.clone()) .len(3) .add_child_data(value_data.clone()) .build(); let list_array = FixedSizeListArray::from(list_data); let values = 
list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!(6, list_array.value_offset(2)); assert_eq!(3, list_array.value_length()); assert_eq!( 0, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); for i in 0..3 { assert!(list_array.is_valid(i)); assert!(!list_array.is_null(i)); } // Now test with a non-zero offset let list_data = ArrayData::builder(list_data_type) .len(3) .offset(1) .add_child_data(value_data.clone()) .build(); let list_array = FixedSizeListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(3, list_array.len()); assert_eq!(0, list_array.null_count()); assert_eq!( 3, list_array .value(0) .as_any() .downcast_ref::<Int32Array>() .unwrap() .value(0) ); assert_eq!(6, list_array.value_offset(1)); assert_eq!(3, list_array.value_length()); } #[test] #[should_panic( expected = "FixedSizeListArray child array length should be a multiple of 3" )] fn test_fixed_size_list_array_unequal_children() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(8) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice())) .build(); // Construct a list array from the above two let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 3); let list_data = ArrayData::builder(list_data_type) .len(3) .add_child_data(value_data) .build(); FixedSizeListArray::from(list_data); } #[test] fn test_list_array_slice() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(10) .add_buffer(Buffer::from( &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(), )) .build(); // Construct a buffer for value offsets, for the nested array: // [[0, 1], null, null, [2, 3], [4, 5], null, [6, 7, 8], null, [9]] let value_offsets = 
Buffer::from(&[0, 2, 2, 2, 4, 6, 6, 9, 9, 10].to_byte_slice()); // 01011001 00000001 let mut null_bits: [u8; 2] = [0; 2]; bit_util::set_bit(&mut null_bits, 0); bit_util::set_bit(&mut null_bits, 3); bit_util::set_bit(&mut null_bits, 4); bit_util::set_bit(&mut null_bits, 6); bit_util::set_bit(&mut null_bits, 8); // Construct a list array from the above two let list_data_type = DataType::List(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type) .len(9) .add_buffer(value_offsets) .add_child_data(value_data.clone()) .null_bit_buffer(Buffer::from(null_bits)) .build(); let list_array = ListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(9, list_array.len()); assert_eq!(4, list_array.null_count()); assert_eq!(2, list_array.value_offset(3)); assert_eq!(2, list_array.value_length(3)); let sliced_array = list_array.slice(1, 6); assert_eq!(6, sliced_array.len()); assert_eq!(1, sliced_array.offset()); assert_eq!(3, sliced_array.null_count()); for i in 0..sliced_array.len() { if bit_util::get_bit(&null_bits, sliced_array.offset() + i) { assert!(sliced_array.is_valid(i)); } else { assert!(sliced_array.is_null(i)); } } // Check offset and length for each non-null value. 
let sliced_list_array = sliced_array.as_any().downcast_ref::<ListArray>().unwrap(); assert_eq!(2, sliced_list_array.value_offset(2)); assert_eq!(2, sliced_list_array.value_length(2)); assert_eq!(4, sliced_list_array.value_offset(3)); assert_eq!(2, sliced_list_array.value_length(3)); assert_eq!(6, sliced_list_array.value_offset(5)); assert_eq!(3, sliced_list_array.value_length(5)); } #[test] fn test_large_list_array_slice() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(10) .add_buffer(Buffer::from( &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(), )) .build(); // Construct a buffer for value offsets, for the nested array: // [[0, 1], null, null, [2, 3], [4, 5], null, [6, 7, 8], null, [9]] let value_offsets = Buffer::from(&[0i64, 2, 2, 2, 4, 6, 6, 9, 9, 10].to_byte_slice()); // 01011001 00000001 let mut null_bits: [u8; 2] = [0; 2]; bit_util::set_bit(&mut null_bits, 0); bit_util::set_bit(&mut null_bits, 3); bit_util::set_bit(&mut null_bits, 4); bit_util::set_bit(&mut null_bits, 6); bit_util::set_bit(&mut null_bits, 8); // Construct a list array from the above two let list_data_type = DataType::LargeList(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type) .len(9) .add_buffer(value_offsets) .add_child_data(value_data.clone()) .null_bit_buffer(Buffer::from(null_bits)) .build(); let list_array = LargeListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(9, list_array.len()); assert_eq!(4, list_array.null_count()); assert_eq!(2, list_array.value_offset(3)); assert_eq!(2, list_array.value_length(3)); let sliced_array = list_array.slice(1, 6); assert_eq!(6, sliced_array.len()); assert_eq!(1, sliced_array.offset()); assert_eq!(3, sliced_array.null_count()); for i in 0..sliced_array.len() { if bit_util::get_bit(&null_bits, sliced_array.offset() + i) { assert!(sliced_array.is_valid(i)); } else { 
assert!(sliced_array.is_null(i)); } } // Check offset and length for each non-null value. let sliced_list_array = sliced_array .as_any() .downcast_ref::<LargeListArray>() .unwrap(); assert_eq!(2, sliced_list_array.value_offset(2)); assert_eq!(2, sliced_list_array.value_length(2)); assert_eq!(4, sliced_list_array.value_offset(3)); assert_eq!(2, sliced_list_array.value_length(3)); assert_eq!(6, sliced_list_array.value_offset(5)); assert_eq!(3, sliced_list_array.value_length(5)); } #[test] fn test_fixed_size_list_array_slice() { // Construct a value array let value_data = ArrayData::builder(DataType::Int32) .len(10) .add_buffer(Buffer::from( &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(), )) .build(); // Set null buts for the nested array: // [[0, 1], null, null, [6, 7], [8, 9]] // 01011001 00000001 let mut null_bits: [u8; 1] = [0; 1]; bit_util::set_bit(&mut null_bits, 0); bit_util::set_bit(&mut null_bits, 3); bit_util::set_bit(&mut null_bits, 4); // Construct a fixed size list array from the above two let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 2); let list_data = ArrayData::builder(list_data_type) .len(5) .add_child_data(value_data.clone()) .null_bit_buffer(Buffer::from(null_bits)) .build(); let list_array = FixedSizeListArray::from(list_data); let values = list_array.values(); assert_eq!(value_data, values.data()); assert_eq!(DataType::Int32, list_array.value_type()); assert_eq!(5, list_array.len()); assert_eq!(2, list_array.null_count()); assert_eq!(6, list_array.value_offset(3)); assert_eq!(2, list_array.value_length()); let sliced_array = list_array.slice(1, 4); assert_eq!(4, sliced_array.len()); assert_eq!(1, sliced_array.offset()); assert_eq!(2, sliced_array.null_count()); for i in 0..sliced_array.len() { if bit_util::get_bit(&null_bits, sliced_array.offset() + i) { assert!(sliced_array.is_valid(i)); } else { assert!(sliced_array.is_null(i)); } } // Check offset and length for each non-null value. 
let sliced_list_array = sliced_array .as_any() .downcast_ref::<FixedSizeListArray>() .unwrap(); assert_eq!(2, sliced_list_array.value_length()); assert_eq!(6, sliced_list_array.value_offset(2)); assert_eq!(8, sliced_list_array.value_offset(3)); } #[test] #[should_panic( expected = "ListArray data should contain a single buffer only (value offsets)" )] fn test_list_array_invalid_buffer_len() { let value_data = ArrayData::builder(DataType::Int32) .len(8) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice())) .build(); let list_data_type = DataType::List(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type) .len(3) .add_child_data(value_data) .build(); ListArray::from(list_data); } #[test] #[should_panic( expected = "ListArray should contain a single child array (values array)" )] fn test_list_array_invalid_child_array_len() { let value_offsets = Buffer::from(&[0, 2, 5, 7].to_byte_slice()); let list_data_type = DataType::List(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type) .len(3) .add_buffer(value_offsets) .build(); ListArray::from(list_data); } #[test] #[should_panic(expected = "offsets do not start at zero")] fn test_list_array_invalid_value_offset_start() { let value_data = ArrayData::builder(DataType::Int32) .len(8) .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice())) .build(); let value_offsets = Buffer::from(&[2, 2, 5, 7].to_byte_slice()); let list_data_type = DataType::List(Box::new(DataType::Int32)); let list_data = ArrayData::builder(list_data_type) .len(3) .add_buffer(value_offsets) .add_child_data(value_data) .build(); ListArray::from(list_data); } #[test] fn test_binary_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let offsets: [i32; 4] = [0, 5, 5, 12]; // Array data: ["hello", "", "parquet"] let array_data = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) 
.add_buffer(Buffer::from(&values[..])) .build(); let binary_array = BinaryArray::from(array_data); assert_eq!(3, binary_array.len()); assert_eq!(0, binary_array.null_count()); assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0)); assert_eq!([] as [u8; 0], binary_array.value(1)); assert_eq!( [b'p', b'a', b'r', b'q', b'u', b'e', b't'], binary_array.value(2) ); assert_eq!(5, binary_array.value_offset(2)); assert_eq!(7, binary_array.value_length(2)); for i in 0..3 { assert!(binary_array.is_valid(i)); assert!(!binary_array.is_null(i)); } // Test binary array with offset let array_data = ArrayData::builder(DataType::Binary) .len(4) .offset(1) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array = BinaryArray::from(array_data); assert_eq!( [b'p', b'a', b'r', b'q', b'u', b'e', b't'], binary_array.value(1) ); assert_eq!(5, binary_array.value_offset(0)); assert_eq!(0, binary_array.value_length(0)); assert_eq!(5, binary_array.value_offset(1)); assert_eq!(7, binary_array.value_length(1)); } #[test] fn test_large_binary_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let offsets: [i64; 4] = [0, 5, 5, 12]; // Array data: ["hello", "", "parquet"] let array_data = ArrayData::builder(DataType::LargeBinary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array = LargeBinaryArray::from(array_data); assert_eq!(3, binary_array.len()); assert_eq!(0, binary_array.null_count()); assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0)); assert_eq!([] as [u8; 0], binary_array.value(1)); assert_eq!( [b'p', b'a', b'r', b'q', b'u', b'e', b't'], binary_array.value(2) ); assert_eq!(5, binary_array.value_offset(2)); assert_eq!(7, binary_array.value_length(2)); for i in 0..3 { assert!(binary_array.is_valid(i)); assert!(!binary_array.is_null(i)); } // Test binary array with offset 
let array_data = ArrayData::builder(DataType::LargeBinary) .len(4) .offset(1) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array = LargeBinaryArray::from(array_data); assert_eq!( [b'p', b'a', b'r', b'q', b'u', b'e', b't'], binary_array.value(1) ); assert_eq!(5, binary_array.value_offset(0)); assert_eq!(0, binary_array.value_length(0)); assert_eq!(5, binary_array.value_offset(1)); assert_eq!(7, binary_array.value_length(1)); } #[test] fn test_binary_array_from_list_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let values_data = ArrayData::builder(DataType::UInt8) .len(12) .add_buffer(Buffer::from(&values[..])) .build(); let offsets: [i32; 4] = [0, 5, 5, 12]; // Array data: ["hello", "", "parquet"] let array_data1 = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array1 = BinaryArray::from(array_data1); let array_data2 = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_child_data(values_data) .build(); let list_array = ListArray::from(array_data2); let binary_array2 = BinaryArray::from(list_array); assert_eq!(2, binary_array2.data().buffers().len()); assert_eq!(0, binary_array2.data().child_data().len()); assert_eq!(binary_array1.len(), binary_array2.len()); assert_eq!(binary_array1.null_count(), binary_array2.null_count()); for i in 0..binary_array1.len() { assert_eq!(binary_array1.value(i), binary_array2.value(i)); assert_eq!(binary_array1.value_offset(i), binary_array2.value_offset(i)); assert_eq!(binary_array1.value_length(i), binary_array2.value_length(i)); } } #[test] fn test_large_binary_array_from_list_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let values_data = ArrayData::builder(DataType::UInt8) .len(12) 
.add_buffer(Buffer::from(&values[..]))
            .build();
        let offsets: [i64; 4] = [0, 5, 5, 12];

        // Array data: ["hello", "", "parquet"]
        let array_data1 = ArrayData::builder(DataType::LargeBinary)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let binary_array1 = LargeBinaryArray::from(array_data1);

        // NOTE(review): this builder declares DataType::Binary although the
        // result is consumed as a LargeListArray below — presumably
        // DataType::LargeBinary (or a List type) was intended; confirm against
        // LargeListArray's construction checks.
        let array_data2 = ArrayData::builder(DataType::Binary)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_child_data(values_data)
            .build();
        let list_array = LargeListArray::from(array_data2);
        let binary_array2 = LargeBinaryArray::from(list_array);

        // Conversion must flatten the child data into plain buffers.
        assert_eq!(2, binary_array2.data().buffers().len());
        assert_eq!(0, binary_array2.data().child_data().len());

        // Both construction paths must yield identical arrays.
        assert_eq!(binary_array1.len(), binary_array2.len());
        assert_eq!(binary_array1.null_count(), binary_array2.null_count());
        for i in 0..binary_array1.len() {
            assert_eq!(binary_array1.value(i), binary_array2.value(i));
            assert_eq!(binary_array1.value_offset(i), binary_array2.value_offset(i));
            assert_eq!(binary_array1.value_length(i), binary_array2.value_length(i));
        }
    }

    // Shared driver for the `from_opt_vec` tests below; `T` selects the offset
    // width (i32 => BinaryArray, i64 => LargeBinaryArray).
    fn test_generic_binary_array_from_opt_vec<T: BinaryOffsetSizeTrait>() {
        let values: Vec<Option<&[u8]>> =
            vec![Some(b"one"), Some(b"two"), None, Some(b""), Some(b"three")];
        let array = GenericBinaryArray::<T>::from_opt_vec(values);
        assert_eq!(array.len(), 5);
        assert_eq!(array.value(0), b"one");
        assert_eq!(array.value(1), b"two");
        // Index 3 is an empty — but non-null — entry.
        assert_eq!(array.value(3), b"");
        assert_eq!(array.value(4), b"three");
        assert_eq!(array.is_null(0), false);
        assert_eq!(array.is_null(1), false);
        assert_eq!(array.is_null(2), true);
        assert_eq!(array.is_null(3), false);
        assert_eq!(array.is_null(4), false);
    }

    #[test]
    fn test_large_binary_array_from_opt_vec() {
        test_generic_binary_array_from_opt_vec::<i64>()
    }

    #[test]
    fn test_binary_array_from_opt_vec() {
        test_generic_binary_array_from_opt_vec::<i32>()
    }

    #[test]
    fn test_string_array_from_u8_slice() {
        let values: Vec<&str> = vec!["hello", "", "parquet"];

        // Array data:
["hello", "", "parquet"] let string_array = StringArray::from(values); assert_eq!(3, string_array.len()); assert_eq!(0, string_array.null_count()); assert_eq!("hello", string_array.value(0)); assert_eq!("", string_array.value(1)); assert_eq!("parquet", string_array.value(2)); assert_eq!(5, string_array.value_offset(2)); assert_eq!(7, string_array.value_length(2)); for i in 0..3 { assert!(string_array.is_valid(i)); assert!(!string_array.is_null(i)); } } #[test] #[should_panic(expected = "[Large]StringArray expects Datatype::[Large]Utf8")] fn test_string_array_from_int() { let array = LargeStringArray::from(vec!["a", "b"]); StringArray::from(array.data()); } #[test] fn test_large_string_array_from_u8_slice() { let values: Vec<&str> = vec!["hello", "", "parquet"]; // Array data: ["hello", "", "parquet"] let string_array = LargeStringArray::from(values); assert_eq!(3, string_array.len()); assert_eq!(0, string_array.null_count()); assert_eq!("hello", string_array.value(0)); assert_eq!("", string_array.value(1)); assert_eq!("parquet", string_array.value(2)); assert_eq!(5, string_array.value_offset(2)); assert_eq!(7, string_array.value_length(2)); for i in 0..3 { assert!(string_array.is_valid(i)); assert!(!string_array.is_null(i)); } } #[test] fn test_nested_string_array() { let string_builder = StringBuilder::new(3); let mut list_of_string_builder = ListBuilder::new(string_builder); list_of_string_builder.values().append_value("foo").unwrap(); list_of_string_builder.values().append_value("bar").unwrap(); list_of_string_builder.append(true).unwrap(); list_of_string_builder .values() .append_value("foobar") .unwrap(); list_of_string_builder.append(true).unwrap(); let list_of_strings = list_of_string_builder.finish(); assert_eq!(list_of_strings.len(), 2); let first_slot = list_of_strings.value(0); let first_list = first_slot.as_any().downcast_ref::<StringArray>().unwrap(); assert_eq!(first_list.len(), 2); assert_eq!(first_list.value(0), "foo"); 
assert_eq!(first_list.value(1), "bar"); let second_slot = list_of_strings.value(1); let second_list = second_slot.as_any().downcast_ref::<StringArray>().unwrap(); assert_eq!(second_list.len(), 1); assert_eq!(second_list.value(0), "foobar"); } #[test] #[should_panic( expected = "BinaryArray can only be created from List<u8> arrays, mismatched \ data types." )] fn test_binary_array_from_incorrect_list_array_type() { let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let values_data = ArrayData::builder(DataType::UInt32) .len(12) .add_buffer(Buffer::from(values[..].to_byte_slice())) .build(); let offsets: [i32; 4] = [0, 5, 5, 12]; let array_data = ArrayData::builder(DataType::Utf8) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_child_data(values_data) .build(); let list_array = ListArray::from(array_data); BinaryArray::from(list_array); } #[test] #[should_panic( expected = "BinaryArray can only be created from list array of u8 values \ (i.e. List<PrimitiveArray<u8>>)." 
)] fn test_binary_array_from_incorrect_list_array() { let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let values_data = ArrayData::builder(DataType::UInt32) .len(12) .add_buffer(Buffer::from(values[..].to_byte_slice())) .add_child_data(ArrayData::builder(DataType::Boolean).build()) .build(); let offsets: [i32; 4] = [0, 5, 5, 12]; let array_data = ArrayData::builder(DataType::Utf8) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_child_data(values_data) .build(); let list_array = ListArray::from(array_data); BinaryArray::from(list_array); } #[test] fn test_fixed_size_binary_array() { let values: [u8; 15] = *b"hellotherearrow"; let array_data = ArrayData::builder(DataType::FixedSizeBinary(5)) .len(3) .add_buffer(Buffer::from(&values[..])) .build(); let fixed_size_binary_array = FixedSizeBinaryArray::from(array_data); assert_eq!(3, fixed_size_binary_array.len()); assert_eq!(0, fixed_size_binary_array.null_count()); assert_eq!( [b'h', b'e', b'l', b'l', b'o'], fixed_size_binary_array.value(0) ); assert_eq!( [b't', b'h', b'e', b'r', b'e'], fixed_size_binary_array.value(1) ); assert_eq!( [b'a', b'r', b'r', b'o', b'w'], fixed_size_binary_array.value(2) ); assert_eq!(5, fixed_size_binary_array.value_length()); assert_eq!(10, fixed_size_binary_array.value_offset(2)); for i in 0..3 { assert!(fixed_size_binary_array.is_valid(i)); assert!(!fixed_size_binary_array.is_null(i)); } // Test binary array with offset let array_data = ArrayData::builder(DataType::FixedSizeBinary(5)) .len(2) .offset(1) .add_buffer(Buffer::from(&values[..])) .build(); let fixed_size_binary_array = FixedSizeBinaryArray::from(array_data); assert_eq!( [b't', b'h', b'e', b'r', b'e'], fixed_size_binary_array.value(0) ); assert_eq!( [b'a', b'r', b'r', b'o', b'w'], fixed_size_binary_array.value(1) ); assert_eq!(2, fixed_size_binary_array.len()); assert_eq!(5, fixed_size_binary_array.value_offset(0)); assert_eq!(5, fixed_size_binary_array.value_length()); assert_eq!(10, 
fixed_size_binary_array.value_offset(1));
    }

    // Building a FixedSizeBinaryArray from a FixedSizeList whose child is not
    // u8 must panic with the message below.
    #[test]
    #[should_panic(
        expected = "FixedSizeBinaryArray can only be created from list array of u8 values \
                    (i.e. FixedSizeList<PrimitiveArray<u8>>)."
    )]
    fn test_fixed_size_binary_array_from_incorrect_list_array() {
        let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
        let values_data = ArrayData::builder(DataType::UInt32)
            .len(12)
            .add_buffer(Buffer::from(values[..].to_byte_slice()))
            .add_child_data(ArrayData::builder(DataType::Boolean).build())
            .build();
        let array_data =
            ArrayData::builder(DataType::FixedSizeList(Box::new(DataType::Binary), 4))
                .len(3)
                .add_child_data(values_data)
                .build();
        let list_array = FixedSizeListArray::from(array_data);
        FixedSizeBinaryArray::from(list_array);
    }

    // Accessing index 4 of a 3-element array must panic with an out-of-bounds
    // message. The value bytes are ASCII for "helloparquet".
    #[test]
    #[should_panic(expected = "BinaryArray out of bounds access")]
    fn test_binary_array_get_value_index_out_of_bound() {
        let values: [u8; 12] = [104, 101, 108, 108, 111, 112, 97, 114, 113, 117, 101, 116];
        let offsets: [i32; 4] = [0, 5, 5, 12];
        let array_data = ArrayData::builder(DataType::Binary)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let binary_array = BinaryArray::from(array_data);
        binary_array.value(4);
    }

    // Same out-of-bounds check for the Utf8 (StringArray) variant.
    #[test]
    #[should_panic(expected = "StringArray out of bounds access")]
    fn test_string_array_get_value_index_out_of_bound() {
        let values: [u8; 12] = [
            b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't',
        ];
        let offsets: [i32; 4] = [0, 5, 5, 12];
        let array_data = ArrayData::builder(DataType::Utf8)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let string_array = StringArray::from(array_data);
        string_array.value(4);
    }

    // Debug formatting for a FixedSizeBinaryArray of width 5 over
    // "hellotherearrow" (3 slots of 5 bytes each).
    #[test]
    fn test_binary_array_fmt_debug() {
        let values: [u8; 15] = *b"hellotherearrow";
        let array_data = ArrayData::builder(DataType::FixedSizeBinary(5))
            .len(3)
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let arr = FixedSizeBinaryArray::from(array_data);
assert_eq!( "FixedSizeBinaryArray<5>\n[\n [104, 101, 108, 108, 111],\n [116, 104, 101, 114, 101],\n [97, 114, 114, 111, 119],\n]", format!("{:?}", arr) ); } #[test] fn test_string_array_fmt_debug() { let arr: StringArray = vec!["hello", "arrow"].into(); assert_eq!( "StringArray\n[\n \"hello\",\n \"arrow\",\n]", format!("{:?}", arr) ); } #[test] fn test_large_string_array_fmt_debug() { let arr: LargeStringArray = vec!["hello", "arrow"].into(); assert_eq!( "LargeStringArray\n[\n \"hello\",\n \"arrow\",\n]", format!("{:?}", arr) ); } #[test] fn test_struct_array_builder() { let boolean_data = ArrayData::builder(DataType::Boolean) .len(4) .add_buffer(Buffer::from([false, false, true, true].to_byte_slice())) .build(); let int_data = ArrayData::builder(DataType::Int64) .len(4) .add_buffer(Buffer::from([42, 28, 19, 31].to_byte_slice())) .build(); let mut field_types = vec![]; field_types.push(Field::new("a", DataType::Boolean, false)); field_types.push(Field::new("b", DataType::Int64, false)); let struct_array_data = ArrayData::builder(DataType::Struct(field_types)) .len(4) .add_child_data(boolean_data.clone()) .add_child_data(int_data.clone()) .build(); let struct_array = StructArray::from(struct_array_data); assert_eq!(boolean_data, struct_array.column(0).data()); assert_eq!(int_data, struct_array.column(1).data()); } #[test] fn test_struct_array_from() { let boolean_data = ArrayData::builder(DataType::Boolean) .len(4) .add_buffer(Buffer::from([12_u8])) .build(); let int_data = ArrayData::builder(DataType::Int32) .len(4) .add_buffer(Buffer::from([42, 28, 19, 31].to_byte_slice())) .build(); let struct_array = StructArray::from(vec![ ( Field::new("b", DataType::Boolean, false), Arc::new(BooleanArray::from(vec![false, false, true, true])) as Arc<Array>, ), ( Field::new("c", DataType::Int32, false), Arc::new(Int32Array::from(vec![42, 28, 19, 31])), ), ]); assert_eq!(boolean_data, struct_array.column(0).data()); assert_eq!(int_data, struct_array.column(1).data()); 
assert_eq!(4, struct_array.len()); assert_eq!(0, struct_array.null_count()); assert_eq!(0, struct_array.offset()); } /// validates that the in-memory representation follows [the spec](https://arrow.apache.org/docs/format/Columnar.html#struct-layout) #[test] fn test_struct_array_from_vec() { let strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, Some("mark"), ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![Some(1), Some(2), None, Some(4)])); let arr = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]) .unwrap(); let struct_data = arr.data(); assert_eq!(4, struct_data.len()); assert_eq!(1, struct_data.null_count()); assert_eq!( // 00001011 &Some(Bitmap::from(Buffer::from(&[11_u8]))), struct_data.null_bitmap() ); let expected_string_data = ArrayData::builder(DataType::Utf8) .len(4) .null_count(2) .null_bit_buffer(Buffer::from(&[9_u8])) .add_buffer(Buffer::from(&[0, 3, 3, 3, 7].to_byte_slice())) .add_buffer(Buffer::from("joemark".as_bytes())) .build(); let expected_int_data = ArrayData::builder(DataType::Int32) .len(4) .null_count(1) .null_bit_buffer(Buffer::from(&[11_u8])) .add_buffer(Buffer::from(&[1, 2, 0, 4].to_byte_slice())) .build(); assert_eq!(expected_string_data, arr.column(0).data()); // TODO: implement equality for ArrayData assert_eq!(expected_int_data.len(), arr.column(1).data().len()); assert_eq!( expected_int_data.null_count(), arr.column(1).data().null_count() ); assert_eq!( expected_int_data.null_bitmap(), arr.column(1).data().null_bitmap() ); let expected_value_buf = expected_int_data.buffers()[0].clone(); let actual_value_buf = arr.column(1).data().buffers()[0].clone(); for i in 0..expected_int_data.len() { if !expected_int_data.is_null(i) { assert_eq!( expected_value_buf.data()[i * 4..(i + 1) * 4], actual_value_buf.data()[i * 4..(i + 1) * 4] ); } } } #[test] fn test_struct_array_from_vec_error() { let strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, // 3 
elements, not 4 ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![Some(1), Some(2), None, Some(4)])); let arr = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]); match arr { Err(ArrowError::InvalidArgumentError(e)) => { assert!(e.starts_with("Array of field \"f2\" has length 4, but previous elements have length 3.")); } _ => assert!(false, "This test got an unexpected error type"), }; } #[test] #[should_panic( expected = "the field data types must match the array data in a StructArray" )] fn test_struct_array_from_mismatched_types() { StructArray::from(vec![ ( Field::new("b", DataType::Int16, false), Arc::new(BooleanArray::from(vec![false, false, true, true])) as Arc<Array>, ), ( Field::new("c", DataType::Utf8, false), Arc::new(Int32Array::from(vec![42, 28, 19, 31])), ), ]); } #[test] fn test_struct_array_slice() { let boolean_data = ArrayData::builder(DataType::Boolean) .len(5) .add_buffer(Buffer::from([0b00010000])) .null_bit_buffer(Buffer::from([0b00010001])) .build(); let int_data = ArrayData::builder(DataType::Int32) .len(5) .add_buffer(Buffer::from([0, 28, 42, 0, 0].to_byte_slice())) .null_bit_buffer(Buffer::from([0b00000110])) .build(); let mut field_types = vec![]; field_types.push(Field::new("a", DataType::Boolean, false)); field_types.push(Field::new("b", DataType::Int32, false)); let struct_array_data = ArrayData::builder(DataType::Struct(field_types)) .len(5) .add_child_data(boolean_data.clone()) .add_child_data(int_data.clone()) .null_bit_buffer(Buffer::from([0b00010111])) .build(); let struct_array = StructArray::from(struct_array_data); assert_eq!(5, struct_array.len()); assert_eq!(1, struct_array.null_count()); assert!(struct_array.is_valid(0)); assert!(struct_array.is_valid(1)); assert!(struct_array.is_valid(2)); assert!(struct_array.is_null(3)); assert!(struct_array.is_valid(4)); assert_eq!(boolean_data, struct_array.column(0).data()); assert_eq!(int_data, struct_array.column(1).data()); let c0 = 
struct_array.column(0); let c0 = c0.as_any().downcast_ref::<BooleanArray>().unwrap(); assert_eq!(5, c0.len()); assert_eq!(3, c0.null_count()); assert!(c0.is_valid(0)); assert_eq!(false, c0.value(0)); assert!(c0.is_null(1)); assert!(c0.is_null(2)); assert!(c0.is_null(3)); assert!(c0.is_valid(4)); assert_eq!(true, c0.value(4)); let c1 = struct_array.column(1); let c1 = c1.as_any().downcast_ref::<Int32Array>().unwrap(); assert_eq!(5, c1.len()); assert_eq!(3, c1.null_count()); assert!(c1.is_null(0)); assert!(c1.is_valid(1)); assert_eq!(28, c1.value(1)); assert!(c1.is_valid(2)); assert_eq!(42, c1.value(2)); assert!(c1.is_null(3)); assert!(c1.is_null(4)); let sliced_array = struct_array.slice(2, 3); let sliced_array = sliced_array.as_any().downcast_ref::<StructArray>().unwrap(); assert_eq!(3, sliced_array.len()); assert_eq!(2, sliced_array.offset()); assert_eq!(1, sliced_array.null_count()); assert!(sliced_array.is_valid(0)); assert!(sliced_array.is_null(1)); assert!(sliced_array.is_valid(2)); let sliced_c0 = sliced_array.column(0); let sliced_c0 = sliced_c0.as_any().downcast_ref::<BooleanArray>().unwrap(); assert_eq!(3, sliced_c0.len()); assert_eq!(2, sliced_c0.offset()); assert!(sliced_c0.is_null(0)); assert!(sliced_c0.is_null(1)); assert!(sliced_c0.is_valid(2)); assert_eq!(true, sliced_c0.value(2)); let sliced_c1 = sliced_array.column(1); let sliced_c1 = sliced_c1.as_any().downcast_ref::<Int32Array>().unwrap(); assert_eq!(3, sliced_c1.len()); assert_eq!(2, sliced_c1.offset()); assert!(sliced_c1.is_valid(0)); assert_eq!(42, sliced_c1.value(0)); assert!(sliced_c1.is_null(1)); assert!(sliced_c1.is_null(2)); } #[test] #[should_panic( expected = "all child arrays of a StructArray must have the same length" )] fn test_invalid_struct_child_array_lengths() { StructArray::from(vec![ ( Field::new("b", DataType::Float32, false), Arc::new(Float32Array::from(vec![1.1])) as Arc<Array>, ), ( Field::new("c", DataType::Float64, false), Arc::new(Float64Array::from(vec![2.2, 3.3])), ), 
]);
    }

    // Building an array from a buffer whose start pointer has been shifted off
    // its allocation boundary (`buf.slice(1)`) must panic with
    // "memory is not aligned".
    #[test]
    #[should_panic(expected = "memory is not aligned")]
    fn test_primitive_array_alignment() {
        let ptr = memory::allocate_aligned(8);
        // SAFETY-relevant: `ptr` was just allocated with length/capacity 8.
        let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
        let buf2 = buf.slice(1);
        let array_data = ArrayData::builder(DataType::Int32).add_buffer(buf2).build();
        Int32Array::from(array_data);
    }

    // Same misalignment check, applied to the offsets buffer of a ListArray.
    #[test]
    #[should_panic(expected = "memory is not aligned")]
    fn test_list_array_alignment() {
        let ptr = memory::allocate_aligned(8);
        let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
        let buf2 = buf.slice(1);

        let values: [i32; 8] = [0; 8];
        let value_data = ArrayData::builder(DataType::Int32)
            .add_buffer(Buffer::from(values.to_byte_slice()))
            .build();

        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .add_buffer(buf2)
            .add_child_data(value_data)
            .build();
        ListArray::from(list_data);
    }

    // Same misalignment check for the offsets buffer of a BinaryArray.
    #[test]
    #[should_panic(expected = "memory is not aligned")]
    fn test_binary_array_alignment() {
        let ptr = memory::allocate_aligned(8);
        let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
        let buf2 = buf.slice(1);

        let values: [u8; 12] = [0; 12];
        let array_data = ArrayData::builder(DataType::Binary)
            .add_buffer(buf2)
            .add_buffer(Buffer::from(&values[..]))
            .build();
        BinaryArray::from(array_data);
    }

    // Arrays are Send + Sync (per the `Array` trait bounds), so a value can be
    // read from a spawned thread.
    #[test]
    fn test_access_array_concurrently() {
        let a = Int32Array::from(vec![5, 6, 7, 8, 9]);
        let ret = thread::spawn(move || a.value(3)).join();

        assert!(ret.is_ok());
        assert_eq!(8, ret.ok().unwrap());
    }

    // Debug output for a dictionary array with a null key; the expected-string
    // assertion continues on the following source line.
    #[test]
    fn test_dictionary_array_fmt_debug() {
        let key_builder = PrimitiveBuilder::<UInt8Type>::new(3);
        let value_builder = PrimitiveBuilder::<UInt32Type>::new(2);
        let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
        builder.append(12345678).unwrap();
        builder.append_null().unwrap();
        builder.append(22345678).unwrap();
        let array = builder.finish();
        assert_eq!(
            "DictionaryArray {keys: [Some(0), None, Some(1)] values: PrimitiveArray<UInt32>\n[\n 12345678,\n 22345678,\n]}\n",
format!("{:?}", array) ); let key_builder = PrimitiveBuilder::<UInt8Type>::new(20); let value_builder = PrimitiveBuilder::<UInt32Type>::new(2); let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder); for _ in 0..20 { builder.append(1).unwrap(); } let array = builder.finish(); assert_eq!( "DictionaryArray {keys: [Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)]... values: PrimitiveArray<UInt32>\n[\n 1,\n]}\n", format!("{:?}", array) ); } #[test] fn test_dictionary_array_from_iter() { let test = vec!["a", "a", "b", "c"]; let array: DictionaryArray<Int8Type> = test .iter() .map(|&x| if x == "b" { None } else { Some(x) }) .collect(); assert_eq!( "DictionaryArray {keys: [Some(0), Some(0), None, Some(1)] values: StringArray\n[\n \"a\",\n \"c\",\n]}\n", format!("{:?}", array) ); let array: DictionaryArray<Int8Type> = test.into_iter().collect(); assert_eq!( "DictionaryArray {keys: [Some(0), Some(0), Some(1), Some(2)] values: StringArray\n[\n \"a\",\n \"b\",\n \"c\",\n]}\n", format!("{:?}", array) ); } #[test] fn test_dictionary_array_reverse_lookup_key() { let test = vec!["a", "a", "b", "c"]; let array: DictionaryArray<Int8Type> = test.into_iter().collect(); assert_eq!(array.lookup_key("c"), Some(2)); // Direction of building a dictionary is the iterator direction let test = vec!["t3", "t3", "t2", "t2", "t1", "t3", "t4", "t1", "t0"]; let array: DictionaryArray<Int8Type> = test.into_iter().collect(); assert_eq!(array.lookup_key("t1"), Some(2)); assert_eq!(array.lookup_key("non-existent"), None); } #[test] fn test_dictionary_keys_as_primitive_array() { let test = vec!["a", "b", "c", "a"]; let array: DictionaryArray<Int8Type> = test.into_iter().collect(); let keys = array.keys_array(); assert_eq!(&DataType::Int8, keys.data_type()); assert_eq!(0, keys.null_count()); assert_eq!(&[0, 1, 2, 0], keys.value_slice(0, keys.len())); } #[test] fn test_dictionary_keys_as_primitive_array_with_null() { let test = 
vec![Some("a"), None, Some("b"), None, None, Some("a")]; let array: DictionaryArray<Int32Type> = test.into_iter().collect(); let keys = array.keys_array(); assert_eq!(&DataType::Int32, keys.data_type()); assert_eq!(3, keys.null_count()); assert_eq!(true, keys.is_valid(0)); assert_eq!(false, keys.is_valid(1)); assert_eq!(true, keys.is_valid(2)); assert_eq!(false, keys.is_valid(3)); assert_eq!(false, keys.is_valid(4)); assert_eq!(true, keys.is_valid(5)); assert_eq!(0, keys.value(0)); assert_eq!(1, keys.value(2)); assert_eq!(0, keys.value(5)); } } ARROW-10476: [Rust] Allow string arrays to be built from Option<&str> or Option<String> Currently, our code-base supports creating a `StringArray` from an iterator of `String`. However, from arrow's perspective, we should not care if it is a `String` or a `&str`, as long as it can be represented by an `AsRef<str>`. A user sometimes is able to create an iterator of `&str` instead of `String`, and should not have to convert one to the other before passing it to Arrow. This PR makes this change. Closes #8575 from jorgecarleitao/array_from_str Authored-by: Jorge C. Leitao <dfffc62510208f06492f17a48dc6ce87aa15abf6@gmail.com> Signed-off-by: Neville Dipale <4e1ab218b24f06847d563ff3773c7517c139f863@gmail.com> // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. 
See the License for the
// specific language governing permissions and limitations
// under the License.

use std::any::Any;
use std::borrow::Borrow;
use std::convert::{From, TryFrom};
use std::fmt;
use std::io::Write;
use std::iter::{FromIterator, IntoIterator};
use std::mem;
use std::sync::Arc;

use chrono::prelude::*;
use num::Num;

use super::*;
use crate::array::builder::StringDictionaryBuilder;
use crate::array::equal::JsonEqual;
use crate::buffer::{buffer_bin_or, Buffer, MutableBuffer};
use crate::datatypes::DataType::Struct;
use crate::datatypes::*;
use crate::memory;
use crate::{
    error::{ArrowError, Result},
    util::bit_util,
};

// Unit-conversion constants shared by the temporal helpers below
// (`as_datetime`, `as_time`, …). All are i64 to match Arrow's temporal
// native types.

/// Number of seconds in a day
const SECONDS_IN_DAY: i64 = 86_400;
/// Number of milliseconds in a second
const MILLISECONDS: i64 = 1_000;
/// Number of microseconds in a second
const MICROSECONDS: i64 = 1_000_000;
/// Number of nanoseconds in a second
const NANOSECONDS: i64 = 1_000_000_000;

/// Trait for dealing with different types of array at runtime when the type of the
/// array is not known in advance.
///
/// Implementors expose their concrete type via [`Array::as_any`] and their
/// backing [`ArrayData`] via [`Array::data`]/[`Array::data_ref`]; every other
/// method has a default implementation in terms of `data_ref()`.
// NOTE(review): `&Any` (bare trait object) is pre-2018-edition syntax; the
// rest of the file follows the same convention, so it is kept as-is here.
pub trait Array: fmt::Debug + Send + Sync + ArrayEqual + JsonEqual {
    /// Returns the array as [`Any`](std::any::Any) so that it can be
    /// downcasted to a specific implementation.
    ///
    /// # Example:
    ///
    /// ```
    /// use std::sync::Arc;
    /// use arrow::array::Int32Array;
    /// use arrow::datatypes::{Schema, Field, DataType};
    /// use arrow::record_batch::RecordBatch;
    ///
    /// # fn main() -> arrow::error::Result<()> {
    /// let id = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// let batch = RecordBatch::try_new(
    ///     Arc::new(Schema::new(vec![Field::new("id", DataType::Int32, false)])),
    ///     vec![Arc::new(id)]
    /// )?;
    ///
    /// let int32array = batch
    ///     .column(0)
    ///     .as_any()
    ///     .downcast_ref::<Int32Array>()
    ///     .expect("Failed to downcast");
    /// # Ok(())
    /// # }
    /// ```
    fn as_any(&self) -> &Any;

    /// Returns a reference-counted pointer to the underlying data of this array.
    fn data(&self) -> ArrayDataRef;

    /// Returns a borrowed & reference-counted pointer to the underlying data of this array.
    fn data_ref(&self) -> &ArrayDataRef;

    /// Returns a reference to the [`DataType`](crate::datatypes::DataType) of this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::datatypes::DataType;
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(*array.data_type(), DataType::Int32);
    /// ```
    fn data_type(&self) -> &DataType {
        self.data_ref().data_type()
    }

    /// Returns a zero-copy slice of this array with the indicated offset and length.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// // Make slice over the values [2, 3, 4]
    /// let array_slice = array.slice(1, 3);
    ///
    /// assert!(array_slice.equals(&Int32Array::from(vec![2, 3, 4])));
    /// ```
    fn slice(&self, offset: usize, length: usize) -> ArrayRef {
        make_array(slice_data(self.data_ref(), offset, length))
    }

    /// Returns the length (i.e., number of elements) of this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(array.len(), 5);
    /// ```
    fn len(&self) -> usize {
        self.data_ref().len()
    }

    /// Returns whether this array is empty.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    ///
    /// assert_eq!(array.is_empty(), false);
    /// ```
    fn is_empty(&self) -> bool {
        self.data_ref().is_empty()
    }

    /// Returns the offset into the underlying data used by this array(-slice).
    /// Note that the underlying data can be shared by many arrays.
    /// This defaults to `0`.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
    /// // Make slice over the values [2, 3, 4]
    /// let array_slice = array.slice(1, 3);
    ///
    /// assert_eq!(array.offset(), 0);
    /// assert_eq!(array_slice.offset(), 1);
    /// ```
    fn offset(&self) -> usize {
        self.data_ref().offset()
    }

    /// Returns whether the element at `index` is null.
    /// When using this function on a slice, the index is relative to the slice.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![Some(1), None]);
    ///
    /// assert_eq!(array.is_null(0), false);
    /// assert_eq!(array.is_null(1), true);
    /// ```
    fn is_null(&self, index: usize) -> bool {
        // The slice offset is added here so callers index relative to the slice,
        // while the validity bitmap is indexed in absolute positions.
        let data = self.data_ref();
        data.is_null(data.offset() + index)
    }

    /// Returns whether the element at `index` is not null.
    /// When using this function on a slice, the index is relative to the slice.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// let array = Int32Array::from(vec![Some(1), None]);
    ///
    /// assert_eq!(array.is_valid(0), true);
    /// assert_eq!(array.is_valid(1), false);
    /// ```
    fn is_valid(&self, index: usize) -> bool {
        let data = self.data_ref();
        data.is_valid(data.offset() + index)
    }

    /// Returns the total number of null values in this array.
    ///
    /// # Example:
    ///
    /// ```
    /// use arrow::array::{Array, Int32Array};
    ///
    /// // Construct an array with values [1, NULL, NULL]
    /// let array = Int32Array::from(vec![Some(1), None, None]);
    ///
    /// assert_eq!(array.null_count(), 2);
    /// ```
    fn null_count(&self) -> usize {
        self.data_ref().null_count()
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this array.
    fn get_buffer_memory_size(&self) -> usize;

    /// Returns the total number of bytes of memory occupied physically by this array.
    fn get_array_memory_size(&self) -> usize;
}

/// A reference-counted reference to a generic `Array`.
pub type ArrayRef = Arc<Array>;

/// Constructs an array using the input `data`.
/// Returns a reference-counted `Array` instance.
///
/// Dispatches on `data.data_type()` to the matching concrete array wrapper;
/// the `ArrayData` itself is moved, not copied, so this is cheap.
///
/// # Panics
///
/// Panics on data types without a concrete array implementation
/// (e.g. `Float16`) and on unexpected dictionary key types.
pub fn make_array(data: ArrayDataRef) -> ArrayRef {
    match data.data_type() {
        DataType::Boolean => Arc::new(BooleanArray::from(data)) as ArrayRef,
        DataType::Int8 => Arc::new(Int8Array::from(data)) as ArrayRef,
        DataType::Int16 => Arc::new(Int16Array::from(data)) as ArrayRef,
        DataType::Int32 => Arc::new(Int32Array::from(data)) as ArrayRef,
        DataType::Int64 => Arc::new(Int64Array::from(data)) as ArrayRef,
        DataType::UInt8 => Arc::new(UInt8Array::from(data)) as ArrayRef,
        DataType::UInt16 => Arc::new(UInt16Array::from(data)) as ArrayRef,
        DataType::UInt32 => Arc::new(UInt32Array::from(data)) as ArrayRef,
        DataType::UInt64 => Arc::new(UInt64Array::from(data)) as ArrayRef,
        // No PrimitiveArray impl exists for half-precision floats here.
        DataType::Float16 => panic!("Float16 datatype not supported"),
        DataType::Float32 => Arc::new(Float32Array::from(data)) as ArrayRef,
        DataType::Float64 => Arc::new(Float64Array::from(data)) as ArrayRef,
        DataType::Date32(DateUnit::Day) => Arc::new(Date32Array::from(data)) as ArrayRef,
        DataType::Date64(DateUnit::Millisecond) => {
            Arc::new(Date64Array::from(data)) as ArrayRef
        }
        DataType::Time32(TimeUnit::Second) => {
            Arc::new(Time32SecondArray::from(data)) as ArrayRef
        }
        DataType::Time32(TimeUnit::Millisecond) => {
            Arc::new(Time32MillisecondArray::from(data)) as ArrayRef
        }
        DataType::Time64(TimeUnit::Microsecond) => {
            Arc::new(Time64MicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Time64(TimeUnit::Nanosecond) => {
            Arc::new(Time64NanosecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Second, _) => {
            Arc::new(TimestampSecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Millisecond, _) => {
            Arc::new(TimestampMillisecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Microsecond, _) => {
            Arc::new(TimestampMicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Timestamp(TimeUnit::Nanosecond, _) => {
            Arc::new(TimestampNanosecondArray::from(data)) as ArrayRef
        }
        DataType::Interval(IntervalUnit::YearMonth) => {
            Arc::new(IntervalYearMonthArray::from(data)) as ArrayRef
        }
        DataType::Interval(IntervalUnit::DayTime) => {
            Arc::new(IntervalDayTimeArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Second) => {
            Arc::new(DurationSecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Millisecond) => {
            Arc::new(DurationMillisecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Microsecond) => {
            Arc::new(DurationMicrosecondArray::from(data)) as ArrayRef
        }
        DataType::Duration(TimeUnit::Nanosecond) => {
            Arc::new(DurationNanosecondArray::from(data)) as ArrayRef
        }
        DataType::Binary => Arc::new(BinaryArray::from(data)) as ArrayRef,
        DataType::LargeBinary => Arc::new(LargeBinaryArray::from(data)) as ArrayRef,
        DataType::FixedSizeBinary(_) => {
            Arc::new(FixedSizeBinaryArray::from(data)) as ArrayRef
        }
        DataType::Utf8 => Arc::new(StringArray::from(data)) as ArrayRef,
        DataType::LargeUtf8 => Arc::new(LargeStringArray::from(data)) as ArrayRef,
        DataType::List(_) => Arc::new(ListArray::from(data)) as ArrayRef,
        DataType::LargeList(_) => Arc::new(LargeListArray::from(data)) as ArrayRef,
        DataType::Struct(_) => Arc::new(StructArray::from(data)) as ArrayRef,
        DataType::Union(_) => Arc::new(UnionArray::from(data)) as ArrayRef,
        DataType::FixedSizeList(_, _) => {
            Arc::new(FixedSizeListArray::from(data)) as ArrayRef
        }
        // Dictionary arrays dispatch a second time on the integer key type.
        DataType::Dictionary(ref key_type, _) => match key_type.as_ref() {
            DataType::Int8 => {
                Arc::new(DictionaryArray::<Int8Type>::from(data)) as ArrayRef
            }
            DataType::Int16 => {
                Arc::new(DictionaryArray::<Int16Type>::from(data)) as ArrayRef
            }
            DataType::Int32 => {
                Arc::new(DictionaryArray::<Int32Type>::from(data)) as ArrayRef
            }
            DataType::Int64 => {
                Arc::new(DictionaryArray::<Int64Type>::from(data)) as ArrayRef
            }
            DataType::UInt8 => {
                Arc::new(DictionaryArray::<UInt8Type>::from(data)) as ArrayRef
            }
            DataType::UInt16 => {
                Arc::new(DictionaryArray::<UInt16Type>::from(data)) as ArrayRef
            }
            DataType::UInt32 => {
                Arc::new(DictionaryArray::<UInt32Type>::from(data)) as ArrayRef
            }
            DataType::UInt64 => {
                Arc::new(DictionaryArray::<UInt64Type>::from(data)) as ArrayRef
            }
            dt => panic!("Unexpected dictionary key type {:?}", dt),
        },
        DataType::Null => Arc::new(NullArray::from(data)) as ArrayRef,
        dt => panic!("Unexpected data type {:?}", dt),
    }
}

/// Creates a zero-copy slice of the array's data.
///
/// Buffers are shared; only `offset`, `len` and the derived `null_count`
/// of the cloned `ArrayData` header change.
///
/// # Panics
///
/// Panics if `offset + length > data.len()`.
fn slice_data(data: &ArrayDataRef, mut offset: usize, length: usize) -> ArrayDataRef {
    assert!((offset + length) <= data.len());

    let mut new_data = data.as_ref().clone();
    // Clamp in case `length` reaches past the end (offset is still the
    // slice-relative offset at this point).
    let len = std::cmp::min(new_data.len - offset, length);

    // Convert the slice-relative offset into an absolute offset into the
    // shared buffers.
    offset += data.offset;
    new_data.len = len;
    new_data.offset = offset;

    // Calculate the new null count based on the offset: total slots minus the
    // set (valid) bits inside the sliced window of the validity bitmap.
    new_data.null_count = if let Some(bitmap) = new_data.null_bitmap() {
        let valid_bits = bitmap.bits.data();
        len.checked_sub(bit_util::count_set_bits_offset(valid_bits, offset, length))
            .unwrap()
    } else {
        0
    };

    Arc::new(new_data)
}

// Creates a new MutableBuffer sized for `len` bits, with all bits initialized
// to false (i.e. all-null). This is useful to populate null bitmaps.
fn make_null_buffer(len: usize) -> MutableBuffer {
    let num_bytes = bit_util::ceil(len, 8);
    MutableBuffer::new(num_bytes).with_bitset(num_bytes, false)
}

/// ----------------------------------------------------------------------------
/// Implementations of different array types

// Thin wrapper that lets a raw pointer be stored in array structs that must be
// Send + Sync.
struct RawPtrBox<T> {
    inner: *const T,
}

impl<T> RawPtrBox<T> {
    fn new(inner: *const T) -> Self {
        Self { inner }
    }

    fn get(&self) -> *const T {
        self.inner
    }
}

// NOTE(review): these impls assert that sharing the raw pointer across threads
// is safe. Soundness relies on the pointer always targeting an immutable
// buffer kept alive by the `ArrayDataRef` stored alongside it — TODO confirm
// this invariant holds for every RawPtrBox construction site.
unsafe impl<T> Send for RawPtrBox<T> {}
unsafe impl<T> Sync for RawPtrBox<T> {}

// Casts a byte pointer to `*const T`, panicking if it is not aligned for `T`.
fn as_aligned_pointer<T>(p: *const u8) -> *const T {
    assert!(
        memory::is_aligned(p, mem::align_of::<T>()),
        "memory is not aligned"
    );
    p as *const T
}

/// Array whose elements are of primitive types.
pub struct PrimitiveArray<T: ArrowPrimitiveType> { data: ArrayDataRef, /// Pointer to the value array. The lifetime of this must be <= to the value buffer /// stored in `data`, so it's safe to store. /// Also note that boolean arrays are bit-packed, so although the underlying pointer /// is of type bool it should be cast back to u8 before being used. /// i.e. `self.raw_values.get() as *const u8` raw_values: RawPtrBox<T::Native>, } impl<T: ArrowPrimitiveType> PrimitiveArray<T> { pub fn new(length: usize, values: Buffer, null_count: usize, offset: usize) -> Self { let array_data = ArrayData::builder(T::DATA_TYPE) .len(length) .add_buffer(values) .null_count(null_count) .offset(offset) .build(); PrimitiveArray::from(array_data) } /// Returns the length of this array. pub fn len(&self) -> usize { self.data.len() } /// Returns whether this array is empty. pub fn is_empty(&self) -> bool { self.data.is_empty() } /// Returns a raw pointer to the values of this array. pub fn raw_values(&self) -> *const T::Native { unsafe { self.raw_values.get().add(self.data.offset()) } } /// Returns a slice for the given offset and length /// /// Note this doesn't do any bound checking, for performance reason. pub fn value_slice(&self, offset: usize, len: usize) -> &[T::Native] { let raw = unsafe { std::slice::from_raw_parts(self.raw_values().add(offset), len) }; &raw[..] } // Returns a new primitive array builder pub fn builder(capacity: usize) -> PrimitiveBuilder<T> { PrimitiveBuilder::<T>::new(capacity) } /// Returns a `Buffer` holding all the values of this array. /// /// Note this doesn't take the offset of this array into account. pub fn values(&self) -> Buffer { self.data.buffers()[0].clone() } /// Returns the primitive value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. 
pub fn value(&self, i: usize) -> T::Native { let offset = i + self.offset(); unsafe { T::index(self.raw_values.get(), offset) } } } impl<T: ArrowPrimitiveType> Array for PrimitiveArray<T> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [PrimitiveArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [PrimitiveArray]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } fn as_datetime<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveDateTime> { match T::DATA_TYPE { DataType::Date32(_) => { // convert days into seconds Some(NaiveDateTime::from_timestamp(v as i64 * SECONDS_IN_DAY, 0)) } DataType::Date64(_) => Some(NaiveDateTime::from_timestamp( // extract seconds from milliseconds v / MILLISECONDS, // discard extracted seconds and convert milliseconds to nanoseconds (v % MILLISECONDS * MICROSECONDS) as u32, )), DataType::Time32(_) | DataType::Time64(_) => None, DataType::Timestamp(unit, _) => match unit { TimeUnit::Second => Some(NaiveDateTime::from_timestamp(v, 0)), TimeUnit::Millisecond => Some(NaiveDateTime::from_timestamp( // extract seconds from milliseconds v / MILLISECONDS, // discard extracted seconds and convert milliseconds to nanoseconds (v % MILLISECONDS * MICROSECONDS) as u32, )), TimeUnit::Microsecond => Some(NaiveDateTime::from_timestamp( // extract seconds from microseconds v / MICROSECONDS, // discard extracted seconds and convert microseconds to nanoseconds (v % MICROSECONDS * MILLISECONDS) as u32, )), TimeUnit::Nanosecond => Some(NaiveDateTime::from_timestamp( // extract seconds from nanoseconds v / NANOSECONDS, // discard extracted seconds (v % NANOSECONDS) as u32, )), }, // interval is not yet fully documented 
[ARROW-3097] DataType::Interval(_) => None, _ => None, } } fn as_date<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveDate> { as_datetime::<T>(v).map(|datetime| datetime.date()) } fn as_time<T: ArrowPrimitiveType>(v: i64) -> Option<NaiveTime> { match T::DATA_TYPE { DataType::Time32(unit) => { // safe to immediately cast to u32 as `self.value(i)` is positive i32 let v = v as u32; match unit { TimeUnit::Second => Some(NaiveTime::from_num_seconds_from_midnight(v, 0)), TimeUnit::Millisecond => { Some(NaiveTime::from_num_seconds_from_midnight( // extract seconds from milliseconds v / MILLISECONDS as u32, // discard extracted seconds and convert milliseconds to // nanoseconds v % MILLISECONDS as u32 * MICROSECONDS as u32, )) } _ => None, } } DataType::Time64(unit) => { match unit { TimeUnit::Microsecond => { Some(NaiveTime::from_num_seconds_from_midnight( // extract seconds from microseconds (v / MICROSECONDS) as u32, // discard extracted seconds and convert microseconds to // nanoseconds (v % MICROSECONDS * MILLISECONDS) as u32, )) } TimeUnit::Nanosecond => { Some(NaiveTime::from_num_seconds_from_midnight( // extract seconds from nanoseconds (v / NANOSECONDS) as u32, // discard extracted seconds (v % NANOSECONDS) as u32, )) } _ => None, } } DataType::Timestamp(_, _) => as_datetime::<T>(v).map(|datetime| datetime.time()), DataType::Date32(_) | DataType::Date64(_) => Some(NaiveTime::from_hms(0, 0, 0)), DataType::Interval(_) => None, _ => None, } } impl<T: ArrowTemporalType + ArrowNumericType> PrimitiveArray<T> where i64: std::convert::From<T::Native>, { /// Returns value as a chrono `NaiveDateTime`, handling time resolution /// /// If a data type cannot be converted to `NaiveDateTime`, a `None` is returned. /// A valid value is expected, thus the user should first check for validity. 
pub fn value_as_datetime(&self, i: usize) -> Option<NaiveDateTime> { as_datetime::<T>(i64::from(self.value(i))) } /// Returns value as a chrono `NaiveDate` by using `Self::datetime()` /// /// If a data type cannot be converted to `NaiveDate`, a `None` is returned pub fn value_as_date(&self, i: usize) -> Option<NaiveDate> { self.value_as_datetime(i).map(|datetime| datetime.date()) } /// Returns a value as a chrono `NaiveTime` /// /// `Date32` and `Date64` return UTC midnight as they do not have time resolution pub fn value_as_time(&self, i: usize) -> Option<NaiveTime> { as_time::<T>(i64::from(self.value(i))) } } impl<T: ArrowPrimitiveType> fmt::Debug for PrimitiveArray<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "PrimitiveArray<{:?}>\n[\n", T::DATA_TYPE)?; print_long_array(self, f, |array, index, f| match T::DATA_TYPE { DataType::Date32(_) | DataType::Date64(_) => { let v = self.value(index).to_usize().unwrap() as i64; match as_date::<T>(v) { Some(date) => write!(f, "{:?}", date), None => write!(f, "null"), } } DataType::Time32(_) | DataType::Time64(_) => { let v = self.value(index).to_usize().unwrap() as i64; match as_time::<T>(v) { Some(time) => write!(f, "{:?}", time), None => write!(f, "null"), } } DataType::Timestamp(_, _) => { let v = self.value(index).to_usize().unwrap() as i64; match as_datetime::<T>(v) { Some(datetime) => write!(f, "{:?}", datetime), None => write!(f, "null"), } } _ => fmt::Debug::fmt(&array.value(index), f), })?; write!(f, "]") } } impl<'a, T: ArrowPrimitiveType> IntoIterator for &'a PrimitiveArray<T> { type Item = Option<<T as ArrowPrimitiveType>::Native>; type IntoIter = PrimitiveIter<'a, T>; fn into_iter(self) -> Self::IntoIter { PrimitiveIter::<'a, T>::new(self) } } impl<'a, T: ArrowPrimitiveType> PrimitiveArray<T> { /// constructs a new iterator pub fn iter(&'a self) -> PrimitiveIter<'a, T> { PrimitiveIter::<'a, T>::new(&self) } } impl<T: ArrowPrimitiveType, Ptr: Borrow<Option<<T as 
ArrowPrimitiveType>::Native>>> FromIterator<Ptr> for PrimitiveArray<T> { fn from_iter<I: IntoIterator<Item = Ptr>>(iter: I) -> Self { let iter = iter.into_iter(); let (_, data_len) = iter.size_hint(); let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound. let num_bytes = bit_util::ceil(data_len, 8); let mut null_buf = MutableBuffer::new(num_bytes).with_bitset(num_bytes, false); let mut val_buf = MutableBuffer::new( data_len * mem::size_of::<<T as ArrowPrimitiveType>::Native>(), ); let null = vec![0; mem::size_of::<<T as ArrowPrimitiveType>::Native>()]; let null_slice = null_buf.data_mut(); iter.enumerate().for_each(|(i, item)| { if let Some(a) = item.borrow() { bit_util::set_bit(null_slice, i); val_buf.write_all(a.to_byte_slice()).unwrap(); } else { val_buf.write_all(&null).unwrap(); } }); let data = ArrayData::new( T::DATA_TYPE, data_len, None, Some(null_buf.freeze()), 0, vec![val_buf.freeze()], vec![], ); PrimitiveArray::from(Arc::new(data)) } } // TODO: the macro is needed here because we'd get "conflicting implementations" error // otherwise with both `From<Vec<T::Native>>` and `From<Vec<Option<T::Native>>>`. // We should revisit this in future. macro_rules! def_numeric_from_vec { ( $ty:ident ) => { impl From<Vec<<$ty as ArrowPrimitiveType>::Native>> for PrimitiveArray<$ty> { fn from(data: Vec<<$ty as ArrowPrimitiveType>::Native>) -> Self { let array_data = ArrayData::builder($ty::DATA_TYPE) .len(data.len()) .add_buffer(Buffer::from(data.to_byte_slice())) .build(); PrimitiveArray::from(array_data) } } // Constructs a primitive array from a vector. Should only be used for testing. 
impl From<Vec<Option<<$ty as ArrowPrimitiveType>::Native>>> for PrimitiveArray<$ty> { fn from(data: Vec<Option<<$ty as ArrowPrimitiveType>::Native>>) -> Self { PrimitiveArray::from_iter(data.iter()) } } }; } def_numeric_from_vec!(Int8Type); def_numeric_from_vec!(Int16Type); def_numeric_from_vec!(Int32Type); def_numeric_from_vec!(Int64Type); def_numeric_from_vec!(UInt8Type); def_numeric_from_vec!(UInt16Type); def_numeric_from_vec!(UInt32Type); def_numeric_from_vec!(UInt64Type); def_numeric_from_vec!(Float32Type); def_numeric_from_vec!(Float64Type); def_numeric_from_vec!(Date32Type); def_numeric_from_vec!(Date64Type); def_numeric_from_vec!(Time32SecondType); def_numeric_from_vec!(Time32MillisecondType); def_numeric_from_vec!(Time64MicrosecondType); def_numeric_from_vec!(Time64NanosecondType); def_numeric_from_vec!(IntervalYearMonthType); def_numeric_from_vec!(IntervalDayTimeType); def_numeric_from_vec!(DurationSecondType); def_numeric_from_vec!(DurationMillisecondType); def_numeric_from_vec!(DurationMicrosecondType); def_numeric_from_vec!(DurationNanosecondType); def_numeric_from_vec!(TimestampMillisecondType); def_numeric_from_vec!(TimestampMicrosecondType); impl<T: ArrowTimestampType> PrimitiveArray<T> { /// Construct a timestamp array from a vec of i64 values and an optional timezone pub fn from_vec(data: Vec<i64>, timezone: Option<Arc<String>>) -> Self { let array_data = ArrayData::builder(DataType::Timestamp(T::get_time_unit(), timezone)) .len(data.len()) .add_buffer(Buffer::from(data.to_byte_slice())) .build(); PrimitiveArray::from(array_data) } } impl<T: ArrowTimestampType> PrimitiveArray<T> { /// Construct a timestamp array from a vec of Option<i64> values and an optional timezone pub fn from_opt_vec(data: Vec<Option<i64>>, timezone: Option<Arc<String>>) -> Self { // TODO: duplicated from def_numeric_from_vec! 
macro, it looks possible to convert to generic let data_len = data.len(); let mut null_buf = make_null_buffer(data_len); let mut val_buf = MutableBuffer::new(data_len * mem::size_of::<i64>()); { let null = vec![0; mem::size_of::<i64>()]; let null_slice = null_buf.data_mut(); for (i, v) in data.iter().enumerate() { if let Some(n) = v { bit_util::set_bit(null_slice, i); // unwrap() in the following should be safe here since we've // made sure enough space is allocated for the values. val_buf.write_all(&n.to_byte_slice()).unwrap(); } else { val_buf.write_all(&null).unwrap(); } } } let array_data = ArrayData::builder(DataType::Timestamp(T::get_time_unit(), timezone)) .len(data_len) .add_buffer(val_buf.freeze()) .null_bit_buffer(null_buf.freeze()) .build(); PrimitiveArray::from(array_data) } } /// Constructs a boolean array from a vector. Should only be used for testing. impl From<Vec<bool>> for BooleanArray { fn from(data: Vec<bool>) -> Self { let mut mut_buf = make_null_buffer(data.len()); { let mut_slice = mut_buf.data_mut(); for (i, b) in data.iter().enumerate() { if *b { bit_util::set_bit(mut_slice, i); } } } let array_data = ArrayData::builder(DataType::Boolean) .len(data.len()) .add_buffer(mut_buf.freeze()) .build(); BooleanArray::from(array_data) } } impl From<Vec<Option<bool>>> for BooleanArray { fn from(data: Vec<Option<bool>>) -> Self { let data_len = data.len(); let num_byte = bit_util::ceil(data_len, 8); let mut null_buf = make_null_buffer(data.len()); let mut val_buf = MutableBuffer::new(num_byte).with_bitset(num_byte, false); { let null_slice = null_buf.data_mut(); let val_slice = val_buf.data_mut(); for (i, v) in data.iter().enumerate() { if let Some(b) = v { bit_util::set_bit(null_slice, i); if *b { bit_util::set_bit(val_slice, i); } } } } let array_data = ArrayData::builder(DataType::Boolean) .len(data_len) .add_buffer(val_buf.freeze()) .null_bit_buffer(null_buf.freeze()) .build(); BooleanArray::from(array_data) } } /// Constructs a `PrimitiveArray` 
from an array data reference. impl<T: ArrowPrimitiveType> From<ArrayDataRef> for PrimitiveArray<T> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 1, "PrimitiveArray data should contain a single buffer only (values buffer)" ); let raw_values = data.buffers()[0].raw_data(); assert!( memory::is_aligned::<u8>(raw_values, mem::align_of::<T::Native>()), "memory is not aligned" ); Self { data, raw_values: RawPtrBox::new(raw_values as *const T::Native), } } } /// Common operations for List types. pub trait ListArrayOps<OffsetSize: OffsetSizeTrait> { fn value_offset_at(&self, i: usize) -> OffsetSize; } /// trait declaring an offset size, relevant for i32 vs i64 array types. pub trait OffsetSizeTrait: ArrowNativeType + Num + Ord { fn prefix() -> &'static str; fn to_isize(&self) -> isize; } impl OffsetSizeTrait for i32 { fn prefix() -> &'static str { "" } fn to_isize(&self) -> isize { num::ToPrimitive::to_isize(self).unwrap() } } impl OffsetSizeTrait for i64 { fn prefix() -> &'static str { "Large" } fn to_isize(&self) -> isize { num::ToPrimitive::to_isize(self).unwrap() } } pub struct GenericListArray<OffsetSize> { data: ArrayDataRef, values: ArrayRef, value_offsets: RawPtrBox<OffsetSize>, } impl<OffsetSize: OffsetSizeTrait> GenericListArray<OffsetSize> { /// Returns a reference to the values of this list. pub fn values(&self) -> ArrayRef { self.values.clone() } /// Returns a clone of the value type of this list. pub fn value_type(&self) -> DataType { self.values.data_ref().data_type().clone() } /// Returns ith value of this list array. pub fn value(&self, i: usize) -> ArrayRef { self.values.slice( self.value_offset(i).to_usize().unwrap(), self.value_length(i).to_usize().unwrap(), ) } /// Returns the offset for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. 
#[inline] pub fn value_offset(&self, i: usize) -> OffsetSize { self.value_offset_at(self.data.offset() + i) } /// Returns the length for value at index `i`. /// /// Note this doesn't do any bound checking, for performance reason. #[inline] pub fn value_length(&self, mut i: usize) -> OffsetSize { i += self.data.offset(); self.value_offset_at(i + 1) - self.value_offset_at(i) } #[inline] fn value_offset_at(&self, i: usize) -> OffsetSize { unsafe { *self.value_offsets.get().add(i) } } } impl<OffsetSize: OffsetSizeTrait> From<ArrayDataRef> for GenericListArray<OffsetSize> { fn from(data: ArrayDataRef) -> Self { assert_eq!( data.buffers().len(), 1, "ListArray data should contain a single buffer only (value offsets)" ); assert_eq!( data.child_data().len(), 1, "ListArray should contain a single child array (values array)" ); let values = make_array(data.child_data()[0].clone()); let raw_value_offsets = data.buffers()[0].raw_data(); let value_offsets: *const OffsetSize = as_aligned_pointer(raw_value_offsets); unsafe { assert!( (*value_offsets.offset(0)).is_zero(), "offsets do not start at zero" ); } Self { data, values, value_offsets: RawPtrBox::new(value_offsets), } } } impl<OffsetSize: 'static + OffsetSizeTrait> Array for GenericListArray<OffsetSize> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [ListArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [ListArray]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + mem::size_of_val(self) } } // Helper function for printing potentially long arrays. 
/// Shared helper for the `Debug` impls of the array types below: prints at most
/// the first 10 and last 10 elements of `array`, with an elision marker for the
/// middle when the array holds more than 20 elements. Null slots print as "null".
fn print_long_array<A, F>(array: &A, f: &mut fmt::Formatter, print_item: F) -> fmt::Result
where
    A: Array,
    F: Fn(&A, usize, &mut fmt::Formatter) -> fmt::Result,
{
    let head = std::cmp::min(10, array.len());
    for i in 0..head {
        if array.is_null(i) {
            writeln!(f, " null,")?;
        } else {
            write!(f, " ")?;
            print_item(&array, i, f)?;
            writeln!(f, ",")?;
        }
    }
    if array.len() > 10 {
        if array.len() > 20 {
            writeln!(f, " ...{} elements...,", array.len() - 20)?;
        }
        // `max(head, ..)` avoids re-printing head elements when 10 < len <= 20.
        let tail = std::cmp::max(head, array.len() - 10);
        for i in tail..array.len() {
            if array.is_null(i) {
                writeln!(f, " null,")?;
            } else {
                write!(f, " ")?;
                print_item(&array, i, f)?;
                writeln!(f, ",")?;
            }
        }
    }
    Ok(())
}

impl<OffsetSize: OffsetSizeTrait> fmt::Debug for GenericListArray<OffsetSize> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}ListArray\n[\n", OffsetSize::prefix())?;
        print_long_array(self, f, |array, index, f| {
            fmt::Debug::fmt(&array.value(index), f)
        })?;
        write!(f, "]")
    }
}

impl<OffsetSize: OffsetSizeTrait> ListArrayOps<OffsetSize> for GenericListArray<OffsetSize> {
    fn value_offset_at(&self, i: usize) -> OffsetSize {
        // Delegates to the inherent method of the same name.
        self.value_offset_at(i)
    }
}

/// A list array where each element is a variable-sized sequence of values with the same
/// type whose memory offsets between elements are represented by a i32.
pub type ListArray = GenericListArray<i32>;

/// A list array where each element is a variable-sized sequence of values with the same
/// type whose memory offsets between elements are represented by a i64.
pub type LargeListArray = GenericListArray<i64>;

/// A list array where each element is a fixed-size sequence of values with the same
/// type whose maximum length is represented by a i32.
pub struct FixedSizeListArray {
    data: ArrayDataRef,
    values: ArrayRef,
    // Fixed number of child values per list slot (from DataType::FixedSizeList).
    length: i32,
}

impl FixedSizeListArray {
    /// Returns a reference to the values of this list.
    pub fn values(&self) -> ArrayRef {
        self.values.clone()
    }

    /// Returns a clone of the value type of this list.
    pub fn value_type(&self) -> DataType {
        self.values.data_ref().data_type().clone()
    }

    /// Returns ith value of this list array.
    pub fn value(&self, i: usize) -> ArrayRef {
        self.values
            .slice(self.value_offset(i) as usize, self.value_length() as usize)
    }

    /// Returns the offset for value at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_offset(&self, i: usize) -> i32 {
        self.value_offset_at(self.data.offset() + i)
    }

    /// Returns the length for value at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub const fn value_length(&self) -> i32 {
        self.length
    }

    // Offsets are implicit for a fixed-size list: slot * fixed length.
    #[inline]
    const fn value_offset_at(&self, i: usize) -> i32 {
        i as i32 * self.length
    }
}

impl From<ArrayDataRef> for FixedSizeListArray {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.buffers().len(),
            0,
            "FixedSizeListArray data should not contain a buffer for value offsets"
        );
        assert_eq!(
            data.child_data().len(),
            1,
            "FixedSizeListArray should contain a single child array (values array)"
        );
        let values = make_array(data.child_data()[0].clone());
        let length = match data.data_type() {
            DataType::FixedSizeList(_, len) => {
                // check that child data is multiple of length
                assert_eq!(
                    values.len() % *len as usize,
                    0,
                    "FixedSizeListArray child array length should be a multiple of {}",
                    len
                );
                *len
            }
            _ => {
                panic!("FixedSizeListArray data should contain a FixedSizeList data type")
            }
        };
        Self {
            data,
            values,
            length,
        }
    }
}

impl Array for FixedSizeListArray {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [FixedSizeListArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size() + self.values().get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [FixedSizeListArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size()
            + self.values().get_array_memory_size()
            + mem::size_of_val(self)
    }
}

impl fmt::Debug for FixedSizeListArray {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "FixedSizeListArray<{}>\n[\n", self.value_length())?;
        print_long_array(self, f, |array, index, f| {
            fmt::Debug::fmt(&array.value(index), f)
        })?;
        write!(f, "]")
    }
}

/// Like OffsetSizeTrait, but specialized for Binary
// This allows us to expose a constant datatype for the GenericBinaryArray
pub trait BinaryOffsetSizeTrait: OffsetSizeTrait {
    const DATA_TYPE: DataType;
}

impl BinaryOffsetSizeTrait for i32 {
    const DATA_TYPE: DataType = DataType::Binary;
}

impl BinaryOffsetSizeTrait for i64 {
    const DATA_TYPE: DataType = DataType::LargeBinary;
}

/// An array of variable-length byte strings; offsets (buffer 0) index into the
/// contiguous value bytes (buffer 1).
pub struct GenericBinaryArray<OffsetSize: BinaryOffsetSizeTrait> {
    data: ArrayDataRef,
    // Raw pointer into buffer 0 (offsets), aligned for OffsetSize.
    value_offsets: RawPtrBox<OffsetSize>,
    // Raw pointer into buffer 1 (value bytes).
    value_data: RawPtrBox<u8>,
}

impl<OffsetSize: BinaryOffsetSizeTrait> GenericBinaryArray<OffsetSize> {
    /// Returns the offset for the element at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_offset(&self, i: usize) -> OffsetSize {
        self.value_offset_at(self.data.offset() + i)
    }

    /// Returns the length for the element at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_length(&self, mut i: usize) -> OffsetSize {
        i += self.data.offset();
        self.value_offset_at(i + 1) - self.value_offset_at(i)
    }

    /// Returns a clone of the value offset buffer
    pub fn value_offsets(&self) -> Buffer {
        self.data.buffers()[0].clone()
    }

    /// Returns a clone of the value data buffer
    pub fn value_data(&self) -> Buffer {
        self.data.buffers()[1].clone()
    }

    // Unchecked raw read of offset slot `i`; callers must keep `i` within the
    // offsets buffer (len + 1 entries).
    #[inline]
    fn value_offset_at(&self, i: usize) -> OffsetSize {
        unsafe { *self.value_offsets.get().add(i) }
    }

    /// Returns the element at index `i` as a byte slice.
    ///
    /// # Panics
    /// Panics if `i` is out of bounds.
    pub fn value(&self, i: usize) -> &[u8] {
        assert!(i < self.data.len(), "BinaryArray out of bounds access");
        let offset = i.checked_add(self.data.offset()).unwrap();
        unsafe {
            let pos = self.value_offset_at(offset);
            std::slice::from_raw_parts(
                self.value_data.get().offset(pos.to_isize()),
                (self.value_offset_at(offset + 1) - pos).to_usize().unwrap(),
            )
        }
    }

    /// Creates a [GenericBinaryArray] from a vector of byte slices
    pub fn from_vec(v: Vec<&[u8]>) -> Self {
        let mut offsets = Vec::with_capacity(v.len() + 1);
        let mut values = Vec::new();
        let mut length_so_far: OffsetSize = OffsetSize::zero();
        // Offsets buffer always has len + 1 entries, starting at 0.
        offsets.push(length_so_far);
        for s in &v {
            length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap();
            offsets.push(length_so_far);
            values.extend_from_slice(s);
        }
        let array_data = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(v.len())
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        GenericBinaryArray::<OffsetSize>::from(array_data)
    }

    /// Creates a [GenericBinaryArray] from a vector of Optional (null) byte slices
    pub fn from_opt_vec(v: Vec<Option<&[u8]>>) -> Self {
        v.into_iter().collect()
    }

    // Reinterprets a List<u8> array's buffers as a binary array (no data copy).
    fn from_list(v: GenericListArray<OffsetSize>) -> Self {
        assert_eq!(
            v.data_ref().child_data()[0].child_data().len(),
            0,
            "BinaryArray can only be created from list array of u8 values \
             (i.e. List<PrimitiveArray<u8>>)."
        );
        assert_eq!(
            v.data_ref().child_data()[0].data_type(),
            &DataType::UInt8,
            "BinaryArray can only be created from List<u8> arrays, mismatched data types."
        );

        let mut builder = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(v.len())
            .add_buffer(v.data_ref().buffers()[0].clone())
            .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone());
        if let Some(bitmap) = v.data_ref().null_bitmap() {
            builder = builder
                .null_count(v.data_ref().null_count())
                .null_bit_buffer(bitmap.bits.clone())
        }

        let data = builder.build();
        Self::from(data)
    }
}

impl<'a, T: BinaryOffsetSizeTrait> GenericBinaryArray<T> {
    /// constructs a new iterator
    pub fn iter(&'a self) -> GenericBinaryIter<'a, T> {
        GenericBinaryIter::<'a, T>::new(&self)
    }
}

impl<OffsetSize: BinaryOffsetSizeTrait> fmt::Debug for GenericBinaryArray<OffsetSize> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}BinaryArray\n[\n", OffsetSize::prefix())?;
        print_long_array(self, f, |array, index, f| {
            fmt::Debug::fmt(&array.value(index), f)
        })?;
        write!(f, "]")
    }
}

impl<OffsetSize: BinaryOffsetSizeTrait> Array for GenericBinaryArray<OffsetSize> {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [GenericBinaryArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [GenericBinaryArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size() + mem::size_of_val(self)
    }
}

impl<OffsetSize: BinaryOffsetSizeTrait> ListArrayOps<OffsetSize>
    for GenericBinaryArray<OffsetSize>
{
    fn value_offset_at(&self, i: usize) -> OffsetSize {
        self.value_offset_at(i)
    }
}

impl<OffsetSize: BinaryOffsetSizeTrait> From<ArrayDataRef> for GenericBinaryArray<OffsetSize> {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.data_type(),
            &<OffsetSize as BinaryOffsetSizeTrait>::DATA_TYPE,
            "[Large]BinaryArray expects Datatype::[Large]Binary"
        );
        assert_eq!(
            data.buffers().len(),
            2,
            "BinaryArray data should contain 2 buffers only (offsets and values)"
        );
        let raw_value_offsets = data.buffers()[0].raw_data();
        let value_data = data.buffers()[1].raw_data();
        Self {
            data,
            value_offsets: RawPtrBox::new(as_aligned_pointer::<OffsetSize>(
                raw_value_offsets,
            )),
            value_data: RawPtrBox::new(value_data),
        }
    }
}

impl<Ptr, OffsetSize: BinaryOffsetSizeTrait> FromIterator<Option<Ptr>>
    for GenericBinaryArray<OffsetSize>
where
    Ptr: AsRef<[u8]>,
{
    fn from_iter<I: IntoIterator<Item = Option<Ptr>>>(iter: I) -> Self {
        let iter = iter.into_iter();
        let (_, data_len) = iter.size_hint();
        let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound.
        let mut offsets = Vec::with_capacity(data_len + 1);
        let mut values = Vec::new();
        let mut null_buf = make_null_buffer(data_len);
        let mut length_so_far: OffsetSize = OffsetSize::zero();
        offsets.push(length_so_far);
        {
            let null_slice = null_buf.data_mut();
            for (i, s) in iter.enumerate() {
                if let Some(s) = s {
                    let s = s.as_ref();
                    bit_util::set_bit(null_slice, i);
                    length_so_far =
                        length_so_far + OffsetSize::from_usize(s.len()).unwrap();
                    values.extend_from_slice(s);
                }
                // always add an element in offsets
                // (null slots get a zero-length range: offset repeats).
                offsets.push(length_so_far);
            }
        }
        let array_data = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(data_len)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .null_bit_buffer(null_buf.freeze())
            .build();
        Self::from(array_data)
    }
}

/// An array where each element is a byte whose maximum length is represented by a i32.
pub type BinaryArray = GenericBinaryArray<i32>;

/// An array where each element is a byte whose maximum length is represented by a i64.
pub type LargeBinaryArray = GenericBinaryArray<i64>;

impl<'a, T: BinaryOffsetSizeTrait> IntoIterator for &'a GenericBinaryArray<T> {
    type Item = Option<&'a [u8]>;
    type IntoIter = GenericBinaryIter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        GenericBinaryIter::<'a, T>::new(self)
    }
}

impl From<Vec<&[u8]>> for BinaryArray {
    fn from(v: Vec<&[u8]>) -> Self {
        BinaryArray::from_vec(v)
    }
}

impl From<Vec<Option<&[u8]>>> for BinaryArray {
    fn from(v: Vec<Option<&[u8]>>) -> Self {
        BinaryArray::from_opt_vec(v)
    }
}

impl From<Vec<&[u8]>> for LargeBinaryArray {
    fn from(v: Vec<&[u8]>) -> Self {
        LargeBinaryArray::from_vec(v)
    }
}

impl From<Vec<Option<&[u8]>>> for LargeBinaryArray {
    fn from(v: Vec<Option<&[u8]>>) -> Self {
        LargeBinaryArray::from_opt_vec(v)
    }
}

impl From<ListArray> for BinaryArray {
    fn from(v: ListArray) -> Self {
        BinaryArray::from_list(v)
    }
}

impl From<LargeListArray> for LargeBinaryArray {
    fn from(v: LargeListArray) -> Self {
        LargeBinaryArray::from_list(v)
    }
}

/// Like OffsetSizeTrait,
but specialized for Strings
// This allows us to expose a constant datatype for the GenericStringArray
pub trait StringOffsetSizeTrait: OffsetSizeTrait {
    const DATA_TYPE: DataType;
}

impl StringOffsetSizeTrait for i32 {
    const DATA_TYPE: DataType = DataType::Utf8;
}

impl StringOffsetSizeTrait for i64 {
    const DATA_TYPE: DataType = DataType::LargeUtf8;
}

/// Generic struct for \[Large\]StringArray
pub struct GenericStringArray<OffsetSize: StringOffsetSizeTrait> {
    data: ArrayDataRef,
    // Raw pointer into buffer 0 (offsets), aligned for OffsetSize.
    value_offsets: RawPtrBox<OffsetSize>,
    // Raw pointer into buffer 1 (UTF-8 value bytes).
    value_data: RawPtrBox<u8>,
}

impl<OffsetSize: StringOffsetSizeTrait> GenericStringArray<OffsetSize> {
    /// Returns the offset for the element at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_offset(&self, i: usize) -> OffsetSize {
        self.value_offset_at(self.data.offset() + i)
    }

    /// Returns the length for the element at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_length(&self, mut i: usize) -> OffsetSize {
        i += self.data.offset();
        self.value_offset_at(i + 1) - self.value_offset_at(i)
    }

    /// Returns a clone of the value offset buffer
    pub fn value_offsets(&self) -> Buffer {
        self.data.buffers()[0].clone()
    }

    /// Returns a clone of the value data buffer
    pub fn value_data(&self) -> Buffer {
        self.data.buffers()[1].clone()
    }

    // Unchecked raw read of offset slot `i`; callers must keep `i` within the
    // offsets buffer (len + 1 entries).
    #[inline]
    fn value_offset_at(&self, i: usize) -> OffsetSize {
        unsafe { *self.value_offsets.get().add(i) }
    }

    /// Returns the element at index `i` as &str
    ///
    /// # Panics
    /// Panics if `i` is out of bounds.
    pub fn value(&self, i: usize) -> &str {
        assert!(i < self.data.len(), "StringArray out of bounds access");
        let offset = i.checked_add(self.data.offset()).unwrap();
        unsafe {
            let pos = self.value_offset_at(offset);
            let slice = std::slice::from_raw_parts(
                self.value_data.get().offset(pos.to_isize()),
                (self.value_offset_at(offset + 1) - pos).to_usize().unwrap(),
            );
            // NOTE(review): no UTF-8 validation here — soundness relies on the
            // buffer having been constructed from valid UTF-8 (as the Utf8
            // data type requires); verify all construction paths uphold this.
            std::str::from_utf8_unchecked(slice)
        }
    }

    // Reinterprets a List<u8> array's buffers as a string array (no data copy);
    // mirrors GenericBinaryArray::from_list.
    fn from_list(v: GenericListArray<OffsetSize>) -> Self {
        assert_eq!(
            v.data().child_data()[0].child_data().len(),
            0,
            "StringArray can only be created from list array of u8 values \
             (i.e. List<PrimitiveArray<u8>>)."
        );
        assert_eq!(
            v.data_ref().child_data()[0].data_type(),
            &DataType::UInt8,
            "StringArray can only be created from List<u8> arrays, mismatched data types."
        );

        let mut builder = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(v.len())
            .add_buffer(v.data_ref().buffers()[0].clone())
            .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone());
        if let Some(bitmap) = v.data().null_bitmap() {
            builder = builder
                .null_count(v.data_ref().null_count())
                .null_bit_buffer(bitmap.bits.clone())
        }

        let data = builder.build();
        Self::from(data)
    }

    pub(crate) fn from_vec(v: Vec<&str>) -> Self {
        let mut offsets = Vec::with_capacity(v.len() + 1);
        let mut values = Vec::new();
        let mut length_so_far = OffsetSize::zero();
        // Offsets buffer always has len + 1 entries, starting at 0.
        offsets.push(length_so_far);
        for s in &v {
            length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap();
            offsets.push(length_so_far);
            values.extend_from_slice(s.as_bytes());
        }
        let array_data = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(v.len())
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        Self::from(array_data)
    }

    pub(crate) fn from_opt_vec(v: Vec<Option<&str>>) -> Self {
        GenericStringArray::from_iter(v.into_iter())
    }
}

impl<'a, Ptr, OffsetSize: StringOffsetSizeTrait> FromIterator<Option<Ptr>>
    for GenericStringArray<OffsetSize>
where
    Ptr: AsRef<str>,
{
    fn from_iter<I: IntoIterator<Item = Option<Ptr>>>(iter: I) -> Self {
        let iter = iter.into_iter();
        let (_, data_len) = iter.size_hint();
        let data_len = data_len.expect("Iterator must be sized"); // panic if no upper bound.
        let mut offsets = Vec::with_capacity(data_len + 1);
        let mut values = Vec::new();
        let mut null_buf = make_null_buffer(data_len);
        let mut length_so_far = OffsetSize::zero();
        offsets.push(length_so_far);

        for (i, s) in iter.enumerate() {
            if let Some(s) = s {
                let s = s.as_ref();
                // set null bit
                let null_slice = null_buf.data_mut();
                bit_util::set_bit(null_slice, i);

                length_so_far = length_so_far + OffsetSize::from_usize(s.len()).unwrap();
                offsets.push(length_so_far);
                values.extend_from_slice(s.as_bytes());
            } else {
                // Null slot: repeat the running offset (zero-length value).
                offsets.push(length_so_far);
                values.extend_from_slice(b"");
            }
        }

        let array_data = ArrayData::builder(OffsetSize::DATA_TYPE)
            .len(data_len)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .null_bit_buffer(null_buf.freeze())
            .build();
        Self::from(array_data)
    }
}

impl<'a, T: StringOffsetSizeTrait> IntoIterator for &'a GenericStringArray<T> {
    type Item = Option<&'a str>;
    type IntoIter = GenericStringIter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        GenericStringIter::<'a, T>::new(self)
    }
}

impl<'a, T: StringOffsetSizeTrait> GenericStringArray<T> {
    /// constructs a new iterator
    pub fn iter(&'a self) -> GenericStringIter<'a, T> {
        GenericStringIter::<'a, T>::new(&self)
    }
}

impl<OffsetSize: StringOffsetSizeTrait> fmt::Debug for GenericStringArray<OffsetSize> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}StringArray\n[\n", OffsetSize::prefix())?;
        print_long_array(self, f, |array, index, f| {
            fmt::Debug::fmt(&array.value(index), f)
        })?;
        write!(f, "]")
    }
}

impl<OffsetSize: StringOffsetSizeTrait> Array for GenericStringArray<OffsetSize> {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [GenericStringArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [GenericStringArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size() + mem::size_of_val(self)
    }
}

impl<OffsetSize: StringOffsetSizeTrait> From<ArrayDataRef> for GenericStringArray<OffsetSize> {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.data_type(),
            &<OffsetSize as StringOffsetSizeTrait>::DATA_TYPE,
            "[Large]StringArray expects Datatype::[Large]Utf8"
        );
        assert_eq!(
            data.buffers().len(),
            2,
            "StringArray data should contain 2 buffers only (offsets and values)"
        );
        let raw_value_offsets = data.buffers()[0].raw_data();
        let value_data = data.buffers()[1].raw_data();
        Self {
            data,
            value_offsets: RawPtrBox::new(as_aligned_pointer::<OffsetSize>(
                raw_value_offsets,
            )),
            value_data: RawPtrBox::new(value_data),
        }
    }
}

impl<OffsetSize: StringOffsetSizeTrait> ListArrayOps<OffsetSize>
    for GenericStringArray<OffsetSize>
{
    fn value_offset_at(&self, i: usize) -> OffsetSize {
        self.value_offset_at(i)
    }
}

/// An array where each element is a variable-sized sequence of bytes representing a string
/// whose maximum length (in bytes) is represented by a i32.
pub type StringArray = GenericStringArray<i32>;

/// An array where each element is a variable-sized sequence of bytes representing a string
/// whose maximum length (in bytes) is represented by a i64.
pub type LargeStringArray = GenericStringArray<i64>;

impl From<ListArray> for StringArray {
    fn from(v: ListArray) -> Self {
        StringArray::from_list(v)
    }
}

impl From<LargeListArray> for LargeStringArray {
    fn from(v: LargeListArray) -> Self {
        LargeStringArray::from_list(v)
    }
}

impl From<Vec<&str>> for StringArray {
    fn from(v: Vec<&str>) -> Self {
        StringArray::from_vec(v)
    }
}

impl From<Vec<&str>> for LargeStringArray {
    fn from(v: Vec<&str>) -> Self {
        LargeStringArray::from_vec(v)
    }
}

impl From<Vec<Option<&str>>> for StringArray {
    fn from(v: Vec<Option<&str>>) -> Self {
        StringArray::from_opt_vec(v)
    }
}

impl From<Vec<Option<&str>>> for LargeStringArray {
    fn from(v: Vec<Option<&str>>) -> Self {
        LargeStringArray::from_opt_vec(v)
    }
}

/// A type of `FixedSizeListArray` whose elements are binaries.
pub struct FixedSizeBinaryArray {
    data: ArrayDataRef,
    // Raw pointer into buffer 0 (contiguous value bytes).
    value_data: RawPtrBox<u8>,
    // Fixed byte width per element (from DataType::FixedSizeBinary).
    length: i32,
}

impl FixedSizeBinaryArray {
    /// Returns the element at index `i` as a byte slice.
    ///
    /// # Panics
    /// Panics if `i` is out of bounds.
    pub fn value(&self, i: usize) -> &[u8] {
        assert!(
            i < self.data.len(),
            "FixedSizeBinaryArray out of bounds access"
        );
        let offset = i.checked_add(self.data.offset()).unwrap();
        unsafe {
            let pos = self.value_offset_at(offset);
            std::slice::from_raw_parts(
                self.value_data.get().offset(pos as isize),
                (self.value_offset_at(offset + 1) - pos) as usize,
            )
        }
    }

    /// Returns the offset for the element at index `i`.
    ///
    /// Note this doesn't do any bound checking, for performance reason.
    #[inline]
    pub fn value_offset(&self, i: usize) -> i32 {
        self.value_offset_at(self.data.offset() + i)
    }

    /// Returns the length for an element.
    ///
    /// All elements have the same length as the array is a fixed size.
    #[inline]
    pub fn value_length(&self) -> i32 {
        self.length
    }

    /// Returns a clone of the value data buffer
    pub fn value_data(&self) -> Buffer {
        self.data.buffers()[0].clone()
    }

    // Offsets are implicit for a fixed-size layout: slot * fixed width.
    #[inline]
    fn value_offset_at(&self, i: usize) -> i32 {
        self.length * i as i32
    }
}

impl ListArrayOps<i32> for FixedSizeBinaryArray {
    fn value_offset_at(&self, i: usize) -> i32 {
        self.value_offset_at(i)
    }
}

impl From<ArrayDataRef> for FixedSizeBinaryArray {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.buffers().len(),
            1,
            "FixedSizeBinaryArray data should contain 1 buffer only (values)"
        );
        let value_data = data.buffers()[0].raw_data();
        let length = match data.data_type() {
            DataType::FixedSizeBinary(len) => *len,
            _ => panic!("Expected data type to be FixedSizeBinary"),
        };
        Self {
            data,
            value_data: RawPtrBox::new(value_data),
            length,
        }
    }
}

/// Creates a `FixedSizeBinaryArray` from `FixedSizeList<u8>` array
impl From<FixedSizeListArray> for FixedSizeBinaryArray {
    fn from(v: FixedSizeListArray) -> Self {
        assert_eq!(
            v.data_ref().child_data()[0].child_data().len(),
            0,
            "FixedSizeBinaryArray can only be created from list array of u8 values \
             (i.e. FixedSizeList<PrimitiveArray<u8>>)."
        );
        assert_eq!(
            v.data_ref().child_data()[0].data_type(),
            &DataType::UInt8,
            "FixedSizeBinaryArray can only be created from FixedSizeList<u8> arrays, mismatched data types."
        );

        // Reuse the child's value buffer directly (no data copy).
        let mut builder = ArrayData::builder(DataType::FixedSizeBinary(v.value_length()))
            .len(v.len())
            .add_buffer(v.data_ref().child_data()[0].buffers()[0].clone());
        if let Some(bitmap) = v.data_ref().null_bitmap() {
            builder = builder
                .null_count(v.data_ref().null_count())
                .null_bit_buffer(bitmap.bits.clone())
        }

        let data = builder.build();
        Self::from(data)
    }
}

impl fmt::Debug for FixedSizeBinaryArray {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "FixedSizeBinaryArray<{}>\n[\n", self.value_length())?;
        print_long_array(self, f, |array, index, f| {
            fmt::Debug::fmt(&array.value(index), f)
        })?;
        write!(f, "]")
    }
}

impl Array for FixedSizeBinaryArray {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [FixedSizeBinaryArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [FixedSizeBinaryArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size() + mem::size_of_val(self)
    }
}

/// A nested array type where each child (called *field*) is represented by a separate
/// array.
pub struct StructArray {
    data: ArrayDataRef,
    // Materialized child arrays, one per field, pre-sliced to this array's offset/len.
    pub(crate) boxed_fields: Vec<ArrayRef>,
}

impl StructArray {
    /// Returns the field at `pos`.
    pub fn column(&self, pos: usize) -> &ArrayRef {
        &self.boxed_fields[pos]
    }

    /// Return the number of fields in this struct array
    pub fn num_columns(&self) -> usize {
        self.boxed_fields.len()
    }

    /// Returns the fields of the struct array
    pub fn columns(&self) -> Vec<&ArrayRef> {
        self.boxed_fields.iter().collect()
    }

    /// Returns child array refs of the struct array
    pub fn columns_ref(&self) -> Vec<ArrayRef> {
        self.boxed_fields.clone()
    }

    /// Return field names in this struct array
    pub fn column_names(&self) -> Vec<&str> {
        match self.data.data_type() {
            Struct(fields) => fields
                .iter()
                .map(|f| f.name().as_str())
                .collect::<Vec<&str>>(),
            _ => unreachable!("Struct array's data type is not struct!"),
        }
    }

    /// Return child array whose field name equals to column_name
    pub fn column_by_name(&self, column_name: &str) -> Option<&ArrayRef> {
        self.column_names()
            .iter()
            .position(|c| c == &column_name)
            .map(|pos| self.column(pos))
    }
}

impl From<ArrayDataRef> for StructArray {
    fn from(data: ArrayDataRef) -> Self {
        let mut boxed_fields = vec![];
        for cd in data.child_data() {
            // Slice children so each field array reflects the parent's view.
            let child_data = if data.offset != 0 || data.len != cd.len {
                slice_data(&cd, data.offset, data.len)
            } else {
                cd.clone()
            };
            boxed_fields.push(make_array(child_data));
        }
        Self { data, boxed_fields }
    }
}

impl TryFrom<Vec<(&str, ArrayRef)>> for StructArray {
    type Error = ArrowError;

    /// builds a StructArray from a vector of names and arrays.
    /// This errors if the values have a different length.
    /// An entry is set to Null when all values are null.
    fn try_from(values: Vec<(&str, ArrayRef)>) -> Result<Self> {
        let values_len = values.len();

        // these will be populated
        let mut fields = Vec::with_capacity(values_len);
        let mut child_data = Vec::with_capacity(values_len);

        // len: the size of the arrays.
        let mut len: Option<usize> = None;
        // null: the null mask of the arrays.
        let mut null: Option<Buffer> = None;
        for (field_name, array) in values {
            let child_datum = array.data();
            let child_datum_len = child_datum.len();
            if let Some(len) = len {
                if len != child_datum_len {
                    return Err(ArrowError::InvalidArgumentError(
                        format!("Array of field \"{}\" has length {}, but previous elements have length {}. All arrays in every entry in a struct array must have the same length.", field_name, child_datum_len, len)
                    ));
                }
            } else {
                len = Some(child_datum_len)
            }
            child_data.push(child_datum.clone());
            fields.push(Field::new(
                field_name,
                array.data_type().clone(),
                child_datum.null_buffer().is_some(),
            ));

            if let Some(child_null_buffer) = child_datum.null_buffer() {
                // OR the validity bitmaps: an entry is valid if ANY field is
                // valid, i.e. null only when ALL fields are null.
                null = Some(if let Some(null_buffer) = &null {
                    buffer_bin_or(null_buffer, 0, child_null_buffer, 0, child_datum_len)
                } else {
                    child_null_buffer.clone()
                });
            } else if null.is_some() {
                // when one of the fields has no nulls, then there is no null in the array
                null = None;
            }
        }
        let len = len.unwrap();

        let mut builder = ArrayData::builder(DataType::Struct(fields))
            .len(len)
            .child_data(child_data);
        if let Some(null_buffer) = null {
            let null_count = len - bit_util::count_set_bits(null_buffer.data());
            builder = builder.null_count(null_count).null_bit_buffer(null_buffer);
        }

        Ok(StructArray::from(builder.build()))
    }
}

impl Array for StructArray {
    fn as_any(&self) -> &Any {
        self
    }

    fn data(&self) -> ArrayDataRef {
        self.data.clone()
    }

    fn data_ref(&self) -> &ArrayDataRef {
        &self.data
    }

    /// Returns the length (i.e., number of elements) of this array
    fn len(&self) -> usize {
        self.data_ref().len()
    }

    /// Returns the total number of bytes of memory occupied by the buffers owned by this [StructArray].
    fn get_buffer_memory_size(&self) -> usize {
        self.data.get_buffer_memory_size()
    }

    /// Returns the total number of bytes of memory occupied physically by this [StructArray].
    fn get_array_memory_size(&self) -> usize {
        self.data.get_array_memory_size() + mem::size_of_val(self)
    }
}

impl From<Vec<(Field, ArrayRef)>> for StructArray {
    fn from(v: Vec<(Field, ArrayRef)>) -> Self {
        let (field_types, field_values): (Vec<_>, Vec<_>) = v.into_iter().unzip();

        // Check the length of the child arrays
        let length = field_values[0].len();
        for i in 1..field_values.len() {
            assert_eq!(
                length,
                field_values[i].len(),
                "all child arrays of a StructArray must have the same length"
            );
            assert_eq!(
                field_types[i].data_type(),
                field_values[i].data().data_type(),
                "the field data types must match the array data in a StructArray"
            )
        }

        let data = ArrayData::builder(DataType::Struct(field_types))
            .child_data(field_values.into_iter().map(|a| a.data()).collect())
            .len(length)
            .build();
        Self::from(data)
    }
}

impl fmt::Debug for StructArray {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "StructArray\n[\n")?;
        for (child_index, name) in self.column_names().iter().enumerate() {
            let column = self.column(child_index);
            writeln!(
                f,
                "-- child {}: \"{}\" ({:?})",
                child_index,
                name,
                column.data_type()
            )?;
            fmt::Debug::fmt(column, f)?;
            writeln!(f)?;
        }
        write!(f, "]")
    }
}

// Builds a StructArray from (fields, null bitmap buffer, null count).
impl From<(Vec<(Field, ArrayRef)>, Buffer, usize)> for StructArray {
    fn from(triple: (Vec<(Field, ArrayRef)>, Buffer, usize)) -> Self {
        let (field_types, field_values): (Vec<_>, Vec<_>) = triple.0.into_iter().unzip();

        // Check the length of the child arrays
        let length = field_values[0].len();
        for i in 1..field_values.len() {
            assert_eq!(
                length,
                field_values[i].len(),
                "all child arrays of a StructArray must have the same length"
            );
            assert_eq!(
                field_types[i].data_type(),
                field_values[i].data().data_type(),
                "the field data types must match the array data in a StructArray"
            )
        }

        let data = ArrayData::builder(DataType::Struct(field_types))
            .null_bit_buffer(triple.1)
            .child_data(field_values.into_iter().map(|a| a.data()).collect())
            .len(length)
            .null_count(triple.2)
            .build();
        Self::from(data)
    }
}

/// A
dictionary array where each element is a single value indexed by an integer key.
/// This is mostly used to represent strings or a limited set of primitive types as integers,
/// for example when doing NLP analysis or representing chromosomes by name.
///
/// Example **with nullable** data:
///
/// ```
/// use arrow::array::DictionaryArray;
/// use arrow::datatypes::Int8Type;
/// let test = vec!["a", "a", "b", "c"];
/// let array : DictionaryArray<Int8Type> = test.iter().map(|&x| if x == "b" {None} else {Some(x)}).collect();
/// assert_eq!(array.keys().collect::<Vec<Option<i8>>>(), vec![Some(0), Some(0), None, Some(1)]);
/// ```
///
/// Example **without nullable** data:
///
/// ```
/// use arrow::array::DictionaryArray;
/// use arrow::datatypes::Int8Type;
/// let test = vec!["a", "a", "b", "c"];
/// let array : DictionaryArray<Int8Type> = test.into_iter().collect();
/// assert_eq!(array.keys().collect::<Vec<Option<i8>>>(), vec![Some(0), Some(0), Some(1), Some(2)]);
/// ```
pub struct DictionaryArray<K: ArrowPrimitiveType> {
    /// Array of keys, stored as a PrimitiveArray<K>.
    data: ArrayDataRef,

    /// Pointer to the key values.
    raw_values: RawPtrBox<K::Native>,

    /// Array of dictionary values (can be any DataType).
    values: ArrayRef,

    /// Values are ordered.
    is_ordered: bool,
}

// State machine for NullableIter's reverse (DoubleEnded) traversal, since
// the shared index `i` is reused for both directions.
#[derive(Debug)]
enum Draining {
    Ready,
    Iterating,
    Finished,
}

/// Iterator over a primitive values buffer that consults the array's null
/// bitmap, yielding `None` for null slots and `Some(value)` otherwise.
#[derive(Debug)]
pub struct NullableIter<'a, T> {
    data: &'a ArrayDataRef, // TODO: Use a pointer to the null bitmap.
    ptr: *const T,
    i: usize,
    len: usize,
    draining: Draining,
}

impl<'a, T> std::iter::Iterator for NullableIter<'a, T>
where
    T: Clone,
{
    type Item = Option<T>;

    fn next(&mut self) -> Option<Self::Item> {
        let i = self.i;
        if i >= self.len {
            None
        } else if self.data.is_null(i) {
            self.i += 1;
            Some(None)
        } else {
            self.i += 1;
            unsafe { Some(Some((&*self.ptr.add(i)).clone())) }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len, Some(self.len))
    }

    fn nth(&mut self, n: usize) -> Option<Self::Item> {
        let i = self.i;
        if i + n >= self.len {
            self.i = self.len;
            None
        } else if self.data.is_null(i + n) {
            self.i += n + 1;
            Some(None)
        } else {
            self.i += n + 1;
            unsafe { Some(Some((&*self.ptr.add(i + n)).clone())) }
        }
    }
}

impl<'a, T> std::iter::DoubleEndedIterator for NullableIter<'a, T>
where
    T: Clone,
{
    // Walks from len-1 down to 0, using `draining` to track progress because
    // `i` is unsigned and cannot go below zero.
    // NOTE(review): the forward and reverse directions share `i`, so mixing
    // next() and next_back() on one iterator looks unsupported — and a null
    // at index 0 appears to leave `draining` at Finished without a terminating
    // None/Ready transition through the usual path; confirm against the tests.
    fn next_back(&mut self) -> Option<Self::Item> {
        match self.draining {
            Draining::Ready => {
                // First call: position at the last element, then re-enter.
                self.draining = Draining::Iterating;
                self.i = self.len - 1;
                self.next_back()
            }
            Draining::Iterating => {
                let i = self.i;
                if i >= self.len {
                    None
                } else if self.data.is_null(i) {
                    self.i = self.i.checked_sub(1).unwrap_or_else(|| {
                        self.draining = Draining::Finished;
                        0_usize
                    });
                    Some(None)
                } else {
                    match i.checked_sub(1) {
                        Some(idx) => {
                            self.i = idx;
                            unsafe { Some(Some((&*self.ptr.add(i)).clone())) }
                        }
                        _ => {
                            // Reached index 0: yield it and finish.
                            self.draining = Draining::Finished;
                            unsafe { Some(Some((&*self.ptr).clone())) }
                        }
                    }
                }
            }
            Draining::Finished => {
                // Reset so the iterator could be drained again.
                self.draining = Draining::Ready;
                None
            }
        }
    }
}

impl<'a, K: ArrowPrimitiveType> DictionaryArray<K> {
    /// Return an iterator to the keys of this dictionary.
    pub fn keys(&self) -> NullableIter<'_, K::Native> {
        NullableIter::<'_, K::Native> {
            data: &self.data,
            // Offset the raw pointer so slot 0 of the iterator is this
            // array's logical first element.
            ptr: unsafe { self.raw_values.get().add(self.data.offset()) },
            i: 0,
            len: self.data.len(),
            draining: Draining::Ready,
        }
    }

    /// Returns an array view of the keys of this dictionary
    pub fn keys_array(&self) -> PrimitiveArray<K> {
        let data = self.data_ref();
        // Rebuild the keys as a plain primitive array sharing the same
        // buffers and null bitmap (no child data).
        let keys_data = ArrayData::new(
            K::DATA_TYPE,
            data.len(),
            Some(data.null_count()),
            data.null_buffer().cloned(),
            data.offset(),
            data.buffers().to_vec(),
            vec![],
        );
        PrimitiveArray::<K>::from(Arc::new(keys_data))
    }

    /// Returns the lookup key by doing reverse dictionary lookup
    /// (linear scan over the values; O(len) per call).
    ///
    /// # Panics
    /// Panics if the dictionary values are not a `StringArray`.
    pub fn lookup_key(&self, value: &str) -> Option<K::Native> {
        let rd_buf: &StringArray =
            self.values.as_any().downcast_ref::<StringArray>().unwrap();

        (0..rd_buf.len())
            .position(|i| rd_buf.value(i) == value)
            .map(K::Native::from_usize)
            .flatten()
    }

    /// Returns an `ArrayRef` to the dictionary values.
    pub fn values(&self) -> ArrayRef {
        self.values.clone()
    }

    /// Returns a clone of the value type of this list.
    pub fn value_type(&self) -> DataType {
        self.values.data_ref().data_type().clone()
    }

    /// The length of the dictionary is the length of the keys array.
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Whether this dictionary is empty
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    // Currently exists for compatibility purposes with Arrow IPC.
    pub fn is_ordered(&self) -> bool {
        self.is_ordered
    }
}

/// Constructs a `DictionaryArray` from an array data reference.
impl<T: ArrowPrimitiveType> From<ArrayDataRef> for DictionaryArray<T> {
    fn from(data: ArrayDataRef) -> Self {
        assert_eq!(
            data.buffers().len(),
            1,
            "DictionaryArray data should contain a single buffer only (keys)."
        );
        assert_eq!(
            data.child_data().len(),
            1,
            "DictionaryArray should contain a single child array (values)."
        );

        let raw_values = data.buffers()[0].raw_data();
        let dtype: &DataType = data.data_type();
        let values = make_array(data.child_data()[0].clone());
        if let DataType::Dictionary(_, _) = dtype {
            Self {
                data,
                raw_values: RawPtrBox::new(raw_values as *const T::Native),
                values,
                is_ordered: false,
            }
        } else {
            panic!("DictionaryArray must have Dictionary data type.")
        }
    }
}

/// Constructs a `DictionaryArray` from an iterator of optional strings.
impl<'a, T: ArrowPrimitiveType + ArrowDictionaryKeyType> FromIterator<Option<&'a str>>
    for DictionaryArray<T>
{
    fn from_iter<I: IntoIterator<Item = Option<&'a str>>>(iter: I) -> Self {
        let it = iter.into_iter();
        let (lower, _) = it.size_hint();
        let key_builder = PrimitiveBuilder::<T>::new(lower);
        let value_builder = StringBuilder::new(256);
        let mut builder = StringDictionaryBuilder::new(key_builder, value_builder);
        it.for_each(|i| {
            if let Some(i) = i {
                // Note: impl ... for Result<DictionaryArray<T>> fails with
                // error[E0117]: only traits defined in the current crate can be implemented for arbitrary types
                builder
                    .append(i)
                    .expect("Unable to append a value to a dictionary array.");
            } else {
                builder
                    .append_null()
                    .expect("Unable to append a null value to a dictionary array.");
            }
        });

        builder.finish()
    }
}

/// Constructs a `DictionaryArray` from an iterator of strings.
impl<'a, T: ArrowPrimitiveType + ArrowDictionaryKeyType> FromIterator<&'a str> for DictionaryArray<T> { fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> Self { let it = iter.into_iter(); let (lower, _) = it.size_hint(); let key_builder = PrimitiveBuilder::<T>::new(lower); let value_builder = StringBuilder::new(256); let mut builder = StringDictionaryBuilder::new(key_builder, value_builder); it.for_each(|i| { builder .append(i) .expect("Unable to append a value to a dictionary array."); }); builder.finish() } } impl<T: ArrowPrimitiveType> Array for DictionaryArray<T> { fn as_any(&self) -> &Any { self } fn data(&self) -> ArrayDataRef { self.data.clone() } fn data_ref(&self) -> &ArrayDataRef { &self.data } /// Returns the total number of bytes of memory occupied by the buffers owned by this [DictionaryArray]. fn get_buffer_memory_size(&self) -> usize { self.data.get_buffer_memory_size() + self.values().get_buffer_memory_size() } /// Returns the total number of bytes of memory occupied physically by this [DictionaryArray]. fn get_array_memory_size(&self) -> usize { self.data.get_array_memory_size() + self.values().get_array_memory_size() + mem::size_of_val(self) } } impl<T: ArrowPrimitiveType> fmt::Debug for DictionaryArray<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { const MAX_LEN: usize = 10; let keys: Vec<_> = self.keys().take(MAX_LEN).collect(); let elipsis = if self.keys().count() > MAX_LEN { "..." 
} else {
            ""
        };
        writeln!(
            f,
            "DictionaryArray {{keys: {:?}{} values: {:?}}}",
            keys, elipsis, self.values
        )
    }
}

// Unit tests for the array implementations above.
#[cfg(test)]
mod tests {
    use super::*;

    use std::sync::Arc;
    use std::thread;

    use crate::buffer::Buffer;
    use crate::datatypes::{DataType, Field};
    use crate::{bitmap::Bitmap, memory};

    #[test]
    fn test_primitive_array_from_vec() {
        let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice());
        let buf2 = buf.clone();
        let arr = Int32Array::new(5, buf, 0, 0);
        let slice = unsafe { std::slice::from_raw_parts(arr.raw_values(), 5) };
        assert_eq!(buf2, arr.values());
        assert_eq!(&[0, 1, 2, 3, 4], slice);
        assert_eq!(5, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        for i in 0..5 {
            assert!(!arr.is_null(i));
            assert!(arr.is_valid(i));
            assert_eq!(i as i32, arr.value(i));
        }
        assert_eq!(64, arr.get_buffer_memory_size());
        let internals_of_primitive_array = 8 + 72; // RawPtrBox & Arc<ArrayData> combined.
        assert_eq!(
            arr.get_buffer_memory_size() + internals_of_primitive_array,
            arr.get_array_memory_size()
        );
    }

    #[test]
    fn test_primitive_array_from_vec_option() {
        // Test building a primitive array with null values
        let arr = Int32Array::from(vec![Some(0), None, Some(2), None, Some(4)]);
        assert_eq!(5, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(2, arr.null_count());
        for i in 0..5 {
            if i % 2 == 0 {
                assert!(!arr.is_null(i));
                assert!(arr.is_valid(i));
                assert_eq!(i as i32, arr.value(i));
            } else {
                assert!(arr.is_null(i));
                assert!(!arr.is_valid(i));
            }
        }
        assert_eq!(128, arr.get_buffer_memory_size());
        let internals_of_primitive_array = 8 + 72 + 16; // RawPtrBox & Arc<ArrayData> and its null_bitmap combined.
assert_eq!(
            arr.get_buffer_memory_size() + internals_of_primitive_array,
            arr.get_array_memory_size()
        );
    }

    #[test]
    fn test_date64_array_from_vec_option() {
        // Test building a primitive array with null values
        // we use Int32 and Int64 as a backing array, so all Int32 and Int64 conventions
        // work
        let arr: PrimitiveArray<Date64Type> =
            vec![Some(1550902545147), None, Some(1550902545147)].into();
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        for i in 0..3 {
            if i % 2 == 0 {
                assert!(!arr.is_null(i));
                assert!(arr.is_valid(i));
                assert_eq!(1550902545147, arr.value(i));
                // roundtrip to and from datetime
                assert_eq!(
                    1550902545147,
                    arr.value_as_datetime(i).unwrap().timestamp_millis()
                );
            } else {
                assert!(arr.is_null(i));
                assert!(!arr.is_valid(i));
            }
        }
    }

    #[test]
    fn test_time32_millisecond_array_from_vec() {
        // raw value -> formatted time:
        // 1:        00:00:00.001
        // 37800005: 10:30:00.005
        // 86399210: 23:59:59.210
        let arr: PrimitiveArray<Time32MillisecondType> =
            vec![1, 37_800_005, 86_399_210].into();
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        let formatted = vec!["00:00:00.001", "10:30:00.005", "23:59:59.210"];
        for i in 0..3 {
            // check that we can't create dates or datetimes from time instances
            assert_eq!(None, arr.value_as_datetime(i));
            assert_eq!(None, arr.value_as_date(i));
            let time = arr.value_as_time(i).unwrap();
            assert_eq!(formatted[i], time.format("%H:%M:%S%.3f").to_string());
        }
    }

    #[test]
    fn test_time64_nanosecond_array_from_vec() {
        // Test building a primitive array with null values
        // we use Int32 and Int64 as a backing array, so all Int32 and Int64 conventions
        // work

        // raw value -> formatted time:
        // 1e6:        00:00:00.001
        // 37800005e6: 10:30:00.005
        // 86399210e6: 23:59:59.210
        let arr: PrimitiveArray<Time64NanosecondType> =
            vec![1_000_000, 37_800_005_000_000, 86_399_210_000_000].into();
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        let formatted = vec!["00:00:00.001", "10:30:00.005", "23:59:59.210"];
        for i in
0..3 {
            // check that we can't create dates or datetimes from time instances
            assert_eq!(None, arr.value_as_datetime(i));
            assert_eq!(None, arr.value_as_date(i));
            let time = arr.value_as_time(i).unwrap();
            assert_eq!(formatted[i], time.format("%H:%M:%S%.3f").to_string());
        }
    }

    #[test]
    fn test_interval_array_from_vec() {
        // intervals are currently not treated specially, but are Int32 and Int64 arrays
        let arr = IntervalYearMonthArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5, arr.value(2));

        // a day_time interval contains days and milliseconds, but we do not yet have accessors for the values
        let arr = IntervalDayTimeArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5, arr.value(2));
    }

    #[test]
    fn test_duration_array_from_vec() {
        // Every Duration granularity shares the same construction semantics.
        let arr = DurationSecondArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5, arr.value(2));

        let arr = DurationMillisecondArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5, arr.value(2));

        let arr = DurationMicrosecondArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5, arr.value(2));

        let arr = DurationNanosecondArray::from(vec![Some(1), None, Some(-5)]);
        assert_eq!(3, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert!(arr.is_null(1));
        assert_eq!(-5,
arr.value(2));
    }

    #[test]
    fn test_timestamp_array_from_vec() {
        let arr = TimestampSecondArray::from_vec(vec![1, -5], None);
        assert_eq!(2, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert_eq!(-5, arr.value(1));

        let arr = TimestampMillisecondArray::from_vec(vec![1, -5], None);
        assert_eq!(2, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert_eq!(-5, arr.value(1));

        let arr = TimestampMicrosecondArray::from_vec(vec![1, -5], None);
        assert_eq!(2, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert_eq!(-5, arr.value(1));

        let arr = TimestampNanosecondArray::from_vec(vec![1, -5], None);
        assert_eq!(2, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        assert_eq!(1, arr.value(0));
        assert_eq!(-5, arr.value(1));
    }

    #[test]
    fn test_primitive_array_slice() {
        // Slices should track offset and per-slice null counts.
        let arr = Int32Array::from(vec![
            Some(0),
            None,
            Some(2),
            None,
            Some(4),
            Some(5),
            Some(6),
            None,
            None,
        ]);
        assert_eq!(9, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(4, arr.null_count());

        let arr2 = arr.slice(2, 5);
        assert_eq!(5, arr2.len());
        assert_eq!(2, arr2.offset());
        assert_eq!(1, arr2.null_count());
        for i in 0..arr2.len() {
            assert_eq!(i == 1, arr2.is_null(i));
            assert_eq!(i != 1, arr2.is_valid(i));
        }

        // Slicing a slice compounds the offsets.
        let arr3 = arr2.slice(2, 3);
        assert_eq!(3, arr3.len());
        assert_eq!(4, arr3.offset());
        assert_eq!(0, arr3.null_count());

        let int_arr = arr3.as_any().downcast_ref::<Int32Array>().unwrap();
        assert_eq!(4, int_arr.value(0));
        assert_eq!(5, int_arr.value(1));
        assert_eq!(6, int_arr.value(2));
    }

    #[test]
    fn test_boolean_array_slice() {
        let arr = BooleanArray::from(vec![
            Some(true),
            None,
            Some(false),
            None,
            Some(true),
            Some(false),
            Some(true),
            Some(false),
            None,
            Some(true),
        ]);
        assert_eq!(10, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(3, arr.null_count());

        let arr2 = arr.slice(3, 5);
        assert_eq!(5, arr2.len());
        assert_eq!(3,
arr2.offset());
        assert_eq!(1, arr2.null_count());

        let bool_arr = arr2.as_any().downcast_ref::<BooleanArray>().unwrap();
        assert_eq!(false, bool_arr.is_valid(0));
        assert_eq!(true, bool_arr.is_valid(1));
        assert_eq!(true, bool_arr.value(1));
        assert_eq!(true, bool_arr.is_valid(2));
        assert_eq!(false, bool_arr.value(2));
        assert_eq!(true, bool_arr.is_valid(3));
        assert_eq!(true, bool_arr.value(3));
        assert_eq!(true, bool_arr.is_valid(4));
        assert_eq!(false, bool_arr.value(4));
    }

    #[test]
    fn test_value_slice_no_bounds_check() {
        // NOTE(review): value_slice(0, 4) reads past the array's length of 3 without
        // panicking — this test documents the current (unchecked) behavior.
        let arr = Int32Array::from(vec![2, 3, 4]);
        let _slice = arr.value_slice(0, 4);
    }

    #[test]
    fn test_int32_fmt_debug() {
        let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice());
        let arr = Int32Array::new(5, buf, 0, 0);
        assert_eq!(
            "PrimitiveArray<Int32>\n[\n 0,\n 1,\n 2,\n 3,\n 4,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_fmt_debug_up_to_20_elements() {
        // Debug output should list each element for arrays of 1..=20 items.
        (1..=20).for_each(|i| {
            let values = (0..i).collect::<Vec<i16>>();
            let array_expected = format!(
                "PrimitiveArray<Int16>\n[\n{}\n]",
                values
                    .iter()
                    .map(|v| { format!(" {},", v) })
                    .collect::<Vec<String>>()
                    .join("\n")
            );
            let array = Int16Array::from(values);
            assert_eq!(array_expected, format!("{:?}", array));
        })
    }

    #[test]
    fn test_int32_with_null_fmt_debug() {
        let mut builder = Int32Array::builder(3);
        builder.append_slice(&[0, 1]).unwrap();
        builder.append_null().unwrap();
        builder.append_slice(&[3, 4]).unwrap();
        let arr = builder.finish();
        assert_eq!(
            "PrimitiveArray<Int32>\n[\n 0,\n 1,\n null,\n 3,\n 4,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_boolean_fmt_debug() {
        let buf = Buffer::from(&[true, false, false].to_byte_slice());
        let arr = BooleanArray::new(3, buf, 0, 0);
        assert_eq!(
            "PrimitiveArray<Boolean>\n[\n true,\n false,\n false,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_boolean_with_null_fmt_debug() {
        let mut builder = BooleanArray::builder(3);
        builder.append_value(true).unwrap();
        builder.append_null().unwrap();
        builder.append_value(false).unwrap();
        let arr = builder.finish();
        assert_eq!(
"PrimitiveArray<Boolean>\n[\n true,\n null,\n false,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_timestamp_fmt_debug() {
        let arr: PrimitiveArray<TimestampMillisecondType> =
            TimestampMillisecondArray::from_vec(vec![1546214400000, 1546214400000], None);
        assert_eq!(
            "PrimitiveArray<Timestamp(Millisecond, None)>\n[\n 2018-12-31T00:00:00,\n 2018-12-31T00:00:00,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_date32_fmt_debug() {
        let arr: PrimitiveArray<Date32Type> = vec![12356, 13548].into();
        assert_eq!(
            "PrimitiveArray<Date32(Day)>\n[\n 2003-10-31,\n 2007-02-04,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_time32second_fmt_debug() {
        let arr: PrimitiveArray<Time32SecondType> = vec![7201, 60054].into();
        assert_eq!(
            "PrimitiveArray<Time32(Second)>\n[\n 02:00:01,\n 16:40:54,\n]",
            format!("{:?}", arr)
        );
    }

    #[test]
    fn test_primitive_array_builder() {
        // Test building a primitive array with ArrayData builder and offset
        // (the offset shifts the visible window; values start at index 2).
        let buf = Buffer::from(&[0, 1, 2, 3, 4].to_byte_slice());
        let buf2 = buf.clone();
        let data = ArrayData::builder(DataType::Int32)
            .len(5)
            .offset(2)
            .add_buffer(buf)
            .build();
        let arr = Int32Array::from(data);
        assert_eq!(buf2, arr.values());
        assert_eq!(5, arr.len());
        assert_eq!(0, arr.null_count());
        for i in 0..3 {
            assert_eq!((i + 2) as i32, arr.value(i));
        }
    }

    #[test]
    #[should_panic(expected = "PrimitiveArray data should contain a single buffer only \
        (values buffer)")]
    fn test_primitive_array_invalid_buffer_len() {
        let data = ArrayData::builder(DataType::Int32).len(5).build();
        Int32Array::from(data);
    }

    #[test]
    fn test_boolean_array_new() {
        // bit pattern of the two value bytes: 00000010 01001000
        let buf = Buffer::from([72_u8, 2_u8]);
        let buf2 = buf.clone();
        let arr = BooleanArray::new(10, buf, 0, 0);
        assert_eq!(buf2, arr.values());
        assert_eq!(10, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        for i in 0..10 {
            assert!(!arr.is_null(i));
            assert!(arr.is_valid(i));
            assert_eq!(i == 3 || i == 6 || i == 9, arr.value(i), "failed at {}", i)
        }
    }

    #[test]
    fn
test_boolean_array_from_vec() {
        // 10 = 0b1010, so elements 1 and 3 are true.
        let buf = Buffer::from([10_u8]);
        let arr = BooleanArray::from(vec![false, true, false, true]);
        assert_eq!(buf, arr.values());
        assert_eq!(4, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(0, arr.null_count());
        for i in 0..4 {
            assert!(!arr.is_null(i));
            assert!(arr.is_valid(i));
            assert_eq!(i == 1 || i == 3, arr.value(i), "failed at {}", i)
        }
    }

    #[test]
    fn test_boolean_array_from_vec_option() {
        let buf = Buffer::from([10_u8]);
        let arr = BooleanArray::from(vec![Some(false), Some(true), None, Some(true)]);
        assert_eq!(buf, arr.values());
        assert_eq!(4, arr.len());
        assert_eq!(0, arr.offset());
        assert_eq!(1, arr.null_count());
        for i in 0..4 {
            if i == 2 {
                assert!(arr.is_null(i));
                assert!(!arr.is_valid(i));
            } else {
                assert!(!arr.is_null(i));
                assert!(arr.is_valid(i));
                assert_eq!(i == 1 || i == 3, arr.value(i), "failed at {}", i)
            }
        }
    }

    #[test]
    fn test_boolean_array_builder() {
        // Test building a boolean array with ArrayData builder and offset
        // 000011011
        let buf = Buffer::from([27_u8]);
        let buf2 = buf.clone();
        let data = ArrayData::builder(DataType::Boolean)
            .len(5)
            .offset(2)
            .add_buffer(buf)
            .build();
        let arr = BooleanArray::from(data);
        assert_eq!(buf2, arr.values());
        assert_eq!(5, arr.len());
        assert_eq!(2, arr.offset());
        assert_eq!(0, arr.null_count());
        for i in 0..3 {
            assert_eq!(i != 0, arr.value(i), "failed at {}", i);
        }
    }

    #[test]
    #[should_panic(expected = "PrimitiveArray data should contain a single buffer only \
        (values buffer)")]
    fn test_boolean_array_invalid_buffer_len() {
        let data = ArrayData::builder(DataType::Boolean).len(5).build();
        BooleanArray::from(data);
    }

    #[test]
    fn test_list_array() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(8)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()))
            .build();

        // Construct a buffer for value offsets, for the nested array:
        //  [[0, 1, 2], [3, 4, 5], [6, 7]]
        let value_offsets = Buffer::from(&[0, 3, 6, 8].to_byte_slice());

        // Construct a
// list array from the above two
        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type.clone())
            .len(3)
            .add_buffer(value_offsets.clone())
            .add_child_data(value_data.clone())
            .build();
        let list_array = ListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(6, list_array.value_offset(2));
        assert_eq!(2, list_array.value_length(2));
        assert_eq!(
            0,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
        for i in 0..3 {
            assert!(list_array.is_valid(i));
            assert!(!list_array.is_null(i));
        }

        // Now test with a non-zero offset
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .offset(1)
            .add_buffer(value_offsets)
            .add_child_data(value_data.clone())
            .build();
        let list_array = ListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(6, list_array.value_offset(1));
        assert_eq!(2, list_array.value_length(1));
        assert_eq!(
            3,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
    }

    #[test]
    fn test_large_list_array() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(8)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()))
            .build();

        // Construct a buffer for value offsets, for the nested array:
        //  [[0, 1, 2], [3, 4, 5], [6, 7]]
        let value_offsets = Buffer::from(&[0i64, 3, 6, 8].to_byte_slice());

        // Construct a list array from the above two
        let list_data_type = DataType::LargeList(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type.clone())
            .len(3)
            .add_buffer(value_offsets.clone())
            .add_child_data(value_data.clone())
            .build();
let list_array = LargeListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(6, list_array.value_offset(2));
        assert_eq!(2, list_array.value_length(2));
        assert_eq!(
            0,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
        for i in 0..3 {
            assert!(list_array.is_valid(i));
            assert!(!list_array.is_null(i));
        }

        // Now test with a non-zero offset
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .offset(1)
            .add_buffer(value_offsets)
            .add_child_data(value_data.clone())
            .build();
        let list_array = LargeListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(6, list_array.value_offset(1));
        assert_eq!(2, list_array.value_length(1));
        assert_eq!(
            3,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
    }

    #[test]
    fn test_dictionary_array() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int8)
            .len(8)
            .add_buffer(Buffer::from(
                &[10_i8, 11, 12, 13, 14, 15, 16, 17].to_byte_slice(),
            ))
            .build();

        // Construct a buffer for value offsets, for the nested array:
        let keys = Buffer::from(&[2_i16, 3, 4].to_byte_slice());

        // Construct a dictionary array from the above two
        let key_type = DataType::Int16;
        let value_type = DataType::Int8;
        let dict_data_type =
            DataType::Dictionary(Box::new(key_type), Box::new(value_type));
        let dict_data = ArrayData::builder(dict_data_type.clone())
            .len(3)
            .add_buffer(keys.clone())
            .add_child_data(value_data.clone())
            .build();
        let dict_array = Int16DictionaryArray::from(dict_data);

        let values = dict_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int8, dict_array.value_type());
assert_eq!(3, dict_array.len());

        // Null count only makes sense in terms of the component arrays.
        assert_eq!(0, dict_array.null_count());
        assert_eq!(0, dict_array.values().null_count());

        assert_eq!(Some(Some(3)), dict_array.keys().nth(1));
        assert_eq!(Some(Some(4)), dict_array.keys().nth(2));

        assert_eq!(
            dict_array.keys().collect::<Vec<Option<i16>>>(),
            vec![Some(2), Some(3), Some(4)]
        );

        // The keys iterator is double-ended: rev(), and rev() of rev(), both work.
        assert_eq!(
            dict_array.keys().rev().collect::<Vec<Option<i16>>>(),
            vec![Some(4), Some(3), Some(2)]
        );

        assert_eq!(
            dict_array.keys().rev().rev().collect::<Vec<Option<i16>>>(),
            vec![Some(2), Some(3), Some(4)]
        );

        // Now test with a non-zero offset
        let dict_data = ArrayData::builder(dict_data_type)
            .len(2)
            .offset(1)
            .add_buffer(keys)
            .add_child_data(value_data.clone())
            .build();
        let dict_array = Int16DictionaryArray::from(dict_data);

        let values = dict_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int8, dict_array.value_type());
        assert_eq!(2, dict_array.len());
        assert_eq!(Some(Some(3)), dict_array.keys().nth(0));
        assert_eq!(Some(Some(4)), dict_array.keys().nth(1));

        assert_eq!(
            dict_array.keys().collect::<Vec<Option<i16>>>(),
            vec![Some(3), Some(4)]
        );
    }

    #[test]
    fn test_dictionary_array_key_reverse() {
        let test = vec!["a", "a", "b", "c"];
        let array: DictionaryArray<Int8Type> = test
            .iter()
            .map(|&x| if x == "b" { None } else { Some(x) })
            .collect();
        assert_eq!(
            array.keys().rev().collect::<Vec<Option<i8>>>(),
            vec![Some(1), None, Some(0), Some(0)]
        );
    }

    #[test]
    fn test_fixed_size_list_array() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(9)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7, 8].to_byte_slice()))
            .build();

        // Construct a list array from the above two
        let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 3);
        let list_data = ArrayData::builder(list_data_type.clone())
            .len(3)
            .add_child_data(value_data.clone())
            .build();
        let list_array = FixedSizeListArray::from(list_data);

        let values =
list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(6, list_array.value_offset(2));
        assert_eq!(3, list_array.value_length());
        assert_eq!(
            0,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
        for i in 0..3 {
            assert!(list_array.is_valid(i));
            assert!(!list_array.is_null(i));
        }

        // Now test with a non-zero offset
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .offset(1)
            .add_child_data(value_data.clone())
            .build();
        let list_array = FixedSizeListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(3, list_array.len());
        assert_eq!(0, list_array.null_count());
        assert_eq!(
            3,
            list_array
                .value(0)
                .as_any()
                .downcast_ref::<Int32Array>()
                .unwrap()
                .value(0)
        );
        assert_eq!(6, list_array.value_offset(1));
        assert_eq!(3, list_array.value_length());
    }

    #[test]
    #[should_panic(
        expected = "FixedSizeListArray child array length should be a multiple of 3"
    )]
    fn test_fixed_size_list_array_unequal_children() {
        // Construct a value array whose length (8) is not a multiple of the list size (3).
        let value_data = ArrayData::builder(DataType::Int32)
            .len(8)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()))
            .build();

        // Construct a list array from the above two
        let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 3);
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .add_child_data(value_data)
            .build();
        FixedSizeListArray::from(list_data);
    }

    #[test]
    fn test_list_array_slice() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(10)
            .add_buffer(Buffer::from(
                &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(),
            ))
            .build();

        // Construct a buffer for value offsets, for the nested array:
        //  [[0, 1], null, null, [2, 3], [4, 5], null, [6, 7, 8], null, [9]]
        let value_offsets =
Buffer::from(&[0, 2, 2, 2, 4, 6, 6, 9, 9, 10].to_byte_slice());
        // validity bitmap, little-endian bytes: 01011001 00000001
        let mut null_bits: [u8; 2] = [0; 2];
        bit_util::set_bit(&mut null_bits, 0);
        bit_util::set_bit(&mut null_bits, 3);
        bit_util::set_bit(&mut null_bits, 4);
        bit_util::set_bit(&mut null_bits, 6);
        bit_util::set_bit(&mut null_bits, 8);

        // Construct a list array from the above two
        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .len(9)
            .add_buffer(value_offsets)
            .add_child_data(value_data.clone())
            .null_bit_buffer(Buffer::from(null_bits))
            .build();
        let list_array = ListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(9, list_array.len());
        assert_eq!(4, list_array.null_count());
        assert_eq!(2, list_array.value_offset(3));
        assert_eq!(2, list_array.value_length(3));

        let sliced_array = list_array.slice(1, 6);
        assert_eq!(6, sliced_array.len());
        assert_eq!(1, sliced_array.offset());
        assert_eq!(3, sliced_array.null_count());

        // Validity of each sliced element must line up with the original bitmap.
        for i in 0..sliced_array.len() {
            if bit_util::get_bit(&null_bits, sliced_array.offset() + i) {
                assert!(sliced_array.is_valid(i));
            } else {
                assert!(sliced_array.is_null(i));
            }
        }

        // Check offset and length for each non-null value.
let sliced_list_array =
            sliced_array.as_any().downcast_ref::<ListArray>().unwrap();
        assert_eq!(2, sliced_list_array.value_offset(2));
        assert_eq!(2, sliced_list_array.value_length(2));
        assert_eq!(4, sliced_list_array.value_offset(3));
        assert_eq!(2, sliced_list_array.value_length(3));
        assert_eq!(6, sliced_list_array.value_offset(5));
        assert_eq!(3, sliced_list_array.value_length(5));
    }

    #[test]
    fn test_large_list_array_slice() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(10)
            .add_buffer(Buffer::from(
                &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(),
            ))
            .build();

        // Construct a buffer for value offsets (i64 for LargeList), for the nested array:
        //  [[0, 1], null, null, [2, 3], [4, 5], null, [6, 7, 8], null, [9]]
        let value_offsets =
            Buffer::from(&[0i64, 2, 2, 2, 4, 6, 6, 9, 9, 10].to_byte_slice());
        // validity bitmap, little-endian bytes: 01011001 00000001
        let mut null_bits: [u8; 2] = [0; 2];
        bit_util::set_bit(&mut null_bits, 0);
        bit_util::set_bit(&mut null_bits, 3);
        bit_util::set_bit(&mut null_bits, 4);
        bit_util::set_bit(&mut null_bits, 6);
        bit_util::set_bit(&mut null_bits, 8);

        // Construct a list array from the above two
        let list_data_type = DataType::LargeList(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .len(9)
            .add_buffer(value_offsets)
            .add_child_data(value_data.clone())
            .null_bit_buffer(Buffer::from(null_bits))
            .build();
        let list_array = LargeListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(9, list_array.len());
        assert_eq!(4, list_array.null_count());
        assert_eq!(2, list_array.value_offset(3));
        assert_eq!(2, list_array.value_length(3));

        let sliced_array = list_array.slice(1, 6);
        assert_eq!(6, sliced_array.len());
        assert_eq!(1, sliced_array.offset());
        assert_eq!(3, sliced_array.null_count());

        for i in 0..sliced_array.len() {
            if bit_util::get_bit(&null_bits, sliced_array.offset() + i) {
                assert!(sliced_array.is_valid(i));
            } else {
assert!(sliced_array.is_null(i));
            }
        }

        // Check offset and length for each non-null value.
        let sliced_list_array = sliced_array
            .as_any()
            .downcast_ref::<LargeListArray>()
            .unwrap();
        assert_eq!(2, sliced_list_array.value_offset(2));
        assert_eq!(2, sliced_list_array.value_length(2));
        assert_eq!(4, sliced_list_array.value_offset(3));
        assert_eq!(2, sliced_list_array.value_length(3));
        assert_eq!(6, sliced_list_array.value_offset(5));
        assert_eq!(3, sliced_list_array.value_length(5));
    }

    #[test]
    fn test_fixed_size_list_array_slice() {
        // Construct a value array
        let value_data = ArrayData::builder(DataType::Int32)
            .len(10)
            .add_buffer(Buffer::from(
                &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9].to_byte_slice(),
            ))
            .build();

        // Set null bits for the nested array:
        //  [[0, 1], null, null, [6, 7], [8, 9]]
        // 01011001 00000001
        let mut null_bits: [u8; 1] = [0; 1];
        bit_util::set_bit(&mut null_bits, 0);
        bit_util::set_bit(&mut null_bits, 3);
        bit_util::set_bit(&mut null_bits, 4);

        // Construct a fixed size list array from the above two
        let list_data_type = DataType::FixedSizeList(Box::new(DataType::Int32), 2);
        let list_data = ArrayData::builder(list_data_type)
            .len(5)
            .add_child_data(value_data.clone())
            .null_bit_buffer(Buffer::from(null_bits))
            .build();
        let list_array = FixedSizeListArray::from(list_data);

        let values = list_array.values();
        assert_eq!(value_data, values.data());
        assert_eq!(DataType::Int32, list_array.value_type());
        assert_eq!(5, list_array.len());
        assert_eq!(2, list_array.null_count());
        assert_eq!(6, list_array.value_offset(3));
        assert_eq!(2, list_array.value_length());

        let sliced_array = list_array.slice(1, 4);
        assert_eq!(4, sliced_array.len());
        assert_eq!(1, sliced_array.offset());
        assert_eq!(2, sliced_array.null_count());

        for i in 0..sliced_array.len() {
            if bit_util::get_bit(&null_bits, sliced_array.offset() + i) {
                assert!(sliced_array.is_valid(i));
            } else {
                assert!(sliced_array.is_null(i));
            }
        }

        // Check offset and length for each non-null value.
let sliced_list_array = sliced_array
            .as_any()
            .downcast_ref::<FixedSizeListArray>()
            .unwrap();
        assert_eq!(2, sliced_list_array.value_length());
        assert_eq!(6, sliced_list_array.value_offset(2));
        assert_eq!(8, sliced_list_array.value_offset(3));
    }

    #[test]
    #[should_panic(
        expected = "ListArray data should contain a single buffer only (value offsets)"
    )]
    fn test_list_array_invalid_buffer_len() {
        // No offsets buffer is supplied, which must be rejected.
        let value_data = ArrayData::builder(DataType::Int32)
            .len(8)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()))
            .build();
        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .add_child_data(value_data)
            .build();
        ListArray::from(list_data);
    }

    #[test]
    #[should_panic(
        expected = "ListArray should contain a single child array (values array)"
    )]
    fn test_list_array_invalid_child_array_len() {
        let value_offsets = Buffer::from(&[0, 2, 5, 7].to_byte_slice());
        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .add_buffer(value_offsets)
            .build();
        ListArray::from(list_data);
    }

    #[test]
    #[should_panic(expected = "offsets do not start at zero")]
    fn test_list_array_invalid_value_offset_start() {
        let value_data = ArrayData::builder(DataType::Int32)
            .len(8)
            .add_buffer(Buffer::from(&[0, 1, 2, 3, 4, 5, 6, 7].to_byte_slice()))
            .build();

        let value_offsets = Buffer::from(&[2, 2, 5, 7].to_byte_slice());

        let list_data_type = DataType::List(Box::new(DataType::Int32));
        let list_data = ArrayData::builder(list_data_type)
            .len(3)
            .add_buffer(value_offsets)
            .add_child_data(value_data)
            .build();
        ListArray::from(list_data);
    }

    #[test]
    fn test_binary_array() {
        let values: [u8; 12] = [
            b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't',
        ];
        let offsets: [i32; 4] = [0, 5, 5, 12];

        // Array data: ["hello", "", "parquet"]
        let array_data = ArrayData::builder(DataType::Binary)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
.add_buffer(Buffer::from(&values[..]))
            .build();
        let binary_array = BinaryArray::from(array_data);
        assert_eq!(3, binary_array.len());
        assert_eq!(0, binary_array.null_count());
        assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0));
        // The middle entry is zero-length ("").
        assert_eq!([] as [u8; 0], binary_array.value(1));
        assert_eq!(
            [b'p', b'a', b'r', b'q', b'u', b'e', b't'],
            binary_array.value(2)
        );
        assert_eq!(5, binary_array.value_offset(2));
        assert_eq!(7, binary_array.value_length(2));
        for i in 0..3 {
            assert!(binary_array.is_valid(i));
            assert!(!binary_array.is_null(i));
        }

        // Test binary array with offset
        let array_data = ArrayData::builder(DataType::Binary)
            .len(4)
            .offset(1)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let binary_array = BinaryArray::from(array_data);
        assert_eq!(
            [b'p', b'a', b'r', b'q', b'u', b'e', b't'],
            binary_array.value(1)
        );
        assert_eq!(5, binary_array.value_offset(0));
        assert_eq!(0, binary_array.value_length(0));
        assert_eq!(5, binary_array.value_offset(1));
        assert_eq!(7, binary_array.value_length(1));
    }

    #[test]
    fn test_large_binary_array() {
        let values: [u8; 12] = [
            b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't',
        ];
        let offsets: [i64; 4] = [0, 5, 5, 12];

        // Array data: ["hello", "", "parquet"]
        let array_data = ArrayData::builder(DataType::LargeBinary)
            .len(3)
            .add_buffer(Buffer::from(offsets.to_byte_slice()))
            .add_buffer(Buffer::from(&values[..]))
            .build();
        let binary_array = LargeBinaryArray::from(array_data);
        assert_eq!(3, binary_array.len());
        assert_eq!(0, binary_array.null_count());
        assert_eq!([b'h', b'e', b'l', b'l', b'o'], binary_array.value(0));
        assert_eq!([] as [u8; 0], binary_array.value(1));
        assert_eq!(
            [b'p', b'a', b'r', b'q', b'u', b'e', b't'],
            binary_array.value(2)
        );
        assert_eq!(5, binary_array.value_offset(2));
        assert_eq!(7, binary_array.value_length(2));
        for i in 0..3 {
            assert!(binary_array.is_valid(i));
            assert!(!binary_array.is_null(i));
        }

        // Test binary array with offset
let array_data = ArrayData::builder(DataType::LargeBinary) .len(4) .offset(1) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array = LargeBinaryArray::from(array_data); assert_eq!( [b'p', b'a', b'r', b'q', b'u', b'e', b't'], binary_array.value(1) ); assert_eq!(5, binary_array.value_offset(0)); assert_eq!(0, binary_array.value_length(0)); assert_eq!(5, binary_array.value_offset(1)); assert_eq!(7, binary_array.value_length(1)); } #[test] fn test_binary_array_from_list_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let values_data = ArrayData::builder(DataType::UInt8) .len(12) .add_buffer(Buffer::from(&values[..])) .build(); let offsets: [i32; 4] = [0, 5, 5, 12]; // Array data: ["hello", "", "parquet"] let array_data1 = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array1 = BinaryArray::from(array_data1); let array_data2 = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_child_data(values_data) .build(); let list_array = ListArray::from(array_data2); let binary_array2 = BinaryArray::from(list_array); assert_eq!(2, binary_array2.data().buffers().len()); assert_eq!(0, binary_array2.data().child_data().len()); assert_eq!(binary_array1.len(), binary_array2.len()); assert_eq!(binary_array1.null_count(), binary_array2.null_count()); for i in 0..binary_array1.len() { assert_eq!(binary_array1.value(i), binary_array2.value(i)); assert_eq!(binary_array1.value_offset(i), binary_array2.value_offset(i)); assert_eq!(binary_array1.value_length(i), binary_array2.value_length(i)); } } #[test] fn test_large_binary_array_from_list_array() { let values: [u8; 12] = [ b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't', ]; let values_data = ArrayData::builder(DataType::UInt8) .len(12) 
.add_buffer(Buffer::from(&values[..])) .build(); let offsets: [i64; 4] = [0, 5, 5, 12]; // Array data: ["hello", "", "parquet"] let array_data1 = ArrayData::builder(DataType::LargeBinary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_buffer(Buffer::from(&values[..])) .build(); let binary_array1 = LargeBinaryArray::from(array_data1); let array_data2 = ArrayData::builder(DataType::Binary) .len(3) .add_buffer(Buffer::from(offsets.to_byte_slice())) .add_child_data(values_data) .build(); let list_array = LargeListArray::from(array_data2); let binary_array2 = LargeBinaryArray::from(list_array); assert_eq!(2, binary_array2.data().buffers().len()); assert_eq!(0, binary_array2.data().child_data().len()); assert_eq!(binary_array1.len(), binary_array2.len()); assert_eq!(binary_array1.null_count(), binary_array2.null_count()); for i in 0..binary_array1.len() { assert_eq!(binary_array1.value(i), binary_array2.value(i)); assert_eq!(binary_array1.value_offset(i), binary_array2.value_offset(i)); assert_eq!(binary_array1.value_length(i), binary_array2.value_length(i)); } } fn test_generic_binary_array_from_opt_vec<T: BinaryOffsetSizeTrait>() { let values: Vec<Option<&[u8]>> = vec![Some(b"one"), Some(b"two"), None, Some(b""), Some(b"three")]; let array = GenericBinaryArray::<T>::from_opt_vec(values); assert_eq!(array.len(), 5); assert_eq!(array.value(0), b"one"); assert_eq!(array.value(1), b"two"); assert_eq!(array.value(3), b""); assert_eq!(array.value(4), b"three"); assert_eq!(array.is_null(0), false); assert_eq!(array.is_null(1), false); assert_eq!(array.is_null(2), true); assert_eq!(array.is_null(3), false); assert_eq!(array.is_null(4), false); } #[test] fn test_large_binary_array_from_opt_vec() { test_generic_binary_array_from_opt_vec::<i64>() } #[test] fn test_binary_array_from_opt_vec() { test_generic_binary_array_from_opt_vec::<i32>() } #[test] fn test_string_array_from_u8_slice() { let values: Vec<&str> = vec!["hello", "", "parquet"]; // Array data: 
// ["hello", "", "parquet"]
    let string_array = StringArray::from(values);
    assert_eq!(3, string_array.len());
    assert_eq!(0, string_array.null_count());
    assert_eq!("hello", string_array.value(0));
    assert_eq!("", string_array.value(1));
    assert_eq!("parquet", string_array.value(2));
    assert_eq!(5, string_array.value_offset(2));
    assert_eq!(7, string_array.value_length(2));
    for i in 0..3 {
        assert!(string_array.is_valid(i));
        assert!(!string_array.is_null(i));
    }
}

// Constructing a StringArray from LargeUtf8-typed data must panic.
#[test]
#[should_panic(expected = "[Large]StringArray expects Datatype::[Large]Utf8")]
fn test_string_array_from_int() {
    let array = LargeStringArray::from(vec!["a", "b"]);
    StringArray::from(array.data());
}

#[test]
fn test_large_string_array_from_u8_slice() {
    let values: Vec<&str> = vec!["hello", "", "parquet"];

    // Array data: ["hello", "", "parquet"]
    let string_array = LargeStringArray::from(values);
    assert_eq!(3, string_array.len());
    assert_eq!(0, string_array.null_count());
    assert_eq!("hello", string_array.value(0));
    assert_eq!("", string_array.value(1));
    assert_eq!("parquet", string_array.value(2));
    assert_eq!(5, string_array.value_offset(2));
    assert_eq!(7, string_array.value_length(2));
    for i in 0..3 {
        assert!(string_array.is_valid(i));
        assert!(!string_array.is_null(i));
    }
}

// Builds List<Utf8> via builders and checks both list slots downcast back
// to StringArray with the expected contents.
#[test]
fn test_nested_string_array() {
    let string_builder = StringBuilder::new(3);
    let mut list_of_string_builder = ListBuilder::new(string_builder);

    list_of_string_builder.values().append_value("foo").unwrap();
    list_of_string_builder.values().append_value("bar").unwrap();
    list_of_string_builder.append(true).unwrap();

    list_of_string_builder
        .values()
        .append_value("foobar")
        .unwrap();
    list_of_string_builder.append(true).unwrap();
    let list_of_strings = list_of_string_builder.finish();

    assert_eq!(list_of_strings.len(), 2);

    let first_slot = list_of_strings.value(0);
    let first_list = first_slot.as_any().downcast_ref::<StringArray>().unwrap();
    assert_eq!(first_list.len(), 2);
    assert_eq!(first_list.value(0), "foo");
    assert_eq!(first_list.value(1), "bar");

    let second_slot = list_of_strings.value(1);
    let second_list = second_slot.as_any().downcast_ref::<StringArray>().unwrap();
    assert_eq!(second_list.len(), 1);
    assert_eq!(second_list.value(0), "foobar");
}

#[test]
#[should_panic(
    expected = "BinaryArray can only be created from List<u8> arrays, mismatched \
                data types."
)]
fn test_binary_array_from_incorrect_list_array_type() {
    let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    let values_data = ArrayData::builder(DataType::UInt32)
        .len(12)
        .add_buffer(Buffer::from(values[..].to_byte_slice()))
        .build();
    let offsets: [i32; 4] = [0, 5, 5, 12];

    let array_data = ArrayData::builder(DataType::Utf8)
        .len(3)
        .add_buffer(Buffer::from(offsets.to_byte_slice()))
        .add_child_data(values_data)
        .build();
    let list_array = ListArray::from(array_data);
    BinaryArray::from(list_array);
}

#[test]
#[should_panic(
    expected = "BinaryArray can only be created from list array of u8 values \
                (i.e. List<PrimitiveArray<u8>>)."
)]
fn test_binary_array_from_incorrect_list_array() {
    let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    let values_data = ArrayData::builder(DataType::UInt32)
        .len(12)
        .add_buffer(Buffer::from(values[..].to_byte_slice()))
        // Extra child data makes the values array non-primitive
        .add_child_data(ArrayData::builder(DataType::Boolean).build())
        .build();
    let offsets: [i32; 4] = [0, 5, 5, 12];

    let array_data = ArrayData::builder(DataType::Utf8)
        .len(3)
        .add_buffer(Buffer::from(offsets.to_byte_slice()))
        .add_child_data(values_data)
        .build();
    let list_array = ListArray::from(array_data);
    BinaryArray::from(list_array);
}

// FixedSizeBinaryArray over 3 x 5-byte values, with and without a slot offset.
#[test]
fn test_fixed_size_binary_array() {
    let values: [u8; 15] = *b"hellotherearrow";

    let array_data = ArrayData::builder(DataType::FixedSizeBinary(5))
        .len(3)
        .add_buffer(Buffer::from(&values[..]))
        .build();
    let fixed_size_binary_array = FixedSizeBinaryArray::from(array_data);
    assert_eq!(3, fixed_size_binary_array.len());
    assert_eq!(0, fixed_size_binary_array.null_count());
    assert_eq!(
        [b'h', b'e', b'l', b'l', b'o'],
        fixed_size_binary_array.value(0)
    );
    assert_eq!(
        [b't', b'h', b'e', b'r', b'e'],
        fixed_size_binary_array.value(1)
    );
    assert_eq!(
        [b'a', b'r', b'r', b'o', b'w'],
        fixed_size_binary_array.value(2)
    );
    assert_eq!(5, fixed_size_binary_array.value_length());
    assert_eq!(10, fixed_size_binary_array.value_offset(2));
    for i in 0..3 {
        assert!(fixed_size_binary_array.is_valid(i));
        assert!(!fixed_size_binary_array.is_null(i));
    }

    // Test binary array with offset
    let array_data = ArrayData::builder(DataType::FixedSizeBinary(5))
        .len(2)
        .offset(1)
        .add_buffer(Buffer::from(&values[..]))
        .build();
    let fixed_size_binary_array = FixedSizeBinaryArray::from(array_data);
    assert_eq!(
        [b't', b'h', b'e', b'r', b'e'],
        fixed_size_binary_array.value(0)
    );
    assert_eq!(
        [b'a', b'r', b'r', b'o', b'w'],
        fixed_size_binary_array.value(1)
    );
    assert_eq!(2, fixed_size_binary_array.len());
    assert_eq!(5, fixed_size_binary_array.value_offset(0));
    assert_eq!(5, fixed_size_binary_array.value_length());
    assert_eq!(10, fixed_size_binary_array.value_offset(1));
}

#[test]
#[should_panic(
    expected = "FixedSizeBinaryArray can only be created from list array of u8 values \
                (i.e. FixedSizeList<PrimitiveArray<u8>>)."
)]
fn test_fixed_size_binary_array_from_incorrect_list_array() {
    let values: [u32; 12] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11];
    let values_data = ArrayData::builder(DataType::UInt32)
        .len(12)
        .add_buffer(Buffer::from(values[..].to_byte_slice()))
        .add_child_data(ArrayData::builder(DataType::Boolean).build())
        .build();

    let array_data =
        ArrayData::builder(DataType::FixedSizeList(Box::new(DataType::Binary), 4))
            .len(3)
            .add_child_data(values_data)
            .build();
    let list_array = FixedSizeListArray::from(array_data);
    FixedSizeBinaryArray::from(list_array);
}

#[test]
#[should_panic(expected = "BinaryArray out of bounds access")]
fn test_binary_array_get_value_index_out_of_bound() {
    let values: [u8; 12] =
        [104, 101, 108, 108, 111, 112, 97, 114, 113, 117, 101, 116];
    let offsets: [i32; 4] = [0, 5, 5, 12];
    let array_data = ArrayData::builder(DataType::Binary)
        .len(3)
        .add_buffer(Buffer::from(offsets.to_byte_slice()))
        .add_buffer(Buffer::from(&values[..]))
        .build();
    let binary_array = BinaryArray::from(array_data);
    binary_array.value(4);
}

#[test]
#[should_panic(expected = "StringArray out of bounds access")]
fn test_string_array_get_value_index_out_of_bound() {
    let values: [u8; 12] = [
        b'h', b'e', b'l', b'l', b'o', b'p', b'a', b'r', b'q', b'u', b'e', b't',
    ];
    let offsets: [i32; 4] = [0, 5, 5, 12];
    let array_data = ArrayData::builder(DataType::Utf8)
        .len(3)
        .add_buffer(Buffer::from(offsets.to_byte_slice()))
        .add_buffer(Buffer::from(&values[..]))
        .build();
    let string_array = StringArray::from(array_data);
    string_array.value(4);
}

#[test]
fn test_binary_array_fmt_debug() {
    let values: [u8; 15] = *b"hellotherearrow";

    let array_data = ArrayData::builder(DataType::FixedSizeBinary(5))
        .len(3)
        .add_buffer(Buffer::from(&values[..]))
        .build();
    let arr = FixedSizeBinaryArray::from(array_data);
    assert_eq!(
        "FixedSizeBinaryArray<5>\n[\n  [104, 101, 108, 108, 111],\n  [116, 104, 101, 114, 101],\n  [97, 114, 114, 111, 119],\n]",
        format!("{:?}", arr)
    );
}

#[test]
fn test_string_array_fmt_debug() {
    let arr: StringArray = vec!["hello", "arrow"].into();
    assert_eq!(
        "StringArray\n[\n  \"hello\",\n  \"arrow\",\n]",
        format!("{:?}", arr)
    );
}

#[test]
fn test_large_string_array_fmt_debug() {
    let arr: LargeStringArray = vec!["hello", "arrow"].into();
    assert_eq!(
        "LargeStringArray\n[\n  \"hello\",\n  \"arrow\",\n]",
        format!("{:?}", arr)
    );
}

// The three StringArray construction paths must agree.
#[test]
fn test_string_array_from_iter() {
    let data = vec![Some("hello"), None, Some("arrow")];
    // from Vec<Option<&str>>
    let array1 = StringArray::from(data.clone());
    // from Iterator<Option<&str>>
    let array2: StringArray = data.clone().into_iter().collect();
    // from Iterator<Option<String>>
    let array3: StringArray = data
        .into_iter()
        .map(|x| x.map(|s| format!("{}", s)))
        .collect();
    assert_eq!(array1, array2);
    assert_eq!(array2, array3);
}

#[test]
fn test_struct_array_builder() {
    let boolean_data = ArrayData::builder(DataType::Boolean)
        .len(4)
        .add_buffer(Buffer::from([false, false, true, true].to_byte_slice()))
        .build();
    let int_data = ArrayData::builder(DataType::Int64)
        .len(4)
        .add_buffer(Buffer::from([42, 28, 19, 31].to_byte_slice()))
        .build();
    let mut field_types = vec![];
    field_types.push(Field::new("a", DataType::Boolean, false));
    field_types.push(Field::new("b", DataType::Int64, false));
    let struct_array_data = ArrayData::builder(DataType::Struct(field_types))
        .len(4)
        .add_child_data(boolean_data.clone())
        .add_child_data(int_data.clone())
        .build();
    let struct_array = StructArray::from(struct_array_data);

    assert_eq!(boolean_data, struct_array.column(0).data());
    assert_eq!(int_data, struct_array.column(1).data());
}

#[test]
fn test_struct_array_from() {
    // 12 == 0b1100: a packed bitmap for [false, false, true, true]
    let boolean_data = ArrayData::builder(DataType::Boolean)
        .len(4)
        .add_buffer(Buffer::from([12_u8]))
        .build();
    let int_data = ArrayData::builder(DataType::Int32)
        .len(4)
        .add_buffer(Buffer::from([42, 28, 19, 31].to_byte_slice()))
        .build();
    let struct_array = StructArray::from(vec![
        (
            Field::new("b", DataType::Boolean, false),
            Arc::new(BooleanArray::from(vec![false, false, true, true]))
                as Arc<Array>,
        ),
        (
            Field::new("c", DataType::Int32, false),
            Arc::new(Int32Array::from(vec![42, 28, 19, 31])),
        ),
    ]);
    assert_eq!(boolean_data, struct_array.column(0).data());
    assert_eq!(int_data, struct_array.column(1).data());
    assert_eq!(4, struct_array.len());
    assert_eq!(0, struct_array.null_count());
    assert_eq!(0, struct_array.offset());
}

/// validates that the in-memory representation follows [the spec](https://arrow.apache.org/docs/format/Columnar.html#struct-layout)
#[test]
fn test_struct_array_from_vec() {
    let strings: ArrayRef = Arc::new(StringArray::from(vec![
        Some("joe"),
        None,
        None,
        Some("mark"),
    ]));
    let ints: ArrayRef =
        Arc::new(Int32Array::from(vec![Some(1), Some(2), None, Some(4)]));

    let arr =
        StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())])
            .unwrap();

    let struct_data = arr.data();
    assert_eq!(4, struct_data.len());
    assert_eq!(1, struct_data.null_count());
    assert_eq!(
        // 00001011
        &Some(Bitmap::from(Buffer::from(&[11_u8]))),
        struct_data.null_bitmap()
    );

    let expected_string_data = ArrayData::builder(DataType::Utf8)
        .len(4)
        .null_count(2)
        .null_bit_buffer(Buffer::from(&[9_u8]))
        .add_buffer(Buffer::from(&[0, 3, 3, 3, 7].to_byte_slice()))
        .add_buffer(Buffer::from("joemark".as_bytes()))
        .build();

    let expected_int_data = ArrayData::builder(DataType::Int32)
        .len(4)
        .null_count(1)
        .null_bit_buffer(Buffer::from(&[11_u8]))
        .add_buffer(Buffer::from(&[1, 2, 0, 4].to_byte_slice()))
        .build();

    assert_eq!(expected_string_data, arr.column(0).data());

    // TODO: implement equality for ArrayData
    assert_eq!(expected_int_data.len(), arr.column(1).data().len());
    assert_eq!(
        expected_int_data.null_count(),
        arr.column(1).data().null_count()
    );
    assert_eq!(
        expected_int_data.null_bitmap(),
        arr.column(1).data().null_bitmap()
    );
    // Compare the raw value bytes of valid slots only (null slots may differ)
    let expected_value_buf = expected_int_data.buffers()[0].clone();
    let actual_value_buf = arr.column(1).data().buffers()[0].clone();
    for i in 0..expected_int_data.len() {
        if !expected_int_data.is_null(i) {
            assert_eq!(
                expected_value_buf.data()[i * 4..(i + 1) * 4],
                actual_value_buf.data()[i * 4..(i + 1) * 4]
            );
        }
    }
}

#[test]
fn test_struct_array_from_vec_error() {
    let strings: ArrayRef = Arc::new(StringArray::from(vec![
        Some("joe"),
        None,
        None, // 3 elements, not 4
    ]));
    let ints: ArrayRef =
        Arc::new(Int32Array::from(vec![Some(1), Some(2), None, Some(4)]));

    let arr =
        StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]);

    match arr {
        Err(ArrowError::InvalidArgumentError(e)) => {
            assert!(e.starts_with("Array of field \"f2\" has length 4, but previous elements have length 3."));
        }
        _ => assert!(false, "This test got an unexpected error type"),
    };
}

#[test]
#[should_panic(
    expected = "the field data types must match the array data in a StructArray"
)]
fn test_struct_array_from_mismatched_types() {
    StructArray::from(vec![
        (
            Field::new("b", DataType::Int16, false),
            Arc::new(BooleanArray::from(vec![false, false, true, true]))
                as Arc<Array>,
        ),
        (
            Field::new("c", DataType::Utf8, false),
            Arc::new(Int32Array::from(vec![42, 28, 19, 31])),
        ),
    ]);
}

// Slicing a StructArray must propagate offset/length/null counts to both
// the struct and its child columns.
#[test]
fn test_struct_array_slice() {
    let boolean_data = ArrayData::builder(DataType::Boolean)
        .len(5)
        .add_buffer(Buffer::from([0b00010000]))
        .null_bit_buffer(Buffer::from([0b00010001]))
        .build();
    let int_data = ArrayData::builder(DataType::Int32)
        .len(5)
        .add_buffer(Buffer::from([0, 28, 42, 0, 0].to_byte_slice()))
        .null_bit_buffer(Buffer::from([0b00000110]))
        .build();

    let mut field_types = vec![];
    field_types.push(Field::new("a", DataType::Boolean, false));
    field_types.push(Field::new("b", DataType::Int32, false));
    let struct_array_data = ArrayData::builder(DataType::Struct(field_types))
        .len(5)
        .add_child_data(boolean_data.clone())
        .add_child_data(int_data.clone())
        .null_bit_buffer(Buffer::from([0b00010111]))
        .build();
    let struct_array = StructArray::from(struct_array_data);

    assert_eq!(5, struct_array.len());
    assert_eq!(1, struct_array.null_count());
    assert!(struct_array.is_valid(0));
    assert!(struct_array.is_valid(1));
    assert!(struct_array.is_valid(2));
    assert!(struct_array.is_null(3));
    assert!(struct_array.is_valid(4));
    assert_eq!(boolean_data, struct_array.column(0).data());
    assert_eq!(int_data, struct_array.column(1).data());

    let c0 = struct_array.column(0);
    let c0 = c0.as_any().downcast_ref::<BooleanArray>().unwrap();
    assert_eq!(5, c0.len());
    assert_eq!(3, c0.null_count());
    assert!(c0.is_valid(0));
    assert_eq!(false, c0.value(0));
    assert!(c0.is_null(1));
    assert!(c0.is_null(2));
    assert!(c0.is_null(3));
    assert!(c0.is_valid(4));
    assert_eq!(true, c0.value(4));

    let c1 = struct_array.column(1);
    let c1 = c1.as_any().downcast_ref::<Int32Array>().unwrap();
    assert_eq!(5, c1.len());
    assert_eq!(3, c1.null_count());
    assert!(c1.is_null(0));
    assert!(c1.is_valid(1));
    assert_eq!(28, c1.value(1));
    assert!(c1.is_valid(2));
    assert_eq!(42, c1.value(2));
    assert!(c1.is_null(3));
    assert!(c1.is_null(4));

    let sliced_array = struct_array.slice(2, 3);
    let sliced_array = sliced_array.as_any().downcast_ref::<StructArray>().unwrap();
    assert_eq!(3, sliced_array.len());
    assert_eq!(2, sliced_array.offset());
    assert_eq!(1, sliced_array.null_count());
    assert!(sliced_array.is_valid(0));
    assert!(sliced_array.is_null(1));
    assert!(sliced_array.is_valid(2));

    let sliced_c0 = sliced_array.column(0);
    let sliced_c0 = sliced_c0.as_any().downcast_ref::<BooleanArray>().unwrap();
    assert_eq!(3, sliced_c0.len());
    assert_eq!(2, sliced_c0.offset());
    assert!(sliced_c0.is_null(0));
    assert!(sliced_c0.is_null(1));
    assert!(sliced_c0.is_valid(2));
    assert_eq!(true, sliced_c0.value(2));

    let sliced_c1 = sliced_array.column(1);
    let sliced_c1 = sliced_c1.as_any().downcast_ref::<Int32Array>().unwrap();
    assert_eq!(3, sliced_c1.len());
    assert_eq!(2, sliced_c1.offset());
    assert!(sliced_c1.is_valid(0));
    assert_eq!(42, sliced_c1.value(0));
    assert!(sliced_c1.is_null(1));
    assert!(sliced_c1.is_null(2));
}

#[test]
#[should_panic(
    expected = "all child arrays of a StructArray must have the same length"
)]
fn test_invalid_struct_child_array_lengths() {
    StructArray::from(vec![
        (
            Field::new("b", DataType::Float32, false),
            Arc::new(Float32Array::from(vec![1.1])) as Arc<Array>,
        ),
        (
            Field::new("c", DataType::Float64, false),
            Arc::new(Float64Array::from(vec![2.2, 3.3])),
        ),
    ]);
}

// The next three tests slice a buffer by one byte to force misalignment.
#[test]
#[should_panic(expected = "memory is not aligned")]
fn test_primitive_array_alignment() {
    let ptr = memory::allocate_aligned(8);
    let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
    let buf2 = buf.slice(1);
    let array_data = ArrayData::builder(DataType::Int32).add_buffer(buf2).build();
    Int32Array::from(array_data);
}

#[test]
#[should_panic(expected = "memory is not aligned")]
fn test_list_array_alignment() {
    let ptr = memory::allocate_aligned(8);
    let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
    let buf2 = buf.slice(1);

    let values: [i32; 8] = [0; 8];
    let value_data = ArrayData::builder(DataType::Int32)
        .add_buffer(Buffer::from(values.to_byte_slice()))
        .build();

    let list_data_type = DataType::List(Box::new(DataType::Int32));
    let list_data = ArrayData::builder(list_data_type)
        .add_buffer(buf2)
        .add_child_data(value_data)
        .build();
    ListArray::from(list_data);
}

#[test]
#[should_panic(expected = "memory is not aligned")]
fn test_binary_array_alignment() {
    let ptr = memory::allocate_aligned(8);
    let buf = unsafe { Buffer::from_raw_parts(ptr, 8, 8) };
    let buf2 = buf.slice(1);

    let values: [u8; 12] = [0; 12];
    let array_data = ArrayData::builder(DataType::Binary)
        .add_buffer(buf2)
        .add_buffer(Buffer::from(&values[..]))
        .build();
    BinaryArray::from(array_data);
}

// Arrays must be movable to (and readable from) another thread.
#[test]
fn test_access_array_concurrently() {
    let a = Int32Array::from(vec![5, 6, 7, 8, 9]);
    let ret = thread::spawn(move || a.value(3)).join();

    assert!(ret.is_ok());
    assert_eq!(8, ret.ok().unwrap());
}

#[test]
fn test_dictionary_array_fmt_debug() {
    let key_builder = PrimitiveBuilder::<UInt8Type>::new(3);
    let value_builder = PrimitiveBuilder::<UInt32Type>::new(2);
    let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
    builder.append(12345678).unwrap();
    builder.append_null().unwrap();
    builder.append(22345678).unwrap();
    let array = builder.finish();
    assert_eq!(
        "DictionaryArray {keys: [Some(0), None, Some(1)] values: PrimitiveArray<UInt32>\n[\n  12345678,\n  22345678,\n]}\n",
        format!("{:?}", array)
    );

    // More than ten keys: the Debug output truncates the key list with `...`
    let key_builder = PrimitiveBuilder::<UInt8Type>::new(20);
    let value_builder = PrimitiveBuilder::<UInt32Type>::new(2);
    let mut builder = PrimitiveDictionaryBuilder::new(key_builder, value_builder);
    for _ in 0..20 {
        builder.append(1).unwrap();
    }
    let array = builder.finish();
    assert_eq!(
        "DictionaryArray {keys: [Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0), Some(0)]... values: PrimitiveArray<UInt32>\n[\n  1,\n]}\n",
        format!("{:?}", array)
    );
}

#[test]
fn test_dictionary_array_from_iter() {
    let test = vec!["a", "a", "b", "c"];
    let array: DictionaryArray<Int8Type> = test
        .iter()
        .map(|&x| if x == "b" { None } else { Some(x) })
        .collect();
    assert_eq!(
        "DictionaryArray {keys: [Some(0), Some(0), None, Some(1)] values: StringArray\n[\n  \"a\",\n  \"c\",\n]}\n",
        format!("{:?}", array)
    );

    let array: DictionaryArray<Int8Type> = test.into_iter().collect();
    assert_eq!(
        "DictionaryArray {keys: [Some(0), Some(0), Some(1), Some(2)] values: StringArray\n[\n  \"a\",\n  \"b\",\n  \"c\",\n]}\n",
        format!("{:?}", array)
    );
}

#[test]
fn test_dictionary_array_reverse_lookup_key() {
    let test = vec!["a", "a", "b", "c"];
    let array: DictionaryArray<Int8Type> = test.into_iter().collect();

    assert_eq!(array.lookup_key("c"), Some(2));

    // Direction of building a dictionary is the iterator direction
    let test = vec!["t3", "t3", "t2", "t2", "t1", "t3", "t4", "t1", "t0"];
    let array: DictionaryArray<Int8Type> = test.into_iter().collect();

    assert_eq!(array.lookup_key("t1"), Some(2));
    assert_eq!(array.lookup_key("non-existent"), None);
}

#[test]
fn test_dictionary_keys_as_primitive_array() {
    let test = vec!["a", "b", "c", "a"];
    let array: DictionaryArray<Int8Type> = test.into_iter().collect();

    let keys = array.keys_array();
    assert_eq!(&DataType::Int8, keys.data_type());
    assert_eq!(0, keys.null_count());
    assert_eq!(&[0, 1, 2, 0], keys.value_slice(0, keys.len()));
}

#[test]
fn test_dictionary_keys_as_primitive_array_with_null() {
    let test = vec![Some("a"), None, Some("b"), None, None, Some("a")];
    let array: DictionaryArray<Int32Type> = test.into_iter().collect();

    let keys = array.keys_array();
    assert_eq!(&DataType::Int32, keys.data_type());
    assert_eq!(3, keys.null_count());

    assert_eq!(true, keys.is_valid(0));
    assert_eq!(false, keys.is_valid(1));
    assert_eq!(true, keys.is_valid(2));
    assert_eq!(false, keys.is_valid(3));
    assert_eq!(false, keys.is_valid(4));
    assert_eq!(true, keys.is_valid(5));

    assert_eq!(0, keys.value(0));
    assert_eq!(1, keys.value(2));
    assert_eq!(0, keys.value(5));
}
}
// Cloud Native Buildpack that installs `apt` packages declared in an `Aptfile`
// (Heroku-style) into a cached CNB layer, for Ubuntu Linux only.
use std::{
    collections::HashSet,
    env,
    ffi::OsString,
    fs::{create_dir_all, read_dir, read_to_string, remove_file, write},
    path::{Path, PathBuf},
    process,
};

use binary::{http_utils::download_sync, Binary, BinaryTrait};
use buildpack::{
    eyre::{self, eyre},
    libcnb::{
        self,
        build::{BuildContext, BuildResult, BuildResultBuilder},
        data::{build_plan::BuildPlan, layer_content_metadata::LayerTypes, layer_name},
        detect::{DetectContext, DetectResult, DetectResultBuilder},
        generic::{GenericMetadata, GenericPlatform},
        layer::{ExistingLayerStrategy, Layer, LayerResult, LayerResultBuilder},
        layer_env::{LayerEnv, ModificationBehavior, Scope},
        Buildpack,
    },
    maplit::hashmap,
    tracing, BuildpackTrait, LayerOptions,
};
use serde::{Deserialize, Serialize};
use utils::vec_string;

pub struct AptBuildpack;

impl BuildpackTrait for AptBuildpack {
    fn toml() -> &'static str {
        include_str!("../buildpack.toml")
    }
}

// The name of the file that is detected in the app dir
const APT_FILE: &str = "Aptfile";

// The name of the layer that the buildpack creates
const APT_PACKAGES: &str = "apt_packages";

impl Buildpack for AptBuildpack {
    type Platform = GenericPlatform;
    type Metadata = GenericMetadata;
    type Error = eyre::Report;

    /// Pass detection only when an `Aptfile` exists in the working directory
    /// AND the build is running on Ubuntu Linux; otherwise fail (with a
    /// warning when an `Aptfile` is present but the platform is wrong).
    fn detect(&self, _context: DetectContext<Self>) -> libcnb::Result<DetectResult, Self::Error> {
        // Detect `Aptfile`
        let aptfile = PathBuf::from(APT_FILE);

        // Get the Linux release for reuse below
        let linux_flavour = sys_info::linux_os_release().ok();

        // Fail if no Aptfile, or Aptfile exists but not on Ubuntu Linux
        if !aptfile.exists() {
            return DetectResultBuilder::fail().build();
        } else if env::consts::OS != "linux"
            || linux_flavour
                .as_ref()
                .map_or_else(|| "".to_string(), |rel| rel.id().to_string())
                != "ubuntu"
        {
            tracing::warn!("Aptfile detected but will be ignored because not on Ubuntu Linux");
            return DetectResultBuilder::fail().build();
        }

        let mut build_plan = BuildPlan::new();

        // Require `apt_packages` layer if there is an `Aptfile`
        // NOTE(review): this check is always true here — the `!aptfile.exists()`
        // branch above has already returned.
        if aptfile.exists() {
            let version = linux_flavour
                .expect("Should have returned by now if not on Linux")
                .version_codename
                .expect("Should have an Ubuntu version codename");
            let (require, provide) = Self::require_and_provide(
                APT_PACKAGES,
                APT_FILE,
                format!("Install `apt` packages for Ubuntu '{}'", version).trim(),
                Some(hashmap! {
                    "version" => version,
                    "file" => APT_FILE.to_string()
                }),
            );
            build_plan.requires.push(require);
            build_plan.provides.push(provide);
        }

        DetectResultBuilder::pass().build_plan(build_plan).build()
    }

    /// Create/update the `apt_packages` layer when the buildpack plan asked
    /// for it during `detect`.
    fn build(&self, context: BuildContext<Self>) -> libcnb::Result<BuildResult, Self::Error> {
        let entries = self.buildpack_plan_entries(&context.buildpack_plan);

        if let Some(options) = entries.get(APT_PACKAGES) {
            context.handle_layer(
                layer_name!("apt_packages"),
                AptPackagesLayer::new(options, Some(&context.app_dir)),
            )?;
        }

        BuildResultBuilder::new().build()
    }
}

/// The layer itself doubles as its own TOML metadata (`type Metadata = Self`),
/// so these fields are what gets persisted and later diffed in
/// `existing_layer_strategy`.
#[derive(Clone, Deserialize, Serialize)]
pub struct AptPackagesLayer {
    /// The version of Ubuntu that packages will be installed for e.g `bionic`, `focal`
    version: String,

    /// The path to the `Aptfile` (or similar name) that specifies packages to be installed
    file: Option<PathBuf>,

    /// Should Ubuntu deb repository mirrors be used?
    mirrors: bool,

    /// Should packages that are no longer in the Aptfile be removed
    clean: bool,

    /// A list of package names, or deb URLs to be installed
    ///
    /// Usually instead of an `Aptfile` but can be specified in addition to it
    packages: Vec<String>,

    /// A list of repos to be used
    ///
    /// Usually instead of `:repo:` entries in an `Aptfile` but can be specified in addition to it
    repos: Vec<String>,
}

impl AptPackagesLayer {
    /// Build a layer description from the plan-entry `options` plus the
    /// `Aptfile` found under `app_path` (if any).
    ///
    /// Precedence for the `mirrors`/`clean` flags: `STENCILA_APT_MIRRORS` /
    /// `STENCILA_APT_CLEAN` env vars first, overridden by `:mirrors:` /
    /// `:clean:` directives inside the `Aptfile`; any value other than
    /// "no"/"off"/"false"/"0" (including unset) leaves the flag on.
    pub fn new(options: &LayerOptions, app_path: Option<&Path>) -> Self {
        let version = match options.get("version") {
            Some(version) => version.to_string(),
            None => sys_info::linux_os_release()
                .ok()
                .and_then(|info| info.version_codename)
                .unwrap_or_default(),
        };

        let file = options.get("file").map(PathBuf::from);

        // Split `Aptfile` into packages and repos and detect options
        let mut mirrors = env::var("STENCILA_APT_MIRRORS").ok();
        let mut clean = env::var("STENCILA_APT_CLEAN").ok();
        let mut repos = Vec::new();
        let mut packages = match (&file, &app_path) {
            // An unreadable Aptfile is treated as empty (best-effort)
            (Some(file), Some(path)) => read_to_string(path.join(file))
                .unwrap_or_default()
                .lines()
                .filter_map(|line| {
                    let line = line.trim();
                    if line.is_empty() || line.starts_with('#') {
                        None
                    } else if let Some(repo) = line.strip_prefix(":repo:") {
                        repos.push(repo.to_string());
                        None
                    } else if let Some(value) = line.strip_prefix(":mirrors:") {
                        mirrors = Some(value.trim().to_string());
                        None
                    } else if let Some(value) = line.strip_prefix(":clean:") {
                        clean = Some(value.trim().to_string());
                        None
                    } else {
                        // Anything else is a package name or deb URL
                        Some(line.to_string())
                    }
                })
                .collect(),
            _ => Vec::new(),
        };

        // Turn off use of mirrors?
        let mirrors = !matches!(
            mirrors.as_deref(),
            Some("no") | Some("off") | Some("false") | Some("0")
        );

        // Turn off cleaning?
        let clean = !matches!(
            clean.as_deref(),
            Some("no") | Some("off") | Some("false") | Some("0")
        );

        // Add any other packages
        if let Some(list) = options.get("packages") {
            packages.append(&mut list.split(',').map(|pkg| pkg.trim().to_string()).collect());
        }

        // Add any other repos
        if let Some(list) = options.get("repos") {
            repos.append(&mut list.split(',').map(|pkg| pkg.trim().to_string()).collect());
        }

        Self {
            version,
            file,
            mirrors,
            clean,
            packages,
            repos,
        }
    }
}

impl Layer for AptPackagesLayer {
    type Buildpack = AptBuildpack;
    type Metadata = AptPackagesLayer;

    // Layer is available at build and launch time, and cached between builds
    fn types(&self) -> LayerTypes {
        LayerTypes {
            build: true,
            launch: true,
            cache: true,
        }
    }

    /// Decide what to do with a cached layer: recreate on Ubuntu version or
    /// repo changes, update on package-list changes, otherwise keep.
    fn existing_layer_strategy(
        &self,
        _context: &BuildContext<Self::Buildpack>,
        layer_data: &libcnb::layer::LayerData<Self::Metadata>,
    ) -> Result<libcnb::layer::ExistingLayerStrategy, <Self::Buildpack as Buildpack>::Error> {
        let existing = &layer_data.content_metadata.metadata;
        let strategy = if self.version != existing.version {
            tracing::info!(
                "Existing `apt_packages` layer is for different Ubuntu version (`{}` => `{}`); will recreate",
                existing.version,
                self.version,
            );
            ExistingLayerStrategy::Recreate
        } else if self.repos != existing.repos {
            tracing::info!(
                "Existing `apt_packages` layer has different repos (`{}` => `{}`); will recreate",
                existing.repos.join(","),
                self.repos.join(","),
            );
            ExistingLayerStrategy::Recreate
        } else if self.packages != existing.packages {
            tracing::info!(
                "Existing `apt_packages` layer has different packages (`{}` => `{}`); will update",
                existing.packages.join(","),
                self.packages.join(",")
            );
            ExistingLayerStrategy::Update
        } else {
            tracing::info!("Existing `apt_packages` layer meets requirements; will keep",);
            ExistingLayerStrategy::Keep
        };
        Ok(strategy)
    }

    fn create(
        &self,
        _context: &BuildContext<Self::Buildpack>,
        layer_path: &Path,
    ) -> Result<LayerResult<Self::Metadata>, eyre::Report> {
        tracing::info!("Creating `apt_packages` layer");
        self.install(layer_path)
    }

    fn update(
        &self,
        _context: &BuildContext<Self::Buildpack>,
        layer_data: &libcnb::layer::LayerData<Self::Metadata>,
    ) -> Result<LayerResult<Self::Metadata>, <Self::Buildpack as Buildpack>::Error> {
        tracing::info!("Updating `apt_packages` layer");
        self.install(&layer_data.path)
    }
}

impl AptPackagesLayer {
    /// Install the configured packages into `layer_path`, giving `apt-get`
    /// its own cache/state/sources directories so it never needs root-owned
    /// system paths. Shared by `create` and `update`.
    pub fn install(
        &self,
        layer_path: &Path,
    ) -> Result<LayerResult<AptPackagesLayer>, eyre::Report> {
        let layer_path = &layer_path.canonicalize()?;

        // Create the directories that `apt-get` needs
        let apt_cache_dir = layer_path.join("cache");
        let apt_archives_dir = apt_cache_dir.join("archives");
        let apt_state_dir = layer_path.join("state");
        let apt_sources_dir = layer_path.join("sources");
        create_dir_all(apt_archives_dir.join("partial"))?;
        create_dir_all(apt_state_dir.join("lists").join("partial"))?;
        create_dir_all(&apt_sources_dir)?;

        // Create a list of base deb repositories
        let repos = if self.mirrors {
            // Generate a new sources list using the mirror protocol
            // In the future we may allow the `STENCILA_APT_MIRRORS` env var to contain a
            // list of mirrors to use
            let release = sys_info::linux_os_release()
                .ok()
                .and_then(|info| info.version_codename)
                .ok_or_else(|| eyre!("Unable to get Linux OS release"))?;
            format!(
                r#"
deb mirror://mirrors.ubuntu.com/mirrors.txt {release} main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-updates main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-backports main restricted universe multiverse
deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-security main restricted universe multiverse
"#,
            )
        } else {
            // Use the existing system sources list
            read_to_string(
                PathBuf::from("/")
                    .join("etc")
                    .join("apt")
                    .join("sources.list"),
            )?
}; // Add any repositories added in the `Aptfile` let repos = [&repos, "\n", &self.repos.join("\n")].concat(); let apt_sources_list = apt_sources_dir.join("sources.list"); write(&apt_sources_list, repos)?; // Configure apt-get and update cache let apt = Binary::named("apt-get").require_sync()?; let apt_options: Vec<String> = vec![ "debug::nolocking=true", &format!("dir::cache={}", apt_cache_dir.display()), &format!("dir::state={}", apt_state_dir.display()), &format!("dir::etc::sourcelist={}", apt_sources_list.display()), "dir::etc::sourceparts=/dev/null", ] .into_iter() .map(|option| ["-o", option].concat()) .collect(); tracing::info!("Updating apt caches"); apt.run_sync([apt_options.clone(), vec_string!["update"]].concat())?; // Read in the list of packages that are currently installed let installed_packages_dir = layer_path.join("installed").join("packages"); create_dir_all(&installed_packages_dir)?; let mut installed_packages = read_dir(&installed_packages_dir)? .flatten() .map(|entry| entry.file_name().to_string_lossy().to_string()) .collect::<HashSet<String>>(); // Ensure the `installed/debs` dir is created (reading of this done later only if needed) let installed_debs_dir = layer_path.join("installed").join("debs"); create_dir_all(&installed_debs_dir)?; let dpkg = Binary::named("dpkg").require_sync()?; // Closure to get a list of the debs in archives dir let get_debs = || -> Vec<OsString> { apt_archives_dir .read_dir() .expect("Archives directory should be readable") .flatten() .filter_map(|entry| { let path = entry.path(); if path.extension() == Some(&OsString::from("deb")) { path.file_name().map(|name| name.to_os_string()) } else { None } }) .collect() }; // Get deb files, including those of dependencies, extract them and record the list // of files associated with each for package in &self.packages { // Slugify URLs to be more filesystem friendly let package_id = if package.starts_with("http") && package.ends_with(".deb") { package .replace("://", "-") 
.replace("/", "-") } else { package.to_string() }; // If the package has already been installed then skip it (but remove so it is not // uninstalled later since it is still wanted) if installed_packages.remove(&package_id) { tracing::info!("Package `{}` is already installed", package); continue; } else { tracing::info!("Installing package `{}`", package); } // Record list of debs in archive before download let debs_before = get_debs(); // Download debs for this package (including any dependencies if not a URL) if package.starts_with("http") && package.ends_with(".deb") { tracing::info!("Downloading `{}`", package); let path = apt_archives_dir.join(format!("{}.deb", package_id)); download_sync(package, &path)?; } else { tracing::info!("Fetching deb files for package `{}`", package); // Assumes using `apt-get` >= 1.1 which replaced `--force-yes` with `--allow-*` options apt.run_sync( [ apt_options.clone(), vec_string![ "--assume-yes", "--allow-downgrades", "--allow-remove-essential", "--allow-change-held-packages", "--download-only", "--reinstall", "install", package ], ] .concat(), )?; } // Record the debs that were downloaded for the package // TODO: This is not very reliable since it will be empty if the package has // already been downloaded because it is an dependency of another. let debs_after = get_debs(); let debs_downloaded: Vec<OsString> = debs_after .into_iter() .filter(|item| !debs_before.contains(item)) .collect(); // Extract the downloaded deb files into the layer and record tracing::info!("Extracting debs for package `{}`", package); for deb in &debs_downloaded { let deb_path = apt_archives_dir.join(deb); dpkg.run_sync([ "--extract", &deb_path.display().to_string(), &layer_path.display().to_string(), ])?; // Now that the deb has been extracted write it's manifest file with the list of files extracted // TODO: This does not need to be done here but can instead deferred to if / when the // package needs to be removed. 
let contents = process::Command::new("dpkg") .arg("--contents") .arg(deb_path) .output()? .stdout; let files_extracted = String::from_utf8(contents)? .split('\n') .filter_map(|line| { let mut cols = line.split_whitespace(); let size = cols.nth(2).unwrap_or("0"); if size != "0" { line.rfind("./").map(|pos| line[pos..].to_string()) } else { None } }) .collect::<Vec<String>>() .join("\n"); write(installed_debs_dir.join(deb), files_extracted)?; } // Now that the package has been successfully installed write its manifest file write( installed_packages_dir.join(package_id), debs_downloaded .iter() .map(|deb| deb.to_string_lossy().to_string()) .collect::<Vec<String>>() .join("\n"), )?; } // Function to read a manifests file (list of debs, or files within a deb) fn read_manifest(file: &Path) -> Option<Vec<String>> { read_to_string(file).ok().map(|content| { content .split('\n') .map(|line| line.to_string()) .collect::<Vec<String>>() }) } // Remove previously installed but currently unwanted packages (those not yet removed from the list) if self.clean && !installed_packages.is_empty() { for package in installed_packages { tracing::info!("Uninstalling package `{}`", package); // Read in the list of debs installed for this package let package_debs = installed_packages_dir.join(&package); let debs = read_manifest(&package_debs).unwrap_or_default(); if debs.len() > 1 { tracing::warn!("Dependencies were installed when package `{}` was installed. 
These will be removed also but may this may affect other packages subsequently installed that share those dependencies", package); } for deb in debs { // Read in the list of files that were installed for the deb and remove them all let deb_files = installed_debs_dir.join(&deb); if let Some(files) = read_manifest(&deb_files) { for file_path in files { let layer_file_path = layer_path.join(file_path); remove_file(layer_file_path).ok(); } } // Remove the deb from the archive // If we don't do this then if the package get's re-added we do not "see" // the deb as getting added. Also, it saves space. remove_file(apt_archives_dir.join(&deb)).ok(); // Remove the manifest remove_file(&deb_files).ok(); } remove_file(&package_debs).ok(); } } // Prepend a lot of env vars let prefix_paths = |paths: &[&str]| { // The trailing colon here is important to separate what we prepend // from the existing path env::join_paths(paths.iter().map(|path| layer_path.join(path))) .map(|joined| format!("{}:", joined.to_string_lossy())) }; let mut layer_env = LayerEnv::new().chainable_insert( Scope::All, ModificationBehavior::Prepend, "PATH", prefix_paths(&["usr/bin", "usr/local/bin"])?, ); let include_path_prepend = prefix_paths(&[ "usr/include/x86_64-linux-gnu", "usr/include/i386-linux-gnu", "usr/include", ])?; for var in ["INCLUDE_PATH", "CPATH", "CPPPATH"] { layer_env.insert( Scope::All, ModificationBehavior::Prepend, var, &include_path_prepend, ); } let library_paths = prefix_paths(&[ "usr/lib/x86_64-linux-gnu", "usr/lib/i386-linux-gnu", "usr/lib", "lib/x86_64-linux-gnu", "lib/i386-linux-gnu", "lib", ])?; for var in ["LD_LIBRARY_PATH", "LIBRARY_PATH"] { layer_env.insert( Scope::All, ModificationBehavior::Prepend, var, &library_paths, ); } layer_env.insert( Scope::All, ModificationBehavior::Prepend, "PKG_CONFIG_PATH", prefix_paths(&[ "usr/lib/x86_64-linux-gnu/pkgconfig", "usr/lib/i386-linux-gnu/pkgconfig", "usr/lib/pkgconfig", ])?, ); 
LayerResultBuilder::new(self.clone()).env(layer_env).build() } } fix(Apt buildpack): Fix bug causing layer to be recreated See https://github.com/heroku/libcnb.rs/issues/396 use std::{ collections::HashSet, env, ffi::OsString, fs::{create_dir_all, read_dir, read_to_string, remove_file, write}, path::{Path, PathBuf}, process, }; use binary::{http_utils::download_sync, Binary, BinaryTrait}; use buildpack::{ eyre::{self, eyre}, libcnb::{ self, build::{BuildContext, BuildResult, BuildResultBuilder}, data::{build_plan::BuildPlan, layer_content_metadata::LayerTypes, layer_name}, detect::{DetectContext, DetectResult, DetectResultBuilder}, generic::{GenericMetadata, GenericPlatform}, layer::{ExistingLayerStrategy, Layer, LayerResult, LayerResultBuilder}, layer_env::{LayerEnv, ModificationBehavior, Scope}, Buildpack, }, maplit::hashmap, tracing, BuildpackTrait, LayerOptions, }; use serde::{Deserialize, Serialize}; use utils::vec_string; pub struct AptBuildpack; impl BuildpackTrait for AptBuildpack { fn toml() -> &'static str { include_str!("../buildpack.toml") } } // The name of the file that is detected in the app dir const APT_FILE: &str = "Aptfile"; // The name of the layer that the buildpack creates const APT_PACKAGES: &str = "apt_packages"; impl Buildpack for AptBuildpack { type Platform = GenericPlatform; type Metadata = GenericMetadata; type Error = eyre::Report; fn detect(&self, _context: DetectContext<Self>) -> libcnb::Result<DetectResult, Self::Error> { // Detect `Aptfile` let aptfile = PathBuf::from(APT_FILE); // Get the Linux release for reuse below let linux_flavour = sys_info::linux_os_release().ok(); // Fail if no Aptfile, or Aptfile exists but not on Ubuntu Linux if !aptfile.exists() { return DetectResultBuilder::fail().build(); } else if env::consts::OS != "linux" || linux_flavour .as_ref() .map_or_else(|| "".to_string(), |rel| rel.id().to_string()) != "ubuntu" { tracing::warn!("Aptfile detected but will be ignored because not on Ubuntu Linux"); return 
DetectResultBuilder::fail().build(); } let mut build_plan = BuildPlan::new(); // Require `apt_packages` layer if there is an `Aptfile` if aptfile.exists() { let version = linux_flavour .expect("Should have returned by now if not on Linux") .version_codename .expect("Should have an Ubuntu version codename"); let (require, provide) = Self::require_and_provide( APT_PACKAGES, APT_FILE, format!("Install `apt` packages for Ubuntu '{}'", version).trim(), Some(hashmap! { "version" => version, "file" => APT_FILE.to_string() }), ); build_plan.requires.push(require); build_plan.provides.push(provide); } DetectResultBuilder::pass().build_plan(build_plan).build() } fn build(&self, context: BuildContext<Self>) -> libcnb::Result<BuildResult, Self::Error> { let entries = self.buildpack_plan_entries(&context.buildpack_plan); if let Some(options) = entries.get(APT_PACKAGES) { context.handle_layer( layer_name!("apt_packages"), AptPackagesLayer::new(options, Some(&context.app_dir)), )?; } BuildResultBuilder::new().build() } } #[derive(Debug, Clone, Default, Deserialize, Serialize)] #[serde(default)] pub struct AptPackagesLayer { /// The version of Ubuntu that packages will be installed for e.g `bionic`, `focal` version: String, /// The path to the `Aptfile` (or similar name) that specifies packages to be installed file: Option<PathBuf>, /// Should Ubuntu deb repository mirrors be used? 
mirrors: bool, /// Should packages that are no longer in the Aptfile be removed clean: bool, /// A list of package names, or deb URLs, to be installed /// /// Usually instead of an `Aptfile` but can be specified in addition to it packages: Vec<String>, /// A list of repos to be used /// /// Usually instead of `:repo:` entries in an `Aptfile` but can be specified in addition to it repos: Vec<String>, } impl AptPackagesLayer { pub fn new(options: &LayerOptions, app_path: Option<&Path>) -> Self { let version = match options.get("version") { Some(version) => version.to_string(), None => sys_info::linux_os_release() .ok() .and_then(|info| info.version_codename) .unwrap_or_default(), }; let file = options.get("file").map(PathBuf::from); // Split `Aptfile` into packages and repos and detect options let mut mirrors = env::var("STENCILA_APT_MIRRORS").ok(); let mut clean = env::var("STENCILA_APT_CLEAN").ok(); let mut repos = Vec::new(); let mut packages = match (&file, &app_path) { (Some(file), Some(path)) => read_to_string(path.join(file)) .unwrap_or_default() .lines() .filter_map(|line| { let line = line.trim(); if line.is_empty() || line.starts_with('#') { None } else if let Some(repo) = line.strip_prefix(":repo:") { repos.push(repo.to_string()); None } else if let Some(value) = line.strip_prefix(":mirrors:") { mirrors = Some(value.trim().to_string()); None } else if let Some(value) = line.strip_prefix(":clean:") { clean = Some(value.trim().to_string()); None } else { Some(line.to_string()) } }) .collect(), _ => Vec::new(), }; // Turn off use of mirrors? let mirrors = !matches!( mirrors.as_deref(), Some("no") | Some("off") | Some("false") | Some("0") ); // Turn off cleaning? 
let clean = !matches!( clean.as_deref(), Some("no") | Some("off") | Some("false") | Some("0") ); // Add any other packages if let Some(list) = options.get("packages") { packages.append(&mut list.split(',').map(|pkg| pkg.trim().to_string()).collect()); } // Add any other repos if let Some(list) = options.get("repos") { repos.append(&mut list.split(',').map(|pkg| pkg.trim().to_string()).collect()); } Self { version, file, mirrors, clean, packages, repos, } } } impl Layer for AptPackagesLayer { type Buildpack = AptBuildpack; type Metadata = AptPackagesLayer; fn types(&self) -> LayerTypes { LayerTypes { build: true, launch: true, cache: true, } } fn existing_layer_strategy( &self, _context: &BuildContext<Self::Buildpack>, layer_data: &libcnb::layer::LayerData<Self::Metadata>, ) -> Result<libcnb::layer::ExistingLayerStrategy, <Self::Buildpack as Buildpack>::Error> { let existing = &layer_data.content_metadata.metadata; let strategy = if self.version != existing.version { tracing::info!( "Existing `apt_packages` layer is for different Ubuntu version (`{}` => `{}`); will recreate", existing.version, self.version, ); ExistingLayerStrategy::Recreate } else if self.repos != existing.repos { tracing::info!( "Existing `apt_packages` layer has different repos (`{}` => `{}`); will recreate", existing.repos.join(","), self.repos.join(","), ); ExistingLayerStrategy::Recreate } else if self.packages != existing.packages { tracing::info!( "Existing `apt_packages` layer has different packages (`{}` => `{}`); will update", existing.packages.join(","), self.packages.join(",") ); ExistingLayerStrategy::Update } else { tracing::info!("Existing `apt_packages` layer meets requirements; will keep",); ExistingLayerStrategy::Keep }; Ok(strategy) } fn create( &self, _context: &BuildContext<Self::Buildpack>, layer_path: &Path, ) -> Result<LayerResult<Self::Metadata>, eyre::Report> { tracing::info!("Creating `apt_packages` layer"); self.install(layer_path) } fn update( &self, _context: 
&BuildContext<Self::Buildpack>, layer_data: &libcnb::layer::LayerData<Self::Metadata>, ) -> Result<LayerResult<Self::Metadata>, <Self::Buildpack as Buildpack>::Error> { tracing::info!("Updating `apt_packages` layer"); self.install(&layer_data.path) } } impl AptPackagesLayer { pub fn install( &self, layer_path: &Path, ) -> Result<LayerResult<AptPackagesLayer>, eyre::Report> { let layer_path = &layer_path.canonicalize()?; // Create the directories that `apt-get` needs let apt_cache_dir = layer_path.join("cache"); let apt_archives_dir = apt_cache_dir.join("archives"); let apt_state_dir = layer_path.join("state"); let apt_sources_dir = layer_path.join("sources"); create_dir_all(apt_archives_dir.join("partial"))?; create_dir_all(apt_state_dir.join("lists").join("partial"))?; create_dir_all(&apt_sources_dir)?; // Create a list of base deb repositories let repos = if self.mirrors { // Generate a new sources list using the mirror protocol // In the future we may allow the `STENCILA_APT_MIRRORS` env var to contain a // list of mirrors to use let release = sys_info::linux_os_release() .ok() .and_then(|info| info.version_codename) .ok_or_else(|| eyre!("Unable to get Linux OS release"))?; format!( r#" deb mirror://mirrors.ubuntu.com/mirrors.txt {release} main restricted universe multiverse deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-updates main restricted universe multiverse deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-backports main restricted universe multiverse deb mirror://mirrors.ubuntu.com/mirrors.txt {release}-security main restricted universe multiverse "#, ) } else { // Use the existing system sources list read_to_string( PathBuf::from("/") .join("etc") .join("apt") .join("sources.list"), )? 
}; // Add any repositories added in the `Aptfile` let repos = [&repos, "\n", &self.repos.join("\n")].concat(); let apt_sources_list = apt_sources_dir.join("sources.list"); write(&apt_sources_list, repos)?; // Configure apt-get and update cache let apt = Binary::named("apt-get").require_sync()?; let apt_options: Vec<String> = vec![ "debug::nolocking=true", &format!("dir::cache={}", apt_cache_dir.display()), &format!("dir::state={}", apt_state_dir.display()), &format!("dir::etc::sourcelist={}", apt_sources_list.display()), "dir::etc::sourceparts=/dev/null", ] .into_iter() .map(|option| ["-o", option].concat()) .collect(); tracing::info!("Updating apt caches"); apt.run_sync([apt_options.clone(), vec_string!["update"]].concat())?; // Read in the list of packages that are currently installed let installed_packages_dir = layer_path.join("installed").join("packages"); create_dir_all(&installed_packages_dir)?; let mut installed_packages = read_dir(&installed_packages_dir)? .flatten() .map(|entry| entry.file_name().to_string_lossy().to_string()) .collect::<HashSet<String>>(); // Ensure the `installed/debs` dir is created (reading of this done later only if needed) let installed_debs_dir = layer_path.join("installed").join("debs"); create_dir_all(&installed_debs_dir)?; let dpkg = Binary::named("dpkg").require_sync()?; // Closure to get a list of the debs in archives dir let get_debs = || -> Vec<OsString> { apt_archives_dir .read_dir() .expect("Archives directory should be readable") .flatten() .filter_map(|entry| { let path = entry.path(); if path.extension() == Some(&OsString::from("deb")) { path.file_name().map(|name| name.to_os_string()) } else { None } }) .collect() }; // Get deb files, including those of dependencies, extract them and record the list // of files associated with each for package in &self.packages { // Slugify URLs to be more filesystem friendly let package_id = if package.starts_with("http") && package.ends_with(".deb") { package.replace("://", 
"-").replace("/", "-") } else { package.to_string() }; // If the package has already been installed then skip it (but remove so it is not // uninstalled later since it is still wanted) if installed_packages.remove(&package_id) { tracing::info!("Package `{}` is already installed", package); continue; } else { tracing::info!("Installing package `{}`", package); } // Record list of debs in archive before download let debs_before = get_debs(); // Download debs for this package (including any dependencies if not a URL) if package.starts_with("http") && package.ends_with(".deb") { tracing::info!("Downloading `{}`", package); let path = apt_archives_dir.join(format!("{}.deb", package_id)); download_sync(package, &path)?; } else { tracing::info!("Fetching deb files for package `{}`", package); // Assumes using `apt-get` >= 1.1 which replaced `--force-yes` with `--allow-*` options apt.run_sync( [ apt_options.clone(), vec_string![ "--assume-yes", "--allow-downgrades", "--allow-remove-essential", "--allow-change-held-packages", "--download-only", "--reinstall", "install", package ], ] .concat(), )?; } // Record the debs that were downloaded for the package // TODO: This is not very reliable since it will be empty if the package has // already been downloaded because it is an dependency of another. let debs_after = get_debs(); let debs_downloaded: Vec<OsString> = debs_after .into_iter() .filter(|item| !debs_before.contains(item)) .collect(); // Extract the downloaded deb files into the layer and record tracing::info!("Extracting debs for package `{}`", package); for deb in &debs_downloaded { let deb_path = apt_archives_dir.join(deb); dpkg.run_sync([ "--extract", &deb_path.display().to_string(), &layer_path.display().to_string(), ])?; // Now that the deb has been extracted write it's manifest file with the list of files extracted // TODO: This does not need to be done here but can instead deferred to if / when the // package needs to be removed. 
let contents = process::Command::new("dpkg") .arg("--contents") .arg(deb_path) .output()? .stdout; let files_extracted = String::from_utf8(contents)? .split('\n') .filter_map(|line| { let mut cols = line.split_whitespace(); let size = cols.nth(2).unwrap_or("0"); if size != "0" { line.rfind("./").map(|pos| line[pos..].to_string()) } else { None } }) .collect::<Vec<String>>() .join("\n"); write(installed_debs_dir.join(deb), files_extracted)?; } // Now that the package has been successfully installed write its manifest file write( installed_packages_dir.join(package_id), debs_downloaded .iter() .map(|deb| deb.to_string_lossy().to_string()) .collect::<Vec<String>>() .join("\n"), )?; } // Function to read a manifests file (list of debs, or files within a deb) fn read_manifest(file: &Path) -> Option<Vec<String>> { read_to_string(file).ok().map(|content| { content .split('\n') .map(|line| line.to_string()) .collect::<Vec<String>>() }) } // Remove previously installed but currently unwanted packages (those not yet removed from the list) if self.clean && !installed_packages.is_empty() { for package in installed_packages { tracing::info!("Uninstalling package `{}`", package); // Read in the list of debs installed for this package let package_debs = installed_packages_dir.join(&package); let debs = read_manifest(&package_debs).unwrap_or_default(); if debs.len() > 1 { tracing::warn!("Dependencies were installed when package `{}` was installed. 
These will be removed also but may this may affect other packages subsequently installed that share those dependencies", package); } for deb in debs { // Read in the list of files that were installed for the deb and remove them all let deb_files = installed_debs_dir.join(&deb); if let Some(files) = read_manifest(&deb_files) { for file_path in files { let layer_file_path = layer_path.join(file_path); remove_file(layer_file_path).ok(); } } // Remove the deb from the archive // If we don't do this then if the package get's re-added we do not "see" // the deb as getting added. Also, it saves space. remove_file(apt_archives_dir.join(&deb)).ok(); // Remove the manifest remove_file(&deb_files).ok(); } remove_file(&package_debs).ok(); } } // Prepend a lot of env vars let prefix_paths = |paths: &[&str]| { // The trailing colon here is important to separate what we prepend // from the existing path env::join_paths(paths.iter().map(|path| layer_path.join(path))) .map(|joined| format!("{}:", joined.to_string_lossy())) }; let mut layer_env = LayerEnv::new().chainable_insert( Scope::All, ModificationBehavior::Prepend, "PATH", prefix_paths(&["usr/bin", "usr/local/bin"])?, ); let include_path_prepend = prefix_paths(&[ "usr/include/x86_64-linux-gnu", "usr/include/i386-linux-gnu", "usr/include", ])?; for var in ["INCLUDE_PATH", "CPATH", "CPPPATH"] { layer_env.insert( Scope::All, ModificationBehavior::Prepend, var, &include_path_prepend, ); } let library_paths = prefix_paths(&[ "usr/lib/x86_64-linux-gnu", "usr/lib/i386-linux-gnu", "usr/lib", "lib/x86_64-linux-gnu", "lib/i386-linux-gnu", "lib", ])?; for var in ["LD_LIBRARY_PATH", "LIBRARY_PATH"] { layer_env.insert( Scope::All, ModificationBehavior::Prepend, var, &library_paths, ); } layer_env.insert( Scope::All, ModificationBehavior::Prepend, "PKG_CONFIG_PATH", prefix_paths(&[ "usr/lib/x86_64-linux-gnu/pkgconfig", "usr/lib/i386-linux-gnu/pkgconfig", "usr/lib/pkgconfig", ])?, ); 
LayerResultBuilder::new(self.clone()).env(layer_env).build() } }
use std::mem; use std::collections::HashMap; use edit_types::{BufferEvent, EventDomain}; /// A container that manages and holds all recordings for the current editing session pub(crate) struct Recorder { active_recording: Option<String>, recordings: HashMap<String, Recording>, } impl Recorder { pub(crate) fn new() -> Recorder { Recorder { active_recording: None, recordings: HashMap::new(), } } pub(crate) fn is_recording(&self) -> bool { self.active_recording.is_some() } /// Starts or stops the specified recording. /// /// /// There are three outcome behaviors: /// - If the current recording name is specified, the active recording is saved /// - If no recording name is specified, the currently active recording is saved /// - If a recording name other than the active recording is specified, /// the current recording will be thrown out and will be switched to the new name /// /// In addition to the above: /// - If the recording was saved, there is no active recording /// - If the recording was switched, there will be a new active recording pub(crate) fn toggle_recording(&mut self, recording_name: Option<String>) { let is_recording = self.is_recording(); let last_recording = self.active_recording.take(); match (is_recording, &last_recording, &recording_name) { (true, Some(last_recording), None) => self.filter_recording(last_recording), (true, Some(last_recording), Some(recording_name)) => { if last_recording != recording_name { self.clear(last_recording); } else { self.filter_recording(last_recording); return; } } _ => {} } mem::replace(&mut self.active_recording, recording_name); } /// Saves an event into the currently active recording. /// If no recording is active, the event passed in is ignored. 
pub(crate) fn record(&mut self, cmd: EventDomain) { if !self.is_recording() { warn!("Recorder not active-- ignoring event {:?}", cmd); return; } let current_recording = self.active_recording.as_ref().unwrap(); let recording = self.recordings.entry(current_recording.clone()) .or_insert(Recording::new()); recording.events.push(cmd); } /// Iterates over a specified recording's buffer and runs the specified action /// on each event. pub(crate) fn play<F>(&self, recording_name: &str, action: F) where F: FnMut(&EventDomain) -> () { self.recordings.get(recording_name) .and_then(|recording| { recording.play(action); Some(()) }); } /// Completely removes the specified recording from the Recorder pub(crate) fn clear(&mut self, recording_name: &str) { self.recordings.remove(recording_name); } fn filter_recording(&mut self, recording_name: &str) { self.recordings.get_mut(recording_name) .and_then(|recording| { recording.filter_undos(); Some(()) }); } } struct Recording { events: Vec<EventDomain> } impl Recording { fn new() -> Recording { Recording { events: Vec::new() } } /// Iterates over the recording buffer and runs the specified action /// on each event. fn play<F>(&self, action: F) where F: FnMut(&EventDomain) -> () { self.events.iter().for_each(action) } /// Cleans the recording buffer by filtering out any undo or redo events. /// /// A recording should not store any undos or redos-- /// call this once a recording is 'finalized.' 
fn filter_undos(&mut self) { let mut saw_undo = false; let mut saw_redo = false; // Walk the recording backwards and remove any undo / redo events let filtered: Vec<EventDomain> = self.events.clone() .into_iter() .rev() .filter(|event| { if let EventDomain::Buffer(event) = event { return match event { BufferEvent::Undo => { saw_undo = !saw_redo; saw_redo = false; false } BufferEvent::Redo => { saw_redo = !saw_undo; saw_undo = false; false } _ => { let ret = !saw_undo; saw_undo = false; saw_redo = false; ret } }; } true }) .collect::<Vec<EventDomain>>() .into_iter() .rev() .collect(); mem::replace(&mut self.events, filtered); } } // Tests for filtering undo / redo from the recording buffer // A = Event // B = Event // U = Undo // R = Redo #[cfg(test)] mod tests { use recorder::Recorder; use edit_types::{BufferEvent, EventDomain}; #[test] fn play_recording() { let mut recorder = Recorder::new(); let recording_name = String::new(); let mut expected_events: Vec<EventDomain> = vec![ BufferEvent::Indent.into(), BufferEvent::Outdent.into(), BufferEvent::DuplicateLine.into(), BufferEvent::Transpose.into(), ]; recorder.toggle_recording(Some(recording_name.clone())); for event in expected_events.iter().rev() { recorder.record(event.clone()); } recorder.toggle_recording(Some(recording_name.clone())); recorder.play(&recording_name, |event| { // We shouldn't iterate more times than we added items! 
let expected_event = expected_events.pop(); assert!(expected_event.is_some()); // Should be the event we expect assert_eq!(*event, expected_event.unwrap()); }); // We should have iterated over everything we inserted assert_eq!(expected_events.len(), 0); } #[test] fn clear_recording() { let mut recorder = Recorder::new(); let recording_name = String::new(); recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Outdent.into()); recorder.record(BufferEvent::Indent.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events.len(), 4); recorder.clear(&recording_name); assert!(recorder.recordings.get(&recording_name).is_none()); } #[test] fn multiple_recordings() { let mut recorder = Recorder::new(); let recording_a = "a".to_string(); let recording_b = "b".to_string(); recorder.toggle_recording(Some(recording_a.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.toggle_recording(Some(recording_a.clone())); recorder.toggle_recording(Some(recording_b.clone())); recorder.record(BufferEvent::Outdent.into()); recorder.record(BufferEvent::Indent.into()); recorder.toggle_recording(Some(recording_b.clone())); assert_eq!(recorder.recordings.get(&recording_a).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); assert_eq!(recorder.recordings.get(&recording_b).unwrap().events, vec![BufferEvent::Outdent.into(), BufferEvent::Indent.into()]); recorder.clear(&recording_a); assert!(recorder.recordings.get(&recording_a).is_none()); assert!(recorder.recordings.get(&recording_b).is_some()); } #[test] fn basic_test() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo removes last item, redo only affects undo // A U B R => B 
recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Redo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::DuplicateLine.into()]); } #[test] fn basic_test_swapped() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Swapping order of undo and redo from the basic test should give us a different leftover item // A R B U => A recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into()]); } #[test] fn redo_cancels_undo() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Redo cancels out an undo // A U R B => A B recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); } #[test] fn undo_cancels_redo() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo should cancel a redo, preventing it from canceling another undo // A U R U => _ recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); 
recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![]); } #[test] fn undo_as_first_item() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo shouldn't do anything as the first item // U A B R => A B recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Redo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); } #[test] fn redo_as_first_item() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Redo shouldn't do anything as the first item // R A B U => A recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into()]); } } Add copyright notice // Copyright 2018 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. use std::mem; use std::collections::HashMap; use edit_types::{BufferEvent, EventDomain}; /// A container that manages and holds all recordings for the current editing session pub(crate) struct Recorder { active_recording: Option<String>, recordings: HashMap<String, Recording>, } impl Recorder { pub(crate) fn new() -> Recorder { Recorder { active_recording: None, recordings: HashMap::new(), } } pub(crate) fn is_recording(&self) -> bool { self.active_recording.is_some() } /// Starts or stops the specified recording. /// /// /// There are three outcome behaviors: /// - If the current recording name is specified, the active recording is saved /// - If no recording name is specified, the currently active recording is saved /// - If a recording name other than the active recording is specified, /// the current recording will be thrown out and will be switched to the new name /// /// In addition to the above: /// - If the recording was saved, there is no active recording /// - If the recording was switched, there will be a new active recording pub(crate) fn toggle_recording(&mut self, recording_name: Option<String>) { let is_recording = self.is_recording(); let last_recording = self.active_recording.take(); match (is_recording, &last_recording, &recording_name) { (true, Some(last_recording), None) => self.filter_recording(last_recording), (true, Some(last_recording), Some(recording_name)) => { if last_recording != recording_name { self.clear(last_recording); } else { self.filter_recording(last_recording); return; } }, _ => {} } mem::replace(&mut self.active_recording, recording_name); } /// Saves an event into the currently active recording. /// If no recording is active, the event passed in is ignored. 
pub(crate) fn record(&mut self, cmd: EventDomain) { if !self.is_recording() { warn!("Recorder not active-- ignoring event {:?}", cmd); return; } let current_recording = self.active_recording.as_ref().unwrap(); let recording = self.recordings.entry(current_recording.clone()) .or_insert(Recording::new()); recording.events.push(cmd); } /// Iterates over a specified recording's buffer and runs the specified action /// on each event. pub(crate) fn play<F>(&self, recording_name: &str, action: F) where F: FnMut(&EventDomain) -> () { self.recordings.get(recording_name) .and_then(|recording| { recording.play(action); Some(()) }); } /// Completely removes the specified recording from the Recorder pub(crate) fn clear(&mut self, recording_name: &str) { self.recordings.remove(recording_name); } fn filter_recording(&mut self, recording_name: &str) { self.recordings.get_mut(recording_name) .and_then(|recording| { recording.filter_undos(); Some(()) }); } } struct Recording { events: Vec<EventDomain> } impl Recording { fn new() -> Recording { Recording { events: Vec::new() } } /// Iterates over the recording buffer and runs the specified action /// on each event. fn play<F>(&self, action: F) where F: FnMut(&EventDomain) -> () { self.events.iter().for_each(action) } /// Cleans the recording buffer by filtering out any undo or redo events. /// /// A recording should not store any undos or redos-- /// call this once a recording is 'finalized.' 
fn filter_undos(&mut self) { let mut saw_undo = false; let mut saw_redo = false; // Walk the recording backwards and remove any undo / redo events let filtered: Vec<EventDomain> = self.events.clone() .into_iter() .rev() .filter(|event| { if let EventDomain::Buffer(event) = event { return match event { BufferEvent::Undo => { saw_undo = !saw_redo; saw_redo = false; false } BufferEvent::Redo => { saw_redo = !saw_undo; saw_undo = false; false } _ => { let ret = !saw_undo; saw_undo = false; saw_redo = false; ret } }; } true }) .collect::<Vec<EventDomain>>() .into_iter() .rev() .collect(); mem::replace(&mut self.events, filtered); } } // Tests for filtering undo / redo from the recording buffer // A = Event // B = Event // U = Undo // R = Redo #[cfg(test)] mod tests { use recorder::Recorder; use edit_types::{BufferEvent, EventDomain}; #[test] fn play_recording() { let mut recorder = Recorder::new(); let recording_name = String::new(); let mut expected_events: Vec<EventDomain> = vec![ BufferEvent::Indent.into(), BufferEvent::Outdent.into(), BufferEvent::DuplicateLine.into(), BufferEvent::Transpose.into(), ]; recorder.toggle_recording(Some(recording_name.clone())); for event in expected_events.iter().rev() { recorder.record(event.clone()); } recorder.toggle_recording(Some(recording_name.clone())); recorder.play(&recording_name, |event| { // We shouldn't iterate more times than we added items! 
let expected_event = expected_events.pop(); assert!(expected_event.is_some()); // Should be the event we expect assert_eq!(*event, expected_event.unwrap()); }); // We should have iterated over everything we inserted assert_eq!(expected_events.len(), 0); } #[test] fn clear_recording() { let mut recorder = Recorder::new(); let recording_name = String::new(); recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Outdent.into()); recorder.record(BufferEvent::Indent.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events.len(), 4); recorder.clear(&recording_name); assert!(recorder.recordings.get(&recording_name).is_none()); } #[test] fn multiple_recordings() { let mut recorder = Recorder::new(); let recording_a = "a".to_string(); let recording_b = "b".to_string(); recorder.toggle_recording(Some(recording_a.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.toggle_recording(Some(recording_a.clone())); recorder.toggle_recording(Some(recording_b.clone())); recorder.record(BufferEvent::Outdent.into()); recorder.record(BufferEvent::Indent.into()); recorder.toggle_recording(Some(recording_b.clone())); assert_eq!(recorder.recordings.get(&recording_a).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); assert_eq!(recorder.recordings.get(&recording_b).unwrap().events, vec![BufferEvent::Outdent.into(), BufferEvent::Indent.into()]); recorder.clear(&recording_a); assert!(recorder.recordings.get(&recording_a).is_none()); assert!(recorder.recordings.get(&recording_b).is_some()); } #[test] fn basic_test() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo removes last item, redo only affects undo // A U B R => B 
recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Redo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::DuplicateLine.into()]); } #[test] fn basic_test_swapped() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Swapping order of undo and redo from the basic test should give us a different leftover item // A R B U => A recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into()]); } #[test] fn redo_cancels_undo() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Redo cancels out an undo // A U R B => A B recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); } #[test] fn undo_cancels_redo() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo should cancel a redo, preventing it from canceling another undo // A U R U => _ recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::Undo.into()); 
recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![]); } #[test] fn undo_as_first_item() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Undo shouldn't do anything as the first item // U A B R => A B recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Undo.into()); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Redo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into(), BufferEvent::DuplicateLine.into()]); } #[test] fn redo_as_first_item() { let mut recorder = Recorder::new(); let recording_name = String::new(); // Redo shouldn't do anything as the first item // R A B U => A recorder.toggle_recording(Some(recording_name.clone())); recorder.record(BufferEvent::Redo.into()); recorder.record(BufferEvent::Transpose.into()); recorder.record(BufferEvent::DuplicateLine.into()); recorder.record(BufferEvent::Undo.into()); recorder.toggle_recording(Some(recording_name.clone())); assert_eq!(recorder.recordings.get(&recording_name).unwrap().events, vec![BufferEvent::Transpose.into()]); } }
extern crate dirs;
extern crate rand;
extern crate regex;
extern crate unicode_segmentation;
extern crate unicode_width;
extern crate url;

use self::regex::Regex;
use self::unicode_segmentation::UnicodeSegmentation;
use self::unicode_width::UnicodeWidthStr;
use self::url::percent_encoding::*;
use self::url::Url;
use logger::{self, Level};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};

/// Replaces every occurrence of `from` in `input` with `to`.
pub fn replace_all(input: String, from: &str, to: &str) -> String {
    input.replace(from, to)
}

/// Collapses each run of internal whitespace to a single space, keeping the
/// original leading whitespace and a single trailing space if there was one.
pub fn consolidate_whitespace(input: String) -> String {
    let found = input.find(|c: char| !c.is_whitespace());
    let mut result = String::new();

    if let Some(found) = found {
        let (leading, rest) = input.split_at(found);
        // `unwrap` is safe: `found` proves the string is non-empty.
        let lastchar = input.chars().rev().next().unwrap();

        result.push_str(leading);
        let iter = rest.split_whitespace();
        for elem in iter {
            result.push_str(elem);
            result.push(' ');
        }
        result.pop();
        // Keep a single trailing space if the input ended with whitespace.
        if lastchar.is_whitespace() {
            result.push(' ');
        }
    }

    result
}

/// Parses `rs_str` as a `u32`, falling back to `default_value` when the
/// string is not a valid number.
pub fn to_u(rs_str: String, default_value: u32) -> u32 {
    // `unwrap_or` replaces the old parse-then-overwrite-the-Err dance.
    rs_str.parse::<u32>().unwrap_or(default_value)
}

/// Combine a base URL and a link to a new absolute URL.
/// If the base URL is malformed or joining with the link fails, link will be returned.
/// # Examples /// ``` /// use libnewsboat::utils::absolute_url; /// assert_eq!(absolute_url("http://foobar/hello/crook/", "bar.html"), /// "http://foobar/hello/crook/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "/bar.html"), /// "https://foobar/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "http://quux/bar.html"), /// "http://quux/bar.html".to_owned()); /// assert_eq!(absolute_url("http://foobar", "bla.html"), /// "http://foobar/bla.html".to_owned()); /// assert_eq!(absolute_url("http://test:test@foobar:33", "bla2.html"), /// "http://test:test@foobar:33/bla2.html".to_owned()); /// assert_eq!(absolute_url("foo", "bar"), "bar".to_owned()); /// ``` pub fn absolute_url(base_url: &str, link: &str) -> String { Url::parse(base_url) .and_then(|url| url.join(link)) .as_ref() .map(Url::as_str) .unwrap_or(link) .to_owned() } pub fn resolve_tilde(path: String) -> String { let mut file_path: String = path; let home_path = dirs::home_dir(); if let Some(home_path) = home_path { let home_path_string = home_path.to_string_lossy().into_owned(); if file_path == "~" { file_path = home_path_string; } else { let tmp_file_path = file_path.clone(); if tmp_file_path.len() > 1 { let (tilde, remaining) = tmp_file_path.split_at(2); if tilde == "~/" { file_path = home_path_string + "/" + remaining; } } } } file_path } pub fn resolve_relative(reference: &Path, path: &Path) -> PathBuf { if path.is_relative() { // Will only ever panic if reference is `/`, which shouldn't be the case as reference is // always a file path return reference.parent().unwrap().join(path); } path.to_path_buf() } pub fn is_special_url(url: &str) -> bool { is_query_url(url) || is_filter_url(url) || is_exec_url(url) } /// Check if the given URL is a http(s) URL /// # Example /// ``` /// use libnewsboat::utils::is_http_url; /// assert!(is_http_url("http://example.com")); /// ``` pub fn is_http_url(url: &str) -> bool { url.starts_with("https://") || 
        url.starts_with("http://")
}

/// Returns true if the URL uses Newsboat's `query:` pseudo-scheme.
pub fn is_query_url(url: &str) -> bool {
    url.starts_with("query:")
}

/// Returns true if the URL uses Newsboat's `filter:` pseudo-scheme.
pub fn is_filter_url(url: &str) -> bool {
    url.starts_with("filter:")
}

/// Returns true if the URL uses Newsboat's `exec:` pseudo-scheme.
pub fn is_exec_url(url: &str) -> bool {
    url.starts_with("exec:")
}

/// Censor URLs by replacing username and password with '*'
/// ```
/// use libnewsboat::utils::censor_url;
/// assert_eq!(&censor_url(""), "");
/// assert_eq!(&censor_url("foobar"), "foobar");
/// assert_eq!(&censor_url("foobar://xyz/"), "foobar://xyz/");
/// assert_eq!(&censor_url("http://newsbeuter.org/"),
///     "http://newsbeuter.org/");
/// assert_eq!(&censor_url("https://newsbeuter.org/"),
///     "https://newsbeuter.org/");
///
/// assert_eq!(&censor_url("http://@newsbeuter.org/"),
///     "http://newsbeuter.org/");
/// assert_eq!(&censor_url("https://@newsbeuter.org/"),
///     "https://newsbeuter.org/");
///
/// assert_eq!(&censor_url("http://foo:bar@newsbeuter.org/"),
///     "http://*:*@newsbeuter.org/");
/// assert_eq!(&censor_url("https://foo:bar@newsbeuter.org/"),
///     "https://*:*@newsbeuter.org/");
///
/// assert_eq!(&censor_url("http://aschas@newsbeuter.org/"),
///     "http://*:*@newsbeuter.org/");
/// assert_eq!(&censor_url("https://aschas@newsbeuter.org/"),
///     "https://*:*@newsbeuter.org/");
///
/// assert_eq!(&censor_url("xxx://aschas@newsbeuter.org/"),
///     "xxx://*:*@newsbeuter.org/");
///
/// assert_eq!(&censor_url("http://foobar"), "http://foobar/");
/// assert_eq!(&censor_url("https://foobar"), "https://foobar/");
///
/// assert_eq!(&censor_url("http://aschas@host"), "http://*:*@host/");
/// assert_eq!(&censor_url("https://aschas@host"), "https://*:*@host/");
///
/// assert_eq!(&censor_url("query:name:age between 1:10"),
///     "query:name:age between 1:10");
/// ```
pub fn censor_url(url: &str) -> String {
    // Special (query:/filter:/exec:) URLs are passed through untouched.
    if !url.is_empty() && !is_special_url(url) {
        Url::parse(url)
            .map(|mut url| {
                if url.username() != "" || url.password().is_some() {
                    // can not panic. If either username or password is present we can change both.
url.set_username("*").unwrap(); url.set_password(Some("*")).unwrap(); } url }) .as_ref() .map(Url::as_str) .unwrap_or(url) .to_owned() } else { url.into() } } pub fn get_default_browser() -> String { use std::env; env::var("BROWSER").unwrap_or_else(|_| "lynx".to_string()) } pub fn trim(rs_str: String) -> String { rs_str.trim().to_string() } pub fn trim_end(rs_str: String) -> String { let x: &[_] = &['\n', '\r']; rs_str.trim_right_matches(x).to_string() } pub fn quote(input: String) -> String { let mut input = input.replace("\"", "\\\""); input.insert(0, '"'); input.push('"'); input } pub fn quote_if_necessary(input: String) -> String { match input.find(' ') { Some(_) => quote(input), None => input, } } pub fn get_random_value(max: u32) -> u32 { rand::random::<u32>() % max } pub fn is_valid_color(color: &str) -> bool { const COLORS: [&str; 9] = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", ]; if COLORS.contains(&color) { true } else if color.starts_with("color0") { color == "color0" } else if color.starts_with("color") { let num_part = &color[5..]; num_part.parse::<u8>().is_ok() } else { false } } pub fn is_valid_attribute(attribute: &str) -> bool { const VALID_ATTRIBUTES: [&str; 9] = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; VALID_ATTRIBUTES.contains(&attribute) } pub fn strwidth(rs_str: &str) -> usize { let control = rs_str.chars().fold(true, |acc, x| acc & !x.is_control()); if control { return UnicodeWidthStr::width(rs_str); } else { return rs_str.len(); } } pub fn strwidth_stfl(rs_str: &str) -> usize { let reduce = 3 * rs_str .chars() .zip(rs_str.chars().skip(1)) .filter(|&(c, next_c)| c == '<' && next_c != '>') .count(); let width = strwidth(rs_str); if width < reduce { 0 } else { width - reduce } } pub fn is_valid_podcast_type(mimetype: &str) -> bool { let re = Regex::new(r"(audio|video)/.*").unwrap(); let matches = re.is_match(mimetype); let acceptable = 
["application/ogg"]; let found = acceptable.contains(&mimetype); matches || found } pub fn unescape_url(rs_str: String) -> Option<String> { let decoded = percent_decode(rs_str.as_bytes()).decode_utf8(); decoded.ok().map(|s| s.replace("\0", "")) } /// Runs given command in a shell, and returns the output (from stdout; stderr is printed to the /// screen). pub fn get_command_output(cmd: &str) -> String { let cmd = Command::new("sh") .arg("-c") .arg(cmd) // Inherit stdin so that the program can ask something of the user (see // https://github.com/newsboat/newsboat/issues/455 for an example). .stdin(Stdio::inherit()) .output(); // from_utf8_lossy will convert any bad bytes to U+FFFD cmd.map(|cmd| String::from_utf8_lossy(&cmd.stdout).into_owned()) .unwrap_or_else(|_| String::from("")) } // This function assumes that the user is not interested in command's output (not even errors on // stderr!), so it redirects everything to /dev/null. pub fn run_command(cmd: &str, param: &str) { let child = Command::new(cmd) .arg(param) // Prevent the command from blocking Newsboat by asking for input .stdin(Stdio::null()) // Prevent the command from botching the screen by printing onto it. .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn(); if let Err(error) = child { log!( Level::Debug, "utils::run_command: spawning a child for \"{}\" failed: {}", cmd, error ); } // We deliberately *don't* wait for the child to finish. 
}

/// Spawns `cmd_with_args[0]` with the remaining elements as its arguments,
/// pipes `input` to the child's stdin, and returns whatever the child wrote
/// to stdout.
///
/// Returns an empty string when `cmd_with_args` is empty or when spawning,
/// writing, or reading fails (failures are logged at debug level).
pub fn run_program(cmd_with_args: &[&str], input: &str) -> String {
    if cmd_with_args.is_empty() {
        return String::new();
    }

    Command::new(cmd_with_args[0])
        .args(&cmd_with_args[1..])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        // The child's stderr is discarded, not surfaced to the caller.
        .stderr(Stdio::null())
        .spawn()
        .map_err(|error| {
            log!(
                Level::Debug,
                "utils::run_program: spawning a child for \"{:?}\" \
                 with input \"{}\" failed: {}",
                cmd_with_args,
                input,
                error
            );
        })
        .and_then(|mut child| {
            if let Some(stdin) = child.stdin.as_mut() {
                // A failed write is logged but not fatal; we still try to
                // collect whatever output the child produces.
                if let Err(error) = stdin.write_all(input.as_bytes()) {
                    log!(
                        Level::Debug,
                        "utils::run_program: failed to write to child's stdin: {}",
                        error
                    );
                }
            }

            child
                .wait_with_output()
                .map_err(|error| {
                    log!(
                        Level::Debug,
                        "utils::run_program: failed to read child's stdout: {}",
                        error
                    );
                })
                .map(|output| String::from_utf8_lossy(&output.stdout).into_owned())
        })
        .unwrap_or_else(|_| String::new())
}

/// Attempts to derive a human-readable title from a URL's last path segment.
pub fn make_title(rs_str: String) -> String {
    /* Sometimes it is possible to construct the title from the URL
     * This attempts to do just that. eg:
     * http://domain.com/story/yy/mm/dd/title-with-dashes?a=b */

    // Strip out trailing slashes
    let mut result = rs_str.trim_right_matches('/');

    // get to the final part of the URI's path and
    // extract just the juicy part 'title-with-dashes?a=b'
    let v: Vec<&str> = result.rsplitn(2, '/').collect();
    result = v[0];

    // find where query part of URI starts
    // throw away the query part 'title-with-dashes'
    let v: Vec<&str> = result.splitn(2, '?').collect();
    result = v[0];

    // Throw away common webpage suffixes: .html, .php, .aspx, .htm
    result = result
        .trim_right_matches(".html")
        .trim_right_matches(".php")
        .trim_right_matches(".aspx")
        .trim_right_matches(".htm");

    // 'title with dashes'
    let result = result.replace('-', " ").replace('_', " ");

    // 'Title with dashes': uppercase the first character only.
    let mut c = result.chars();
    let result = match c.next() {
        None => String::new(),
        Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
    };

    // Un-escape any percent-encoding, e.g.
    // "It%27s%202017%21" -> "It's 2017!"
    match unescape_url(result) {
        // A decoding failure yields an empty title rather than escaped junk.
        None => String::new(),
        Some(f) => f,
    }
}

/// Counts graphemes in a given string.
///
/// ```
/// use libnewsboat::utils::graphemes_count;
///
/// assert_eq!(graphemes_count("D"), 1);
/// // len() counts bytes, not characters, but all ASCII symbols are represented by one byte in
/// // UTF-8, so len() returns 1 in this case
/// assert_eq!("D".len(), 1);
///
/// // Here's a situation where a single grapheme is represented by multiple bytes
/// assert_eq!(graphemes_count("Ж"), 1);
/// assert_eq!("Ж".len(), 2);
///
/// assert_eq!(graphemes_count("📰"), 1);
/// assert_eq!("📰".len(), 4);
/// ```
pub fn graphemes_count(input: &str) -> usize {
    UnicodeSegmentation::graphemes(input, true).count()
}

/// Extracts up to `n` first graphemes from the given string.
///
/// ```
/// use libnewsboat::utils::take_graphemes;
///
/// let input = "Привет!";
/// assert_eq!(take_graphemes(input, 1), "П");
/// assert_eq!(take_graphemes(input, 4), "Прив");
/// assert_eq!(take_graphemes(input, 6), "Привет");
/// assert_eq!(take_graphemes(input, 20), input);
/// ```
pub fn take_graphemes(input: &str, n: usize) -> String {
    UnicodeSegmentation::graphemes(input, true)
        .take(n)
        .collect::<String>()
}

#[cfg(test)]
mod tests {
    extern crate tempfile;

    use super::*;

    #[test]
    fn t_replace_all() {
        assert_eq!(
            replace_all(String::from("aaa"), "a", "b"),
            String::from("bbb")
        );
        assert_eq!(
            replace_all(String::from("aaa"), "aa", "ba"),
            String::from("baa")
        );
        assert_eq!(
            replace_all(String::from("aaaaaa"), "aa", "ba"),
            String::from("bababa")
        );
        assert_eq!(replace_all(String::new(), "a", "b"), String::new());

        let input = String::from("aaaa");
        assert_eq!(replace_all(input.clone(), "b", "c"), input);

        assert_eq!(
            replace_all(String::from("this is a normal test text"), " t", " T"),
            String::from("this is a normal Test Text")
        );

        assert_eq!(
            replace_all(String::from("o o o"), "o", "<o>"),
            String::from("<o> <o> <o>")
        );
    }

    #[test]
    fn
t_consolidate_whitespace() { assert_eq!( consolidate_whitespace(String::from("LoremIpsum")), String::from("LoremIpsum") ); assert_eq!( consolidate_whitespace(String::from("Lorem Ipsum")), String::from("Lorem Ipsum") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); assert_eq!(consolidate_whitespace(String::new()), String::new()); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); } #[test] fn t_to_u() { assert_eq!(to_u(String::from("0"), 10), 0); assert_eq!(to_u(String::from("23"), 1), 23); assert_eq!(to_u(String::from(""), 0), 0); assert_eq!(to_u(String::from("zero"), 1), 1); } #[test] fn t_is_special_url() { assert!(is_special_url("query:")); assert!(is_special_url("query: example")); assert!(!is_special_url("query")); assert!(!is_special_url(" query:")); assert!(is_special_url("filter:")); assert!(is_special_url("filter: example")); assert!(!is_special_url("filter")); assert!(!is_special_url(" filter:")); assert!(is_special_url("exec:")); assert!(is_special_url("exec: example")); assert!(!is_special_url("exec")); assert!(!is_special_url(" exec:")); } #[test] fn t_is_http_url() { assert!(is_http_url("https://foo.bar")); assert!(is_http_url("http://")); assert!(is_http_url("https://")); assert!(!is_http_url("htt://foo.bar")); assert!(!is_http_url("http:/")); assert!(!is_http_url("foo://bar")); } #[test] fn t_is_query_url() { assert!(is_query_url("query:")); assert!(is_query_url("query: example")); assert!(!is_query_url("query")); assert!(!is_query_url(" query:")); } #[test] fn t_is_filter_url() { assert!(is_filter_url("filter:")); assert!(is_filter_url("filter: example")); assert!(!is_filter_url("filter")); assert!(!is_filter_url(" 
filter:")); } #[test] fn t_is_exec_url() { assert!(is_exec_url("exec:")); assert!(is_exec_url("exec: example")); assert!(!is_exec_url("exec")); assert!(!is_exec_url(" exec:")); } #[test] fn t_trim() { assert_eq!(trim(String::from(" xxx\r\n")), "xxx"); assert_eq!(trim(String::from("\n\n abc foobar\n")), "abc foobar"); assert_eq!(trim(String::from("")), ""); assert_eq!(trim(String::from(" \n")), ""); } #[test] fn t_trim_end() { assert_eq!(trim_end(String::from("quux\n")), "quux"); } #[test] fn t_quote() { assert_eq!(quote("".to_string()), "\"\""); assert_eq!(quote("Hello World!".to_string()), "\"Hello World!\""); assert_eq!( quote("\"Hello World!\"".to_string()), "\"\\\"Hello World!\\\"\"" ); } #[test] fn t_quote_if_necessary() { assert_eq!(quote_if_necessary("".to_string()), ""); assert_eq!( quote_if_necessary("Hello World!".to_string()), "\"Hello World!\"" ); } #[test] fn t_is_valid_color() { let invalid = [ "awesome", "list", "of", "things", "that", "aren't", "colors", "color0123", "color1024", ]; for color in &invalid { assert!(!is_valid_color(color)); } let valid = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", "color0", "color163", ]; for color in &valid { assert!(is_valid_color(color)); } } #[test] fn t_strwidth() { assert!(strwidth("") == 0); assert!(strwidth("xx") == 2); assert!(strwidth("\u{F91F}") == 2); assert!(strwidth("\u{0007}") == 1); } #[test] fn t_strwidth_stfl() { assert!(strwidth_stfl("") == 0); assert!(strwidth_stfl("x<hi>x") == 3); assert!(strwidth_stfl("x<>x") == 4); assert!(strwidth_stfl("\u{F91F}") == 2); assert!(strwidth_stfl("\u{0007}") == 1); assert!(strwidth_stfl("<a") == 0); // #415 } #[test] fn t_is_valid_podcast_type() { assert!(is_valid_podcast_type("audio/mpeg")); assert!(is_valid_podcast_type("audio/mp3")); assert!(is_valid_podcast_type("audio/x-mp3")); assert!(is_valid_podcast_type("audio/ogg")); assert!(is_valid_podcast_type("application/ogg")); assert!(!is_valid_podcast_type("image/jpeg")); 
assert!(!is_valid_podcast_type("image/png")); assert!(!is_valid_podcast_type("text/plain")); assert!(!is_valid_podcast_type("application/zip")); } #[test] fn t_is_valid_attribte() { let invalid = ["foo", "bar", "baz", "quux"]; for attr in &invalid { assert!(!is_valid_attribute(attr)); } let valid = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; for attr in &valid { assert!(is_valid_attribute(attr)); } } #[test] fn t_unescape_url() { assert!(unescape_url(String::from("foo%20bar")).unwrap() == String::from("foo bar")); assert!( unescape_url(String::from( "%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D" )) .unwrap() == String::from("!#$&'()*+,/:;=?@[]") ); } #[test] fn t_get_command_output() { assert_eq!( get_command_output("ls /dev/null"), "/dev/null\n".to_string() ); assert_eq!( get_command_output("a-program-that-is-guaranteed-to-not-exists"), "".to_string() ); } #[test] fn t_run_command_executes_given_command_with_given_argument() { use self::tempfile::TempDir; use std::{thread, time}; let tmp = TempDir::new().unwrap(); let filepath = { let mut filepath = tmp.path().to_owned(); filepath.push("sentry"); filepath }; assert!(!filepath.exists()); run_command("touch", filepath.to_str().unwrap()); thread::sleep(time::Duration::from_millis(10)); assert!(filepath.exists()); } #[test] fn t_run_command_doesnt_wait_for_the_command_to_finish() { use std::time::{Duration, Instant}; let start = Instant::now(); let five: &str = "5"; run_command("sleep", five); let runtime = start.elapsed(); assert!(runtime < Duration::from_secs(1)); } #[test] fn t_run_program() { let input1 = "this is a multine-line\ntest string"; assert_eq!(run_program(&["cat"], input1), input1); assert_eq!( run_program(&["echo", "-n", "hello world"], ""), "hello world" ); } #[test] fn t_make_title() { let mut input = String::from("http://example.com/Item"); assert!(make_title(input) == String::from("Item")); input = 
String::from("http://example.com/This-is-the-title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is_the_title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is-the_title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.php"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.html"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.htm"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.aspx"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/this-is-the-title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/items/misc/this-is-the-title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/item/"); assert!(make_title(input) == String::from("Item")); input = String::from("http://example.com/item/////////////"); assert!(make_title(input) == String::from("Item")); input = String::from("blahscheme://example.com/this-is-the-title"); assert!(make_title(input) == String::from("This is the title")); input = String::from("http://example.com/story/aug/title-with-dashes?a=b"); assert!(make_title(input) == String::from("Title with dashes")); input = String::from("http://example.com/title-with-dashes?a=b&x=y&utf8=✓"); assert!(make_title(input) == String::from("Title with dashes")); input = String::from("https://example.com/It%27s%202017%21"); assert!(make_title(input) == String::from("It's 2017!")); input = String::from("https://example.com/?format=rss"); assert!(make_title(input) 
== String::from("")); assert!(make_title(String::from("")) == String::from("")); } #[test] fn t_resolve_relative() { assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("/baz")), Path::new("/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("/config/baz")), Path::new("/config/baz") ); assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("baz")), Path::new("/foo/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("baz")), Path::new("/baz") ); } } Use assert_eq! for better error message extern crate dirs; extern crate rand; extern crate regex; extern crate unicode_segmentation; extern crate unicode_width; extern crate url; use self::regex::Regex; use self::unicode_segmentation::UnicodeSegmentation; use self::unicode_width::UnicodeWidthStr; use self::url::percent_encoding::*; use self::url::Url; use logger::{self, Level}; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; pub fn replace_all(input: String, from: &str, to: &str) -> String { input.replace(from, to) } pub fn consolidate_whitespace(input: String) -> String { let found = input.find(|c: char| !c.is_whitespace()); let mut result = String::new(); if let Some(found) = found { let (leading, rest) = input.split_at(found); let lastchar = input.chars().rev().next().unwrap(); result.push_str(leading); let iter = rest.split_whitespace(); for elem in iter { result.push_str(elem); result.push(' '); } result.pop(); if lastchar.is_whitespace() { result.push(' '); } } result } pub fn to_u(rs_str: String, default_value: u32) -> u32 { let mut result = rs_str.parse::<u32>(); if result.is_err() { result = Ok(default_value); } result.unwrap() } /// Combine a base URL and a link to a new absolute URL. /// If the base URL is malformed or joining with the link fails, link will be returned. 
/// # Examples /// ``` /// use libnewsboat::utils::absolute_url; /// assert_eq!(absolute_url("http://foobar/hello/crook/", "bar.html"), /// "http://foobar/hello/crook/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "/bar.html"), /// "https://foobar/bar.html".to_owned()); /// assert_eq!(absolute_url("https://foobar/foo/", "http://quux/bar.html"), /// "http://quux/bar.html".to_owned()); /// assert_eq!(absolute_url("http://foobar", "bla.html"), /// "http://foobar/bla.html".to_owned()); /// assert_eq!(absolute_url("http://test:test@foobar:33", "bla2.html"), /// "http://test:test@foobar:33/bla2.html".to_owned()); /// assert_eq!(absolute_url("foo", "bar"), "bar".to_owned()); /// ``` pub fn absolute_url(base_url: &str, link: &str) -> String { Url::parse(base_url) .and_then(|url| url.join(link)) .as_ref() .map(Url::as_str) .unwrap_or(link) .to_owned() } pub fn resolve_tilde(path: String) -> String { let mut file_path: String = path; let home_path = dirs::home_dir(); if let Some(home_path) = home_path { let home_path_string = home_path.to_string_lossy().into_owned(); if file_path == "~" { file_path = home_path_string; } else { let tmp_file_path = file_path.clone(); if tmp_file_path.len() > 1 { let (tilde, remaining) = tmp_file_path.split_at(2); if tilde == "~/" { file_path = home_path_string + "/" + remaining; } } } } file_path } pub fn resolve_relative(reference: &Path, path: &Path) -> PathBuf { if path.is_relative() { // Will only ever panic if reference is `/`, which shouldn't be the case as reference is // always a file path return reference.parent().unwrap().join(path); } path.to_path_buf() } pub fn is_special_url(url: &str) -> bool { is_query_url(url) || is_filter_url(url) || is_exec_url(url) } /// Check if the given URL is a http(s) URL /// # Example /// ``` /// use libnewsboat::utils::is_http_url; /// assert!(is_http_url("http://example.com")); /// ``` pub fn is_http_url(url: &str) -> bool { url.starts_with("https://") || 
url.starts_with("http://") } pub fn is_query_url(url: &str) -> bool { url.starts_with("query:") } pub fn is_filter_url(url: &str) -> bool { url.starts_with("filter:") } pub fn is_exec_url(url: &str) -> bool { url.starts_with("exec:") } /// Censor URLs by replacing username and password with '*' /// ``` /// use libnewsboat::utils::censor_url; /// assert_eq!(&censor_url(""), ""); /// assert_eq!(&censor_url("foobar"), "foobar"); /// assert_eq!(&censor_url("foobar://xyz/"), "foobar://xyz/"); /// assert_eq!(&censor_url("http://newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://@newsbeuter.org/"), /// "http://newsbeuter.org/"); /// assert_eq!(&censor_url("https://@newsbeuter.org/"), /// "https://newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://foo:bar@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://foo:bar@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://aschas@newsbeuter.org/"), /// "http://*:*@newsbeuter.org/"); /// assert_eq!(&censor_url("https://aschas@newsbeuter.org/"), /// "https://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("xxx://aschas@newsbeuter.org/"), /// "xxx://*:*@newsbeuter.org/"); /// /// assert_eq!(&censor_url("http://foobar"), "http://foobar/"); /// assert_eq!(&censor_url("https://foobar"), "https://foobar/"); /// /// assert_eq!(&censor_url("http://aschas@host"), "http://*:*@host/"); /// assert_eq!(&censor_url("https://aschas@host"), "https://*:*@host/"); /// /// assert_eq!(&censor_url("query:name:age between 1:10"), /// "query:name:age between 1:10"); /// ``` pub fn censor_url(url: &str) -> String { if !url.is_empty() && !is_special_url(url) { Url::parse(url) .map(|mut url| { if url.username() != "" || url.password().is_some() { // can not panic. If either username or password is present we can change both. 
url.set_username("*").unwrap(); url.set_password(Some("*")).unwrap(); } url }) .as_ref() .map(Url::as_str) .unwrap_or(url) .to_owned() } else { url.into() } } pub fn get_default_browser() -> String { use std::env; env::var("BROWSER").unwrap_or_else(|_| "lynx".to_string()) } pub fn trim(rs_str: String) -> String { rs_str.trim().to_string() } pub fn trim_end(rs_str: String) -> String { let x: &[_] = &['\n', '\r']; rs_str.trim_right_matches(x).to_string() } pub fn quote(input: String) -> String { let mut input = input.replace("\"", "\\\""); input.insert(0, '"'); input.push('"'); input } pub fn quote_if_necessary(input: String) -> String { match input.find(' ') { Some(_) => quote(input), None => input, } } pub fn get_random_value(max: u32) -> u32 { rand::random::<u32>() % max } pub fn is_valid_color(color: &str) -> bool { const COLORS: [&str; 9] = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", ]; if COLORS.contains(&color) { true } else if color.starts_with("color0") { color == "color0" } else if color.starts_with("color") { let num_part = &color[5..]; num_part.parse::<u8>().is_ok() } else { false } } pub fn is_valid_attribute(attribute: &str) -> bool { const VALID_ATTRIBUTES: [&str; 9] = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; VALID_ATTRIBUTES.contains(&attribute) } pub fn strwidth(rs_str: &str) -> usize { let control = rs_str.chars().fold(true, |acc, x| acc & !x.is_control()); if control { return UnicodeWidthStr::width(rs_str); } else { return rs_str.len(); } } pub fn strwidth_stfl(rs_str: &str) -> usize { let reduce = 3 * rs_str .chars() .zip(rs_str.chars().skip(1)) .filter(|&(c, next_c)| c == '<' && next_c != '>') .count(); let width = strwidth(rs_str); if width < reduce { 0 } else { width - reduce } } pub fn is_valid_podcast_type(mimetype: &str) -> bool { let re = Regex::new(r"(audio|video)/.*").unwrap(); let matches = re.is_match(mimetype); let acceptable = 
["application/ogg"]; let found = acceptable.contains(&mimetype); matches || found } pub fn unescape_url(rs_str: String) -> Option<String> { let decoded = percent_decode(rs_str.as_bytes()).decode_utf8(); decoded.ok().map(|s| s.replace("\0", "")) } /// Runs given command in a shell, and returns the output (from stdout; stderr is printed to the /// screen). pub fn get_command_output(cmd: &str) -> String { let cmd = Command::new("sh") .arg("-c") .arg(cmd) // Inherit stdin so that the program can ask something of the user (see // https://github.com/newsboat/newsboat/issues/455 for an example). .stdin(Stdio::inherit()) .output(); // from_utf8_lossy will convert any bad bytes to U+FFFD cmd.map(|cmd| String::from_utf8_lossy(&cmd.stdout).into_owned()) .unwrap_or_else(|_| String::from("")) } // This function assumes that the user is not interested in command's output (not even errors on // stderr!), so it redirects everything to /dev/null. pub fn run_command(cmd: &str, param: &str) { let child = Command::new(cmd) .arg(param) // Prevent the command from blocking Newsboat by asking for input .stdin(Stdio::null()) // Prevent the command from botching the screen by printing onto it. .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn(); if let Err(error) = child { log!( Level::Debug, "utils::run_command: spawning a child for \"{}\" failed: {}", cmd, error ); } // We deliberately *don't* wait for the child to finish. 
} pub fn run_program(cmd_with_args: &[&str], input: &str) -> String { if cmd_with_args.is_empty() { return String::new(); } Command::new(cmd_with_args[0]) .args(&cmd_with_args[1..]) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::null()) .spawn() .map_err(|error| { log!( Level::Debug, "utils::run_program: spawning a child for \"{:?}\" \ with input \"{}\" failed: {}", cmd_with_args, input, error ); }) .and_then(|mut child| { if let Some(stdin) = child.stdin.as_mut() { if let Err(error) = stdin.write_all(input.as_bytes()) { log!( Level::Debug, "utils::run_program: failed to write to child's stdin: {}", error ); } } child .wait_with_output() .map_err(|error| { log!( Level::Debug, "utils::run_program: failed to read child's stdout: {}", error ); }) .map(|output| String::from_utf8_lossy(&output.stdout).into_owned()) }) .unwrap_or_else(|_| String::new()) } pub fn make_title(rs_str: String) -> String { /* Sometimes it is possible to construct the title from the URL * This attempts to do just that. eg: * http://domain.com/story/yy/mm/dd/title-with-dashes?a=b */ // Strip out trailing slashes let mut result = rs_str.trim_right_matches('/'); // get to the final part of the URI's path and // extract just the juicy part 'title-with-dashes?a=b' let v: Vec<&str> = result.rsplitn(2, '/').collect(); result = v[0]; // find where query part of URI starts // throw away the query part 'title-with-dashes' let v: Vec<&str> = result.splitn(2, '?').collect(); result = v[0]; // Throw away common webpage suffixes: .html, .php, .aspx, .htm result = result .trim_right_matches(".html") .trim_right_matches(".php") .trim_right_matches(".aspx") .trim_right_matches(".htm"); // 'title with dashes' let result = result.replace('-', " ").replace('_', " "); //'Title with dashes' //let result = ""; let mut c = result.chars(); let result = match c.next() { None => String::new(), Some(f) => f.to_uppercase().collect::<String>() + c.as_str(), }; // Un-escape any percent-encoding, e.g. 
"It%27s%202017%21" -> "It's // 2017!" match unescape_url(result) { None => String::new(), Some(f) => f, } } /// Counts graphemes in a given string. /// /// ``` /// use libnewsboat::utils::graphemes_count; /// /// assert_eq!(graphemes_count("D"), 1); /// // len() counts bytes, not characters, but all ASCII symbols are represented by one byte in /// // UTF-8, so len() returns 1 in this case /// assert_eq!("D".len(), 1); /// /// // Here's a situation where a single grapheme is represented by multiple bytes /// assert_eq!(graphemes_count("Ж"), 1); /// assert_eq!("Ж".len(), 2); /// /// assert_eq!(graphemes_count("📰"), 1); /// assert_eq!("📰".len(), 4); /// ``` pub fn graphemes_count(input: &str) -> usize { UnicodeSegmentation::graphemes(input, true).count() } /// Extracts up to `n` first graphemes from the given string. /// /// ``` /// use libnewsboat::utils::take_graphemes; /// /// let input = "Привет!"; /// assert_eq!(take_graphemes(input, 1), "П"); /// assert_eq!(take_graphemes(input, 4), "Прив"); /// assert_eq!(take_graphemes(input, 6), "Привет"); /// assert_eq!(take_graphemes(input, 20), input); /// ``` pub fn take_graphemes(input: &str, n: usize) -> String { UnicodeSegmentation::graphemes(input, true) .take(n) .collect::<String>() } #[cfg(test)] mod tests { extern crate tempfile; use super::*; #[test] fn t_replace_all() { assert_eq!( replace_all(String::from("aaa"), "a", "b"), String::from("bbb") ); assert_eq!( replace_all(String::from("aaa"), "aa", "ba"), String::from("baa") ); assert_eq!( replace_all(String::from("aaaaaa"), "aa", "ba"), String::from("bababa") ); assert_eq!(replace_all(String::new(), "a", "b"), String::new()); let input = String::from("aaaa"); assert_eq!(replace_all(input.clone(), "b", "c"), input); assert_eq!( replace_all(String::from("this is a normal test text"), " t", " T"), String::from("this is a normal Test Text") ); assert_eq!( replace_all(String::from("o o o"), "o", "<o>"), String::from("<o> <o> <o>") ); } #[test] fn 
t_consolidate_whitespace() { assert_eq!( consolidate_whitespace(String::from("LoremIpsum")), String::from("LoremIpsum") ); assert_eq!( consolidate_whitespace(String::from("Lorem Ipsum")), String::from("Lorem Ipsum") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); assert_eq!(consolidate_whitespace(String::new()), String::new()); assert_eq!( consolidate_whitespace(String::from(" Lorem \t\tIpsum \t ")), String::from(" Lorem Ipsum ") ); assert_eq!( consolidate_whitespace(String::from(" Lorem \r\n\r\n\tIpsum")), String::from(" Lorem Ipsum") ); } #[test] fn t_to_u() { assert_eq!(to_u(String::from("0"), 10), 0); assert_eq!(to_u(String::from("23"), 1), 23); assert_eq!(to_u(String::from(""), 0), 0); assert_eq!(to_u(String::from("zero"), 1), 1); } #[test] fn t_is_special_url() { assert!(is_special_url("query:")); assert!(is_special_url("query: example")); assert!(!is_special_url("query")); assert!(!is_special_url(" query:")); assert!(is_special_url("filter:")); assert!(is_special_url("filter: example")); assert!(!is_special_url("filter")); assert!(!is_special_url(" filter:")); assert!(is_special_url("exec:")); assert!(is_special_url("exec: example")); assert!(!is_special_url("exec")); assert!(!is_special_url(" exec:")); } #[test] fn t_is_http_url() { assert!(is_http_url("https://foo.bar")); assert!(is_http_url("http://")); assert!(is_http_url("https://")); assert!(!is_http_url("htt://foo.bar")); assert!(!is_http_url("http:/")); assert!(!is_http_url("foo://bar")); } #[test] fn t_is_query_url() { assert!(is_query_url("query:")); assert!(is_query_url("query: example")); assert!(!is_query_url("query")); assert!(!is_query_url(" query:")); } #[test] fn t_is_filter_url() { assert!(is_filter_url("filter:")); assert!(is_filter_url("filter: example")); assert!(!is_filter_url("filter")); assert!(!is_filter_url(" 
filter:")); } #[test] fn t_is_exec_url() { assert!(is_exec_url("exec:")); assert!(is_exec_url("exec: example")); assert!(!is_exec_url("exec")); assert!(!is_exec_url(" exec:")); } #[test] fn t_trim() { assert_eq!(trim(String::from(" xxx\r\n")), "xxx"); assert_eq!(trim(String::from("\n\n abc foobar\n")), "abc foobar"); assert_eq!(trim(String::from("")), ""); assert_eq!(trim(String::from(" \n")), ""); } #[test] fn t_trim_end() { assert_eq!(trim_end(String::from("quux\n")), "quux"); } #[test] fn t_quote() { assert_eq!(quote("".to_string()), "\"\""); assert_eq!(quote("Hello World!".to_string()), "\"Hello World!\""); assert_eq!( quote("\"Hello World!\"".to_string()), "\"\\\"Hello World!\\\"\"" ); } #[test] fn t_quote_if_necessary() { assert_eq!(quote_if_necessary("".to_string()), ""); assert_eq!( quote_if_necessary("Hello World!".to_string()), "\"Hello World!\"" ); } #[test] fn t_is_valid_color() { let invalid = [ "awesome", "list", "of", "things", "that", "aren't", "colors", "color0123", "color1024", ]; for color in &invalid { assert!(!is_valid_color(color)); } let valid = [ "black", "red", "green", "yellow", "blue", "magenta", "cyan", "white", "default", "color0", "color163", ]; for color in &valid { assert!(is_valid_color(color)); } } #[test] fn t_strwidth() { assert_eq!(strwidth(""), 0); assert_eq!(strwidth("xx"), 2); assert_eq!(strwidth("\u{F91F}"), 2); assert_eq!(strwidth("\u{0007}"), 1); } #[test] fn t_strwidth_stfl() { assert_eq!(strwidth_stfl(""), 0); assert_eq!(strwidth_stfl("x<hi>x"), 3); assert_eq!(strwidth_stfl("x<>x"), 4); assert_eq!(strwidth_stfl("\u{F91F}"), 2); assert_eq!(strwidth_stfl("\u{0007}"), 1); assert_eq!(strwidth_stfl("<a"), 0); // #415 } #[test] fn t_is_valid_podcast_type() { assert!(is_valid_podcast_type("audio/mpeg")); assert!(is_valid_podcast_type("audio/mp3")); assert!(is_valid_podcast_type("audio/x-mp3")); assert!(is_valid_podcast_type("audio/ogg")); assert!(is_valid_podcast_type("application/ogg")); 
assert!(!is_valid_podcast_type("image/jpeg")); assert!(!is_valid_podcast_type("image/png")); assert!(!is_valid_podcast_type("text/plain")); assert!(!is_valid_podcast_type("application/zip")); } #[test] fn t_is_valid_attribte() { let invalid = ["foo", "bar", "baz", "quux"]; for attr in &invalid { assert!(!is_valid_attribute(attr)); } let valid = [ "standout", "underline", "reverse", "blink", "dim", "bold", "protect", "invis", "default", ]; for attr in &valid { assert!(is_valid_attribute(attr)); } } #[test] fn t_unescape_url() { assert!(unescape_url(String::from("foo%20bar")).unwrap() == String::from("foo bar")); assert!( unescape_url(String::from( "%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D" )) .unwrap() == String::from("!#$&'()*+,/:;=?@[]") ); } #[test] fn t_get_command_output() { assert_eq!( get_command_output("ls /dev/null"), "/dev/null\n".to_string() ); assert_eq!( get_command_output("a-program-that-is-guaranteed-to-not-exists"), "".to_string() ); } #[test] fn t_run_command_executes_given_command_with_given_argument() { use self::tempfile::TempDir; use std::{thread, time}; let tmp = TempDir::new().unwrap(); let filepath = { let mut filepath = tmp.path().to_owned(); filepath.push("sentry"); filepath }; assert!(!filepath.exists()); run_command("touch", filepath.to_str().unwrap()); thread::sleep(time::Duration::from_millis(10)); assert!(filepath.exists()); } #[test] fn t_run_command_doesnt_wait_for_the_command_to_finish() { use std::time::{Duration, Instant}; let start = Instant::now(); let five: &str = "5"; run_command("sleep", five); let runtime = start.elapsed(); assert!(runtime < Duration::from_secs(1)); } #[test] fn t_run_program() { let input1 = "this is a multine-line\ntest string"; assert_eq!(run_program(&["cat"], input1), input1); assert_eq!( run_program(&["echo", "-n", "hello world"], ""), "hello world" ); } #[test] fn t_make_title() { let mut input = String::from("http://example.com/Item"); assert_eq!(make_title(input), String::from("Item")); 
input = String::from("http://example.com/This-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is_the_title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.php"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.html"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.htm"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/This_is-the_title.aspx"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/items/misc/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/item/"); assert_eq!(make_title(input), String::from("Item")); input = String::from("http://example.com/item/////////////"); assert_eq!(make_title(input), String::from("Item")); input = String::from("blahscheme://example.com/this-is-the-title"); assert_eq!(make_title(input), String::from("This is the title")); input = String::from("http://example.com/story/aug/title-with-dashes?a=b"); assert_eq!(make_title(input), String::from("Title with dashes")); input = String::from("http://example.com/title-with-dashes?a=b&x=y&utf8=✓"); assert_eq!(make_title(input), String::from("Title with dashes")); input = String::from("https://example.com/It%27s%202017%21"); assert_eq!(make_title(input), String::from("It's 2017!")); input = String::from("https://example.com/?format=rss"); 
assert_eq!(make_title(input), String::from("")); assert_eq!(make_title(String::from("")), String::from("")); } #[test] fn t_resolve_relative() { assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("/baz")), Path::new("/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("/config/baz")), Path::new("/config/baz") ); assert_eq!( resolve_relative(Path::new("/foo/bar"), Path::new("baz")), Path::new("/foo/baz") ); assert_eq!( resolve_relative(Path::new("/config"), Path::new("baz")), Path::new("/baz") ); } }
extern crate uuid; use self::uuid::Uuid; use problem::LpFileFormat; use problem::{LpProblem, Problem}; use std::collections::HashMap; use std::fs; use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::io::Error; use std::process::Command; #[derive(Debug, PartialEq)] pub enum Status { Optimal, SubOptimal, Infeasible, Unbounded, NotSolved, } pub trait SolverTrait { type P: Problem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String>; } pub struct GurobiSolver { name: String, command_name: String, temp_solution_file: String, } pub struct CbcSolver { name: String, command_name: String, temp_solution_file: String, } pub struct GlpkSolver { name: String, command_name: String, temp_solution_file: String, } impl GurobiSolver { pub fn new() -> GurobiSolver { GurobiSolver { name: "Gurobi".to_string(), command_name: "gurobi_cl".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> GurobiSolver { GurobiSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { let mut vars_value: HashMap<_, _> = HashMap::new(); let mut file = BufReader::new(f); let mut buffer = String::new(); let _ = file.read_line(&mut buffer); if let Some(_) = buffer.split(" ").next() { for line in file.lines() { let l = line.unwrap(); // Gurobi version 7 add comments on the header file if let Some('#') = l.chars().next() { continue; } let result_line: Vec<_> = l.split_whitespace().collect(); if result_line.len() == 2 { match result_line[1].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[0].to_string(), n); } Err(e) => return Err(format!("{}", e.to_string())), } } else { return Err("Incorrect solution format".to_string()); } } } else { return 
Err("Incorrect solution format".to_string()); } Ok((Status::Optimal, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl CbcSolver { pub fn new() -> CbcSolver { CbcSolver { name: "Cbc".to_string(), command_name: "cbc".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> CbcSolver { CbcSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } pub fn temp_solution_file(&self, temp_solution_file: String) -> CbcSolver { CbcSolver { name: self.name.clone(), command_name: self.command_name.clone(), temp_solution_file: temp_solution_file, } } pub fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { let mut vars_value: HashMap<_, _> = HashMap::new(); let mut file = BufReader::new(f); let mut buffer = String::new(); let _ = file.read_line(&mut buffer); let status = if let Some(status_line) = buffer.split_whitespace().next() { match status_line.split_whitespace().next() { Some("Optimal") => Status::Optimal, // Infeasible status is either "Infeasible" or "Integer infeasible" Some("Infeasible") | Some("Integer") => Status::Infeasible, Some("Unbounded") => Status::Unbounded, // "Stopped" can be "on time", "on iterations", "on difficulties" or "on ctrl-c" Some("Stopped") => Status::SubOptimal, _ => Status::NotSolved, } } else { return Err("Incorrect solution format".to_string()); }; for line in file.lines() { let l = line.unwrap(); let result_line: Vec<_> = l.split_whitespace().collect(); if result_line.len() == 4 { match result_line[2].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[1].to_string(), n); } Err(e) => return 
Err(e.to_string()), } } else { return Err("Incorrect solution format".to_string()); } } Ok((status, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl GlpkSolver { pub fn new() -> GlpkSolver { GlpkSolver { name: "Glpk".to_string(), command_name: "glpsol".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> GlpkSolver { GlpkSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } pub fn temp_solution_file(&self, temp_solution_file: String) -> GlpkSolver { GlpkSolver { name: self.name.clone(), command_name: self.command_name.clone(), temp_solution_file: temp_solution_file, } } pub fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { fn read_size(line: Option<Result<String, Error>>) -> Result<usize, String> { match line { Some(Ok(l)) => match l.split_whitespace().nth(1) { Some(value) => match value.parse::<usize>() { Ok(v) => Ok(v), _ => return Err("Incorrect solution format".to_string()), }, _ => return Err("Incorrect solution format".to_string()), }, _ => return Err("Incorrect solution format".to_string()), } } let mut vars_value: HashMap<_, _> = HashMap::new(); let file = BufReader::new(f); let mut iter = file.lines(); let row = match read_size(iter.nth(1)) { Ok(value) => value, Err(e) => return Err(e.to_string()), }; let col = match read_size(iter.nth(0)) { Ok(value) => value, Err(e) => return Err(e.to_string()), }; let status = match iter.nth(1) { Some(Ok(status_line)) => match &status_line[12..] 
{ "INTEGER OPTIMAL" | "OPTIMAL" => Status::Optimal, "INFEASIBLE (FINAL)" | "INTEGER EMPTY" => Status::Infeasible, "UNDEFINED" => Status::NotSolved, "INTEGER UNDEFINED" | "UNBOUNDED" => Status::Unbounded, _ => { return Err("Incorrect solution format: Unknown solution status".to_string()) } }, _ => return Err("Incorrect solution format: No solution status found".to_string()), }; let mut result_lines = iter.skip(row + 7); for _ in 0..col { let line = match result_lines.next() { Some(Ok(l)) => l, _ => { return Err( "Incorrect solution format: Not all columns are present".to_string() ) } }; let result_line: Vec<_> = line.split_whitespace().collect(); if result_line.len() >= 4 { match result_line[3].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[1].to_string(), n); } Err(e) => return Err(e.to_string()), } } else { return Err( "Incorrect solution format: Column specification has to few fields" .to_string(), ); } } Ok((status, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl SolverTrait for GurobiSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg(format!("ResultFile={}", self.temp_solution_file)) .arg(file_model) .output() { Ok(r) => { let mut status = Status::SubOptimal; if String::from_utf8(r.stdout) .expect("") .contains("Optimal solution found") { status = Status::Optimal; } if r.status.success() { let (_, res) = try!(self.read_solution()); Ok((status, res)) } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), }; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } } 
impl SolverTrait for CbcSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg(file_model) .arg("solve") .arg("solution") .arg(&self.temp_solution_file) .output() { Ok(r) => { if r.status.success() { self.read_solution() } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), }; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } } impl SolverTrait for GlpkSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg("--lp") .arg(file_model) .arg("-o") .arg(&self.temp_solution_file) .output() { Ok(r) => { if r.status.success() { self.read_solution() } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), }; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } } Fix gurobi infeasible status extern crate uuid; use self::uuid::Uuid; use problem::LpFileFormat; use problem::{LpProblem, Problem}; use std::collections::HashMap; use std::fs; use std::fs::File; use std::io::prelude::*; use std::io::BufReader; use std::io::Error; use std::process::Command; #[derive(Debug, PartialEq)] pub enum Status { Optimal, SubOptimal, Infeasible, Unbounded, NotSolved, } pub trait SolverTrait { type P: Problem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String>; } pub struct GurobiSolver { name: String, command_name: String, temp_solution_file: String, } pub struct CbcSolver { name: String, command_name: String, temp_solution_file: String, } pub struct GlpkSolver { name: String, 
command_name: String, temp_solution_file: String, } impl GurobiSolver { pub fn new() -> GurobiSolver { GurobiSolver { name: "Gurobi".to_string(), command_name: "gurobi_cl".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> GurobiSolver { GurobiSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { let mut vars_value: HashMap<_, _> = HashMap::new(); let mut file = BufReader::new(f); let mut buffer = String::new(); let _ = file.read_line(&mut buffer); if let Some(_) = buffer.split(" ").next() { for line in file.lines() { let l = line.unwrap(); // Gurobi version 7 add comments on the header file if let Some('#') = l.chars().next() { continue; } let result_line: Vec<_> = l.split_whitespace().collect(); if result_line.len() == 2 { match result_line[1].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[0].to_string(), n); } Err(e) => return Err(format!("{}", e.to_string())), } } else { return Err("Incorrect solution format".to_string()); } } } else { return Err("Incorrect solution format".to_string()); } Ok((Status::Optimal, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl CbcSolver { pub fn new() -> CbcSolver { CbcSolver { name: "Cbc".to_string(), command_name: "cbc".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> CbcSolver { CbcSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } pub fn temp_solution_file(&self, 
temp_solution_file: String) -> CbcSolver { CbcSolver { name: self.name.clone(), command_name: self.command_name.clone(), temp_solution_file: temp_solution_file, } } pub fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { let mut vars_value: HashMap<_, _> = HashMap::new(); let mut file = BufReader::new(f); let mut buffer = String::new(); let _ = file.read_line(&mut buffer); let status = if let Some(status_line) = buffer.split_whitespace().next() { match status_line.split_whitespace().next() { Some("Optimal") => Status::Optimal, // Infeasible status is either "Infeasible" or "Integer infeasible" Some("Infeasible") | Some("Integer") => Status::Infeasible, Some("Unbounded") => Status::Unbounded, // "Stopped" can be "on time", "on iterations", "on difficulties" or "on ctrl-c" Some("Stopped") => Status::SubOptimal, _ => Status::NotSolved, } } else { return Err("Incorrect solution format".to_string()); }; for line in file.lines() { let l = line.unwrap(); let result_line: Vec<_> = l.split_whitespace().collect(); if result_line.len() == 4 { match result_line[2].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[1].to_string(), n); } Err(e) => return Err(e.to_string()), } } else { return Err("Incorrect solution format".to_string()); } } Ok((status, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl GlpkSolver { pub fn new() -> GlpkSolver { GlpkSolver { name: "Glpk".to_string(), command_name: "glpsol".to_string(), temp_solution_file: format!("{}.sol", Uuid::new_v4().to_string()), } } pub fn command_name(&self, command_name: String) -> GlpkSolver { GlpkSolver { name: self.name.clone(), command_name: command_name, temp_solution_file: self.temp_solution_file.clone(), } } pub 
fn temp_solution_file(&self, temp_solution_file: String) -> GlpkSolver { GlpkSolver { name: self.name.clone(), command_name: self.command_name.clone(), temp_solution_file: temp_solution_file, } } pub fn read_solution(&self) -> Result<(Status, HashMap<String, f32>), String> { fn read_specific_solution(f: &File) -> Result<(Status, HashMap<String, f32>), String> { fn read_size(line: Option<Result<String, Error>>) -> Result<usize, String> { match line { Some(Ok(l)) => match l.split_whitespace().nth(1) { Some(value) => match value.parse::<usize>() { Ok(v) => Ok(v), _ => return Err("Incorrect solution format".to_string()), }, _ => return Err("Incorrect solution format".to_string()), }, _ => return Err("Incorrect solution format".to_string()), } } let mut vars_value: HashMap<_, _> = HashMap::new(); let file = BufReader::new(f); let mut iter = file.lines(); let row = match read_size(iter.nth(1)) { Ok(value) => value, Err(e) => return Err(e.to_string()), }; let col = match read_size(iter.nth(0)) { Ok(value) => value, Err(e) => return Err(e.to_string()), }; let status = match iter.nth(1) { Some(Ok(status_line)) => match &status_line[12..] 
{ "INTEGER OPTIMAL" | "OPTIMAL" => Status::Optimal, "INFEASIBLE (FINAL)" | "INTEGER EMPTY" => Status::Infeasible, "UNDEFINED" => Status::NotSolved, "INTEGER UNDEFINED" | "UNBOUNDED" => Status::Unbounded, _ => { return Err("Incorrect solution format: Unknown solution status".to_string()) } }, _ => return Err("Incorrect solution format: No solution status found".to_string()), }; let mut result_lines = iter.skip(row + 7); for _ in 0..col { let line = match result_lines.next() { Some(Ok(l)) => l, _ => { return Err( "Incorrect solution format: Not all columns are present".to_string() ) } }; let result_line: Vec<_> = line.split_whitespace().collect(); if result_line.len() >= 4 { match result_line[3].parse::<f32>() { Ok(n) => { vars_value.insert(result_line[1].to_string(), n); } Err(e) => return Err(e.to_string()), } } else { return Err( "Incorrect solution format: Column specification has to few fields" .to_string(), ); } } Ok((status, vars_value)) } match File::open(&self.temp_solution_file) { Ok(f) => { let res = try!(read_specific_solution(&f)); let _ = fs::remove_file(&self.temp_solution_file); Ok(res) } Err(_) => return Err("Cannot open file".to_string()), } } } impl SolverTrait for GurobiSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg(format!("ResultFile={}", self.temp_solution_file)) .arg(file_model) .output() { Ok(r) => { let mut status = Status::SubOptimal; let result = String::from_utf8(r.stdout).expect(""); if result.contains("Optimal solution found") { status = Status::Optimal; } else if result.contains("infeasible") { status = Status::Infeasible; } if r.status.success() { let (_, res) = try!(self.read_solution()); Ok((status, res)) } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), 
}; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } } impl SolverTrait for CbcSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg(file_model) .arg("solve") .arg("solution") .arg(&self.temp_solution_file) .output() { Ok(r) => { if r.status.success() { self.read_solution() } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), }; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } } impl SolverTrait for GlpkSolver { type P = LpProblem; fn run(&self, problem: &Self::P) -> Result<(Status, HashMap<String, f32>), String> { let file_model = &format!("{}.lp", problem.unique_name); match problem.write_lp(file_model) { Ok(_) => { let result = match Command::new(&self.command_name) .arg("--lp") .arg(file_model) .arg("-o") .arg(&self.temp_solution_file) .output() { Ok(r) => { if r.status.success() { self.read_solution() } else { Err(r.status.to_string()) } } Err(_) => Err(format!("Error running the {} solver", self.name)), }; let _ = fs::remove_file(&file_model); result } Err(e) => Err(e.to_string()), } } }
removed extra debug line
use std::collections::HashMap; use std::hash::BuildHasherDefault; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use fnv::FnvHasher; use bitset::BitSet; use join::Join; use world::Component; use {Entity, Index, Generation}; #[doc(hidden)] pub trait PrivateStorage<U> { fn get_mask(&self) -> &BitSet; fn get_inner(&self) -> &U; fn get_inner_mut(&mut self) -> &mut U; } /// The `UnprotectedStorage` together with the `BitSet` that knows /// about which elements are stored, and which are not. pub struct MaskedStorage<T: Component> { mask: BitSet, inner: T::Storage, } impl<T: Component> MaskedStorage<T> { /// Creates a new `MaskedStorage`. This is called when you register /// a new component type within the world. pub fn new() -> MaskedStorage<T> { MaskedStorage { mask: BitSet::new(), inner: UnprotectedStorage::new(), } } fn open(&mut self) -> (&BitSet, &mut T::Storage) { (&self.mask, &mut self.inner) } } impl<T: Component> Drop for MaskedStorage<T> { fn drop(&mut self) { let mask = &self.mask; unsafe { self.inner.clean(|i| mask.contains(i as u32)); } } } impl<T: Component> PrivateStorage<T::Storage> for MaskedStorage<T> { fn get_mask(&self) -> &BitSet { &self.mask } fn get_inner(&self) -> &T::Storage { &self.inner } fn get_inner_mut(&mut self) -> &mut T::Storage { &mut self.inner } } /// A wrapper around the masked storage and the generations vector. /// Can be used for safe lookup of components, insertions and removes. /// This is what `World::read/write` locks for the user. 
pub struct Storage<T, D, G> { phantom: PhantomData<T>, gens: G, data: D, } impl<T, D, G> Storage<T, D, G> where G: Deref<Target = Vec<Generation>>, { /// Create a new `Storage` pub fn new(data: D, gens: G) -> Storage<T, D, G>{ Storage { phantom: PhantomData, gens: gens, data: data, } } fn has_gen(&self, e: Entity) -> bool { let g1 = Generation(1); e.get_gen() == *self.gens.get(e.get_id() as usize).unwrap_or(&g1) } } impl<T, D, G> Storage<T, D, G> where T: Component, D: Deref<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { /// Tries to read the data associated with an `Entity`. pub fn get(&self, e: Entity) -> Option<&T> { if self.data.mask.contains(e.get_id() as u32) && self.has_gen(e) { Some(unsafe { self.data.inner.get(e.get_id()) }) }else {None} } } impl<T, D, G> Storage<T, D, G> where T: Component, D: DerefMut<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { /// Tries to mutate the data associated with an `Entity`. pub fn get_mut(&mut self, e: Entity) -> Option<&mut T> { if self.data.mask.contains(e.get_id() as u32) && self.has_gen(e) { Some(unsafe { self.data.inner.get_mut(e.get_id()) }) }else {None} } /// Inserts new data for a given `Entity`. pub fn insert(&mut self, e: Entity, v: T) { let id = e.get_id(); assert!(self.has_gen(e)); if self.data.mask.contains(id as u32) { *unsafe{ self.data.inner.get_mut(id) } = v; } else { self.data.mask.add(id as u32); unsafe{ self.data.inner.insert(id, v) }; } } /// Removes the data associated with an `Entity`. 
pub fn remove(&mut self, e: Entity) -> Option<T> { let id = e.get_id(); if self.has_gen(e) && self.data.mask.remove(id as u32) { Some(unsafe{ self.data.inner.remove(id) }) }else { None } } } impl<'a, T, D, G> Join for &'a Storage<T, D, G> where T: Component, D: Deref<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { type Type = &'a T; type Value = &'a T::Storage; type Mask = &'a BitSet; fn open(self) -> (Self::Mask, Self::Value) { (&self.data.mask, &self.data.inner) } unsafe fn get(v: Self::Value, i: Index) -> &'a T { v.get(i) } } impl<'a, T, D, G> Join for &'a mut Storage<T, D, G> where T: Component, D: DerefMut<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { type Type = &'a mut T; type Value = &'a mut T::Storage; type Mask = &'a BitSet; fn open(self) -> (Self::Mask, Self::Value) { self.data.open() } unsafe fn get(v: Self::Value, i: Index) -> &'a mut T { v.get_mut(i) } } /// Used by the framework to quickly join componets pub trait UnprotectedStorage<T>: Sized { /// Creates a new `Storage<T>`. This is called when you register a new /// component type within the world. fn new() -> Self; /// Clean the storage given a check to figure out if an index /// is valid or not. Allows us to safely drop the storage. unsafe fn clean<F>(&mut self, F) where F: Fn(Index) -> bool; /// Tries reading the data associated with an `Index`. /// This is unsafe because the external set used /// to protect this storage is absent. unsafe fn get(&self, id: Index) -> &T; /// Tries mutating the data associated with an `Index`. /// This is unsafe because the external set used /// to protect this storage is absent. unsafe fn get_mut(&mut self, id: Index) -> &mut T; /// Inserts new data for a given `Index`. unsafe fn insert(&mut self, Index, T); /// Removes the data associated with an `Index`. unsafe fn remove(&mut self, Index) -> T; } /// HashMap-based storage. Best suited for rare components. 
pub struct HashMapStorage<T>(HashMap<Index, T, BuildHasherDefault<FnvHasher>>); impl<T> UnprotectedStorage<T> for HashMapStorage<T> { fn new() -> Self { let fnv = BuildHasherDefault::<FnvHasher>::default(); HashMapStorage(HashMap::with_hasher(fnv)) } unsafe fn clean<F>(&mut self, _: F) where F: Fn(Index) -> bool { //nothing to do } unsafe fn get(&self, id: Index) -> &T { self.0.get(&id).unwrap() } unsafe fn get_mut(&mut self, id: Index) -> &mut T { self.0.get_mut(&id).unwrap() } unsafe fn insert(&mut self, id: Index, v: T) { self.0.insert(id, v); } unsafe fn remove(&mut self, id: Index) -> T { self.0.remove(&id).unwrap() } } /// Vec-based storage, stores the generations of the data in /// order to match with given entities. Supposed to have maximum /// performance for the components mostly present in entities. pub struct VecStorage<T>(Vec<T>); impl<T> UnprotectedStorage<T> for VecStorage<T> { fn new() -> Self { VecStorage(Vec::new()) } unsafe fn clean<F>(&mut self, has: F) where F: Fn(Index) -> bool { use std::mem; for (i, v) in self.0.drain(..).enumerate() { if !has(i as Index) { // if v was not in the set the data is invalid // and we must forget it instead of dropping it mem::forget(v); } } } unsafe fn get(&self, id: Index) -> &T { self.0.get_unchecked(id as usize) } unsafe fn get_mut(&mut self, id: Index) -> &mut T { self.0.get_unchecked_mut(id as usize) } unsafe fn insert(&mut self, id: Index, v: T) { let id = id as usize; if self.0.len() <= id { let delta = id + 1 - self.0.len(); self.0.reserve(delta); self.0.set_len(id + 1); } self.0[id] = v; } unsafe fn remove(&mut self, id: Index) -> T { use std::ptr; ptr::read(self.get(id)) } } #[cfg(test)] mod map_test { use mopa::Any; use super::{Storage, MaskedStorage, UnprotectedStorage, VecStorage}; use {Component, Entity, Index, Generation}; struct Comp<T>(T); impl<T: Any + Send + Sync> Component for Comp<T> { type Storage = VecStorage<Comp<T>>; } fn ent(i: Index) -> Entity { Entity::new(i, Generation(0)) } #[test] 
fn insert() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { c.insert(ent(i), Comp(i)); } for i in 0..1_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } } #[test] fn insert_100k() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); for i in 0..100_000 { c.insert(ent(i), Comp(i)); } for i in 0..100_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } } #[test] fn remove() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { c.insert(ent(i), Comp(i)); } for i in 0..1_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } for i in 0..1_000 { c.remove(ent(i)); } for i in 0..1_000 { assert!(c.get(ent(i)).is_none()); } } #[test] fn test_gen() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); for i in 0..1_000i32 { c.insert(Entity::new(i as u32, Generation(0)), Comp(i)); c.insert(Entity::new(i as u32, Generation(0)), Comp(-i)); } for i in 0..1_000i32 { assert_eq!(c.get(Entity::new(i as u32, Generation(0))).unwrap().0, -i); } } #[test] fn insert_same_key() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); for i in 0..10_000 { c.insert(Entity::new(i as u32, Generation(0)), Comp(i)); assert_eq!(c.get(Entity::new(i as u32, Generation(0))).unwrap().0, i); } } #[should_panic] #[test] fn wrap() { let mut ms = MaskedStorage::new(); let mut gen = Vec::new(); let mut c = Storage::new(&mut ms, &mut gen); c.insert(Entity::new(1 << 25, Generation(0)), Comp(7)); } } #[cfg(test)] mod test { use std::convert::AsMut; use std::fmt::Debug; use super::{Storage, MaskedStorage, VecStorage, HashMapStorage}; use {Component, Entity, Generation}; #[derive(PartialEq, Eq, Debug)] struct Cvec(u32); impl From<u32> for Cvec { fn from(v: u32) -> Cvec { Cvec(v) } } impl AsMut<u32> for Cvec { fn 
as_mut(&mut self) -> &mut u32 { &mut self.0 } } impl Component for Cvec { type Storage = VecStorage<Cvec>; } #[derive(PartialEq, Eq, Debug)] struct Cmap(u32); impl From<u32> for Cmap { fn from(v: u32) -> Cmap { Cmap(v) } } impl AsMut<u32> for Cmap { fn as_mut(&mut self) -> &mut u32 { &mut self.0 } } impl Component for Cmap { type Storage = HashMapStorage<Cmap>; } fn test_add<T: Component + From<u32> + Debug + Eq>() { let mut ms = MaskedStorage::<T>::new(); let mut gen = Vec::new(); let mut s = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { assert_eq!(s.get(Entity::new(i, Generation(1))).unwrap(), &(i + 2718).into()); } } fn test_sub<T: Component + From<u32> + Debug + Eq>() { let mut ms = MaskedStorage::<T>::new(); let mut gen = Vec::new(); let mut s = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { assert_eq!(s.remove(Entity::new(i, Generation(1))).unwrap(), (i + 2718).into()); assert!(s.remove(Entity::new(i, Generation(1))).is_none()); } } fn test_get_mut<T: Component + From<u32> + AsMut<u32> + Debug + Eq>() { let mut ms = MaskedStorage::<T>::new(); let mut gen = Vec::new(); let mut s = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { *s.get_mut(Entity::new(i, Generation(1))).unwrap().as_mut() -= 718; } for i in 0..1_000 { assert_eq!(s.get(Entity::new(i, Generation(1))).unwrap(), &(i + 2000).into()); } } fn test_add_gen<T: Component + From<u32> + Debug + Eq>() { let mut ms = MaskedStorage::<T>::new(); let mut gen = Vec::new(); let mut s = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); s.insert(Entity::new(i, Generation(2)), (i + 31415).into()); } for i in 0..1_000 { // this is removed since vec and hashmap disagree // on how this behavior 
should work... //assert!(s.get(Entity::new(i, 1)).is_none()); assert_eq!(s.get(Entity::new(i, Generation(2))).unwrap(), &(i + 31415).into()); } } fn test_sub_gen<T: Component + From<u32> + Debug + Eq>() { let mut ms = MaskedStorage::<T>::new(); let mut gen = Vec::new(); let mut s = Storage::new(&mut ms, &mut gen); for i in 0..1_000 { s.insert(Entity::new(i, Generation(2)), (i + 2718).into()); } for i in 0..1_000 { assert!(s.remove(Entity::new(i, Generation(1))).is_none()); } } #[test] fn vec_test_add() { test_add::<Cvec>(); } #[test] fn vec_test_sub() { test_sub::<Cvec>(); } #[test] fn vec_test_get_mut() { test_get_mut::<Cvec>(); } #[test] fn vec_test_add_gen() { test_add_gen::<Cvec>(); } #[test] fn vec_test_sub_gen() { test_sub_gen::<Cvec>(); } #[test] fn hash_test_add() { test_add::<Cmap>(); } #[test] fn hash_test_sub() { test_sub::<Cmap>(); } #[test] fn hash_test_get_mut() { test_get_mut::<Cmap>(); } #[test] fn hash_test_add_gen() { test_add_gen::<Cmap>(); } #[test] fn hash_test_sub_gen() { test_sub_gen::<Cmap>(); } } Refactored storage tests use std::collections::HashMap; use std::hash::BuildHasherDefault; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; use fnv::FnvHasher; use bitset::BitSet; use join::Join; use world::Component; use {Entity, Index, Generation}; #[doc(hidden)] pub trait PrivateStorage<U> { fn get_mask(&self) -> &BitSet; fn get_inner(&self) -> &U; fn get_inner_mut(&mut self) -> &mut U; } /// The `UnprotectedStorage` together with the `BitSet` that knows /// about which elements are stored, and which are not. pub struct MaskedStorage<T: Component> { mask: BitSet, inner: T::Storage, } impl<T: Component> MaskedStorage<T> { /// Creates a new `MaskedStorage`. This is called when you register /// a new component type within the world. 
pub fn new() -> MaskedStorage<T> { MaskedStorage { mask: BitSet::new(), inner: UnprotectedStorage::new(), } } fn open(&mut self) -> (&BitSet, &mut T::Storage) { (&self.mask, &mut self.inner) } } impl<T: Component> Drop for MaskedStorage<T> { fn drop(&mut self) { let mask = &self.mask; unsafe { self.inner.clean(|i| mask.contains(i as u32)); } } } impl<T: Component> PrivateStorage<T::Storage> for MaskedStorage<T> { fn get_mask(&self) -> &BitSet { &self.mask } fn get_inner(&self) -> &T::Storage { &self.inner } fn get_inner_mut(&mut self) -> &mut T::Storage { &mut self.inner } } /// A wrapper around the masked storage and the generations vector. /// Can be used for safe lookup of components, insertions and removes. /// This is what `World::read/write` locks for the user. pub struct Storage<T, D, G> { phantom: PhantomData<T>, gens: G, data: D, } impl<T, D, G> Storage<T, D, G> where G: Deref<Target = Vec<Generation>>, { /// Create a new `Storage` pub fn new(data: D, gens: G) -> Storage<T, D, G>{ Storage { phantom: PhantomData, gens: gens, data: data, } } fn has_gen(&self, e: Entity) -> bool { let g1 = Generation(1); e.get_gen() == *self.gens.get(e.get_id() as usize).unwrap_or(&g1) } } impl<T, D, G> Storage<T, D, G> where T: Component, D: Deref<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { /// Tries to read the data associated with an `Entity`. pub fn get(&self, e: Entity) -> Option<&T> { if self.data.mask.contains(e.get_id() as u32) && self.has_gen(e) { Some(unsafe { self.data.inner.get(e.get_id()) }) }else {None} } } impl<T, D, G> Storage<T, D, G> where T: Component, D: DerefMut<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { /// Tries to mutate the data associated with an `Entity`. pub fn get_mut(&mut self, e: Entity) -> Option<&mut T> { if self.data.mask.contains(e.get_id() as u32) && self.has_gen(e) { Some(unsafe { self.data.inner.get_mut(e.get_id()) }) }else {None} } /// Inserts new data for a given `Entity`. 
pub fn insert(&mut self, e: Entity, v: T) { let id = e.get_id(); assert!(self.has_gen(e)); if self.data.mask.contains(id as u32) { *unsafe{ self.data.inner.get_mut(id) } = v; } else { self.data.mask.add(id as u32); unsafe{ self.data.inner.insert(id, v) }; } } /// Removes the data associated with an `Entity`. pub fn remove(&mut self, e: Entity) -> Option<T> { let id = e.get_id(); if self.has_gen(e) && self.data.mask.remove(id as u32) { Some(unsafe{ self.data.inner.remove(id) }) }else { None } } } impl<'a, T, D, G> Join for &'a Storage<T, D, G> where T: Component, D: Deref<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { type Type = &'a T; type Value = &'a T::Storage; type Mask = &'a BitSet; fn open(self) -> (Self::Mask, Self::Value) { (&self.data.mask, &self.data.inner) } unsafe fn get(v: Self::Value, i: Index) -> &'a T { v.get(i) } } impl<'a, T, D, G> Join for &'a mut Storage<T, D, G> where T: Component, D: DerefMut<Target = MaskedStorage<T>>, G: Deref<Target = Vec<Generation>>, { type Type = &'a mut T; type Value = &'a mut T::Storage; type Mask = &'a BitSet; fn open(self) -> (Self::Mask, Self::Value) { self.data.open() } unsafe fn get(v: Self::Value, i: Index) -> &'a mut T { v.get_mut(i) } } /// Used by the framework to quickly join componets pub trait UnprotectedStorage<T>: Sized { /// Creates a new `Storage<T>`. This is called when you register a new /// component type within the world. fn new() -> Self; /// Clean the storage given a check to figure out if an index /// is valid or not. Allows us to safely drop the storage. unsafe fn clean<F>(&mut self, F) where F: Fn(Index) -> bool; /// Tries reading the data associated with an `Index`. /// This is unsafe because the external set used /// to protect this storage is absent. unsafe fn get(&self, id: Index) -> &T; /// Tries mutating the data associated with an `Index`. /// This is unsafe because the external set used /// to protect this storage is absent. 
unsafe fn get_mut(&mut self, id: Index) -> &mut T; /// Inserts new data for a given `Index`. unsafe fn insert(&mut self, Index, T); /// Removes the data associated with an `Index`. unsafe fn remove(&mut self, Index) -> T; } /// HashMap-based storage. Best suited for rare components. pub struct HashMapStorage<T>(HashMap<Index, T, BuildHasherDefault<FnvHasher>>); impl<T> UnprotectedStorage<T> for HashMapStorage<T> { fn new() -> Self { let fnv = BuildHasherDefault::<FnvHasher>::default(); HashMapStorage(HashMap::with_hasher(fnv)) } unsafe fn clean<F>(&mut self, _: F) where F: Fn(Index) -> bool { //nothing to do } unsafe fn get(&self, id: Index) -> &T { self.0.get(&id).unwrap() } unsafe fn get_mut(&mut self, id: Index) -> &mut T { self.0.get_mut(&id).unwrap() } unsafe fn insert(&mut self, id: Index, v: T) { self.0.insert(id, v); } unsafe fn remove(&mut self, id: Index) -> T { self.0.remove(&id).unwrap() } } /// Vec-based storage, stores the generations of the data in /// order to match with given entities. Supposed to have maximum /// performance for the components mostly present in entities. 
pub struct VecStorage<T>(Vec<T>); impl<T> UnprotectedStorage<T> for VecStorage<T> { fn new() -> Self { VecStorage(Vec::new()) } unsafe fn clean<F>(&mut self, has: F) where F: Fn(Index) -> bool { use std::mem; for (i, v) in self.0.drain(..).enumerate() { if !has(i as Index) { // if v was not in the set the data is invalid // and we must forget it instead of dropping it mem::forget(v); } } } unsafe fn get(&self, id: Index) -> &T { self.0.get_unchecked(id as usize) } unsafe fn get_mut(&mut self, id: Index) -> &mut T { self.0.get_unchecked_mut(id as usize) } unsafe fn insert(&mut self, id: Index, v: T) { let id = id as usize; if self.0.len() <= id { let delta = id + 1 - self.0.len(); self.0.reserve(delta); self.0.set_len(id + 1); } self.0[id] = v; } unsafe fn remove(&mut self, id: Index) -> T { use std::ptr; ptr::read(self.get(id)) } } #[cfg(test)] mod map_test { use mopa::Any; use super::{Storage, MaskedStorage, UnprotectedStorage, VecStorage}; use {Component, Entity, Index, Generation}; struct Comp<T>(T); impl<T: Any + Send + Sync> Component for Comp<T> { type Storage = VecStorage<Comp<T>>; } fn ent(i: Index) -> Entity { Entity::new(i, Generation(0)) } #[test] fn insert() { let mut c = Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); for i in 0..1_000 { c.insert(ent(i), Comp(i)); } for i in 0..1_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } } #[test] fn insert_100k() { let mut c = Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); for i in 0..100_000 { c.insert(ent(i), Comp(i)); } for i in 0..100_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } } #[test] fn remove() { let mut c = Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); for i in 0..1_000 { c.insert(ent(i), Comp(i)); } for i in 0..1_000 { assert_eq!(c.get(ent(i)).unwrap().0, i); } for i in 0..1_000 { c.remove(ent(i)); } for i in 0..1_000 { assert!(c.get(ent(i)).is_none()); } } #[test] fn test_gen() { let mut c = 
Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); for i in 0..1_000i32 { c.insert(Entity::new(i as u32, Generation(0)), Comp(i)); c.insert(Entity::new(i as u32, Generation(0)), Comp(-i)); } for i in 0..1_000i32 { assert_eq!(c.get(Entity::new(i as u32, Generation(0))).unwrap().0, -i); } } #[test] fn insert_same_key() { let mut c = Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); for i in 0..10_000 { c.insert(Entity::new(i as u32, Generation(0)), Comp(i)); assert_eq!(c.get(Entity::new(i as u32, Generation(0))).unwrap().0, i); } } #[should_panic] #[test] fn wrap() { let mut c = Storage::new(Box::new(MaskedStorage::new()), Box::new(Vec::new())); c.insert(Entity::new(1 << 25, Generation(0)), Comp(7)); } } #[cfg(test)] mod test { use std::convert::AsMut; use std::fmt::Debug; use super::{Storage, MaskedStorage, VecStorage, HashMapStorage}; use {Component, Entity, Generation}; #[derive(PartialEq, Eq, Debug)] struct Cvec(u32); impl From<u32> for Cvec { fn from(v: u32) -> Cvec { Cvec(v) } } impl AsMut<u32> for Cvec { fn as_mut(&mut self) -> &mut u32 { &mut self.0 } } impl Component for Cvec { type Storage = VecStorage<Cvec>; } #[derive(PartialEq, Eq, Debug)] struct Cmap(u32); impl From<u32> for Cmap { fn from(v: u32) -> Cmap { Cmap(v) } } impl AsMut<u32> for Cmap { fn as_mut(&mut self) -> &mut u32 { &mut self.0 } } impl Component for Cmap { type Storage = HashMapStorage<Cmap>; } fn test_add<T: Component + From<u32> + Debug + Eq>() { let mut s = Storage::new(Box::new(MaskedStorage::<T>::new()), Box::new(Vec::new())); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { assert_eq!(s.get(Entity::new(i, Generation(1))).unwrap(), &(i + 2718).into()); } } fn test_sub<T: Component + From<u32> + Debug + Eq>() { let mut s = Storage::new(Box::new(MaskedStorage::<T>::new()), Box::new(Vec::new())); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { 
assert_eq!(s.remove(Entity::new(i, Generation(1))).unwrap(), (i + 2718).into()); assert!(s.remove(Entity::new(i, Generation(1))).is_none()); } } fn test_get_mut<T: Component + From<u32> + AsMut<u32> + Debug + Eq>() { let mut s = Storage::new(Box::new(MaskedStorage::<T>::new()), Box::new(Vec::new())); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); } for i in 0..1_000 { *s.get_mut(Entity::new(i, Generation(1))).unwrap().as_mut() -= 718; } for i in 0..1_000 { assert_eq!(s.get(Entity::new(i, Generation(1))).unwrap(), &(i + 2000).into()); } } fn test_add_gen<T: Component + From<u32> + Debug + Eq>() { let mut s = Storage::new(Box::new(MaskedStorage::<T>::new()), Box::new(Vec::new())); for i in 0..1_000 { s.insert(Entity::new(i, Generation(1)), (i + 2718).into()); s.insert(Entity::new(i, Generation(2)), (i + 31415).into()); } for i in 0..1_000 { // this is removed since vec and hashmap disagree // on how this behavior should work... //assert!(s.get(Entity::new(i, 1)).is_none()); assert_eq!(s.get(Entity::new(i, Generation(2))).unwrap(), &(i + 31415).into()); } } fn test_sub_gen<T: Component + From<u32> + Debug + Eq>() { let mut s = Storage::new(Box::new(MaskedStorage::<T>::new()), Box::new(Vec::new())); for i in 0..1_000 { s.insert(Entity::new(i, Generation(2)), (i + 2718).into()); } for i in 0..1_000 { assert!(s.remove(Entity::new(i, Generation(1))).is_none()); } } #[test] fn vec_test_add() { test_add::<Cvec>(); } #[test] fn vec_test_sub() { test_sub::<Cvec>(); } #[test] fn vec_test_get_mut() { test_get_mut::<Cvec>(); } #[test] fn vec_test_add_gen() { test_add_gen::<Cvec>(); } #[test] fn vec_test_sub_gen() { test_sub_gen::<Cvec>(); } #[test] fn hash_test_add() { test_add::<Cmap>(); } #[test] fn hash_test_sub() { test_sub::<Cmap>(); } #[test] fn hash_test_get_mut() { test_get_mut::<Cmap>(); } #[test] fn hash_test_add_gen() { test_add_gen::<Cmap>(); } #[test] fn hash_test_sub_gen() { test_sub_gen::<Cmap>(); } }
// Copyright 2017 pdb Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::fmt; use std::result; use common::*; use msf::Stream; use FallibleIterator; mod constants; mod data; mod header; mod primitive; use self::data::parse_type_data; use self::header::*; use self::primitive::type_data_for_primitive; pub use self::data::{TypeData,ClassKind,EnumValue,FieldAttributes,FunctionAttributes,MethodListEntry,TypeProperties}; pub use self::primitive::{Indirection,PrimitiveType}; /// `TypeInformation` provides zero-copy access to a PDB type data stream. /// /// PDB type information is stored as a stream of length-prefixed `Type` records, and thus the most /// fundamental operation supported by `TypeInformation` is to iterate over `Type`s. /// /// Types are uniquely identified by `TypeIndex`, and types are stored within the PDB in ascending /// order of `TypeIndex`. /// /// Many types refer to other types by `TypeIndex`, and these references may refer to other types /// forming a chain that's arbitrarily long. Fortunately, PDB format requires that types refer only /// to types with lower `TypeIndex`es; thus, the stream of types form a directed acyclic graph. /// /// `TypeInformation` can iterate by `TypeIndex`, since that's essentially the only operation /// permitted by the data. `TypeFinder` is a secondary data structure to provide efficient /// backtracking. 
/// /// # Examples /// /// Iterating over the types while building a `TypeFinder`: /// /// ``` /// # use pdb::FallibleIterator; /// # /// # fn test() -> pdb::Result<usize> { /// # let file = std::fs::File::open("fixtures/self/foo.pdb")?; /// # let mut pdb = pdb::PDB::open(file)?; /// /// let type_information = pdb.type_information()?; /// let mut type_finder = type_information.new_type_finder(); /// /// # let expected_count = type_information.len(); /// # let mut count: usize = 0; /// let mut iter = type_information.iter(); /// while let Some(typ) = iter.next()? { /// // build the type finder as we go /// type_finder.update(&iter); /// /// // parse the type record /// match typ.parse() { /// Ok(pdb::TypeData::Class{name, properties, fields: Some(fields), ..}) => { /// // this Type describes a class-like type with fields /// println!("type {} is a class named {}", typ.type_index(), name); /// /// // `fields` is a TypeIndex which refers to a FieldList /// // To find information about the fields, find and parse that Type /// match type_finder.find(fields)?.parse()? { /// pdb::TypeData::FieldList{ fields, continuation } => { /// // `fields` is a Vec<TypeData> /// for field in fields { /// if let pdb::TypeData::Member { offset, name, field_type, .. } = field { /// // follow `field_type` as desired /// println!(" - field {} at offset {:x}", name, offset); /// } else { /// // handle member functions, nested types, etc. 
/// } /// } /// /// if let Some(more_fields) = continuation { /// // A FieldList can be split across multiple records /// // TODO: follow `more_fields` and handle the next FieldList /// } /// } /// _ => { } /// } /// /// }, /// Ok(_) => { /// // ignore everything that's not a class-like type /// }, /// Err(pdb::Error::UnimplementedTypeKind(_)) => { /// // found an unhandled type record /// // this probably isn't fatal in most use cases /// }, /// Err(e) => { /// // other error, probably is worth failing /// return Err(e); /// } /// } /// # count += 1; /// } /// /// # assert_eq!(expected_count, count); /// # Ok(count) /// # } /// # assert!(test().expect("test") > 8000); /// ``` #[derive(Debug)] pub struct TypeInformation<'t> { stream: Stream<'t>, header: Header, } impl<'t> TypeInformation<'t> { /// Returns an iterator that can traverse the type table in sequential order. pub fn iter(&self) -> TypeIter { // get a parse buffer let mut buf = self.stream.parse_buffer(); // drop the header // this can't fail; we've already read this once buf.take(self.header.header_size as usize).expect("dropping TPI header"); TypeIter{ buf: buf, type_index: self.header.minimum_type_index, } } /// Returns the number of types contained in this `TypeInformation`. /// /// Note that primitive types are not stored in the PDB file, so the number of distinct types /// reachable via this `TypeInformation` will be higher than `len()`. pub fn len(&self) -> usize() { (self.header.maximum_type_index - self.header.minimum_type_index) as usize } /// Returns a `TypeFinder` with a default time-space tradeoff. /// /// The `TypeFinder` is initially empty and must be populated by iterating. 
pub fn new_type_finder(&self) -> TypeFinder { new_type_finder(self, 3) } } pub fn new_type_information(stream: Stream) -> Result<TypeInformation> { let h; { let mut buf = stream.parse_buffer(); h = Header::parse(&mut buf)?; } Ok(TypeInformation{ stream: stream, header: h, }) } /// This buffer is used when a `Type` refers to a primitive type. It doesn't contain anything /// type-specific, but it does parse as `raw_type() == 0xffff`, which is a reserved value. Seems /// like a reasonable thing to do. const PRIMITIVE_TYPE: &'static [u8] = b"\xff\xff"; /// Represents a type from the type table. A `Type` has been minimally processed and may not be /// correctly formed or even understood by this library. /// /// To avoid copying, `Type`s exist as references to data owned by the parent `TypeInformation`. /// Therefore, a `Type` may not outlive its parent. #[derive(Copy,Clone,PartialEq)] pub struct Type<'t>(TypeIndex, &'t [u8]); impl<'t> Type<'t> { /// Returns this type's `TypeIndex`. pub fn type_index(&self) -> TypeIndex { self.0 } /// Returns the length of this type's data in terms of bytes in the on-disk format. /// /// Types are prefixed by length, which is not included in this count. pub fn len(&self) -> usize { self.1.len() } /// Returns the kind of type identified by this `Type`. /// /// As a special case, if this `Type` is actually a primitive type, `raw_kind()` will return /// `0xffff`. #[inline] pub fn raw_kind(&self) -> u16 { debug_assert!(self.1.len() >= 2); // assemble a little-endian u16 (self.1[0] as u16) | ((self.1[1] as u16) << 8) } /// Parse this Type into a TypeData. 
/// /// # Errors /// /// * `Error::UnimplementedTypeKind(kind)` if the type record isn't currently understood by this /// library /// * `Error::UnexpectedEof` if the type record is malformed pub fn parse(&self) -> Result<TypeData<'t>> { if self.0 < 0x1000 { // Primitive type type_data_for_primitive(self.0) } else { let mut buf = ParseBuffer::from(self.1); parse_type_data(&mut buf) } } } impl<'t> fmt::Debug for Type<'t> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Type{{ kind: 0x{:4x} [{} bytes] }}", self.raw_kind(), self.1.len()) } } /// A `TypeFinder` is a secondary, in-memory data structure that permits efficiently finding types /// by `TypeIndex`. It starts out empty and must be populated by calling `update(&TypeIter)` while /// iterating. /// /// `TypeFinder` allocates all the memory it needs when it is first created. The footprint is /// directly proportional to the total number of types; see `TypeInformation.len()`. /// /// # Time/space trade-off /// /// The naïve approach is to store the position of each `Type` as they are covered in the stream. /// The cost is memory: namely one `u32` per `Type`. /// /// Compare this approach to a `TypeFinder` that stores the position of every Nth type. Memory /// requirements would be reduced by a factor of N in exchange for requiring an average of (N-1)/2 /// iterations per lookup. However, iteration is cheap sequential memory access, and spending less /// memory on `TypeFinder` means more of the data can fit in the cache, so this is likely a good /// trade-off for small-ish values of N. /// /// `TypeFinder` is parameterized by `shift` which controls this trade-off as powers of two: /// /// * If `shift` is 0, `TypeFinder` stores 4 bytes per `Type` and always performs direct lookups. /// * If `shift` is 1, `TypeFinder` stores 2 bytes per `Type` and averages 0.5 iterations per lookup. /// * If `shift` is 2, `TypeFinder` stores 1 byte per `Type` and averages 1.5 iterations per lookup. 
/// * If `shift` is 3, `TypeFinder` stores 4 bits per `Type` and averages 3.5 iterations per lookup. /// * If `shift` is 4, `TypeFinder` stores 2 bits per `Type` and averages 7.5 iterations per lookup. /// * If `shift` is 5, `TypeFinder` stores 1 bit per `Type` and averages 15.5 iterations per lookup. /// /// This list can continue but with rapidly diminishing returns. Iteration cost is proportional to /// type size, which varies, but typical numbers from a large program are: /// /// * 24% of types are 12 bytes /// * 34% of types are <= 16 bytes /// * 84% of types are <= 32 bytes /// /// A `shift` of 2 or 3 is likely appropriate for most workloads. 500K types would require 1 MB or /// 500 KB of memory respectively, and lookups -- though indirect -- would still usually need only /// one or two 64-byte cache lines. #[derive(Debug)] pub struct TypeFinder<'t> { buffer: ParseBuffer<'t>, minimum_type_index: TypeIndex, maximum_type_index: TypeIndex, positions: Vec<u32>, shift: u8, } fn new_type_finder<'b, 't: 'b>(type_info: &'b TypeInformation<'t>, shift: u8) -> TypeFinder<'b> { let count = type_info.header.maximum_type_index - type_info.header.minimum_type_index; let shifted_count = (count >> shift) as usize; let mut positions: Vec<u32> = Vec::with_capacity(shifted_count); // add record zero, which is identical regardless of shift positions.push(type_info.header.header_size); TypeFinder{ buffer: type_info.stream.parse_buffer(), minimum_type_index: type_info.header.minimum_type_index, maximum_type_index: type_info.header.maximum_type_index, positions: positions, shift: shift, } } impl<'t> TypeFinder<'t> { /// Given a `TypeIndex`, find which position in the Vec we should jump to and how many times we /// need to iterate to find the requested type. /// /// `shift` refers to the size of these bit shifts. 
#[inline]
fn resolve(&self, type_index: TypeIndex) -> (usize, usize) {
    // Offset the requested index so that the first stored type maps to zero.
    let raw = type_index - self.minimum_type_index;
    // Returns (slot in `positions` to jump to,
    //          number of records to skip sequentially after the jump).
    // The low `shift` bits of `raw` are the residual iteration count.
    (
        (raw >> self.shift) as usize,
        (raw & ((1 << self.shift) - 1)) as usize
    )
}

/// Returns the highest `TypeIndex` which is currently served by this `TypeFinder`.
///
/// In general, you shouldn't need to consider this. Types always refer to types with lower
/// `TypeIndex`es, and either:
///
/// * You obtained a `Type` by iterating, in which case you should be calling `update()` as you
///   iterate, and in which case all types it can reference are <= `max_indexed_type()`, or
/// * You got a `Type` from this `TypeFinder`, in which case all types it can reference are
///   still <= `max_indexed_type()`.
///
#[inline]
pub fn max_indexed_type(&self) -> TypeIndex {
    // Each slot in `positions` covers 2^shift consecutive type indices.
    (self.positions.len() << self.shift) as TypeIndex + self.minimum_type_index - 1
}

/// Update this `TypeFinder` based on the current position of a `TypeIter`.
///
/// Do this each time you call `.next()`.
#[inline]
pub fn update(&mut self, iterator: &TypeIter) {
    let (vec_index, iteration_count) = self.resolve(iterator.type_index);
    // Only record a position when the iterator sits exactly on a slot boundary
    // (iteration_count == 0) and that slot is the next one we haven't seen yet.
    if iteration_count == 0 && vec_index == self.positions.len() {
        let pos = iterator.buf.pos();
        // Stream offsets are stored as u32 to halve the footprint; guard the narrowing.
        assert!(pos < u32::max_value() as usize);
        self.positions.push(pos as u32);
    }
}

/// Find a type by `TypeIndex`.
///
/// # Errors
///
/// * `Error::TypeNotFound(type_index)` if you ask for a type that doesn't exist
/// * `Error::TypeNotIndexed(type_index, max_indexed_type)` if you ask for a type that is known
///   to exist but is not currently known by this `TypeFinder`.
pub fn find(&self, type_index: TypeIndex) -> Result<Type<'t>> { if type_index < self.minimum_type_index { return Ok(Type(type_index, PRIMITIVE_TYPE)); } else if type_index > self.maximum_type_index { return Err(Error::TypeNotFound(type_index)); } // figure out where we'd find this let (vec_index, iteration_count) = self.resolve(type_index); if let Some(pos) = self.positions.get(vec_index) { // hit let mut buf = self.buffer.clone(); // jump forwards buf.take(*pos as usize)?; // skip some records for _ in 0..iteration_count { let length = buf.parse_u16()?; buf.take(length as usize)?; } // read the type let length = buf.parse_u16()?; Ok(Type(type_index, buf.take(length as usize)?)) } else { // miss Err(Error::TypeNotIndexed(type_index, self.max_indexed_type())) } } } /// A `TypeIter` iterates over a `TypeInformation`, producing `Types`s. /// /// Type streams are represented internally as a series of records, each of which have a length, a /// type, and a type-specific field layout. Iteration performance is therefore similar to a linked /// list. #[derive(Debug)] pub struct TypeIter<'t> { buf: ParseBuffer<'t>, type_index: TypeIndex, } impl<'t> FallibleIterator for TypeIter<'t> { type Item = Type<'t>; type Error = Error; fn next(&mut self) -> result::Result<Option<Self::Item>, Self::Error> { // see if we're at EOF if self.buf.len() == 0 { return Ok(None); } // read the length of the next type let length = self.buf.parse_u16()? as usize; // validate if length < 2 { // this can't be correct return Err(Error::TypeTooShort); } // grab the type itself let type_buf = self.buf.take(length)?; let my_type_index = self.type_index; self.type_index += 1; // Done Ok(Some(Type(my_type_index, type_buf))) } } Remove () from usize(). // Copyright 2017 pdb Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. 
This file may not be // copied, modified, or distributed except according to those terms. use std::fmt; use std::result; use common::*; use msf::Stream; use FallibleIterator; mod constants; mod data; mod header; mod primitive; use self::data::parse_type_data; use self::header::*; use self::primitive::type_data_for_primitive; pub use self::data::{TypeData,ClassKind,EnumValue,FieldAttributes,FunctionAttributes,MethodListEntry,TypeProperties}; pub use self::primitive::{Indirection,PrimitiveType}; /// `TypeInformation` provides zero-copy access to a PDB type data stream. /// /// PDB type information is stored as a stream of length-prefixed `Type` records, and thus the most /// fundamental operation supported by `TypeInformation` is to iterate over `Type`s. /// /// Types are uniquely identified by `TypeIndex`, and types are stored within the PDB in ascending /// order of `TypeIndex`. /// /// Many types refer to other types by `TypeIndex`, and these references may refer to other types /// forming a chain that's arbitrarily long. Fortunately, PDB format requires that types refer only /// to types with lower `TypeIndex`es; thus, the stream of types form a directed acyclic graph. /// /// `TypeInformation` can iterate by `TypeIndex`, since that's essentially the only operation /// permitted by the data. `TypeFinder` is a secondary data structure to provide efficient /// backtracking. /// /// # Examples /// /// Iterating over the types while building a `TypeFinder`: /// /// ``` /// # use pdb::FallibleIterator; /// # /// # fn test() -> pdb::Result<usize> { /// # let file = std::fs::File::open("fixtures/self/foo.pdb")?; /// # let mut pdb = pdb::PDB::open(file)?; /// /// let type_information = pdb.type_information()?; /// let mut type_finder = type_information.new_type_finder(); /// /// # let expected_count = type_information.len(); /// # let mut count: usize = 0; /// let mut iter = type_information.iter(); /// while let Some(typ) = iter.next()? 
{ /// // build the type finder as we go /// type_finder.update(&iter); /// /// // parse the type record /// match typ.parse() { /// Ok(pdb::TypeData::Class{name, properties, fields: Some(fields), ..}) => { /// // this Type describes a class-like type with fields /// println!("type {} is a class named {}", typ.type_index(), name); /// /// // `fields` is a TypeIndex which refers to a FieldList /// // To find information about the fields, find and parse that Type /// match type_finder.find(fields)?.parse()? { /// pdb::TypeData::FieldList{ fields, continuation } => { /// // `fields` is a Vec<TypeData> /// for field in fields { /// if let pdb::TypeData::Member { offset, name, field_type, .. } = field { /// // follow `field_type` as desired /// println!(" - field {} at offset {:x}", name, offset); /// } else { /// // handle member functions, nested types, etc. /// } /// } /// /// if let Some(more_fields) = continuation { /// // A FieldList can be split across multiple records /// // TODO: follow `more_fields` and handle the next FieldList /// } /// } /// _ => { } /// } /// /// }, /// Ok(_) => { /// // ignore everything that's not a class-like type /// }, /// Err(pdb::Error::UnimplementedTypeKind(_)) => { /// // found an unhandled type record /// // this probably isn't fatal in most use cases /// }, /// Err(e) => { /// // other error, probably is worth failing /// return Err(e); /// } /// } /// # count += 1; /// } /// /// # assert_eq!(expected_count, count); /// # Ok(count) /// # } /// # assert!(test().expect("test") > 8000); /// ``` #[derive(Debug)] pub struct TypeInformation<'t> { stream: Stream<'t>, header: Header, } impl<'t> TypeInformation<'t> { /// Returns an iterator that can traverse the type table in sequential order. 
pub fn iter(&self) -> TypeIter { // get a parse buffer let mut buf = self.stream.parse_buffer(); // drop the header // this can't fail; we've already read this once buf.take(self.header.header_size as usize).expect("dropping TPI header"); TypeIter{ buf: buf, type_index: self.header.minimum_type_index, } } /// Returns the number of types contained in this `TypeInformation`. /// /// Note that primitive types are not stored in the PDB file, so the number of distinct types /// reachable via this `TypeInformation` will be higher than `len()`. pub fn len(&self) -> usize { (self.header.maximum_type_index - self.header.minimum_type_index) as usize } /// Returns a `TypeFinder` with a default time-space tradeoff. /// /// The `TypeFinder` is initially empty and must be populated by iterating. pub fn new_type_finder(&self) -> TypeFinder { new_type_finder(self, 3) } } pub fn new_type_information(stream: Stream) -> Result<TypeInformation> { let h; { let mut buf = stream.parse_buffer(); h = Header::parse(&mut buf)?; } Ok(TypeInformation{ stream: stream, header: h, }) } /// This buffer is used when a `Type` refers to a primitive type. It doesn't contain anything /// type-specific, but it does parse as `raw_type() == 0xffff`, which is a reserved value. Seems /// like a reasonable thing to do. const PRIMITIVE_TYPE: &'static [u8] = b"\xff\xff"; /// Represents a type from the type table. A `Type` has been minimally processed and may not be /// correctly formed or even understood by this library. /// /// To avoid copying, `Type`s exist as references to data owned by the parent `TypeInformation`. /// Therefore, a `Type` may not outlive its parent. #[derive(Copy,Clone,PartialEq)] pub struct Type<'t>(TypeIndex, &'t [u8]); impl<'t> Type<'t> { /// Returns this type's `TypeIndex`. pub fn type_index(&self) -> TypeIndex { self.0 } /// Returns the length of this type's data in terms of bytes in the on-disk format. /// /// Types are prefixed by length, which is not included in this count. 
pub fn len(&self) -> usize { self.1.len() } /// Returns the kind of type identified by this `Type`. /// /// As a special case, if this `Type` is actually a primitive type, `raw_kind()` will return /// `0xffff`. #[inline] pub fn raw_kind(&self) -> u16 { debug_assert!(self.1.len() >= 2); // assemble a little-endian u16 (self.1[0] as u16) | ((self.1[1] as u16) << 8) } /// Parse this Type into a TypeData. /// /// # Errors /// /// * `Error::UnimplementedTypeKind(kind)` if the type record isn't currently understood by this /// library /// * `Error::UnexpectedEof` if the type record is malformed pub fn parse(&self) -> Result<TypeData<'t>> { if self.0 < 0x1000 { // Primitive type type_data_for_primitive(self.0) } else { let mut buf = ParseBuffer::from(self.1); parse_type_data(&mut buf) } } } impl<'t> fmt::Debug for Type<'t> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Type{{ kind: 0x{:4x} [{} bytes] }}", self.raw_kind(), self.1.len()) } } /// A `TypeFinder` is a secondary, in-memory data structure that permits efficiently finding types /// by `TypeIndex`. It starts out empty and must be populated by calling `update(&TypeIter)` while /// iterating. /// /// `TypeFinder` allocates all the memory it needs when it is first created. The footprint is /// directly proportional to the total number of types; see `TypeInformation.len()`. /// /// # Time/space trade-off /// /// The naïve approach is to store the position of each `Type` as they are covered in the stream. /// The cost is memory: namely one `u32` per `Type`. /// /// Compare this approach to a `TypeFinder` that stores the position of every Nth type. Memory /// requirements would be reduced by a factor of N in exchange for requiring an average of (N-1)/2 /// iterations per lookup. However, iteration is cheap sequential memory access, and spending less /// memory on `TypeFinder` means more of the data can fit in the cache, so this is likely a good /// trade-off for small-ish values of N. 
/// /// `TypeFinder` is parameterized by `shift` which controls this trade-off as powers of two: /// /// * If `shift` is 0, `TypeFinder` stores 4 bytes per `Type` and always performs direct lookups. /// * If `shift` is 1, `TypeFinder` stores 2 bytes per `Type` and averages 0.5 iterations per lookup. /// * If `shift` is 2, `TypeFinder` stores 1 byte per `Type` and averages 1.5 iterations per lookup. /// * If `shift` is 3, `TypeFinder` stores 4 bits per `Type` and averages 3.5 iterations per lookup. /// * If `shift` is 4, `TypeFinder` stores 2 bits per `Type` and averages 7.5 iterations per lookup. /// * If `shift` is 5, `TypeFinder` stores 1 bit per `Type` and averages 15.5 iterations per lookup. /// /// This list can continue but with rapidly diminishing returns. Iteration cost is proportional to /// type size, which varies, but typical numbers from a large program are: /// /// * 24% of types are 12 bytes /// * 34% of types are <= 16 bytes /// * 84% of types are <= 32 bytes /// /// A `shift` of 2 or 3 is likely appropriate for most workloads. 500K types would require 1 MB or /// 500 KB of memory respectively, and lookups -- though indirect -- would still usually need only /// one or two 64-byte cache lines. 
#[derive(Debug)] pub struct TypeFinder<'t> { buffer: ParseBuffer<'t>, minimum_type_index: TypeIndex, maximum_type_index: TypeIndex, positions: Vec<u32>, shift: u8, } fn new_type_finder<'b, 't: 'b>(type_info: &'b TypeInformation<'t>, shift: u8) -> TypeFinder<'b> { let count = type_info.header.maximum_type_index - type_info.header.minimum_type_index; let shifted_count = (count >> shift) as usize; let mut positions: Vec<u32> = Vec::with_capacity(shifted_count); // add record zero, which is identical regardless of shift positions.push(type_info.header.header_size); TypeFinder{ buffer: type_info.stream.parse_buffer(), minimum_type_index: type_info.header.minimum_type_index, maximum_type_index: type_info.header.maximum_type_index, positions: positions, shift: shift, } } impl<'t> TypeFinder<'t> { /// Given a `TypeIndex`, find which position in the Vec we should jump to and how many times we /// need to iterate to find the requested type. /// /// `shift` refers to the size of these bit shifts. #[inline] fn resolve(&self, type_index: TypeIndex) -> (usize, usize) { let raw = type_index - self.minimum_type_index; ( (raw >> self.shift) as usize, (raw & ((1 << self.shift) - 1)) as usize ) } /// Returns the highest `TypeIndex` which is currently served by this `TypeFinder`. /// /// In general, you shouldn't need to consider this. Types always refer to types with lower /// `TypeIndex`es, and either: /// /// * You obtained a `Type` by iterating, in which case you should be calling `update()` as you /// iterate, and in which case all types it can reference are <= `max_indexed_type()`, or /// * You got a `Type` from this `TypeFinder`, in which case all types it can reference are /// still <= `max_indexed_type()`. /// #[inline] pub fn max_indexed_type(&self) -> TypeIndex { (self.positions.len() << self.shift) as TypeIndex + self.minimum_type_index - 1 } /// Update this `TypeFinder` based on the current position of a `TypeIter`. /// /// Do this each time you call `.next()`. 
#[inline] pub fn update(&mut self, iterator: &TypeIter) { let (vec_index, iteration_count) = self.resolve(iterator.type_index); if iteration_count == 0 && vec_index == self.positions.len() { let pos = iterator.buf.pos(); assert!(pos < u32::max_value() as usize); self.positions.push(pos as u32); } } /// Find a type by `TypeIndex`. /// /// # Errors /// /// * `Error::TypeNotFound(type_index)` if you ask for a type that doesn't exist /// * `Error::TypeNotIndexed(type_index, max_indexed_type)` if you ask for a type that is known /// to exist but is not currently known by this `TypeFinder`. pub fn find(&self, type_index: TypeIndex) -> Result<Type<'t>> { if type_index < self.minimum_type_index { return Ok(Type(type_index, PRIMITIVE_TYPE)); } else if type_index > self.maximum_type_index { return Err(Error::TypeNotFound(type_index)); } // figure out where we'd find this let (vec_index, iteration_count) = self.resolve(type_index); if let Some(pos) = self.positions.get(vec_index) { // hit let mut buf = self.buffer.clone(); // jump forwards buf.take(*pos as usize)?; // skip some records for _ in 0..iteration_count { let length = buf.parse_u16()?; buf.take(length as usize)?; } // read the type let length = buf.parse_u16()?; Ok(Type(type_index, buf.take(length as usize)?)) } else { // miss Err(Error::TypeNotIndexed(type_index, self.max_indexed_type())) } } } /// A `TypeIter` iterates over a `TypeInformation`, producing `Types`s. /// /// Type streams are represented internally as a series of records, each of which have a length, a /// type, and a type-specific field layout. Iteration performance is therefore similar to a linked /// list. 
#[derive(Debug)]
pub struct TypeIter<'t> {
    buf: ParseBuffer<'t>,   // the remaining, not-yet-parsed type record bytes
    type_index: TypeIndex,  // index that will be assigned to the next type returned
}

impl<'t> FallibleIterator for TypeIter<'t> {
    type Item = Type<'t>;
    type Error = Error;

    fn next(&mut self) -> result::Result<Option<Self::Item>, Self::Error> {
        // see if we're at EOF
        if self.buf.len() == 0 {
            return Ok(None);
        }

        // read the length of the next type
        let length = self.buf.parse_u16()? as usize;

        // validate
        // every record must at least contain its two-byte kind field
        if length < 2 {
            // this can't be correct
            return Err(Error::TypeTooShort);
        }

        // grab the type itself
        let type_buf = self.buf.take(length)?;
        let my_type_index = self.type_index;

        // records are stored in ascending TypeIndex order, so just count upward
        self.type_index += 1;

        // Done
        Ok(Some(Type(my_type_index, type_buf)))
    }
}
// Syntax: master_server [ip address (0.0.0.0 by default)] // This was created for HaloMD. // If the server does not respond in at least this many seconds, it will be dropped from the list. const DROP_TIME : i64 = 60; // Blacklist for blocking IPs. Separate with newlines. Any line that starts with a # is ignored. // Blacklisting IPs ignores heartbeat and keepalive packets from an IP address. // That means that servers that are banned will not be immediately removed, but will time out, instead. const BLACKLIST_FILE : &'static str = "blacklist.txt"; // Read the blacklist every x amount of seconds. const BLACKLIST_UPDATE_TIME : u32 = 60; // Note: The master server must have TCP 29920 open and UDP 27900 open. const BROADCAST_PORT_UDP : u16 = 27900; const SERVER_LIST_PORT_TCP : u16 = 29920; // Broadcast packet types. const KEEPALIVE : u8 = 8; const HEARTBEAT : u8 = 3; // Game state changes const GAMEEXITED : u16 = 2; // Opcode info const OPCODE_INDEX : usize = 0; const OPCODE_AND_HANDSHAKE_LENGTH : usize = 5; // Broadcasted game name const HALO_RETAIL : &'static str = "halor"; use std::net::{UdpSocket,TcpListener,SocketAddr}; use std::net::SocketAddr::{V4,V6}; use std::io::{Write,BufReader,BufRead}; use std::env; use std::fs::File; use std::thread; use std::sync::{Arc, Mutex}; extern crate time; mod halo_server; use halo_server::HaloServer; mod heartbeat_packet; use heartbeat_packet::HeartbeatPacket; trait IPString { fn ip_string(&self) -> String; } impl IPString for SocketAddr { fn ip_string(&self) -> String { match *self { V4(ipv4) => ipv4.ip().to_string(), V6(ipv6) => "[".to_string() + &ipv6.ip().to_string() + "]" } } } fn main() { let count = env::args().count(); let ip = if count == 2 { let j : Vec<_> = env::args().collect(); j[1].to_string() } else if count == 1 { "0.0.0.0".to_string() } else { println!("Only one argument is allowed: the IP to bind to."); return; }; // We need to bind on two different ports. 
If it failed to bind (invalid IP, port is taken), then we must make sure this is known.
    let halo_socket = match UdpSocket::bind((&ip as &str,BROADCAST_PORT_UDP)) {
        Err(error) => {
            println!("Error creating a UDP socket at {}:{}. {}.",ip,BROADCAST_PORT_UDP,error);
            return;
        },
        Ok(halo_socket) => halo_socket
    };

    let client_socket = match TcpListener::bind((&ip as &str,SERVER_LIST_PORT_TCP)) {
        Err(error) => {
            println!("Error listening to TCP at {}:{}. {}.",ip,SERVER_LIST_PORT_TCP,error);
            return;
        },
        Ok(client_socket) => client_socket
    };

    // Mutex for thread safety.
    let servers_halo: Vec<HaloServer> = Vec::new();
    let servers_mut_udp = Arc::new(Mutex::new(servers_halo));
    let servers_mut_tcp = servers_mut_udp.clone();
    let servers_mut_destruction = servers_mut_udp.clone();

    // Destruction thread. This will remove servers that have not broadcasted their presence in a while.
    thread::spawn(move || {
        loop {
            thread::sleep_ms(10 * 1000);
            let mut servers = servers_mut_destruction.lock().unwrap();
            let timenow = time::now().to_timespec().sec;
            // Keep only servers that have checked in within DROP_TIME seconds.
            servers.retain(|x| x.last_alive + DROP_TIME > timenow);
        }
    });

    // Blacklist mutex. Concurrency needs to be safe, my friend.
    let blacklist: Vec<String> = Vec::new();
    let blacklist_update = Arc::new(Mutex::new(blacklist));
    let blacklist_udp = blacklist_update.clone();

    // Blacklist read thread.
    thread::spawn(move || {
        loop {
            // Placed in a block so blacklist is unlocked before sleeping to prevent threads from being locked for too long.
            {
                let mut blacklist_ref = blacklist_update.lock().unwrap();
                blacklist_ref.clear();
                match File::open(BLACKLIST_FILE) {
                    Ok(file) => {
                        let reader = BufReader::new(&file);
                        match reader.lines().collect() {
                            Err(_) => {},
                            Ok(t) => {
                                let lines : Vec<String> = t;
                                // FIX: use `!x.starts_with("#")` rather than comparing a bool
                                // against `false` — same behavior, idiomatic Rust.
                                for line in lines.iter().filter(|x| !x.starts_with("#")) {
                                    println!("Added {} to blacklist.", line);
                                    blacklist_ref.push(line.clone());
                                }
                            }
                        }
                    },
                    Err(_) => {}
                };
            }
            thread::sleep_ms(BLACKLIST_UPDATE_TIME * 1000);
        }
    });

    // TCP server thread.
This is for the HaloMD application. thread::spawn(move || { loop { for stream in client_socket.incoming() { let mut client = match stream { Err(_) => continue, Ok(the_stream) => the_stream }; // Unwrap the IP. let ip = match client.peer_addr() { Err(_) => continue, Ok(ip) => ip.ip_string() }; let mut ips = String::new(); // Make servers_ref go out of scope to unlock it for other threads, since we don't need it. { let servers_ref = servers_mut_tcp.lock().unwrap(); let servers = (*servers_ref).iter(); for j in servers { ips.push_str(&(j.to_string())); ips.push('\n'); } } // Some number placed after the requester's IP. If you ask me, the source code was abducted by aliens, and this is a tracking number. Regardless, it's needed. ips.push_str(&ip); ips.push_str(":49149:3425"); // We may be here a while. Just in case... thread::spawn( move || { let _ = client.write_all(ips.as_bytes()); }); } } }); // UDP server is run on the main thread. Servers broadcast their presence here. // These are the allowed game versions. HaloMD and Halo PC 1.09 uses 01.00.09.0620, while Halo PC servers on 1.10 use 01.00.10.0621 (these are also joinable). let game_versions = [ "01.00.09.0620".to_string(), "01.00.10.0621".to_string() ]; let mut buffer = [0; 2048]; loop { let (length, source) = match halo_socket.recv_from(&mut buffer) { Err(_) => continue, Ok(x) => x }; if length <= OPCODE_INDEX { continue; } if buffer[OPCODE_INDEX] != KEEPALIVE && buffer[OPCODE_INDEX] != HEARTBEAT { continue; } let client_ip = source.ip_string(); let blacklist_ref = blacklist_udp.lock().unwrap(); if blacklist_ref.contains(&client_ip) { continue; } // Heartbeat packet. These contain null-terminated C strings and are ordered in key1[0]value1[0]key2[0]value2[0]key3[0]value3[0] where [0] is a byte equal to 0x00. 
if buffer[OPCODE_INDEX] == HEARTBEAT && length > OPCODE_AND_HANDSHAKE_LENGTH { let mut servers = servers_mut_udp.lock().unwrap(); match HeartbeatPacket::from_buffer(&buffer[OPCODE_AND_HANDSHAKE_LENGTH..length]) { None => {}, Some(packet) => { let updatetime = time::now().to_timespec().sec; match servers.iter_mut().position(|x| x.ip == client_ip && x.port == packet.localport) { None => { if game_versions.contains(&packet.gamever) && &packet.gamename == HALO_RETAIL { let serverness = HaloServer { ip:client_ip, port: packet.localport, last_alive: updatetime }; (*servers).push(serverness); } } Some(k) => { servers[k].last_alive = updatetime; if packet.statechanged == GAMEEXITED { servers.remove(k); } } }; } }; } // Keepalive packet. We need to rely on the origin's port for this, unfortunately. This may mean that the source port is incorrect if the port was changed with NAT. else if buffer[OPCODE_INDEX] == KEEPALIVE { let mut servers_ref = servers_mut_udp.lock().unwrap(); let servers = (*servers_ref).iter_mut(); for i in servers { if i.ip == client_ip && i.port == source.port() { i.last_alive = time::now().to_timespec().sec; break; } } } } } Use ! instead of == false // Syntax: master_server [ip address (0.0.0.0 by default)] // This was created for HaloMD. // If the server does not respond in at least this many seconds, it will be dropped from the list. const DROP_TIME : i64 = 60; // Blacklist for blocking IPs. Separate with newlines. Any line that starts with a # is ignored. // Blacklisting IPs ignores heartbeat and keepalive packets from an IP address. // That means that servers that are banned will not be immediately removed, but will time out, instead. const BLACKLIST_FILE : &'static str = "blacklist.txt"; // Read the blacklist every x amount of seconds. const BLACKLIST_UPDATE_TIME : u32 = 60; // Note: The master server must have TCP 29920 open and UDP 27900 open. 
const BROADCAST_PORT_UDP : u16 = 27900; const SERVER_LIST_PORT_TCP : u16 = 29920; // Broadcast packet types. const KEEPALIVE : u8 = 8; const HEARTBEAT : u8 = 3; // Game state changes const GAMEEXITED : u16 = 2; // Opcode info const OPCODE_INDEX : usize = 0; const OPCODE_AND_HANDSHAKE_LENGTH : usize = 5; // Broadcasted game name const HALO_RETAIL : &'static str = "halor"; use std::net::{UdpSocket,TcpListener,SocketAddr}; use std::net::SocketAddr::{V4,V6}; use std::io::{Write,BufReader,BufRead}; use std::env; use std::fs::File; use std::thread; use std::sync::{Arc, Mutex}; extern crate time; mod halo_server; use halo_server::HaloServer; mod heartbeat_packet; use heartbeat_packet::HeartbeatPacket; trait IPString { fn ip_string(&self) -> String; } impl IPString for SocketAddr { fn ip_string(&self) -> String { match *self { V4(ipv4) => ipv4.ip().to_string(), V6(ipv6) => "[".to_string() + &ipv6.ip().to_string() + "]" } } } fn main() { let count = env::args().count(); let ip = if count == 2 { let j : Vec<_> = env::args().collect(); j[1].to_string() } else if count == 1 { "0.0.0.0".to_string() } else { println!("Only one argument is allowed: the IP to bind to."); return; }; // We need to bind on two different ports. If it failed to bind (invalid IP, port is taken), then we must make sure this is known. let halo_socket = match UdpSocket::bind((&ip as &str,BROADCAST_PORT_UDP)) { Err(error) => { println!("Error creating a UDP socket at {}:{}. {}.",ip,BROADCAST_PORT_UDP,error); return; }, Ok(halo_socket) => halo_socket }; let client_socket = match TcpListener::bind((&ip as &str,SERVER_LIST_PORT_TCP)) { Err(error) => { println!("Error listening to TCP at {}:{}. {}.",ip,SERVER_LIST_PORT_TCP,error); return; }, Ok(client_socket) => client_socket }; // Mutex for thread safety. 
let servers_halo: Vec<HaloServer> = Vec::new(); let servers_mut_udp = Arc::new(Mutex::new(servers_halo)); let servers_mut_tcp = servers_mut_udp.clone(); let servers_mut_destruction = servers_mut_udp.clone(); // Destruction thread. This will remove servers that have not broadcasted their presence in a while. thread::spawn(move || { loop { thread::sleep_ms(10 * 1000); let mut servers = servers_mut_destruction.lock().unwrap(); let timenow = time::now().to_timespec().sec; servers.retain(|x| x.last_alive + DROP_TIME > timenow); } }); // Blacklist mutex. Concurrency needs to be safe, my friend. let blacklist: Vec<String> = Vec::new(); let blacklist_update = Arc::new(Mutex::new(blacklist)); let blacklist_udp = blacklist_update.clone(); // Blacklist read thread. thread::spawn(move || { loop { // Placed in a block so blacklist is unlocked before sleeping to prevent threads from being locked for too long. { let mut blacklist_ref = blacklist_update.lock().unwrap(); blacklist_ref.clear(); match File::open(BLACKLIST_FILE) { Ok(file) => { let reader = BufReader::new(&file); match reader.lines().collect() { Err(_) => {}, Ok(t) => { let lines : Vec<String> = t; for line in lines.iter().filter(|x| !x.starts_with("#")) { println!("Added {} to blacklist.", line); blacklist_ref.push(line.clone()); } } } }, Err(_) => {} }; } thread::sleep_ms(BLACKLIST_UPDATE_TIME * 1000); } }); // TCP server thread. This is for the HaloMD application. thread::spawn(move || { loop { for stream in client_socket.incoming() { let mut client = match stream { Err(_) => continue, Ok(the_stream) => the_stream }; // Unwrap the IP. let ip = match client.peer_addr() { Err(_) => continue, Ok(ip) => ip.ip_string() }; let mut ips = String::new(); // Make servers_ref go out of scope to unlock it for other threads, since we don't need it. 
{ let servers_ref = servers_mut_tcp.lock().unwrap(); let servers = (*servers_ref).iter(); for j in servers { ips.push_str(&(j.to_string())); ips.push('\n'); } } // Some number placed after the requester's IP. If you ask me, the source code was abducted by aliens, and this is a tracking number. Regardless, it's needed. ips.push_str(&ip); ips.push_str(":49149:3425"); // We may be here a while. Just in case... thread::spawn( move || { let _ = client.write_all(ips.as_bytes()); }); } } }); // UDP server is run on the main thread. Servers broadcast their presence here. // These are the allowed game versions. HaloMD and Halo PC 1.09 uses 01.00.09.0620, while Halo PC servers on 1.10 use 01.00.10.0621 (these are also joinable). let game_versions = [ "01.00.09.0620".to_string(), "01.00.10.0621".to_string() ]; let mut buffer = [0; 2048]; loop { let (length, source) = match halo_socket.recv_from(&mut buffer) { Err(_) => continue, Ok(x) => x }; if length <= OPCODE_INDEX { continue; } if buffer[OPCODE_INDEX] != KEEPALIVE && buffer[OPCODE_INDEX] != HEARTBEAT { continue; } let client_ip = source.ip_string(); let blacklist_ref = blacklist_udp.lock().unwrap(); if blacklist_ref.contains(&client_ip) { continue; } // Heartbeat packet. These contain null-terminated C strings and are ordered in key1[0]value1[0]key2[0]value2[0]key3[0]value3[0] where [0] is a byte equal to 0x00. 
if buffer[OPCODE_INDEX] == HEARTBEAT && length > OPCODE_AND_HANDSHAKE_LENGTH { let mut servers = servers_mut_udp.lock().unwrap(); match HeartbeatPacket::from_buffer(&buffer[OPCODE_AND_HANDSHAKE_LENGTH..length]) { None => {}, Some(packet) => { let updatetime = time::now().to_timespec().sec; match servers.iter_mut().position(|x| x.ip == client_ip && x.port == packet.localport) { None => { if game_versions.contains(&packet.gamever) && &packet.gamename == HALO_RETAIL { let serverness = HaloServer { ip:client_ip, port: packet.localport, last_alive: updatetime }; (*servers).push(serverness); } } Some(k) => { servers[k].last_alive = updatetime; if packet.statechanged == GAMEEXITED { servers.remove(k); } } }; } }; } // Keepalive packet. We need to rely on the origin's port for this, unfortunately. This may mean that the source port is incorrect if the port was changed with NAT. else if buffer[OPCODE_INDEX] == KEEPALIVE { let mut servers_ref = servers_mut_udp.lock().unwrap(); let servers = (*servers_ref).iter_mut(); for i in servers { if i.ip == client_ip && i.port == source.port() { i.last_alive = time::now().to_timespec().sec; break; } } } } }
#[cfg(target_pointer_width = "32")] use std::sync::atomic::AtomicI64 as AtomicLsn; #[cfg(target_pointer_width = "64")] use std::sync::atomic::AtomicIsize as AtomicLsn; #[cfg(feature = "failpoints")] use std::sync::atomic::Ordering::Relaxed; use std::{ mem::size_of, sync::atomic::Ordering::SeqCst, sync::atomic::{spin_loop_hint, AtomicBool, AtomicUsize}, sync::{Arc, Condvar, Mutex}, }; #[cfg(feature = "zstd")] use zstd::block::compress; use self::reader::LogReader; use super::*; // This is the most writers in a single IO buffer // that we have space to accomodate in the counter // for writers in the IO buffer header. const MAX_WRITERS: Header = 127; type Header = u64; /// A logical sequence number. #[cfg(target_pointer_width = "64")] type InnerLsn = isize; #[cfg(target_pointer_width = "32")] type InnerLsn = i64; macro_rules! io_fail { ($self:expr, $e:expr) => { #[cfg(feature = "failpoints")] fail_point!($e, |_| { $self._failpoint_crashing.store(true, SeqCst); // wake up any waiting threads so they don't stall forever $self.interval_updated.notify_all(); Err(Error::FailPoint) }); }; } struct IoBuf { buf: UnsafeCell<Vec<u8>>, header: AtomicUsize, lid: AtomicUsize, lsn: AtomicUsize, capacity: AtomicUsize, maxed: AtomicBool, linearizer: Mutex<()>, } unsafe impl Sync for IoBuf {} pub(super) struct IoBufs { pub(super) config: Config, // We have a fixed number of io buffers. Sometimes they will all be // full, and in order to prevent threads from having to spin in // the reserve function, we can have them block until a buffer becomes // available. buf_mu: Mutex<()>, buf_updated: Condvar, bufs: Vec<IoBuf>, current_buf: AtomicUsize, written_bufs: AtomicUsize, // Pending intervals that have been written to stable storage, but may be // higher than the current value of `stable` due to interesting thread // interleavings. intervals: Mutex<Vec<(Lsn, Lsn)>>, interval_updated: Condvar, // The highest CONTIGUOUS log sequence number that has been written to // stable storage. 
This may be lower than the length of the underlying // file, and there may be buffers that have been written out-of-order // to stable storage due to interesting thread interleavings. stable_lsn: AtomicLsn, max_reserved_lsn: AtomicLsn, segment_accountant: Arc<Mutex<SegmentAccountant>>, // used for signifying that we're simulating a crash #[cfg(feature = "failpoints")] _failpoint_crashing: AtomicBool, } /// `IoBufs` is a set of lock-free buffers for coordinating /// writes to underlying storage. impl IoBufs { pub(crate) fn start<R>( config: Config, mut snapshot: Snapshot<R>, ) -> Result<IoBufs, ()> { // open file for writing let file = config.file()?; let io_buf_size = config.io_buf_size; let snapshot_max_lsn = snapshot.max_lsn; let snapshot_last_lid = snapshot.last_lid; let (next_lsn, next_lid) = if snapshot_max_lsn < SEG_HEADER_LEN as Lsn { snapshot.max_lsn = 0; snapshot.last_lid = 0; (0, 0) } else { match file.read_message(snapshot_last_lid, &config) { Ok(LogRead::Inline(_lsn, _buf, len)) => ( snapshot_max_lsn + len as Lsn + MSG_HEADER_LEN as Lsn, snapshot_last_lid + len as LogId + MSG_HEADER_LEN as LogId, ), Ok(LogRead::Blob(_lsn, _buf, _blob_ptr)) => ( snapshot_max_lsn + BLOB_INLINE_LEN as Lsn + MSG_HEADER_LEN as Lsn, snapshot_last_lid + BLOB_INLINE_LEN as LogId + MSG_HEADER_LEN as LogId, ), other => { // we can overwrite this non-flush debug!( "got non-flush tip while recovering at {}: {:?}", snapshot_last_lid, other ); (snapshot_max_lsn, snapshot_last_lid) } } }; let mut segment_accountant = SegmentAccountant::start(config.clone(), snapshot)?; let bufs = rep_no_copy![IoBuf::new(io_buf_size); config.io_bufs]; let current_buf = 0; trace!( "starting IoBufs with next_lsn: {} \ next_lid: {}", next_lsn, next_lid ); if next_lsn == 0 { // recovering at segment boundary assert_eq!(next_lid, next_lsn as LogId); let iobuf = &bufs[current_buf]; let lid = segment_accountant.next(next_lsn)?; iobuf.set_lid(lid); iobuf.set_capacity(io_buf_size - SEG_TRAILER_LEN); 
iobuf.store_segment_header(0, next_lsn); maybe_fail!("initial allocation"); file.pwrite_all(&*vec![0; config.io_buf_size], lid)?; file.sync_all()?; maybe_fail!("initial allocation post"); debug!( "starting log at clean offset {}, recovered lsn {}", next_lid, next_lsn ); } else { // the tip offset is not completely full yet, reuse it let iobuf = &bufs[current_buf]; let offset = next_lid % io_buf_size as LogId; iobuf.set_lid(next_lid); iobuf.set_capacity( io_buf_size - offset as usize - SEG_TRAILER_LEN, ); iobuf.set_lsn(next_lsn); debug!( "starting log at split offset {}, recovered lsn {}", next_lid, next_lsn ); } // we want stable to begin at -1, since the 0th byte // of our file has not yet been written. let stable = if next_lsn == 0 { -1 } else { next_lsn - 1 }; // remove all blob files larger than our stable offset gc_blobs(&config, stable)?; Ok(IoBufs { config: config, buf_mu: Mutex::new(()), buf_updated: Condvar::new(), bufs: bufs, current_buf: AtomicUsize::new(current_buf), written_bufs: AtomicUsize::new(0), intervals: Mutex::new(vec![]), interval_updated: Condvar::new(), stable_lsn: AtomicLsn::new(stable as InnerLsn), max_reserved_lsn: AtomicLsn::new(stable as InnerLsn), segment_accountant: Arc::new(Mutex::new( segment_accountant, )), #[cfg(feature = "failpoints")] _failpoint_crashing: AtomicBool::new(false), }) } /// SegmentAccountant access for coordination with the `PageCache` pub(super) fn with_sa<B, F>(&self, f: F) -> B where F: FnOnce(&mut SegmentAccountant) -> B, { let start = clock(); debug_delay(); let mut sa = self.segment_accountant.lock().unwrap(); let locked_at = clock(); M.accountant_lock.measure(locked_at - start); let ret = f(&mut sa); drop(sa); M.accountant_hold.measure(clock() - locked_at); ret } /// SegmentAccountant access for coordination with the `PageCache`, /// performed after all threads have exited the currently checked-in /// epochs using a crossbeam-epoch EBR guard. 
/// /// IMPORTANT: Never call this function with anything that calls /// defer on the default EBR collector, or we could deadlock! pub(super) unsafe fn with_sa_deferred<F>(&self, f: F) where F: FnOnce(&mut SegmentAccountant) + Send + 'static, { let guard = pin(); let segment_accountant = self.segment_accountant.clone(); guard.defer(move || { let start = clock(); debug_delay(); let mut sa = segment_accountant.lock().unwrap(); let locked_at = clock(); M.accountant_lock.measure(locked_at - start); let _ = f(&mut sa); drop(sa); M.accountant_hold.measure(clock() - locked_at); }); guard.flush(); } fn idx(&self) -> usize { debug_delay(); let current_buf = self.current_buf.load(SeqCst); current_buf % self.config.io_bufs } /// Returns the last stable offset in storage. pub(super) fn stable(&self) -> Lsn { debug_delay(); self.stable_lsn.load(SeqCst) as Lsn } // Adds a header to the front of the buffer fn encapsulate( &self, raw_buf: Vec<u8>, lsn: Lsn, over_blob_threshold: bool, is_blob_rewrite: bool, ) -> Result<Vec<u8>, ()> { let buf = if over_blob_threshold { // write blob to file io_fail!(self, "blob blob write"); write_blob(&self.config, lsn, raw_buf)?; let lsn_buf: [u8; size_of::<BlobPointer>()] = u64_to_arr(lsn as u64); lsn_buf.to_vec() } else { raw_buf }; let crc16 = crc16_arr(&buf); let header = MessageHeader { kind: if over_blob_threshold || is_blob_rewrite { MessageKind::Blob } else { MessageKind::Inline }, lsn: lsn, len: buf.len(), crc16: crc16, }; let header_bytes: [u8; MSG_HEADER_LEN] = header.into(); let mut out = vec![0; MSG_HEADER_LEN + buf.len()]; out[0..MSG_HEADER_LEN].copy_from_slice(&header_bytes); out[MSG_HEADER_LEN..].copy_from_slice(&*buf); Ok(out) } /// Tries to claim a reservation for writing a buffer to a /// particular location in stable storge, which may either be /// completed or aborted later. Useful for maintaining /// linearizability across CAS operations that may need to /// persist part of their operation. 
/// /// # Panics /// /// Panics if the desired reservation is greater than the /// io buffer size minus the size of a segment header + /// a segment footer + a message header. pub(super) fn reserve( &self, raw_buf: Vec<u8>, ) -> Result<Reservation<'_>, ()> { self.reserve_inner(raw_buf, false) } /// Reserve a replacement buffer for a previously written /// blob write. This ensures the message header has the /// proper blob flag set. pub(super) fn reserve_blob( &self, blob_ptr: BlobPointer, ) -> Result<Reservation<'_>, ()> { let lsn_buf: [u8; size_of::<BlobPointer>()] = u64_to_arr(blob_ptr as u64); self.reserve_inner(lsn_buf.to_vec(), true) } fn reserve_inner( &self, raw_buf: Vec<u8>, is_blob_rewrite: bool, ) -> Result<Reservation<'_>, ()> { let _measure = Measure::new(&M.reserve); let io_bufs = self.config.io_bufs; // right shift 32 on 32-bit pointer systems panics #[cfg(target_pointer_width = "64")] assert_eq!((raw_buf.len() + MSG_HEADER_LEN) >> 32, 0); #[cfg(feature = "zstd")] let buf = if self.config.use_compression { let _measure = Measure::new(&M.compress); compress(&*raw_buf, self.config.zstd_compression_factor) .unwrap() } else { raw_buf }; #[cfg(not(feature = "zstd"))] let buf = raw_buf; let total_buf_len = MSG_HEADER_LEN + buf.len(); let max_overhead = std::cmp::max(SEG_HEADER_LEN, SEG_TRAILER_LEN); let max_buf_size = (self.config.io_buf_size / MINIMUM_ITEMS_PER_SEGMENT) - max_overhead; let over_blob_threshold = total_buf_len > max_buf_size; let inline_buf_len = if over_blob_threshold { MSG_HEADER_LEN + size_of::<Lsn>() } else { total_buf_len }; trace!("reserving buf of len {}", inline_buf_len); let mut printed = false; macro_rules! 
trace_once { ($($msg:expr),*) => { if !printed { trace!($($msg),*); printed = true; }}; } let mut spins = 0; loop { M.log_reservation_attempted(); #[cfg(feature = "failpoints")] { if self._failpoint_crashing.load(Relaxed) { return Err(Error::FailPoint); } } let guard = pin(); debug_delay(); let written_bufs = self.written_bufs.load(SeqCst); debug_delay(); let current_buf = self.current_buf.load(SeqCst); let idx = current_buf % io_bufs; spins += 1; if spins > 1_000_000 { debug!( "stalling in reserve, idx {}, buf len {}", idx, inline_buf_len, ); spins = 0; } if written_bufs > current_buf { // This can happen because a reservation can finish up // before the sealing thread gets around to bumping // current_buf. trace_once!("written ahead of sealed, spinning"); spin_loop_hint(); continue; } if current_buf - written_bufs >= io_bufs { // if written is too far behind, we need to // spin while it catches up to avoid overlap trace_once!( "old io buffer not written yet, spinning" ); spin_loop_hint(); // use a condition variable to wait until // we've updated the written_bufs counter. let _measure = Measure::new(&M.reserve_written_condvar_wait); let mut buf_mu = self.buf_mu.lock().unwrap(); while written_bufs == self.written_bufs.load(SeqCst) { buf_mu = self.buf_updated.wait(buf_mu).unwrap(); } continue; } // load current header value let iobuf = &self.bufs[idx]; let header = iobuf.get_header(); // skip if already sealed if is_sealed(header) { // already sealed, start over and hope cur // has already been bumped by sealer. trace_once!("io buffer already sealed, spinning"); spin_loop_hint(); // use a condition variable to wait until // we've updated the current_buf counter. 
let _measure = Measure::new(&M.reserve_current_condvar_wait); let mut buf_mu = self.buf_mu.lock().unwrap(); while current_buf == self.current_buf.load(SeqCst) { buf_mu = self.buf_updated.wait(buf_mu).unwrap(); } continue; } // try to claim space let buf_offset = offset(header); let prospective_size = buf_offset as usize + inline_buf_len; let would_overflow = prospective_size > iobuf.get_capacity(); if would_overflow { // This buffer is too full to accept our write! // Try to seal the buffer, and maybe write it if // there are zero writers. trace_once!("io buffer too full, spinning"); self.maybe_seal_and_write_iobuf(idx, header, true)?; spin_loop_hint(); continue; } // attempt to claim by incrementing an unsealed header let bumped_offset = bump_offset(header, inline_buf_len as Header); // check for maxed out IO buffer writers if n_writers(bumped_offset) == MAX_WRITERS { trace_once!( "spinning because our buffer has {} writers already", MAX_WRITERS ); spin_loop_hint(); continue; } let claimed = incr_writers(bumped_offset); assert!(!is_sealed(claimed)); if iobuf.cas_header(header, claimed).is_err() { // CAS failed, start over trace_once!( "CAS failed while claiming buffer slot, spinning" ); spin_loop_hint(); continue; } // if we're giving out a reservation, // the writer count should be positive assert_ne!(n_writers(claimed), 0); let lid = iobuf.get_lid(); assert_ne!( lid as usize, std::usize::MAX, "fucked up on idx {}\n{:?}", idx, self ); let out_buf = unsafe { (*iobuf.buf.get()).as_mut_slice() }; let res_start = buf_offset as usize; let res_end = res_start + inline_buf_len; let destination = &mut (out_buf)[res_start..res_end]; let reservation_offset = lid + u64::from(buf_offset); let reservation_lsn = iobuf.get_lsn() + u64::from(buf_offset) as Lsn; trace!( "reserved {} bytes at lsn {} lid {}", inline_buf_len, reservation_lsn, reservation_offset, ); self.bump_max_reserved_lsn(reservation_lsn); assert!(!(over_blob_threshold && is_blob_rewrite)); let encapsulated_buf = 
self.encapsulate( buf, reservation_lsn, over_blob_threshold, is_blob_rewrite, )?; M.log_reservation_success(); return Ok(Reservation { idx: idx, iobufs: self, data: encapsulated_buf, destination: destination, flushed: false, lsn: reservation_lsn, lid: reservation_offset, is_blob: over_blob_threshold || is_blob_rewrite, _guard: guard, }); } } /// Called by Reservation on termination (completion or abort). /// Handles departure from shared state, and possibly writing /// the buffer to stable storage if necessary. pub(super) fn exit_reservation( &self, idx: usize, ) -> Result<(), ()> { let iobuf = &self.bufs[idx]; let mut header = iobuf.get_header(); // Decrement writer count, retrying until successful. let mut spins = 0; loop { spins += 1; if spins > 10 { debug!("have spun >10x in decr"); spins = 0; } let new_hv = decr_writers(header); match iobuf.cas_header(header, new_hv) { Ok(new) => { header = new; break; } Err(new) => { // we failed to decr, retry header = new; } } } // Succeeded in decrementing writers, if we decremented writers // to 0 and it's sealed then we should write it to storage. if n_writers(header) == 0 && is_sealed(header) { trace!("exiting idx {} from res", idx); self.write_to_log(idx) } else { Ok(()) } } /// blocks until the specified log sequence number has /// been made stable on disk pub(crate) fn make_stable(&self, lsn: Lsn) -> Result<(), ()> { let _measure = Measure::new(&M.make_stable); // NB before we write the 0th byte of the file, stable is -1 while self.stable() < lsn { let idx = self.idx(); let header = self.bufs[idx].get_header(); if offset(header) == 0 || is_sealed(header) { // nothing to write, don't bother sealing // current IO buffer. 
} else { self.maybe_seal_and_write_iobuf(idx, header, false)?; continue; } // block until another thread updates the stable lsn let waiter = self.intervals.lock().unwrap(); if self.stable() < lsn { #[cfg(feature = "failpoints")] { if self._failpoint_crashing.load(SeqCst) { return Err(Error::FailPoint); } } trace!( "waiting on cond var for make_stable({})", lsn ); let _waiter = self.interval_updated.wait(waiter).unwrap(); } else { trace!("make_stable({}) returning", lsn); break; } } Ok(()) } /// Called by users who wish to force the current buffer /// to flush some pending writes. pub(super) fn flush(&self) -> Result<(), ()> { let max_reserved_lsn = self.max_reserved_lsn.load(SeqCst) as Lsn; self.make_stable(max_reserved_lsn) } // ensure self.max_reserved_lsn is set to this Lsn // or greater, for use in correct calls to flush. fn bump_max_reserved_lsn(&self, lsn: Lsn) { let mut current = self.max_reserved_lsn.load(SeqCst) as InnerLsn; loop { if current >= lsn as InnerLsn { return; } let last = self.max_reserved_lsn.compare_and_swap( current, lsn as InnerLsn, SeqCst, ); if last == current { // we succeeded. return; } current = last; } } // Attempt to seal the current IO buffer, possibly // writing it to disk if there are no other writers // operating on it. fn maybe_seal_and_write_iobuf( &self, idx: usize, header: Header, from_reserve: bool, ) -> Result<(), ()> { let iobuf = &self.bufs[idx]; if is_sealed(header) { // this buffer is already sealed. nothing to do here. 
return Ok(()); } // NB need to do this before CAS because it can get // written and reset by another thread afterward let lid = iobuf.get_lid(); let lsn = iobuf.get_lsn(); let capacity = iobuf.get_capacity(); let io_buf_size = self.config.io_buf_size; if offset(header) as usize > capacity { // a race happened, nothing we can do return Ok(()); } let should_pad = from_reserve && capacity - offset(header) as usize >= MSG_HEADER_LEN; let sealed = if should_pad { mk_sealed(bump_offset( header, capacity as LogId - offset(header), )) } else { mk_sealed(header) }; let res_len = offset(sealed) as usize; let maxed = res_len == capacity; let worked = iobuf.linearized(|| { if iobuf.cas_header(header, sealed).is_err() { // cas failed, don't try to continue return false; } trace!("{} sealed", idx); if from_reserve || maxed { // NB we linearize this together with sealing // the header here to guarantee that in write_to_log, // which may be executing as soon as the seal is set // by another thread, the thread that calls // iobuf.get_maxed() is linearized with this one! trace!("setting maxed to true for idx {}", idx); iobuf.set_maxed(true); } true }); if !worked { return Ok(()); } if should_pad { let offset = offset(header) as usize; let data = unsafe { (*iobuf.buf.get()).as_mut_slice() }; let len = capacity - offset - MSG_HEADER_LEN; // take the crc of the random bytes already after where we // would place our header. 
let padding_bytes = vec![EVIL_BYTE; len]; let crc16 = crc16_arr(&*padding_bytes); let header = MessageHeader { kind: MessageKind::Pad, lsn: lsn + offset as Lsn, len: len, crc16: crc16, }; let header_bytes: [u8; MSG_HEADER_LEN] = header.into(); data[offset..offset + MSG_HEADER_LEN] .copy_from_slice(&header_bytes); data[offset + MSG_HEADER_LEN..capacity] .copy_from_slice(&*padding_bytes); } assert!( capacity + SEG_HEADER_LEN >= res_len, "res_len of {} higher than buffer capacity {}", res_len, capacity ); let max = std::usize::MAX as LogId; assert_ne!( lid, max, "sealing something that should never have \ been claimed (idx {})\n{:?}", idx, self ); // open new slot let mut next_lsn = lsn; let next_offset = if from_reserve || maxed { // roll lsn to the next offset let lsn_idx = lsn / io_buf_size as Lsn; next_lsn = (lsn_idx + 1) * io_buf_size as Lsn; // mark unused as clear debug!( "rolling to new segment after clearing {}-{}", lid, lid + res_len as LogId, ); let ret = self.with_sa(|sa| sa.next(next_lsn)); #[cfg(feature = "failpoints")] { if let Err(Error::FailPoint) = ret { self._failpoint_crashing.store(true, SeqCst); // wake up any waiting threads so they don't stall forever self.interval_updated.notify_all(); } } ret? } else { debug!( "advancing offset within the current segment from {} to {}", lid, lid + res_len as LogId ); next_lsn += res_len as Lsn; let next_offset = lid + res_len as LogId; next_offset }; let next_idx = (idx + 1) % self.config.io_bufs; let next_iobuf = &self.bufs[next_idx]; // NB we spin on this CAS because the next iobuf may not actually // be written to disk yet! 
(we've lapped the writer in the iobuf // ring buffer) let mut spins = 0; while next_iobuf.cas_lid(max, next_offset).is_err() { spins += 1; if spins > 1_000_000 { debug!( "have spun >1,000,000x in seal of buf {}", idx ); spins = 0; } #[cfg(feature = "failpoints")] { if self._failpoint_crashing.load(Relaxed) { // panic!("propagating failpoint"); return Err(Error::FailPoint); } } spin_loop_hint(); } trace!("{} log set to {}", next_idx, next_offset); // NB as soon as the "sealed" bit is 0, this allows new threads // to start writing into this buffer, so do that after it's all // set up. expect this thread to block until the buffer completes // its entire lifecycle as soon as we do that. if from_reserve || maxed { next_iobuf.set_capacity(io_buf_size - SEG_TRAILER_LEN); next_iobuf.store_segment_header(sealed, next_lsn); } else { let new_cap = capacity - res_len; assert_ne!(new_cap, 0); next_iobuf.set_capacity(new_cap); next_iobuf.set_lsn(next_lsn); let last_salt = salt(sealed); let new_salt = bump_salt(last_salt); next_iobuf.set_header(new_salt); } trace!("{} zeroed header", next_idx); // we acquire this mutex to guarantee that any threads that // are going to wait on the condition variable will observe // the change. debug_delay(); let _ = self.buf_mu.lock().unwrap(); // communicate to other threads that we have advanced an IO buffer. debug_delay(); let _current_buf = self.current_buf.fetch_add(1, SeqCst) + 1; trace!("{} current_buf", _current_buf % self.config.io_bufs); // let any threads that are blocked on buf_mu know about the // updated counter. debug_delay(); self.buf_updated.notify_all(); // if writers is 0, it's our responsibility to write the buffer. if n_writers(sealed) == 0 { trace!("writing to log from maybe_seal"); self.write_to_log(idx) } else { Ok(()) } } // Write an IO buffer's data to stable storage and set up the // next IO buffer for writing. 
fn write_to_log(&self, idx: usize) -> Result<(), ()> { let _measure = Measure::new(&M.write_to_log); let iobuf = &self.bufs[idx]; let header = iobuf.get_header(); let lid = iobuf.get_lid(); let base_lsn = iobuf.get_lsn(); let io_buf_size = self.config.io_buf_size; assert_eq!( (lid % io_buf_size as LogId) as Lsn, base_lsn % io_buf_size as Lsn ); assert_ne!( lid as usize, std::usize::MAX, "created reservation for uninitialized slot", ); let res_len = offset(header) as usize; let data = unsafe { (*iobuf.buf.get()).as_mut_slice() }; let f = self.config.file()?; io_fail!(self, "buffer write"); f.pwrite_all(&data[..res_len], lid)?; f.sync_all()?; io_fail!(self, "buffer write post"); // write a trailer if we're maxed let maxed = iobuf.linearized(|| iobuf.get_maxed()); if maxed { let segment_lsn = base_lsn / io_buf_size as Lsn * io_buf_size as Lsn; let segment_lid = lid / io_buf_size as LogId * io_buf_size as LogId; let trailer_overhang = io_buf_size as Lsn - SEG_TRAILER_LEN as Lsn; let trailer_lid = segment_lid + trailer_overhang as LogId; let trailer_lsn = segment_lsn + trailer_overhang; let trailer = SegmentTrailer { lsn: trailer_lsn, ok: true, }; let trailer_bytes: [u8; SEG_TRAILER_LEN] = trailer.into(); io_fail!(self, "trailer write"); f.pwrite_all(&trailer_bytes, trailer_lid)?; f.sync_all()?; io_fail!(self, "trailer write post"); iobuf.set_maxed(false); debug!( "wrote trailer at lid {} for lsn {}", trailer_lid, trailer_lsn ); // transition this segment into deplete-only mode now // that n_writers is 0, and all calls to mark_replace/link // happen before the reservation completes. 
trace!( "deactivating segment with lsn {} at idx {} with lid {}", segment_lsn, idx, lid ); unsafe { self.with_sa_deferred(move |sa| { trace!("EBR deactivating segment {} with lsn {} and lid {}", idx, segment_lsn, segment_lid); if let Err(e) = sa.deactivate_segment(segment_lsn, segment_lid) { error!("segment accountant failed to deactivate segment: {}", e); } }); } } else { trace!( "not deactivating segment with lsn {}", base_lsn / io_buf_size as Lsn * io_buf_size as Lsn ); } if res_len > 0 || maxed { let complete_len = if maxed { let lsn_idx = base_lsn as usize / io_buf_size; let next_seg_beginning = (lsn_idx + 1) * io_buf_size; next_seg_beginning - base_lsn as usize } else { res_len }; debug!( "wrote lsns {}-{} to disk at offsets {}-{} in buffer {}", base_lsn, base_lsn + res_len as Lsn - 1, lid, lid + res_len as LogId - 1, idx ); self.mark_interval(base_lsn, complete_len); } M.written_bytes.measure(res_len as f64); // signal that this IO buffer is now uninitialized let max = std::usize::MAX as LogId; iobuf.set_lid(max); trace!("{} log <- MAX", idx); // we acquire this mutex to guarantee that any threads that // are going to wait on the condition variable will observe // the change. debug_delay(); let _ = self.buf_mu.lock().unwrap(); // communicate to other threads that we have written an IO buffer. debug_delay(); let _written_bufs = self.written_bufs.fetch_add(1, SeqCst); trace!("{} written", _written_bufs % self.config.io_bufs); // let any threads that are blocked on buf_mu know about the // updated counter. debug_delay(); self.buf_updated.notify_all(); Ok(()) } // It's possible that IO buffers are written out of order! // So we need to use this to keep track of them, and only // increment self.stable. If we didn't do this, then we would // accidentally decrement self.stable sometimes, or bump stable // above an offset that corresponds to a buffer that hasn't actually // been written yet! 
It's OK to use a mutex here because it is pretty // fast, compared to the other operations on shared state. fn mark_interval(&self, whence: Lsn, len: usize) { trace!("mark_interval({}, {})", whence, len); assert_ne!( len, 0, "mark_interval called with a zero-length range, starting from {}", whence ); let mut intervals = self.intervals.lock().unwrap(); let interval = (whence, whence + len as Lsn - 1); intervals.push(interval); debug_assert!( intervals.len() < 1000, "intervals is getting crazy... {:?}", *intervals ); // reverse sort intervals.sort_unstable_by(|a, b| b.cmp(a)); let mut updated = false; let len_before = intervals.len(); while let Some(&(low, high)) = intervals.last() { assert_ne!(low, high); let cur_stable = self.stable_lsn.load(SeqCst) as Lsn; assert!( low > cur_stable, "somehow, we marked offset {} stable while \ interval {}-{} had not yet been applied!", cur_stable, low, high ); if cur_stable + 1 == low { let old = self.stable_lsn.swap(high as InnerLsn, SeqCst) as Lsn; assert_eq!( old, cur_stable, "concurrent stable offset modification detected" ); debug!("new highest interval: {} - {}", low, high); intervals.pop(); updated = true; } else { break; } } if len_before - intervals.len() > 100 { debug!( "large merge of {} intervals", len_before - intervals.len() ); } if updated { self.interval_updated.notify_all(); } } } impl Drop for IoBufs { fn drop(&mut self) { // don't do any more IO if we're simulating a crash #[cfg(feature = "failpoints")] { if self._failpoint_crashing.load(SeqCst) { return; } } if let Err(e) = self.flush() { error!("failed to flush from IoBufs::drop: {}", e); } if let Ok(f) = self.config.file() { f.sync_all().unwrap(); } debug!("IoBufs dropped"); } } impl periodic::Callback for std::sync::Arc<IoBufs> { fn call(&self) { if let Err(e) = self.flush() { #[cfg(feature = "failpoints")] { if let Error::FailPoint = e { self._failpoint_crashing.store(true, SeqCst); // wake up any waiting threads so they don't stall forever 
self.interval_updated.notify_all(); } } error!( "failed to flush from periodic flush thread: {}", e ); } } } impl Debug for IoBufs { fn fmt( &self, formatter: &mut fmt::Formatter<'_>, ) -> std::result::Result<(), fmt::Error> { debug_delay(); let current_buf = self.current_buf.load(SeqCst); debug_delay(); let written_bufs = self.written_bufs.load(SeqCst); formatter.write_fmt(format_args!( "IoBufs {{ sealed: {}, written: {}, bufs: {:?} }}", current_buf, written_bufs, self.bufs )) } } impl Debug for IoBuf { fn fmt( &self, formatter: &mut fmt::Formatter<'_>, ) -> std::result::Result<(), fmt::Error> { let header = self.get_header(); formatter.write_fmt(format_args!( "\n\tIoBuf {{ lid: {}, n_writers: {}, offset: \ {}, sealed: {} }}", self.get_lid(), n_writers(header), offset(header), is_sealed(header) )) } } impl IoBuf { fn new(buf_size: usize) -> IoBuf { IoBuf { buf: UnsafeCell::new(vec![0; buf_size]), header: AtomicUsize::new(0), lid: AtomicUsize::new(std::usize::MAX), lsn: AtomicUsize::new(0), capacity: AtomicUsize::new(0), maxed: AtomicBool::new(false), linearizer: Mutex::new(()), } } // use this for operations on an IoBuf that must be // linearized together, and can't fit in the header! fn linearized<F, B>(&self, f: F) -> B where F: FnOnce() -> B, { let _l = self.linearizer.lock().unwrap(); f() } // This is called upon the initialization of a fresh segment. // We write a new segment header to the beginning of the buffer // for assistance during recovery. The caller is responsible // for ensuring that the IoBuf's capacity has been set properly. 
fn store_segment_header(&self, last: Header, lsn: Lsn) { debug!("storing lsn {} in beginning of buffer", lsn); assert!( self.get_capacity() >= SEG_HEADER_LEN + SEG_TRAILER_LEN ); self.set_lsn(lsn); let header = SegmentHeader { lsn: lsn, ok: true }; let header_bytes: [u8; SEG_HEADER_LEN] = header.into(); unsafe { (*self.buf.get())[0..SEG_HEADER_LEN] .copy_from_slice(&header_bytes); } // ensure writes to the buffer land after our header. let last_salt = salt(last); let new_salt = bump_salt(last_salt); let bumped = bump_offset(new_salt, SEG_HEADER_LEN as Header); self.set_header(bumped); } fn set_capacity(&self, cap: usize) { debug_delay(); self.capacity.store(cap, SeqCst); } fn get_capacity(&self) -> usize { debug_delay(); self.capacity.load(SeqCst) } fn set_lsn(&self, lsn: Lsn) { debug_delay(); self.lsn.store(lsn as usize, SeqCst); } fn set_maxed(&self, maxed: bool) { debug_delay(); self.maxed.store(maxed, SeqCst); } fn get_maxed(&self) -> bool { debug_delay(); self.maxed.load(SeqCst) } fn get_lsn(&self) -> Lsn { debug_delay(); self.lsn.load(SeqCst) as Lsn } fn set_lid(&self, offset: LogId) { debug_delay(); self.lid.store(offset as usize, SeqCst); } fn get_lid(&self) -> LogId { debug_delay(); self.lid.load(SeqCst) as LogId } fn get_header(&self) -> Header { debug_delay(); self.header.load(SeqCst) as Header } fn set_header(&self, new: Header) { debug_delay(); self.header.store(new as usize, SeqCst); } fn cas_header( &self, old: Header, new: Header, ) -> std::result::Result<Header, Header> { debug_delay(); let res = self.header.compare_and_swap( old as usize, new as usize, SeqCst, ) as Header; if res == old { Ok(new) } else { Err(res) } } fn cas_lid( &self, old: LogId, new: LogId, ) -> std::result::Result<LogId, LogId> { debug_delay(); let res = self.lid.compare_and_swap( old as usize, new as usize, SeqCst, ) as LogId; if res == old { Ok(new) } else { Err(res) } } } #[cfg_attr(not(feature = "no_inline"), inline)] fn is_sealed(v: Header) -> bool { v & 1 << 31 == 1 << 
31 } #[cfg_attr(not(feature = "no_inline"), inline)] fn mk_sealed(v: Header) -> Header { v | 1 << 31 } #[cfg_attr(not(feature = "no_inline"), inline)] fn n_writers(v: Header) -> Header { v << 33 >> 57 } #[cfg_attr(not(feature = "no_inline"), inline)] fn incr_writers(v: Header) -> Header { assert_ne!(n_writers(v), MAX_WRITERS); v + (1 << 24) } #[cfg_attr(not(feature = "no_inline"), inline)] fn decr_writers(v: Header) -> Header { assert_ne!(n_writers(v), 0); v - (1 << 24) } #[cfg_attr(not(feature = "no_inline"), inline)] fn offset(v: Header) -> Header { v << 40 >> 40 } #[cfg_attr(not(feature = "no_inline"), inline)] fn bump_offset(v: Header, by: Header) -> Header { assert_eq!(by >> 24, 0); v + by } #[cfg_attr(not(feature = "no_inline"), inline)] fn bump_salt(v: Header) -> Header { (v + (1 << 32)) & 0xFFFFFFFF00000000 } #[cfg_attr(not(feature = "no_inline"), inline)] fn salt(v: Header) -> Header { v >> 32 << 32 } Fix race condition in handling io buffer padding #[cfg(target_pointer_width = "32")] use std::sync::atomic::AtomicI64 as AtomicLsn; #[cfg(target_pointer_width = "64")] use std::sync::atomic::AtomicIsize as AtomicLsn; #[cfg(feature = "failpoints")] use std::sync::atomic::Ordering::Relaxed; use std::{ mem::size_of, sync::atomic::Ordering::SeqCst, sync::atomic::{spin_loop_hint, AtomicBool, AtomicUsize}, sync::{Arc, Condvar, Mutex}, }; #[cfg(feature = "zstd")] use zstd::block::compress; use self::reader::LogReader; use super::*; // This is the most writers in a single IO buffer // that we have space to accomodate in the counter // for writers in the IO buffer header. const MAX_WRITERS: Header = 127; type Header = u64; /// A logical sequence number. #[cfg(target_pointer_width = "64")] type InnerLsn = isize; #[cfg(target_pointer_width = "32")] type InnerLsn = i64; macro_rules! 
io_fail { ($self:expr, $e:expr) => { #[cfg(feature = "failpoints")] fail_point!($e, |_| { $self._failpoint_crashing.store(true, SeqCst); // wake up any waiting threads so they don't stall forever $self.interval_updated.notify_all(); Err(Error::FailPoint) }); }; } struct IoBuf { buf: UnsafeCell<Vec<u8>>, header: AtomicUsize, lid: AtomicUsize, lsn: AtomicUsize, capacity: AtomicUsize, maxed: AtomicBool, linearizer: Mutex<()>, } unsafe impl Sync for IoBuf {} pub(super) struct IoBufs { pub(super) config: Config, // We have a fixed number of io buffers. Sometimes they will all be // full, and in order to prevent threads from having to spin in // the reserve function, we can have them block until a buffer becomes // available. buf_mu: Mutex<()>, buf_updated: Condvar, bufs: Vec<IoBuf>, current_buf: AtomicUsize, written_bufs: AtomicUsize, // Pending intervals that have been written to stable storage, but may be // higher than the current value of `stable` due to interesting thread // interleavings. intervals: Mutex<Vec<(Lsn, Lsn)>>, interval_updated: Condvar, // The highest CONTIGUOUS log sequence number that has been written to // stable storage. This may be lower than the length of the underlying // file, and there may be buffers that have been written out-of-order // to stable storage due to interesting thread interleavings. stable_lsn: AtomicLsn, max_reserved_lsn: AtomicLsn, segment_accountant: Arc<Mutex<SegmentAccountant>>, // used for signifying that we're simulating a crash #[cfg(feature = "failpoints")] _failpoint_crashing: AtomicBool, } /// `IoBufs` is a set of lock-free buffers for coordinating /// writes to underlying storage. 
impl IoBufs {
    // Recovers (or initializes) the IO buffer ring from a snapshot:
    // determines the next lsn/lid to write at by probing the log tip,
    // starts the SegmentAccountant, primes the first buffer, and
    // seeds stable/max_reserved lsns.
    pub(crate) fn start<R>(
        config: Config,
        mut snapshot: Snapshot<R>,
    ) -> Result<IoBufs, ()> {
        // open file for writing
        let file = config.file()?;

        let io_buf_size = config.io_buf_size;

        let snapshot_max_lsn = snapshot.max_lsn;
        let snapshot_last_lid = snapshot.last_lid;

        let (next_lsn, next_lid) =
            if snapshot_max_lsn < SEG_HEADER_LEN as Lsn {
                // snapshot precedes any full segment header: start fresh
                snapshot.max_lsn = 0;
                snapshot.last_lid = 0;
                (0, 0)
            } else {
                // read the final message recorded in the snapshot and
                // resume just past it
                match file.read_message(snapshot_last_lid, &config) {
                    Ok(LogRead::Inline(_lsn, _buf, len)) => (
                        snapshot_max_lsn
                            + len as Lsn
                            + MSG_HEADER_LEN as Lsn,
                        snapshot_last_lid
                            + len as LogId
                            + MSG_HEADER_LEN as LogId,
                    ),
                    Ok(LogRead::Blob(_lsn, _buf, _blob_ptr)) => (
                        snapshot_max_lsn
                            + BLOB_INLINE_LEN as Lsn
                            + MSG_HEADER_LEN as Lsn,
                        snapshot_last_lid
                            + BLOB_INLINE_LEN as LogId
                            + MSG_HEADER_LEN as LogId,
                    ),
                    other => {
                        // we can overwrite this non-flush
                        debug!(
                            "got non-flush tip while recovering at {}: {:?}",
                            snapshot_last_lid, other
                        );
                        (snapshot_max_lsn, snapshot_last_lid)
                    }
                }
            };

        let mut segment_accountant =
            SegmentAccountant::start(config.clone(), snapshot)?;

        let bufs =
            rep_no_copy![IoBuf::new(io_buf_size); config.io_bufs];

        let current_buf = 0;

        trace!(
            "starting IoBufs with next_lsn: {} \
             next_lid: {}",
            next_lsn,
            next_lid
        );

        if next_lsn == 0 {
            // recovering at segment boundary
            assert_eq!(next_lid, next_lsn as LogId);
            let iobuf = &bufs[current_buf];
            let lid = segment_accountant.next(next_lsn)?;

            iobuf.set_lid(lid);
            iobuf.set_capacity(io_buf_size - SEG_TRAILER_LEN);
            iobuf.store_segment_header(0, next_lsn);

            maybe_fail!("initial allocation");
            // zero-fill the fresh segment before use
            file.pwrite_all(&*vec![0; config.io_buf_size], lid)?;
            file.sync_all()?;
            maybe_fail!("initial allocation post");

            debug!(
                "starting log at clean offset {}, recovered lsn {}",
                next_lid, next_lsn
            );
        } else {
            // the tip offset is not completely full yet, reuse it
            let iobuf = &bufs[current_buf];
            let offset = next_lid % io_buf_size as LogId;
            iobuf.set_lid(next_lid);
            iobuf.set_capacity(
                io_buf_size - offset as usize - SEG_TRAILER_LEN,
            );
            iobuf.set_lsn(next_lsn);

            debug!(
                "starting log at split offset {}, recovered lsn {}",
                next_lid, next_lsn
            );
        }

        // we want stable to begin at -1, since the 0th byte
        // of our file has not yet been written.
        let stable = if next_lsn == 0 { -1 } else { next_lsn - 1 };

        // remove all blob files larger than our stable offset
        gc_blobs(&config, stable)?;

        Ok(IoBufs {
            config: config,

            buf_mu: Mutex::new(()),
            buf_updated: Condvar::new(),
            bufs: bufs,
            current_buf: AtomicUsize::new(current_buf),
            written_bufs: AtomicUsize::new(0),

            intervals: Mutex::new(vec![]),
            interval_updated: Condvar::new(),

            stable_lsn: AtomicLsn::new(stable as InnerLsn),
            max_reserved_lsn: AtomicLsn::new(stable as InnerLsn),
            segment_accountant: Arc::new(Mutex::new(
                segment_accountant,
            )),

            #[cfg(feature = "failpoints")]
            _failpoint_crashing: AtomicBool::new(false),
        })
    }

    /// SegmentAccountant access for coordination with the `PageCache`
    pub(super) fn with_sa<B, F>(&self, f: F) -> B
    where
        F: FnOnce(&mut SegmentAccountant) -> B,
    {
        let start = clock();

        debug_delay();
        let mut sa = self.segment_accountant.lock().unwrap();

        let locked_at = clock();

        M.accountant_lock.measure(locked_at - start);

        let ret = f(&mut sa);

        drop(sa);

        M.accountant_hold.measure(clock() - locked_at);

        ret
    }

    /// SegmentAccountant access for coordination with the `PageCache`,
    /// performed after all threads have exited the currently checked-in
    /// epochs using a crossbeam-epoch EBR guard.
    ///
    /// IMPORTANT: Never call this function with anything that calls
    /// defer on the default EBR collector, or we could deadlock!
pub(super) unsafe fn with_sa_deferred<F>(&self, f: F) where F: FnOnce(&mut SegmentAccountant) + Send + 'static, { let guard = pin(); let segment_accountant = self.segment_accountant.clone(); guard.defer(move || { let start = clock(); debug_delay(); let mut sa = segment_accountant.lock().unwrap(); let locked_at = clock(); M.accountant_lock.measure(locked_at - start); let _ = f(&mut sa); drop(sa); M.accountant_hold.measure(clock() - locked_at); }); guard.flush(); } fn idx(&self) -> usize { debug_delay(); let current_buf = self.current_buf.load(SeqCst); current_buf % self.config.io_bufs } /// Returns the last stable offset in storage. pub(super) fn stable(&self) -> Lsn { debug_delay(); self.stable_lsn.load(SeqCst) as Lsn } // Adds a header to the front of the buffer fn encapsulate( &self, raw_buf: Vec<u8>, lsn: Lsn, over_blob_threshold: bool, is_blob_rewrite: bool, ) -> Result<Vec<u8>, ()> { let buf = if over_blob_threshold { // write blob to file io_fail!(self, "blob blob write"); write_blob(&self.config, lsn, raw_buf)?; let lsn_buf: [u8; size_of::<BlobPointer>()] = u64_to_arr(lsn as u64); lsn_buf.to_vec() } else { raw_buf }; let crc16 = crc16_arr(&buf); let header = MessageHeader { kind: if over_blob_threshold || is_blob_rewrite { MessageKind::Blob } else { MessageKind::Inline }, lsn: lsn, len: buf.len(), crc16: crc16, }; let header_bytes: [u8; MSG_HEADER_LEN] = header.into(); let mut out = vec![0; MSG_HEADER_LEN + buf.len()]; out[0..MSG_HEADER_LEN].copy_from_slice(&header_bytes); out[MSG_HEADER_LEN..].copy_from_slice(&*buf); Ok(out) } /// Tries to claim a reservation for writing a buffer to a /// particular location in stable storge, which may either be /// completed or aborted later. Useful for maintaining /// linearizability across CAS operations that may need to /// persist part of their operation. 
    ///
    /// # Panics
    ///
    /// Panics if the desired reservation is greater than the
    /// io buffer size minus the size of a segment header +
    /// a segment footer + a message header.
    pub(super) fn reserve(
        &self,
        raw_buf: Vec<u8>,
    ) -> Result<Reservation<'_>, ()> {
        self.reserve_inner(raw_buf, false)
    }

    /// Reserve a replacement buffer for a previously written
    /// blob write. This ensures the message header has the
    /// proper blob flag set.
    pub(super) fn reserve_blob(
        &self,
        blob_ptr: BlobPointer,
    ) -> Result<Reservation<'_>, ()> {
        let lsn_buf: [u8; size_of::<BlobPointer>()] =
            u64_to_arr(blob_ptr as u64);

        self.reserve_inner(lsn_buf.to_vec(), true)
    }

    // Core reservation loop: optionally compresses, decides whether
    // the payload goes inline or to a blob file, then spins/CASes on
    // the active buffer's header to claim a disjoint byte range.
    fn reserve_inner(
        &self,
        raw_buf: Vec<u8>,
        is_blob_rewrite: bool,
    ) -> Result<Reservation<'_>, ()> {
        let _measure = Measure::new(&M.reserve);

        let io_bufs = self.config.io_bufs;

        // right shift 32 on 32-bit pointer systems panics
        #[cfg(target_pointer_width = "64")]
        assert_eq!((raw_buf.len() + MSG_HEADER_LEN) >> 32, 0);

        #[cfg(feature = "zstd")]
        let buf = if self.config.use_compression {
            let _measure = Measure::new(&M.compress);
            compress(&*raw_buf, self.config.zstd_compression_factor)
                .unwrap()
        } else {
            raw_buf
        };

        #[cfg(not(feature = "zstd"))]
        let buf = raw_buf;

        let total_buf_len = MSG_HEADER_LEN + buf.len();

        let max_overhead =
            std::cmp::max(SEG_HEADER_LEN, SEG_TRAILER_LEN);

        let max_buf_size = (self.config.io_buf_size
            / MINIMUM_ITEMS_PER_SEGMENT)
            - max_overhead;

        let over_blob_threshold = total_buf_len > max_buf_size;

        // blobs only inline a header + BlobPointer (an Lsn)
        let inline_buf_len = if over_blob_threshold {
            MSG_HEADER_LEN + size_of::<Lsn>()
        } else {
            total_buf_len
        };

        trace!("reserving buf of len {}", inline_buf_len);

        let mut printed = false;
        // log each distinct spin reason at most once per call
        macro_rules! trace_once {
            ($($msg:expr),*) => {
                if !printed {
                    trace!($($msg),*);
                    printed = true;
                }
            };
        }

        let mut spins = 0;
        loop {
            M.log_reservation_attempted();
            #[cfg(feature = "failpoints")]
            {
                if self._failpoint_crashing.load(Relaxed) {
                    return Err(Error::FailPoint);
                }
            }

            let guard = pin();
            debug_delay();
            let written_bufs = self.written_bufs.load(SeqCst);
            debug_delay();
            let current_buf = self.current_buf.load(SeqCst);
            let idx = current_buf % io_bufs;

            spins += 1;
            if spins > 1_000_000 {
                debug!(
                    "stalling in reserve, idx {}, buf len {}",
                    idx, inline_buf_len,
                );
                spins = 0;
            }

            if written_bufs > current_buf {
                // This can happen because a reservation can finish up
                // before the sealing thread gets around to bumping
                // current_buf.
                trace_once!("written ahead of sealed, spinning");
                spin_loop_hint();
                continue;
            }

            if current_buf - written_bufs >= io_bufs {
                // if written is too far behind, we need to
                // spin while it catches up to avoid overlap
                trace_once!(
                    "old io buffer not written yet, spinning"
                );
                spin_loop_hint();

                // use a condition variable to wait until
                // we've updated the written_bufs counter.
                let _measure =
                    Measure::new(&M.reserve_written_condvar_wait);
                let mut buf_mu = self.buf_mu.lock().unwrap();
                while written_bufs == self.written_bufs.load(SeqCst)
                {
                    buf_mu = self.buf_updated.wait(buf_mu).unwrap();
                }
                continue;
            }

            // load current header value
            let iobuf = &self.bufs[idx];
            let header = iobuf.get_header();

            // skip if already sealed
            if is_sealed(header) {
                // already sealed, start over and hope cur
                // has already been bumped by sealer.
                trace_once!("io buffer already sealed, spinning");
                spin_loop_hint();

                // use a condition variable to wait until
                // we've updated the current_buf counter.
                let _measure =
                    Measure::new(&M.reserve_current_condvar_wait);
                let mut buf_mu = self.buf_mu.lock().unwrap();
                while current_buf == self.current_buf.load(SeqCst) {
                    buf_mu = self.buf_updated.wait(buf_mu).unwrap();
                }
                continue;
            }

            // try to claim space
            let buf_offset = offset(header);
            let prospective_size =
                buf_offset as usize + inline_buf_len;
            let would_overflow =
                prospective_size > iobuf.get_capacity();
            if would_overflow {
                // This buffer is too full to accept our write!
                // Try to seal the buffer, and maybe write it if
                // there are zero writers.
                trace_once!("io buffer too full, spinning");
                self.maybe_seal_and_write_iobuf(idx, header, true)?;
                spin_loop_hint();
                continue;
            }

            // attempt to claim by incrementing an unsealed header
            let bumped_offset =
                bump_offset(header, inline_buf_len as Header);

            // check for maxed out IO buffer writers
            if n_writers(bumped_offset) == MAX_WRITERS {
                trace_once!(
                    "spinning because our buffer has {} writers already",
                    MAX_WRITERS
                );
                spin_loop_hint();
                continue;
            }

            let claimed = incr_writers(bumped_offset);
            assert!(!is_sealed(claimed));

            if iobuf.cas_header(header, claimed).is_err() {
                // CAS failed, start over
                trace_once!(
                    "CAS failed while claiming buffer slot, spinning"
                );
                spin_loop_hint();
                continue;
            }

            // if we're giving out a reservation,
            // the writer count should be positive
            assert_ne!(n_writers(claimed), 0);

            let lid = iobuf.get_lid();
            assert_ne!(
                lid as usize,
                std::usize::MAX,
                "fucked up on idx {}\n{:?}",
                idx,
                self
            );

            let out_buf =
                unsafe { (*iobuf.buf.get()).as_mut_slice() };

            let res_start = buf_offset as usize;
            let res_end = res_start + inline_buf_len;
            let destination = &mut (out_buf)[res_start..res_end];

            let reservation_offset = lid + u64::from(buf_offset);
            let reservation_lsn =
                iobuf.get_lsn() + u64::from(buf_offset) as Lsn;

            trace!(
                "reserved {} bytes at lsn {} lid {}",
                inline_buf_len,
                reservation_lsn,
                reservation_offset,
            );

            self.bump_max_reserved_lsn(reservation_lsn);

            assert!(!(over_blob_threshold && is_blob_rewrite));
            let encapsulated_buf = self.encapsulate(
                buf,
                reservation_lsn,
                over_blob_threshold,
                is_blob_rewrite,
            )?;
            M.log_reservation_success();

            return Ok(Reservation {
                idx: idx,
                iobufs: self,
                data: encapsulated_buf,
                destination: destination,
                flushed: false,
                lsn: reservation_lsn,
                lid: reservation_offset,
                is_blob: over_blob_threshold || is_blob_rewrite,
                _guard: guard,
            });
        }
    }

    /// Called by Reservation on termination (completion or abort).
    /// Handles departure from shared state, and possibly writing
    /// the buffer to stable storage if necessary.
    pub(super) fn exit_reservation(
        &self,
        idx: usize,
    ) -> Result<(), ()> {
        let iobuf = &self.bufs[idx];
        let mut header = iobuf.get_header();

        // Decrement writer count, retrying until successful.
        let mut spins = 0;
        loop {
            spins += 1;
            if spins > 10 {
                debug!("have spun >10x in decr");
                spins = 0;
            }

            let new_hv = decr_writers(header);
            match iobuf.cas_header(header, new_hv) {
                Ok(new) => {
                    header = new;
                    break;
                }
                Err(new) => {
                    // we failed to decr, retry
                    header = new;
                }
            }
        }

        // Succeeded in decrementing writers, if we decremented writers
        // to 0 and it's sealed then we should write it to storage.
        if n_writers(header) == 0 && is_sealed(header) {
            trace!("exiting idx {} from res", idx);
            self.write_to_log(idx)
        } else {
            Ok(())
        }
    }

    /// blocks until the specified log sequence number has
    /// been made stable on disk
    pub(crate) fn make_stable(&self, lsn: Lsn) -> Result<(), ()> {
        let _measure = Measure::new(&M.make_stable);

        // NB before we write the 0th byte of the file, stable is -1
        while self.stable() < lsn {
            let idx = self.idx();
            let header = self.bufs[idx].get_header();
            if offset(header) == 0 || is_sealed(header) {
                // nothing to write, don't bother sealing
                // current IO buffer.
            } else {
                self.maybe_seal_and_write_iobuf(idx, header, false)?;
                continue;
            }

            // block until another thread updates the stable lsn
            let waiter = self.intervals.lock().unwrap();

            if self.stable() < lsn {
                #[cfg(feature = "failpoints")]
                {
                    if self._failpoint_crashing.load(SeqCst) {
                        return Err(Error::FailPoint);
                    }
                }
                trace!(
                    "waiting on cond var for make_stable({})",
                    lsn
                );

                let _waiter =
                    self.interval_updated.wait(waiter).unwrap();
            } else {
                trace!("make_stable({}) returning", lsn);
                break;
            }
        }

        Ok(())
    }

    /// Called by users who wish to force the current buffer
    /// to flush some pending writes.
    pub(super) fn flush(&self) -> Result<(), ()> {
        let max_reserved_lsn =
            self.max_reserved_lsn.load(SeqCst) as Lsn;
        self.make_stable(max_reserved_lsn)
    }

    // ensure self.max_reserved_lsn is set to this Lsn
    // or greater, for use in correct calls to flush.
    fn bump_max_reserved_lsn(&self, lsn: Lsn) {
        let mut current =
            self.max_reserved_lsn.load(SeqCst) as InnerLsn;
        loop {
            if current >= lsn as InnerLsn {
                return;
            }
            let last = self.max_reserved_lsn.compare_and_swap(
                current,
                lsn as InnerLsn,
                SeqCst,
            );
            if last == current {
                // we succeeded.
                return;
            }
            current = last;
        }
    }

    // Attempt to seal the current IO buffer, possibly
    // writing it to disk if there are no other writers
    // operating on it.
    fn maybe_seal_and_write_iobuf(
        &self,
        idx: usize,
        header: Header,
        from_reserve: bool,
    ) -> Result<(), ()> {
        let iobuf = &self.bufs[idx];

        if is_sealed(header) {
            // this buffer is already sealed. nothing to do here.
            return Ok(());
        }

        // NB need to do this before CAS because it can get
        // written and reset by another thread afterward
        let lid = iobuf.get_lid();
        let lsn = iobuf.get_lsn();
        let capacity = iobuf.get_capacity();
        let io_buf_size = self.config.io_buf_size;

        if offset(header) as usize > capacity {
            // a race happened, nothing we can do
            return Ok(());
        }

        let sealed = mk_sealed(header);
        let res_len = offset(sealed) as usize;
        let maxed = res_len == capacity;

        let worked = iobuf.linearized(|| {
            if iobuf.cas_header(header, sealed).is_err() {
                // cas failed, don't try to continue
                return false;
            }

            trace!("{} sealed", idx);

            if from_reserve || maxed {
                // NB we linearize this together with sealing
                // the header here to guarantee that in write_to_log,
                // which may be executing as soon as the seal is set
                // by another thread, the thread that calls
                // iobuf.get_maxed() is linearized with this one!
                trace!("setting maxed to true for idx {}", idx);
                iobuf.set_maxed(true);
            }
            true
        });
        if !worked {
            return Ok(());
        }

        assert!(
            capacity + SEG_HEADER_LEN >= res_len,
            "res_len of {} higher than buffer capacity {}",
            res_len,
            capacity
        );

        let max = std::usize::MAX as LogId;

        assert_ne!(
            lid,
            max,
            "sealing something that should never have \
             been claimed (idx {})\n{:?}",
            idx,
            self
        );

        // open new slot
        let mut next_lsn = lsn;

        let next_offset = if from_reserve || maxed {
            // roll lsn to the next offset
            let lsn_idx = lsn / io_buf_size as Lsn;
            next_lsn = (lsn_idx + 1) * io_buf_size as Lsn;

            // mark unused as clear
            debug!(
                "rolling to new segment after clearing {}-{}",
                lid,
                lid + res_len as LogId,
            );

            let ret = self.with_sa(|sa| sa.next(next_lsn));
            #[cfg(feature = "failpoints")]
            {
                if let Err(Error::FailPoint) = ret {
                    self._failpoint_crashing.store(true, SeqCst);
                    // wake up any waiting threads so they don't stall forever
                    self.interval_updated.notify_all();
                }
            }
            ret?
        } else {
            debug!(
                "advancing offset within the current segment from {} to {}",
                lid,
                lid + res_len as LogId
            );
            next_lsn += res_len as Lsn;

            let next_offset = lid + res_len as LogId;
            next_offset
        };

        let next_idx = (idx + 1) % self.config.io_bufs;
        let next_iobuf = &self.bufs[next_idx];

        // NB we spin on this CAS because the next iobuf may not actually
        // be written to disk yet! (we've lapped the writer in the iobuf
        // ring buffer)
        let mut spins = 0;
        while next_iobuf.cas_lid(max, next_offset).is_err() {
            spins += 1;
            if spins > 1_000_000 {
                debug!(
                    "have spun >1,000,000x in seal of buf {}",
                    idx
                );
                spins = 0;
            }
            #[cfg(feature = "failpoints")]
            {
                if self._failpoint_crashing.load(Relaxed) {
                    // panic!("propagating failpoint");
                    return Err(Error::FailPoint);
                }
            }
            spin_loop_hint();
        }
        trace!("{} log set to {}", next_idx, next_offset);

        // NB as soon as the "sealed" bit is 0, this allows new threads
        // to start writing into this buffer, so do that after it's all
        // set up. expect this thread to block until the buffer completes
        // its entire lifecycle as soon as we do that.
        if from_reserve || maxed {
            next_iobuf.set_capacity(io_buf_size - SEG_TRAILER_LEN);
            next_iobuf.store_segment_header(sealed, next_lsn);
        } else {
            let new_cap = capacity - res_len;
            assert_ne!(new_cap, 0);
            next_iobuf.set_capacity(new_cap);
            next_iobuf.set_lsn(next_lsn);
            let last_salt = salt(sealed);
            let new_salt = bump_salt(last_salt);
            next_iobuf.set_header(new_salt);
        }

        trace!("{} zeroed header", next_idx);

        // we acquire this mutex to guarantee that any threads that
        // are going to wait on the condition variable will observe
        // the change.
        debug_delay();
        let _ = self.buf_mu.lock().unwrap();

        // communicate to other threads that we have advanced an IO buffer.
        debug_delay();
        let _current_buf =
            self.current_buf.fetch_add(1, SeqCst) + 1;
        trace!(
            "{} current_buf",
            _current_buf % self.config.io_bufs
        );

        // let any threads that are blocked on buf_mu know about the
        // updated counter.
        debug_delay();
        self.buf_updated.notify_all();

        // if writers is 0, it's our responsibility to write the buffer.
        if n_writers(sealed) == 0 {
            trace!("writing to log from maybe_seal");
            self.write_to_log(idx)
        } else {
            Ok(())
        }
    }

    // Write an IO buffer's data to stable storage and set up the
    // next IO buffer for writing.
    fn write_to_log(&self, idx: usize) -> Result<(), ()> {
        let _measure = Measure::new(&M.write_to_log);
        let iobuf = &self.bufs[idx];
        let header = iobuf.get_header();
        let lid = iobuf.get_lid();
        let base_lsn = iobuf.get_lsn();
        let capacity = iobuf.get_capacity();

        let io_buf_size = self.config.io_buf_size;

        assert_eq!(
            (lid % io_buf_size as LogId) as Lsn,
            base_lsn % io_buf_size as Lsn
        );

        assert_ne!(
            lid as usize,
            std::usize::MAX,
            "created reservation for uninitialized slot",
        );

        assert!(is_sealed(header));

        let res_len = offset(header) as usize;

        // linearized with the sealing thread's set_maxed(true)
        let maxed = iobuf.linearized(|| iobuf.get_maxed());
        let unused_space = capacity - res_len;
        let should_pad = unused_space >= MSG_HEADER_LEN;

        let total_len = if maxed && should_pad {
            // fill the rest of the segment with a deterministic
            // Pad message over EVIL_BYTE filler, so recovery can
            // verify the padding's crc16 instead of reading
            // whatever bytes happened to be in the buffer
            let offset = offset(header) as usize;
            let data =
                unsafe { (*iobuf.buf.get()).as_mut_slice() };
            let len = capacity - offset - MSG_HEADER_LEN;

            let padding_bytes = vec![EVIL_BYTE; len];
            let crc16 = crc16_arr(&*padding_bytes);

            let header = MessageHeader {
                kind: MessageKind::Pad,
                lsn: base_lsn + offset as Lsn,
                len: len,
                crc16: crc16,
            };

            let header_bytes: [u8; MSG_HEADER_LEN] = header.into();

            data[offset..offset + MSG_HEADER_LEN]
                .copy_from_slice(&header_bytes);
            data[offset + MSG_HEADER_LEN..capacity]
                .copy_from_slice(&*padding_bytes);

            capacity
        } else {
            res_len
        };

        let data = unsafe { (*iobuf.buf.get()).as_mut_slice() };

        let f = self.config.file()?;
        io_fail!(self, "buffer write");
        f.pwrite_all(&data[..total_len], lid)?;
        f.sync_all()?;
        io_fail!(self, "buffer write post");

        // write a trailer if we're maxed
        if maxed {
            let segment_lsn =
                base_lsn / io_buf_size as Lsn * io_buf_size as Lsn;
            let segment_lid =
                lid / io_buf_size as LogId * io_buf_size as LogId;

            let trailer_overhang =
                io_buf_size as Lsn - SEG_TRAILER_LEN as Lsn;

            let trailer_lid =
                segment_lid + trailer_overhang as LogId;
            let trailer_lsn = segment_lsn + trailer_overhang;

            let trailer = SegmentTrailer {
                lsn: trailer_lsn,
                ok: true,
            };

            let trailer_bytes: [u8; SEG_TRAILER_LEN] =
                trailer.into();

            io_fail!(self, "trailer write");
            f.pwrite_all(&trailer_bytes, trailer_lid)?;
            f.sync_all()?;
            io_fail!(self, "trailer write post");

            M.written_bytes.measure(SEG_TRAILER_LEN as f64);

            iobuf.set_maxed(false);

            debug!(
                "wrote trailer at lid {} for lsn {}",
                trailer_lid, trailer_lsn
            );

            // transition this segment into deplete-only mode now
            // that n_writers is 0, and all calls to mark_replace/link
            // happen before the reservation completes.
            trace!(
                "deactivating segment with lsn {} at idx {} with lid {}",
                segment_lsn,
                idx,
                lid
            );

            unsafe {
                self.with_sa_deferred(move |sa| {
                    trace!("EBR deactivating segment {} with lsn {} and lid {}", idx, segment_lsn, segment_lid);
                    if let Err(e) = sa.deactivate_segment(segment_lsn, segment_lid) {
                        error!("segment accountant failed to deactivate segment: {}", e);
                    }
                });
            }
        } else {
            trace!(
                "not deactivating segment with lsn {}",
                base_lsn / io_buf_size as Lsn * io_buf_size as Lsn
            );
        }

        if total_len > 0 || maxed {
            let complete_len = if maxed {
                // a maxed buffer covers through the end of its segment
                let lsn_idx = base_lsn as usize / io_buf_size;
                let next_seg_beginning = (lsn_idx + 1) * io_buf_size;
                next_seg_beginning - base_lsn as usize
            } else {
                total_len
            };

            debug!(
                "wrote lsns {}-{} to disk at offsets {}-{} in buffer {}",
                base_lsn,
                base_lsn + total_len as Lsn - 1,
                lid,
                lid + total_len as LogId - 1,
                idx
            );
            self.mark_interval(base_lsn, complete_len);
        }

        M.written_bytes.measure(total_len as f64);

        // signal that this IO buffer is now uninitialized
        let max = std::usize::MAX as LogId;
        iobuf.set_lid(max);
        trace!("{} log <- MAX", idx);

        // we acquire this mutex to guarantee that any threads that
        // are going to wait on the condition variable will observe
        // the change.
        debug_delay();
        let _ = self.buf_mu.lock().unwrap();

        // communicate to other threads that we have written an IO buffer.
        debug_delay();
        let _written_bufs = self.written_bufs.fetch_add(1, SeqCst);
        trace!("{} written", _written_bufs % self.config.io_bufs);

        // let any threads that are blocked on buf_mu know about the
        // updated counter.
        debug_delay();
        self.buf_updated.notify_all();

        Ok(())
    }

    // It's possible that IO buffers are written out of order!
    // So we need to use this to keep track of them, and only
    // increment self.stable. If we didn't do this, then we would
    // accidentally decrement self.stable sometimes, or bump stable
    // above an offset that corresponds to a buffer that hasn't actually
    // been written yet!
// It's OK to use a mutex here because it is pretty
// fast, compared to the other operations on shared state.
//
// Merges a newly durable [whence, whence + len - 1] byte range into
// the pending-interval set and advances `stable_lsn` when contiguous.
fn mark_interval(&self, whence: Lsn, len: usize) {
    trace!("mark_interval({}, {})", whence, len);
    assert_ne!(
        len,
        0,
        "mark_interval called with a zero-length range, starting from {}",
        whence
    );
    let mut intervals = self.intervals.lock().unwrap();

    let interval = (whence, whence + len as Lsn - 1);
    intervals.push(interval);

    debug_assert!(
        intervals.len() < 1000,
        "intervals is getting crazy... {:?}",
        *intervals
    );

    // reverse sort
    intervals.sort_unstable_by(|a, b| b.cmp(a));

    let mut updated = false;

    let len_before = intervals.len();

    while let Some(&(low, high)) = intervals.last() {
        assert_ne!(low, high);
        let cur_stable = self.stable_lsn.load(SeqCst) as Lsn;
        assert!(
            low > cur_stable,
            "somehow, we marked offset {} stable while \
             interval {}-{} had not yet been applied!",
            cur_stable,
            low,
            high
        );
        if cur_stable + 1 == low {
            // contiguous with the stable frontier: publish it
            let old =
                self.stable_lsn.swap(high as InnerLsn, SeqCst) as Lsn;
            assert_eq!(
                old, cur_stable,
                "concurrent stable offset modification detected"
            );
            debug!("new highest interval: {} - {}", low, high);
            intervals.pop();
            updated = true;
        } else {
            break;
        }
    }

    if len_before - intervals.len() > 100 {
        debug!(
            "large merge of {} intervals",
            len_before - intervals.len()
        );
    }

    if updated {
        // wake threads blocked in make_stable
        self.interval_updated.notify_all();
    }
}
}

impl Drop for IoBufs {
    fn drop(&mut self) {
        // don't do any more IO if we're simulating a crash
        #[cfg(feature = "failpoints")]
        {
            if self._failpoint_crashing.load(SeqCst) {
                return;
            }
        }

        // best-effort final flush; Drop cannot propagate errors
        if let Err(e) = self.flush() {
            error!("failed to flush from IoBufs::drop: {}", e);
        }

        if let Ok(f) = self.config.file() {
            f.sync_all().unwrap();
        }

        debug!("IoBufs dropped");
    }
}

// Periodic background flush hook.
impl periodic::Callback for std::sync::Arc<IoBufs> {
    fn call(&self) {
        if let Err(e) = self.flush() {
            #[cfg(feature = "failpoints")]
            {
                if let Error::FailPoint = e {
                    self._failpoint_crashing.store(true, SeqCst);

                    // wake up any waiting threads so they don't stall forever
                    self.interval_updated.notify_all();
                }
            }

            error!(
                "failed to flush from periodic flush thread: {}",
                e
            );
        }
    }
}

impl Debug for IoBufs {
    fn fmt(
        &self,
        formatter: &mut fmt::Formatter<'_>,
    ) -> std::result::Result<(), fmt::Error> {
        debug_delay();
        let current_buf = self.current_buf.load(SeqCst);
        debug_delay();
        let written_bufs = self.written_bufs.load(SeqCst);

        formatter.write_fmt(format_args!(
            "IoBufs {{ sealed: {}, written: {}, bufs: {:?} }}",
            current_buf, written_bufs, self.bufs
        ))
    }
}

impl Debug for IoBuf {
    fn fmt(
        &self,
        formatter: &mut fmt::Formatter<'_>,
    ) -> std::result::Result<(), fmt::Error> {
        let header = self.get_header();
        formatter.write_fmt(format_args!(
            "\n\tIoBuf {{ lid: {}, n_writers: {}, offset: \
             {}, sealed: {} }}",
            self.get_lid(),
            n_writers(header),
            offset(header),
            is_sealed(header)
        ))
    }
}

impl IoBuf {
    // Creates a zeroed, unassigned buffer; lid usize::MAX means
    // "not yet bound to a log offset".
    fn new(buf_size: usize) -> IoBuf {
        IoBuf {
            buf: UnsafeCell::new(vec![0; buf_size]),
            header: AtomicUsize::new(0),
            lid: AtomicUsize::new(std::usize::MAX),
            lsn: AtomicUsize::new(0),
            capacity: AtomicUsize::new(0),
            maxed: AtomicBool::new(false),
            linearizer: Mutex::new(()),
        }
    }

    // use this for operations on an IoBuf that must be
    // linearized together, and can't fit in the header!
    fn linearized<F, B>(&self, f: F) -> B
    where
        F: FnOnce() -> B,
    {
        let _l = self.linearizer.lock().unwrap();
        f()
    }

    // This is called upon the initialization of a fresh segment.
    // We write a new segment header to the beginning of the buffer
    // for assistance during recovery. The caller is responsible
    // for ensuring that the IoBuf's capacity has been set properly.
    // Writes a `SegmentHeader` for `lsn` into the first SEG_HEADER_LEN
    // bytes of the buffer, then installs a fresh header word whose salt
    // is bumped past `last`'s and whose offset already covers the
    // segment header, so subsequent writes land after it.
    fn store_segment_header(&self, last: Header, lsn: Lsn) {
        debug!("storing lsn {} in beginning of buffer", lsn);
        assert!(
            self.get_capacity() >= SEG_HEADER_LEN + SEG_TRAILER_LEN
        );

        self.set_lsn(lsn);

        let header = SegmentHeader { lsn: lsn, ok: true };
        let header_bytes: [u8; SEG_HEADER_LEN] = header.into();

        unsafe {
            (*self.buf.get())[0..SEG_HEADER_LEN]
                .copy_from_slice(&header_bytes);
        }

        // ensure writes to the buffer land after our header.
        let last_salt = salt(last);
        let new_salt = bump_salt(last_salt);
        let bumped = bump_offset(new_salt, SEG_HEADER_LEN as Header);
        self.set_header(bumped);
    }

    // Atomic accessor pairs below. debug_delay() is presumably a
    // test-build scheduling perturbation — confirm at its definition.
    fn set_capacity(&self, cap: usize) {
        debug_delay();
        self.capacity.store(cap, SeqCst);
    }

    fn get_capacity(&self) -> usize {
        debug_delay();
        self.capacity.load(SeqCst)
    }

    fn set_lsn(&self, lsn: Lsn) {
        debug_delay();
        self.lsn.store(lsn as usize, SeqCst);
    }

    fn set_maxed(&self, maxed: bool) {
        debug_delay();
        self.maxed.store(maxed, SeqCst);
    }

    fn get_maxed(&self) -> bool {
        debug_delay();
        self.maxed.load(SeqCst)
    }

    fn get_lsn(&self) -> Lsn {
        debug_delay();
        self.lsn.load(SeqCst) as Lsn
    }

    fn set_lid(&self, offset: LogId) {
        debug_delay();
        self.lid.store(offset as usize, SeqCst);
    }

    fn get_lid(&self) -> LogId {
        debug_delay();
        self.lid.load(SeqCst) as LogId
    }

    fn get_header(&self) -> Header {
        debug_delay();
        self.header.load(SeqCst) as Header
    }

    fn set_header(&self, new: Header) {
        debug_delay();
        self.header.store(new as usize, SeqCst);
    }

    // CAS on the packed header word. Returns Ok(new) if `old` was
    // current, otherwise Err(actual current value).
    // NOTE(review): `compare_and_swap` is deprecated in newer Rust;
    // `compare_exchange` is the modern equivalent.
    fn cas_header(
        &self,
        old: Header,
        new: Header,
    ) -> std::result::Result<Header, Header> {
        debug_delay();
        let res = self.header.compare_and_swap(
            old as usize,
            new as usize,
            SeqCst,
        ) as Header;
        if res == old {
            Ok(new)
        } else {
            Err(res)
        }
    }

    // CAS on the log offset; same Ok/Err convention as cas_header.
    fn cas_lid(
        &self,
        old: LogId,
        new: LogId,
    ) -> std::result::Result<LogId, LogId> {
        debug_delay();
        let res = self.lid.compare_and_swap(
            old as usize,
            new as usize,
            SeqCst,
        ) as LogId;
        if res == old {
            Ok(new)
        } else {
            Err(res)
        }
    }
}

// Bit layout of the packed `Header` word, assuming `Header` is 64 bits
// wide (the `<< 33 >> 57` / `<< 40 >> 40` shifts and the
// 0xFFFFFFFF00000000 mask below only make sense for 64 bits — confirm
// against `Header`'s definition):
//
//   bits  0..=23  buffer offset
//   bits 24..=30  number of active writers
//   bit  31       sealed flag
//   bits 32..=63  salt (generation counter; bumped once per segment)

#[cfg_attr(not(feature = "no_inline"), inline)]
fn is_sealed(v: Header) -> bool {
    v & 1 << 31 == 1 << 31
}

#[cfg_attr(not(feature = "no_inline"), inline)]
fn mk_sealed(v: Header) -> Header {
    v | 1 << 31
}

// Extracts bits 24..=30 (the writer count).
#[cfg_attr(not(feature = "no_inline"), inline)]
fn n_writers(v: Header) -> Header {
    v << 33 >> 57
}

// Adds one writer; panics if the 7-bit field is already at MAX_WRITERS.
#[cfg_attr(not(feature = "no_inline"), inline)]
fn incr_writers(v: Header) -> Header {
    assert_ne!(n_writers(v), MAX_WRITERS);
    v + (1 << 24)
}

// Removes one writer; panics on underflow.
#[cfg_attr(not(feature = "no_inline"), inline)]
fn decr_writers(v: Header) -> Header {
    assert_ne!(n_writers(v), 0);
    v - (1 << 24)
}

// Extracts bits 0..=23 (the buffer offset).
#[cfg_attr(not(feature = "no_inline"), inline)]
fn offset(v: Header) -> Header {
    v << 40 >> 40
}

// Advances the offset field; `by` must fit in the 24-bit offset field.
#[cfg_attr(not(feature = "no_inline"), inline)]
fn bump_offset(v: Header, by: Header) -> Header {
    assert_eq!(by >> 24, 0);
    v + by
}

// Increments the salt and clears offset/writers/sealed in one step.
#[cfg_attr(not(feature = "no_inline"), inline)]
fn bump_salt(v: Header) -> Header {
    (v + (1 << 32)) & 0xFFFFFFFF00000000
}

// Extracts bits 32..=63 (the salt), keeping them in place.
#[cfg_attr(not(feature = "no_inline"), inline)]
fn salt(v: Header) -> Header {
    v >> 32 << 32
}
//! Just embed git-hash to `--version`

use std::{env, path::PathBuf, process::Command};

fn main() {
    set_rerun();
    // Prefer an explicitly provided revision (e.g. set by CI or distro
    // packagers via RUST_ANALYZER_REV); otherwise ask git; otherwise fall
    // back to a placeholder so the build still succeeds outside a checkout.
    let rev =
        env::var("RUST_ANALYZER_REV").ok().or_else(rev).unwrap_or_else(|| "???????".to_string());
    println!("cargo:rustc-env=REV={}", rev)
}

/// Emits the `rerun-if-*` directives so cargo re-runs this script whenever
/// the embedded revision could have changed: the env override, `.git/HEAD`,
/// and the ref `HEAD` currently points at.
fn set_rerun() {
    println!("cargo:rerun-if-env-changed=RUST_ANALYZER_REV");

    let mut manifest_dir = PathBuf::from(
        env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is always set by cargo."),
    );

    // Walk up from the crate directory to find the repository root,
    // identified by the presence of `.git/HEAD`.
    while manifest_dir.parent().is_some() {
        if manifest_dir.join(".git/HEAD").exists() {
            let git_dir = manifest_dir.join(".git");

            println!("cargo:rerun-if-changed={}", git_dir.join("HEAD").display());
            // current branch ref
            if let Ok(output) =
                Command::new("git").args(&["rev-parse", "--symbolic-full-name", "HEAD"]).output()
            {
                if let Ok(ref_link) = String::from_utf8(output.stdout) {
                    println!("cargo:rerun-if-changed={}", git_dir.join(ref_link).display());
                }
            }
            return;
        }
        manifest_dir.pop();
    }

    println!("cargo:warning=Could not find `.git/HEAD` from manifest dir!");
}

/// Returns the short (7-character) hash of the checked-out git revision,
/// or `None` if git is unavailable or we are not inside a work tree.
fn rev() -> Option<String> {
    let output = Command::new("git").args(&["rev-parse", "HEAD"]).output().ok()?;
    let stdout = String::from_utf8(output.stdout).ok()?;
    let short_hash = stdout.get(0..7)?;
    Some(short_hash.to_owned())
}
use core::borrow::{Borrow, BorrowMut}; use core::cmp; use core::fmt; use core::marker::PhantomData; use core::mem::{self, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::slice; use core::sync::atomic::Ordering; use crate::alloc::alloc; use crate::alloc::boxed::Box; use crate::guard::Guard; use crate::primitive::sync::atomic::AtomicUsize; use crossbeam_utils::atomic::AtomicConsume; /// Given ordering for the success case in a compare-exchange operation, returns the strongest /// appropriate ordering for the failure case. #[inline] fn strongest_failure_ordering(ord: Ordering) -> Ordering { use self::Ordering::*; match ord { Relaxed | Release => Relaxed, Acquire | AcqRel => Acquire, _ => SeqCst, } } /// The error returned on failed compare-and-set operation. // TODO: remove in the next major version. #[deprecated(note = "Use `CompareExchangeError` instead")] pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>; /// The error returned on failed compare-and-swap operation. pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> { /// The value in the atomic pointer at the time of the failed operation. pub current: Shared<'g, T>, /// The new value, which the operation failed to store. pub new: P, } impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("CompareExchangeError") .field("current", &self.current) .field("new", &self.new) .finish() } } /// Memory orderings for compare-and-set operations. /// /// A compare-and-set operation can have different memory orderings depending on whether it /// succeeds or fails. This trait generalizes different ways of specifying memory orderings. /// /// The two ways of specifying orderings for compare-and-set are: /// /// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate /// ordering is chosen. /// 2. A pair of `Ordering`s. 
The first one is for the success case, while the second one is /// for the failure case. // TODO: remove in the next major version. #[deprecated( note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \ use `compare_exchange` or `compare_exchange_weak instead`" )] pub trait CompareAndSetOrdering { /// The ordering of the operation when it succeeds. fn success(&self) -> Ordering; /// The ordering of the operation when it fails. /// /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than /// the success ordering. fn failure(&self) -> Ordering; } #[allow(deprecated)] impl CompareAndSetOrdering for Ordering { #[inline] fn success(&self) -> Ordering { *self } #[inline] fn failure(&self) -> Ordering { strongest_failure_ordering(*self) } } #[allow(deprecated)] impl CompareAndSetOrdering for (Ordering, Ordering) { #[inline] fn success(&self) -> Ordering { self.0 } #[inline] fn failure(&self) -> Ordering { self.1 } } /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. #[inline] fn low_bits<T: ?Sized + Pointable>() -> usize { (1 << T::ALIGN.trailing_zeros()) - 1 } /// Panics if the pointer is not properly unaligned. #[inline] fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) { assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer"); } /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. /// /// `tag` is truncated to fit into the unused bits of the pointer to `T`. #[inline] fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize { (data & !low_bits::<T>()) | (tag & low_bits::<T>()) } /// Decomposes a tagged pointer `data` into the pointer and the tag. #[inline] fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) { (data & !low_bits::<T>(), data & low_bits::<T>()) } /// Types that are pointed to by a single word. 
/// /// In concurrent programming, it is necessary to represent an object within a word because atomic /// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait /// qualifies such types that are pointed to by a single word. /// /// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is /// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for /// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array /// size and elements. /// /// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In /// particular, Crossbeam supports dynamically sized slices as follows. /// /// ``` /// use std::mem::MaybeUninit; /// use crossbeam_epoch::Owned; /// /// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10] /// ``` pub trait Pointable { /// The alignment of pointer. const ALIGN: usize; /// The type for initializers. type Init; /// Initializes a with the given initializer. /// /// # Safety /// /// The result should be a multiple of `ALIGN`. unsafe fn init(init: Self::Init) -> usize; /// Dereferences the given pointer. /// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently. unsafe fn deref<'a>(ptr: usize) -> &'a Self; /// Mutably dereferences the given pointer. /// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] /// concurrently. unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self; /// Drops the object pointed to by the given pointer. 
/// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] /// concurrently. unsafe fn drop(ptr: usize); } impl<T> Pointable for T { const ALIGN: usize = mem::align_of::<T>(); type Init = T; unsafe fn init(init: Self::Init) -> usize { Box::into_raw(Box::new(init)) as usize } unsafe fn deref<'a>(ptr: usize) -> &'a Self { &*(ptr as *const T) } unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self { &mut *(ptr as *mut T) } unsafe fn drop(ptr: usize) { drop(Box::from_raw(ptr as *mut T)); } } /// Array with size. /// /// # Memory layout /// /// An array consisting of size and elements: /// /// ```text /// elements /// | /// | /// ------------------------------------ /// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 | /// ------------------------------------ /// ``` /// /// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not /// along with pointer as in `Box<[T]>`). /// /// Elements are not present in the type, but they will be in the allocation. /// ``` /// // TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use // [`alloc::alloc::Layout::extend`] instead. 
#[repr(C)] struct Array<T> { size: usize, elements: [MaybeUninit<T>; 0], } impl<T> Pointable for [MaybeUninit<T>] { const ALIGN: usize = mem::align_of::<Array<T>>(); type Init = usize; unsafe fn init(size: Self::Init) -> usize { let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * size; let align = mem::align_of::<Array<T>>(); let layout = alloc::Layout::from_size_align(size, align).unwrap(); let ptr = alloc::alloc(layout) as *mut Array<T>; if ptr.is_null() { alloc::handle_alloc_error(layout); } (*ptr).size = size; ptr as usize } unsafe fn deref<'a>(ptr: usize) -> &'a Self { let array = &*(ptr as *const Array<T>); slice::from_raw_parts(array.elements.as_ptr() as *const _, array.size) } unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self { let array = &*(ptr as *mut Array<T>); slice::from_raw_parts_mut(array.elements.as_ptr() as *mut _, array.size) } unsafe fn drop(ptr: usize) { let array = &*(ptr as *mut Array<T>); let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.size; let align = mem::align_of::<Array<T>>(); let layout = alloc::Layout::from_size_align(size, align).unwrap(); alloc::dealloc(ptr as *mut u8, layout); } } /// An atomic pointer that can be safely shared between threads. /// /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. For example, the tag for a pointer to a sized type `T` /// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`. /// /// Any method that loads the pointer must be passed a reference to a [`Guard`]. /// /// Crossbeam supports dynamically sized types. See [`Pointable`] for details. 
pub struct Atomic<T: ?Sized + Pointable> { data: AtomicUsize, _marker: PhantomData<*mut T>, } unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {} unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {} impl<T> Atomic<T> { /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::new(1234); /// ``` pub fn new(init: T) -> Atomic<T> { Self::init(init) } } impl<T: ?Sized + Pointable> Atomic<T> { /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::<i32>::init(1234); /// ``` pub fn init(init: T::Init) -> Atomic<T> { Self::from(Owned::init(init)) } /// Returns a new atomic pointer pointing to the tagged pointer `data`. fn from_usize(data: usize) -> Self { Self { data: AtomicUsize::new(data), _marker: PhantomData, } } /// Returns a new null atomic pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::<i32>::null(); /// ``` /// #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)] pub fn null() -> Atomic<T> { Self { data: AtomicUsize::new(0), _marker: PhantomData, } } /// Loads a `Shared` from the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// ``` pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.load(ord)) } } /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. 
/// /// This is similar to the "acquire" ordering, except that an ordering is /// only guaranteed with operations that "depend on" the result of the load. /// However consume loads are usually much faster than acquire loads on /// architectures with a weak memory model since they don't require memory /// fence instructions. /// /// The exact definition of "depend on" is a bit vague, but it works as you /// would expect in practice since a lot of software, especially the Linux /// kernel, rely on this behavior. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load_consume(guard); /// ``` pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.load_consume()) } } /// Stores a `Shared` or `Owned` pointer into the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// a.store(Shared::null(), SeqCst); /// a.store(Owned::new(1234), SeqCst); /// ``` pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) { self.data.store(new.into_usize(), ord); } /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous /// `Shared`. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.swap(Shared::null(), SeqCst, guard); /// ``` pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// The return value is a result indicating whether the new pointer was written. On success the /// pointer that was written is returned. On failure the actual current value and `new` are /// returned. /// /// This method takes two `Ordering` arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using `Acquire` as success ordering makes the store part /// of this operation `Relaxed`, and using `Release` makes the successful load /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` /// and must be equivalent to or weaker than the success ordering. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// /// let guard = &epoch::pin(); /// let curr = a.load(SeqCst, guard); /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard); /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard); /// ``` pub fn compare_exchange<'g, P>( &self, current: Shared<'_, T>, new: P, success: Ordering, failure: Ordering, _: &'g Guard, ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> where P: Pointer<T>, { let new = new.into_usize(); self.data .compare_exchange(current.into_usize(), new, success, failure) .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareExchangeError { current: Shared::from_usize(current), new: P::from_usize(new), } }) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison /// succeeds, which can result in more efficient code on some platforms. The return value is a /// result indicating whether the new pointer was written. On success the pointer that was /// written is returned. On failure the actual current value and `new` are returned. /// /// This method takes two `Ordering` arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. 
Using `Acquire` as success ordering makes the store part /// of this operation `Relaxed`, and using `Release` makes the successful load /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` /// and must be equivalent to or weaker than the success ordering. /// /// [`compare_exchange`]: Atomic::compare_exchange /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// /// let mut new = Owned::new(5678); /// let mut ptr = a.load(SeqCst, guard); /// loop { /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) { /// Ok(p) => { /// ptr = p; /// break; /// } /// Err(err) => { /// ptr = err.current; /// new = err.new; /// } /// } /// } /// /// let mut curr = a.load(SeqCst, guard); /// loop { /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) { /// Ok(_) => break, /// Err(err) => curr = err.current, /// } /// } /// ``` pub fn compare_exchange_weak<'g, P>( &self, current: Shared<'_, T>, new: P, success: Ordering, failure: Ordering, _: &'g Guard, ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> where P: Pointer<T>, { let new = new.into_usize(); self.data .compare_exchange_weak(current.into_usize(), new, success, failure) .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareExchangeError { current: Shared::from_usize(current), new: P::from_usize(new), } }) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// The return value is a result indicating whether the new pointer was written. On success the /// pointer that was written is returned. On failure the actual current value and `new` are /// returned. 
/// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory /// ordering of this operation. /// /// # Migrating to `compare_exchange` /// /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// # Examples /// /// ``` /// # #![allow(deprecated)] /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// /// let guard = &epoch::pin(); /// let curr = a.load(SeqCst, guard); /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); /// ``` // TODO: remove in the next major version. #[allow(deprecated)] #[deprecated(note = "Use `compare_exchange` instead")] pub fn compare_and_set<'g, O, P>( &self, current: Shared<'_, T>, new: P, ord: O, guard: &'g Guard, ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> where O: CompareAndSetOrdering, P: Pointer<T>, { self.compare_exchange(current, new, ord.success(), ord.failure(), guard) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison /// succeeds, which can result in more efficient code on some platforms. The return value is a /// result indicating whether the new pointer was written. On success the pointer that was /// written is returned. On failure the actual current value and `new` are returned. 
/// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory /// ordering of this operation. /// /// [`compare_and_set`]: Atomic::compare_and_set /// /// # Migrating to `compare_exchange_weak` /// /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// # Examples /// /// ``` /// # #![allow(deprecated)] /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// /// let mut new = Owned::new(5678); /// let mut ptr = a.load(SeqCst, guard); /// loop { /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { /// Ok(p) => { /// ptr = p; /// break; /// } /// Err(err) => { /// ptr = err.current; /// new = err.new; /// } /// } /// } /// /// let mut curr = a.load(SeqCst, guard); /// loop { /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { /// Ok(_) => break, /// Err(err) => curr = err.current, /// } /// } /// ``` // TODO: remove in the next major version. #[allow(deprecated)] #[deprecated(note = "Use `compare_exchange_weak` instead")] pub fn compare_and_set_weak<'g, O, P>( &self, current: Shared<'_, T>, new: P, ord: O, guard: &'g Guard, ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> where O: CompareAndSetOrdering, P: Pointer<T>, { self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard) } /// Bitwise "and" with the current tag. /// /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. 
/// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(3)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) } } /// Bitwise "or" with the current tag. /// /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 3); /// ``` pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) } } /// Bitwise "xor" with the current tag. /// /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) } } /// Takes ownership of the pointee. /// /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for /// destructors of data structures. /// /// # Panics /// /// Panics if this pointer is null, but only in debug mode. /// /// # Safety /// /// This method may be called only if the pointer is valid and nobody else is holding a /// reference to the same object. /// /// # Examples /// /// ```rust /// # use std::mem; /// # use crossbeam_epoch::Atomic; /// struct DataStructure { /// ptr: Atomic<usize>, /// } /// /// impl Drop for DataStructure { /// fn drop(&mut self) { /// // By now the DataStructure lives only in our thread and we are sure we don't hold /// // any Shared or & to it ourselves. 
/// unsafe { /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned()); /// } /// } /// } /// ``` pub unsafe fn into_owned(self) -> Owned<T> { #[cfg(crossbeam_loom)] { // FIXME: loom does not yet support into_inner, so we use unsync_load for now, // which should have the same synchronization properties: // https://github.com/tokio-rs/loom/issues/117 Owned::from_usize(self.data.unsync_load()) } #[cfg(not(crossbeam_loom))] { Owned::from_usize(self.data.into_inner()) } } } impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let data = self.data.load(Ordering::SeqCst); let (raw, tag) = decompose_tag::<T>(data); f.debug_struct("Atomic") .field("raw", &raw) .field("tag", &tag) .finish() } } impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let data = self.data.load(Ordering::SeqCst); let (raw, _) = decompose_tag::<T>(data); fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f) } } impl<T: ?Sized + Pointable> Clone for Atomic<T> { /// Returns a copy of the atomic value. /// /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other /// atomics or fences. fn clone(&self) -> Self { let data = self.data.load(Ordering::Relaxed); Atomic::from_usize(data) } } impl<T: ?Sized + Pointable> Default for Atomic<T> { fn default() -> Self { Atomic::null() } } impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> { /// Returns a new atomic pointer pointing to `owned`. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{Atomic, Owned}; /// /// let a = Atomic::<i32>::from(Owned::new(1234)); /// ``` fn from(owned: Owned<T>) -> Self { let data = owned.data; mem::forget(owned); Self::from_usize(data) } } impl<T> From<Box<T>> for Atomic<T> { fn from(b: Box<T>) -> Self { Self::from(Owned::from(b)) } } impl<T> From<T> for Atomic<T> { fn from(t: T) -> Self { Self::new(t) } } impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> { /// Returns a new atomic pointer pointing to `ptr`. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{Atomic, Shared}; /// /// let a = Atomic::<i32>::from(Shared::<i32>::null()); /// ``` fn from(ptr: Shared<'g, T>) -> Self { Self::from_usize(ptr.data) } } impl<T> From<*const T> for Atomic<T> { /// Returns a new atomic pointer pointing to `raw`. /// /// # Examples /// /// ``` /// use std::ptr; /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::<i32>::from(ptr::null::<i32>()); /// ``` fn from(raw: *const T) -> Self { Self::from_usize(raw as usize) } } /// A trait for either `Owned` or `Shared` pointers. pub trait Pointer<T: ?Sized + Pointable> { /// Returns the machine representation of the pointer. fn into_usize(self) -> usize; /// Returns a new pointer pointing to the tagged pointer `data`. /// /// # Safety /// /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should /// not be converted back by `Pointer::from_usize()` multiple times. unsafe fn from_usize(data: usize) -> Self; } /// An owned heap-allocated object. /// /// This type is very similar to `Box<T>`. /// /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. 
pub struct Owned<T: ?Sized + Pointable> { data: usize, _marker: PhantomData<Box<T>>, } impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> { #[inline] fn into_usize(self) -> usize { let data = self.data; mem::forget(self); data } /// Returns a new pointer pointing to the tagged pointer `data`. /// /// # Panics /// /// Panics if the data is zero in debug mode. #[inline] unsafe fn from_usize(data: usize) -> Self { debug_assert!(data != 0, "converting zero into `Owned`"); Owned { data, _marker: PhantomData, } } } impl<T> Owned<T> { /// Returns a new owned pointer pointing to `raw`. /// /// This function is unsafe because improper use may lead to memory problems. Argument `raw` /// must be a valid pointer. Also, a double-free may occur if the function is called twice on /// the same raw pointer. /// /// # Panics /// /// Panics if `raw` is not properly aligned. /// /// # Safety /// /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted /// back by `Owned::from_raw()` multiple times. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; /// ``` pub unsafe fn from_raw(raw: *mut T) -> Owned<T> { let raw = raw as usize; ensure_aligned::<T>(raw); Self::from_usize(raw) } /// Converts the owned pointer into a `Box`. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(1234); /// let b: Box<i32> = o.into_box(); /// assert_eq!(*b, 1234); /// ``` pub fn into_box(self) -> Box<T> { let (raw, _) = decompose_tag::<T>(self.data); mem::forget(self); unsafe { Box::from_raw(raw as *mut _) } } /// Allocates `value` on the heap and returns a new owned pointer pointing to it. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(1234); /// ``` pub fn new(init: T) -> Owned<T> { Self::init(init) } } impl<T: ?Sized + Pointable> Owned<T> { /// Allocates `value` on the heap and returns a new owned pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::<i32>::init(1234); /// ``` pub fn init(init: T::Init) -> Owned<T> { unsafe { Self::from_usize(T::init(init)) } } /// Converts the owned pointer into a [`Shared`]. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Owned}; /// /// let o = Owned::new(1234); /// let guard = &epoch::pin(); /// let p = o.into_shared(guard); /// ``` #[allow(clippy::needless_lifetimes)] pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.into_usize()) } } /// Returns the tag stored within the pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// assert_eq!(Owned::new(1234).tag(), 0); /// ``` pub fn tag(&self) -> usize { let (_, tag) = decompose_tag::<T>(self.data); tag } /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the /// unused bits of the pointer to `T`. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(0u64); /// assert_eq!(o.tag(), 0); /// let o = o.with_tag(2); /// assert_eq!(o.tag(), 2); /// ``` pub fn with_tag(self, tag: usize) -> Owned<T> { let data = self.into_usize(); unsafe { Self::from_usize(compose_tag::<T>(data, tag)) } } } impl<T: ?Sized + Pointable> Drop for Owned<T> { fn drop(&mut self) { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::drop(raw); } } } impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (raw, tag) = decompose_tag::<T>(self.data); f.debug_struct("Owned") .field("raw", &raw) .field("tag", &tag) .finish() } } impl<T: Clone> Clone for Owned<T> { fn clone(&self) -> Self { Owned::new((**self).clone()).with_tag(self.tag()) } } impl<T: ?Sized + Pointable> Deref for Owned<T> { type Target = T; fn deref(&self) -> &T { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::deref(raw) } } } impl<T: ?Sized + Pointable> DerefMut for Owned<T> { fn deref_mut(&mut self) -> &mut T { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::deref_mut(raw) } } } impl<T> From<T> for Owned<T> { fn from(t: T) -> Self { Owned::new(t) } } impl<T> From<Box<T>> for Owned<T> { /// Returns a new owned pointer pointing to `b`. /// /// # Panics /// /// Panics if the pointer (the `Box`) is not properly aligned. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; /// ``` fn from(b: Box<T>) -> Self { unsafe { Self::from_raw(Box::into_raw(b)) } } } impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> { fn borrow(&self) -> &T { self.deref() } } impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> { fn borrow_mut(&mut self) -> &mut T { self.deref_mut() } } impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> { fn as_ref(&self) -> &T { self.deref() } } impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> { fn as_mut(&mut self) -> &mut T { self.deref_mut() } } /// A pointer to an object protected by the epoch GC. /// /// The pointer is valid for use only during the lifetime `'g`. /// /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. pub struct Shared<'g, T: 'g + ?Sized + Pointable> { data: usize, _marker: PhantomData<(&'g (), *const T)>, } impl<T: ?Sized + Pointable> Clone for Shared<'_, T> { fn clone(&self) -> Self { Self { data: self.data, _marker: PhantomData, } } } impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {} impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> { #[inline] fn into_usize(self) -> usize { self.data } #[inline] unsafe fn from_usize(data: usize) -> Self { Shared { data, _marker: PhantomData, } } } impl<'g, T> Shared<'g, T> { /// Converts the pointer to a raw pointer (without the tag). 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let o = Owned::new(1234); /// let raw = &*o as *const _; /// let a = Atomic::from(o); /// /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// assert_eq!(p.as_raw(), raw); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> *const T { let (raw, _) = decompose_tag::<T>(self.data); raw as *const _ } } impl<'g, T: ?Sized + Pointable> Shared<'g, T> { /// Returns a new null pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Shared; /// /// let p = Shared::<i32>::null(); /// assert!(p.is_null()); /// ``` pub fn null() -> Shared<'g, T> { Shared { data: 0, _marker: PhantomData, } } /// Returns `true` if the pointer is null. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::null(); /// let guard = &epoch::pin(); /// assert!(a.load(SeqCst, guard).is_null()); /// a.store(Owned::new(1234), SeqCst); /// assert!(!a.load(SeqCst, guard).is_null()); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn is_null(&self) -> bool { let (raw, _) = decompose_tag::<T>(self.data); raw == 0 } /// Dereferences the pointer. /// /// Returns a reference to the pointee that is valid during the lifetime `'g`. /// /// # Safety /// /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. /// /// Another concern is the possibility of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with /// the read from the second thread. This is a data race. 
A possible solution would be to use /// `Release` and `Acquire` orderings. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.deref(), &1234); /// } /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] #[allow(clippy::should_implement_trait)] pub unsafe fn deref(&self) -> &'g T { let (raw, _) = decompose_tag::<T>(self.data); T::deref(raw) } /// Dereferences the pointer. /// /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`. /// /// # Safety /// /// * There is no guarantee that there are no more threads attempting to read/write from/to the /// actual object at the same time. /// /// The user must know that there are no concurrent accesses towards the object itself. /// /// * Other than the above, all safety concerns of `deref()` applies here. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(vec![1, 2, 3, 4]); /// let guard = &epoch::pin(); /// /// let mut p = a.load(SeqCst, guard); /// unsafe { /// assert!(!p.is_null()); /// let b = p.deref_mut(); /// assert_eq!(b, &vec![1, 2, 3, 4]); /// b.push(5); /// assert_eq!(b, &vec![1, 2, 3, 4, 5]); /// } /// /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]); /// } /// ``` #[allow(clippy::should_implement_trait)] pub unsafe fn deref_mut(&mut self) -> &'g mut T { let (raw, _) = decompose_tag::<T>(self.data); T::deref_mut(raw) } /// Converts the pointer to a reference. /// /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. /// /// # Safety /// /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. 
/// /// Another concern is the possibility of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with /// the read from the second thread. This is a data race. A possible solution would be to use /// `Release` and `Acquire` orderings. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.as_ref(), Some(&1234)); /// } /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub unsafe fn as_ref(&self) -> Option<&'g T> { let (raw, _) = decompose_tag::<T>(self.data); if raw == 0 { None } else { Some(T::deref(raw)) } } /// Takes ownership of the pointee. /// /// # Panics /// /// Panics if this pointer is null, but only in debug mode. /// /// # Safety /// /// This method may be called only if the pointer is valid and nobody else is holding a /// reference to the same object. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// unsafe { /// let guard = &epoch::unprotected(); /// let p = a.load(SeqCst, guard); /// drop(p.into_owned()); /// } /// ``` pub unsafe fn into_owned(self) -> Owned<T> { debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`"); Owned::from_usize(self.data) } /// Returns the tag stored within the pointer. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2)); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// assert_eq!(p.tag(), 2); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn tag(&self) -> usize { let (_, tag) = decompose_tag::<T>(self.data); tag } /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the /// unused bits of the pointer to `T`. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(0u64); /// let guard = &epoch::pin(); /// let p1 = a.load(SeqCst, guard); /// let p2 = p1.with_tag(2); /// /// assert_eq!(p1.tag(), 0); /// assert_eq!(p2.tag(), 2); /// assert_eq!(p1.as_raw(), p2.as_raw()); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) } } } impl<T> From<*const T> for Shared<'_, T> { /// Returns a new pointer pointing to `raw`. /// /// # Panics /// /// Panics if `raw` is not properly aligned. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Shared; /// /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _); /// assert!(!p.is_null()); /// ``` fn from(raw: *const T) -> Self { let raw = raw as usize; ensure_aligned::<T>(raw); unsafe { Self::from_usize(raw) } } } impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> { fn eq(&self, other: &Self) -> bool { self.data == other.data } } impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {} impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.data.partial_cmp(&other.data) } } impl<T: ?Sized + Pointable> Ord for Shared<'_, T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.data.cmp(&other.data) } } impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (raw, tag) = decompose_tag::<T>(self.data); f.debug_struct("Shared") .field("raw", &raw) .field("tag", &tag) .finish() } } impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f) } } impl<T: ?Sized + Pointable> Default for Shared<'_, T> { fn default() -> Self { Shared::null() } } #[cfg(all(test, not(crossbeam_loom)))] mod tests { use super::Shared; #[test] fn valid_tag_i8() { Shared::<i8>::null().with_tag(0); } #[test] fn valid_tag_i64() { Shared::<i64>::null().with_tag(7); } #[cfg(feature = "nightly")] #[test] fn const_atomic_null() { use super::Atomic; const _: Atomic<u8> = Atomic::<u8>::null(); } } Added try_into_owned methods use core::borrow::{Borrow, BorrowMut}; use core::cmp; use core::fmt; use core::marker::PhantomData; use core::mem::{self, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::slice; use core::sync::atomic::Ordering; use crate::alloc::alloc; use crate::alloc::boxed::Box; use crate::guard::Guard; use 
crate::primitive::sync::atomic::AtomicUsize; use crossbeam_utils::atomic::AtomicConsume; /// Given ordering for the success case in a compare-exchange operation, returns the strongest /// appropriate ordering for the failure case. #[inline] fn strongest_failure_ordering(ord: Ordering) -> Ordering { use self::Ordering::*; match ord { Relaxed | Release => Relaxed, Acquire | AcqRel => Acquire, _ => SeqCst, } } /// The error returned on failed compare-and-set operation. // TODO: remove in the next major version. #[deprecated(note = "Use `CompareExchangeError` instead")] pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>; /// The error returned on failed compare-and-swap operation. pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> { /// The value in the atomic pointer at the time of the failed operation. pub current: Shared<'g, T>, /// The new value, which the operation failed to store. pub new: P, } impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("CompareExchangeError") .field("current", &self.current) .field("new", &self.new) .finish() } } /// Memory orderings for compare-and-set operations. /// /// A compare-and-set operation can have different memory orderings depending on whether it /// succeeds or fails. This trait generalizes different ways of specifying memory orderings. /// /// The two ways of specifying orderings for compare-and-set are: /// /// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate /// ordering is chosen. /// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is /// for the failure case. // TODO: remove in the next major version. 
#[deprecated( note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \ use `compare_exchange` or `compare_exchange_weak instead`" )] pub trait CompareAndSetOrdering { /// The ordering of the operation when it succeeds. fn success(&self) -> Ordering; /// The ordering of the operation when it fails. /// /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than /// the success ordering. fn failure(&self) -> Ordering; } #[allow(deprecated)] impl CompareAndSetOrdering for Ordering { #[inline] fn success(&self) -> Ordering { *self } #[inline] fn failure(&self) -> Ordering { strongest_failure_ordering(*self) } } #[allow(deprecated)] impl CompareAndSetOrdering for (Ordering, Ordering) { #[inline] fn success(&self) -> Ordering { self.0 } #[inline] fn failure(&self) -> Ordering { self.1 } } /// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. #[inline] fn low_bits<T: ?Sized + Pointable>() -> usize { (1 << T::ALIGN.trailing_zeros()) - 1 } /// Panics if the pointer is not properly unaligned. #[inline] fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) { assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer"); } /// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. /// /// `tag` is truncated to fit into the unused bits of the pointer to `T`. #[inline] fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize { (data & !low_bits::<T>()) | (tag & low_bits::<T>()) } /// Decomposes a tagged pointer `data` into the pointer and the tag. #[inline] fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) { (data & !low_bits::<T>(), data & low_bits::<T>()) } /// Types that are pointed to by a single word. /// /// In concurrent programming, it is necessary to represent an object within a word because atomic /// operations (e.g., reads, writes, read-modify-writes) support only single words. 
This trait /// qualifies such types that are pointed to by a single word. /// /// The trait generalizes `Box<T>` for a sized type `T`. In a box, an object of type `T` is /// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for /// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array /// size and elements. /// /// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In /// particular, Crossbeam supports dynamically sized slices as follows. /// /// ``` /// use std::mem::MaybeUninit; /// use crossbeam_epoch::Owned; /// /// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10] /// ``` pub trait Pointable { /// The alignment of pointer. const ALIGN: usize; /// The type for initializers. type Init; /// Initializes a with the given initializer. /// /// # Safety /// /// The result should be a multiple of `ALIGN`. unsafe fn init(init: Self::Init) -> usize; /// Dereferences the given pointer. /// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently. unsafe fn deref<'a>(ptr: usize) -> &'a Self; /// Mutably dereferences the given pointer. /// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] /// concurrently. unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self; /// Drops the object pointed to by the given pointer. /// /// # Safety /// /// - The given `ptr` should have been initialized with [`Pointable::init`]. /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. 
/// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] /// concurrently. unsafe fn drop(ptr: usize); } impl<T> Pointable for T { const ALIGN: usize = mem::align_of::<T>(); type Init = T; unsafe fn init(init: Self::Init) -> usize { Box::into_raw(Box::new(init)) as usize } unsafe fn deref<'a>(ptr: usize) -> &'a Self { &*(ptr as *const T) } unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self { &mut *(ptr as *mut T) } unsafe fn drop(ptr: usize) { drop(Box::from_raw(ptr as *mut T)); } } /// Array with size. /// /// # Memory layout /// /// An array consisting of size and elements: /// /// ```text /// elements /// | /// | /// ------------------------------------ /// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 | /// ------------------------------------ /// ``` /// /// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not /// along with pointer as in `Box<[T]>`). /// /// Elements are not present in the type, but they will be in the allocation. /// ``` /// // TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use // [`alloc::alloc::Layout::extend`] instead. 
#[repr(C)]
struct Array<T> {
    /// The number of elements (NOT the number of bytes).
    ///
    /// Storing the element count (rather than the allocation's byte size) is essential:
    /// `deref`/`deref_mut` use this value as the slice length, and `drop` uses it to
    /// reconstruct the allocation layout.
    len: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Array<T> {
    /// Returns the layout of an allocation holding the header plus `len` elements.
    fn layout(len: usize) -> alloc::Layout {
        let size = mem::size_of::<Self>() + mem::size_of::<MaybeUninit<T>>() * len;
        alloc::Layout::from_size_align(size, mem::align_of::<Self>()).unwrap()
    }
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(len: Self::Init) -> usize {
        // BUG FIX: the previous version shadowed the element count with the allocation's
        // *byte* size and stored that byte size in the header. `deref`/`deref_mut` then
        // used it as the slice length (an out-of-bounds slice), and `drop` re-derived the
        // layout from it (deallocating with a layout different from the one allocated —
        // undefined behavior). We now store the element count instead.
        let layout = Array::<T>::layout(len);
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        (*ptr).len = len;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.len)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        // Go through a mutable reference so the returned mutable slice is derived from a
        // mutable pointer (the old code cast away constness from a shared reference).
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.len)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *const Array<T>);
        // Deallocate with exactly the same layout that `init` allocated with.
        alloc::dealloc(ptr as *mut u8, Array::<T>::layout(array.len));
    }
}

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types. See [`Pointable`] for details.
pub struct Atomic<T: ?Sized + Pointable> { data: AtomicUsize, _marker: PhantomData<*mut T>, } unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {} unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {} impl<T> Atomic<T> { /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::new(1234); /// ``` pub fn new(init: T) -> Atomic<T> { Self::init(init) } } impl<T: ?Sized + Pointable> Atomic<T> { /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::<i32>::init(1234); /// ``` pub fn init(init: T::Init) -> Atomic<T> { Self::from(Owned::init(init)) } /// Returns a new atomic pointer pointing to the tagged pointer `data`. fn from_usize(data: usize) -> Self { Self { data: AtomicUsize::new(data), _marker: PhantomData, } } /// Returns a new null atomic pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Atomic; /// /// let a = Atomic::<i32>::null(); /// ``` /// #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)] pub fn null() -> Atomic<T> { Self { data: AtomicUsize::new(0), _marker: PhantomData, } } /// Loads a `Shared` from the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// ``` pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.load(ord)) } } /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. 
/// /// This is similar to the "acquire" ordering, except that an ordering is /// only guaranteed with operations that "depend on" the result of the load. /// However consume loads are usually much faster than acquire loads on /// architectures with a weak memory model since they don't require memory /// fence instructions. /// /// The exact definition of "depend on" is a bit vague, but it works as you /// would expect in practice since a lot of software, especially the Linux /// kernel, rely on this behavior. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load_consume(guard); /// ``` pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.load_consume()) } } /// Stores a `Shared` or `Owned` pointer into the atomic pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// a.store(Shared::null(), SeqCst); /// a.store(Owned::new(1234), SeqCst); /// ``` pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) { self.data.store(new.into_usize(), ord); } /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous /// `Shared`. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.swap(Shared::null(), SeqCst, guard); /// ``` pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) } } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// The return value is a result indicating whether the new pointer was written. On success the /// pointer that was written is returned. On failure the actual current value and `new` are /// returned. /// /// This method takes two `Ordering` arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using `Acquire` as success ordering makes the store part /// of this operation `Relaxed`, and using `Release` makes the successful load /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` /// and must be equivalent to or weaker than the success ordering. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// /// let guard = &epoch::pin(); /// let curr = a.load(SeqCst, guard); /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard); /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard); /// ``` pub fn compare_exchange<'g, P>( &self, current: Shared<'_, T>, new: P, success: Ordering, failure: Ordering, _: &'g Guard, ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> where P: Pointer<T>, { let new = new.into_usize(); self.data .compare_exchange(current.into_usize(), new, success, failure) .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareExchangeError { current: Shared::from_usize(current), new: P::from_usize(new), } }) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison /// succeeds, which can result in more efficient code on some platforms. The return value is a /// result indicating whether the new pointer was written. On success the pointer that was /// written is returned. On failure the actual current value and `new` are returned. /// /// This method takes two `Ordering` arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. 
Using `Acquire` as success ordering makes the store part /// of this operation `Relaxed`, and using `Release` makes the successful load /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` /// and must be equivalent to or weaker than the success ordering. /// /// [`compare_exchange`]: Atomic::compare_exchange /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// /// let mut new = Owned::new(5678); /// let mut ptr = a.load(SeqCst, guard); /// loop { /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) { /// Ok(p) => { /// ptr = p; /// break; /// } /// Err(err) => { /// ptr = err.current; /// new = err.new; /// } /// } /// } /// /// let mut curr = a.load(SeqCst, guard); /// loop { /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) { /// Ok(_) => break, /// Err(err) => curr = err.current, /// } /// } /// ``` pub fn compare_exchange_weak<'g, P>( &self, current: Shared<'_, T>, new: P, success: Ordering, failure: Ordering, _: &'g Guard, ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>> where P: Pointer<T>, { let new = new.into_usize(); self.data .compare_exchange_weak(current.into_usize(), new, success, failure) .map(|_| unsafe { Shared::from_usize(new) }) .map_err(|current| unsafe { CompareExchangeError { current: Shared::from_usize(current), new: P::from_usize(new), } }) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// The return value is a result indicating whether the new pointer was written. On success the /// pointer that was written is returned. On failure the actual current value and `new` are /// returned. 
/// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory /// ordering of this operation. /// /// # Migrating to `compare_exchange` /// /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// # Examples /// /// ``` /// # #![allow(deprecated)] /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// /// let guard = &epoch::pin(); /// let curr = a.load(SeqCst, guard); /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard); /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard); /// ``` // TODO: remove in the next major version. #[allow(deprecated)] #[deprecated(note = "Use `compare_exchange` instead")] pub fn compare_and_set<'g, O, P>( &self, current: Shared<'_, T>, new: P, ord: O, guard: &'g Guard, ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> where O: CompareAndSetOrdering, P: Pointer<T>, { self.compare_exchange(current, new, ord.success(), ord.failure(), guard) } /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current /// value is the same as `current`. The tag is also taken into account, so two pointers to the /// same object, but with different tags, will not be considered equal. /// /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison /// succeeds, which can result in more efficient code on some platforms. The return value is a /// result indicating whether the new pointer was written. On success the pointer that was /// written is returned. On failure the actual current value and `new` are returned. 
/// /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory /// ordering of this operation. /// /// [`compare_and_set`]: Atomic::compare_and_set /// /// # Migrating to `compare_exchange_weak` /// /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// # Examples /// /// ``` /// # #![allow(deprecated)] /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// /// let mut new = Owned::new(5678); /// let mut ptr = a.load(SeqCst, guard); /// loop { /// match a.compare_and_set_weak(ptr, new, SeqCst, guard) { /// Ok(p) => { /// ptr = p; /// break; /// } /// Err(err) => { /// ptr = err.current; /// new = err.new; /// } /// } /// } /// /// let mut curr = a.load(SeqCst, guard); /// loop { /// match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) { /// Ok(_) => break, /// Err(err) => curr = err.current, /// } /// } /// ``` // TODO: remove in the next major version. #[allow(deprecated)] #[deprecated(note = "Use `compare_exchange_weak` instead")] pub fn compare_and_set_weak<'g, O, P>( &self, current: Shared<'_, T>, new: P, ord: O, guard: &'g Guard, ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>> where O: CompareAndSetOrdering, P: Pointer<T>, { self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard) } /// Bitwise "and" with the current tag. /// /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. 
/// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(3)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) } } /// Bitwise "or" with the current tag. /// /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 3); /// ``` pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) } } /// Bitwise "xor" with the current tag. /// /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the /// new tag to the result. Returns the previous pointer. /// /// This method takes an [`Ordering`] argument which describes the memory ordering of this /// operation. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<i32>::from(Shared::null().with_tag(1)); /// let guard = &epoch::pin(); /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); /// assert_eq!(a.load(SeqCst, guard).tag(), 2); /// ``` pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) } } /// Takes ownership of the pointee. /// /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for /// destructors of data structures. /// /// # Panics /// /// Panics if this pointer is null, but only in debug mode. /// /// # Safety /// /// This method may be called only if the pointer is valid and nobody else is holding a /// reference to the same object. /// /// # Examples /// /// ```rust /// # use std::mem; /// # use crossbeam_epoch::Atomic; /// struct DataStructure { /// ptr: Atomic<usize>, /// } /// /// impl Drop for DataStructure { /// fn drop(&mut self) { /// // By now the DataStructure lives only in our thread and we are sure we don't hold /// // any Shared or & to it ourselves. /// unsafe { /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned()); /// } /// } /// } /// ``` pub unsafe fn into_owned(self) -> Owned<T> { #[cfg(crossbeam_loom)] { // FIXME: loom does not yet support into_inner, so we use unsync_load for now, // which should have the same synchronization properties: // https://github.com/tokio-rs/loom/issues/117 Owned::from_usize(self.data.unsync_load()) } #[cfg(not(crossbeam_loom))] { Owned::from_usize(self.data.into_inner()) } } /// Takes ownership of the pointee if it is non-null. /// /// This consumes the atomic and converts it into [`Owned`]. 
/// As [`Atomic`] doesn't have a
/// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
/// destructors of data structures.
///
/// # Safety
///
/// This method may be called only if the pointer is valid and nobody else is holding a
/// reference to the same object, or the pointer is null.
///
/// # Examples
///
/// ```rust
/// # use std::mem;
/// # use crossbeam_epoch::Atomic;
/// struct DataStructure {
///     ptr: Atomic<usize>,
/// }
///
/// impl Drop for DataStructure {
///     fn drop(&mut self) {
///         // By now the DataStructure lives only in our thread and we are sure we don't hold
///         // any Shared or & to it ourselves, but it may be null, so we have to be careful.
///         let old = mem::replace(&mut self.ptr, Atomic::null());
///         unsafe {
///             if let Option::Some(x) = old.try_into_owned() {
///                 drop(x)
///             }
///         }
///     }
/// }
/// ```
pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
    // FIXME: See self.into_owned()
    #[cfg(crossbeam_loom)]
    let data = self.data.unsync_load();
    #[cfg(not(crossbeam_loom))]
    let data = self.data.into_inner();
    // Null check is done on the *untagged* address bits, so a null pointer
    // carrying a non-zero tag is still treated as null here.
    if decompose_tag::<T>(data).0 == 0 {
        Option::None
    } else {
        Option::Some(Owned::from_usize(data))
    }
}
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);
        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other
    /// atomics or fences.
fn clone(&self) -> Self {
    // `Relaxed` is sufficient: `Clone` makes no synchronization promises
    // (see the doc comment on this impl).
    let data = self.data.load(Ordering::Relaxed);
    Atomic::from_usize(data)
}
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        // Steal the tagged pointer without running `Owned`'s destructor;
        // ownership of the allocation transfers to the new `Atomic`.
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned.
Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. pub struct Owned<T: ?Sized + Pointable> { data: usize, _marker: PhantomData<Box<T>>, } impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> { #[inline] fn into_usize(self) -> usize { let data = self.data; mem::forget(self); data } /// Returns a new pointer pointing to the tagged pointer `data`. /// /// # Panics /// /// Panics if the data is zero in debug mode. #[inline] unsafe fn from_usize(data: usize) -> Self { debug_assert!(data != 0, "converting zero into `Owned`"); Owned { data, _marker: PhantomData, } } } impl<T> Owned<T> { /// Returns a new owned pointer pointing to `raw`. /// /// This function is unsafe because improper use may lead to memory problems. Argument `raw` /// must be a valid pointer. Also, a double-free may occur if the function is called twice on /// the same raw pointer. /// /// # Panics /// /// Panics if `raw` is not properly aligned. /// /// # Safety /// /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted /// back by `Owned::from_raw()` multiple times. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; /// ``` pub unsafe fn from_raw(raw: *mut T) -> Owned<T> { let raw = raw as usize; ensure_aligned::<T>(raw); Self::from_usize(raw) } /// Converts the owned pointer into a `Box`. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(1234); /// let b: Box<i32> = o.into_box(); /// assert_eq!(*b, 1234); /// ``` pub fn into_box(self) -> Box<T> { let (raw, _) = decompose_tag::<T>(self.data); mem::forget(self); unsafe { Box::from_raw(raw as *mut _) } } /// Allocates `value` on the heap and returns a new owned pointer pointing to it. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(1234); /// ``` pub fn new(init: T) -> Owned<T> { Self::init(init) } } impl<T: ?Sized + Pointable> Owned<T> { /// Allocates `value` on the heap and returns a new owned pointer pointing to it. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::<i32>::init(1234); /// ``` pub fn init(init: T::Init) -> Owned<T> { unsafe { Self::from_usize(T::init(init)) } } /// Converts the owned pointer into a [`Shared`]. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Owned}; /// /// let o = Owned::new(1234); /// let guard = &epoch::pin(); /// let p = o.into_shared(guard); /// ``` #[allow(clippy::needless_lifetimes)] pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { unsafe { Shared::from_usize(self.into_usize()) } } /// Returns the tag stored within the pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// assert_eq!(Owned::new(1234).tag(), 0); /// ``` pub fn tag(&self) -> usize { let (_, tag) = decompose_tag::<T>(self.data); tag } /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the /// unused bits of the pointer to `T`. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = Owned::new(0u64); /// assert_eq!(o.tag(), 0); /// let o = o.with_tag(2); /// assert_eq!(o.tag(), 2); /// ``` pub fn with_tag(self, tag: usize) -> Owned<T> { let data = self.into_usize(); unsafe { Self::from_usize(compose_tag::<T>(data, tag)) } } } impl<T: ?Sized + Pointable> Drop for Owned<T> { fn drop(&mut self) { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::drop(raw); } } } impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (raw, tag) = decompose_tag::<T>(self.data); f.debug_struct("Owned") .field("raw", &raw) .field("tag", &tag) .finish() } } impl<T: Clone> Clone for Owned<T> { fn clone(&self) -> Self { Owned::new((**self).clone()).with_tag(self.tag()) } } impl<T: ?Sized + Pointable> Deref for Owned<T> { type Target = T; fn deref(&self) -> &T { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::deref(raw) } } } impl<T: ?Sized + Pointable> DerefMut for Owned<T> { fn deref_mut(&mut self) -> &mut T { let (raw, _) = decompose_tag::<T>(self.data); unsafe { T::deref_mut(raw) } } } impl<T> From<T> for Owned<T> { fn from(t: T) -> Self { Owned::new(t) } } impl<T> From<Box<T>> for Owned<T> { /// Returns a new owned pointer pointing to `b`. /// /// # Panics /// /// Panics if the pointer (the `Box`) is not properly aligned. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::Owned; /// /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; /// ``` fn from(b: Box<T>) -> Self { unsafe { Self::from_raw(Box::into_raw(b)) } } } impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> { fn borrow(&self) -> &T { self.deref() } } impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> { fn borrow_mut(&mut self) -> &mut T { self.deref_mut() } } impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> { fn as_ref(&self) -> &T { self.deref() } } impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> { fn as_mut(&mut self) -> &mut T { self.deref_mut() } } /// A pointer to an object protected by the epoch GC. /// /// The pointer is valid for use only during the lifetime `'g`. /// /// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused /// least significant bits of the address. pub struct Shared<'g, T: 'g + ?Sized + Pointable> { data: usize, _marker: PhantomData<(&'g (), *const T)>, } impl<T: ?Sized + Pointable> Clone for Shared<'_, T> { fn clone(&self) -> Self { Self { data: self.data, _marker: PhantomData, } } } impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {} impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> { #[inline] fn into_usize(self) -> usize { self.data } #[inline] unsafe fn from_usize(data: usize) -> Self { Shared { data, _marker: PhantomData, } } } impl<'g, T> Shared<'g, T> { /// Converts the pointer to a raw pointer (without the tag). 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let o = Owned::new(1234); /// let raw = &*o as *const _; /// let a = Atomic::from(o); /// /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// assert_eq!(p.as_raw(), raw); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn as_raw(&self) -> *const T { let (raw, _) = decompose_tag::<T>(self.data); raw as *const _ } } impl<'g, T: ?Sized + Pointable> Shared<'g, T> { /// Returns a new null pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::Shared; /// /// let p = Shared::<i32>::null(); /// assert!(p.is_null()); /// ``` pub fn null() -> Shared<'g, T> { Shared { data: 0, _marker: PhantomData, } } /// Returns `true` if the pointer is null. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::null(); /// let guard = &epoch::pin(); /// assert!(a.load(SeqCst, guard).is_null()); /// a.store(Owned::new(1234), SeqCst); /// assert!(!a.load(SeqCst, guard).is_null()); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn is_null(&self) -> bool { let (raw, _) = decompose_tag::<T>(self.data); raw == 0 } /// Dereferences the pointer. /// /// Returns a reference to the pointee that is valid during the lifetime `'g`. /// /// # Safety /// /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. /// /// Another concern is the possibility of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with /// the read from the second thread. This is a data race. 
A possible solution would be to use /// `Release` and `Acquire` orderings. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.deref(), &1234); /// } /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] #[allow(clippy::should_implement_trait)] pub unsafe fn deref(&self) -> &'g T { let (raw, _) = decompose_tag::<T>(self.data); T::deref(raw) } /// Dereferences the pointer. /// /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`. /// /// # Safety /// /// * There is no guarantee that there are no more threads attempting to read/write from/to the /// actual object at the same time. /// /// The user must know that there are no concurrent accesses towards the object itself. /// /// * Other than the above, all safety concerns of `deref()` applies here. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(vec![1, 2, 3, 4]); /// let guard = &epoch::pin(); /// /// let mut p = a.load(SeqCst, guard); /// unsafe { /// assert!(!p.is_null()); /// let b = p.deref_mut(); /// assert_eq!(b, &vec![1, 2, 3, 4]); /// b.push(5); /// assert_eq!(b, &vec![1, 2, 3, 4, 5]); /// } /// /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]); /// } /// ``` #[allow(clippy::should_implement_trait)] pub unsafe fn deref_mut(&mut self) -> &'g mut T { let (raw, _) = decompose_tag::<T>(self.data); T::deref_mut(raw) } /// Converts the pointer to a reference. /// /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. /// /// # Safety /// /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. 
/// /// Another concern is the possibility of data races due to lack of proper synchronization. /// For example, consider the following scenario: /// /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` /// /// The problem is that relaxed orderings don't synchronize initialization of the object with /// the read from the second thread. This is a data race. A possible solution would be to use /// `Release` and `Acquire` orderings. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// unsafe { /// assert_eq!(p.as_ref(), Some(&1234)); /// } /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub unsafe fn as_ref(&self) -> Option<&'g T> { let (raw, _) = decompose_tag::<T>(self.data); if raw == 0 { None } else { Some(T::deref(raw)) } } /// Takes ownership of the pointee. /// /// # Panics /// /// Panics if this pointer is null, but only in debug mode. /// /// # Safety /// /// This method may be called only if the pointer is valid and nobody else is holding a /// reference to the same object. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// unsafe { /// let guard = &epoch::unprotected(); /// let p = a.load(SeqCst, guard); /// drop(p.into_owned()); /// } /// ``` pub unsafe fn into_owned(self) -> Owned<T> { debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`"); Owned::from_usize(self.data) } /// Takes ownership of the pointee if it is not null. /// /// # Safety /// /// This method may be called only if the pointer is valid and nobody else is holding a /// reference to the same object, or if the pointer is null. 
/// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(1234); /// unsafe { /// let guard = &epoch::unprotected(); /// let p = a.load(SeqCst, guard); /// if let Option::Some(x) = p.try_into_owned() { /// drop(x); /// } /// } /// ``` pub unsafe fn try_into_owned(self) -> Option<Owned<T>> { if self.is_null() { Option::None } else { Option::Some(Owned::from_usize(self.data)) } } /// Returns the tag stored within the pointer. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2)); /// let guard = &epoch::pin(); /// let p = a.load(SeqCst, guard); /// assert_eq!(p.tag(), 2); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn tag(&self) -> usize { let (_, tag) = decompose_tag::<T>(self.data); tag } /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the /// unused bits of the pointer to `T`. /// /// # Examples /// /// ``` /// use crossbeam_epoch::{self as epoch, Atomic}; /// use std::sync::atomic::Ordering::SeqCst; /// /// let a = Atomic::new(0u64); /// let guard = &epoch::pin(); /// let p1 = a.load(SeqCst, guard); /// let p2 = p1.with_tag(2); /// /// assert_eq!(p1.tag(), 0); /// assert_eq!(p2.tag(), 2); /// assert_eq!(p1.as_raw(), p2.as_raw()); /// ``` #[allow(clippy::trivially_copy_pass_by_ref)] pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) } } } impl<T> From<*const T> for Shared<'_, T> { /// Returns a new pointer pointing to `raw`. /// /// # Panics /// /// Panics if `raw` is not properly aligned. 
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::Shared;
///
/// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
/// assert!(!p.is_null());
/// ```
fn from(raw: *const T) -> Self {
    let raw = raw as usize;
    ensure_aligned::<T>(raw);
    unsafe { Self::from_usize(raw) }
}
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    // Compares the full tagged representation (`data` carries the tag bits),
    // so two pointers to the same object with different tags are NOT equal.
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    // Ordering is over the raw `usize` of the tagged representation.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);
        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }

    #[cfg(feature = "nightly")]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        const _: Atomic<u8> = Atomic::<u8>::null();
    }
}
//! Implementation of a Micro Transport Protocol library. //! //! http://www.bittorrent.org/beps/bep_0029.html //! //! TODO //! ---- //! //! - congestion control //! - proper connection closing //! - automatically send FIN (or should it be RST?) on `drop` if not already closed //! - setters and getters that hide header field endianness conversion //! - SACK extension //! - handle packet loss #![crate_name = "utp"] #![license = "MIT/ASL2"] #![crate_type = "dylib"] #![crate_type = "rlib"] #![feature(macro_rules, phase)] #![deny(missing_doc)] extern crate time; #[phase(plugin, link)] extern crate log; use std::io::net::udp::UdpSocket; use std::io::net::ip::SocketAddr; use std::io::IoResult; use std::mem::transmute; use std::rand::random; use std::fmt; static HEADER_SIZE: uint = 20; // For simplicity's sake, let us assume no packet will ever exceed the // Ethernet maximum transfer unit of 1500 bytes. static BUF_SIZE: uint = 1500; macro_rules! u8_to_unsigned_be( ($src:ident[$start:expr..$end:expr] -> $t:ty) => ({ let mut result: $t = 0; for i in range(0u, $end-$start+1).rev() { result = result | $src[$start+i] as $t << i*8; } result }) ) macro_rules! reply_with_ack( ($header:expr, $src:expr) => ({ let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32); try!(self.socket.send_to(resp.bytes().as_slice(), $src)); debug!("sent {}", resp.header); }) ) /// Return current time in microseconds since the UNIX epoch. 
// Truncated to u32 on purpose: uTP carries 32-bit timestamps, so wrap-around
// is expected — presumably only differences between timestamps matter; the
// `(t.sec * 1_000_000) as u32` wraps roughly every 71 minutes of epoch time.
fn now_microseconds() -> u32 {
    let t = time::get_time();
    (t.sec * 1_000_000) as u32 + (t.nsec/1000) as u32
}

// Packet type, stored in the high nibble of `UtpPacketHeader.type_ver`.
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpPacketType {
    ST_DATA = 0,
    ST_FIN = 1,
    ST_STATE = 2,
    ST_RESET = 3,
    ST_SYN = 4,
}

// Extension identifiers carried in the header's `extension` byte.
enum UtpExtension {
    SelectiveAckExtensionId = 1,
}

// On-the-wire uTP header.  All multi-byte fields are kept in NETWORK
// (big-endian) byte order in memory; readers must apply `Int::from_be`.
// `#[packed]` matters because `bytes()` transmutes `&self` to raw bytes.
#[allow(dead_code)]
#[deriving(Clone)]
#[packed]
struct UtpPacketHeader {
    type_ver: u8, // type: u4, ver: u4
    extension: u8,
    connection_id: u16,
    timestamp_microseconds: u32,
    timestamp_difference_microseconds: u32,
    wnd_size: u32,
    seq_nr: u16,
    ack_nr: u16,
}

impl UtpPacketHeader {
    /// Set type of packet to the specified type.
    fn set_type(&mut self, t: UtpPacketType) {
        // Preserve the version nibble, replace the type nibble.
        let version = 0x0F & self.type_ver;
        self.type_ver = t as u8 << 4 | version;
    }

    fn get_type(&self) -> UtpPacketType {
        // NOTE(review): transmuting the raw nibble is undefined behavior if a
        // peer sends a type outside 0..=4 — consider validating first.
        let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
        t
    }

    fn get_version(&self) -> u8 {
        self.type_ver & 0x0F
    }

    // Builder-style copy with a new (big-endian) window size.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
        UtpPacketHeader {
            wnd_size: new_wnd_size.to_be(),
            .. self.clone()
        }
    }

    /// Return packet header as a slice of bytes.
    fn bytes(&self) -> &[u8] {
        // Relies on `#[packed]` making the struct exactly HEADER_SIZE bytes
        // with fields in declaration order, already in network byte order.
        let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
        return buf.as_slice();
    }

    fn len(&self) -> uint {
        return HEADER_SIZE;
    }

    /// Read byte buffer and return corresponding packet header.
    /// It assumes the fields are in network (big-endian) byte order,
    /// preserving it.
    fn decode(buf: &[u8]) -> UtpPacketHeader {
        // Ranges below are INCLUSIVE per the u8_to_unsigned_be! macro,
        // e.g. buf[2..3] consumes exactly two bytes.
        UtpPacketHeader {
            type_ver: buf[0],
            extension: buf[1],
            connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
            timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
            timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
            wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
            seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
            ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
        }
    }
}

impl fmt::Show for UtpPacketHeader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fields are converted out of network order for human consumption.
        write!(f, "(type: {}, version: {}, extension: {}, \
                connection_id: {}, timestamp_microseconds: {}, \
                timestamp_difference_microseconds: {}, wnd_size: {}, \
                seq_nr: {}, ack_nr: {})",
                self.get_type(),
                Int::from_be(self.get_version()),
                Int::from_be(self.extension),
                Int::from_be(self.connection_id),
                Int::from_be(self.timestamp_microseconds),
                Int::from_be(self.timestamp_difference_microseconds),
                Int::from_be(self.wnd_size),
                Int::from_be(self.seq_nr),
                Int::from_be(self.ack_nr),
        )
    }
}

// A full uTP packet: fixed header, optional extension bytes, payload.
#[allow(dead_code)]
struct UtpPacket {
    header: UtpPacketHeader,
    extensions: Vec<u8>,
    payload: Vec<u8>,
}

impl UtpPacket {
    /// Construct a new, empty packet.
    fn new() -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader {
                // Type ST_DATA in the high nibble, protocol version 1 in the low.
                type_ver: ST_DATA as u8 << 4 | 1,
                extension: 0,
                connection_id: 0,
                timestamp_microseconds: 0,
                timestamp_difference_microseconds: 0,
                wnd_size: 0,
                seq_nr: 0,
                ack_nr: 0,
            },
            extensions: Vec::new(),
            payload: Vec::new(),
        }
    }

    fn set_type(&mut self, t: UtpPacketType) {
        self.header.set_type(t);
    }

    // TODO: Read up on pointers and ownership
    fn get_type(&self) -> UtpPacketType {
        self.header.get_type()
    }

    // Builder-style copy with a new window size.
    // NOTE(review): `.. UtpPacket::new()` discards the receiver's extensions
    // and payload rather than copying them — confirm that is intended.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
        UtpPacket {
            header: self.header.wnd_size(new_wnd_size),
            .. UtpPacket::new()
        }
    }

    /// Set Selective ACK field in packet header and add appropriate data.
    ///
    /// If None is passed, the SACK extension is disabled and the respective
    /// data is flushed. Otherwise, the SACK extension is enabled and the
    /// vector `v` is taken as the extension's payload.
    ///
    /// The length of the SACK extension is expressed in bytes, which
    /// must be a multiple of 4 and at least 4.
    fn set_sack(&mut self, v: Option<Vec<u8>>) {
        match v {
            None => {
                self.header.extension = 0;
                self.extensions = Vec::new();
            },
            Some(bv) => {
                // The length of the SACK extension is expressed in bytes, which
                // must be a multiple of 4 and at least 4.
                assert!(bv.len() >= 4);
                assert!(bv.len() % 4 == 0);

                self.header.extension = SelectiveAckExtensionId as u8;
                // Extension list header
                self.extensions.push(SelectiveAckExtensionId as u8);
                // length in bytes, multiples of 4, >= 4
                self.extensions.push(bv.len() as u8);
                // Elements
                for byte in bv.iter() {
                    self.extensions.push(*byte);
                }
            }
        }
    }

    /// TODO: return slice
    fn bytes(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(self.len());
        buf.push_all(self.header.bytes());
        buf.push_all(self.extensions.as_slice());
        buf.push_all(self.payload.as_slice());
        return buf;
    }

    // Total serialized length.
    // NOTE(review): `set_sack` already pushes the two list-header bytes into
    // `self.extensions`, yet this adds another `+ 2` — looks double-counted
    // relative to `bytes()`; verify against `decode`'s framing.
    fn len(&self) -> uint {
        let len = self.header.len() + self.payload.len();
        // Add an extra two bytes to extension length corresponding to the list
        // header (extension identifier + list length)
        if self.extensions.is_empty() {
            len
        } else {
            len + self.extensions.len() + 2
        }
    }

    /// Decode a byte slice and construct the equivalent UtpPacket.
    ///
    /// Note that this method makes no attempt to guess the payload size, saving
    /// all except the initial 20 bytes corresponding to the header as payload.
    /// It's the caller's responsability to use an appropriately sized buffer.
    fn decode(buf: &[u8]) -> UtpPacket {
        let header = UtpPacketHeader::decode(buf);

        // Only the SACK extension is understood; anything else is treated as
        // extension-free and left in the payload.
        let (extensions, payload) = if header.extension == SelectiveAckExtensionId as u8 {
            assert!(buf[HEADER_SIZE] == SelectiveAckExtensionId as u8);
            let len = buf[HEADER_SIZE + 1] as uint;
            let extension_start = HEADER_SIZE + 2;
            (Vec::from_slice(buf.slice(extension_start, extension_start + len)),
             Vec::from_slice(buf.slice_from(extension_start + len)))
        } else {
            (Vec::new(), Vec::from_slice(buf.slice_from(HEADER_SIZE)))
        };

        UtpPacket {
            header: header,
            extensions: extensions,
            payload: payload,
        }
    }
}

impl Clone for UtpPacket {
    fn clone(&self) -> UtpPacket {
        UtpPacket {
            header: self.header,
            extensions: self.extensions.clone(),
            payload: self.payload.clone(),
        }
    }
}

impl fmt::Show for UtpPacket {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.header.fmt(f)
    }
}

// Connection state machine states for UtpSocket.
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpSocketState {
    CS_NEW,
    CS_CONNECTED,
    CS_SYN_SENT,
    CS_FIN_RECEIVED,
    CS_FIN_SENT,
    CS_RST_RECEIVED,
    CS_CLOSED,
    CS_EOF,
}

/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
    socket: UdpSocket,
    connected_to: SocketAddr,
    sender_connection_id: u16,
    receiver_connection_id: u16,
    // seq_nr and ack_nr are kept in HOST byte order; header fields are
    // big-endian, hence the to_be()/from_be() at every boundary.
    seq_nr: u16,
    ack_nr: u16,
    state: UtpSocketState,

    // Received but not acknowledged packets
    incoming_buffer: Vec<UtpPacket>,
    // Sent but not yet acknowledged packets
    send_buffer: Vec<UtpPacket>,

    duplicate_ack_count: uint,
    last_acked: u16,
    last_acked_timestamp: u32,

    // RTT estimator state (microseconds); timeout is in milliseconds.
    rtt: int,
    rtt_variance: int,
    timeout: int,
}

impl UtpSocket {
    /// Create a UTP socket from the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
        let skt = UdpSocket::bind(addr);
        // Initiator convention: receive id is random, send id is receive id + 1.
        // A socket that instead ACCEPTS a connection mirrors the peer's ids in
        // handle_packet's ST_SYN arm.
        let connection_id = random::<u16>();
        match skt {
            Ok(x) => Ok(UtpSocket {
                socket: x,
                // Placeholder until connect()/first SYN sets the real peer.
                connected_to: addr,
                receiver_connection_id: connection_id,
                sender_connection_id: connection_id + 1,
                seq_nr: 1,
                ack_nr: 0,
                state: CS_NEW,
                incoming_buffer: Vec::new(),
                send_buffer: Vec::new(),
                duplicate_ack_count: 0,
                last_acked: 0,
                last_acked_timestamp: 0,
                rtt: 0,
                rtt_variance: 0,
                timeout: 1000,
            }),
            Err(e) => Err(e)
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
        use std::io::{IoError, ConnectionFailed};

        self.connected_to = other;
        assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);

        // SYN carries the RECEIVE connection id so the peer can derive both.
        let mut packet = UtpPacket::new();
        packet.set_type(ST_SYN);
        packet.header.connection_id = self.receiver_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();

        // Send packet
        let dst = self.connected_to;
        let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
        debug!("sent {}", packet.header);

        self.state = CS_SYN_SENT;

        // Block for the handshake ACK.
        // NOTE(review): no timeout/retransmit here; a lost SYN or reply hangs
        // the call, and `fail!` on any socket error aborts the task.
        let mut buf = [0, ..BUF_SIZE];
        let (_len, addr) = match self.socket.recv_from(buf) {
            Ok(v) => v,
            Err(e) => fail!("{}", e),
        };
        assert!(_len == HEADER_SIZE);
        assert!(addr == self.connected_to);

        let packet = UtpPacket::decode(buf.slice_to(_len));
        if packet.get_type() != ST_STATE {
            return Err(IoError {
                kind: ConnectionFailed,
                desc: "The remote peer sent an incorrect reply",
                detail: None,
            });
        }
        self.ack_nr = Int::from_be(packet.header.seq_nr);
        debug!("connected to: {} {}", addr, self.connected_to);

        self.state = CS_CONNECTED;
        self.seq_nr += 1;

        Ok(self)
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        let mut packet = UtpPacket::new();
        packet.header.connection_id = self.sender_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();
        packet.set_type(ST_FIN);

        // Send FIN
        let dst = self.connected_to;
        try!(self.socket.send_to(packet.bytes().as_slice(), dst));
        debug!("sent {}", packet);
        self.state = CS_FIN_SENT;

        // Receive JAKE
        // NOTE(review): decodes the WHOLE fixed buffer, not just the bytes
        // read, and asserts the reply is ST_STATE — a stray data packet here
        // aborts the task instead of being handled.
        let mut buf = [0u8, ..BUF_SIZE];
        try!(self.socket.recv_from(buf));
        let resp = UtpPacket::decode(buf);
        debug!("received {}", resp);
        assert!(resp.get_type() == ST_STATE);

        // Set socket state
        self.state = CS_CLOSED;

        Ok(())
    }

    /// Receive data from socket.
    ///
    /// On success, returns the number of bytes read and the sender's address.
    /// Returns CS_EOF after receiving a FIN packet when the remaining
    /// inflight packets are consumed. Subsequent calls return CS_CLOSED.
    #[unstable]
    pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        use std::cmp::min;
        use std::io::{IoError, EndOfFile, Closed, TimedOut, ConnectionReset};

        // EOF is delivered exactly once, then the socket reports Closed.
        if self.state == CS_EOF {
            self.state = CS_CLOSED;
            return Err(IoError {
                kind: EndOfFile,
                desc: "End of file reached",
                detail: None,
            });
        }

        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
        // A CS_NEW socket is a listener waiting for its first SYN and must
        // block indefinitely; established sockets use the adaptive timeout.
        debug!("setting read timeout of {} ms", self.timeout);
        if self.state != CS_NEW {
            self.socket.set_read_timeout(Some(self.timeout as u64));
        }
        let (read, src) = match self.socket.recv_from(b) {
            Err(ref e) if e.kind == TimedOut => {
                debug!("recv_from timed out");
                // Exponential backoff plus a triple-ACK fast-resend nudge.
                self.timeout = self.timeout * 2;
                self.send_fast_resend_request();
                return Ok((0, self.connected_to));
            },
            Ok(x) => x,
            Err(e) => return Err(e),
        };
        let packet = UtpPacket::decode(b.slice_to(read));
        debug!("received {}", packet.header);

        if packet.get_type() == ST_RESET {
            return Err(IoError {
                kind: ConnectionReset,
                desc: "Remote host aborted connection (incorrect connection id)",
                detail: None,
            });
        }

        // TODO: move this to handle_packet?
        if packet.get_type() == ST_SYN {
            self.connected_to = src;
        }

        // Check if the packet is out of order (that is, it's sequence number
        // does not immediately follow the ACK number)
        if packet.get_type() != ST_STATE && packet.get_type() != ST_SYN &&
            self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
            debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
                   self.ack_nr, Int::from_be(packet.header.seq_nr));
            // Add to buffer but do not acknowledge until all packets between
            // ack_nr + 1 and curr_packet.seq_nr - 1 are received
            self.insert_into_buffer(packet);
            return Ok((0, self.connected_to));
        }

        // Copy received payload to output buffer if packet isn't a duplicate
        // NOTE(review): `read - HEADER_SIZE` underflows `uint` on a datagram
        // shorter than the header — presumably unreachable for well-formed
        // peers, but worth guarding.
        let mut read = read - HEADER_SIZE;
        if self.ack_nr < Int::from_be(packet.header.seq_nr) {
            for i in range(0u, min(buf.len(), read)) {
                buf[i] = b[i + HEADER_SIZE];
            }
        } else {
            read = 0;
        }

        // Let the state machine produce a reply (ACK, RESET, ...) if needed.
        match self.handle_packet(packet.clone()) {
            Some(pkt) => {
                let pkt = pkt.wnd_size(BUF_SIZE as u32);
                try!(self.socket.send_to(pkt.bytes().as_slice(), src));
                debug!("sent {}", pkt.header);
            },
            None => {}
        };

        // Flush incoming buffer if possible
        let read = self.flush_incoming_buffer(buf, read);

        Ok((read, src))
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `recv_from`"]
    pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        self.recv_from(buf)
    }

    // Build a reply of type `t` to `original`, filling in timestamps and the
    // timestamp difference the peer uses for its delay estimate.
    fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
        let mut resp = UtpPacket::new();
        resp.set_type(t);
        let self_t_micro: u32 = now_microseconds();
        let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
        resp.header.timestamp_microseconds = self_t_micro.to_be();
        resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
        resp.header.connection_id = self.sender_connection_id.to_be();
        resp.header.seq_nr = self.seq_nr.to_be();
        resp.header.ack_nr = self.ack_nr.to_be();

        resp
    }

    /// Discards sequential, ordered packets in incoming buffer, starting from
    /// the most recently acknowledged to the most recent, as long as there are
    /// no missing packets. The discarded packets' payload is written to the
    /// slice `buf`, starting in position `start`.
    /// Returns the last written index.
    fn flush_incoming_buffer(&mut self, buf: &mut [u8], start: uint) -> uint {
        let mut idx = start;
        // NOTE(review): writes into `buf` without checking its length — a
        // burst of buffered packets larger than the caller's slice will index
        // out of bounds.
        while !self.incoming_buffer.is_empty() &&
            self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
            let packet = self.incoming_buffer.shift().unwrap();
            debug!("Removing packet from buffer: {}", packet);

            for i in range(0u, packet.payload.len()) {
                buf[idx] = packet.payload[i];
                idx += 1;
            }

            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        return idx;
    }

    /// Send data on socket to the given address. Returns nothing on success.
    //
    // # Implementation details
    //
    // This method inserts packets into the send buffer and keeps trying to
    // advance the send window until an ACK corresponding to the last packet is
    // received.
    //
    // Note that the buffer passed to `send_to` might exceed the maximum packet
    // size, which will result in the data being split over several packets.
    #[unstable]
    pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        use std::io::{IoError, Closed};

        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        // Fragment the input into MTU-sized ST_DATA packets, sending each
        // immediately and keeping a copy for retransmission.
        for chunk in buf.chunks(BUF_SIZE) {
            let mut packet = UtpPacket::new();
            packet.set_type(ST_DATA);
            packet.payload = Vec::from_slice(chunk);
            packet.header.timestamp_microseconds = now_microseconds().to_be();
            packet.header.seq_nr = self.seq_nr.to_be();
            packet.header.ack_nr = self.ack_nr.to_be();
            packet.header.connection_id = self.sender_connection_id.to_be();

            debug!("Pushing packet into send buffer: {}", packet);
            self.send_buffer.push(packet.clone());
            try!(self.socket.send_to(packet.bytes().as_slice(), dst));
            self.seq_nr += 1;
        }

        // Consume acknowledgements until latest packet
        // NOTE(review): assumes no seq_nr wrap-around; `self.seq_nr - 1` and
        // the `<` comparison misbehave across the u16 boundary.
        let mut buf = [0, ..BUF_SIZE];
        while self.last_acked < self.seq_nr - 1 {
            try!(self.recv_from(buf));
        }

        Ok(())
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `send_to`"]
    pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        self.send_to(buf, dst)
    }

    /// Send fast resend request.
    ///
    /// Sends three identical ACK/STATE packets to the remote host, signalling a
    /// fast resend request.
    fn send_fast_resend_request(&mut self) {
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.connection_id = self.sender_connection_id.to_be();

        for _ in range(0u, 3) {
            let t = now_microseconds();
            packet.header.timestamp_microseconds = t.to_be();
            packet.header.timestamp_difference_microseconds = (t - self.last_acked_timestamp).to_be();
            // Send errors are deliberately ignored: this is best-effort.
            self.socket.send_to(packet.bytes().as_slice(), self.connected_to);
            debug!("sent {}", packet.header);
        }
    }

    /// Handle incoming packet, updating socket state accordingly.
    ///
    /// Returns appropriate reply packet, if needed.
    fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
        // Reset connection if connection id doesn't match and this isn't a SYN
        if packet.get_type() != ST_SYN &&
           !(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
             Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
            return Some(self.prepare_reply(&packet.header, ST_RESET));
        }

        // Acknowledge only if the packet strictly follows the previous one
        if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        match packet.header.get_type() {
            ST_SYN => { // Respond with an ACK and populate own fields
                // Update socket information for new connections
                // Accepting side mirrors the initiator's ids: it SENDS with
                // the id from the SYN and RECEIVES on id + 1.
                self.ack_nr = Int::from_be(packet.header.seq_nr);
                self.seq_nr = random();
                self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
                self.sender_connection_id = Int::from_be(packet.header.connection_id);
                self.state = CS_CONNECTED;

                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
            ST_FIN => {
                self.state = CS_FIN_RECEIVED;
                // TODO: check if no packets are missing
                // If all packets are received
                self.state = CS_EOF;

                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_STATE => {
                // RFC 6298-style smoothed RTT estimator.
                // NOTE(review): this feeds the peer's reported one-way delay
                // (timestamp_difference) into the estimator as if it were a
                // round-trip time — confirm that is intended.
                let packet_rtt = Int::from_be(packet.header.timestamp_difference_microseconds) as int;
                let delta = self.rtt - packet_rtt;
                self.rtt_variance += (std::num::abs(delta) - self.rtt_variance) / 4;
                self.rtt += (packet_rtt - self.rtt) / 8;
                self.timeout = std::cmp::max(self.rtt + self.rtt_variance * 4, 500);

                debug!("packet_rtt: {}", packet_rtt);
                debug!("delta: {}", delta);
                debug!("self.rtt_variance: {}", self.rtt_variance);
                debug!("self.rtt: {}", self.rtt);
                debug!("self.timeout: {}", self.timeout);

                // Duplicate-ACK detection: `last_acked` is host order, the
                // header field is network order, so one side is converted.
                if packet.header.ack_nr == Int::from_be(self.last_acked) {
                    self.duplicate_ack_count += 1;
                } else {
                    self.last_acked = Int::from_be(packet.header.ack_nr);
                    self.last_acked_timestamp = now_microseconds();
                    self.duplicate_ack_count = 1;
                }

                // Three duplicate ACKs, must resend packets since `ack_nr + 1`
                // TODO: checking if the send buffer isn't empty isn't a
                // foolproof way to differentiate between triple-ACK and three
                // keep alives spread in time
                if !self.send_buffer.is_empty() && self.duplicate_ack_count == 3 {
                    match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
                        None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
                        Some(position) => {
                            // Resend everything up to and including the
                            // requested packet, removing it from the buffer.
                            for _ in range(0u, position + 1) {
                                let to_send = self.send_buffer.shift().unwrap();
                                debug!("resending: {}", to_send);
                                self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
                            }
                        },
                    }
                }

                // Success, advance send window
                while !self.send_buffer.is_empty() &&
                      Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
                    self.send_buffer.shift();
                }

                None
            },
            ST_RESET => { // TODO
                self.state = CS_RST_RECEIVED;
                None
            },
        }
    }

    /// Insert a packet into the socket's buffer.
    ///
    /// The packet is inserted in such a way that the buffer is
    /// ordered ascendingly by their sequence number. This allows
    /// storing packets that were received out of order.
    ///
    /// Inserting a duplicate of a packet will replace the one in the buffer if
    /// it's more recent (larger timestamp).
    fn insert_into_buffer(&mut self, packet: UtpPacket) {
        // Linear scan for the first element with seq_nr >= the new packet's.
        let mut i = 0;
        for pkt in self.incoming_buffer.iter() {
            if Int::from_be(pkt.header.seq_nr) >= Int::from_be(packet.header.seq_nr) {
                break;
            }
            i += 1;
        }

        // NOTE(review): despite the doc comment above, a duplicate seq_nr is
        // replaced UNCONDITIONALLY — timestamps are never compared.
        if !self.incoming_buffer.is_empty() && i < self.incoming_buffer.len() &&
            self.incoming_buffer[i].header.seq_nr == packet.header.seq_nr {
            self.incoming_buffer.remove(i);
            self.incoming_buffer.insert(i, packet);
        } else {
            self.incoming_buffer.insert(i, packet);
        }
    }
}

// NOTE(review): `clone` shares the underlying UDP socket but resets all
// protocol state (buffers, counters, RTT) — the clone is NOT an equivalent
// connection handle; it also uses timeout 500 where `bind` uses 1000.
impl Clone for UtpSocket {
    fn clone(&self) -> UtpSocket {
        UtpSocket {
            socket: self.socket.clone(),
            connected_to: self.connected_to,
            receiver_connection_id: self.receiver_connection_id,
            sender_connection_id: self.sender_connection_id,
            seq_nr: self.seq_nr,
            ack_nr: self.ack_nr,
            state: self.state,
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
            last_acked_timestamp: 0,
            rtt: 0,
            rtt_variance: 0,
            timeout: 500,
        }
    }
}

/// Stream interface for UtpSocket.
pub struct UtpStream {
    socket: UtpSocket,
}

impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        let socket = UtpSocket::bind(addr);
        match socket {
            Ok(s)  => Ok(UtpStream { socket: s }),
            Err(e) => Err(e),
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;

        // Port 0 means the operating system gets to choose it
        // NOTE(review): binds to loopback, so this can only connect to
        // 127.0.0.1 peers — fine for tests, surprising for a library.
        let my_addr = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let socket = match UtpSocket::bind(my_addr) {
            Ok(s) => s,
            Err(e) => return Err(e),
        };

        match socket.connect(dst) {
            Ok(socket) => Ok(UtpStream { socket: socket }),
            Err(e) => Err(e),
        }
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        // Delegates the FIN handshake to the underlying UtpSocket.
        self.socket.close()
    }
}

impl Reader for UtpStream {
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        // A stream read is a datagram receive with the source address dropped.
        match self.socket.recv_from(buf) {
            Ok((read, _src)) => Ok(read),
            Err(e) => Err(e),
        }
    }
}

impl Writer for UtpStream {
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        // Writes always go to the connected peer.
        let dst = self.socket.connected_to;
        self.socket.send_to(buf, dst)
    }
}

#[cfg(test)]
mod test {
    use super::{UtpSocket, UtpPacket};
    use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
    use super::{BUF_SIZE, HEADER_SIZE};
    use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
    use std::rand::random;

    // assert_eq! with a message showing expected vs. actual.
    macro_rules! expect_eq(
        ($left:expr, $right:expr) => (
            if !($left == $right) {
                fail!("expected {}, got {}", $right, $left);
            }
        );
    )

    // Unwrap an IoResult or abort the test task with the error.
    macro_rules! iotry(
        ($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
    )

    #[test]
    fn test_packet_decode() {
        // Captured ST_STATE packet; checks every decoded header field.
        let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a, 0x9f, 0x4a,
                   0x26, 0x21, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x6c, 0x79];
        let pkt = UtpPacket::decode(buf);
        assert_eq!(pkt.header.get_version(), 1);
        assert_eq!(pkt.header.get_type(), ST_STATE);
        assert_eq!(pkt.header.extension, 0);
        assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
        assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
        assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
        assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
        assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
        assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
        assert_eq!(pkt.len(), buf.len());
        assert!(pkt.payload.is_empty());
    }

    #[test]
    fn test_packet_encode() {
        // Builds an ST_DATA packet and checks the serialized bytes.
        let payload = Vec::from_slice("Hello\n".as_bytes());
        let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186);
        let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096);
        let window_size: u32 = 1048576;
        let mut pkt = UtpPacket::new();
        pkt.set_type(ST_DATA);
        pkt.header.timestamp_microseconds =
timestamp.to_be(); pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be(); pkt.header.connection_id = connection_id.to_be(); pkt.header.seq_nr = seq_nr.to_be(); pkt.header.ack_nr = ack_nr.to_be(); pkt.header.wnd_size = window_size.to_be(); pkt.payload = payload.clone(); let header = pkt.header; let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89, 0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x0a]; assert_eq!(pkt.len(), buf.len()); assert_eq!(pkt.len(), HEADER_SIZE + payload.len()); assert_eq!(pkt.payload, payload); assert_eq!(header.get_version(), 1); assert_eq!(header.get_type(), ST_DATA); assert_eq!(header.extension, 0); assert_eq!(Int::from_be(header.connection_id), connection_id); assert_eq!(Int::from_be(header.seq_nr), seq_nr); assert_eq!(Int::from_be(header.ack_nr), ack_nr); assert_eq!(Int::from_be(header.wnd_size), window_size); assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp); assert_eq!(Int::from_be(header.timestamp_difference_microseconds), timestamp_diff); assert_eq!(pkt.bytes(), Vec::from_slice(buf)); } #[test] fn test_reversible() { let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89, 0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x0a]; assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf); } #[test] fn test_socket_ipv4() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); // Check proper difference in client's send connection id and receive connection id assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1); spawn(proc() { let client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); assert_eq!(client.connected_to, 
serverAddr); drop(client); }); let mut buf = [0u8, ..BUF_SIZE]; match server.recv_from(buf) { e => println!("{}", e), } // After establishing a new connection, the server's ids are a mirror of the client's. assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1); assert_eq!(server.connected_to, clientAddr); assert!(server.state == CS_CONNECTED); drop(server); } #[test] fn test_recvfrom_on_closed_socket() { use std::io::test::next_test_ip4; use std::io::{Closed, EndOfFile}; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); spawn(proc() { let mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); assert_eq!(client.close(), Ok(())); drop(client); }); // Make the server listen for incoming connections let mut buf = [0u8, ..BUF_SIZE]; let _resp = server.recv_from(buf); assert!(server.state == CS_CONNECTED); // Closing the connection is fine match server.recv_from(buf) { Err(e) => fail!("{}", e), _ => {}, } expect_eq!(server.state, CS_EOF); // Trying to listen on the socket after closing it raises an // EOF error match server.recv_from(buf) { Err(e) => expect_eq!(e.kind, EndOfFile), v => fail!("expected {}, got {}", EndOfFile, v), } expect_eq!(server.state, CS_CLOSED); // Trying again raises a Closed error match server.recv_from(buf) { Err(e) => expect_eq!(e.kind, Closed), v => fail!("expected {}, got {}", Closed, v), } drop(server); } #[test] fn test_sendto_on_closed_socket() { use std::io::test::next_test_ip4; use std::io::Closed; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); spawn(proc() { let client = iotry!(client.connect(serverAddr)); 
assert!(client.state == CS_CONNECTED); let mut buf = [0u8, ..BUF_SIZE]; let mut client = client; iotry!(client.recv_from(buf)); }); // Make the server listen for incoming connections let mut buf = [0u8, ..BUF_SIZE]; let (_read, _src) = iotry!(server.recv_from(buf)); assert!(server.state == CS_CONNECTED); iotry!(server.close()); expect_eq!(server.state, CS_CLOSED); // Trying to send to the socket after closing it raises an // error match server.send_to(buf, clientAddr) { Err(e) => expect_eq!(e.kind, Closed), v => fail!("expected {}, got {}", Closed, v), } drop(server); } #[test] fn test_acks_on_socket() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let (tx, rx) = channel(); let client = iotry!(UtpSocket::bind(clientAddr)); let server = iotry!(UtpSocket::bind(serverAddr)); spawn(proc() { // Make the server listen for incoming connections let mut server = server; let mut buf = [0u8, ..BUF_SIZE]; let _resp = server.recv_from(buf); tx.send(server.seq_nr); // Close the connection iotry!(server.recv_from(buf)); drop(server); }); let mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); let sender_seq_nr = rx.recv(); let ack_nr = client.ack_nr; assert!(ack_nr != 0); assert!(ack_nr == sender_seq_nr); assert_eq!(client.close(), Ok(())); // The reply to both connect (SYN) and close (FIN) should be // STATE packets, which don't increase the sequence number // and, hence, the receiver's acknowledgement number. 
assert!(client.ack_nr == ack_nr); drop(client); } #[test] fn test_handle_packet() { use std::io::test::next_test_ip4; //fn test_connection_setup() { let initial_connection_id: u16 = random(); let sender_connection_id = initial_connection_id + 1; let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = initial_connection_id.to_be(); let sent = packet.header; // Do we have a response? let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); // Is is of the correct type? let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // Same connection id on both ends during connection establishment assert!(response.header.connection_id == sent.connection_id); // Response acknowledges SYN assert!(response.header.ack_nr == sent.seq_nr); // No payload? assert!(response.payload.is_empty()); //} // --------------------------------- // fn test_connection_usage() { let old_packet = packet; let old_response = response; let mut packet = UtpPacket::new(); packet.set_type(ST_DATA); packet.header.connection_id = sender_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; let sent = packet.header; let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // Sender (i.e., who initated connection and sent SYN) has connection id // equal to initial connection id + 1 // Receiver (i.e., who accepted connection) has connection id equal to // initial connection id assert!(Int::from_be(response.header.connection_id) == initial_connection_id); assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1); // Previous packets should be ack'ed 
assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr)); // Responses with no payload should not increase the sequence number assert!(response.payload.is_empty()); assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr)); // } //fn test_connection_teardown() { let old_packet = packet; let old_response = response; let mut packet = UtpPacket::new(); packet.set_type(ST_FIN); packet.header.connection_id = sender_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; let sent = packet.header; let response = socket.handle_packet(packet); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // FIN packets have no payload but the sequence number shouldn't increase assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1); // Nor should the ACK packet's sequence number assert!(response.header.seq_nr == old_response.header.seq_nr); // FIN should be acknowledged assert!(response.header.ack_nr == sent.seq_nr); //} } #[test] fn test_response_to_keepalive_ack() { use std::io::test::next_test_ip4; // Boilerplate test setup let initial_connection_id: u16 = random(); let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); // Establish connection let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = initial_connection_id.to_be(); let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); let old_packet = packet; let old_response = response; // Now, send a keepalive packet let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_STATE); packet.header.connection_id = initial_connection_id.to_be(); packet.header.seq_nr = 
        // (continuation of the keepalive packet built on the previous lines)
        (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;

    // A keepalive ACK must not be answered
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_none());

    // Send a second keepalive packet, identical to the previous one
    let response = socket.handle_packet(packet.clone());
    assert!(response.is_none());
}

// A packet carrying an unknown connection id must be answered with an
// ST_RESET acknowledging the offending packet's sequence number.
#[test]
fn test_response_to_wrong_connection_id() {
    use std::io::test::next_test_ip4;

    // Boilerplate test setup
    let initial_connection_id: u16 = random();
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));

    // Establish connection
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();

    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    assert!(response.unwrap().get_type() == ST_STATE);

    // Now, disrupt connection with a packet with an incorrect connection id
    let new_connection_id = initial_connection_id.to_le();

    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_STATE);
    packet.header.connection_id = new_connection_id;

    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());

    let response = response.unwrap();
    assert!(response.get_type() == ST_RESET);
    assert!(response.header.ack_nr == packet.header.seq_nr);
}

// End-to-end smoke test: a UtpStream client connects and closes
// immediately; the server must observe a clean (empty) end of stream.
#[test]
fn test_utp_stream() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;

    let serverAddr = next_test_ip4();
    let mut server = iotry!(UtpStream::bind(serverAddr));

    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.close());
    });

    iotry!(server.read_to_end());
}

// Round-trips a payload that fits in a single packet through a UtpStream.
#[test]
fn test_utp_stream_small_data() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;

    // Fits in a packet
    static len: uint = 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();

    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);

    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });

    let read = iotry!(server.read_to_end());
    assert!(!read.is_empty());
    expect_eq!(read.len(), data.len());
    expect_eq!(read, data);
}

// Round-trips a payload large enough to span several packets.
#[test]
fn test_utp_stream_large_data() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;

    // Has to be sent over several packets
    static len: uint = 1024 * 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();

    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);

    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });

    let read = iotry!(server.read_to_end());
    assert!(!read.is_empty());
    expect_eq!(read.len(), data.len());
    expect_eq!(read, data);
}

// Reading from a stream after it has been fully drained and closed must
// fail with `Closed` instead of blocking or returning stale data.
#[test]
fn test_utp_stream_successive_reads() {
    use super::UtpStream;
    use std::io::test::next_test_ip4;
    use std::io::Closed;

    static len: uint = 1024;
    let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8);
    expect_eq!(len, data.len());
    let d = data.clone();

    let serverAddr = next_test_ip4();
    let mut server = UtpStream::bind(serverAddr);

    spawn(proc() {
        let mut client = iotry!(UtpStream::connect(serverAddr));
        iotry!(client.write(d.as_slice()));
        iotry!(client.close());
    });

    iotry!(server.read_to_end());

    let mut buf = [0u8, ..4096];
    match server.read(buf) {
        Err(ref e) if e.kind == Closed => {},
        _ => fail!("should have failed with Closed"),
    };
}

// Packets arriving ahead of the expected sequence number must be buffered
// (not acknowledged) until the gap is filled.
#[test]
fn test_unordered_packets() {
    use std::io::test::next_test_ip4;

    // Boilerplate test setup
    let initial_connection_id: u16 = random();
    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));

    // Establish connection
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_SYN);
    packet.header.connection_id = initial_connection_id.to_be();

    let response = socket.handle_packet(packet.clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.get_type() == ST_STATE);

    let old_packet = packet;
    let old_response = response;

    let mut window: Vec<UtpPacket> = Vec::new();

    // Now, send a keepalive packet
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_DATA);
    packet.header.connection_id = initial_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    packet.payload = vec!(1,2,3);
    window.push(packet);

    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_DATA);
    packet.header.connection_id = initial_connection_id.to_be();
    packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 2).to_be();
    packet.header.ack_nr = old_response.header.seq_nr;
    packet.payload = vec!(4,5,6);
    window.push(packet);

    // Send packets in reverse order
    // The out-of-order packet must not be acknowledged yet...
    let response = socket.handle_packet(window[1].clone());
    assert!(response.is_some());
    let response = response.unwrap();
    assert!(response.header.ack_nr != window[1].header.seq_nr);

    // ...but the gap-filling packet is answered normally.
    let response = socket.handle_packet(window[0].clone());
    assert!(response.is_some());
}

// Same scenario as above but over real UDP sockets: data packets plus a
// FIN are sent out of order, and the receiving stream must still
// reassemble the payload in sequence.
#[test]
fn test_socket_unordered_packets() {
    use std::io::test::next_test_ip4;
    use super::UtpStream;

    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let client = iotry!(UtpSocket::bind(clientAddr));
    let mut server = iotry!(UtpSocket::bind(serverAddr));

    assert!(server.state == CS_NEW);
    assert!(client.state == CS_NEW);

    // Check proper difference in client's send connection id and receive connection id
    assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);

    spawn(proc() {
        let client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        let mut s = client.socket;

        let mut window: Vec<UtpPacket> = Vec::new();

        // Build four 3-byte data packets with consecutive sequence numbers
        let mut i = 0;
        for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) {
            let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
            packet.set_type(ST_DATA);
            packet.header.connection_id = client.sender_connection_id.to_be();
            packet.header.seq_nr = (client.seq_nr + i).to_be();
            packet.header.ack_nr = client.ack_nr.to_be();
            packet.payload = Vec::from_slice(data);
            window.push(packet);
            i += 1;
        }

        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_FIN);
        packet.header.connection_id = client.sender_connection_id.to_be();
        packet.header.seq_nr = (client.seq_nr + 2).to_be();
        packet.header.ack_nr = client.ack_nr.to_be();
        window.push(packet);

        // Transmit in scrambled order, FIN last
        iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr));
        iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr));

        for _ in range(0u, 2) {
            let mut buf = [0, ..BUF_SIZE];
            iotry!(s.recv_from(buf));
        }
    });

    let mut buf = [0u8, ..BUF_SIZE];
    match server.recv_from(buf) {
        e => println!("{}", e),
    }
    // After establishing a new connection, the server's ids are a mirror of the client's.
    assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);

    assert!(server.state == CS_CONNECTED);

    let mut stream = UtpStream { socket: server };
    let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1);

    match stream.read_to_end() {
        Ok(data) => {
            expect_eq!(data.len(), expected.len());
            expect_eq!(data, expected);
        },
        Err(e) => fail!("{}", e),
    }
}

// A SYN establishes a connection; its payload-less header must be
// acknowledged immediately and never parked in the incoming buffer.
#[test]
fn test_socket_should_not_buffer_syn_packets() {
    use std::io::test::next_test_ip4;
    use std::io::net::udp::UdpSocket;
    use super::UtpSocket;

    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let server = iotry!(UtpSocket::bind(serverAddr));
    let client = iotry!(UdpSocket::bind(clientAddr));

    // Raw bytes of a known-good SYN packet (fixed seq_nr and connection id)
    let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00,
        0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a,
        0xf1, 0x00, 0x00];
    let test_syn_pkt = UtpPacket::decode(test_syn_raw);
    let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr);

    spawn(proc() {
        let mut client = client;
        iotry!(client.send_to(test_syn_raw, serverAddr));
        client.set_timeout(Some(10));
        let mut buf = [0, ..BUF_SIZE];
        let packet = match client.recv_from(buf) {
            Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)),
            Err(e) => fail!("{}", e),
        };
        expect_eq!(packet.header.ack_nr, seq_nr.to_be());
        drop(client);
    });

    let mut server = server;
    let mut buf = [0, ..20];
    iotry!(server.recv_from(buf));
    assert!(server.ack_nr != 0);
    expect_eq!(server.ack_nr, seq_nr);
    assert!(server.incoming_buffer.is_empty());
}

// A triple-ACK for the same sequence number is a fast-resend request: the
// peer must retransmit the packet reported as missing.
#[test]
fn test_response_to_triple_ack() {
    use std::io::test::next_test_ip4;

    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let mut server = iotry!(UtpSocket::bind(serverAddr));
    let client = iotry!(UtpSocket::bind(clientAddr));

    // Fits in a packet
    static len: uint = 1024;
    let data = Vec::from_fn(len, |idx| idx as u8);
    let d = data.clone();
    expect_eq!(len, data.len());

    spawn(proc() {
        let mut client = iotry!(client.connect(serverAddr));
        iotry!(client.send_to(d.as_slice(), serverAddr));
        iotry!(client.close());
    });

    let mut buf = [0, ..BUF_SIZE];

    // Expect SYN
    iotry!(server.recv_from(buf));

    // Receive data
    let mut data_packet;
    match server.socket.recv_from(buf) {
        Ok((read, _src)) => {
            data_packet = UtpPacket::decode(buf.slice_to(read));
            assert!(data_packet.get_type() == ST_DATA);
            expect_eq!(data_packet.payload, data);
            assert_eq!(data_packet.payload.len(), data.len());
        },
        Err(e) => fail!("{}", e),
    }
    let data_packet = data_packet;

    // Send triple ACK
    let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
    packet.set_type(ST_STATE);
    packet.header.seq_nr = server.seq_nr.to_be();
    // ACK the packet *before* the data packet, i.e. report it as missing
    packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be();
    packet.header.connection_id = server.sender_connection_id.to_be();

    for _ in range(0u, 3) {
        iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr));
    }

    // Receive data again and check that it's the same we reported as missing
    match server.socket.recv_from(buf) {
        Ok((0, _)) => fail!("Received 0 bytes from socket"),
        Ok((read, _src)) => {
            let packet = UtpPacket::decode(buf.slice_to(read));
            assert_eq!(packet.get_type(), ST_DATA);
            assert_eq!(Int::from_be(packet.header.seq_nr),
                       Int::from_be(data_packet.header.seq_nr));
            assert!(packet.payload == data_packet.payload);
            let response = server.handle_packet(packet).unwrap();
            iotry!(server.socket.send_to(response.bytes().as_slice(),
                                         server.connected_to));
        },
        Err(e) => fail!("{}", e),
    }

    // Receive close
    iotry!(server.recv_from(buf));
}

// Losing a packet must trigger the read timeout, which in turn sends a
// fast-resend request; the retransmitted packet is then received normally.
#[test]
fn test_socket_timeout_request() {
    use std::io::test::next_test_ip4;

    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let client = iotry!(UtpSocket::bind(clientAddr));
    let mut server = iotry!(UtpSocket::bind(serverAddr));

    let len = 512;
    let data = Vec::from_fn(len, |idx| idx as u8);
    let d = data.clone();

    assert!(server.state == CS_NEW);
    assert!(client.state == CS_NEW);

    // Check proper difference in client's send connection id and receive connection id
    assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);

    spawn(proc() {
        let mut client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        assert_eq!(client.connected_to, serverAddr);
        iotry!(client.send_to(d.as_slice(), serverAddr));
        drop(client);
    });

    let mut buf = [0u8, ..BUF_SIZE];
    match server.recv_from(buf) {
        e => println!("{}", e),
    }
    // After establishing a new connection, the server's ids are a mirror of the client's.
    assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);
    assert_eq!(server.connected_to, clientAddr);

    assert!(server.state == CS_CONNECTED);

    // Purposefully read from UDP socket directly and discard it, in order
    // to behave as if the packet was lost and thus trigger the timeout
    // handling in the *next* call to `UtpSocket.recv_from`.
    iotry!(server.socket.recv_from(buf));

    // Now wait for the previously discarded packet
    loop {
        match server.recv_from(buf) {
            Ok((0, _)) => continue,
            Ok(_) => break,
            Err(e) => fail!("{}", e),
        }
    }

    drop(server);
}

// `insert_into_buffer` must keep the incoming buffer ordered by sequence
// number and replace an existing entry when a newer duplicate arrives.
#[test]
fn test_sorted_buffer_insertion() {
    use std::io::test::next_test_ip4;

    let serverAddr = next_test_ip4();
    let mut socket = iotry!(UtpSocket::bind(serverAddr));

    let mut packet = UtpPacket::new();
    packet.header.seq_nr = 1;

    assert!(socket.incoming_buffer.is_empty());

    socket.insert_into_buffer(packet.clone());
    assert_eq!(socket.incoming_buffer.len(), 1);

    packet.header.seq_nr = 2;
    packet.header.timestamp_microseconds = 128;

    socket.insert_into_buffer(packet.clone());
    assert_eq!(socket.incoming_buffer.len(), 2);
    assert_eq!(socket.incoming_buffer[1].header.seq_nr, 2);
    assert_eq!(socket.incoming_buffer[1].header.timestamp_microseconds, 128);

    packet.header.seq_nr = 3;
    packet.header.timestamp_microseconds = 256;

    socket.insert_into_buffer(packet.clone());
    assert_eq!(socket.incoming_buffer.len(), 3);
    assert_eq!(socket.incoming_buffer[2].header.seq_nr, 3);
    assert_eq!(socket.incoming_buffer[2].header.timestamp_microseconds, 256);

    // Replace a packet with a more recent version
    packet.header.seq_nr = 2;
    packet.header.timestamp_microseconds = 456;

    socket.insert_into_buffer(packet.clone());
    assert_eq!(socket.incoming_buffer.len(), 3);
    assert_eq!(socket.incoming_buffer[1].header.seq_nr, 2);
    assert_eq!(socket.incoming_buffer[1].header.timestamp_microseconds, 456);
}

// Two copies of the same data packet (differing only in timestamp) must
// yield the payload exactly once on the receiving stream.
#[test]
fn test_duplicate_packet_handling() {
    use std::io::test::next_test_ip4;
    use super::UtpStream;

    let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4());
    let client = iotry!(UtpSocket::bind(clientAddr));
    let mut server = iotry!(UtpSocket::bind(serverAddr));

    assert!(server.state == CS_NEW);
    assert!(client.state == CS_NEW);

    // Check proper difference in client's send connection id and receive connection id
    assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1);

    spawn(proc() {
        let mut client = iotry!(client.connect(serverAddr));
        assert!(client.state == CS_CONNECTED);
        let mut s = client.socket.clone();

        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_DATA);
        packet.header.connection_id = client.sender_connection_id.to_be();
        packet.header.seq_nr = client.seq_nr.to_be();
        packet.header.ack_nr = client.ack_nr.to_be();
        packet.payload = vec!(1,2,3);

        // Send two copies of the packet, with different timestamps
        for _ in range(0u, 2) {
            packet.header.timestamp_microseconds = super::now_microseconds();
            iotry!(s.send_to(packet.bytes().as_slice(), serverAddr));
        }
        client.seq_nr += 1;

        // Receive one ACK
        for _ in range(0u, 1) {
            let mut buf = [0, ..BUF_SIZE];
            iotry!(s.recv_from(buf));
        }

        iotry!(client.close());
    });

    let mut buf = [0u8, ..BUF_SIZE];
    match server.recv_from(buf) {
        e => println!("{}", e),
    }
    // After establishing a new connection, the server's ids are a mirror of the client's.
    assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1);

    assert!(server.state == CS_CONNECTED);

    let mut stream = UtpStream { socket: server };
    let expected: Vec<u8> = vec!(1,2,3);

    match stream.read_to_end() {
        Ok(data) => {
            println!("{}", data);
            expect_eq!(data.len(), expected.len());
            expect_eq!(data, expected);
        },
        Err(e) => fail!("{}", e),
    }
}

}

Add test for correct packet decoding.

//! Implementation of a Micro Transport Protocol library.
//!
//! http://www.bittorrent.org/beps/bep_0029.html
//!
//! TODO
//! ----
//!
//! - congestion control
//! - proper connection closing
//! - automatically send FIN (or should it be RST?) on `drop` if not already closed
//! - setters and getters that hide header field endianness conversion
//! - SACK extension
//! - handle packet loss

#![crate_name = "utp"]
#![license = "MIT/ASL2"]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]

extern crate time;
#[phase(plugin, link)] extern crate log;

use std::io::net::udp::UdpSocket;
use std::io::net::ip::SocketAddr;
use std::io::IoResult;
use std::mem::transmute;
use std::rand::random;
use std::fmt;

// Size of a uTP packet header in bytes.
static HEADER_SIZE: uint = 20;
// For simplicity's sake, let us assume no packet will ever exceed the
// Ethernet maximum transfer unit of 1500 bytes.
static BUF_SIZE: uint = 1500;

// Pack the bytes `$src[$start..$end]` (inclusive bounds) into an integer of
// type `$t`, placing `$src[$start]` in the least-significant position — so
// the field keeps its wire byte order in memory; callers convert with
// `Int::from_be` when they need the numeric value.
macro_rules! u8_to_unsigned_be(
    ($src:ident[$start:expr..$end:expr] -> $t:ty) => ({
        let mut result: $t = 0;
        for i in range(0u, $end-$start+1).rev() {
            result = result | $src[$start+i] as $t << i*8;
        }
        result
    })
)

// Build and send an ST_STATE reply acknowledging `$header` to `$src`.
// NOTE(review): expands `self` unhygienically, so this macro is only usable
// inside `UtpSocket` methods.
macro_rules! reply_with_ack(
    ($header:expr, $src:expr) => ({
        let resp = self.prepare_reply($header, ST_STATE).wnd_size(BUF_SIZE as u32);
        try!(self.socket.send_to(resp.bytes().as_slice(), $src));
        debug!("sent {}", resp.header);
    })
)

/// Return current time in microseconds since the UNIX epoch.
fn now_microseconds() -> u32 {
    let t = time::get_time();
    // Truncated to u32, so the value wraps periodically; uTP only uses these
    // timestamps for relative (difference) arithmetic.
    (t.sec * 1_000_000) as u32 + (t.nsec/1000) as u32
}

// Packet types defined by BEP 29; the numeric value is stored in the upper
// nibble of the header's `type_ver` field.
#[allow(dead_code,non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpPacketType {
    ST_DATA = 0,  // regular data packet
    ST_FIN = 1,   // connection teardown
    ST_STATE = 2, // acknowledgement, carries no payload
    ST_RESET = 3, // forcibly terminate connection
    ST_SYN = 4,   // connection initiation
}

// Header extension identifiers (stored in the header's `extension` byte).
enum UtpExtension {
    SelectiveAckExtensionId = 1,
}

// On-the-wire uTP packet header (20 bytes). Multi-byte fields are kept in
// network (big-endian) byte order; read them through `Int::from_be`.
#[allow(dead_code)]
#[deriving(Clone)]
#[packed]
struct UtpPacketHeader {
    type_ver: u8, // type: u4, ver: u4
    extension: u8,
    connection_id: u16,
    timestamp_microseconds: u32,
    timestamp_difference_microseconds: u32,
    wnd_size: u32,
    seq_nr: u16,
    ack_nr: u16,
}

impl UtpPacketHeader {
    /// Set type of packet to the specified type.
    fn set_type(&mut self, t: UtpPacketType) {
        // Preserve the version nibble, replace the type nibble
        let version = 0x0F & self.type_ver;
        self.type_ver = t as u8 << 4 | version;
    }

    // Extract the packet type from the upper nibble of `type_ver`.
    fn get_type(&self) -> UtpPacketType {
        let t: UtpPacketType = unsafe { transmute(self.type_ver >> 4) };
        t
    }

    // Extract the protocol version from the lower nibble of `type_ver`.
    fn get_version(&self) -> u8 {
        self.type_ver & 0x0F
    }

    // Builder-style setter: return a copy of this header with the window
    // size replaced (stored big-endian).
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacketHeader {
        UtpPacketHeader {
            wnd_size: new_wnd_size.to_be(),
            .. self.clone()
        }
    }

    /// Return packet header as a slice of bytes.
    fn bytes(&self) -> &[u8] {
        // NOTE(review): relies on `#[packed]` giving the struct the exact
        // 20-byte wire layout for the transmute to be sound.
        let buf: &[u8, ..HEADER_SIZE] = unsafe { transmute(self) };
        return buf.as_slice();
    }

    fn len(&self) -> uint {
        return HEADER_SIZE;
    }

    /// Read byte buffer and return corresponding packet header.
    /// It assumes the fields are in network (big-endian) byte order,
    /// preserving it.
    fn decode(buf: &[u8]) -> UtpPacketHeader {
        UtpPacketHeader {
            type_ver: buf[0],
            extension: buf[1],
            connection_id: u8_to_unsigned_be!(buf[2..3] -> u16),
            timestamp_microseconds: u8_to_unsigned_be!(buf[4..7] -> u32),
            timestamp_difference_microseconds: u8_to_unsigned_be!(buf[8..11] -> u32),
            wnd_size: u8_to_unsigned_be!(buf[12..15] -> u32),
            seq_nr: u8_to_unsigned_be!(buf[16..17] -> u16),
            ack_nr: u8_to_unsigned_be!(buf[18..19] -> u16),
        }
    }
}

impl fmt::Show for UtpPacketHeader {
    // Human-readable dump with every multi-byte field converted from
    // network byte order.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "(type: {}, version: {}, extension: {}, \
                connection_id: {}, timestamp_microseconds: {}, \
                timestamp_difference_microseconds: {}, wnd_size: {}, \
                seq_nr: {}, ack_nr: {})",
                self.get_type(),
                Int::from_be(self.get_version()),
                Int::from_be(self.extension),
                Int::from_be(self.connection_id),
                Int::from_be(self.timestamp_microseconds),
                Int::from_be(self.timestamp_difference_microseconds),
                Int::from_be(self.wnd_size),
                Int::from_be(self.seq_nr),
                Int::from_be(self.ack_nr),
        )
    }
}

// A full uTP packet: header, optional extension data, and payload.
#[allow(dead_code)]
struct UtpPacket {
    header: UtpPacketHeader,
    extensions: Vec<u8>,
    payload: Vec<u8>,
}

impl UtpPacket {
    /// Construct a new, empty packet.
    fn new() -> UtpPacket {
        UtpPacket {
            header: UtpPacketHeader {
                // defaults to a version-1 ST_DATA packet
                type_ver: ST_DATA as u8 << 4 | 1,
                extension: 0,
                connection_id: 0,
                timestamp_microseconds: 0,
                timestamp_difference_microseconds: 0,
                wnd_size: 0,
                seq_nr: 0,
                ack_nr: 0,
            },
            extensions: Vec::new(),
            payload: Vec::new(),
        }
    }

    fn set_type(&mut self, t: UtpPacketType) {
        self.header.set_type(t);
    }

    // TODO: Read up on pointers and ownership
    fn get_type(&self) -> UtpPacketType {
        self.header.get_type()
    }

    // NOTE(review): `.. UtpPacket::new()` means the returned packet keeps
    // only the (modified) header — extensions and payload are reset to
    // empty, not copied from `self`.
    fn wnd_size(&self, new_wnd_size: u32) -> UtpPacket {
        UtpPacket {
            header: self.header.wnd_size(new_wnd_size),
            .. UtpPacket::new()
        }
    }

    /// Set Selective ACK field in packet header and add appropriate data.
    ///
    /// If None is passed, the SACK extension is disabled and the respective
    /// data is flushed.
    /// Otherwise, the SACK extension is enabled and the
    /// vector `v` is taken as the extension's payload.
    ///
    /// The length of the SACK extension is expressed in bytes, which
    /// must be a multiple of 4 and at least 4.
    fn set_sack(&mut self, v: Option<Vec<u8>>) {
        match v {
            None => {
                // Disable the extension and drop any stored extension data
                self.header.extension = 0;
                self.extensions = Vec::new();
            },
            Some(bv) => {
                // The length of the SACK extension is expressed in bytes, which
                // must be a multiple of 4 and at least 4.
                assert!(bv.len() >= 4);
                assert!(bv.len() % 4 == 0);

                self.header.extension = SelectiveAckExtensionId as u8;
                // Extension list header
                self.extensions.push(SelectiveAckExtensionId as u8);
                // length in bytes, multiples of 4, >= 4
                self.extensions.push(bv.len() as u8);
                // Elements
                for byte in bv.iter() {
                    self.extensions.push(*byte);
                }
            }
        }
    }

    /// TODO: return slice
    fn bytes(&self) -> Vec<u8> {
        // Wire format: header | extensions | payload
        let mut buf = Vec::with_capacity(self.len());
        buf.push_all(self.header.bytes());
        buf.push_all(self.extensions.as_slice());
        buf.push_all(self.payload.as_slice());
        return buf;
    }

    fn len(&self) -> uint {
        let len = self.header.len() + self.payload.len();
        // Add an extra two bytes to extension length corresponding to the list
        // header (extension identifier + list length)
        if self.extensions.is_empty() {
            len
        } else {
            len + self.extensions.len() + 2
        }
    }

    /// Decode a byte slice and construct the equivalent UtpPacket.
    ///
    /// Note that this method makes no attempt to guess the payload size, saving
    /// all except the initial 20 bytes corresponding to the header as payload.
    /// It's the caller's responsability to use an appropriately sized buffer.
    fn decode(buf: &[u8]) -> UtpPacket {
        let header = UtpPacketHeader::decode(buf);

        // If a SACK extension is flagged, split it off before the payload
        let (extensions, payload) =
            if header.extension == SelectiveAckExtensionId as u8 {
                assert!(buf[HEADER_SIZE] == SelectiveAckExtensionId as u8);
                let len = buf[HEADER_SIZE + 1] as uint;
                let extension_start = HEADER_SIZE + 2;
                (Vec::from_slice(buf.slice(extension_start, extension_start + len)),
                 Vec::from_slice(buf.slice_from(extension_start + len)))
            } else {
                (Vec::new(), Vec::from_slice(buf.slice_from(HEADER_SIZE)))
            };

        UtpPacket {
            header: header,
            extensions: extensions,
            payload: payload,
        }
    }
}

impl Clone for UtpPacket {
    fn clone(&self) -> UtpPacket {
        UtpPacket {
            header: self.header,
            extensions: self.extensions.clone(),
            payload: self.payload.clone(),
        }
    }
}

impl fmt::Show for UtpPacket {
    // A packet displays as its header; payload bytes are omitted.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.header.fmt(f)
    }
}

// Connection state machine states for `UtpSocket`.
#[allow(non_camel_case_types)]
#[deriving(PartialEq,Eq,Show)]
enum UtpSocketState {
    CS_NEW,          // freshly bound, no connection attempted yet
    CS_CONNECTED,
    CS_SYN_SENT,
    CS_FIN_RECEIVED,
    CS_FIN_SENT,
    CS_RST_RECEIVED,
    CS_CLOSED,
    CS_EOF,          // FIN processed; next read reports end-of-file
}

/// A uTP (Micro Transport Protocol) socket.
pub struct UtpSocket {
    socket: UdpSocket,
    connected_to: SocketAddr,
    sender_connection_id: u16,
    receiver_connection_id: u16,
    seq_nr: u16,
    ack_nr: u16,
    state: UtpSocketState,
    // Received but not acknowledged packets
    incoming_buffer: Vec<UtpPacket>,
    // Sent but not yet acknowledged packets
    send_buffer: Vec<UtpPacket>,
    duplicate_ack_count: uint,
    last_acked: u16,
    last_acked_timestamp: u32,
    rtt: int,
    rtt_variance: int,
    timeout: int,
}

impl UtpSocket {
    /// Create a UTP socket from the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpSocket> {
        let skt = UdpSocket::bind(addr);
        // Receive id is random; send id is always receive id + 1, matching
        // the convention for the connection-initiating side.
        let connection_id = random::<u16>();
        match skt {
            Ok(x) => Ok(UtpSocket {
                socket: x,
                connected_to: addr,
                receiver_connection_id: connection_id,
                sender_connection_id: connection_id + 1,
                seq_nr: 1,
                ack_nr: 0,
                state: CS_NEW,
                incoming_buffer: Vec::new(),
                send_buffer: Vec::new(),
                duplicate_ack_count: 0,
                last_acked: 0,
                last_acked_timestamp: 0,
                rtt: 0,
                rtt_variance: 0,
                timeout: 1000,
            }),
            Err(e) => Err(e)
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(mut self, other: SocketAddr) -> IoResult<UtpSocket> {
        use std::io::{IoError, ConnectionFailed};

        self.connected_to = other;
        assert_eq!(self.receiver_connection_id + 1, self.sender_connection_id);

        // The SYN carries the *receive* connection id
        let mut packet = UtpPacket::new();
        packet.set_type(ST_SYN);
        packet.header.connection_id = self.receiver_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();

        // Send packet
        let dst = self.connected_to;
        let _result = self.socket.send_to(packet.bytes().as_slice(), dst);
        debug!("sent {}", packet.header);

        self.state = CS_SYN_SENT;

        // Block waiting for the peer's ST_STATE that acknowledges the SYN
        let mut buf = [0, ..BUF_SIZE];
        let (_len, addr) = match self.socket.recv_from(buf) {
            Ok(v) => v,
            Err(e) => fail!("{}", e),
        };
        assert!(_len == HEADER_SIZE);
        assert!(addr == self.connected_to);

        let packet = UtpPacket::decode(buf.slice_to(_len));
        if packet.get_type() != ST_STATE {
            return Err(IoError {
                kind: ConnectionFailed,
                desc: "The remote peer sent an incorrect reply",
                detail: None,
            });
        }
        self.ack_nr = Int::from_be(packet.header.seq_nr);

        debug!("connected to: {} {}", addr, self.connected_to);
        self.state = CS_CONNECTED;
        self.seq_nr += 1;

        Ok(self)
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
    #[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        // Build a FIN carrying the current sequence/ack numbers
        let mut packet = UtpPacket::new();
        packet.header.connection_id = self.sender_connection_id.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.timestamp_microseconds = now_microseconds().to_be();
        packet.set_type(ST_FIN);

        // Send FIN
        let dst = self.connected_to;
        try!(self.socket.send_to(packet.bytes().as_slice(), dst));
        debug!("sent {}", packet);
        self.state = CS_FIN_SENT;

        // Receive JAKE
        let mut buf = [0u8, ..BUF_SIZE];
        try!(self.socket.recv_from(buf));
        let resp = UtpPacket::decode(buf);
        debug!("received {}", resp);
        assert!(resp.get_type() == ST_STATE);

        // Set socket state
        self.state = CS_CLOSED;

        Ok(())
    }

    /// Receive data from socket.
    ///
    /// On success, returns the number of bytes read and the sender's address.
    /// Returns CS_EOF after receiving a FIN packet when the remaining
    /// inflight packets are consumed. Subsequent calls return CS_CLOSED.
    #[unstable]
    pub fn recv_from(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        use std::cmp::min;
        use std::io::{IoError, EndOfFile, Closed, TimedOut, ConnectionReset};

        // EOF was reached on a previous call: report it once, then move to
        // the closed state so further reads fail with Closed.
        if self.state == CS_EOF {
            self.state = CS_CLOSED;
            return Err(IoError {
                kind: EndOfFile,
                desc: "End of file reached",
                detail: None,
            });
        }

        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        let mut b = [0, ..BUF_SIZE + HEADER_SIZE];
        // An unconnected (CS_NEW) socket blocks indefinitely; a connected
        // one uses the adaptive timeout to detect lost packets.
        debug!("setting read timeout of {} ms", self.timeout);
        if self.state != CS_NEW {
            self.socket.set_read_timeout(Some(self.timeout as u64));
        }
        let (read, src) = match self.socket.recv_from(b) {
            Err(ref e) if e.kind == TimedOut => {
                debug!("recv_from timed out");
                // Exponential backoff, then ask the peer to resend
                self.timeout = self.timeout * 2;
                self.send_fast_resend_request();
                return Ok((0, self.connected_to));
            },
            Ok(x) => x,
            Err(e) => return Err(e),
        };
        let packet = UtpPacket::decode(b.slice_to(read));
        debug!("received {}", packet.header);

        if packet.get_type() == ST_RESET {
            return Err(IoError {
                kind: ConnectionReset,
                desc: "Remote host aborted connection (incorrect connection id)",
                detail: None,
            });
        }

        // TODO: move this to handle_packet?
        if packet.get_type() == ST_SYN {
            self.connected_to = src;
        }

        // Check if the packet is out of order (that is, it's sequence number
        // does not immediately follow the ACK number)
        if packet.get_type() != ST_STATE &&
            packet.get_type() != ST_SYN &&
            self.ack_nr + 1 < Int::from_be(packet.header.seq_nr) {
            debug!("current ack_nr ({}) is behind received packet seq_nr ({})",
                   self.ack_nr, Int::from_be(packet.header.seq_nr));

            // Add to buffer but do not acknowledge until all packets between
            // ack_nr + 1 and curr_packet.seq_nr - 1 are received
            self.insert_into_buffer(packet);
            return Ok((0, self.connected_to));
        }

        // Copy received payload to output buffer if packet isn't a duplicate
        let mut read = read - HEADER_SIZE;
        if self.ack_nr < Int::from_be(packet.header.seq_nr) {
            for i in range(0u, min(buf.len(), read)) {
                buf[i] = b[i + HEADER_SIZE];
            }
        } else {
            read = 0;
        }

        // Generate and send a reply when the packet warrants one
        match self.handle_packet(packet.clone()) {
            Some(pkt) => {
                let pkt = pkt.wnd_size(BUF_SIZE as u32);
                try!(self.socket.send_to(pkt.bytes().as_slice(), src));
                debug!("sent {}", pkt.header);
            },
            None => {}
        };

        // Flush incoming buffer if possible
        let read = self.flush_incoming_buffer(buf, read);

        Ok((read, src))
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `recv_from`"]
    pub fn recvfrom(&mut self, buf: &mut[u8]) -> IoResult<(uint,SocketAddr)> {
        self.recv_from(buf)
    }

    // Build a reply packet of type `t` for `original`, filling in the local
    // timestamp, the timestamp difference, the send connection id, and the
    // current sequence/ack numbers.
    fn prepare_reply(&self, original: &UtpPacketHeader, t: UtpPacketType) -> UtpPacket {
        let mut resp = UtpPacket::new();
        resp.set_type(t);
        let self_t_micro: u32 = now_microseconds();
        let other_t_micro: u32 = Int::from_be(original.timestamp_microseconds);
        resp.header.timestamp_microseconds = self_t_micro.to_be();
        resp.header.timestamp_difference_microseconds = (self_t_micro - other_t_micro).to_be();
        resp.header.connection_id = self.sender_connection_id.to_be();
        resp.header.seq_nr = self.seq_nr.to_be();
        resp.header.ack_nr =
            self.ack_nr.to_be();
        resp
    }

    /// Discards sequential, ordered packets in incoming buffer, starting from
    /// the most recently acknowledged to the most recent, as long as there are
    /// no missing packets. The discarded packets' payload is written to the
    /// slice `buf`, starting in position `start`.
    /// Returns the last written index.
    fn flush_incoming_buffer(&mut self, buf: &mut [u8], start: uint) -> uint {
        let mut idx = start;
        // Drain only while the buffered head is exactly the next expected
        // sequence number — a gap stops the flush.
        while !self.incoming_buffer.is_empty() &&
            self.ack_nr + 1 == Int::from_be(self.incoming_buffer[0].header.seq_nr) {
            let packet = self.incoming_buffer.shift().unwrap();
            debug!("Removing packet from buffer: {}", packet);

            for i in range(0u, packet.payload.len()) {
                buf[idx] = packet.payload[i];
                idx += 1;
            }
            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        return idx;
    }

    /// Send data on socket to the given address. Returns nothing on success.
    //
    // # Implementation details
    //
    // This method inserts packets into the send buffer and keeps trying to
    // advance the send window until an ACK corresponding to the last packet is
    // received.
    //
    // Note that the buffer passed to `send_to` might exceed the maximum packet
    // size, which will result in the data being split over several packets.
    #[unstable]
    pub fn send_to(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        use std::io::{IoError, Closed};

        if self.state == CS_CLOSED {
            return Err(IoError {
                kind: Closed,
                desc: "Connection closed",
                detail: None,
            });
        }

        // Split data into packet-sized chunks; buffer and transmit each one
        for chunk in buf.chunks(BUF_SIZE) {
            let mut packet = UtpPacket::new();
            packet.set_type(ST_DATA);
            packet.payload = Vec::from_slice(chunk);
            packet.header.timestamp_microseconds = now_microseconds().to_be();
            packet.header.seq_nr = self.seq_nr.to_be();
            packet.header.ack_nr = self.ack_nr.to_be();
            packet.header.connection_id = self.sender_connection_id.to_be();

            debug!("Pushing packet into send buffer: {}", packet);
            self.send_buffer.push(packet.clone());
            try!(self.socket.send_to(packet.bytes().as_slice(), dst));
            self.seq_nr += 1;
        }

        // Consume acknowledgements until latest packet
        let mut buf = [0, ..BUF_SIZE];
        while self.last_acked < self.seq_nr - 1 {
            try!(self.recv_from(buf));
        }

        Ok(())
    }

    #[allow(missing_doc)]
    #[deprecated = "renamed to `send_to`"]
    pub fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()> {
        self.send_to(buf, dst)
    }

    /// Send fast resend request.
    ///
    /// Sends three identical ACK/STATE packets to the remote host, signalling a
    /// fast resend request.
    fn send_fast_resend_request(&mut self) {
        let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32);
        packet.set_type(ST_STATE);
        packet.header.ack_nr = self.ack_nr.to_be();
        packet.header.seq_nr = self.seq_nr.to_be();
        packet.header.connection_id = self.sender_connection_id.to_be();

        for _ in range(0u, 3) {
            // Refresh the timestamps on every copy
            let t = now_microseconds();
            packet.header.timestamp_microseconds = t.to_be();
            packet.header.timestamp_difference_microseconds = (t - self.last_acked_timestamp).to_be();
            // NOTE(review): the send result is deliberately ignored — this
            // request is best effort.
            self.socket.send_to(packet.bytes().as_slice(), self.connected_to);
            debug!("sent {}", packet.header);
        }
    }

    /// Handle incoming packet, updating socket state accordingly.
    ///
    /// Returns appropriate reply packet, if needed.
fn handle_packet(&mut self, packet: UtpPacket) -> Option<UtpPacket> {
        // Reset connection if connection id doesn't match and this isn't a SYN
        if packet.get_type() != ST_SYN &&
           !(Int::from_be(packet.header.connection_id) == self.sender_connection_id ||
           Int::from_be(packet.header.connection_id) == self.receiver_connection_id) {
            return Some(self.prepare_reply(&packet.header, ST_RESET));
        }

        // Acknowledge only if the packet strictly follows the previous one
        if self.ack_nr + 1 == Int::from_be(packet.header.seq_nr) {
            self.ack_nr = Int::from_be(packet.header.seq_nr);
        }

        match packet.header.get_type() {
            ST_SYN => { // Respond with an ACK and populate own fields
                // Update socket information for new connections
                self.ack_nr = Int::from_be(packet.header.seq_nr);
                self.seq_nr = random();
                // Connection-id convention: the accepting side receives on
                // id+1 and sends on the initiator's id.
                self.receiver_connection_id = Int::from_be(packet.header.connection_id) + 1;
                self.sender_connection_id = Int::from_be(packet.header.connection_id);
                self.state = CS_CONNECTED;
                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_DATA => Some(self.prepare_reply(&packet.header, ST_STATE)),
            ST_FIN => {
                self.state = CS_FIN_RECEIVED;
                // TODO: check if no packets are missing
                // If all packets are received
                self.state = CS_EOF;
                Some(self.prepare_reply(&packet.header, ST_STATE))
            }
            ST_STATE => {
                // RTT/RTO estimation in the RFC 6298 style: variance gains
                // 1/4, mean gains 1/8, timeout = rtt + 4*variance (min 500).
                let packet_rtt = Int::from_be(packet.header.timestamp_difference_microseconds) as int;
                let delta = self.rtt - packet_rtt;
                self.rtt_variance += (std::num::abs(delta) - self.rtt_variance) / 4;
                self.rtt += (packet_rtt - self.rtt) / 8;
                self.timeout = std::cmp::max(self.rtt + self.rtt_variance * 4, 500);

                debug!("packet_rtt: {}", packet_rtt);
                debug!("delta: {}", delta);
                debug!("self.rtt_variance: {}", self.rtt_variance);
                debug!("self.rtt: {}", self.rtt);
                debug!("self.timeout: {}", self.timeout);

                // NOTE(review): this compares the raw (big-endian) ack_nr
                // field against a byte-swapped `last_acked`, yet below
                // `last_acked` is assigned the byte-swapped value — the two
                // sides look inconsistent; confirm intended byte order.
                if packet.header.ack_nr == Int::from_be(self.last_acked) {
                    self.duplicate_ack_count += 1;
                } else {
                    self.last_acked = Int::from_be(packet.header.ack_nr);
                    self.last_acked_timestamp = now_microseconds();
                    self.duplicate_ack_count = 1;
                }

                // Three duplicate ACKs, must resend packets since `ack_nr + 1`
                // TODO: checking if the send buffer isn't empty isn't a
                // foolproof way to differentiate between triple-ACK and three
                // keep alives spread in time
                if !self.send_buffer.is_empty() && self.duplicate_ack_count == 3 {
                    // Locate the first unacked packet and retransmit it and
                    // every packet queued before it.
                    match self.send_buffer.iter().position(|pkt| Int::from_be(pkt.header.seq_nr) == Int::from_be(packet.header.ack_nr) + 1) {
                        None => fail!("Received request to resend packets since {} but none was found in send buffer!", Int::from_be(packet.header.ack_nr) + 1),
                        Some(position) => {
                            for _ in range(0u, position + 1) {
                                let to_send = self.send_buffer.shift().unwrap();
                                debug!("resending: {}", to_send);
                                self.socket.send_to(to_send.bytes().as_slice(), self.connected_to);
                            }
                        },
                    }
                }

                // Success, advance send window
                // (drop every buffered packet that is now acknowledged).
                while !self.send_buffer.is_empty() &&
                      Int::from_be(self.send_buffer[0].header.seq_nr) <= self.last_acked {
                    self.send_buffer.shift();
                }

                None
            },
            ST_RESET => { // TODO
                self.state = CS_RST_RECEIVED;
                None
            },
        }
    }

    /// Insert a packet into the socket's buffer.
    ///
    /// The packet is inserted in such a way that the buffer is
    /// ordered ascendingly by their sequence number. This allows
    /// storing packets that were received out of order.
    ///
    /// Inserting a duplicate of a packet will replace the one in the buffer if
    /// it's more recent (larger timestamp).
fn insert_into_buffer(&mut self, packet: UtpPacket) {
        // Linear scan for the first buffered packet whose sequence number is
        // >= the incoming one; `i` becomes the insertion index.
        let mut i = 0;
        for pkt in self.incoming_buffer.iter() {
            if Int::from_be(pkt.header.seq_nr) >= Int::from_be(packet.header.seq_nr) {
                break;
            }
            i += 1;
        }

        // Same sequence number already present: replace it; otherwise insert
        // at the sorted position.
        if !self.incoming_buffer.is_empty() &&
            i < self.incoming_buffer.len() &&
            self.incoming_buffer[i].header.seq_nr == packet.header.seq_nr {
            self.incoming_buffer.remove(i);
            self.incoming_buffer.insert(i, packet);
        } else {
            self.incoming_buffer.insert(i, packet);
        }
    }
}

impl Clone for UtpSocket {
    // A clone shares the underlying UDP socket and connection identifiers,
    // but starts with empty packet buffers and fresh ack/RTT bookkeeping
    // (duplicate_ack_count, last_acked, rtt, timeout are all reset).
    fn clone(&self) -> UtpSocket {
        UtpSocket {
            socket: self.socket.clone(),
            connected_to: self.connected_to,
            receiver_connection_id: self.receiver_connection_id,
            sender_connection_id: self.sender_connection_id,
            seq_nr: self.seq_nr,
            ack_nr: self.ack_nr,
            state: self.state,
            incoming_buffer: Vec::new(),
            send_buffer: Vec::new(),
            duplicate_ack_count: 0,
            last_acked: 0,
            last_acked_timestamp: 0,
            rtt: 0,
            rtt_variance: 0,
            timeout: 500, // default retransmission timeout, in milliseconds presumably — confirm unit
        }
    }
}

/// Stream interface for UtpSocket.
pub struct UtpStream {
    // The wrapped datagram-level socket; all Reader/Writer calls delegate to it.
    socket: UtpSocket,
}

impl UtpStream {
    /// Create a uTP stream listening on the given address.
    #[unstable]
    pub fn bind(addr: SocketAddr) -> IoResult<UtpStream> {
        let socket = UtpSocket::bind(addr);
        match socket {
            Ok(s) => Ok(UtpStream { socket: s }),
            Err(e) => Err(e),
        }
    }

    /// Open a uTP connection to a remote host by hostname or IP address.
    #[unstable]
    pub fn connect(dst: SocketAddr) -> IoResult<UtpStream> {
        use std::io::net::ip::Ipv4Addr;

        // Port 0 means the operating system gets to choose it
        // (the local end always binds to the IPv4 loopback address).
        let my_addr = SocketAddr { ip: Ipv4Addr(127,0,0,1), port: 0 };
        let socket = match UtpSocket::bind(my_addr) {
            Ok(s) => s,
            Err(e) => return Err(e),
        };

        match socket.connect(dst) {
            Ok(socket) => Ok(UtpStream { socket: socket }),
            Err(e) => Err(e),
        }
    }

    /// Gracefully close connection to peer.
    ///
    /// This method allows both peers to receive all packets still in
    /// flight.
#[unstable]
    pub fn close(&mut self) -> IoResult<()> {
        // Delegates entirely to the underlying socket's close handshake.
        self.socket.close()
    }
}

impl Reader for UtpStream {
    // A read is a single recv_from on the wrapped socket; the source address
    // is discarded since a stream is bound to one peer.
    fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
        match self.socket.recv_from(buf) {
            Ok((read, _src)) => Ok(read),
            Err(e) => Err(e),
        }
    }
}

impl Writer for UtpStream {
    // A write sends the whole buffer to the peer recorded at connect time.
    fn write(&mut self, buf: &[u8]) -> IoResult<()> {
        let dst = self.socket.connected_to;
        self.socket.send_to(buf, dst)
    }
}

#[cfg(test)]
mod test {
    use super::{UtpSocket, UtpPacket};
    use super::{ST_STATE, ST_FIN, ST_DATA, ST_RESET, ST_SYN};
    use super::{BUF_SIZE, HEADER_SIZE};
    use super::{CS_CONNECTED, CS_NEW, CS_CLOSED, CS_EOF};
    use std::rand::random;

    // Like assert_eq!, but reports expected vs. actual via fail!.
    macro_rules! expect_eq(
        ($left:expr, $right:expr) => (
            if !($left == $right) {
                fail!("expected {}, got {}", $right, $left);
            }
        );
    )

    // Unwrap an IoResult or fail the test with the error's message.
    macro_rules! iotry(
        ($e:expr) => (match $e { Ok(e) => e, Err(e) => fail!("{}", e) })
    )

    #[test]
    fn test_packet_decode() {
        // Captured STATE packet (20-byte header, no payload).
        let buf = [0x21, 0x00, 0x41, 0xa8, 0x99, 0x2f, 0xd0, 0x2a,
                   0x9f, 0x4a, 0x26, 0x21, 0x00, 0x10, 0x00, 0x00,
                   0x3a, 0xf2, 0x6c, 0x79];
        let pkt = UtpPacket::decode(buf);
        assert_eq!(pkt.header.get_version(), 1);
        assert_eq!(pkt.header.get_type(), ST_STATE);
        assert_eq!(pkt.header.extension, 0);
        // Multi-byte header fields are stored big-endian, hence the swaps.
        assert_eq!(Int::from_be(pkt.header.connection_id), 16808);
        assert_eq!(Int::from_be(pkt.header.timestamp_microseconds), 2570047530);
        assert_eq!(Int::from_be(pkt.header.timestamp_difference_microseconds), 2672436769);
        assert_eq!(Int::from_be(pkt.header.wnd_size), ::std::num::pow(2u32, 20));
        assert_eq!(Int::from_be(pkt.header.seq_nr), 15090);
        assert_eq!(Int::from_be(pkt.header.ack_nr), 27769);
        assert_eq!(pkt.len(), buf.len());
        assert!(pkt.payload.is_empty());
    }

    #[test]
    fn test_decode_packet_with_extension() {
        // STATE packet carrying one extension (selective-ack style) block.
        let buf = [0x21, 0x01, 0x41, 0xa7, 0x00, 0x00, 0x00, 0x00,
                   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xdc,
                   0xab, 0x53, 0x3a, 0xf5, 0x01, 0x04, 0x00, 0x00,
                   0x00, 0x00];
        let packet = UtpPacket::decode(buf);
        assert_eq!(packet.header.get_version(), 1);
        assert_eq!(packet.header.get_type(), ST_STATE);
assert_eq!(packet.header.extension, 1); assert_eq!(Int::from_be(packet.header.connection_id), 16807); assert_eq!(Int::from_be(packet.header.timestamp_microseconds), 0); assert_eq!(Int::from_be(packet.header.timestamp_difference_microseconds), 0); assert_eq!(Int::from_be(packet.header.wnd_size), 1500); assert_eq!(Int::from_be(packet.header.seq_nr), 43859); assert_eq!(Int::from_be(packet.header.ack_nr), 15093); assert_eq!(packet.len(), buf.len()); assert!(packet.payload.is_empty()); assert!(packet.extensions.len() == 4); assert!(packet.extensions == vec!(0,0,0,0)); } #[test] fn test_packet_encode() { let payload = Vec::from_slice("Hello\n".as_bytes()); let (timestamp, timestamp_diff): (u32, u32) = (15270793, 1707040186); let (connection_id, seq_nr, ack_nr): (u16, u16, u16) = (16808, 15090, 17096); let window_size: u32 = 1048576; let mut pkt = UtpPacket::new(); pkt.set_type(ST_DATA); pkt.header.timestamp_microseconds = timestamp.to_be(); pkt.header.timestamp_difference_microseconds = timestamp_diff.to_be(); pkt.header.connection_id = connection_id.to_be(); pkt.header.seq_nr = seq_nr.to_be(); pkt.header.ack_nr = ack_nr.to_be(); pkt.header.wnd_size = window_size.to_be(); pkt.payload = payload.clone(); let header = pkt.header; let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89, 0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x0a]; assert_eq!(pkt.len(), buf.len()); assert_eq!(pkt.len(), HEADER_SIZE + payload.len()); assert_eq!(pkt.payload, payload); assert_eq!(header.get_version(), 1); assert_eq!(header.get_type(), ST_DATA); assert_eq!(header.extension, 0); assert_eq!(Int::from_be(header.connection_id), connection_id); assert_eq!(Int::from_be(header.seq_nr), seq_nr); assert_eq!(Int::from_be(header.ack_nr), ack_nr); assert_eq!(Int::from_be(header.wnd_size), window_size); assert_eq!(Int::from_be(header.timestamp_microseconds), timestamp); assert_eq!(Int::from_be(header.timestamp_difference_microseconds), 
timestamp_diff); assert_eq!(pkt.bytes(), Vec::from_slice(buf)); } #[test] fn test_reversible() { let buf: &[u8] = [0x01, 0x00, 0x41, 0xa8, 0x00, 0xe9, 0x03, 0x89, 0x65, 0xbf, 0x5d, 0xba, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf2, 0x42, 0xc8, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x0a]; assert_eq!(UtpPacket::decode(buf).bytes().as_slice(), buf); } #[test] fn test_socket_ipv4() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); // Check proper difference in client's send connection id and receive connection id assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1); spawn(proc() { let client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); assert_eq!(client.connected_to, serverAddr); drop(client); }); let mut buf = [0u8, ..BUF_SIZE]; match server.recv_from(buf) { e => println!("{}", e), } // After establishing a new connection, the server's ids are a mirror of the client's. 
assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1); assert_eq!(server.connected_to, clientAddr); assert!(server.state == CS_CONNECTED); drop(server); } #[test] fn test_recvfrom_on_closed_socket() { use std::io::test::next_test_ip4; use std::io::{Closed, EndOfFile}; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); spawn(proc() { let mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); assert_eq!(client.close(), Ok(())); drop(client); }); // Make the server listen for incoming connections let mut buf = [0u8, ..BUF_SIZE]; let _resp = server.recv_from(buf); assert!(server.state == CS_CONNECTED); // Closing the connection is fine match server.recv_from(buf) { Err(e) => fail!("{}", e), _ => {}, } expect_eq!(server.state, CS_EOF); // Trying to listen on the socket after closing it raises an // EOF error match server.recv_from(buf) { Err(e) => expect_eq!(e.kind, EndOfFile), v => fail!("expected {}, got {}", EndOfFile, v), } expect_eq!(server.state, CS_CLOSED); // Trying again raises a Closed error match server.recv_from(buf) { Err(e) => expect_eq!(e.kind, Closed), v => fail!("expected {}, got {}", Closed, v), } drop(server); } #[test] fn test_sendto_on_closed_socket() { use std::io::test::next_test_ip4; use std::io::Closed; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); spawn(proc() { let client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); let mut buf = [0u8, ..BUF_SIZE]; let mut client = client; iotry!(client.recv_from(buf)); }); // Make the server listen for incoming connections let mut buf = [0u8, ..BUF_SIZE]; 
let (_read, _src) = iotry!(server.recv_from(buf)); assert!(server.state == CS_CONNECTED); iotry!(server.close()); expect_eq!(server.state, CS_CLOSED); // Trying to send to the socket after closing it raises an // error match server.send_to(buf, clientAddr) { Err(e) => expect_eq!(e.kind, Closed), v => fail!("expected {}, got {}", Closed, v), } drop(server); } #[test] fn test_acks_on_socket() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let (tx, rx) = channel(); let client = iotry!(UtpSocket::bind(clientAddr)); let server = iotry!(UtpSocket::bind(serverAddr)); spawn(proc() { // Make the server listen for incoming connections let mut server = server; let mut buf = [0u8, ..BUF_SIZE]; let _resp = server.recv_from(buf); tx.send(server.seq_nr); // Close the connection iotry!(server.recv_from(buf)); drop(server); }); let mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); let sender_seq_nr = rx.recv(); let ack_nr = client.ack_nr; assert!(ack_nr != 0); assert!(ack_nr == sender_seq_nr); assert_eq!(client.close(), Ok(())); // The reply to both connect (SYN) and close (FIN) should be // STATE packets, which don't increase the sequence number // and, hence, the receiver's acknowledgement number. assert!(client.ack_nr == ack_nr); drop(client); } #[test] fn test_handle_packet() { use std::io::test::next_test_ip4; //fn test_connection_setup() { let initial_connection_id: u16 = random(); let sender_connection_id = initial_connection_id + 1; let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = initial_connection_id.to_be(); let sent = packet.header; // Do we have a response? let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); // Is is of the correct type? 
let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // Same connection id on both ends during connection establishment assert!(response.header.connection_id == sent.connection_id); // Response acknowledges SYN assert!(response.header.ack_nr == sent.seq_nr); // No payload? assert!(response.payload.is_empty()); //} // --------------------------------- // fn test_connection_usage() { let old_packet = packet; let old_response = response; let mut packet = UtpPacket::new(); packet.set_type(ST_DATA); packet.header.connection_id = sender_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; let sent = packet.header; let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // Sender (i.e., who initated connection and sent SYN) has connection id // equal to initial connection id + 1 // Receiver (i.e., who accepted connection) has connection id equal to // initial connection id assert!(Int::from_be(response.header.connection_id) == initial_connection_id); assert!(Int::from_be(response.header.connection_id) == Int::from_be(sent.connection_id) - 1); // Previous packets should be ack'ed assert!(Int::from_be(response.header.ack_nr) == Int::from_be(sent.seq_nr)); // Responses with no payload should not increase the sequence number assert!(response.payload.is_empty()); assert!(Int::from_be(response.header.seq_nr) == Int::from_be(old_response.header.seq_nr)); // } //fn test_connection_teardown() { let old_packet = packet; let old_response = response; let mut packet = UtpPacket::new(); packet.set_type(ST_FIN); packet.header.connection_id = sender_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; let sent = packet.header; let response = socket.handle_packet(packet); 
assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); // FIN packets have no payload but the sequence number shouldn't increase assert!(Int::from_be(sent.seq_nr) == Int::from_be(old_packet.header.seq_nr) + 1); // Nor should the ACK packet's sequence number assert!(response.header.seq_nr == old_response.header.seq_nr); // FIN should be acknowledged assert!(response.header.ack_nr == sent.seq_nr); //} } #[test] fn test_response_to_keepalive_ack() { use std::io::test::next_test_ip4; // Boilerplate test setup let initial_connection_id: u16 = random(); let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); // Establish connection let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = initial_connection_id.to_be(); let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); let old_packet = packet; let old_response = response; // Now, send a keepalive packet let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_STATE); packet.header.connection_id = initial_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; let response = socket.handle_packet(packet.clone()); assert!(response.is_none()); // Send a second keepalive packet, identical to the previous one let response = socket.handle_packet(packet.clone()); assert!(response.is_none()); } #[test] fn test_response_to_wrong_connection_id() { use std::io::test::next_test_ip4; // Boilerplate test setup let initial_connection_id: u16 = random(); let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); // Establish connection let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = 
initial_connection_id.to_be(); let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); assert!(response.unwrap().get_type() == ST_STATE); // Now, disrupt connection with a packet with an incorrect connection id let new_connection_id = initial_connection_id.to_le(); let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_STATE); packet.header.connection_id = new_connection_id; let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_RESET); assert!(response.header.ack_nr == packet.header.seq_nr); } #[test] fn test_utp_stream() { use super::UtpStream; use std::io::test::next_test_ip4; let serverAddr = next_test_ip4(); let mut server = iotry!(UtpStream::bind(serverAddr)); spawn(proc() { let mut client = iotry!(UtpStream::connect(serverAddr)); iotry!(client.close()); }); iotry!(server.read_to_end()); } #[test] fn test_utp_stream_small_data() { use super::UtpStream; use std::io::test::next_test_ip4; // Fits in a packet static len: uint = 1024; let data = Vec::from_fn(len, |idx| idx as u8); expect_eq!(len, data.len()); let d = data.clone(); let serverAddr = next_test_ip4(); let mut server = UtpStream::bind(serverAddr); spawn(proc() { let mut client = iotry!(UtpStream::connect(serverAddr)); iotry!(client.write(d.as_slice())); iotry!(client.close()); }); let read = iotry!(server.read_to_end()); assert!(!read.is_empty()); expect_eq!(read.len(), data.len()); expect_eq!(read, data); } #[test] fn test_utp_stream_large_data() { use super::UtpStream; use std::io::test::next_test_ip4; // Has to be sent over several packets static len: uint = 1024 * 1024; let data = Vec::from_fn(len, |idx| idx as u8); expect_eq!(len, data.len()); let d = data.clone(); let serverAddr = next_test_ip4(); let mut server = UtpStream::bind(serverAddr); spawn(proc() { let mut client = iotry!(UtpStream::connect(serverAddr)); iotry!(client.write(d.as_slice())); 
iotry!(client.close()); }); let read = iotry!(server.read_to_end()); assert!(!read.is_empty()); expect_eq!(read.len(), data.len()); expect_eq!(read, data); } #[test] fn test_utp_stream_successive_reads() { use super::UtpStream; use std::io::test::next_test_ip4; use std::io::Closed; static len: uint = 1024; let data: Vec<u8> = Vec::from_fn(len, |idx| idx as u8); expect_eq!(len, data.len()); let d = data.clone(); let serverAddr = next_test_ip4(); let mut server = UtpStream::bind(serverAddr); spawn(proc() { let mut client = iotry!(UtpStream::connect(serverAddr)); iotry!(client.write(d.as_slice())); iotry!(client.close()); }); iotry!(server.read_to_end()); let mut buf = [0u8, ..4096]; match server.read(buf) { Err(ref e) if e.kind == Closed => {}, _ => fail!("should have failed with Closed"), }; } #[test] fn test_unordered_packets() { use std::io::test::next_test_ip4; // Boilerplate test setup let initial_connection_id: u16 = random(); let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); // Establish connection let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_SYN); packet.header.connection_id = initial_connection_id.to_be(); let response = socket.handle_packet(packet.clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.get_type() == ST_STATE); let old_packet = packet; let old_response = response; let mut window: Vec<UtpPacket> = Vec::new(); // Now, send a keepalive packet let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_DATA); packet.header.connection_id = initial_connection_id.to_be(); packet.header.seq_nr = (Int::from_be(old_packet.header.seq_nr) + 1).to_be(); packet.header.ack_nr = old_response.header.seq_nr; packet.payload = vec!(1,2,3); window.push(packet); let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_DATA); packet.header.connection_id = initial_connection_id.to_be(); packet.header.seq_nr = 
(Int::from_be(old_packet.header.seq_nr) + 2).to_be(); packet.header.ack_nr = old_response.header.seq_nr; packet.payload = vec!(4,5,6); window.push(packet); // Send packets in reverse order let response = socket.handle_packet(window[1].clone()); assert!(response.is_some()); let response = response.unwrap(); assert!(response.header.ack_nr != window[1].header.seq_nr); let response = socket.handle_packet(window[0].clone()); assert!(response.is_some()); } #[test] fn test_socket_unordered_packets() { use std::io::test::next_test_ip4; use super::UtpStream; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); // Check proper difference in client's send connection id and receive connection id assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1); spawn(proc() { let client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); let mut s = client.socket; let mut window: Vec<UtpPacket> = Vec::new(); let mut i = 0; for data in Vec::from_fn(12, |idx| idx as u8 + 1).as_slice().chunks(3) { let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_DATA); packet.header.connection_id = client.sender_connection_id.to_be(); packet.header.seq_nr = (client.seq_nr + i).to_be(); packet.header.ack_nr = client.ack_nr.to_be(); packet.payload = Vec::from_slice(data); window.push(packet); i += 1; } let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_FIN); packet.header.connection_id = client.sender_connection_id.to_be(); packet.header.seq_nr = (client.seq_nr + 2).to_be(); packet.header.ack_nr = client.ack_nr.to_be(); window.push(packet); iotry!(s.send_to(window[3].bytes().as_slice(), serverAddr)); iotry!(s.send_to(window[2].bytes().as_slice(), serverAddr)); iotry!(s.send_to(window[1].bytes().as_slice(), serverAddr)); 
iotry!(s.send_to(window[0].bytes().as_slice(), serverAddr)); iotry!(s.send_to(window[4].bytes().as_slice(), serverAddr)); for _ in range(0u, 2) { let mut buf = [0, ..BUF_SIZE]; iotry!(s.recv_from(buf)); } }); let mut buf = [0u8, ..BUF_SIZE]; match server.recv_from(buf) { e => println!("{}", e), } // After establishing a new connection, the server's ids are a mirror of the client's. assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1); assert!(server.state == CS_CONNECTED); let mut stream = UtpStream { socket: server }; let expected: Vec<u8> = Vec::from_fn(12, |idx| idx as u8 + 1); match stream.read_to_end() { Ok(data) => { expect_eq!(data.len(), expected.len()); expect_eq!(data, expected); }, Err(e) => fail!("{}", e), } } #[test] fn test_socket_should_not_buffer_syn_packets() { use std::io::test::next_test_ip4; use std::io::net::udp::UdpSocket; use super::UtpSocket; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let server = iotry!(UtpSocket::bind(serverAddr)); let client = iotry!(UdpSocket::bind(clientAddr)); let test_syn_raw = [0x41, 0x00, 0x41, 0xa7, 0x00, 0x00, 0x00, 0x27, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x3a, 0xf1, 0x00, 0x00]; let test_syn_pkt = UtpPacket::decode(test_syn_raw); let seq_nr = Int::from_be(test_syn_pkt.header.seq_nr); spawn(proc() { let mut client = client; iotry!(client.send_to(test_syn_raw, serverAddr)); client.set_timeout(Some(10)); let mut buf = [0, ..BUF_SIZE]; let packet = match client.recv_from(buf) { Ok((nread, _src)) => UtpPacket::decode(buf.slice_to(nread)), Err(e) => fail!("{}", e), }; expect_eq!(packet.header.ack_nr, seq_nr.to_be()); drop(client); }); let mut server = server; let mut buf = [0, ..20]; iotry!(server.recv_from(buf)); assert!(server.ack_nr != 0); expect_eq!(server.ack_nr, seq_nr); assert!(server.incoming_buffer.is_empty()); } #[test] fn test_response_to_triple_ack() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), 
next_test_ip4()); let mut server = iotry!(UtpSocket::bind(serverAddr)); let client = iotry!(UtpSocket::bind(clientAddr)); // Fits in a packet static len: uint = 1024; let data = Vec::from_fn(len, |idx| idx as u8); let d = data.clone(); expect_eq!(len, data.len()); spawn(proc() { let mut client = iotry!(client.connect(serverAddr)); iotry!(client.send_to(d.as_slice(), serverAddr)); iotry!(client.close()); }); let mut buf = [0, ..BUF_SIZE]; // Expect SYN iotry!(server.recv_from(buf)); // Receive data let mut data_packet; match server.socket.recv_from(buf) { Ok((read, _src)) => { data_packet = UtpPacket::decode(buf.slice_to(read)); assert!(data_packet.get_type() == ST_DATA); expect_eq!(data_packet.payload, data); assert_eq!(data_packet.payload.len(), data.len()); }, Err(e) => fail!("{}", e), } let data_packet = data_packet; // Send triple ACK let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_STATE); packet.header.seq_nr = server.seq_nr.to_be(); packet.header.ack_nr = (Int::from_be(data_packet.header.seq_nr) - 1).to_be(); packet.header.connection_id = server.sender_connection_id.to_be(); for _ in range(0u, 3) { iotry!(server.socket.send_to(packet.bytes().as_slice(), clientAddr)); } // Receive data again and check that it's the same we reported as missing match server.socket.recv_from(buf) { Ok((0, _)) => fail!("Received 0 bytes from socket"), Ok((read, _src)) => { let packet = UtpPacket::decode(buf.slice_to(read)); assert_eq!(packet.get_type(), ST_DATA); assert_eq!(Int::from_be(packet.header.seq_nr), Int::from_be(data_packet.header.seq_nr)); assert!(packet.payload == data_packet.payload); let response = server.handle_packet(packet).unwrap(); iotry!(server.socket.send_to(response.bytes().as_slice(), server.connected_to)); }, Err(e) => fail!("{}", e), } // Receive close iotry!(server.recv_from(buf)); } #[test] fn test_socket_timeout_request() { use std::io::test::next_test_ip4; let (serverAddr, clientAddr) = (next_test_ip4(), 
next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); let len = 512; let data = Vec::from_fn(len, |idx| idx as u8); let d = data.clone(); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); // Check proper difference in client's send connection id and receive connection id assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1); spawn(proc() { let mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); assert_eq!(client.connected_to, serverAddr); iotry!(client.send_to(d.as_slice(), serverAddr)); drop(client); }); let mut buf = [0u8, ..BUF_SIZE]; match server.recv_from(buf) { e => println!("{}", e), } // After establishing a new connection, the server's ids are a mirror of the client's. assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1); assert_eq!(server.connected_to, clientAddr); assert!(server.state == CS_CONNECTED); // Purposefully read from UDP socket directly and discard it, in order // to behave as if the packet was lost and thus trigger the timeout // handling in the *next* call to `UtpSocket.recv_from`. 
iotry!(server.socket.recv_from(buf)); // Now wait for the previously discarded packet loop { match server.recv_from(buf) { Ok((0, _)) => continue, Ok(_) => break, Err(e) => fail!("{}", e), } } drop(server); } #[test] fn test_sorted_buffer_insertion() { use std::io::test::next_test_ip4; let serverAddr = next_test_ip4(); let mut socket = iotry!(UtpSocket::bind(serverAddr)); let mut packet = UtpPacket::new(); packet.header.seq_nr = 1; assert!(socket.incoming_buffer.is_empty()); socket.insert_into_buffer(packet.clone()); assert_eq!(socket.incoming_buffer.len(), 1); packet.header.seq_nr = 2; packet.header.timestamp_microseconds = 128; socket.insert_into_buffer(packet.clone()); assert_eq!(socket.incoming_buffer.len(), 2); assert_eq!(socket.incoming_buffer[1].header.seq_nr, 2); assert_eq!(socket.incoming_buffer[1].header.timestamp_microseconds, 128); packet.header.seq_nr = 3; packet.header.timestamp_microseconds = 256; socket.insert_into_buffer(packet.clone()); assert_eq!(socket.incoming_buffer.len(), 3); assert_eq!(socket.incoming_buffer[2].header.seq_nr, 3); assert_eq!(socket.incoming_buffer[2].header.timestamp_microseconds, 256); // Replace a packet with a more recent version packet.header.seq_nr = 2; packet.header.timestamp_microseconds = 456; socket.insert_into_buffer(packet.clone()); assert_eq!(socket.incoming_buffer.len(), 3); assert_eq!(socket.incoming_buffer[1].header.seq_nr, 2); assert_eq!(socket.incoming_buffer[1].header.timestamp_microseconds, 456); } #[test] fn test_duplicate_packet_handling() { use std::io::test::next_test_ip4; use super::UtpStream; let (serverAddr, clientAddr) = (next_test_ip4(), next_test_ip4()); let client = iotry!(UtpSocket::bind(clientAddr)); let mut server = iotry!(UtpSocket::bind(serverAddr)); assert!(server.state == CS_NEW); assert!(client.state == CS_NEW); // Check proper difference in client's send connection id and receive connection id assert_eq!(client.sender_connection_id, client.receiver_connection_id + 1); spawn(proc() { let 
mut client = iotry!(client.connect(serverAddr)); assert!(client.state == CS_CONNECTED); let mut s = client.socket.clone(); let mut packet = UtpPacket::new().wnd_size(BUF_SIZE as u32); packet.set_type(ST_DATA); packet.header.connection_id = client.sender_connection_id.to_be(); packet.header.seq_nr = client.seq_nr.to_be(); packet.header.ack_nr = client.ack_nr.to_be(); packet.payload = vec!(1,2,3); // Send two copies of the packet, with different timestamps for _ in range(0u, 2) { packet.header.timestamp_microseconds = super::now_microseconds(); iotry!(s.send_to(packet.bytes().as_slice(), serverAddr)); } client.seq_nr += 1; // Receive one ACK for _ in range(0u, 1) { let mut buf = [0, ..BUF_SIZE]; iotry!(s.recv_from(buf)); } iotry!(client.close()); }); let mut buf = [0u8, ..BUF_SIZE]; match server.recv_from(buf) { e => println!("{}", e), } // After establishing a new connection, the server's ids are a mirror of the client's. assert_eq!(server.receiver_connection_id, server.sender_connection_id + 1); assert!(server.state == CS_CONNECTED); let mut stream = UtpStream { socket: server }; let expected: Vec<u8> = vec!(1,2,3); match stream.read_to_end() { Ok(data) => { println!("{}", data); expect_eq!(data.len(), expected.len()); expect_eq!(data, expected); }, Err(e) => fail!("{}", e), } } }
// Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Rangeproof library functions use crate::blake2; use crate::keychain::{Identifier, Keychain}; use crate::libtx::error::{Error, ErrorKind}; use crate::util::secp::key::SecretKey; use crate::util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof}; use crate::util::secp::{self, Secp256k1}; fn create_nonce<K>(k: &K, commit: &Commitment) -> Result<SecretKey, Error> where K: Keychain, { // hash(commit|wallet root secret key (m)) as nonce let root_key = k.derive_key(0, &K::root_key_id())?; let res = blake2::blake2b::blake2b(32, &commit.0, &root_key.0[..]); let res = res.as_bytes(); let mut ret_val = [0; 32]; for i in 0..res.len() { ret_val[i] = res[i]; } match SecretKey::from_slice(k.secp(), &ret_val) { Ok(sk) => Ok(sk), Err(e) => Err(ErrorKind::RangeProof( format!("Unable to create nonce: {:?}", e).to_string(), ))?, } } /// Create a bulletproof pub fn create<K>( k: &K, amount: u64, key_id: &Identifier, _commit: Commitment, extra_data: Option<Vec<u8>>, ) -> Result<RangeProof, Error> where K: Keychain, { let commit = k.commit(amount, key_id)?; let skey = k.derive_key(amount, key_id)?; let nonce = create_nonce(k, &commit)?; let message = ProofMessage::from_bytes(&key_id.serialize_path()); Ok(k.secp() .bullet_proof(amount, skey, nonce, extra_data, Some(message))) } /// Verify a proof pub fn verify( secp: &Secp256k1, commit: Commitment, proof: RangeProof, extra_data: 
Option<Vec<u8>>, ) -> Result<(), secp::Error> { let result = secp.verify_bullet_proof(commit, proof, extra_data); match result { Ok(_) => Ok(()), Err(e) => Err(e), } } /// Rewind a rangeproof to retrieve the amount pub fn rewind<K>( k: &K, commit: Commitment, extra_data: Option<Vec<u8>>, proof: RangeProof, ) -> Result<ProofInfo, Error> where K: Keychain, { let nonce = create_nonce(k, &commit)?; let proof_message = k .secp() .rewind_bullet_proof(commit, nonce, extra_data, proof); let proof_info = match proof_message { Ok(p) => p, Err(_) => ProofInfo { success: false, value: 0, message: ProofMessage::empty(), blinding: SecretKey([0; secp::constants::SECRET_KEY_SIZE]), mlen: 0, min: 0, max: 0, exp: 0, mantissa: 0, }, }; return Ok(proof_info); } perf: Removing unnecessary array copy. (#2531) // Copyright 2018 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! 
Rangeproof library functions

use crate::blake2;
use crate::keychain::{Identifier, Keychain};
use crate::libtx::error::{Error, ErrorKind};
use crate::util::secp::key::SecretKey;
use crate::util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof};
use crate::util::secp::{self, Secp256k1};

// Derives a deterministic rewind nonce for `commit` from the wallet's root
// secret key: blake2b(commit | root key). The 32-byte digest is passed to
// `from_slice` directly (32 == SECRET_KEY_SIZE, so no copy is required).
fn create_nonce<K>(k: &K, commit: &Commitment) -> Result<SecretKey, Error>
where
	K: Keychain,
{
	// hash(commit|wallet root secret key (m)) as nonce
	let root_key = k.derive_key(0, &K::root_key_id())?;
	let res = blake2::blake2b::blake2b(32, &commit.0, &root_key.0[..]);
	let res = res.as_bytes();
	match SecretKey::from_slice(k.secp(), &res) {
		Ok(sk) => Ok(sk),
		// Wrapped as a RangeProof error since the nonce only exists to
		// build/rewind bulletproofs.
		Err(e) => Err(ErrorKind::RangeProof(
			format!("Unable to create nonce: {:?}", e).to_string(),
		))?,
	}
}

/// Create a bulletproof
// Commits to `amount` under `key_id`; the key's serialized derivation path
// is embedded as the rewindable proof message. `_commit` is unused — the
// commitment is re-derived from `amount` and `key_id`.
pub fn create<K>(
	k: &K,
	amount: u64,
	key_id: &Identifier,
	_commit: Commitment,
	extra_data: Option<Vec<u8>>,
) -> Result<RangeProof, Error>
where
	K: Keychain,
{
	let commit = k.commit(amount, key_id)?;
	let skey = k.derive_key(amount, key_id)?;
	let nonce = create_nonce(k, &commit)?;
	let message = ProofMessage::from_bytes(&key_id.serialize_path());
	Ok(k.secp()
		.bullet_proof(amount, skey, nonce, extra_data, Some(message)))
}

/// Verify a proof
// Returns `Ok(())` on success; the match discards the proof-range payload.
pub fn verify(
	secp: &Secp256k1,
	commit: Commitment,
	proof: RangeProof,
	extra_data: Option<Vec<u8>>,
) -> Result<(), secp::Error> {
	let result = secp.verify_bullet_proof(commit, proof, extra_data);
	match result {
		Ok(_) => Ok(()),
		Err(e) => Err(e),
	}
}

/// Rewind a rangeproof to retrieve the amount
// A failed rewind is not propagated as an error: it yields a ProofInfo
// with `success: false` and zeroed fields, letting callers probe outputs
// that may not belong to this wallet.
pub fn rewind<K>(
	k: &K,
	commit: Commitment,
	extra_data: Option<Vec<u8>>,
	proof: RangeProof,
) -> Result<ProofInfo, Error>
where
	K: Keychain,
{
	let nonce = create_nonce(k, &commit)?;
	let proof_message = k
		.secp()
		.rewind_bullet_proof(commit, nonce, extra_data, proof);
	let proof_info = match proof_message {
		Ok(p) => p,
		Err(_) => ProofInfo {
			success: false,
			value: 0,
			message: ProofMessage::empty(),
			blinding: SecretKey([0; secp::constants::SECRET_KEY_SIZE]),
			mlen: 0,
			min: 0,
			max: 0,
			exp: 0,
			mantissa: 0,
		},
	};
	return Ok(proof_info);
}
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `version` module gives you tools to create and compare SemVer-compliant //! versions. use std::cmp::{self, Ordering}; use std::error::Error; use std::fmt; use std::hash; use std::result; use std::str; use semver_parser; #[cfg(feature = "serde")] use serde::de::{self, Deserialize, Deserializer, Visitor}; #[cfg(feature = "serde")] use serde::ser::{Serialize, Serializer}; /// An identifier in the pre-release or build metadata. /// /// See sections 9 and 10 of the spec for more about pre-release identifers and /// build metadata. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Identifier { /// An identifier that's solely numbers. Numeric(u64), /// An identifier with letters and numbers. AlphaNumeric(String), } impl From<semver_parser::version::Identifier> for Identifier { fn from(other: semver_parser::version::Identifier) -> Identifier { match other { semver_parser::version::Identifier::Numeric(n) => Identifier::Numeric(n), semver_parser::version::Identifier::AlphaNumeric(s) => Identifier::AlphaNumeric(s), } } } impl fmt::Display for Identifier { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Identifier::Numeric(ref n) => fmt::Display::fmt(n, f), Identifier::AlphaNumeric(ref s) => fmt::Display::fmt(s, f), } } } #[cfg(feature = "serde")] impl Serialize for Identifier { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { // Serialize Identifier as a number or string. 
match *self { Identifier::Numeric(n) => serializer.serialize_u64(n), Identifier::AlphaNumeric(ref s) => serializer.serialize_str(s), } } } #[cfg(feature = "serde")] impl<'de> Deserialize<'de> for Identifier { fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error> where D: Deserializer<'de>, { struct IdentifierVisitor; // Deserialize Identifier from a number or string. impl<'de> Visitor<'de> for IdentifierVisitor { type Value = Identifier; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a SemVer pre-release or build identifier") } fn visit_u64<E>(self, numeric: u64) -> result::Result<Self::Value, E> where E: de::Error, { Ok(Identifier::Numeric(numeric)) } fn visit_str<E>(self, alphanumeric: &str) -> result::Result<Self::Value, E> where E: de::Error, { Ok(Identifier::AlphaNumeric(alphanumeric.to_owned())) } } deserializer.deserialize_any(IdentifierVisitor) } } /// Represents a version number conforming to the semantic versioning scheme. #[derive(Clone, Eq, Debug)] #[cfg_attr(feature = "diesel", derive(AsExpression, FromSqlRow))] #[cfg_attr(feature = "diesel", sql_type = "diesel::sql_types::Text")] pub struct Version { /// The major version, to be incremented on incompatible changes. pub major: u64, /// The minor version, to be incremented when functionality is added in a /// backwards-compatible manner. pub minor: u64, /// The patch version, to be incremented when backwards-compatible bug /// fixes are made. pub patch: u64, /// The pre-release version identifier, if one exists. pub pre: Vec<Identifier>, /// The build metadata, ignored when determining version precedence. 
pub build: Vec<Identifier>, } impl From<semver_parser::version::Version> for Version { fn from(other: semver_parser::version::Version) -> Version { Version { major: other.major, minor: other.minor, patch: other.patch, pre: other.pre.into_iter().map(From::from).collect(), build: other.build.into_iter().map(From::from).collect(), } } } #[cfg(feature = "serde")] impl Serialize for Version { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { // Serialize Version as a string. serializer.collect_str(self) } } #[cfg(feature = "serde")] impl<'de> Deserialize<'de> for Version { fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error> where D: Deserializer<'de>, { struct VersionVisitor; // Deserialize Version from a string. impl<'de> Visitor<'de> for VersionVisitor { type Value = Version; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a SemVer version as a string") } fn visit_str<E>(self, v: &str) -> result::Result<Self::Value, E> where E: de::Error, { Version::parse(v).map_err(de::Error::custom) } } deserializer.deserialize_str(VersionVisitor) } } /// An error type for this crate /// /// Currently, just a generic error. Will make this nicer later. #[derive(Clone, PartialEq, Debug, PartialOrd)] pub enum SemVerError { /// An error ocurred while parsing. ParseError(String), } impl fmt::Display for SemVerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { SemVerError::ParseError(ref m) => write!(f, "{}", m), } } } impl Error for SemVerError {} /// A Result type for errors pub type Result<T> = result::Result<T, SemVerError>; impl Version { /// Contructs the simple case without pre or build. pub fn new(major: u64, minor: u64, patch: u64) -> Version { Version { major, minor, patch, pre: Vec::new(), build: Vec::new(), } } /// Parse a string into a semver object. 
/// /// # Errors /// /// Returns an error variant if the input could not be parsed as a semver object. /// /// In general, this means that the provided string does not conform to the /// [semver spec][semver]. /// /// An error for overflow is returned if any numeric component is larger than what can be /// stored in `u64`. /// /// The following are examples for other common error causes: /// /// * `1.0` - too few numeric components are used. Exactly 3 are expected. /// * `1.0.01` - a numeric component has a leading zero. /// * `1.0.foo` - uses a non-numeric components where one is expected. /// * `1.0.0foo` - metadata is not separated using a legal character like, `+` or `-`. /// * `1.0.0+foo_123` - contains metadata with an illegal character (`_`). /// Legal characters for metadata include `a-z`, `A-Z`, `0-9`, `-`, and `.` (dot). /// /// [semver]: https://semver.org pub fn parse(version: &str) -> Result<Version> { let res = semver_parser::version::parse(version); match res { // Convert plain String error into proper ParseError Err(e) => Err(SemVerError::ParseError(e.to_string())), Ok(v) => Ok(From::from(v)), } } /// Clears the build metadata fn clear_metadata(&mut self) { self.build = Vec::new(); self.pre = Vec::new(); } /// Increments the patch number for this Version (Must be mutable) pub fn increment_patch(&mut self) { self.patch += 1; self.clear_metadata(); } /// Increments the minor version number for this Version (Must be mutable) /// /// As instructed by section 7 of the spec, the patch number is reset to 0. 
pub fn increment_minor(&mut self) { self.minor += 1; self.patch = 0; self.clear_metadata(); } /// Increments the major version number for this Version (Must be mutable) /// /// As instructed by section 8 of the spec, the minor and patch numbers are /// reset to 0 pub fn increment_major(&mut self) { self.major += 1; self.minor = 0; self.patch = 0; self.clear_metadata(); } /// Checks to see if the current Version is in pre-release status pub fn is_prerelease(&self) -> bool { !self.pre.is_empty() } } impl str::FromStr for Version { type Err = SemVerError; fn from_str(s: &str) -> Result<Version> { Version::parse(s) } } impl fmt::Display for Version { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut result = format!("{}.{}.{}", self.major, self.minor, self.patch); if !self.pre.is_empty() { result.push_str("-"); for (i, x) in self.pre.iter().enumerate() { if i != 0 { result.push_str("."); } result.push_str(format!("{}", x).as_ref()); } } if !self.build.is_empty() { result.push_str("+"); for (i, x) in self.build.iter().enumerate() { if i != 0 { result.push_str("."); } result.push_str(format!("{}", x).as_ref()); } } f.pad(result.as_ref())?; Ok(()) } } impl cmp::PartialEq for Version { #[inline] fn eq(&self, other: &Version) -> bool { // We should ignore build metadata here, otherwise versions v1 and v2 // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which // violate strict total ordering rules. 
self.major == other.major && self.minor == other.minor && self.patch == other.patch && self.pre == other.pre } } impl cmp::PartialOrd for Version { fn partial_cmp(&self, other: &Version) -> Option<Ordering> { Some(self.cmp(other)) } } impl cmp::Ord for Version { fn cmp(&self, other: &Version) -> Ordering { match self.major.cmp(&other.major) { Ordering::Equal => {} r => return r, } match self.minor.cmp(&other.minor) { Ordering::Equal => {} r => return r, } match self.patch.cmp(&other.patch) { Ordering::Equal => {} r => return r, } // NB: semver spec says 0.0.0-pre < 0.0.0 // but the version of ord defined for vec // says that [] < [pre] so we alter it here match (self.pre.len(), other.pre.len()) { (0, 0) => Ordering::Equal, (0, _) => Ordering::Greater, (_, 0) => Ordering::Less, (_, _) => self.pre.cmp(&other.pre), } } } impl hash::Hash for Version { fn hash<H: hash::Hasher>(&self, into: &mut H) { self.major.hash(into); self.minor.hash(into); self.patch.hash(into); self.pre.hash(into); } } impl From<(u64, u64, u64)> for Version { fn from(tuple: (u64, u64, u64)) -> Version { let (major, minor, patch) = tuple; Version::new(major, minor, patch) } } #[cfg(test)] mod tests { use super::Identifier; use super::SemVerError; use super::Version; use std::result; #[test] fn test_parse() { fn parse_error(e: &str) -> result::Result<Version, SemVerError> { return Err(SemVerError::ParseError(e.to_string())); } assert_eq!( Version::parse(""), parse_error("expected more input") ); assert_eq!( Version::parse(" "), parse_error("expected more input") ); assert_eq!(Version::parse("1"), parse_error("expected more input")); assert_eq!(Version::parse("1.2"), parse_error("expected more input")); assert_eq!( Version::parse("1.2.3-"), parse_error("expected more input") ); assert_eq!( Version::parse("a.b.c"), parse_error("encountered unexpected token: AlphaNumeric(\"a\")") ); assert_eq!( Version::parse("1.2.3 abc"), parse_error("expected end of input, but got: [AlphaNumeric(\"abc\")]") ); 
assert_eq!( Version::parse("1.2.3"), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!(Version::parse("1.2.3"), Ok(Version::new(1, 2, 3))); assert_eq!( Version::parse(" 1.2.3 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( Version::parse("1.2.3-alpha1"), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( Version::parse(" 1.2.3-alpha1 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( Version::parse("1.2.3+build5"), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse(" 1.2.3+build5 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse("1.2.3-alpha1+build5"), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse(" 1.2.3-alpha1+build5 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![ Identifier::Numeric(1), Identifier::AlphaNumeric(String::from("alpha1")), Identifier::Numeric(9), ], build: vec![ Identifier::AlphaNumeric(String::from("build5")), Identifier::Numeric(7), Identifier::AlphaNumeric(String::from("3aedf")), ], }) ); assert_eq!( Version::parse("0.4.0-beta.1+0851523"), Ok(Version { major: 0, minor: 4, patch: 0, pre: vec![ Identifier::AlphaNumeric(String::from("beta")), Identifier::Numeric(1), 
], build: vec![Identifier::AlphaNumeric(String::from("0851523"))], }) ); // for https://nodejs.org/dist/index.json, where some older npm versions are "1.1.0-beta-10" assert_eq!( Version::parse("1.1.0-beta-10"), Ok(Version { major: 1, minor: 1, patch: 0, pre: vec![ Identifier::AlphaNumeric(String::from("beta-10")), ], build: Vec::new(), }) ); } #[test] fn test_increment_patch() { let mut buggy_release = Version::parse("0.1.0").unwrap(); buggy_release.increment_patch(); assert_eq!(buggy_release, Version::parse("0.1.1").unwrap()); } #[test] fn test_increment_minor() { let mut feature_release = Version::parse("1.4.6").unwrap(); feature_release.increment_minor(); assert_eq!(feature_release, Version::parse("1.5.0").unwrap()); } #[test] fn test_increment_major() { let mut chrome_release = Version::parse("46.1.246773").unwrap(); chrome_release.increment_major(); assert_eq!(chrome_release, Version::parse("47.0.0").unwrap()); } #[test] fn test_increment_keep_prerelease() { let mut release = Version::parse("1.0.0-alpha").unwrap(); release.increment_patch(); assert_eq!(release, Version::parse("1.0.1").unwrap()); release.increment_minor(); assert_eq!(release, Version::parse("1.1.0").unwrap()); release.increment_major(); assert_eq!(release, Version::parse("2.0.0").unwrap()); } #[test] fn test_increment_clear_metadata() { let mut release = Version::parse("1.0.0+4442").unwrap(); release.increment_patch(); assert_eq!(release, Version::parse("1.0.1").unwrap()); release = Version::parse("1.0.1+hello").unwrap(); release.increment_minor(); assert_eq!(release, Version::parse("1.1.0").unwrap()); release = Version::parse("1.1.3747+hello").unwrap(); release.increment_major(); assert_eq!(release, Version::parse("2.0.0").unwrap()); } #[test] fn test_eq() { assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3")); assert_eq!( Version::parse("1.2.3-alpha1"), Version::parse("1.2.3-alpha1") ); assert_eq!( Version::parse("1.2.3+build.42"), Version::parse("1.2.3+build.42") ); assert_eq!( 
Version::parse("1.2.3-alpha1+42"), Version::parse("1.2.3-alpha1+42") ); assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42")); } #[test] fn test_ne() { assert!(Version::parse("0.0.0") != Version::parse("0.0.1")); assert!(Version::parse("0.0.0") != Version::parse("0.1.0")); assert!(Version::parse("0.0.0") != Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); } #[test] fn test_show() { assert_eq!( format!("{}", Version::parse("1.2.3").unwrap()), "1.2.3".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3-alpha1").unwrap()), "1.2.3-alpha1".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3+build.42").unwrap()), "1.2.3+build.42".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()), "1.2.3-alpha1+42".to_string() ); } #[test] fn test_display() { let version = Version::parse("1.2.3-rc1").unwrap(); assert_eq!(format!("{:20}", version), "1.2.3-rc1 "); assert_eq!(format!("{:*^20}", version), "*****1.2.3-rc1******"); assert_eq!(format!("{:.4}", version), "1.2."); } #[test] fn test_to_string() { assert_eq!( Version::parse("1.2.3").unwrap().to_string(), "1.2.3".to_string() ); assert_eq!( Version::parse("1.2.3-alpha1").unwrap().to_string(), "1.2.3-alpha1".to_string() ); assert_eq!( Version::parse("1.2.3+build.42").unwrap().to_string(), "1.2.3+build.42".to_string() ); assert_eq!( Version::parse("1.2.3-alpha1+42").unwrap().to_string(), "1.2.3-alpha1+42".to_string() ); } #[test] fn test_lt() { assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3")); assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3-alpha2")); assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2"))); assert!(!(Version::parse("1.2.3+23") < 
Version::parse("1.2.3+42"))); } #[test] fn test_le() { assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42")); } #[test] fn test_gt() { assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1")); assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2")); assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2"))); assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42"))); } #[test] fn test_ge() { assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42")); } #[test] fn test_prerelease_check() { assert!(Version::parse("1.0.0").unwrap().is_prerelease() == false); assert!(Version::parse("0.0.1").unwrap().is_prerelease() == false); assert!(Version::parse("4.1.4-alpha").unwrap().is_prerelease()); assert!(Version::parse("1.0.0-beta294296").unwrap().is_prerelease()); } #[test] fn test_spec_order() { let vs = [ "1.0.0-alpha", "1.0.0-alpha.1", "1.0.0-alpha.beta", "1.0.0-beta", "1.0.0-beta.2", "1.0.0-beta.11", "1.0.0-rc.1", "1.0.0", ]; let mut i = 1; while i < vs.len() { let a = Version::parse(vs[i - 1]); let b = 
Version::parse(vs[i]); assert!(a < b, "nope {:?} < {:?}", a, b); i += 1; } } #[test] fn test_from_str() { assert_eq!( "1.2.3".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( " 1.2.3 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( "1.2.3-alpha1".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( " 1.2.3-alpha1 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( "1.2.3+build5".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( " 1.2.3+build5 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( "1.2.3-alpha1+build5".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( " 1.2.3-alpha1+build5 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( "1.2.3-1.alpha1.9+build5.7.3aedf ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![ Identifier::Numeric(1), Identifier::AlphaNumeric(String::from("alpha1")), Identifier::Numeric(9), ], build: vec![ Identifier::AlphaNumeric(String::from("build5")), Identifier::Numeric(7), Identifier::AlphaNumeric(String::from("3aedf")), ], }) ); assert_eq!( "0.4.0-beta.1+0851523".parse(), Ok(Version { major: 0, minor: 4, patch: 0, pre: vec![ Identifier::AlphaNumeric(String::from("beta")), Identifier::Numeric(1), ], build: 
vec![Identifier::AlphaNumeric(String::from("0851523"))], }) ); } #[test] fn test_from_str_errors() { fn parse_error(e: &str) -> result::Result<Version, SemVerError> { return Err(SemVerError::ParseError(e.to_string())); } assert_eq!("".parse(), parse_error("expected more input")); assert_eq!(" ".parse(), parse_error("expected more input")); assert_eq!("1".parse(), parse_error("expected more input")); assert_eq!("1.2".parse(), parse_error("expected more input")); assert_eq!("1.2.3-".parse(), parse_error("expected more input")); assert_eq!( "a.b.c".parse(), parse_error("encountered unexpected token: AlphaNumeric(\"a\")") ); assert_eq!( "1.2.3 abc".parse(), parse_error("expected end of input, but got: [AlphaNumeric(\"abc\")]") ); } } cargo fmt // Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `version` module gives you tools to create and compare SemVer-compliant //! versions. use std::cmp::{self, Ordering}; use std::error::Error; use std::fmt; use std::hash; use std::result; use std::str; use semver_parser; #[cfg(feature = "serde")] use serde::de::{self, Deserialize, Deserializer, Visitor}; #[cfg(feature = "serde")] use serde::ser::{Serialize, Serializer}; /// An identifier in the pre-release or build metadata. /// /// See sections 9 and 10 of the spec for more about pre-release identifers and /// build metadata. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Identifier { /// An identifier that's solely numbers. Numeric(u64), /// An identifier with letters and numbers. 
AlphaNumeric(String), } impl From<semver_parser::version::Identifier> for Identifier { fn from(other: semver_parser::version::Identifier) -> Identifier { match other { semver_parser::version::Identifier::Numeric(n) => Identifier::Numeric(n), semver_parser::version::Identifier::AlphaNumeric(s) => Identifier::AlphaNumeric(s), } } } impl fmt::Display for Identifier { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Identifier::Numeric(ref n) => fmt::Display::fmt(n, f), Identifier::AlphaNumeric(ref s) => fmt::Display::fmt(s, f), } } } #[cfg(feature = "serde")] impl Serialize for Identifier { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { // Serialize Identifier as a number or string. match *self { Identifier::Numeric(n) => serializer.serialize_u64(n), Identifier::AlphaNumeric(ref s) => serializer.serialize_str(s), } } } #[cfg(feature = "serde")] impl<'de> Deserialize<'de> for Identifier { fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error> where D: Deserializer<'de>, { struct IdentifierVisitor; // Deserialize Identifier from a number or string. impl<'de> Visitor<'de> for IdentifierVisitor { type Value = Identifier; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a SemVer pre-release or build identifier") } fn visit_u64<E>(self, numeric: u64) -> result::Result<Self::Value, E> where E: de::Error, { Ok(Identifier::Numeric(numeric)) } fn visit_str<E>(self, alphanumeric: &str) -> result::Result<Self::Value, E> where E: de::Error, { Ok(Identifier::AlphaNumeric(alphanumeric.to_owned())) } } deserializer.deserialize_any(IdentifierVisitor) } } /// Represents a version number conforming to the semantic versioning scheme. 
#[derive(Clone, Eq, Debug)] #[cfg_attr(feature = "diesel", derive(AsExpression, FromSqlRow))] #[cfg_attr(feature = "diesel", sql_type = "diesel::sql_types::Text")] pub struct Version { /// The major version, to be incremented on incompatible changes. pub major: u64, /// The minor version, to be incremented when functionality is added in a /// backwards-compatible manner. pub minor: u64, /// The patch version, to be incremented when backwards-compatible bug /// fixes are made. pub patch: u64, /// The pre-release version identifier, if one exists. pub pre: Vec<Identifier>, /// The build metadata, ignored when determining version precedence. pub build: Vec<Identifier>, } impl From<semver_parser::version::Version> for Version { fn from(other: semver_parser::version::Version) -> Version { Version { major: other.major, minor: other.minor, patch: other.patch, pre: other.pre.into_iter().map(From::from).collect(), build: other.build.into_iter().map(From::from).collect(), } } } #[cfg(feature = "serde")] impl Serialize for Version { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { // Serialize Version as a string. serializer.collect_str(self) } } #[cfg(feature = "serde")] impl<'de> Deserialize<'de> for Version { fn deserialize<D>(deserializer: D) -> result::Result<Self, D::Error> where D: Deserializer<'de>, { struct VersionVisitor; // Deserialize Version from a string. impl<'de> Visitor<'de> for VersionVisitor { type Value = Version; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("a SemVer version as a string") } fn visit_str<E>(self, v: &str) -> result::Result<Self::Value, E> where E: de::Error, { Version::parse(v).map_err(de::Error::custom) } } deserializer.deserialize_str(VersionVisitor) } } /// An error type for this crate /// /// Currently, just a generic error. Will make this nicer later. 
#[derive(Clone, PartialEq, Debug, PartialOrd)] pub enum SemVerError { /// An error ocurred while parsing. ParseError(String), } impl fmt::Display for SemVerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { SemVerError::ParseError(ref m) => write!(f, "{}", m), } } } impl Error for SemVerError {} /// A Result type for errors pub type Result<T> = result::Result<T, SemVerError>; impl Version { /// Contructs the simple case without pre or build. pub fn new(major: u64, minor: u64, patch: u64) -> Version { Version { major, minor, patch, pre: Vec::new(), build: Vec::new(), } } /// Parse a string into a semver object. /// /// # Errors /// /// Returns an error variant if the input could not be parsed as a semver object. /// /// In general, this means that the provided string does not conform to the /// [semver spec][semver]. /// /// An error for overflow is returned if any numeric component is larger than what can be /// stored in `u64`. /// /// The following are examples for other common error causes: /// /// * `1.0` - too few numeric components are used. Exactly 3 are expected. /// * `1.0.01` - a numeric component has a leading zero. /// * `1.0.foo` - uses a non-numeric components where one is expected. /// * `1.0.0foo` - metadata is not separated using a legal character like, `+` or `-`. /// * `1.0.0+foo_123` - contains metadata with an illegal character (`_`). /// Legal characters for metadata include `a-z`, `A-Z`, `0-9`, `-`, and `.` (dot). 
/// /// [semver]: https://semver.org pub fn parse(version: &str) -> Result<Version> { let res = semver_parser::version::parse(version); match res { // Convert plain String error into proper ParseError Err(e) => Err(SemVerError::ParseError(e.to_string())), Ok(v) => Ok(From::from(v)), } } /// Clears the build metadata fn clear_metadata(&mut self) { self.build = Vec::new(); self.pre = Vec::new(); } /// Increments the patch number for this Version (Must be mutable) pub fn increment_patch(&mut self) { self.patch += 1; self.clear_metadata(); } /// Increments the minor version number for this Version (Must be mutable) /// /// As instructed by section 7 of the spec, the patch number is reset to 0. pub fn increment_minor(&mut self) { self.minor += 1; self.patch = 0; self.clear_metadata(); } /// Increments the major version number for this Version (Must be mutable) /// /// As instructed by section 8 of the spec, the minor and patch numbers are /// reset to 0 pub fn increment_major(&mut self) { self.major += 1; self.minor = 0; self.patch = 0; self.clear_metadata(); } /// Checks to see if the current Version is in pre-release status pub fn is_prerelease(&self) -> bool { !self.pre.is_empty() } } impl str::FromStr for Version { type Err = SemVerError; fn from_str(s: &str) -> Result<Version> { Version::parse(s) } } impl fmt::Display for Version { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut result = format!("{}.{}.{}", self.major, self.minor, self.patch); if !self.pre.is_empty() { result.push_str("-"); for (i, x) in self.pre.iter().enumerate() { if i != 0 { result.push_str("."); } result.push_str(format!("{}", x).as_ref()); } } if !self.build.is_empty() { result.push_str("+"); for (i, x) in self.build.iter().enumerate() { if i != 0 { result.push_str("."); } result.push_str(format!("{}", x).as_ref()); } } f.pad(result.as_ref())?; Ok(()) } } impl cmp::PartialEq for Version { #[inline] fn eq(&self, other: &Version) -> bool { // We should ignore build 
metadata here, otherwise versions v1 and v2 // can exist such that !(v1 < v2) && !(v1 > v2) && v1 != v2, which // violate strict total ordering rules. self.major == other.major && self.minor == other.minor && self.patch == other.patch && self.pre == other.pre } } impl cmp::PartialOrd for Version { fn partial_cmp(&self, other: &Version) -> Option<Ordering> { Some(self.cmp(other)) } } impl cmp::Ord for Version { fn cmp(&self, other: &Version) -> Ordering { match self.major.cmp(&other.major) { Ordering::Equal => {} r => return r, } match self.minor.cmp(&other.minor) { Ordering::Equal => {} r => return r, } match self.patch.cmp(&other.patch) { Ordering::Equal => {} r => return r, } // NB: semver spec says 0.0.0-pre < 0.0.0 // but the version of ord defined for vec // says that [] < [pre] so we alter it here match (self.pre.len(), other.pre.len()) { (0, 0) => Ordering::Equal, (0, _) => Ordering::Greater, (_, 0) => Ordering::Less, (_, _) => self.pre.cmp(&other.pre), } } } impl hash::Hash for Version { fn hash<H: hash::Hasher>(&self, into: &mut H) { self.major.hash(into); self.minor.hash(into); self.patch.hash(into); self.pre.hash(into); } } impl From<(u64, u64, u64)> for Version { fn from(tuple: (u64, u64, u64)) -> Version { let (major, minor, patch) = tuple; Version::new(major, minor, patch) } } #[cfg(test)] mod tests { use super::Identifier; use super::SemVerError; use super::Version; use std::result; #[test] fn test_parse() { fn parse_error(e: &str) -> result::Result<Version, SemVerError> { return Err(SemVerError::ParseError(e.to_string())); } assert_eq!(Version::parse(""), parse_error("expected more input")); assert_eq!(Version::parse(" "), parse_error("expected more input")); assert_eq!(Version::parse("1"), parse_error("expected more input")); assert_eq!(Version::parse("1.2"), parse_error("expected more input")); assert_eq!(Version::parse("1.2.3-"), parse_error("expected more input")); assert_eq!( Version::parse("a.b.c"), parse_error("encountered unexpected token: 
AlphaNumeric(\"a\")") ); assert_eq!( Version::parse("1.2.3 abc"), parse_error("expected end of input, but got: [AlphaNumeric(\"abc\")]") ); assert_eq!( Version::parse("1.2.3"), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!(Version::parse("1.2.3"), Ok(Version::new(1, 2, 3))); assert_eq!( Version::parse(" 1.2.3 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( Version::parse("1.2.3-alpha1"), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( Version::parse(" 1.2.3-alpha1 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( Version::parse("1.2.3+build5"), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse(" 1.2.3+build5 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse("1.2.3-alpha1+build5"), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse(" 1.2.3-alpha1+build5 "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( Version::parse("1.2.3-1.alpha1.9+build5.7.3aedf "), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![ Identifier::Numeric(1), Identifier::AlphaNumeric(String::from("alpha1")), Identifier::Numeric(9), ], build: vec![ Identifier::AlphaNumeric(String::from("build5")), Identifier::Numeric(7), Identifier::AlphaNumeric(String::from("3aedf")), ], }) ); assert_eq!( 
Version::parse("0.4.0-beta.1+0851523"), Ok(Version { major: 0, minor: 4, patch: 0, pre: vec![ Identifier::AlphaNumeric(String::from("beta")), Identifier::Numeric(1), ], build: vec![Identifier::AlphaNumeric(String::from("0851523"))], }) ); // for https://nodejs.org/dist/index.json, where some older npm versions are "1.1.0-beta-10" assert_eq!( Version::parse("1.1.0-beta-10"), Ok(Version { major: 1, minor: 1, patch: 0, pre: vec![Identifier::AlphaNumeric(String::from("beta-10")),], build: Vec::new(), }) ); } #[test] fn test_increment_patch() { let mut buggy_release = Version::parse("0.1.0").unwrap(); buggy_release.increment_patch(); assert_eq!(buggy_release, Version::parse("0.1.1").unwrap()); } #[test] fn test_increment_minor() { let mut feature_release = Version::parse("1.4.6").unwrap(); feature_release.increment_minor(); assert_eq!(feature_release, Version::parse("1.5.0").unwrap()); } #[test] fn test_increment_major() { let mut chrome_release = Version::parse("46.1.246773").unwrap(); chrome_release.increment_major(); assert_eq!(chrome_release, Version::parse("47.0.0").unwrap()); } #[test] fn test_increment_keep_prerelease() { let mut release = Version::parse("1.0.0-alpha").unwrap(); release.increment_patch(); assert_eq!(release, Version::parse("1.0.1").unwrap()); release.increment_minor(); assert_eq!(release, Version::parse("1.1.0").unwrap()); release.increment_major(); assert_eq!(release, Version::parse("2.0.0").unwrap()); } #[test] fn test_increment_clear_metadata() { let mut release = Version::parse("1.0.0+4442").unwrap(); release.increment_patch(); assert_eq!(release, Version::parse("1.0.1").unwrap()); release = Version::parse("1.0.1+hello").unwrap(); release.increment_minor(); assert_eq!(release, Version::parse("1.1.0").unwrap()); release = Version::parse("1.1.3747+hello").unwrap(); release.increment_major(); assert_eq!(release, Version::parse("2.0.0").unwrap()); } #[test] fn test_eq() { assert_eq!(Version::parse("1.2.3"), Version::parse("1.2.3")); assert_eq!( 
Version::parse("1.2.3-alpha1"), Version::parse("1.2.3-alpha1") ); assert_eq!( Version::parse("1.2.3+build.42"), Version::parse("1.2.3+build.42") ); assert_eq!( Version::parse("1.2.3-alpha1+42"), Version::parse("1.2.3-alpha1+42") ); assert_eq!(Version::parse("1.2.3+23"), Version::parse("1.2.3+42")); } #[test] fn test_ne() { assert!(Version::parse("0.0.0") != Version::parse("0.0.1")); assert!(Version::parse("0.0.0") != Version::parse("0.1.0")); assert!(Version::parse("0.0.0") != Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha") != Version::parse("1.2.3-beta")); } #[test] fn test_show() { assert_eq!( format!("{}", Version::parse("1.2.3").unwrap()), "1.2.3".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3-alpha1").unwrap()), "1.2.3-alpha1".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3+build.42").unwrap()), "1.2.3+build.42".to_string() ); assert_eq!( format!("{}", Version::parse("1.2.3-alpha1+42").unwrap()), "1.2.3-alpha1+42".to_string() ); } #[test] fn test_display() { let version = Version::parse("1.2.3-rc1").unwrap(); assert_eq!(format!("{:20}", version), "1.2.3-rc1 "); assert_eq!(format!("{:*^20}", version), "*****1.2.3-rc1******"); assert_eq!(format!("{:.4}", version), "1.2."); } #[test] fn test_to_string() { assert_eq!( Version::parse("1.2.3").unwrap().to_string(), "1.2.3".to_string() ); assert_eq!( Version::parse("1.2.3-alpha1").unwrap().to_string(), "1.2.3-alpha1".to_string() ); assert_eq!( Version::parse("1.2.3+build.42").unwrap().to_string(), "1.2.3+build.42".to_string() ); assert_eq!( Version::parse("1.2.3-alpha1+42").unwrap().to_string(), "1.2.3-alpha1+42".to_string() ); } #[test] fn test_lt() { assert!(Version::parse("0.0.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.0.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.0") < Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha1") < Version::parse("1.2.3")); assert!(Version::parse("1.2.3-alpha1") < 
Version::parse("1.2.3-alpha2")); assert!(!(Version::parse("1.2.3-alpha2") < Version::parse("1.2.3-alpha2"))); assert!(!(Version::parse("1.2.3+23") < Version::parse("1.2.3+42"))); } #[test] fn test_le() { assert!(Version::parse("0.0.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.0.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.0") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha1") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3-alpha2") <= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3+23") <= Version::parse("1.2.3+42")); } #[test] fn test_gt() { assert!(Version::parse("1.2.3-alpha2") > Version::parse("0.0.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.0")); assert!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha1")); assert!(Version::parse("1.2.3") > Version::parse("1.2.3-alpha2")); assert!(!(Version::parse("1.2.3-alpha2") > Version::parse("1.2.3-alpha2"))); assert!(!(Version::parse("1.2.3+23") > Version::parse("1.2.3+42"))); } #[test] fn test_ge() { assert!(Version::parse("1.2.3-alpha2") >= Version::parse("0.0.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.0.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.0")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha1")); assert!(Version::parse("1.2.3-alpha2") >= Version::parse("1.2.3-alpha2")); assert!(Version::parse("1.2.3+23") >= Version::parse("1.2.3+42")); } #[test] fn test_prerelease_check() { assert!(Version::parse("1.0.0").unwrap().is_prerelease() == false); assert!(Version::parse("0.0.1").unwrap().is_prerelease() == false); assert!(Version::parse("4.1.4-alpha").unwrap().is_prerelease()); assert!(Version::parse("1.0.0-beta294296").unwrap().is_prerelease()); } #[test] fn test_spec_order() { let vs = [ "1.0.0-alpha", "1.0.0-alpha.1", "1.0.0-alpha.beta", 
"1.0.0-beta", "1.0.0-beta.2", "1.0.0-beta.11", "1.0.0-rc.1", "1.0.0", ]; let mut i = 1; while i < vs.len() { let a = Version::parse(vs[i - 1]); let b = Version::parse(vs[i]); assert!(a < b, "nope {:?} < {:?}", a, b); i += 1; } } #[test] fn test_from_str() { assert_eq!( "1.2.3".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( " 1.2.3 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: Vec::new(), }) ); assert_eq!( "1.2.3-alpha1".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( " 1.2.3-alpha1 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: Vec::new(), }) ); assert_eq!( "1.2.3+build5".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( " 1.2.3+build5 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: Vec::new(), build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( "1.2.3-alpha1+build5".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( " 1.2.3-alpha1+build5 ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![Identifier::AlphaNumeric(String::from("alpha1"))], build: vec![Identifier::AlphaNumeric(String::from("build5"))], }) ); assert_eq!( "1.2.3-1.alpha1.9+build5.7.3aedf ".parse(), Ok(Version { major: 1, minor: 2, patch: 3, pre: vec![ Identifier::Numeric(1), Identifier::AlphaNumeric(String::from("alpha1")), Identifier::Numeric(9), ], build: vec![ Identifier::AlphaNumeric(String::from("build5")), Identifier::Numeric(7), Identifier::AlphaNumeric(String::from("3aedf")), ], }) ); assert_eq!( "0.4.0-beta.1+0851523".parse(), Ok(Version { 
major: 0, minor: 4, patch: 0, pre: vec![ Identifier::AlphaNumeric(String::from("beta")), Identifier::Numeric(1), ], build: vec![Identifier::AlphaNumeric(String::from("0851523"))], }) ); } #[test] fn test_from_str_errors() { fn parse_error(e: &str) -> result::Result<Version, SemVerError> { return Err(SemVerError::ParseError(e.to_string())); } assert_eq!("".parse(), parse_error("expected more input")); assert_eq!(" ".parse(), parse_error("expected more input")); assert_eq!("1".parse(), parse_error("expected more input")); assert_eq!("1.2".parse(), parse_error("expected more input")); assert_eq!("1.2.3-".parse(), parse_error("expected more input")); assert_eq!( "a.b.c".parse(), parse_error("encountered unexpected token: AlphaNumeric(\"a\")") ); assert_eq!( "1.2.3 abc".parse(), parse_error("expected end of input, but got: [AlphaNumeric(\"abc\")]") ); } }
// pretty-printers are not loaded // compile-flags:-g // min-gdb-version: 8.1 // min-cdb-version: 10.0.18317.1001 // === GDB TESTS ================================================================================== // gdb-command:run // gdb-command:print r // gdb-check:[...]$1 = Rc(strong=2, weak=1) = {value = 42, strong = 2, weak = 1} // gdb-command:print a // gdb-check:[...]$2 = Arc(strong=2, weak=1) = {value = 42, strong = 2, weak = 1} // === LLDB TESTS ================================================================================== // lldb-command:run // lldb-command:print r // lldb-check:[...]$0 = strong=2, weak=1 { value = 42 } // lldb-command:print a // lldb-check:[...]$1 = strong=2, weak=1 { data = 42 } // === CDB TESTS ================================================================================== // cdb-command:g // cdb-command:dx r,d // cdb-check:r,d : 42 [Type: alloc::rc::Rc<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Rc<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx r1,d // cdb-check:r1,d : 42 [Type: alloc::rc::Rc<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Rc<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx w1,d // cdb-check:w1,d : 42 [Type: alloc::rc::Weak<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Weak<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx a,d // cdb-check:a,d : 42 [Type: alloc::sync::Arc<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Arc<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-command:dx a1,d // cdb-check:a1,d : 42 [Type: 
alloc::sync::Arc<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Arc<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-command:dx w2,d // cdb-check:w2,d : 42 [Type: alloc::sync::Weak<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Weak<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] use std::rc::Rc; use std::sync::Arc; fn main() { let r = Rc::new(42); let r1 = Rc::clone(&r); let w1 = Rc::downgrade(&r); let a = Arc::new(42); let a1 = Arc::clone(&a); let w2 = Arc::downgrade(&a); zzz(); // #break } fn zzz() { () } tests: Ignore `test/debuginfo/rc_arc.rs` on windows-gnu The tests checks some pretty-printer output, but pretty-printers are not embedded on windows-gnu // ignore-windows-gnu: pretty-printers are not loaded // compile-flags:-g // min-gdb-version: 8.1 // min-cdb-version: 10.0.18317.1001 // === GDB TESTS ================================================================================== // gdb-command:run // gdb-command:print r // gdb-check:[...]$1 = Rc(strong=2, weak=1) = {value = 42, strong = 2, weak = 1} // gdb-command:print a // gdb-check:[...]$2 = Arc(strong=2, weak=1) = {value = 42, strong = 2, weak = 1} // === LLDB TESTS ================================================================================== // lldb-command:run // lldb-command:print r // lldb-check:[...]$0 = strong=2, weak=1 { value = 42 } // lldb-command:print a // lldb-check:[...]$1 = strong=2, weak=1 { data = 42 } // === CDB TESTS ================================================================================== // cdb-command:g // cdb-command:dx r,d // cdb-check:r,d : 42 [Type: alloc::rc::Rc<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Rc<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference 
count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx r1,d // cdb-check:r1,d : 42 [Type: alloc::rc::Rc<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Rc<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx w1,d // cdb-check:w1,d : 42 [Type: alloc::rc::Weak<i32>] // cdb-check: [<Raw View>] [Type: alloc::rc::Weak<i32>] // cdb-check: [Reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-check: [Weak reference count] : 2 [Type: core::cell::Cell<usize>] // cdb-command:dx a,d // cdb-check:a,d : 42 [Type: alloc::sync::Arc<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Arc<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-command:dx a1,d // cdb-check:a1,d : 42 [Type: alloc::sync::Arc<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Arc<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-command:dx w2,d // cdb-check:w2,d : 42 [Type: alloc::sync::Weak<i32>] // cdb-check: [<Raw View>] [Type: alloc::sync::Weak<i32>] // cdb-check: [Reference count] : 2 [Type: core::sync::atomic::AtomicUsize] // cdb-check: [Weak reference count] : 2 [Type: core::sync::atomic::AtomicUsize] use std::rc::Rc; use std::sync::Arc; fn main() { let r = Rc::new(42); let r1 = Rc::clone(&r); let w1 = Rc::downgrade(&r); let a = Arc::new(42); let a1 = Arc::clone(&a); let w2 = Arc::downgrade(&a); zzz(); // #break } fn zzz() { () }
use crate::error::{KeyringError, Result};
use byteorder::{ByteOrder, LittleEndian};
use std::ffi::OsStr;
use std::iter::once;
use std::mem;
use std::os::windows::ffi::OsStrExt;
use std::slice;
use std::str;
use winapi::shared::minwindef::FILETIME;
use winapi::um::wincred::{
    CredDeleteW, CredFree, CredReadW, CredWriteW, CREDENTIALW, CRED_PERSIST_ENTERPRISE,
    CRED_TYPE_GENERIC, PCREDENTIALW, PCREDENTIAL_ATTRIBUTEW,
};

// DWORD is u32
// LPCWSTR is *const u16
// BOOL is i32 (false = 0, true = 1)
// PCREDENTIALW = *mut CREDENTIALW

// Note: decision to concatenate user and service name
// to create target is because Windows assumes one user
// per service. See issue here: https://github.com/jaraco/keyring/issues/47

/// Keyring backed by the Windows Credential Vault (wincred API).
pub struct Keyring<'a> {
    // Borrowed pair identifying the entry; the vault target name is built
    // as "<username>.<service>" (see note above).
    service: &'a str,
    username: &'a str,
}

impl<'a> Keyring<'a> {
    /// Creates a keyring handle for the given service/username pair.
    /// No vault access happens until a password method is called.
    pub fn new(service: &'a str, username: &'a str) -> Keyring<'a> {
        Keyring { service, username }
    }

    /// Stores `password` for this service/username in the Windows vault,
    /// overwriting any existing credential with the same target name.
    pub fn set_password(&self, password: &str) -> Result<()> {
        // Setting values of credential
        let flags = 0;
        let cred_type = CRED_TYPE_GENERIC;
        let target_name: String = [self.username, self.service].join(".");
        let mut target_name = to_wstr(&target_name);

        // empty string for comments, and target alias,
        // I don't use here
        let mut empty_str = to_wstr("");

        // Ignored by CredWriteW
        let last_written = FILETIME {
            dwLowDateTime: 0,
            dwHighDateTime: 0,
        };

        // In order to allow editing of the password
        // from within Windows, the password must be
        // transformed into utf16. (but because it's a
        // blob, it then needs to be passed to windows
        // as an array of bytes).
let blob_u16 = to_wstr_no_null(password); let mut blob = vec![0; blob_u16.len() * 2]; LittleEndian::write_u16_into(&blob_u16, &mut blob); let blob_len = blob.len() as u32; let persist = CRED_PERSIST_ENTERPRISE; let attribute_count = 0; let attributes: PCREDENTIAL_ATTRIBUTEW = unsafe { mem::uninitialized() }; let mut username = to_wstr(self.username); let mut credential = CREDENTIALW { Flags: flags, Type: cred_type, TargetName: target_name.as_mut_ptr(), Comment: empty_str.as_mut_ptr(), LastWritten: last_written, CredentialBlobSize: blob_len, CredentialBlob: blob.as_mut_ptr(), Persist: persist, AttributeCount: attribute_count, Attributes: attributes, TargetAlias: empty_str.as_mut_ptr(), UserName: username.as_mut_ptr(), }; // raw pointer to credential, is coerced from &mut let pcredential: PCREDENTIALW = &mut credential; // Call windows API match unsafe { CredWriteW(pcredential, 0) } { 0 => Err(KeyringError::WindowsVaultError), _ => Ok(()), } } pub fn get_password(&self) -> Result<String> { // passing uninitialized pcredential. // Should be ok; it's freed by a windows api // call CredFree. let mut pcredential: PCREDENTIALW = unsafe { mem::uninitialized() }; let target_name: String = [self.username, self.service].join("."); let target_name = to_wstr(&target_name); let cred_type = CRED_TYPE_GENERIC; // Windows api call match unsafe { CredReadW(target_name.as_ptr(), cred_type, 0, &mut pcredential) } { 0 => Err(KeyringError::WindowsVaultError), _ => { // Dereferencing pointer to credential let credential: CREDENTIALW = unsafe { *pcredential }; // get blob by creating an array from the pointer // and the length reported back from the credential let blob_pointer: *const u8 = credential.CredentialBlob; let blob_len: usize = credential.CredentialBlobSize as usize; // blob needs to be transformed from bytes to an // array of u16, which will then be transformed into // a utf8 string. 
As noted above, this is to allow // editing of the password from within the vault order // or other windows programs, which operate in utf16 let blob: &[u8] = unsafe { slice::from_raw_parts(blob_pointer, blob_len) }; let mut blob_u16 = vec![0; blob_len / 2]; LittleEndian::read_u16_into(&blob, &mut blob_u16); // Now can get utf8 string from the array let password = String::from_utf16(&blob_u16) .map(|pass| pass.to_string()) .map_err(|_| KeyringError::WindowsVaultError); // Free the credential unsafe { CredFree(pcredential as *mut _); } password } } } pub fn delete_password(&self) -> Result<()> { let target_name: String = [self.username, self.service].join("."); let cred_type = CRED_TYPE_GENERIC; let target_name = to_wstr(&target_name); match unsafe { CredDeleteW(target_name.as_ptr(), cred_type, 0) } { 0 => Err(KeyringError::WindowsVaultError), _ => Ok(()), } } } // helper function for turning utf8 strings to windows // utf16 fn to_wstr(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } fn to_wstr_no_null(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().collect() } #[cfg(test)] mod test { use super::*; #[test] fn test_basic() { let password_1 = "大根"; let password_2 = "0xE5A4A7E6A0B9"; // Above in hex string let keyring = Keyring::new("testservice", "testuser"); keyring.set_password(password_1).unwrap(); let res_1 = keyring.get_password().unwrap(); println!("{}:{}", res_1, password_1); assert_eq!(res_1, password_1); keyring.set_password(password_2).unwrap(); let res_2 = keyring.get_password().unwrap(); println!("{}:{}", res_2, password_2); assert_eq!(res_2, password_2); keyring.delete_password().unwrap(); } } Remove mem::uninitialized use crate::error::{KeyringError, Result}; use byteorder::{ByteOrder, LittleEndian}; use std::ffi::OsStr; use std::iter::once; use std::mem::MaybeUninit; use std::os::windows::ffi::OsStrExt; use std::slice; use std::str; use winapi::shared::minwindef::FILETIME; use winapi::um::wincred::{ CredDeleteW, CredFree, 
CredReadW, CredWriteW, CREDENTIALW, CRED_PERSIST_ENTERPRISE, CRED_TYPE_GENERIC, PCREDENTIALW, PCREDENTIAL_ATTRIBUTEW, }; // DWORD is u32 // LPCWSTR is *const u16 // BOOL is i32 (false = 0, true = 1) // PCREDENTIALW = *mut CREDENTIALW // Note: decision to concatenate user and service name // to create target is because Windows assumes one user // per service. See issue here: https://github.com/jaraco/keyring/issues/47 pub struct Keyring<'a> { service: &'a str, username: &'a str, } impl<'a> Keyring<'a> { pub fn new(service: &'a str, username: &'a str) -> Keyring<'a> { Keyring { service, username } } pub fn set_password(&self, password: &str) -> Result<()> { // Setting values of credential let flags = 0; let cred_type = CRED_TYPE_GENERIC; let target_name: String = [self.username, self.service].join("."); let mut target_name = to_wstr(&target_name); // empty string for comments, and target alias, // I don't use here let mut empty_str = to_wstr(""); // Ignored by CredWriteW let last_written = FILETIME { dwLowDateTime: 0, dwHighDateTime: 0, }; // In order to allow editing of the password // from within Windows, the password must be // transformed into utf16. (but because it's a // blob, it then needs to be passed to windows // as an array of bytes). 
let blob_u16 = to_wstr_no_null(password); let mut blob = vec![0; blob_u16.len() * 2]; LittleEndian::write_u16_into(&blob_u16, &mut blob); let blob_len = blob.len() as u32; let persist = CRED_PERSIST_ENTERPRISE; let attribute_count = 0; let attributes: PCREDENTIAL_ATTRIBUTEW = std::ptr::null_mut(); let mut username = to_wstr(self.username); let mut credential = CREDENTIALW { Flags: flags, Type: cred_type, TargetName: target_name.as_mut_ptr(), Comment: empty_str.as_mut_ptr(), LastWritten: last_written, CredentialBlobSize: blob_len, CredentialBlob: blob.as_mut_ptr(), Persist: persist, AttributeCount: attribute_count, Attributes: attributes, TargetAlias: empty_str.as_mut_ptr(), UserName: username.as_mut_ptr(), }; // raw pointer to credential, is coerced from &mut let pcredential: PCREDENTIALW = &mut credential; // Call windows API match unsafe { CredWriteW(pcredential, 0) } { 0 => Err(KeyringError::WindowsVaultError), _ => Ok(()), } } pub fn get_password(&self) -> Result<String> { // passing uninitialized pcredential. // Should be ok; it's freed by a windows api // call CredFree. let mut pcredential = MaybeUninit::uninit(); let target_name: String = [self.username, self.service].join("."); let target_name = to_wstr(&target_name); let cred_type = CRED_TYPE_GENERIC; // Windows api call match unsafe { CredReadW(target_name.as_ptr(), cred_type, 0, pcredential.as_mut_ptr()) } { 0 => Err(KeyringError::WindowsVaultError), _ => { let pcredential = unsafe { pcredential.assume_init() }; // Dereferencing pointer to credential let credential: CREDENTIALW = unsafe { *pcredential }; // get blob by creating an array from the pointer // and the length reported back from the credential let blob_pointer: *const u8 = credential.CredentialBlob; let blob_len: usize = credential.CredentialBlobSize as usize; // blob needs to be transformed from bytes to an // array of u16, which will then be transformed into // a utf8 string. 
As noted above, this is to allow // editing of the password from within the vault order // or other windows programs, which operate in utf16 let blob: &[u8] = unsafe { slice::from_raw_parts(blob_pointer, blob_len) }; let mut blob_u16 = vec![0; blob_len / 2]; LittleEndian::read_u16_into(&blob, &mut blob_u16); // Now can get utf8 string from the array let password = String::from_utf16(&blob_u16) .map(|pass| pass.to_string()) .map_err(|_| KeyringError::WindowsVaultError); // Free the credential unsafe { CredFree(pcredential as *mut _); } password } } } pub fn delete_password(&self) -> Result<()> { let target_name: String = [self.username, self.service].join("."); let cred_type = CRED_TYPE_GENERIC; let target_name = to_wstr(&target_name); match unsafe { CredDeleteW(target_name.as_ptr(), cred_type, 0) } { 0 => Err(KeyringError::WindowsVaultError), _ => Ok(()), } } } // helper function for turning utf8 strings to windows // utf16 fn to_wstr(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().chain(once(0)).collect() } fn to_wstr_no_null(s: &str) -> Vec<u16> { OsStr::new(s).encode_wide().collect() } #[cfg(test)] mod test { use super::*; #[test] fn test_basic() { let password_1 = "大根"; let password_2 = "0xE5A4A7E6A0B9"; // Above in hex string let keyring = Keyring::new("testservice", "testuser"); keyring.set_password(password_1).unwrap(); let res_1 = keyring.get_password().unwrap(); println!("{}:{}", res_1, password_1); assert_eq!(res_1, password_1); keyring.set_password(password_2).unwrap(); let res_2 = keyring.get_password().unwrap(); println!("{}:{}", res_2, password_2); assert_eq!(res_2, password_2); keyring.delete_password().unwrap(); } }
use crate::sites::*; use std::collections::BTreeSet; pub const LUT4_PIN_MAP : &[(&str, &str)] = &[ ("A", "A"), ("B", "B"), ("C", "C"), ("D", "D"), ("Z", "F"), ]; // TODO: need to add an extra pip to use LUT->DI path const FD1P3BX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("PD", "LSR"), ("Q", "Q"), ]; const FD1P3DX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("CD", "LSR"), ("Q", "Q"), ]; const FD1P3IX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("CD", "LSR"), ("Q", "Q"), ]; const FD1P3JX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("PD", "LSR"), ("Q", "Q"), ]; const IB_PIN_MAP : &[(&str, &str)] = &[ ("I", "B"), ("O", "O"), ]; const OB_PIN_MAP : &[(&str, &str)] = &[ ("I", "I"), ("O", "B"), ]; // TODO: add back DFFs once we have some constraints set up const BEL_CELL_TYPES : &[(&str, &[&str])] = &[ ("OXIDE_COMB", &["LUT4"]), ("OXIDE_FF", &["FD1P3BX", "FD1P3DX", "FD1P3IX", "FD1P3JX"]), ("SEIO33_CORE", &["IB", "OB"]), ("SEIO18_CORE", &["IB", "OB"]), ]; fn conv_map(map: &[(&str, &str)]) -> Vec<(String, String)> { map.iter().map(|(c, b)| (c.to_string(), b.to_string())).collect() } fn get_map_for_cell_bel(cell_type: &str, _bel: &SiteBel) -> Vec<(String, String)> { match cell_type { "LUT4" => conv_map(LUT4_PIN_MAP), "FD1P3BX" => conv_map(FD1P3BX_PIN_MAP), "FD1P3DX" => conv_map(FD1P3DX_PIN_MAP), "FD1P3IX" => conv_map(FD1P3IX_PIN_MAP), "FD1P3JX" => conv_map(FD1P3JX_PIN_MAP), "IB" => conv_map(IB_PIN_MAP), "OB" => conv_map(OB_PIN_MAP), _ => unimplemented!(), } } #[derive(Clone)] pub struct PinMap { pub cell_type: String, pub bels: Vec<String>, pub pin_map: Vec<(String, String)>, } pub fn get_pin_maps(site: &Site) -> Vec<PinMap> { let mut map = Vec::new(); let unique_bel_types : BTreeSet<String> = site.bels.iter().filter_map(|b| if b.bel_class == SiteBelClass::BEL { Some(b.bel_type.to_string()) } else { None }).collect(); for bel_type in unique_bel_types.iter() { if 
let Some((_, cell_types)) = BEL_CELL_TYPES.iter().find(|(bt, _)| bt == bel_type) { for cell_type in cell_types.iter() { map.push(PinMap { cell_type: cell_type.to_string(), bels: site.bels.iter().filter_map(|b| if &b.bel_type == bel_type { Some(b.name.to_string()) } else { None }).collect(), pin_map: get_map_for_cell_bel(cell_type, site.bels.iter().find(|b| &b.bel_type == bel_type).unwrap()), }); } } } return map; } interchange: Add DCC pin mapping Signed-off-by: gatecat <c690b4516f836771c8bd4e23cd8f1cebc83093a6@ds0.me> use crate::sites::*; use std::collections::BTreeSet; pub const LUT4_PIN_MAP : &[(&str, &str)] = &[ ("A", "A"), ("B", "B"), ("C", "C"), ("D", "D"), ("Z", "F"), ]; // TODO: need to add an extra pip to use LUT->DI path const FD1P3BX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("PD", "LSR"), ("Q", "Q"), ]; const FD1P3DX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("CD", "LSR"), ("Q", "Q"), ]; const FD1P3IX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("CD", "LSR"), ("Q", "Q"), ]; const FD1P3JX_PIN_MAP : &[(&str, &str)] = &[ ("D", "M"), ("CK", "CLK"), ("SP", "CE"), ("PD", "LSR"), ("Q", "Q"), ]; const IB_PIN_MAP : &[(&str, &str)] = &[ ("I", "B"), ("O", "O"), ]; const OB_PIN_MAP : &[(&str, &str)] = &[ ("I", "I"), ("O", "B"), ]; // TODO: add back DFFs once we have some constraints set up const BEL_CELL_TYPES : &[(&str, &[&str])] = &[ ("OXIDE_COMB", &["LUT4"]), ("OXIDE_FF", &["FD1P3BX", "FD1P3DX", "FD1P3IX", "FD1P3JX"]), ("SEIO33_CORE", &["IB", "OB"]), ("SEIO18_CORE", &["IB", "OB"]), ("DCC", &["DCC"]), ]; fn conv_map(map: &[(&str, &str)]) -> Vec<(String, String)> { map.iter().map(|(c, b)| (c.to_string(), b.to_string())).collect() } fn auto_map(site: &Site, bel: &SiteBel) -> Vec<(String, String)> { bel.pins.iter().map(|p| &site.bel_pins[*p]).map(|p| (p.pin_name.to_string(), p.pin_name.to_string())).collect() } fn get_map_for_cell_bel(cell_type: &str, site: &Site, bel: &SiteBel) 
-> Vec<(String, String)> { match cell_type { "LUT4" => conv_map(LUT4_PIN_MAP), "FD1P3BX" => conv_map(FD1P3BX_PIN_MAP), "FD1P3DX" => conv_map(FD1P3DX_PIN_MAP), "FD1P3IX" => conv_map(FD1P3IX_PIN_MAP), "FD1P3JX" => conv_map(FD1P3JX_PIN_MAP), "IB" => conv_map(IB_PIN_MAP), "OB" => conv_map(OB_PIN_MAP), "DCC" => auto_map(site, bel), _ => unimplemented!(), } } #[derive(Clone)] pub struct PinMap { pub cell_type: String, pub bels: Vec<String>, pub pin_map: Vec<(String, String)>, } pub fn get_pin_maps(site: &Site) -> Vec<PinMap> { let mut map = Vec::new(); let unique_bel_types : BTreeSet<String> = site.bels.iter().filter_map(|b| if b.bel_class == SiteBelClass::BEL { Some(b.bel_type.to_string()) } else { None }).collect(); for bel_type in unique_bel_types.iter() { if let Some((_, cell_types)) = BEL_CELL_TYPES.iter().find(|(bt, _)| bt == bel_type) { for cell_type in cell_types.iter() { map.push(PinMap { cell_type: cell_type.to_string(), bels: site.bels.iter().filter_map(|b| if &b.bel_type == bel_type { Some(b.name.to_string()) } else { None }).collect(), pin_map: get_map_for_cell_bel(cell_type, site, site.bels.iter().find(|b| &b.bel_type == bel_type).unwrap()), }); } } } return map; }
// Copyright 2015-2017 Intecture Developers. // // Licensed under the Mozilla Public License 2.0 <LICENSE or // https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied, // modified, or distributed except according to those terms. extern crate futures; extern crate intecture_api; extern crate tokio_core; use futures::Future; use intecture_api::prelude::*; use tokio_core::reactor::Core; fn main() { let mut core = Core::new().unwrap(); let host = Local::new().and_then(|host| { Command::new(&host, "whoami", None).and_then(|mut cmd| { cmd.exec().map(|out| { println!("I'm currently running as {}", String::from_utf8_lossy(&out.stdout).trim()); }) }) }); core.run(host).unwrap(); } Fixed examples // Copyright 2015-2017 Intecture Developers. // // Licensed under the Mozilla Public License 2.0 <LICENSE or // https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied, // modified, or distributed except according to those terms. extern crate futures; extern crate intecture_api; extern crate tokio_core; use futures::Future; use intecture_api::prelude::*; use tokio_core::reactor::Core; fn main() { let mut core = Core::new().unwrap(); let handle = core.handle(); let host = Local::new().and_then(|host| { Command::new(&host, "whoami", None).and_then(|mut cmd| { cmd.exec(&handle).map(|out| { println!("I'm currently running as {}", String::from_utf8_lossy(&out.stdout).trim()); }) }) }); core.run(host).unwrap(); }
#[macro_use] mod macros; mod expression; mod statement; use error::Result; use ast::{Program, Store, Node, Index, Item}; use lexer::{Lexer, Token}; use lexer::Token::*; pub struct Parser<'src> { /// Lexer will produce tokens from the source lexer: Lexer<'src>, /// Current token, to be used by peek! and next! macros token: Option<Token>, /// AST under construction program: Program<'src>, } impl<'src> Parser<'src> { pub fn new(source: &'src str) -> Self { Parser { lexer: Lexer::new(source), token: None, program: Program { source: source, root: None, items: Store::new(), } } } #[inline] fn consume(&mut self) { self.token = None; } #[inline] fn store(&mut self, node: Node) -> Index { self.program.items.insert(node) } #[inline] fn parse(&mut self) -> Result<()> { let statement = match next!(self) { EndOfProgram => return Ok(()), token => try!(self.statement(token)) }; let mut previous = self.store(statement); self.program.root = Some(previous); loop { let statement = match next!(self) { EndOfProgram => break, token => try!(self.statement(token)) }; let index = self.store(statement); self.program.items[previous].next = Some(index); previous = index; } Ok(()) } #[inline] fn block_body_tail(&mut self) -> Result<Option<Index>> { let statement = match next!(self) { BraceClose => return Ok(None), token => try!(self.statement(token)), }; let mut previous = self.store(statement); let root = Some(previous); loop { let statement = match next!(self) { BraceClose => break, token => try!(self.statement(token)), }; let index = self.store(statement); self.program.items[previous].next = Some(index); previous = index; } Ok(root) } #[inline] fn block_body(&mut self) -> Result<Option<Index>> { expect!(self, BraceOpen); self.block_body_tail() } fn parameter_list(&mut self) -> Result<Option<Index>> { let name = match next!(self) { ParenClose => return Ok(None), Identifier(name) => name, _ => unexpected_token!(self), }; let mut previous = self.store(Item::Identifier(name.into()).at(0, 0)); 
let root = Some(previous); loop { let name = match next!(self) { ParenClose => break, Comma => expect_identifier!(self), _ => unexpected_token!(self), }; let index = self.store(Item::Identifier(name.into()).at(0, 0)); self.program.items[previous].next = Some(index); previous = index; } Ok(root) // let mut default_params = false; // loop { // let name = match next!(self) { // ParenClose => break, // Identifier(name) => name, // _ => unexpected_token!(self) // }; // list.push(match peek!(self) { // Operator(Assign) => { // self.consume(); // let expression = try!(self.expression(0)); // default_params = true; // Parameter { // name: name.into(), // default: Some(Box::new(expression)) // } // } // _ => { // if default_params { // unexpected_token!(self); // } // Parameter { // name: name.into(), // default: None // } // } // }); // match next!(self) { // ParenClose => break, // Comma => {}, // _ => unexpected_token!(self) // } // } // Ok(list) } } pub fn parse<'src>(source: &'src str) -> Result<Program<'src>> { let mut parser = Parser::new(source); parser.parse()?; Ok(parser.program) } #[cfg(test)] mod test { use super::*; use ast::OperatorKind; macro_rules! assert_item { ($item:expr, $m:pat => $eval:expr) => { match $item { $m => assert!($eval), _ => panic!("Failed assert_item") } } } macro_rules! 
assert_ident { ($item.expr, $src:ident, $expect:expr) => { assert_item!($item, Item::Identifier(ref i) => i.as_str($src) == $expect); } } #[test] fn empty_parse() { let program = parse("").unwrap(); assert_eq!(program.items.len(), 0); assert_eq!(program.root, None); assert_eq!(program.statements().next(), None); } #[test] fn empty_statements() { let program = parse(";;;").unwrap(); assert_eq!(program.items.len(), 3); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next(), None); } #[test] fn parse_ident_expr() { let src = "foo; bar; baz;"; let program = parse(src).unwrap(); let items = &program.items; // 3 times statement and expression assert_eq!(items.len(), 6); // First statement is after first expression assert_eq!(program.root, Some(1)); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(0)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(2)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(4)); assert_eq!(stmts.next(), None); // Match identifiers assert_ident!(items[0].item, src, "foo"); assert_ident!(items[0].item, src, "bar"); assert_ident!(items[0].item, src, "baz"); } #[test] fn parse_binary_and_postfix_expr() { let src = "foo + bar; baz++;"; let program = parse(src).unwrap(); let items = &program.items; // 2 statements, 3 simple expressions, one binary expression, one postfix expression assert_eq!(items.len(), 7); // First statement is after binary expression and two of it's side expressions assert_eq!(program.root, Some(3)); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(2)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(5)); assert_eq!(stmts.next(), None); // 
Binary expression assert_eq!(items[2].item, Item::BinaryExpr { parenthesized: false, operator: OperatorKind::Addition, left: 0, right: 1, }); assert_ident!(items[0].item, src, "foo"); assert_ident!(items[1].item, src, "foo"); // Postfix expression assert_eq!(items[5].item, Item::PostfixExpr { operator: OperatorKind::Increment, operand: 4 }); assert_ident!(items[1].item, src, "baz"); } #[test] fn function_statement_empty() { let src = "function foo() {}"; let program = parse(src).unwrap(); let mut stmts = program.statements(); match *stmts.next().unwrap() { Item::FunctionStatement { ref name, params: None, body: None, } => assert_eq!(name.as_str(src), "foo"), _ => panic!() } assert_eq!(stmts.next(), None); } #[test] fn function_statement_params() { let src = "function foo(bar, baz) {}"; let program = parse(src).unwrap(); let items = &program.items; let mut stmts = program.statements(); match *stmts.next().unwrap() { Item::FunctionStatement { ref name, params: Some(0), body: None, } => assert_eq!(name.as_str(src), "foo"), _ => panic!() } // Params are linked let mut params = program.items.list(0); assert_ident!(*params.next().unwrap(), src, "bar"); assert_ident!(*params.next().unwrap(), src, "baz"); assert_eq!(params.next(), None); } } Tests are working #[macro_use] mod macros; mod expression; mod statement; use error::Result; use ast::{Program, Store, Node, Index, Item}; use lexer::{Lexer, Token}; use lexer::Token::*; pub struct Parser<'src> { /// Lexer will produce tokens from the source lexer: Lexer<'src>, /// Current token, to be used by peek! and next! 
macros token: Option<Token>, /// AST under construction program: Program<'src>, } impl<'src> Parser<'src> { pub fn new(source: &'src str) -> Self { Parser { lexer: Lexer::new(source), token: None, program: Program { source: source, root: None, items: Store::new(), } } } #[inline] fn consume(&mut self) { self.token = None; } #[inline] fn store(&mut self, node: Node) -> Index { self.program.items.insert(node) } #[inline] fn parse(&mut self) -> Result<()> { let statement = match next!(self) { EndOfProgram => return Ok(()), token => try!(self.statement(token)) }; let mut previous = self.store(statement); self.program.root = Some(previous); loop { let statement = match next!(self) { EndOfProgram => break, token => try!(self.statement(token)) }; let index = self.store(statement); self.program.items[previous].next = Some(index); previous = index; } Ok(()) } #[inline] fn block_body_tail(&mut self) -> Result<Option<Index>> { let statement = match next!(self) { BraceClose => return Ok(None), token => try!(self.statement(token)), }; let mut previous = self.store(statement); let root = Some(previous); loop { let statement = match next!(self) { BraceClose => break, token => try!(self.statement(token)), }; let index = self.store(statement); self.program.items[previous].next = Some(index); previous = index; } Ok(root) } #[inline] fn block_body(&mut self) -> Result<Option<Index>> { expect!(self, BraceOpen); self.block_body_tail() } fn parameter_list(&mut self) -> Result<Option<Index>> { let name = match next!(self) { ParenClose => return Ok(None), Identifier(name) => name, _ => unexpected_token!(self), }; let mut previous = self.store(Item::Identifier(name.into()).at(0, 0)); let root = Some(previous); loop { let name = match next!(self) { ParenClose => break, Comma => expect_identifier!(self), _ => unexpected_token!(self), }; let index = self.store(Item::Identifier(name.into()).at(0, 0)); self.program.items[previous].next = Some(index); previous = index; } Ok(root) // let mut 
default_params = false; // loop { // let name = match next!(self) { // ParenClose => break, // Identifier(name) => name, // _ => unexpected_token!(self) // }; // list.push(match peek!(self) { // Operator(Assign) => { // self.consume(); // let expression = try!(self.expression(0)); // default_params = true; // Parameter { // name: name.into(), // default: Some(Box::new(expression)) // } // } // _ => { // if default_params { // unexpected_token!(self); // } // Parameter { // name: name.into(), // default: None // } // } // }); // match next!(self) { // ParenClose => break, // Comma => {}, // _ => unexpected_token!(self) // } // } // Ok(list) } } pub fn parse<'src>(source: &'src str) -> Result<Program<'src>> { let mut parser = Parser::new(source); parser.parse()?; Ok(parser.program) } #[cfg(test)] mod test { use super::*; use ast::OperatorKind; macro_rules! assert_item { ($item:expr, $m:pat => $eval:expr) => { match $item { $m => assert!($eval), _ => panic!("Failed assert_item") } } } macro_rules! 
assert_ident { ($item:expr, $src:ident, $expect:expr) => { assert_item!($item, Item::Identifier(ref i) => i.as_str($src) == $expect); } } #[test] fn empty_parse() { let program = parse("").unwrap(); assert_eq!(program.items.len(), 0); assert_eq!(program.root, None); assert_eq!(program.statements().next(), None); } #[test] fn empty_statements() { let program = parse(";;;").unwrap(); assert_eq!(program.items.len(), 3); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next().unwrap(), &Item::EmptyStatement); assert_eq!(stmts.next(), None); } #[test] fn parse_ident_expr() { let src = "foo; bar; baz;"; let program = parse(src).unwrap(); let items = &program.items; // 3 times statement and expression assert_eq!(items.len(), 6); // First statement is after first expression assert_eq!(program.root, Some(1)); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(0)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(2)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(4)); assert_eq!(stmts.next(), None); // Match identifiers assert_ident!(items[0].item, src, "foo"); assert_ident!(items[2].item, src, "bar"); assert_ident!(items[4].item, src, "baz"); } #[test] fn parse_binary_and_postfix_expr() { let src = "foo + bar; baz++;"; let program = parse(src).unwrap(); let items = &program.items; // 2 statements, 3 simple expressions, one binary expression, one postfix expression assert_eq!(items.len(), 7); // First statement is after binary expression and two of it's side expressions assert_eq!(program.root, Some(3)); // Statements are linked let mut stmts = program.statements(); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(2)); assert_eq!(stmts.next().unwrap(), &Item::ExpressionStatement(5)); assert_eq!(stmts.next(), None); // 
Binary expression assert_eq!(items[2].item, Item::BinaryExpr { parenthesized: false, operator: OperatorKind::Addition, left: 0, right: 1, }); assert_ident!(items[0].item, src, "foo"); assert_ident!(items[1].item, src, "bar"); // Postfix expression assert_eq!(items[5].item, Item::PostfixExpr { operator: OperatorKind::Increment, operand: 4 }); assert_ident!(items[4].item, src, "baz"); } #[test] fn function_statement_empty() { let src = "function foo() {}"; let program = parse(src).unwrap(); let mut stmts = program.statements(); match *stmts.next().unwrap() { Item::FunctionStatement { ref name, params: None, body: None, } => assert_eq!(name.as_str(src), "foo"), _ => panic!() } assert_eq!(stmts.next(), None); } #[test] fn function_statement_params() { let src = "function foo(bar, baz) {}"; let program = parse(src).unwrap(); let items = &program.items; let mut stmts = program.statements(); match *stmts.next().unwrap() { Item::FunctionStatement { ref name, params: Some(0), body: None, } => assert_eq!(name.as_str(src), "foo"), _ => panic!() } // Params are linked let mut params = program.items.list(0); assert_ident!(*params.next().unwrap(), src, "bar"); assert_ident!(*params.next().unwrap(), src, "baz"); assert_eq!(params.next(), None); } }
extern crate itertools; use super::merge_policy::{MergePolicy, MergeCandidate}; use core::SegmentMeta; pub struct LogMergePolicy; use std::f64; const LEVEL_LOG_SIZE: f64 = 0.75; const MIN_MERGE_SIZE: usize = 3; impl MergePolicy for LogMergePolicy { fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> { if segments.is_empty() { return Vec::new(); } let mut size_sorted_tuples = segments.iter() .map(|x| x.num_docs) .enumerate() .collect::<Vec<(usize, u32)>>(); size_sorted_tuples.sort_by(|x,y| y.cmp(x)); let size_sorted_log_tuples: Vec<_> = size_sorted_tuples.iter() .map(|x| (x.0, (x.1 as f64).log2())) .collect(); let (first_ind, first_score) = size_sorted_log_tuples[0]; let mut current_max_log_size = first_score; let mut levels = vec!(vec!(first_ind)); for &(ind, score) in (&size_sorted_log_tuples).iter().skip(1) { if score < (current_max_log_size - LEVEL_LOG_SIZE) { current_max_log_size = score; levels.push(Vec::new()); } levels.last_mut().unwrap().push(ind); } let result = levels.iter() .filter(|level| {level.len() >= MIN_MERGE_SIZE}) .map(|ind_vec| { MergeCandidate(ind_vec.iter() .map(|&ind| segments[ind].segment_id) .collect()) }) .collect(); result } } impl Default for LogMergePolicy { fn default() -> LogMergePolicy { LogMergePolicy } } #[cfg(test)] mod tests { use super::*; use indexer::merge_policy::MergePolicy; use core::{SegmentMeta, SegmentId}; #[test] fn test_log_merge_policy_empty() { let y = Vec::new(); let result_list = LogMergePolicy::default().compute_merge_candidates(&y); assert!(result_list.len() == 0); } #[test] fn test_log_merge_policy_pair() { let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 1); } #[test] fn test_log_merge_policy_levels() { // multiple levels all get merged 
correctly let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 2); } #[test] fn test_log_merge_policy_within_levels() { // multiple levels all get merged correctly let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 11), SegmentMeta::new(SegmentId::generate_random(), 12), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 2); } } clip segment size floor extern crate itertools; use super::merge_policy::{MergePolicy, MergeCandidate}; use core::SegmentMeta; pub struct LogMergePolicy; use std::f64; const LEVEL_LOG_SIZE: f64 = 0.75; const MIN_SEGMENT_SIZE: u32 = 2; const MIN_MERGE_SIZE: usize = 3; impl MergePolicy for LogMergePolicy { fn compute_merge_candidates(&self, segments: &[SegmentMeta]) -> Vec<MergeCandidate> { if segments.is_empty() { return Vec::new(); } let mut size_sorted_tuples = segments.iter() .map(|x| x.num_docs) .enumerate() .collect::<Vec<(usize, u32)>>(); size_sorted_tuples.sort_by(|x,y| y.cmp(x)); fn clip_min_size(size: u32 ) -> u32 { if size <= MIN_SEGMENT_SIZE { MIN_SEGMENT_SIZE } else { size } } let size_sorted_log_tuples: Vec<_> = size_sorted_tuples.iter() .map(|x| (x.0, (clip_min_size(x.1) as f64).log2())) .collect(); let (first_ind, first_score) = size_sorted_log_tuples[0]; let mut current_max_log_size = first_score; let mut levels = vec!(vec!(first_ind)); for 
&(ind, score) in (&size_sorted_log_tuples).iter().skip(1) { if score < (current_max_log_size - LEVEL_LOG_SIZE) { current_max_log_size = score; levels.push(Vec::new()); } levels.last_mut().unwrap().push(ind); } let result = levels.iter() .filter(|level| {level.len() >= MIN_MERGE_SIZE}) .map(|ind_vec| { MergeCandidate(ind_vec.iter() .map(|&ind| segments[ind].segment_id) .collect()) }) .collect(); result } } impl Default for LogMergePolicy { fn default() -> LogMergePolicy { LogMergePolicy } } #[cfg(test)] mod tests { use super::*; use indexer::merge_policy::MergePolicy; use core::{SegmentMeta, SegmentId}; #[test] fn test_log_merge_policy_empty() { let y = Vec::new(); let result_list = LogMergePolicy::default().compute_merge_candidates(&y); assert!(result_list.len() == 0); } #[test] fn test_log_merge_policy_pair() { let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 1); } #[test] fn test_log_merge_policy_levels() { // multiple levels all get merged correctly let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 2); } #[test] fn test_log_merge_policy_within_levels() { // multiple levels all get merged correctly let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 10), SegmentMeta::new(SegmentId::generate_random(), 11), SegmentMeta::new(SegmentId::generate_random(), 12), SegmentMeta::new(SegmentId::generate_random(), 1000), 
SegmentMeta::new(SegmentId::generate_random(), 1000), SegmentMeta::new(SegmentId::generate_random(), 1000)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 2); } #[test] fn test_log_merge_policy_small_segments() { // multiple levels all get merged correctly let test_input = vec![SegmentMeta::new(SegmentId::generate_random(), 1), SegmentMeta::new(SegmentId::generate_random(), 1), SegmentMeta::new(SegmentId::generate_random(), 1), SegmentMeta::new(SegmentId::generate_random(), 2), SegmentMeta::new(SegmentId::generate_random(), 2), SegmentMeta::new(SegmentId::generate_random(), 2)]; let result_list = LogMergePolicy::default().compute_merge_candidates(&test_input); assert!(result_list.len() == 1); } }
//! The grid-details view lists several details views side-by-side. use std::io::{self, Write}; use ansi_term::ANSIStrings; use term_grid as grid; use crate::fs::{Dir, File}; use crate::fs::feature::git::GitCache; use crate::fs::feature::xattr::FileAttributes; use crate::fs::filter::FileFilter; use crate::output::cell::TextCell; use crate::output::details::{Options as DetailsOptions, Row as DetailsRow, Render as DetailsRender}; use crate::output::file_name::Options as FileStyle; use crate::output::grid::Options as GridOptions; use crate::output::table::{Table, Row as TableRow, Options as TableOptions}; use crate::output::tree::{TreeParams, TreeDepth}; use crate::theme::Theme; #[derive(PartialEq, Debug)] pub struct Options { pub grid: GridOptions, pub details: DetailsOptions, pub row_threshold: RowThreshold, } impl Options { pub fn to_details_options(&self) -> &DetailsOptions { &self.details } } /// The grid-details view can be configured to revert to just a details view /// (with one column) if it wouldn’t produce enough rows of output. /// /// Doing this makes the resulting output look a bit better: when listing a /// small directory of four files in four columns, the files just look spaced /// out and it’s harder to see what’s going on. So it can be enabled just for /// larger directory listings. #[derive(PartialEq, Debug, Copy, Clone)] pub enum RowThreshold { /// Only use grid-details view if it would result in at least this many /// rows of output. MinimumRows(usize), /// Use the grid-details view no matter what. AlwaysGrid, } pub struct Render<'a> { /// The directory that’s being rendered here. /// We need this to know which columns to put in the output. pub dir: Option<&'a Dir>, /// The files that have been read from the directory. They should all /// hold a reference to it. pub files: Vec<File<'a>>, /// How to colour various pieces of text. pub theme: &'a Theme, /// How to format filenames. 
pub file_style: &'a FileStyle, /// The grid part of the grid-details view. pub grid: &'a GridOptions, /// The details part of the grid-details view. pub details: &'a DetailsOptions, /// How to filter files after listing a directory. The files in this /// render will already have been filtered and sorted, but any directories /// that we recurse into will have to have this applied. pub filter: &'a FileFilter, /// The minimum number of rows that there need to be before grid-details /// mode is activated. pub row_threshold: RowThreshold, /// Whether we are skipping Git-ignored files. pub git_ignoring: bool, pub git: Option<&'a GitCache>, pub console_width: usize, } impl<'a> Render<'a> { /// Create a temporary Details render that gets used for the columns of /// the grid-details render that’s being generated. /// /// This includes an empty files vector because the files get added to /// the table in *this* file, not in details: we only want to insert every /// *n* files into each column’s table, not all of them. fn details_for_column(&self) -> DetailsRender<'a> { DetailsRender { dir: self.dir, files: Vec::new(), theme: self.theme, file_style: self.file_style, opts: self.details, recurse: None, filter: self.filter, git_ignoring: self.git_ignoring, git: self.git, } } /// Create a Details render for when this grid-details render doesn’t fit /// in the terminal (or something has gone wrong) and we have given up, or /// when the user asked for a grid-details view but the terminal width is /// not available, so we downgrade. pub fn give_up(self) -> DetailsRender<'a> { DetailsRender { dir: self.dir, files: self.files, theme: self.theme, file_style: self.file_style, opts: self.details, recurse: None, filter: self.filter, git_ignoring: self.git_ignoring, git: self.git, } } // This doesn’t take an IgnoreCache even though the details one does // because grid-details has no tree view. 
pub fn render<W: Write>(mut self, w: &mut W) -> io::Result<()> { if let Some((grid, width)) = self.find_fitting_grid() { write!(w, "{}", grid.fit_into_columns(width)) } else { self.give_up().render(w) } } pub fn find_fitting_grid(&mut self) -> Option<(grid::Grid, grid::Width)> { let options = self.details.table.as_ref().expect("Details table options not given!"); let drender = self.details_for_column(); let (first_table, _) = self.make_table(options, &drender); let rows = self.files.iter() .map(|file| first_table.row_for_file(file, file_has_xattrs(file))) .collect::<Vec<_>>(); let file_names = self.files.iter() .map(|file| self.file_style.for_file(file, self.theme).paint().promote()) .collect::<Vec<_>>(); let mut last_working_grid = self.make_grid(1, options, &file_names, rows.clone(), &drender); if file_names.len() == 1 { return Some((last_working_grid, 1)); } // If we can’t fit everything in a grid 100 columns wide, then // something has gone seriously awry for column_count in 2..100 { let grid = self.make_grid(column_count, options, &file_names, rows.clone(), &drender); let the_grid_fits = { let d = grid.fit_into_columns(column_count); d.width() <= self.console_width }; if the_grid_fits { if column_count == file_names.len() { return Some((grid, column_count)); } else { last_working_grid = grid; } } else { // If we’ve figured out how many columns can fit in the user’s // terminal, and it turns out there aren’t enough rows to // make it worthwhile, then just resort to the lines view. if let RowThreshold::MinimumRows(thresh) = self.row_threshold { if last_working_grid.fit_into_columns(column_count - 1).row_count() < thresh { return None; } } return Some((last_working_grid, column_count - 1)); } } None } fn make_table(&mut self, options: &'a TableOptions, drender: &DetailsRender<'_>) -> (Table<'a>, Vec<DetailsRow>) { match (self.git, self.dir) { (Some(g), Some(d)) => if ! g.has_anything_for(&d.path) { self.git = None }, (Some(g), None) => if ! 
self.files.iter().any(|f| g.has_anything_for(&f.path)) { self.git = None }, (None, _) => {/* Keep Git how it is */}, } let mut table = Table::new(options, self.git, &self.theme); let mut rows = Vec::new(); if self.details.header { let row = table.header_row(); table.add_widths(&row); rows.push(drender.render_header(row)); } (table, rows) } fn make_grid(&mut self, column_count: usize, options: &'a TableOptions, file_names: &[TextCell], rows: Vec<TableRow>, drender: &DetailsRender<'_>) -> grid::Grid { let mut tables = Vec::new(); for _ in 0 .. column_count { tables.push(self.make_table(options, drender)); } let mut num_cells = rows.len(); if self.details.header { num_cells += column_count; } let original_height = divide_rounding_up(rows.len(), column_count); let height = divide_rounding_up(num_cells, column_count); for (i, (file_name, row)) in file_names.iter().zip(rows.into_iter()).enumerate() { let index = if self.grid.across { i % column_count } else { i / original_height }; let (ref mut table, ref mut rows) = tables[index]; table.add_widths(&row); let details_row = drender.render_file(row, file_name.clone(), TreeParams::new(TreeDepth::root(), false)); rows.push(details_row); } let columns = tables .into_iter() .map(|(table, details_rows)| { drender.iterate_with_table(table, details_rows) .collect::<Vec<_>>() }) .collect::<Vec<_>>(); let direction = if self.grid.across { grid::Direction::LeftToRight } else { grid::Direction::TopToBottom }; let filling = grid::Filling::Spaces(4); let mut grid = grid::Grid::new(grid::GridOptions { direction, filling }); if self.grid.across { for row in 0 .. 
height { for column in &columns { if row < column.len() { let cell = grid::Cell { contents: ANSIStrings(&column[row].contents).to_string(), width: *column[row].width, }; grid.add(cell); } } } } else { for column in &columns { for cell in column.iter() { let cell = grid::Cell { contents: ANSIStrings(&cell.contents).to_string(), width: *cell.width, }; grid.add(cell); } } } grid } } fn divide_rounding_up(a: usize, b: usize) -> usize { let mut result = a / b; if a % b != 0 { result += 1; } result } fn file_has_xattrs(file: &File<'_>) -> bool { match file.path.attributes() { Ok(attrs) => ! attrs.is_empty(), Err(_) => false, } } Fix EXA_GRID_ROWS not working in some cases //! The grid-details view lists several details views side-by-side. use std::io::{self, Write}; use ansi_term::ANSIStrings; use term_grid as grid; use crate::fs::{Dir, File}; use crate::fs::feature::git::GitCache; use crate::fs::feature::xattr::FileAttributes; use crate::fs::filter::FileFilter; use crate::output::cell::TextCell; use crate::output::details::{Options as DetailsOptions, Row as DetailsRow, Render as DetailsRender}; use crate::output::file_name::Options as FileStyle; use crate::output::grid::Options as GridOptions; use crate::output::table::{Table, Row as TableRow, Options as TableOptions}; use crate::output::tree::{TreeParams, TreeDepth}; use crate::theme::Theme; #[derive(PartialEq, Debug)] pub struct Options { pub grid: GridOptions, pub details: DetailsOptions, pub row_threshold: RowThreshold, } impl Options { pub fn to_details_options(&self) -> &DetailsOptions { &self.details } } /// The grid-details view can be configured to revert to just a details view /// (with one column) if it wouldn’t produce enough rows of output. /// /// Doing this makes the resulting output look a bit better: when listing a /// small directory of four files in four columns, the files just look spaced /// out and it’s harder to see what’s going on. So it can be enabled just for /// larger directory listings. 
#[derive(PartialEq, Debug, Copy, Clone)] pub enum RowThreshold { /// Only use grid-details view if it would result in at least this many /// rows of output. MinimumRows(usize), /// Use the grid-details view no matter what. AlwaysGrid, } pub struct Render<'a> { /// The directory that’s being rendered here. /// We need this to know which columns to put in the output. pub dir: Option<&'a Dir>, /// The files that have been read from the directory. They should all /// hold a reference to it. pub files: Vec<File<'a>>, /// How to colour various pieces of text. pub theme: &'a Theme, /// How to format filenames. pub file_style: &'a FileStyle, /// The grid part of the grid-details view. pub grid: &'a GridOptions, /// The details part of the grid-details view. pub details: &'a DetailsOptions, /// How to filter files after listing a directory. The files in this /// render will already have been filtered and sorted, but any directories /// that we recurse into will have to have this applied. pub filter: &'a FileFilter, /// The minimum number of rows that there need to be before grid-details /// mode is activated. pub row_threshold: RowThreshold, /// Whether we are skipping Git-ignored files. pub git_ignoring: bool, pub git: Option<&'a GitCache>, pub console_width: usize, } impl<'a> Render<'a> { /// Create a temporary Details render that gets used for the columns of /// the grid-details render that’s being generated. /// /// This includes an empty files vector because the files get added to /// the table in *this* file, not in details: we only want to insert every /// *n* files into each column’s table, not all of them. 
    fn details_for_column(&self) -> DetailsRender<'a> {
        DetailsRender {
            dir: self.dir,
            files: Vec::new(),
            theme: self.theme,
            file_style: self.file_style,
            opts: self.details,
            recurse: None,
            filter: self.filter,
            git_ignoring: self.git_ignoring,
            git: self.git,
        }
    }

    /// Create a Details render for when this grid-details render doesn’t fit
    /// in the terminal (or something has gone wrong) and we have given up, or
    /// when the user asked for a grid-details view but the terminal width is
    /// not available, so we downgrade.
    pub fn give_up(self) -> DetailsRender<'a> {
        DetailsRender {
            dir: self.dir,
            files: self.files,
            theme: self.theme,
            file_style: self.file_style,
            opts: self.details,
            recurse: None,
            filter: self.filter,
            git_ignoring: self.git_ignoring,
            git: self.git,
        }
    }

    // This doesn’t take an IgnoreCache even though the details one does
    // because grid-details has no tree view.

    /// Renders the grid-details view to the given writer, falling back to a
    /// plain details view when no grid fits (or the threshold isn’t met).
    pub fn render<W: Write>(mut self, w: &mut W) -> io::Result<()> {
        if let Some((grid, width)) = self.find_fitting_grid() {
            write!(w, "{}", grid.fit_into_columns(width))
        }
        else {
            self.give_up().render(w)
        }
    }

    /// Searches for the widest grid (most columns) that still fits in the
    /// terminal width. Returns `None` when the fitted grid has fewer rows
    /// than the configured `RowThreshold` (EXA_GRID_ROWS), signalling the
    /// caller to fall back to the details view.
    pub fn find_fitting_grid(&mut self) -> Option<(grid::Grid, grid::Width)> {
        let options = self.details.table.as_ref().expect("Details table options not given!");

        let drender = self.details_for_column();

        // A throwaway table, used only to compute one details row per file.
        let (first_table, _) = self.make_table(options, &drender);

        let rows = self.files.iter()
            .map(|file| first_table.row_for_file(file, file_has_xattrs(file)))
            .collect::<Vec<_>>();

        let file_names = self.files.iter()
            .map(|file| self.file_style.for_file(file, self.theme).paint().promote())
            .collect::<Vec<_>>();

        // A one-column grid always "fits"; keep it as the fallback candidate.
        let mut last_working_grid = self.make_grid(1, options, &file_names, rows.clone(), &drender);

        if file_names.len() == 1 {
            return Some((last_working_grid, 1));
        }

        // If we can’t fit everything in a grid 100 columns wide, then
        // something has gone seriously awry
        for column_count in 2..100 {
            let grid = self.make_grid(column_count, options, &file_names, rows.clone(), &drender);

            let the_grid_fits = {
                let d = grid.fit_into_columns(column_count);
                d.width() <= self.console_width
            };

            if the_grid_fits {
                last_working_grid = grid;
            }

            // Stop either when the grid stops fitting, or when there’s one
            // column per file (no point trying wider grids).
            if !the_grid_fits || column_count == file_names.len() {
                let last_column_count = if the_grid_fits { column_count } else { column_count - 1 };
                // If we’ve figured out how many columns can fit in the user’s terminal,
                // and it turns out there aren’t enough rows to make it worthwhile
                // (according to EXA_GRID_ROWS), then just resort to the lines view.
                if let RowThreshold::MinimumRows(thresh) = self.row_threshold {
                    if last_working_grid.fit_into_columns(last_column_count).row_count() < thresh {
                        return None;
                    }
                }

                return Some((last_working_grid, last_column_count));
            }
        }

        None
    }

    /// Builds one column’s table, plus any initial header row for it.
    /// Drops the Git cache from `self` if it has nothing relevant to show.
    fn make_table(&mut self, options: &'a TableOptions, drender: &DetailsRender<'_>) -> (Table<'a>, Vec<DetailsRow>) {
        match (self.git, self.dir) {
            (Some(g), Some(d)) => if ! g.has_anything_for(&d.path) { self.git = None },
            (Some(g), None) => if ! self.files.iter().any(|f| g.has_anything_for(&f.path)) { self.git = None },
            (None, _) => {/* Keep Git how it is */},
        }

        let mut table = Table::new(options, self.git, &self.theme);
        let mut rows = Vec::new();

        if self.details.header {
            let row = table.header_row();
            table.add_widths(&row);
            rows.push(drender.render_header(row));
        }

        (table, rows)
    }

    /// Distributes the pre-computed rows and file names over `column_count`
    /// column tables, then flattens every column’s rendered cells into a
    /// single `term_grid::Grid`.
    fn make_grid(&mut self, column_count: usize, options: &'a TableOptions, file_names: &[TextCell], rows: Vec<TableRow>, drender: &DetailsRender<'_>) -> grid::Grid {
        let mut tables = Vec::new();
        for _ in 0 .. column_count {
            tables.push(self.make_table(options, drender));
        }

        // When headers are shown, every column gains one extra cell on top of
        // its file rows, so the total cell count differs from the row count.
        let mut num_cells = rows.len();
        if self.details.header {
            num_cells += column_count;
        }

        // `original_height` counts file rows only and decides which column a
        // file lands in when filling downwards; `height` includes header
        // cells and bounds the row-major iteration below.
        let original_height = divide_rounding_up(rows.len(), column_count);
        let height = divide_rounding_up(num_cells, column_count);

        for (i, (file_name, row)) in file_names.iter().zip(rows.into_iter()).enumerate() {
            // Across: round-robin over columns. Down: fill each column to
            // `original_height` before moving to the next.
            let index = if self.grid.across {
                    i % column_count
                }
                else {
                    i / original_height
                };

            let (ref mut table, ref mut rows) = tables[index];
            table.add_widths(&row);
            let details_row = drender.render_file(row, file_name.clone(), TreeParams::new(TreeDepth::root(), false));
            rows.push(details_row);
        }

        let columns = tables
            .into_iter()
            .map(|(table, details_rows)| {
                drender.iterate_with_table(table, details_rows)
                    .collect::<Vec<_>>()
            })
            .collect::<Vec<_>>();

        let direction = if self.grid.across { grid::Direction::LeftToRight }
                        else { grid::Direction::TopToBottom };

        let filling = grid::Filling::Spaces(4);

        let mut grid = grid::Grid::new(grid::GridOptions { direction, filling });

        if self.grid.across {
            // Interleave the columns row-by-row so the grid receives cells in
            // left-to-right order.
            for row in 0 .. height {
                for column in &columns {
                    if row < column.len() {
                        let cell = grid::Cell {
                            contents: ANSIStrings(&column[row].contents).to_string(),
                            width: *column[row].width,
                        };

                        grid.add(cell);
                    }
                }
            }
        }
        else {
            // Top-to-bottom direction: emit each column’s cells in sequence.
            for column in &columns {
                for cell in column.iter() {
                    let cell = grid::Cell {
                        contents: ANSIStrings(&cell.contents).to_string(),
                        width: *cell.width,
                    };

                    grid.add(cell);
                }
            }
        }

        grid
    }
}


/// Integer division that rounds towards positive infinity.
fn divide_rounding_up(a: usize, b: usize) -> usize {
    let mut result = a / b;
    if a % b != 0 {
        result += 1;
    }
    result
}


/// Whether the file has any extended attributes; errors reading them are
/// treated as “no attributes”.
fn file_has_xattrs(file: &File<'_>) -> bool {
    match file.path.attributes() {
        Ok(attrs) => ! attrs.is_empty(),
        Err(_) => false,
    }
}
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 use std::fmt::{Display, Formatter}; use std::result; use std::sync::{Arc, Mutex}; #[cfg(not(test))] use super::{builder::build_microvm_for_boot, resources::VmResources, Vmm}; #[cfg(all(not(test), target_arch = "x86_64"))] use super::{persist::create_snapshot, persist::load_snapshot}; #[cfg(test)] use tests::{build_microvm_for_boot, MockVmRes as VmResources, MockVmm as Vmm}; #[cfg(all(test, target_arch = "x86_64"))] use tests::{create_snapshot, load_snapshot}; use super::Error as VmmError; use crate::builder::StartMicrovmError; #[cfg(target_arch = "x86_64")] use crate::persist::{CreateSnapshotError, LoadSnapshotError}; #[cfg(target_arch = "x86_64")] use crate::version_map::VERSION_MAP; use crate::vmm_config; use crate::vmm_config::balloon::{ BalloonConfigError, BalloonDeviceConfig, BalloonStats, BalloonUpdateConfig, BalloonUpdateStatsConfig, }; use crate::vmm_config::boot_source::{BootSourceConfig, BootSourceConfigError}; use crate::vmm_config::drive::{BlockDeviceConfig, DriveError}; use crate::vmm_config::instance_info::InstanceInfo; use crate::vmm_config::logger::{LoggerConfig, LoggerConfigError}; use crate::vmm_config::machine_config::{VmConfig, VmConfigError}; use crate::vmm_config::metrics::{MetricsConfig, MetricsConfigError}; use crate::vmm_config::mmds::{MmdsConfig, MmdsConfigError}; use crate::vmm_config::net::{ NetworkInterfaceConfig, NetworkInterfaceError, NetworkInterfaceUpdateConfig, }; #[cfg(target_arch = "x86_64")] use crate::vmm_config::snapshot::{CreateSnapshotParams, LoadSnapshotParams, SnapshotType}; use crate::vmm_config::vsock::{VsockConfigError, VsockDeviceConfig}; use logger::{info, update_metric_with_elapsed_time, METRICS}; use polly::event_manager::EventManager; use seccomp::BpfProgram; /// This enum represents the public interface of the VMM. Each action contains various /// bits of information (ids, paths, etc.). 
#[derive(PartialEq)]
pub enum VmmAction {
    /// Configure the boot source of the microVM using as input the `ConfigureBootSource`. This
    /// action can only be called before the microVM has booted.
    ConfigureBootSource(BootSourceConfig),
    /// Configure the logger using as input the `LoggerConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureLogger(LoggerConfig),
    /// Configure the metrics using as input the `MetricsConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureMetrics(MetricsConfig),
    /// Create a snapshot using as input the `CreateSnapshotParams`. This action can only be called
    /// after the microVM has booted and only when the microVM is in `Paused` state.
    #[cfg(target_arch = "x86_64")]
    CreateSnapshot(CreateSnapshotParams),
    /// Get the balloon device latest statistics.
    GetBalloonStats,
    /// Get the configuration of the microVM.
    GetVmConfiguration,
    /// Flush the metrics. This action can only be called after the logger has been configured.
    FlushMetrics,
    /// Add a new block device or update one that already exists using the `BlockDeviceConfig` as
    /// input. This action can only be called before the microVM has booted.
    InsertBlockDevice(BlockDeviceConfig),
    /// Add a new network interface config or update one that already exists using the
    /// `NetworkInterfaceConfig` as input. This action can only be called before the microVM has
    /// booted.
    InsertNetworkDevice(NetworkInterfaceConfig),
    /// Load the microVM state using as input the `LoadSnapshotParams`. This action can only be
    /// called before the microVM has booted. If this action is successful, the loaded microVM will
    /// be in `Paused` state. Should change this state to `Resumed` for the microVM to run.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshot(LoadSnapshotParams),
    /// Pause the guest, by pausing the microVM VCPUs.
    Pause,
    /// Resume the guest, by resuming the microVM VCPUs.
    Resume,
    /// Set the balloon device or update the one that already exists using the
    /// `BalloonDeviceConfig` as input. This action can only be called before the microVM
    /// has booted.
    SetBalloonDevice(BalloonDeviceConfig),
    /// Set the MMDS configuration.
    SetMmdsConfiguration(MmdsConfig),
    /// Set the vsock device or update the one that already exists using the
    /// `VsockDeviceConfig` as input. This action can only be called before the microVM has
    /// booted.
    SetVsockDevice(VsockDeviceConfig),
    /// Set the microVM configuration (memory & vcpu) using `VmConfig` as input. This
    /// action can only be called before the microVM has booted.
    SetVmConfiguration(VmConfig),
    /// Launch the microVM. This action can only be called before the microVM has booted.
    StartMicroVm,
    /// Send CTRL+ALT+DEL to the microVM, using the i8042 keyboard function. If an AT-keyboard
    /// driver is listening on the guest end, this can be used to shut down the microVM gracefully.
    #[cfg(target_arch = "x86_64")]
    SendCtrlAltDel,
    /// Update the balloon size, after microVM start.
    UpdateBalloon(BalloonUpdateConfig),
    /// Update the balloon statistics polling interval, after microVM start.
    UpdateBalloonStatistics(BalloonUpdateStatsConfig),
    /// Update the path of an existing block device. The data associated with this variant
    /// represents the `drive_id` and the `path_on_host`.
    UpdateBlockDevicePath(String, String),
    /// Update a network interface, after microVM start. Currently, the only updatable properties
    /// are the RX and TX rate limiters.
    UpdateNetworkInterface(NetworkInterfaceUpdateConfig),
}

/// Wrapper for all errors associated with VMM actions.
#[derive(Debug)]
pub enum VmmActionError {
    /// The action `SetBalloonDevice` failed because of bad user input.
    BalloonConfig(BalloonConfigError),
    /// The action `ConfigureBootSource` failed because of bad user input.
    BootSource(BootSourceConfigError),
    /// The action `CreateSnapshot` failed.
    #[cfg(target_arch = "x86_64")]
    CreateSnapshot(CreateSnapshotError),
    /// One of the actions `InsertBlockDevice` or `UpdateBlockDevicePath`
    /// failed because of bad user input.
    DriveConfig(DriveError),
    /// Internal Vmm error.
    InternalVmm(VmmError),
    /// Loading a microVM snapshot failed.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshot(LoadSnapshotError),
    /// Loading a microVM snapshot not allowed after configuring boot-specific resources.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshotNotAllowed,
    /// The action `ConfigureLogger` failed because of bad user input.
    Logger(LoggerConfigError),
    /// One of the actions `GetVmConfiguration` or `SetVmConfiguration` failed because of bad input.
    MachineConfig(VmConfigError),
    /// The action `ConfigureMetrics` failed because of bad user input.
    Metrics(MetricsConfigError),
    /// The action `SetMmdsConfiguration` failed because of bad user input.
    MmdsConfig(MmdsConfigError),
    /// The action `InsertNetworkDevice` failed because of bad user input.
    NetworkConfig(NetworkInterfaceError),
    /// The requested operation is not supported after starting the microVM.
    OperationNotSupportedPostBoot,
    /// The requested operation is not supported before starting the microVM.
    OperationNotSupportedPreBoot,
    /// The action `StartMicroVm` failed because of an internal error.
    StartMicrovm(StartMicrovmError),
    /// The action `SetVsockDevice` failed because of bad user input.
    VsockConfig(VsockConfigError),
}

impl Display for VmmActionError {
    // Human-readable rendering: most variants defer to the wrapped error's
    // own Display; the parameterless variants carry fixed messages.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        use self::VmmActionError::*;
        write!(
            f,
            "{}",
            match self {
                BalloonConfig(err) => err.to_string(),
                BootSource(err) => err.to_string(),
                #[cfg(target_arch = "x86_64")]
                CreateSnapshot(err) => err.to_string(),
                DriveConfig(err) => err.to_string(),
                InternalVmm(err) => format!("Internal Vmm error: {}", err),
                #[cfg(target_arch = "x86_64")]
                LoadSnapshot(err) => format!("Load microVM snapshot error: {}", err),
                #[cfg(target_arch = "x86_64")]
                LoadSnapshotNotAllowed => {
                    "Loading a microVM snapshot not allowed after configuring boot-specific resources."
                        .to_string()
                }
                Logger(err) => err.to_string(),
                MachineConfig(err) => err.to_string(),
                Metrics(err) => err.to_string(),
                MmdsConfig(err) => err.to_string(),
                NetworkConfig(err) => err.to_string(),
                OperationNotSupportedPostBoot => {
                    "The requested operation is not supported after starting the microVM."
                        .to_string()
                }
                OperationNotSupportedPreBoot => {
                    "The requested operation is not supported before starting the microVM."
                        .to_string()
                }
                StartMicrovm(err) => err.to_string(),
                VsockConfig(err) => err.to_string(),
            }
        )
    }
}

/// The enum represents the response sent by the VMM in case of success. The response is either
/// empty, when no data needs to be sent, or an internal VMM structure.
#[derive(Debug, PartialEq)]
pub enum VmmData {
    /// The latest balloon device statistics.
    BalloonStats(BalloonStats),
    /// No data is sent on the channel.
    Empty,
    /// The microVM configuration represented by `VmConfig`.
    MachineConfiguration(VmConfig),
}

/// Shorthand result type for external VMM commands.
pub type ActionResult = result::Result<VmmData, VmmActionError>;

/// Enables pre-boot setup and instantiation of a Firecracker VMM.
pub struct PrebootApiController<'a> {
    // Seccomp filter applied to the microVM when it is built.
    seccomp_filter: BpfProgram,
    // Identity/version info, passed through to the logger on configuration.
    instance_info: InstanceInfo,
    // Resources being accumulated by successive pre-boot API calls.
    vm_resources: &'a mut VmResources,
    event_manager: &'a mut EventManager,
    // Becomes `Some` once `StartMicroVm` or `LoadSnapshot` succeeds.
    built_vmm: Option<Arc<Mutex<Vmm>>>,
    // Configuring boot specific resources will set this to true.
    // Loading from snapshot will not be allowed once this is true.
    boot_path: bool,
}

impl<'a> PrebootApiController<'a> {
    /// Constructor for the PrebootApiController.
    pub fn new(
        seccomp_filter: BpfProgram,
        instance_info: InstanceInfo,
        vm_resources: &'a mut VmResources,
        event_manager: &'a mut EventManager,
    ) -> PrebootApiController<'a> {
        PrebootApiController {
            seccomp_filter,
            instance_info,
            vm_resources,
            event_manager,
            built_vmm: None,
            boot_path: false,
        }
    }

    /// Default implementation for the function that builds and starts a microVM.
    /// It takes two closures `recv_req` and `respond` as params which abstract away
    /// the message transport.
    ///
    /// Returns a populated `VmResources` object and a running `Vmm` object.
    pub fn build_microvm_from_requests<F, G>(
        seccomp_filter: BpfProgram,
        event_manager: &mut EventManager,
        instance_info: InstanceInfo,
        recv_req: F,
        respond: G,
        boot_timer_enabled: bool,
    ) -> (VmResources, Arc<Mutex<Vmm>>)
    where
        F: Fn() -> VmmAction,
        G: Fn(ActionResult),
    {
        let mut vm_resources = VmResources::default();
        vm_resources.boot_timer = boot_timer_enabled;
        let mut preboot_controller = PrebootApiController::new(
            seccomp_filter,
            instance_info,
            &mut vm_resources,
            event_manager,
        );
        // Configure and start microVM through successive API calls.
        // Iterate through API calls to configure microVm.
        // The loop breaks when a microVM is successfully started, and a running Vmm is built.
        while preboot_controller.built_vmm.is_none() {
            // Get request, process it, send back the response.
            respond(preboot_controller.handle_preboot_request(recv_req()));
        }

        // Safe to unwrap because previous loop cannot end on None.
        let vmm = preboot_controller.built_vmm.unwrap();

        (vm_resources, vmm)
    }

    /// Handles the incoming preboot request and provides a response for it.
    /// Returns a built/running `Vmm` after handling a successful `StartMicroVm` request.
    pub fn handle_preboot_request(&mut self, request: VmmAction) -> ActionResult {
        use self::VmmAction::*;

        match request {
            // Supported operations allowed pre-boot.
            ConfigureBootSource(config) => self.set_boot_source(config),
            ConfigureLogger(logger_cfg) => {
                vmm_config::logger::init_logger(logger_cfg, &self.instance_info)
                    .map(|()| VmmData::Empty)
                    .map_err(VmmActionError::Logger)
            }
            ConfigureMetrics(metrics_cfg) => vmm_config::metrics::init_metrics(metrics_cfg)
                .map(|()| VmmData::Empty)
                .map_err(VmmActionError::Metrics),
            GetVmConfiguration => Ok(VmmData::MachineConfiguration(
                self.vm_resources.vm_config().clone(),
            )),
            InsertBlockDevice(config) => self.insert_block_device(config),
            InsertNetworkDevice(config) => self.insert_net_device(config),
            #[cfg(target_arch = "x86_64")]
            LoadSnapshot(config) => self.load_snapshot(&config),
            SetBalloonDevice(balloon_cfg) => self
                .vm_resources
                .balloon
                .set(balloon_cfg)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonConfig),
            SetVsockDevice(config) => self.set_vsock_device(config),
            SetVmConfiguration(config) => self.set_vm_config(config),
            SetMmdsConfiguration(config) => self.set_mmds_config(config),
            StartMicroVm => self.start_microvm(),

            // Operations not allowed pre-boot.
            FlushMetrics | Pause | Resume | GetBalloonStats | UpdateBalloon(_)
            | UpdateBalloonStatistics(_) | UpdateBlockDevicePath(_, _)
            | UpdateNetworkInterface(_) => Err(VmmActionError::OperationNotSupportedPreBoot),
            #[cfg(target_arch = "x86_64")]
            CreateSnapshot(_) | SendCtrlAltDel => Err(VmmActionError::OperationNotSupportedPreBoot),
        }
    }

    // Each `insert_*`/`set_*` helper below marks the boot path as taken
    // (blocking any later `LoadSnapshot`) and forwards to `VmResources`.

    fn insert_block_device(&mut self, cfg: BlockDeviceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_block_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::DriveConfig)
    }

    fn insert_net_device(&mut self, cfg: NetworkInterfaceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .build_net_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::NetworkConfig)
    }

    fn set_boot_source(&mut self, cfg: BootSourceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_boot_source(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::BootSource)
    }

    fn set_mmds_config(&mut self, cfg: MmdsConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_mmds_config(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MmdsConfig)
    }

    fn set_vm_config(&mut self, cfg: VmConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_vm_config(&cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MachineConfig)
    }

    fn set_vsock_device(&mut self, cfg: VsockDeviceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_vsock_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::VsockConfig)
    }

    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    fn start_microvm(&mut self) -> ActionResult {
        build_microvm_for_boot(
            &self.vm_resources,
            &mut self.event_manager,
            &self.seccomp_filter,
        )
        .map(|vmm| {
            self.built_vmm = Some(vmm);
            VmmData::Empty
        })
        .map_err(VmmActionError::StartMicrovm)
    }

    #[cfg(target_arch = "x86_64")]
    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    fn load_snapshot(&mut self, load_params: &LoadSnapshotParams) -> ActionResult {
        let load_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        // Snapshot restore is mutually exclusive with the regular boot path.
        if self.boot_path {
            let err = VmmActionError::LoadSnapshotNotAllowed;
            info!("{}", err);
            return Err(err);
        }

        let loaded_vmm = load_snapshot(
            &mut self.event_manager,
            &self.seccomp_filter,
            load_params,
            VERSION_MAP.clone(),
        );

        let elapsed_time_us =
            update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_load_snapshot, load_start_us);
        info!("'load snapshot' VMM action took {} us.", elapsed_time_us);

        loaded_vmm
            .map(|vmm| {
                self.built_vmm = Some(vmm);
                VmmData::Empty
            })
            .map_err(VmmActionError::LoadSnapshot)
    }
}

/// Enables RPC interaction with a running Firecracker VMM.
pub struct RuntimeApiController {
    // The running VMM; shared with the event loop, hence the lock.
    vmm: Arc<Mutex<Vmm>>,
    // Snapshot of the machine config taken at boot, served by `GetVmConfiguration`.
    vm_config: VmConfig,
}

impl RuntimeApiController {
    /// Handles the incoming runtime `VmmAction` request and provides a response for it.
    pub fn handle_request(&mut self, request: VmmAction) -> ActionResult {
        use self::VmmAction::*;
        match request {
            // Supported operations allowed post-boot.
            #[cfg(target_arch = "x86_64")]
            CreateSnapshot(snapshot_create_cfg) => self.create_snapshot(&snapshot_create_cfg),
            FlushMetrics => self.flush_metrics(),
            GetBalloonStats => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .latest_balloon_stats()
                .map(VmmData::BalloonStats)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            GetVmConfiguration => Ok(VmmData::MachineConfiguration(self.vm_config.clone())),
            Pause => self.pause(),
            Resume => self.resume(),
            #[cfg(target_arch = "x86_64")]
            SendCtrlAltDel => self.send_ctrl_alt_del(),
            UpdateBalloon(balloon_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_config(balloon_update.amount_mb)
                .map(|_| VmmData::Empty)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            UpdateBalloonStatistics(balloon_stats_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_stats_config(balloon_stats_update.stats_polling_interval_s)
                .map(|_| VmmData::Empty)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            UpdateBlockDevicePath(drive_id, new_path) => {
                self.update_block_device_path(&drive_id, new_path)
            }
            UpdateNetworkInterface(netif_update) => self.update_net_rate_limiters(netif_update),

            // Operations not allowed post-boot.
            ConfigureBootSource(_) | ConfigureLogger(_) | ConfigureMetrics(_)
            | InsertBlockDevice(_) | InsertNetworkDevice(_) | SetBalloonDevice(_)
            | SetVsockDevice(_) | SetMmdsConfiguration(_) | SetVmConfiguration(_)
            | StartMicroVm => Err(VmmActionError::OperationNotSupportedPostBoot),
            #[cfg(target_arch = "x86_64")]
            LoadSnapshot(_) => Err(VmmActionError::OperationNotSupportedPostBoot),
        }
    }

    /// Creates a new `RuntimeApiController`.
    pub fn new(vm_config: VmConfig, vmm: Arc<Mutex<Vmm>>) -> Self {
        Self { vm_config, vmm }
    }

    /// Pauses the microVM by pausing the vCPUs.
    pub fn pause(&mut self) -> ActionResult {
        let pause_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        self.vmm
            .lock()
            .expect("Poisoned lock")
            .pause_vcpus()
            .map_err(VmmActionError::InternalVmm)?;

        let elapsed_time_us =
            update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_pause_vm, pause_start_us);
        info!("'pause vm' VMM action took {} us.", elapsed_time_us);

        Ok(VmmData::Empty)
    }

    /// Resumes the microVM by resuming the vCPUs.
    pub fn resume(&mut self) -> ActionResult {
        let resume_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        self.vmm
            .lock()
            .expect("Poisoned lock")
            .resume_vcpus()
            .map_err(VmmActionError::InternalVmm)?;

        let elapsed_time_us =
            update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_resume_vm, resume_start_us);
        info!("'resume vm' VMM action took {} us.", elapsed_time_us);

        Ok(VmmData::Empty)
    }

    /// Write the metrics on user demand (flush). We use the word `flush` here to highlight the fact
    /// that the metrics will be written immediately.
    /// Defer to inner Vmm. We'll move to a variant where the Vmm simply exposes functionality like
    /// getting the dirty pages, and then we'll have the metrics flushing logic entirely on the outside.
    fn flush_metrics(&mut self) -> ActionResult {
        // FIXME: we're losing the bool saying whether metrics were actually written.
        METRICS
            .write()
            .map(|_| VmmData::Empty)
            .map_err(super::Error::Metrics)
            .map_err(VmmActionError::InternalVmm)
    }

    /// Injects CTRL+ALT+DEL keystroke combo to the inner Vmm (if present).
    #[cfg(target_arch = "x86_64")]
    fn send_ctrl_alt_del(&mut self) -> ActionResult {
        self.vmm
            .lock()
            .expect("Poisoned lock")
            .send_ctrl_alt_del()
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::InternalVmm)
    }

    /// Creates a snapshot of the paused microVM and records the latency metric.
    #[cfg(target_arch = "x86_64")]
    fn create_snapshot(&mut self, create_params: &CreateSnapshotParams) -> ActionResult {
        let mut locked_vmm = self.vmm.lock().unwrap();
        let create_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        create_snapshot(&mut locked_vmm, create_params, VERSION_MAP.clone())
            .map_err(VmmActionError::CreateSnapshot)?;

        match create_params.snapshot_type {
            SnapshotType::Full => {
                let elapsed_time_us = update_metric_with_elapsed_time(
                    &METRICS.latencies_us.vmm_full_create_snapshot,
                    create_start_us,
                );
                info!(
                    "'create full snapshot' VMM action took {} us.",
                    elapsed_time_us
                );
            }
        }
        Ok(VmmData::Empty)
    }

    /// Updates the path of the host file backing the emulated block device with id `drive_id`.
    /// We update the disk image on the device and its virtio configuration.
    fn update_block_device_path(&mut self, drive_id: &str, new_path: String) -> ActionResult {
        self.vmm
            .lock()
            .expect("Poisoned lock")
            .update_block_device_path(drive_id, new_path)
            .map(|()| VmmData::Empty)
            .map_err(DriveError::DeviceUpdate)
            .map_err(VmmActionError::DriveConfig)
    }

    /// Updates configuration for an emulated net device as described in `new_cfg`.
    fn update_net_rate_limiters(&mut self, new_cfg: NetworkInterfaceUpdateConfig) -> ActionResult {
        self.vmm
            .lock()
            .expect("Poisoned lock")
            .update_net_rate_limiters(
                &new_cfg.iface_id,
                new_cfg.rx_bytes(),
                new_cfg.rx_ops(),
                new_cfg.tx_bytes(),
                new_cfg.tx_ops(),
            )
            .map(|()| VmmData::Empty)
            .map_err(NetworkInterfaceError::DeviceUpdate)
            .map_err(VmmActionError::NetworkConfig)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::vmm_config::logger::LoggerLevel;
    use devices::virtio::VsockError;
    use seccomp::BpfProgramRef;
    use std::path::PathBuf;

    // Test-only equality: variants compare equal by kind, ignoring payloads.
    impl PartialEq for VmmActionError {
        fn eq(&self, other: &VmmActionError) -> bool {
            use VmmActionError::*;
            match (self, other) {
                // NOTE(review): there is no `(BalloonConfig(_), BalloonConfig(_))`
                // arm, so two BalloonConfig errors never compare equal — confirm
                // this is intentional.
                (BootSource(_), BootSource(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (CreateSnapshot(_), CreateSnapshot(_)) => true,
                (DriveConfig(_), DriveConfig(_)) => true,
                (InternalVmm(_), InternalVmm(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (LoadSnapshot(_), LoadSnapshot(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (LoadSnapshotNotAllowed, LoadSnapshotNotAllowed) => true,
                (Logger(_), Logger(_)) => true,
                (MachineConfig(_), MachineConfig(_)) => true,
                (Metrics(_), Metrics(_)) => true,
                (MmdsConfig(_), MmdsConfig(_)) => true,
                (NetworkConfig(_), NetworkConfig(_)) => true,
                (OperationNotSupportedPostBoot, OperationNotSupportedPostBoot) => true,
                (OperationNotSupportedPreBoot, OperationNotSupportedPreBoot) => true,
                (StartMicrovm(_), StartMicrovm(_)) => true,
                (VsockConfig(_), VsockConfig(_)) => true,
                _ => false,
            }
        }
    }

    // Mock `VmResources` used for testing.
#[derive(Debug, Default)] pub struct MockVmRes { vm_config: VmConfig, boot_cfg_set: bool, block_set: bool, vsock_set: bool, net_set: bool, mmds_set: bool, pub boot_timer: bool, // when `true`, all self methods are forced to fail pub force_errors: bool, } impl MockVmRes { pub fn vm_config(&self) -> &VmConfig { &self.vm_config } pub fn set_vm_config(&mut self, machine_config: &VmConfig) -> Result<(), VmConfigError> { if self.force_errors { return Err(VmConfigError::InvalidVcpuCount); } self.vm_config = machine_config.clone(); Ok(()) } pub fn set_boot_source( &mut self, _: BootSourceConfig, ) -> Result<(), BootSourceConfigError> { if self.force_errors { return Err(BootSourceConfigError::InvalidKernelPath( std::io::Error::from_raw_os_error(0), )); } self.boot_cfg_set = true; Ok(()) } pub fn set_block_device(&mut self, _: BlockDeviceConfig) -> Result<(), DriveError> { if self.force_errors { return Err(DriveError::RootBlockDeviceAlreadyAdded); } self.block_set = true; Ok(()) } pub fn build_net_device( &mut self, _: NetworkInterfaceConfig, ) -> Result<(), NetworkInterfaceError> { if self.force_errors { return Err(NetworkInterfaceError::GuestMacAddressInUse(String::new())); } self.net_set = true; Ok(()) } pub fn set_vsock_device(&mut self, _: VsockDeviceConfig) -> Result<(), VsockConfigError> { if self.force_errors { return Err(VsockConfigError::CreateVsockDevice( VsockError::BufDescMissing, )); } self.vsock_set = true; Ok(()) } pub fn set_mmds_config(&mut self, _: MmdsConfig) -> Result<(), MmdsConfigError> { if self.force_errors { return Err(MmdsConfigError::InvalidIpv4Addr); } self.mmds_set = true; Ok(()) } } // Mock `Vmm` used for testing. 
#[derive(Debug, Default)] pub struct MockVmm { pub pause_called: bool, pub resume_called: bool, #[cfg(target_arch = "x86_64")] pub send_ctrl_alt_del_called: bool, pub update_block_device_path_called: bool, pub update_net_rate_limiters_called: bool, // when `true`, all self methods are forced to fail pub force_errors: bool, } impl MockVmm { pub fn resume_vcpus(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::VcpuResume); } self.resume_called = true; Ok(()) } pub fn pause_vcpus(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::VcpuPause); } self.pause_called = true; Ok(()) } #[cfg(target_arch = "x86_64")] pub fn send_ctrl_alt_del(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::I8042Error( devices::legacy::I8042DeviceError::InternalBufferFull, )); } self.send_ctrl_alt_del_called = true; Ok(()) } pub fn update_block_device_path(&mut self, _: &str, _: String) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::DeviceManager( crate::device_manager::mmio::Error::IncorrectDeviceType, )); } self.update_block_device_path_called = true; Ok(()) } pub fn update_net_rate_limiters( &mut self, _: &str, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, ) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::DeviceManager( crate::device_manager::mmio::Error::IncorrectDeviceType, )); } self.update_net_rate_limiters_called = true; Ok(()) } } // Need to redefine this since the non-test one uses real VmResources // and real Vmm instead of our mocks. pub fn build_microvm_for_boot( _: &VmResources, _: &mut EventManager, _: BpfProgramRef, ) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> { Ok(Arc::new(Mutex::new(MockVmm::default()))) } #[cfg(target_arch = "x86_64")] // Need to redefine this since the non-test one uses real Vmm // instead of our mocks. 
pub fn create_snapshot( _: &mut Vmm, _: &CreateSnapshotParams, _: versionize::VersionMap, ) -> std::result::Result<(), CreateSnapshotError> { Ok(()) } #[cfg(target_arch = "x86_64")] // Need to redefine this since the non-test one uses real Vmm // instead of our mocks. pub fn load_snapshot( _: &mut EventManager, _: BpfProgramRef, _: &LoadSnapshotParams, _: versionize::VersionMap, ) -> Result<Arc<Mutex<Vmm>>, LoadSnapshotError> { Ok(Arc::new(Mutex::new(MockVmm::default()))) } fn default_preboot<'a>( vm_resources: &'a mut VmResources, event_manager: &'a mut EventManager, ) -> PrebootApiController<'a> { let instance_info = InstanceInfo { id: String::new(), started: false, vmm_version: String::new(), app_name: String::new(), }; PrebootApiController::new( BpfProgram::new(), instance_info, vm_resources, event_manager, ) } fn check_preboot_request<F>(request: VmmAction, check_success: F) where F: FnOnce(ActionResult, &MockVmRes), { let mut vm_resources = MockVmRes::default(); let mut evmgr = EventManager::new().unwrap(); let mut preboot = default_preboot(&mut vm_resources, &mut evmgr); let res = preboot.handle_preboot_request(request); check_success(res, &vm_resources); } // Forces error and validates error kind against expected. 
    fn check_preboot_request_err(request: VmmAction, expected_err: VmmActionError) {
        let mut vm_resources = MockVmRes::default();
        // `force_errors` makes every MockVmRes method fail; the request must
        // therefore surface an error, matched by variant (see PartialEq impl).
        vm_resources.force_errors = true;
        let mut evmgr = EventManager::new().unwrap();
        let mut preboot = default_preboot(&mut vm_resources, &mut evmgr);
        let err = preboot.handle_preboot_request(request).unwrap_err();
        assert_eq!(err, expected_err);
    }

    #[test]
    fn test_preboot_config_boot_src() {
        // Success path: the mock records that the boot config was set.
        let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vm_res.boot_cfg_set)
        });

        // Error path: forced failure maps to a BootSource action error.
        let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
        check_preboot_request_err(
            req,
            VmmActionError::BootSource(BootSourceConfigError::InvalidKernelCommandLine(
                String::new(),
            )),
        );
    }

    #[test]
    fn test_preboot_get_vm_config() {
        let req = VmmAction::GetVmConfiguration;
        let expected_cfg = VmConfig::default();
        check_preboot_request(req, |result, _| {
            assert_eq!(result, Ok(VmmData::MachineConfiguration(expected_cfg)))
        });

        // NOTE(review): this error case duplicates the one in
        // test_preboot_config_boot_src (it exercises ConfigureBootSource, not
        // GetVmConfiguration) — looks like a copy-paste leftover; confirm intent.
        let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
        check_preboot_request_err(
            req,
            VmmActionError::BootSource(BootSourceConfigError::InvalidKernelCommandLine(
                String::new(),
            )),
        );
    }

    #[test]
    fn test_preboot_set_vm_config() {
        let req = VmmAction::SetVmConfiguration(VmConfig::default());
        let expected_cfg = VmConfig::default();
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert_eq!(vm_res.vm_config, expected_cfg);
        });

        let req = VmmAction::SetVmConfiguration(VmConfig::default());
        check_preboot_request_err(
            req,
            VmmActionError::MachineConfig(VmConfigError::InvalidVcpuCount),
        );
    }

    #[test]
    fn test_preboot_insert_block_dev() {
        let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
            path_on_host: String::new(),
            is_root_device: false,
            partuuid: None,
            is_read_only: false,
            drive_id: String::new(),
            rate_limiter: None,
        });
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result,
            Ok(VmmData::Empty));
            assert!(vm_res.block_set)
        });

        let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
            path_on_host: String::new(),
            is_root_device: false,
            partuuid: None,
            is_read_only: false,
            drive_id: String::new(),
            rate_limiter: None,
        });
        check_preboot_request_err(
            req,
            VmmActionError::DriveConfig(DriveError::RootBlockDeviceAlreadyAdded),
        );
    }

    #[test]
    fn test_preboot_insert_net_dev() {
        // Success path: the mock records that a net device was built.
        let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
            iface_id: String::new(),
            host_dev_name: String::new(),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
            allow_mmds_requests: false,
        });
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vm_res.net_set)
        });

        let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
            iface_id: String::new(),
            host_dev_name: String::new(),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
            allow_mmds_requests: false,
        });
        check_preboot_request_err(
            req,
            VmmActionError::NetworkConfig(NetworkInterfaceError::GuestMacAddressInUse(
                String::new(),
            )),
        );
    }

    #[test]
    fn test_preboot_set_vsock_dev() {
        let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
            vsock_id: String::new(),
            guest_cid: 0,
            uds_path: String::new(),
        });
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vm_res.vsock_set)
        });

        let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
            vsock_id: String::new(),
            guest_cid: 0,
            uds_path: String::new(),
        });
        check_preboot_request_err(
            req,
            VmmActionError::VsockConfig(VsockConfigError::CreateVsockDevice(
                VsockError::BufDescMissing,
            )),
        );
    }

    #[test]
    fn test_preboot_set_mmds_config() {
        let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
        check_preboot_request(req, |result, vm_res| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vm_res.mmds_set)
        });

        let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
        check_preboot_request_err(
            req,
            VmmActionError::MmdsConfig(MmdsConfigError::InvalidIpv4Addr),
        );
    }

    #[test]
    fn test_preboot_disallowed() {
        // Every runtime-only action must be rejected while still pre-boot.
        check_preboot_request_err(
            VmmAction::FlushMetrics,
            VmmActionError::OperationNotSupportedPreBoot,
        );
        check_preboot_request_err(
            VmmAction::Pause,
            VmmActionError::OperationNotSupportedPreBoot,
        );
        check_preboot_request_err(
            VmmAction::Resume,
            VmmActionError::OperationNotSupportedPreBoot,
        );
        check_preboot_request_err(
            VmmAction::UpdateBlockDevicePath(String::new(), String::new()),
            VmmActionError::OperationNotSupportedPreBoot,
        );
        check_preboot_request_err(
            VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
                iface_id: String::new(),
                rx_rate_limiter: None,
                tx_rate_limiter: None,
            }),
            VmmActionError::OperationNotSupportedPreBoot,
        );
        #[cfg(target_arch = "x86_64")]
        check_preboot_request_err(
            VmmAction::CreateSnapshot(CreateSnapshotParams {
                snapshot_type: SnapshotType::Full,
                snapshot_path: PathBuf::new(),
                mem_file_path: PathBuf::new(),
                version: None,
            }),
            VmmActionError::OperationNotSupportedPreBoot,
        );
        #[cfg(target_arch = "x86_64")]
        check_preboot_request_err(
            VmmAction::SendCtrlAltDel,
            VmmActionError::OperationNotSupportedPreBoot,
        );
    }

    #[test]
    fn test_build_microvm_from_requests() {
        // Use atomics to be able to use them non-mutably in closures below.
        use std::sync::atomic::{AtomicUsize, Ordering};

        // Scripted request source: three disallowed runtime actions followed
        // by StartMicroVm, which must end the preboot loop.
        let cmd_step = AtomicUsize::new(0);
        let commands = || {
            cmd_step.fetch_add(1, Ordering::SeqCst);
            match cmd_step.load(Ordering::SeqCst) {
                1 => VmmAction::FlushMetrics,
                2 => VmmAction::Pause,
                3 => VmmAction::Resume,
                4 => VmmAction::StartMicroVm,
                _ => unreachable!(),
            }
        };

        // Matching response checker, advanced in lock-step with `commands`.
        let resp_step = AtomicUsize::new(0);
        let expected_resp = |resp: ActionResult| {
            resp_step.fetch_add(1, Ordering::SeqCst);
            let expect = match resp_step.load(Ordering::SeqCst) {
                1 => Err(VmmActionError::OperationNotSupportedPreBoot),
                2 => Err(VmmActionError::OperationNotSupportedPreBoot),
                3 => Err(VmmActionError::OperationNotSupportedPreBoot),
                4 => Ok(VmmData::Empty),
                _ => unreachable!(),
            };
            assert_eq!(resp, expect);
        };

        let (_vm_res, _vmm) = PrebootApiController::build_microvm_from_requests(
            vec![],
            &mut EventManager::new().unwrap(),
            InstanceInfo {
                id: String::new(),
                started: false,
                vmm_version: String::new(),
                app_name: String::new(),
            },
            commands,
            expected_resp,
            false,
        );
    }

    // Runs one runtime request against a fresh MockVmm and hands the result
    // plus the mock (to inspect its `*_called` flags) to `check_success`.
    fn check_runtime_request<F>(request: VmmAction, check_success: F)
    where
        F: FnOnce(ActionResult, &MockVmm),
    {
        let vmm = Arc::new(Mutex::new(MockVmm::default()));
        let mut runtime = RuntimeApiController::new(VmConfig::default(), vmm.clone());
        let res = runtime.handle_request(request);
        check_success(res, &vmm.lock().unwrap());
    }

    // Forces error and validates error kind against expected.
    fn check_runtime_request_err(request: VmmAction, expected_err: VmmActionError) {
        // `force_errors` makes every MockVmm method fail.
        let vmm = Arc::new(Mutex::new(MockVmm {
            force_errors: true,
            ..Default::default()
        }));
        let mut runtime = RuntimeApiController::new(VmConfig::default(), vmm);
        let err = runtime.handle_request(request).unwrap_err();
        assert_eq!(err, expected_err);
    }

    #[test]
    fn test_runtime_get_vm_config() {
        let req = VmmAction::GetVmConfiguration;
        check_runtime_request(req, |result, _| {
            assert_eq!(
                result,
                Ok(VmmData::MachineConfiguration(VmConfig::default()))
            );
        });
    }

    #[test]
    fn test_runtime_pause() {
        let req = VmmAction::Pause;
        check_runtime_request(req, |result, vmm| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vmm.pause_called)
        });

        let req = VmmAction::Pause;
        check_runtime_request_err(req, VmmActionError::InternalVmm(VmmError::VcpuPause));
    }

    #[test]
    fn test_runtime_resume() {
        let req = VmmAction::Resume;
        check_runtime_request(req, |result, vmm| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vmm.resume_called)
        });

        let req = VmmAction::Resume;
        check_runtime_request_err(req, VmmActionError::InternalVmm(VmmError::VcpuResume));
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_runtime_ctrl_alt_del() {
        let req = VmmAction::SendCtrlAltDel;
        check_runtime_request(req, |result, vmm| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vmm.send_ctrl_alt_del_called)
        });

        let req = VmmAction::SendCtrlAltDel;
        check_runtime_request_err(
            req,
            VmmActionError::InternalVmm(VmmError::I8042Error(
                devices::legacy::I8042DeviceError::InternalBufferFull,
            )),
        );
    }

    #[test]
    fn test_runtime_update_block_device_path() {
        let req = VmmAction::UpdateBlockDevicePath(String::new(), String::new());
        check_runtime_request(req, |result, vmm| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vmm.update_block_device_path_called)
        });

        let req = VmmAction::UpdateBlockDevicePath(String::new(), String::new());
        check_runtime_request_err(
            req,
            VmmActionError::DriveConfig(DriveError::DeviceUpdate(VmmError::DeviceManager(
                crate::device_manager::mmio::Error::IncorrectDeviceType,
            ))),
        );
    }

    #[test]
    fn test_runtime_update_net_rate_limiters() {
        let req = VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
            iface_id: String::new(),
            rx_rate_limiter: None,
            tx_rate_limiter: None,
        });
        check_runtime_request(req, |result, vmm| {
            assert_eq!(result, Ok(VmmData::Empty));
            assert!(vmm.update_net_rate_limiters_called)
        });

        let req = VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
            iface_id: String::new(),
            rx_rate_limiter: None,
            tx_rate_limiter: None,
        });
        check_runtime_request_err(
            req,
            VmmActionError::NetworkConfig(NetworkInterfaceError::DeviceUpdate(
                VmmError::DeviceManager(crate::device_manager::mmio::Error::IncorrectDeviceType),
            )),
        );
    }

    #[test]
    fn test_runtime_disallowed() {
        // Every preboot-only action must be rejected once the VM is running.
        check_runtime_request_err(
            VmmAction::ConfigureBootSource(BootSourceConfig::default()),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::ConfigureLogger(LoggerConfig {
                log_path: PathBuf::new(),
                level: LoggerLevel::Debug,
                show_level: false,
                show_log_origin: false,
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::ConfigureMetrics(MetricsConfig {
                metrics_path: PathBuf::new(),
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::InsertBlockDevice(BlockDeviceConfig {
                path_on_host: String::new(),
                is_root_device: false,
                partuuid: None,
                is_read_only: false,
                drive_id: String::new(),
                rate_limiter: None,
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
                iface_id: String::new(),
                host_dev_name: String::new(),
                guest_mac: None,
                rx_rate_limiter: None,
                tx_rate_limiter: None,
                allow_mmds_requests: false,
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::SetVsockDevice(VsockDeviceConfig {
                vsock_id: String::new(),
                guest_cid: 0,
                uds_path: String::new(),
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        // NOTE(review): this SetVsockDevice check is an exact duplicate of the
        // one immediately above — presumably a copy-paste leftover; confirm.
        check_runtime_request_err(
            VmmAction::SetVsockDevice(VsockDeviceConfig {
                vsock_id: String::new(),
                guest_cid: 0,
                uds_path: String::new(),
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        check_runtime_request_err(
            VmmAction::SetVmConfiguration(VmConfig::default()),
            VmmActionError::OperationNotSupportedPostBoot,
        );
        #[cfg(target_arch = "x86_64")]
        check_runtime_request_err(
            VmmAction::LoadSnapshot(LoadSnapshotParams {
                snapshot_path: PathBuf::new(),
                mem_file_path: PathBuf::new(),
                enable_diff_snapshots: false,
            }),
            VmmActionError::OperationNotSupportedPostBoot,
        );
    }

    // Applies `res` (a boot-path-configuring action) to a fresh preboot
    // controller and asserts that LoadSnapshot is rejected afterwards.
    #[cfg(target_arch = "x86_64")]
    fn verify_load_snap_disallowed_after_boot_resources(res: VmmAction, res_name: &str) {
        let mut vm_resources = MockVmRes::default();
        let mut evmgr = EventManager::new().unwrap();
        let mut preboot = default_preboot(&mut vm_resources, &mut evmgr);

        preboot.handle_preboot_request(res).unwrap();

        // Load snapshot should no longer be allowed.
        let req = VmmAction::LoadSnapshot(LoadSnapshotParams {
            snapshot_path: PathBuf::new(),
            mem_file_path: PathBuf::new(),
            enable_diff_snapshots: false,
        });
        let err = preboot.handle_preboot_request(req);
        assert_eq!(
            err,
            Err(VmmActionError::LoadSnapshotNotAllowed),
            "LoadSnapshot should be disallowed after {}",
            res_name
        );
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_preboot_load_snap_disallowed_after_boot_resources() {
        // Verify LoadSnapshot not allowed after configuring various boot-specific resources.
        let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
        verify_load_snap_disallowed_after_boot_resources(req, "ConfigureBootSource");

        let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
            path_on_host: String::new(),
            is_root_device: false,
            partuuid: None,
            is_read_only: false,
            drive_id: String::new(),
            rate_limiter: None,
        });
        verify_load_snap_disallowed_after_boot_resources(req, "InsertBlockDevice");

        let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
            iface_id: String::new(),
            host_dev_name: String::new(),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
            allow_mmds_requests: false,
        });
        verify_load_snap_disallowed_after_boot_resources(req, "InsertNetworkDevice");

        let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
            vsock_id: String::new(),
            guest_cid: 0,
            uds_path: String::new(),
        });
        verify_load_snap_disallowed_after_boot_resources(req, "SetVsockDevice");

        let req = VmmAction::SetVmConfiguration(VmConfig::default());
        verify_load_snap_disallowed_after_boot_resources(req, "SetVmConfiguration");

        let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
        verify_load_snap_disallowed_after_boot_resources(req, "SetMmdsConfiguration");
    }
}
rpc_interface: add balloon state to rpc iface Signed-off-by: George Pisaltu <084beef259e7feee20f091a1c4e2b24ee6aa34bb@amazon.com>
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 use std::fmt::{Display, Formatter}; use std::result; use std::sync::{Arc, Mutex}; #[cfg(not(test))] use super::{builder::build_microvm_for_boot, resources::VmResources, Vmm}; #[cfg(all(not(test), target_arch = "x86_64"))] use super::{persist::create_snapshot, persist::load_snapshot}; #[cfg(test)] use tests::{build_microvm_for_boot, MockVmRes as VmResources, MockVmm as Vmm}; #[cfg(all(test, target_arch = "x86_64"))] use tests::{create_snapshot, load_snapshot}; use super::Error as VmmError; use crate::builder::StartMicrovmError; #[cfg(target_arch = "x86_64")] use crate::persist::{CreateSnapshotError, LoadSnapshotError}; #[cfg(target_arch = "x86_64")] use crate::version_map::VERSION_MAP; use crate::vmm_config; use crate::vmm_config::balloon::{ BalloonConfigError, BalloonDeviceConfig, BalloonStats, BalloonUpdateConfig, BalloonUpdateStatsConfig, }; use crate::vmm_config::boot_source::{BootSourceConfig, BootSourceConfigError}; use crate::vmm_config::drive::{BlockDeviceConfig, DriveError}; use crate::vmm_config::instance_info::InstanceInfo; use crate::vmm_config::logger::{LoggerConfig, LoggerConfigError}; use crate::vmm_config::machine_config::{VmConfig, VmConfigError}; use crate::vmm_config::metrics::{MetricsConfig, MetricsConfigError}; use crate::vmm_config::mmds::{MmdsConfig, MmdsConfigError}; use crate::vmm_config::net::{ NetworkInterfaceConfig, NetworkInterfaceError, NetworkInterfaceUpdateConfig, }; #[cfg(target_arch = "x86_64")] use crate::vmm_config::snapshot::{CreateSnapshotParams, LoadSnapshotParams, SnapshotType}; use crate::vmm_config::vsock::{VsockConfigError, VsockDeviceConfig}; use logger::{info, update_metric_with_elapsed_time, METRICS}; use polly::event_manager::EventManager; use seccomp::BpfProgram; /// This enum represents the public interface of the VMM. Each action contains various /// bits of information (ids, paths, etc.). 
#[derive(PartialEq)]
pub enum VmmAction {
    /// Configure the boot source of the microVM using as input the `ConfigureBootSource`. This
    /// action can only be called before the microVM has booted.
    ConfigureBootSource(BootSourceConfig),
    /// Configure the logger using as input the `LoggerConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureLogger(LoggerConfig),
    /// Configure the metrics using as input the `MetricsConfig`. This action can only be called
    /// before the microVM has booted.
    ConfigureMetrics(MetricsConfig),
    /// Create a snapshot using as input the `CreateSnapshotParams`. This action can only be called
    /// after the microVM has booted and only when the microVM is in `Paused` state.
    #[cfg(target_arch = "x86_64")]
    CreateSnapshot(CreateSnapshotParams),
    /// Get the balloon device configuration.
    GetBalloonConfig,
    /// Get the balloon device latest statistics.
    GetBalloonStats,
    /// Get the configuration of the microVM.
    GetVmConfiguration,
    /// Flush the metrics. This action can only be called after the logger has been configured.
    FlushMetrics,
    /// Add a new block device or update one that already exists using the `BlockDeviceConfig` as
    /// input. This action can only be called before the microVM has booted.
    InsertBlockDevice(BlockDeviceConfig),
    /// Add a new network interface config or update one that already exists using the
    /// `NetworkInterfaceConfig` as input. This action can only be called before the microVM has
    /// booted.
    InsertNetworkDevice(NetworkInterfaceConfig),
    /// Load the microVM state using as input the `LoadSnapshotParams`. This action can only be
    /// called before the microVM has booted. If this action is successful, the loaded microVM will
    /// be in `Paused` state. Should change this state to `Resumed` for the microVM to run.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshot(LoadSnapshotParams),
    /// Pause the guest, by pausing the microVM VCPUs.
    Pause,
    /// Resume the guest, by resuming the microVM VCPUs.
    Resume,
    /// Set the balloon device or update the one that already exists using the
    /// `BalloonDeviceConfig` as input. This action can only be called before the microVM
    /// has booted.
    SetBalloonDevice(BalloonDeviceConfig),
    /// Set the MMDS configuration.
    SetMmdsConfiguration(MmdsConfig),
    /// Set the vsock device or update the one that already exists using the
    /// `VsockDeviceConfig` as input. This action can only be called before the microVM has
    /// booted.
    SetVsockDevice(VsockDeviceConfig),
    /// Set the microVM configuration (memory & vcpu) using `VmConfig` as input. This
    /// action can only be called before the microVM has booted.
    SetVmConfiguration(VmConfig),
    /// Launch the microVM. This action can only be called before the microVM has booted.
    StartMicroVm,
    /// Send CTRL+ALT+DEL to the microVM, using the i8042 keyboard function. If an AT-keyboard
    /// driver is listening on the guest end, this can be used to shut down the microVM gracefully.
    #[cfg(target_arch = "x86_64")]
    SendCtrlAltDel,
    /// Update the balloon size, after microVM start.
    UpdateBalloon(BalloonUpdateConfig),
    /// Update the balloon statistics polling interval, after microVM start.
    UpdateBalloonStatistics(BalloonUpdateStatsConfig),
    /// Update the path of an existing block device. The data associated with this variant
    /// represents the `drive_id` and the `path_on_host`.
    UpdateBlockDevicePath(String, String),
    /// Update a network interface, after microVM start. Currently, the only updatable properties
    /// are the RX and TX rate limiters.
    UpdateNetworkInterface(NetworkInterfaceUpdateConfig),
}

/// Wrapper for all errors associated with VMM actions.
#[derive(Debug)]
pub enum VmmActionError {
    /// The action `SetBalloonDevice` failed because of bad user input.
    BalloonConfig(BalloonConfigError),
    /// The action `ConfigureBootSource` failed because of bad user input.
    BootSource(BootSourceConfigError),
    /// The action `CreateSnapshot` failed.
    #[cfg(target_arch = "x86_64")]
    CreateSnapshot(CreateSnapshotError),
    /// One of the actions `InsertBlockDevice` or `UpdateBlockDevicePath`
    /// failed because of bad user input.
    DriveConfig(DriveError),
    /// Internal Vmm error.
    InternalVmm(VmmError),
    /// Loading a microVM snapshot failed.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshot(LoadSnapshotError),
    /// Loading a microVM snapshot not allowed after configuring boot-specific resources.
    #[cfg(target_arch = "x86_64")]
    LoadSnapshotNotAllowed,
    /// The action `ConfigureLogger` failed because of bad user input.
    Logger(LoggerConfigError),
    /// One of the actions `GetVmConfiguration` or `SetVmConfiguration` failed because of bad input.
    MachineConfig(VmConfigError),
    /// The action `ConfigureMetrics` failed because of bad user input.
    Metrics(MetricsConfigError),
    /// The action `SetMmdsConfiguration` failed because of bad user input.
    MmdsConfig(MmdsConfigError),
    /// The action `InsertNetworkDevice` failed because of bad user input.
    NetworkConfig(NetworkInterfaceError),
    /// The requested operation is not supported after starting the microVM.
    OperationNotSupportedPostBoot,
    /// The requested operation is not supported before starting the microVM.
    OperationNotSupportedPreBoot,
    /// The action `StartMicroVm` failed because of an internal error.
    StartMicrovm(StartMicrovmError),
    /// The action `SetVsockDevice` failed because of bad user input.
    VsockConfig(VsockConfigError),
}

impl Display for VmmActionError {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        use self::VmmActionError::*;
        write!(
            f,
            "{}",
            // Variants that wrap an inner error delegate to its Display; the
            // unit variants carry a fixed human-readable message.
            match self {
                BalloonConfig(err) => err.to_string(),
                BootSource(err) => err.to_string(),
                #[cfg(target_arch = "x86_64")]
                CreateSnapshot(err) => err.to_string(),
                DriveConfig(err) => err.to_string(),
                InternalVmm(err) => format!("Internal Vmm error: {}", err),
                #[cfg(target_arch = "x86_64")]
                LoadSnapshot(err) => format!("Load microVM snapshot error: {}", err),
                #[cfg(target_arch = "x86_64")]
                LoadSnapshotNotAllowed => {
                    "Loading a microVM snapshot not allowed after configuring boot-specific resources."
                        .to_string()
                }
                Logger(err) => err.to_string(),
                MachineConfig(err) => err.to_string(),
                Metrics(err) => err.to_string(),
                MmdsConfig(err) => err.to_string(),
                NetworkConfig(err) => err.to_string(),
                OperationNotSupportedPostBoot => {
                    "The requested operation is not supported after starting the microVM."
                        .to_string()
                }
                OperationNotSupportedPreBoot => {
                    "The requested operation is not supported before starting the microVM."
                        .to_string()
                }
                StartMicrovm(err) => err.to_string(),
                VsockConfig(err) => err.to_string(),
            }
        )
    }
}

/// The enum represents the response sent by the VMM in case of success. The response is either
/// empty, when no data needs to be sent, or an internal VMM structure.
#[derive(Debug, PartialEq)]
pub enum VmmData {
    /// The balloon device configuration.
    BalloonConfig(BalloonDeviceConfig),
    /// The latest balloon device statistics.
    BalloonStats(BalloonStats),
    /// No data is sent on the channel.
    Empty,
    /// The microVM configuration represented by `VmConfig`.
    MachineConfiguration(VmConfig),
}

/// Shorthand result type for external VMM commands.
pub type ActionResult = result::Result<VmmData, VmmActionError>;

/// Enables pre-boot setup and instantiation of a Firecracker VMM.
pub struct PrebootApiController<'a> {
    seccomp_filter: BpfProgram,
    instance_info: InstanceInfo,
    vm_resources: &'a mut VmResources,
    event_manager: &'a mut EventManager,
    // Holds the Vmm once `StartMicroVm`/`LoadSnapshot` succeeds; `None` until then.
    built_vmm: Option<Arc<Mutex<Vmm>>>,
    // Configuring boot specific resources will set this to true.
    // Loading from snapshot will not be allowed once this is true.
    boot_path: bool,
}

impl<'a> PrebootApiController<'a> {
    /// Constructor for the PrebootApiController.
    pub fn new(
        seccomp_filter: BpfProgram,
        instance_info: InstanceInfo,
        vm_resources: &'a mut VmResources,
        event_manager: &'a mut EventManager,
    ) -> PrebootApiController<'a> {
        PrebootApiController {
            seccomp_filter,
            instance_info,
            vm_resources,
            event_manager,
            built_vmm: None,
            boot_path: false,
        }
    }

    /// Default implementation for the function that builds and starts a microVM.
    /// It takes two closures `recv_req` and `respond` as params which abstract away
    /// the message transport.
    ///
    /// Returns a populated `VmResources` object and a running `Vmm` object.
    pub fn build_microvm_from_requests<F, G>(
        seccomp_filter: BpfProgram,
        event_manager: &mut EventManager,
        instance_info: InstanceInfo,
        recv_req: F,
        respond: G,
        boot_timer_enabled: bool,
    ) -> (VmResources, Arc<Mutex<Vmm>>)
    where
        F: Fn() -> VmmAction,
        G: Fn(ActionResult),
    {
        let mut vm_resources = VmResources::default();
        vm_resources.boot_timer = boot_timer_enabled;
        let mut preboot_controller = PrebootApiController::new(
            seccomp_filter,
            instance_info,
            &mut vm_resources,
            event_manager,
        );
        // Configure and start microVM through successive API calls.
        // Iterate through API calls to configure microVm.
        // The loop breaks when a microVM is successfully started, and a running Vmm is built.
        while preboot_controller.built_vmm.is_none() {
            // Get request, process it, send back the response.
            respond(preboot_controller.handle_preboot_request(recv_req()));
        }

        // Safe to unwrap because previous loop cannot end on None.
        let vmm = preboot_controller.built_vmm.unwrap();

        (vm_resources, vmm)
    }

    /// Handles the incoming preboot request and provides a response for it.
    /// Returns a built/running `Vmm` after handling a successful `StartMicroVm` request.
    pub fn handle_preboot_request(&mut self, request: VmmAction) -> ActionResult {
        use self::VmmAction::*;
        match request {
            // Supported operations allowed pre-boot.
            ConfigureBootSource(config) => self.set_boot_source(config),
            ConfigureLogger(logger_cfg) => {
                vmm_config::logger::init_logger(logger_cfg, &self.instance_info)
                    .map(|()| VmmData::Empty)
                    .map_err(VmmActionError::Logger)
            }
            ConfigureMetrics(metrics_cfg) => vmm_config::metrics::init_metrics(metrics_cfg)
                .map(|()| VmmData::Empty)
                .map_err(VmmActionError::Metrics),
            // Pre-boot, the balloon config is read from the resources builder,
            // not from a live device.
            GetBalloonConfig => self
                .vm_resources
                .balloon
                .get_config()
                .map(VmmData::BalloonConfig)
                .map_err(VmmActionError::BalloonConfig),
            GetVmConfiguration => Ok(VmmData::MachineConfiguration(
                self.vm_resources.vm_config().clone(),
            )),
            InsertBlockDevice(config) => self.insert_block_device(config),
            InsertNetworkDevice(config) => self.insert_net_device(config),
            #[cfg(target_arch = "x86_64")]
            LoadSnapshot(config) => self.load_snapshot(&config),
            SetBalloonDevice(balloon_cfg) => self
                .vm_resources
                .balloon
                .set(balloon_cfg)
                .map(|_| VmmData::Empty)
                .map_err(VmmActionError::BalloonConfig),
            SetVsockDevice(config) => self.set_vsock_device(config),
            SetVmConfiguration(config) => self.set_vm_config(config),
            SetMmdsConfiguration(config) => self.set_mmds_config(config),
            StartMicroVm => self.start_microvm(),
            // Operations not allowed pre-boot.
            FlushMetrics
            | Pause
            | Resume
            | GetBalloonStats
            | UpdateBalloon(_)
            | UpdateBalloonStatistics(_)
            | UpdateBlockDevicePath(_, _)
            | UpdateNetworkInterface(_) => Err(VmmActionError::OperationNotSupportedPreBoot),
            #[cfg(target_arch = "x86_64")]
            CreateSnapshot(_) | SendCtrlAltDel => {
                Err(VmmActionError::OperationNotSupportedPreBoot)
            }
        }
    }

    // Each setter below marks `boot_path = true`, which disallows a later
    // `LoadSnapshot` (see `load_snapshot`).
    fn insert_block_device(&mut self, cfg: BlockDeviceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_block_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::DriveConfig)
    }

    fn insert_net_device(&mut self, cfg: NetworkInterfaceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .build_net_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::NetworkConfig)
    }

    fn set_boot_source(&mut self, cfg: BootSourceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_boot_source(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::BootSource)
    }

    fn set_mmds_config(&mut self, cfg: MmdsConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_mmds_config(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MmdsConfig)
    }

    fn set_vm_config(&mut self, cfg: VmConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_vm_config(&cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::MachineConfig)
    }

    fn set_vsock_device(&mut self, cfg: VsockDeviceConfig) -> ActionResult {
        self.boot_path = true;
        self.vm_resources
            .set_vsock_device(cfg)
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::VsockConfig)
    }

    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    fn start_microvm(&mut self) -> ActionResult {
        build_microvm_for_boot(
            &self.vm_resources,
            &mut self.event_manager,
            &self.seccomp_filter,
        )
        .map(|vmm| {
            // Storing the Vmm ends the preboot request loop.
            self.built_vmm = Some(vmm);
            VmmData::Empty
        })
        .map_err(VmmActionError::StartMicrovm)
    }

    #[cfg(target_arch = "x86_64")]
    // On success, this command will end the pre-boot stage and this controller
    // will be replaced by a runtime controller.
    fn load_snapshot(&mut self, load_params: &LoadSnapshotParams) -> ActionResult {
        let load_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        // Restoring is only allowed if no boot-specific resource was configured.
        if self.boot_path {
            let err = VmmActionError::LoadSnapshotNotAllowed;
            info!("{}", err);
            return Err(err);
        }

        let loaded_vmm = load_snapshot(
            &mut self.event_manager,
            &self.seccomp_filter,
            load_params,
            VERSION_MAP.clone(),
        );

        let elapsed_time_us =
            update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_load_snapshot, load_start_us);
        info!("'load snapshot' VMM action took {} us.", elapsed_time_us);

        loaded_vmm
            .map(|vmm| {
                self.built_vmm = Some(vmm);
                VmmData::Empty
            })
            .map_err(VmmActionError::LoadSnapshot)
    }
}

/// Enables RPC interaction with a running Firecracker VMM.
pub struct RuntimeApiController {
    vmm: Arc<Mutex<Vmm>>,
    vm_config: VmConfig,
}

impl RuntimeApiController {
    /// Handles the incoming runtime `VmmAction` request and provides a response for it.
    pub fn handle_request(&mut self, request: VmmAction) -> ActionResult {
        use self::VmmAction::*;
        match request {
            // Supported operations allowed post-boot.
            #[cfg(target_arch = "x86_64")]
            CreateSnapshot(snapshot_create_cfg) => self.create_snapshot(&snapshot_create_cfg),
            FlushMetrics => self.flush_metrics(),
            // Post-boot, balloon state is read from / written to the live device
            // through the locked Vmm.
            GetBalloonConfig => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .balloon_config()
                .map(|state| VmmData::BalloonConfig(BalloonDeviceConfig::from(state)))
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            GetBalloonStats => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .latest_balloon_stats()
                .map(VmmData::BalloonStats)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            GetVmConfiguration => Ok(VmmData::MachineConfiguration(self.vm_config.clone())),
            Pause => self.pause(),
            Resume => self.resume(),
            #[cfg(target_arch = "x86_64")]
            SendCtrlAltDel => self.send_ctrl_alt_del(),
            UpdateBalloon(balloon_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_config(balloon_update.amount_mb)
                .map(|_| VmmData::Empty)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            UpdateBalloonStatistics(balloon_stats_update) => self
                .vmm
                .lock()
                .expect("Poisoned lock")
                .update_balloon_stats_config(balloon_stats_update.stats_polling_interval_s)
                .map(|_| VmmData::Empty)
                .map_err(|e| VmmActionError::BalloonConfig(BalloonConfigError::from(e))),
            UpdateBlockDevicePath(drive_id, new_path) => {
                self.update_block_device_path(&drive_id, new_path)
            }
            UpdateNetworkInterface(netif_update) => self.update_net_rate_limiters(netif_update),

            // Operations not allowed post-boot.
            ConfigureBootSource(_) | ConfigureLogger(_) | ConfigureMetrics(_)
            | InsertBlockDevice(_) | InsertNetworkDevice(_) | SetBalloonDevice(_)
            | SetVsockDevice(_) | SetMmdsConfiguration(_) | SetVmConfiguration(_)
            | StartMicroVm => Err(VmmActionError::OperationNotSupportedPostBoot),
            #[cfg(target_arch = "x86_64")]
            LoadSnapshot(_) => Err(VmmActionError::OperationNotSupportedPostBoot),
        }
    }

    /// Creates a new `RuntimeApiController`.
pub fn new(vm_config: VmConfig, vmm: Arc<Mutex<Vmm>>) -> Self { Self { vm_config, vmm } } /// Pauses the microVM by pausing the vCPUs. pub fn pause(&mut self) -> ActionResult { let pause_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic); self.vmm .lock() .expect("Poisoned lock") .pause_vcpus() .map_err(VmmActionError::InternalVmm)?; let elapsed_time_us = update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_pause_vm, pause_start_us); info!("'pause vm' VMM action took {} us.", elapsed_time_us); Ok(VmmData::Empty) } /// Resumes the microVM by resuming the vCPUs. pub fn resume(&mut self) -> ActionResult { let resume_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic); self.vmm .lock() .expect("Poisoned lock") .resume_vcpus() .map_err(VmmActionError::InternalVmm)?; let elapsed_time_us = update_metric_with_elapsed_time(&METRICS.latencies_us.vmm_resume_vm, resume_start_us); info!("'resume vm' VMM action took {} us.", elapsed_time_us); Ok(VmmData::Empty) } /// Write the metrics on user demand (flush). We use the word `flush` here to highlight the fact /// that the metrics will be written immediately. /// Defer to inner Vmm. We'll move to a variant where the Vmm simply exposes functionality like /// getting the dirty pages, and then we'll have the metrics flushing logic entirely on the outside. fn flush_metrics(&mut self) -> ActionResult { // FIXME: we're losing the bool saying whether metrics were actually written. METRICS .write() .map(|_| VmmData::Empty) .map_err(super::Error::Metrics) .map_err(VmmActionError::InternalVmm) } /// Injects CTRL+ALT+DEL keystroke combo to the inner Vmm (if present). 
    #[cfg(target_arch = "x86_64")]
    fn send_ctrl_alt_del(&mut self) -> ActionResult {
        self.vmm
            .lock()
            .expect("Poisoned lock")
            .send_ctrl_alt_del()
            .map(|()| VmmData::Empty)
            .map_err(VmmActionError::InternalVmm)
    }

    #[cfg(target_arch = "x86_64")]
    fn create_snapshot(&mut self, create_params: &CreateSnapshotParams) -> ActionResult {
        // The Vmm stays locked for the whole snapshot operation.
        let mut locked_vmm = self.vmm.lock().unwrap();
        let create_start_us = utils::time::get_time_us(utils::time::ClockType::Monotonic);

        create_snapshot(&mut locked_vmm, create_params, VERSION_MAP.clone())
            .map_err(VmmActionError::CreateSnapshot)?;

        // Only the full-snapshot latency metric exists in the visible match.
        match create_params.snapshot_type {
            SnapshotType::Full => {
                let elapsed_time_us = update_metric_with_elapsed_time(
                    &METRICS.latencies_us.vmm_full_create_snapshot,
                    create_start_us,
                );
                info!(
                    "'create full snapshot' VMM action took {} us.",
                    elapsed_time_us
                );
            }
        }

        Ok(VmmData::Empty)
    }

    /// Updates the path of the host file backing the emulated block device with id `drive_id`.
    /// We update the disk image on the device and its virtio configuration.
    fn update_block_device_path(&mut self, drive_id: &str, new_path: String) -> ActionResult {
        self.vmm
            .lock()
            .expect("Poisoned lock")
            .update_block_device_path(drive_id, new_path)
            .map(|()| VmmData::Empty)
            .map_err(DriveError::DeviceUpdate)
            .map_err(VmmActionError::DriveConfig)
    }

    /// Updates configuration for an emulated net device as described in `new_cfg`.
fn update_net_rate_limiters(&mut self, new_cfg: NetworkInterfaceUpdateConfig) -> ActionResult {
    // Pass the four token-bucket updates (rx/tx x bytes/ops) straight through to the Vmm.
    self.vmm
        .lock()
        .expect("Poisoned lock")
        .update_net_rate_limiters(
            &new_cfg.iface_id,
            new_cfg.rx_bytes(),
            new_cfg.rx_ops(),
            new_cfg.tx_bytes(),
            new_cfg.tx_ops(),
        )
        .map(|()| VmmData::Empty)
        .map_err(NetworkInterfaceError::DeviceUpdate)
        .map_err(VmmActionError::NetworkConfig)
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::vmm_config::logger::LoggerLevel;
    use devices::virtio::VsockError;
    use seccomp::BpfProgramRef;
    use std::path::PathBuf;

    // Test-only structural equality: two errors are equal when they are the same
    // variant; the wrapped payloads are deliberately ignored.
    impl PartialEq for VmmActionError {
        fn eq(&self, other: &VmmActionError) -> bool {
            use VmmActionError::*;
            match (self, other) {
                (BootSource(_), BootSource(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (CreateSnapshot(_), CreateSnapshot(_)) => true,
                (DriveConfig(_), DriveConfig(_)) => true,
                (InternalVmm(_), InternalVmm(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (LoadSnapshot(_), LoadSnapshot(_)) => true,
                #[cfg(target_arch = "x86_64")]
                (LoadSnapshotNotAllowed, LoadSnapshotNotAllowed) => true,
                (Logger(_), Logger(_)) => true,
                (MachineConfig(_), MachineConfig(_)) => true,
                (Metrics(_), Metrics(_)) => true,
                (MmdsConfig(_), MmdsConfig(_)) => true,
                (NetworkConfig(_), NetworkConfig(_)) => true,
                (OperationNotSupportedPostBoot, OperationNotSupportedPostBoot) => true,
                (OperationNotSupportedPreBoot, OperationNotSupportedPreBoot) => true,
                (StartMicrovm(_), StartMicrovm(_)) => true,
                (VsockConfig(_), VsockConfig(_)) => true,
                _ => false,
            }
        }
    }

    // Mock `VmResources` used for testing.
#[derive(Debug, Default)] pub struct MockVmRes { vm_config: VmConfig, boot_cfg_set: bool, block_set: bool, vsock_set: bool, net_set: bool, mmds_set: bool, pub boot_timer: bool, // when `true`, all self methods are forced to fail pub force_errors: bool, } impl MockVmRes { pub fn vm_config(&self) -> &VmConfig { &self.vm_config } pub fn set_vm_config(&mut self, machine_config: &VmConfig) -> Result<(), VmConfigError> { if self.force_errors { return Err(VmConfigError::InvalidVcpuCount); } self.vm_config = machine_config.clone(); Ok(()) } pub fn set_boot_source( &mut self, _: BootSourceConfig, ) -> Result<(), BootSourceConfigError> { if self.force_errors { return Err(BootSourceConfigError::InvalidKernelPath( std::io::Error::from_raw_os_error(0), )); } self.boot_cfg_set = true; Ok(()) } pub fn set_block_device(&mut self, _: BlockDeviceConfig) -> Result<(), DriveError> { if self.force_errors { return Err(DriveError::RootBlockDeviceAlreadyAdded); } self.block_set = true; Ok(()) } pub fn build_net_device( &mut self, _: NetworkInterfaceConfig, ) -> Result<(), NetworkInterfaceError> { if self.force_errors { return Err(NetworkInterfaceError::GuestMacAddressInUse(String::new())); } self.net_set = true; Ok(()) } pub fn set_vsock_device(&mut self, _: VsockDeviceConfig) -> Result<(), VsockConfigError> { if self.force_errors { return Err(VsockConfigError::CreateVsockDevice( VsockError::BufDescMissing, )); } self.vsock_set = true; Ok(()) } pub fn set_mmds_config(&mut self, _: MmdsConfig) -> Result<(), MmdsConfigError> { if self.force_errors { return Err(MmdsConfigError::InvalidIpv4Addr); } self.mmds_set = true; Ok(()) } } // Mock `Vmm` used for testing. 
#[derive(Debug, Default)] pub struct MockVmm { pub pause_called: bool, pub resume_called: bool, #[cfg(target_arch = "x86_64")] pub send_ctrl_alt_del_called: bool, pub update_block_device_path_called: bool, pub update_net_rate_limiters_called: bool, // when `true`, all self methods are forced to fail pub force_errors: bool, } impl MockVmm { pub fn resume_vcpus(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::VcpuResume); } self.resume_called = true; Ok(()) } pub fn pause_vcpus(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::VcpuPause); } self.pause_called = true; Ok(()) } #[cfg(target_arch = "x86_64")] pub fn send_ctrl_alt_del(&mut self) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::I8042Error( devices::legacy::I8042DeviceError::InternalBufferFull, )); } self.send_ctrl_alt_del_called = true; Ok(()) } pub fn update_block_device_path(&mut self, _: &str, _: String) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::DeviceManager( crate::device_manager::mmio::Error::IncorrectDeviceType, )); } self.update_block_device_path_called = true; Ok(()) } pub fn update_net_rate_limiters( &mut self, _: &str, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, _: rate_limiter::BucketUpdate, ) -> Result<(), VmmError> { if self.force_errors { return Err(VmmError::DeviceManager( crate::device_manager::mmio::Error::IncorrectDeviceType, )); } self.update_net_rate_limiters_called = true; Ok(()) } } // Need to redefine this since the non-test one uses real VmResources // and real Vmm instead of our mocks. pub fn build_microvm_for_boot( _: &VmResources, _: &mut EventManager, _: BpfProgramRef, ) -> Result<Arc<Mutex<Vmm>>, StartMicrovmError> { Ok(Arc::new(Mutex::new(MockVmm::default()))) } #[cfg(target_arch = "x86_64")] // Need to redefine this since the non-test one uses real Vmm // instead of our mocks. 
pub fn create_snapshot(
    _: &mut Vmm,
    _: &CreateSnapshotParams,
    _: versionize::VersionMap,
) -> std::result::Result<(), CreateSnapshotError> {
    // Mock: always succeeds without touching anything.
    Ok(())
}

#[cfg(target_arch = "x86_64")]
// Need to redefine this since the non-test one uses real Vmm
// instead of our mocks.
pub fn load_snapshot(
    _: &mut EventManager,
    _: BpfProgramRef,
    _: &LoadSnapshotParams,
    _: versionize::VersionMap,
) -> Result<Arc<Mutex<Vmm>>, LoadSnapshotError> {
    // Mock: always yields a fresh default MockVmm.
    Ok(Arc::new(Mutex::new(MockVmm::default())))
}

// Builds a `PrebootApiController` over the given mocks with a blank instance identity.
fn default_preboot<'a>(
    vm_resources: &'a mut VmResources,
    event_manager: &'a mut EventManager,
) -> PrebootApiController<'a> {
    let instance_info = InstanceInfo {
        id: String::new(),
        started: false,
        vmm_version: String::new(),
        app_name: String::new(),
    };
    PrebootApiController::new(
        BpfProgram::new(),
        instance_info,
        vm_resources,
        event_manager,
    )
}

// Runs `request` through a fresh preboot controller and hands the result plus the
// mock resources to `check_success` for assertions.
fn check_preboot_request<F>(request: VmmAction, check_success: F)
where
    F: FnOnce(ActionResult, &MockVmRes),
{
    let mut vm_resources = MockVmRes::default();
    let mut evmgr = EventManager::new().unwrap();
    let mut preboot = default_preboot(&mut vm_resources, &mut evmgr);
    let res = preboot.handle_preboot_request(request);
    check_success(res, &vm_resources);
}

// Forces error and validates error kind against expected.
// Same harness as `check_preboot_request`, but with the mock switched into
// failure mode; asserts the returned error variant matches `expected_err`.
fn check_preboot_request_err(request: VmmAction, expected_err: VmmActionError) {
    let mut vm_resources = MockVmRes::default();
    vm_resources.force_errors = true;
    let mut evmgr = EventManager::new().unwrap();
    let mut preboot = default_preboot(&mut vm_resources, &mut evmgr);
    let err = preboot.handle_preboot_request(request).unwrap_err();
    assert_eq!(err, expected_err);
}

#[test]
fn test_preboot_config_boot_src() {
    // Success path: the boot-source setter on the mock must be reached.
    let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vm_res.boot_cfg_set)
    });
    // Failure path: only the error variant is compared (see `PartialEq` above).
    let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
    check_preboot_request_err(
        req,
        VmmActionError::BootSource(BootSourceConfigError::InvalidKernelCommandLine(
            String::new(),
        )),
    );
}

#[test]
fn test_preboot_get_vm_config() {
    let req = VmmAction::GetVmConfiguration;
    let expected_cfg = VmConfig::default();
    check_preboot_request(req, |result, _| {
        assert_eq!(result, Ok(VmmData::MachineConfiguration(expected_cfg)))
    });
    // NOTE(review): this second half exercises `ConfigureBootSource`, not
    // `GetVmConfiguration` — it looks copy-pasted from the test above; confirm intent.
    let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
    check_preboot_request_err(
        req,
        VmmActionError::BootSource(BootSourceConfigError::InvalidKernelCommandLine(
            String::new(),
        )),
    );
}

#[test]
fn test_preboot_set_vm_config() {
    let req = VmmAction::SetVmConfiguration(VmConfig::default());
    let expected_cfg = VmConfig::default();
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert_eq!(vm_res.vm_config, expected_cfg);
    });
    let req = VmmAction::SetVmConfiguration(VmConfig::default());
    check_preboot_request_err(
        req,
        VmmActionError::MachineConfig(VmConfigError::InvalidVcpuCount),
    );
}

#[test]
fn test_preboot_insert_block_dev() {
    let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
        path_on_host: String::new(),
        is_root_device: false,
        partuuid: None,
        is_read_only: false,
        drive_id: String::new(),
        rate_limiter: None,
    });
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vm_res.block_set)
    });
    let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
        path_on_host: String::new(),
        is_root_device: false,
        partuuid: None,
        is_read_only: false,
        drive_id: String::new(),
        rate_limiter: None,
    });
    check_preboot_request_err(
        req,
        VmmActionError::DriveConfig(DriveError::RootBlockDeviceAlreadyAdded),
    );
}

#[test]
fn test_preboot_insert_net_dev() {
    let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
        iface_id: String::new(),
        host_dev_name: String::new(),
        guest_mac: None,
        rx_rate_limiter: None,
        tx_rate_limiter: None,
        allow_mmds_requests: false,
    });
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vm_res.net_set)
    });
    let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
        iface_id: String::new(),
        host_dev_name: String::new(),
        guest_mac: None,
        rx_rate_limiter: None,
        tx_rate_limiter: None,
        allow_mmds_requests: false,
    });
    check_preboot_request_err(
        req,
        VmmActionError::NetworkConfig(NetworkInterfaceError::GuestMacAddressInUse(
            String::new(),
        )),
    );
}

#[test]
fn test_preboot_set_vsock_dev() {
    let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
        vsock_id: String::new(),
        guest_cid: 0,
        uds_path: String::new(),
    });
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vm_res.vsock_set)
    });
    let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
        vsock_id: String::new(),
        guest_cid: 0,
        uds_path: String::new(),
    });
    check_preboot_request_err(
        req,
        VmmActionError::VsockConfig(VsockConfigError::CreateVsockDevice(
            VsockError::BufDescMissing,
        )),
    );
}

#[test]
fn test_preboot_set_mmds_config() {
    let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
    check_preboot_request(req, |result, vm_res| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vm_res.mmds_set)
    });
    let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
    check_preboot_request_err(
        req,
        VmmActionError::MmdsConfig(MmdsConfigError::InvalidIpv4Addr),
    );
}

// Every runtime-only action must be rejected before boot.
#[test]
fn test_preboot_disallowed() {
    check_preboot_request_err(
        VmmAction::FlushMetrics,
        VmmActionError::OperationNotSupportedPreBoot,
    );
    check_preboot_request_err(
        VmmAction::Pause,
        VmmActionError::OperationNotSupportedPreBoot,
    );
    check_preboot_request_err(
        VmmAction::Resume,
        VmmActionError::OperationNotSupportedPreBoot,
    );
    check_preboot_request_err(
        VmmAction::UpdateBlockDevicePath(String::new(), String::new()),
        VmmActionError::OperationNotSupportedPreBoot,
    );
    check_preboot_request_err(
        VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
            iface_id: String::new(),
            rx_rate_limiter: None,
            tx_rate_limiter: None,
        }),
        VmmActionError::OperationNotSupportedPreBoot,
    );
    #[cfg(target_arch = "x86_64")]
    check_preboot_request_err(
        VmmAction::CreateSnapshot(CreateSnapshotParams {
            snapshot_type: SnapshotType::Full,
            snapshot_path: PathBuf::new(),
            mem_file_path: PathBuf::new(),
            version: None,
        }),
        VmmActionError::OperationNotSupportedPreBoot,
    );
    #[cfg(target_arch = "x86_64")]
    check_preboot_request_err(
        VmmAction::SendCtrlAltDel,
        VmmActionError::OperationNotSupportedPreBoot,
    );
}

#[test]
fn test_build_microvm_from_requests() {
    // Use atomics to be able to use them non-mutably in closures below.
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Script of four requests: the first three are runtime-only (rejected
    // pre-boot), the fourth starts the microVM.
    let cmd_step = AtomicUsize::new(0);
    let commands = || {
        cmd_step.fetch_add(1, Ordering::SeqCst);
        match cmd_step.load(Ordering::SeqCst) {
            1 => VmmAction::FlushMetrics,
            2 => VmmAction::Pause,
            3 => VmmAction::Resume,
            4 => VmmAction::StartMicroVm,
            _ => unreachable!(),
        }
    };
    // Expected response for each step, checked in lockstep with `commands`.
    let resp_step = AtomicUsize::new(0);
    let expected_resp = |resp: ActionResult| {
        resp_step.fetch_add(1, Ordering::SeqCst);
        let expect = match resp_step.load(Ordering::SeqCst) {
            1 => Err(VmmActionError::OperationNotSupportedPreBoot),
            2 => Err(VmmActionError::OperationNotSupportedPreBoot),
            3 => Err(VmmActionError::OperationNotSupportedPreBoot),
            4 => Ok(VmmData::Empty),
            _ => unreachable!(),
        };
        assert_eq!(resp, expect);
    };
    let (_vm_res, _vmm) = PrebootApiController::build_microvm_from_requests(
        vec![],
        &mut EventManager::new().unwrap(),
        InstanceInfo {
            id: String::new(),
            started: false,
            vmm_version: String::new(),
            app_name: String::new(),
        },
        commands,
        expected_resp,
        false,
    );
}

// Runs `request` through a `RuntimeApiController` over a fresh MockVmm and hands
// the result plus the mock to `check_success` for assertions.
fn check_runtime_request<F>(request: VmmAction, check_success: F)
where
    F: FnOnce(ActionResult, &MockVmm),
{
    let vmm = Arc::new(Mutex::new(MockVmm::default()));
    let mut runtime = RuntimeApiController::new(VmConfig::default(), vmm.clone());
    let res = runtime.handle_request(request);
    check_success(res, &vmm.lock().unwrap());
}

// Forces error and validates error kind against expected.
// Same harness as `check_runtime_request`, but the mock is in failure mode;
// asserts the returned error variant matches `expected_err`.
fn check_runtime_request_err(request: VmmAction, expected_err: VmmActionError) {
    let vmm = Arc::new(Mutex::new(MockVmm {
        force_errors: true,
        ..Default::default()
    }));
    let mut runtime = RuntimeApiController::new(VmConfig::default(), vmm);
    let err = runtime.handle_request(request).unwrap_err();
    assert_eq!(err, expected_err);
}

#[test]
fn test_runtime_get_vm_config() {
    let req = VmmAction::GetVmConfiguration;
    check_runtime_request(req, |result, _| {
        assert_eq!(
            result,
            Ok(VmmData::MachineConfiguration(VmConfig::default()))
        );
    });
}

#[test]
fn test_runtime_pause() {
    let req = VmmAction::Pause;
    check_runtime_request(req, |result, vmm| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vmm.pause_called)
    });
    let req = VmmAction::Pause;
    check_runtime_request_err(req, VmmActionError::InternalVmm(VmmError::VcpuPause));
}

#[test]
fn test_runtime_resume() {
    let req = VmmAction::Resume;
    check_runtime_request(req, |result, vmm| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vmm.resume_called)
    });
    let req = VmmAction::Resume;
    check_runtime_request_err(req, VmmActionError::InternalVmm(VmmError::VcpuResume));
}

#[cfg(target_arch = "x86_64")]
#[test]
fn test_runtime_ctrl_alt_del() {
    let req = VmmAction::SendCtrlAltDel;
    check_runtime_request(req, |result, vmm| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vmm.send_ctrl_alt_del_called)
    });
    let req = VmmAction::SendCtrlAltDel;
    check_runtime_request_err(
        req,
        VmmActionError::InternalVmm(VmmError::I8042Error(
            devices::legacy::I8042DeviceError::InternalBufferFull,
        )),
    );
}

#[test]
fn test_runtime_update_block_device_path() {
    let req = VmmAction::UpdateBlockDevicePath(String::new(), String::new());
    check_runtime_request(req, |result, vmm| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vmm.update_block_device_path_called)
    });
    let req = VmmAction::UpdateBlockDevicePath(String::new(), String::new());
    check_runtime_request_err(
        req,
        VmmActionError::DriveConfig(DriveError::DeviceUpdate(VmmError::DeviceManager(
            crate::device_manager::mmio::Error::IncorrectDeviceType,
        ))),
    );
}

#[test]
fn test_runtime_update_net_rate_limiters() {
    let req = VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
        iface_id: String::new(),
        rx_rate_limiter: None,
        tx_rate_limiter: None,
    });
    check_runtime_request(req, |result, vmm| {
        assert_eq!(result, Ok(VmmData::Empty));
        assert!(vmm.update_net_rate_limiters_called)
    });
    let req = VmmAction::UpdateNetworkInterface(NetworkInterfaceUpdateConfig {
        iface_id: String::new(),
        rx_rate_limiter: None,
        tx_rate_limiter: None,
    });
    check_runtime_request_err(
        req,
        VmmActionError::NetworkConfig(NetworkInterfaceError::DeviceUpdate(
            VmmError::DeviceManager(crate::device_manager::mmio::Error::IncorrectDeviceType),
        )),
    );
}

// Every preboot-only action must be rejected after boot.
#[test]
fn test_runtime_disallowed() {
    check_runtime_request_err(
        VmmAction::ConfigureBootSource(BootSourceConfig::default()),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::ConfigureLogger(LoggerConfig {
            log_path: PathBuf::new(),
            level: LoggerLevel::Debug,
            show_level: false,
            show_log_origin: false,
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::ConfigureMetrics(MetricsConfig {
            metrics_path: PathBuf::new(),
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::InsertBlockDevice(BlockDeviceConfig {
            path_on_host: String::new(),
            is_root_device: false,
            partuuid: None,
            is_read_only: false,
            drive_id: String::new(),
            rate_limiter: None,
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
            iface_id: String::new(),
            host_dev_name: String::new(),
            guest_mac: None,
            rx_rate_limiter: None,
            tx_rate_limiter: None,
            allow_mmds_requests: false,
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::SetVsockDevice(VsockDeviceConfig {
            vsock_id: String::new(),
            guest_cid: 0,
            uds_path: String::new(),
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    // NOTE(review): this `SetVsockDevice` check duplicates the one immediately
    // above — likely a copy-paste leftover; confirm whether a different action
    // was intended here.
    check_runtime_request_err(
        VmmAction::SetVsockDevice(VsockDeviceConfig {
            vsock_id: String::new(),
            guest_cid: 0,
            uds_path: String::new(),
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    check_runtime_request_err(
        VmmAction::SetVmConfiguration(VmConfig::default()),
        VmmActionError::OperationNotSupportedPostBoot,
    );
    #[cfg(target_arch = "x86_64")]
    check_runtime_request_err(
        VmmAction::LoadSnapshot(LoadSnapshotParams {
            snapshot_path: PathBuf::new(),
            mem_file_path: PathBuf::new(),
            enable_diff_snapshots: false,
        }),
        VmmActionError::OperationNotSupportedPostBoot,
    );
}

// Applies `res` to a fresh preboot controller, then asserts that LoadSnapshot
// is subsequently rejected with `LoadSnapshotNotAllowed`.
#[cfg(target_arch = "x86_64")]
fn verify_load_snap_disallowed_after_boot_resources(res: VmmAction, res_name: &str) {
    let mut vm_resources = MockVmRes::default();
    let mut evmgr = EventManager::new().unwrap();
    let mut preboot = default_preboot(&mut vm_resources, &mut evmgr);
    preboot.handle_preboot_request(res).unwrap();
    // Load snapshot should no longer be allowed.
    let req = VmmAction::LoadSnapshot(LoadSnapshotParams {
        snapshot_path: PathBuf::new(),
        mem_file_path: PathBuf::new(),
        enable_diff_snapshots: false,
    });
    let err = preboot.handle_preboot_request(req);
    assert_eq!(
        err,
        Err(VmmActionError::LoadSnapshotNotAllowed),
        "LoadSnapshot should be disallowed after {}",
        res_name
    );
}

#[cfg(target_arch = "x86_64")]
#[test]
fn test_preboot_load_snap_disallowed_after_boot_resources() {
    // Verify LoadSnapshot not allowed after configuring various boot-specific resources.
    let req = VmmAction::ConfigureBootSource(BootSourceConfig::default());
    verify_load_snap_disallowed_after_boot_resources(req, "ConfigureBootSource");

    let req = VmmAction::InsertBlockDevice(BlockDeviceConfig {
        path_on_host: String::new(),
        is_root_device: false,
        partuuid: None,
        is_read_only: false,
        drive_id: String::new(),
        rate_limiter: None,
    });
    verify_load_snap_disallowed_after_boot_resources(req, "InsertBlockDevice");

    let req = VmmAction::InsertNetworkDevice(NetworkInterfaceConfig {
        iface_id: String::new(),
        host_dev_name: String::new(),
        guest_mac: None,
        rx_rate_limiter: None,
        tx_rate_limiter: None,
        allow_mmds_requests: false,
    });
    verify_load_snap_disallowed_after_boot_resources(req, "InsertNetworkDevice");

    let req = VmmAction::SetVsockDevice(VsockDeviceConfig {
        vsock_id: String::new(),
        guest_cid: 0,
        uds_path: String::new(),
    });
    verify_load_snap_disallowed_after_boot_resources(req, "SetVsockDevice");

    let req = VmmAction::SetVmConfiguration(VmConfig::default());
    verify_load_snap_disallowed_after_boot_resources(req, "SetVmConfiguration");

    let req = VmmAction::SetMmdsConfiguration(MmdsConfig { ipv4_address: None });
    verify_load_snap_disallowed_after_boot_resources(req, "SetMmdsConfiguration");
}
}
// Window-manager configuration loader: parses `~/.rustile` into key bindings.
extern crate x11;

use std::collections::HashMap;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::Lines;
use std::fs::File;
use std::path::PathBuf;
use std::process::Command;
use std::ffi;
use std::env;
use x11::xlib;
use std::boxed::Box;

use super::layout;
use super::handler::{ KeyBind, Handler };
use super::handler::{ ExecHandler, LayoutHandler, WorkspaceHandler, WindowToWorkspaceHandler, WindowFocusHandler };

// Parsed configuration: the active modifier mask and the key-binding table.
pub struct Config {
    mod_key: u32,
    pub bindsyms: HashMap<KeyBind, Box<Handler>>,
}

#[test]
fn test_map() {
    let mut bindsyms: HashMap<KeyBind, i32> = HashMap::new();
    let b = KeyBind { key: 0, mask: 0 };
    bindsyms.insert(b, 1);
    let c = KeyBind { key: 0, mask: 0 };
    // NOTE(review): `assert!`'s second argument is a panic message; passing `true`
    // here looks like a typo for a plain `assert!` or an `assert_eq!` — confirm.
    assert!(bindsyms.contains_key(&c), true);
}

impl Config {
    // Loads `$HOME/.rustile` line by line; falls back to defaults when HOME or
    // the file is missing.
    pub fn load() -> Config {
        let mut config = Config::default();
        let home = match env::var_os("HOME") {
            Some(v) => v,
            None => {
                // can't find HOME, return default config
                return config;
            }
        };
        let mut pathbuf = PathBuf::from(home);
        pathbuf.push(".rustile");
        match File::open(pathbuf.as_path()) {
            Ok(f) => {
                let buf = BufReader::new(f);
                for line in buf.lines() {
                    match line {
                        Ok(s) => {
                            config.read_line(s);
                        }
                        Err(err) => {
                            // do nothing
                        }
                    }
                }
            }
            Err(err) => {
                // use default config
                println!("no config file");
            }
        }
        config
    }

    // Default config: Mod4 (Super) modifier, no bindings.
    pub fn default() -> Config {
        let mut config = Config {
            mod_key: xlib::Mod4Mask,
            bindsyms: HashMap::new(),
        };
        // let dmenu = vec!["$mod+c", "exec", "dmenu_run"];
        // let split = vec!["$mod+b", "layout", "split"];
        // config.bind_sym(&dmenu);
        // config.bind_sym(&split);
        config
    }

    // Dispatches one config line on its first token: set / exec / bind.
    fn read_line(&mut self, line: String) {
        // # is comment
        if !line.starts_with("#") {
            let tokens: Vec<&str> = line.split(' ').collect();
            let (cmd, args) = tokens.split_at(1);
            match cmd[0] {
                "set" => {
                    if args.len() > 1 {
                        debug!("set var");
                        self.set_var(args[0], args[1]);
                    }
                }
                "exec" => {
                    // `exec` at top level spawns the command immediately.
                    let mut handler = ExecHandler::new(args);
                    handler.cmd.spawn();
                }
                "bind" => {
                    self.bind_sym(args);
                }
                _ => {
                    // not supported cmd, ignore
                }
            }
        }
    }

    // Parses `bind <keys> <action> [args...]` and installs the matching handler.
    fn bind_sym(&mut self, args: &[&str]) {
        let (keyseq, cmd) = args.split_at(1);
        let keys: Vec<&str> = keyseq[0].split("+").collect();
        let bind = KeyBind::build(self.mod_key, &keys);
        let (name, args) = cmd.split_at(1);
        match name[0] {
            "exec" => {
                println!("exec");
                let handler = ExecHandler::new(args);
                self.bindsyms.insert(bind, Box::new(handler));
            }
            "layout" => {
                println!("layout");
                let layout = args[0];
                match layout {
                    "split" => {
                        let handler = LayoutHandler::new(layout::Type::Tiling);
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    _ => {}
                }
            }
            "workspace" => {
                println!("workspace");
                // Only the first character of the argument names the workspace.
                let c = args[0].chars().nth(0);
                match c {
                    Some(v) => {
                        let handler = WorkspaceHandler {
                            key: v,
                        };
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    None => {}
                }
            }
            "window" => {
                let c = args[0].chars().nth(0);
                match c {
                    Some(v) => {
                        println!("window");
                        let handler = WindowToWorkspaceHandler {
                            key: v,
                        };
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    None => {}
                }
            }
            "focus" => {
                let direction = match args[0] {
                    "left" => layout::Direction::Left,
                    "right" => layout::Direction::Right,
                    "up" => layout::Direction::Up,
                    "down" => layout::Direction::Down,
                    _ => layout::Direction::Right
                };
                let handler = WindowFocusHandler { direction: direction };
                self.bindsyms.insert(bind, Box::new(handler));
            }
            "kill" => {
                // NOTE(review): `WindowCloseHandler` is not in this revision's import
                // list above, so this arm cannot resolve until the later revision
                // adds it to the `use super::handler::{...}` list.
                let handler = WindowCloseHandler;
                self.bindsyms.insert(bind, Box::new(handler));
            }
            _ => {}
        };
    }

    // Handles `set $mod <mask>`: maps the name to an xlib modifier mask.
    fn set_var(&mut self, key: &str, val: &str) {
        match key {
            "$mod" => {
                self.mod_key = match val {
                    "Shift" => xlib::ShiftMask,
                    "Ctrl" => xlib::ControlMask,
                    "Mod1" => xlib::Mod1Mask,
                    "Mod2" => xlib::Mod2Mask,
                    "Mod3" => xlib::Mod3Mask,
                    "Mod4" => xlib::Mod4Mask,
                    "Mod5" => xlib::Mod5Mask,
                    _ => xlib::Mod4Mask
                };
            }
            _ => {}
        }
    }
}

// NOTE(review): the bare text below appears to be an embedded VCS commit message
// separating two revisions of this same file — it is not Rust and will not parse.
reorganize code

// Second revision of the same module: imports merged (now includes
// `WindowCloseHandler`), `debug!` call dropped, test moved to the bottom.
extern crate x11;

use std::collections::HashMap;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::Lines;
use std::fs::File;
use std::path::PathBuf;
use std::process::Command;
use std::ffi;
use std::env;
use x11::xlib;
use std::boxed::Box;

use super::layout;
use super::handler::{
    KeyBind,
    Handler,
    ExecHandler,
    LayoutHandler,
    WorkspaceHandler,
    WindowToWorkspaceHandler,
    WindowFocusHandler,
    WindowCloseHandler};

pub struct Config {
    mod_key: u32,
    pub bindsyms: HashMap<KeyBind, Box<Handler>>,
}

impl Config {
    // Loads `$HOME/.rustile`; falls back to defaults when HOME or the file is missing.
    pub fn load() -> Config {
        let mut config = Config::default();
        let home = match env::var_os("HOME") {
            Some(v) => v,
            None => {
                // can't find HOME, return default config
                return config;
            }
        };
        let mut pathbuf = PathBuf::from(home);
        pathbuf.push(".rustile");
        match File::open(pathbuf.as_path()) {
            Ok(f) => {
                let buf = BufReader::new(f);
                for line in buf.lines() {
                    match line {
                        Ok(s) => {
                            config.read_line(s);
                        }
                        Err(err) => {
                            // do nothing
                        }
                    }
                }
            }
            Err(err) => {
                // use default config
                println!("no config file");
            }
        }
        config
    }

    // Default config: Mod4 (Super) modifier, no bindings.
    pub fn default() -> Config {
        let mut config = Config {
            mod_key: xlib::Mod4Mask,
            bindsyms: HashMap::new(),
        };
        // let dmenu = vec!["$mod+c", "exec", "dmenu_run"];
        // let split = vec!["$mod+b", "layout", "split"];
        // config.bind_sym(&dmenu);
        // config.bind_sym(&split);
        config
    }

    // Dispatches one config line on its first token: set / exec / bind.
    fn read_line(&mut self, line: String) {
        // # is comment
        if !line.starts_with("#") {
            let tokens: Vec<&str> = line.split(' ').collect();
            let (cmd, args) = tokens.split_at(1);
            match cmd[0] {
                "set" => {
                    if args.len() > 1 {
                        self.set_var(args[0], args[1]);
                    }
                }
                "exec" => {
                    let mut handler = ExecHandler::new(args);
                    handler.cmd.spawn();
                }
                "bind" => {
                    self.bind_sym(args);
                }
                _ => {
                    // not supported cmd, ignore
                }
            }
        }
    }

    // Parses `bind <keys> <action> [args...]` and installs the matching handler.
    fn bind_sym(&mut self, args: &[&str]) {
        let (keyseq, cmd) = args.split_at(1);
        let keys: Vec<&str> = keyseq[0].split("+").collect();
        let bind = KeyBind::build(self.mod_key, &keys);
        let (name, args) = cmd.split_at(1);
        match name[0] {
            "exec" => {
                println!("exec");
                let handler = ExecHandler::new(args);
                self.bindsyms.insert(bind, Box::new(handler));
            }
            "layout" => {
                println!("layout");
                let layout = args[0];
                match layout {
                    "split" => {
                        let handler = LayoutHandler::new(layout::Type::Tiling);
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    _ => {}
                }
            }
            "workspace" => {
                println!("workspace");
                let c = args[0].chars().nth(0);
                match c {
                    Some(v) => {
                        let handler = WorkspaceHandler {
                            key: v,
                        };
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    None => {}
                }
            }
            "window" => {
                let c = args[0].chars().nth(0);
                match c {
                    Some(v) => {
                        println!("window");
                        let handler = WindowToWorkspaceHandler {
                            key: v,
                        };
                        self.bindsyms.insert(bind, Box::new(handler));
                    }
                    None => {}
                }
            }
            "focus" => {
                let direction = match args[0] {
                    "left" => layout::Direction::Left,
                    "right" => layout::Direction::Right,
                    "up" => layout::Direction::Up,
                    "down" => layout::Direction::Down,
                    _ => layout::Direction::Right
                };
                let handler = WindowFocusHandler { direction: direction };
                self.bindsyms.insert(bind, Box::new(handler));
            }
            "kill" => {
                let handler = WindowCloseHandler;
                self.bindsyms.insert(bind, Box::new(handler));
            }
            _ => {}
        };
    }

    // Handles `set $mod <mask>`: maps the name to an xlib modifier mask.
    fn set_var(&mut self, key: &str, val: &str) {
        match key {
            "$mod" => {
                self.mod_key = match val {
                    "Shift" => xlib::ShiftMask,
                    "Ctrl" => xlib::ControlMask,
                    "Mod1" => xlib::Mod1Mask,
                    "Mod2" => xlib::Mod2Mask,
                    "Mod3" => xlib::Mod3Mask,
                    "Mod4" => xlib::Mod4Mask,
                    "Mod5" => xlib::Mod5Mask,
                    _ => xlib::Mod4Mask
                };
            }
            _ => {}
        }
    }
}

#[test]
fn test_map() {
    let mut bindsyms: HashMap<KeyBind, i32> = HashMap::new();
    let b = KeyBind { key: 0, mask: 0 };
    bindsyms.insert(b, 1);
    let c = KeyBind { key: 0, mask: 0 };
    // NOTE(review): same `assert!(..., true)` typo as in the earlier revision.
    assert!(bindsyms.contains_key(&c), true);
}
use std::io; use std::borrow; use std::uint; use std::result; use std::str; use std::vec; use std::num::{One, Zero, ToStrRadix, IntConvertible}; use std::hashmap::HashMap; use std::managed; use bounded_iterator::BoundedIterator; use extra::bigint::BigInt; use extra::complex::Cmplx; use datum::*; use primitive::*; use numeric::*; use rational::Rational; use stack::*; use parser::Parser; enum RuntimeData { RUndef, RPrim(PFunc), RProc(~[@str], Option<@str>, ~[@LDatum<RuntimeData>], @mut Stack<HashMap<@str, @LDatum<RuntimeData>>>), } impl Clone for RuntimeData { fn clone(&self) -> RuntimeData { match self { &RUndef => RUndef, &RPrim(f) => RPrim(f), &RProc(ref args, ref vargs, ref body, ref env) => { let cloneargs = do args.map |&arg| { arg }; RProc(cloneargs, *vargs, body.clone(), *env) }, } } } fn eq(lhs: &RuntimeData, rhs: &RuntimeData) -> bool { match (lhs, rhs) { (&RPrim(l), &RPrim(r)) => l == r, (&RProc(_,_,_,_), &RProc(_,_,_,_)) => lhs == rhs, _ => false, } } impl Eq for RuntimeData { fn eq(&self, other: &RuntimeData) -> bool { eq(self, other) } fn ne(&self, other: &RuntimeData) -> bool { !eq(self, other) } } fn data_to_str(data: &RuntimeData) -> ~str { match *data { RUndef => ~"<undefined>", RPrim(f) => fmt!("<primitive:%s>", f.to_str()), RProc(_, _, _, _) => fmt!("<procedure 0x%08x>", borrow::to_uint(data)), } } impl ToStr for RuntimeData { fn to_str(&self) -> ~str { data_to_str(self) } } type RDatum = LDatum<RuntimeData>; pub trait DatumConv { fn from_datum<R>(@RDatum, &fn(&Self) -> R) -> Option<R>; fn move_datum(Self) -> @RDatum; fn typename() -> ~str; } impl DatumConv for @RDatum { fn from_datum<R>(datum: @RDatum, op: &fn(&@RDatum) -> R) -> Option<R> { Some(op(&datum)) } fn move_datum(x: @RDatum) -> @RDatum { x } fn typename() -> ~str { ~"datum" } } impl DatumConv for RuntimeData { fn from_datum<R>(datum: @RDatum, op: &fn(&RuntimeData) -> R) -> Option<R> { match datum { @LExt(ref r) => Some(op(r)), _ => None, } } fn move_datum(x: RuntimeData) -> @RDatum { 
@LExt(x) }
    fn typename() -> ~str { ~"procedure" }
}

/// Any numeric datum, exact or inexact.
impl DatumConv for LNumeric {
    fn from_datum<R>(datum: @RDatum, op: &fn(&LNumeric) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: LNumeric) -> @RDatum { @LNum(x) }
    fn typename() -> ~str { ~"number" }
}

/// Inexact (floating-point) complex numbers.
impl DatumConv for Cmplx<f64> {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Cmplx<f64>) -> R) -> Option<R> {
        match datum {
            @LNum(NInexact(ref n)) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: Cmplx<f64>) -> @RDatum { @LNum(NInexact(x)) }
    fn typename() -> ~str { ~"inexact number" }
}

/// Inexact reals: inexact numbers with a zero imaginary part.
impl DatumConv for f64 {
    fn from_datum<R>(datum: @RDatum, op: &fn(&f64) -> R) -> Option<R> {
        match datum {
            @LNum(NInexact(ref n)) if n.im.is_zero() => Some(op(&n.re)),
            _ => None,
        }
    }
    fn move_datum(x: f64) -> @RDatum { @LNum(from_f64(x)) }
    fn typename() -> ~str { ~"inexact real number" }
}

/// Exact (rational) complex numbers.
impl DatumConv for Cmplx<Rational> {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Cmplx<Rational>) -> R) -> Option<R> {
        match datum {
            @LNum(NExact(ref n)) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: Cmplx<Rational>) -> @RDatum { @LNum(NExact(x)) }
    fn typename() -> ~str { ~"exact number" }
}

/// Real numbers of either exactness (extraction via `get_real`).
impl DatumConv for LReal {
    fn from_datum<R>(datum: @RDatum, op: &fn(&LReal) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => match get_real(n) {
                Some(ref r) => Some(op(r)),
                None => None,
            },
            _ => None,
        }
    }
    fn move_datum(x: LReal) -> @RDatum { @LNum(from_real(&x)) }
    fn typename() -> ~str { ~"real number" }
}

/// Exact reals: exact numbers with a zero imaginary part.
impl DatumConv for Rational {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Rational) -> R) -> Option<R> {
        match datum {
            @LNum(NExact(ref n)) if n.im.is_zero() => {
                Some(op(&n.re))
            },
            _ => None,
        }
    }
    fn move_datum(x: Rational) -> @RDatum { @LNum(from_rational(&x)) }
    fn typename() -> ~str { ~"rational number" }
}

/// Exact integers.
///
/// NOTE(review): the integer test here (and in the `uint` impl that
/// follows) checks that the rational's *numerator* equals one and then
/// extracts its *denominator* — the reverse of the conventional
/// naming. Presumably this project's `Rational` swaps the two terms;
/// confirm against rational.rs before touching this.
impl DatumConv for BigInt {
    fn from_datum<R>(datum: @RDatum, op: &fn(&BigInt) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => match *n {
                NExact( Cmplx{ re: ref re, im: ref im } ) =>
                    if im.is_zero() &&
*re.numerator() == One::one() { Some(op(re.denominator())) } else { None }, NInexact(_) => None, }, _ => None, } } fn move_datum(x: BigInt) -> @RDatum { @LNum(from_bigint(x)) } fn typename() -> ~str { ~"integer" } } impl DatumConv for uint { fn from_datum<R>(datum: @RDatum, op: &fn(&uint) -> R) -> Option<R> { let max_int:BigInt = IntConvertible::from_int(Bounded::max_value::<int>()); match datum { @LNum(NExact( Cmplx{ re: ref re, im: ref im } )) => if im.is_zero() && !re.is_negative() && *re.numerator() == One::one() { let d = re.denominator(); if *d <= max_int { Some(op(&(d.to_int() as uint))) } else { None } } else { None }, _ => None, } } fn move_datum(x: uint) -> @RDatum { @LNum(from_uint(x)) } fn typename() -> ~str { ~"unsigned integer" } } impl DatumConv for (@RDatum, @RDatum) { fn from_datum<R>(datum: @RDatum, op: &fn(&(@RDatum, @RDatum)) -> R) -> Option<R> { match datum { @LCons(a, b) => Some(op(&(a, b))), _ => None, } } fn move_datum((x, y): (@RDatum, @RDatum)) -> @RDatum { @LCons(x, y) } fn typename() -> ~str { ~"cons" } } impl DatumConv for bool { fn from_datum<R>(datum: @RDatum, op: &fn(&bool) -> R) -> Option<R> { match datum { @LBool(ref b) => Some(op(b)), _ => None, } } fn move_datum(x: bool) -> @RDatum { @LBool(x) } fn typename() -> ~str { ~"boolean" } } impl DatumConv for char { fn from_datum<R>(datum: @RDatum, op: &fn(&char) -> R) -> Option<R> { match datum { @LChar(ref c) => Some(op(c)), _ => None, } } fn move_datum(x: char) -> @RDatum { @LChar(x) } fn typename() -> ~str { ~"character" } } impl DatumConv for () { fn from_datum<R>(datum: @RDatum, op: &fn(&()) -> R) -> Option<R> { match datum { @LNil => Some(op(&())), _ => None, } } fn move_datum(_: ()) -> @RDatum { @LNil } fn typename() -> ~str { ~"()" } } struct GetList { list: ~[@RDatum] } impl DatumConv for GetList { #[inline] fn from_datum<R>(datum: @RDatum, op: &fn(&GetList) -> R) -> Option<R> { match datum.to_list() { Some(l) => Some(op(&GetList{ list: l })), _ => None, } } #[inline] fn 
move_datum(x: GetList) -> @RDatum { LDatum::from_list(x.list) } fn typename() -> ~str { ~"list" } } impl DatumConv for ~str { fn from_datum<R>(datum: @RDatum, op: &fn(&~str) -> R) -> Option<R> { match datum { @LString(ref s) => Some(op(s)), _ => None, } } fn move_datum(x: ~str) -> @RDatum { @LString(x) } fn typename() -> ~str { ~"string" } } struct Runtime { stdin: @Reader, stdout: @Writer, stderr: @Writer, env: @mut Stack<HashMap<@str, @RDatum>>, global: HashMap<@str, Either<@RDatum, PrimSyntax>>, qq_lvl: uint, } #[deriving(Eq)] enum RuntimeError { UnboundVariable(@str), RefMacro(@str), NotCallable, NotList, ArgNumError(uint, Option<uint>, uint), TypeError, DivideByZeroError, NilEval, BadSyntax(PrimSyntax, ~str), ParseError(uint, uint, ~str), RangeError, } impl ToStr for RuntimeError { fn to_str(&self) -> ~str { err_to_str(self) } } priv fn err_to_str(&err: &RuntimeError) -> ~str { match err { UnboundVariable(name) => ~"unbound variable: " + name, RefMacro(name) => ~"cannot reference macro name: " + name, NotCallable => ~"not callable", NotList => ~"not list", ArgNumError(min, Some(max), argnum) => { if min == max { fmt!("expected %u arguments, but found %u arguments", min, argnum) } else { fmt!("expected %u-%u arguments, but found %u arguments", min, max, argnum) } }, ArgNumError(expected, None, argnum) => { fmt!("expected %u or more arguments, but found %u arguments", expected, argnum) }, TypeError => ~"type error", DivideByZeroError => ~"divide by zero", NilEval => ~"() cannot be evaluated", BadSyntax(syn, reason) => ~"bad syntax for " + syn.to_str() + ": " + reason, ParseError(line, col, reason) => fmt!("failed to parse: %u:%u: %s", line, col, reason), RangeError => ~"index out of range", } } fn load_prelude() -> HashMap<@str, Either<@RDatum, PrimSyntax>> { let mut map = HashMap::new(); let mut prim_iter = BoundedIterator::new::<PFunc>(); for prim_iter.advance |prim:PFunc| { let key = prim.to_str(); map.insert(key.to_managed(), Left(@LExt(RPrim(prim)))); } let 
mut syntax_iter = BoundedIterator::new::<PrimSyntax>(); for syntax_iter.advance |syntax:PrimSyntax| { let key = syntax.to_str(); map.insert(key.to_managed(), Right(syntax)); } map.insert("pi".to_managed(), Left(@LNum(inexact(Real::pi(), 0f64)))); map } priv fn call_prim1(args: &[@RDatum], op: &fn(@RDatum) -> Result<@RDatum, RuntimeError>) -> Result<@RDatum, RuntimeError> { if args.len() == 1 { op(args[0]) } else { Err(ArgNumError(1, Some(1), args.len())) } } priv fn call_prim2(args: &[@RDatum], op: &fn(@RDatum, @RDatum) -> Result<@RDatum, RuntimeError>) -> Result<@RDatum, RuntimeError> { if args.len() == 2 { op(args[0], args[1]) } else { Err(ArgNumError(2, Some(2), args.len())) } } priv fn typecheck<A: DatumConv>(args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match args { [arg] => { let res = do DatumConv::from_datum::<A, ()>(arg) |_| { () }; match res { Some(_) => Ok(@LBool(true)), None => Ok(@LBool(false)), } }, _ => Err(ArgNumError(1, Some(1), args.len())), } } priv fn call_tc1<A: DatumConv, R: DatumConv> ( args: &[@RDatum], op: &fn(&A) -> R ) -> Result<@RDatum, RuntimeError> { match args { [arg] => { let res = DatumConv::from_datum::<A, R>(arg, op); match res { Some(x) => Ok(DatumConv::move_datum(x)), None => Err(TypeError), } }, _ => Err(ArgNumError(1, Some(1), args.len())), } } priv fn call_tc2<A: DatumConv, B:DatumConv, R: DatumConv> ( args: &[@RDatum], op: &fn(&A, &B) -> R ) -> Result<@RDatum, RuntimeError> { match args { [arg0, arg1] => { let res = do DatumConv::from_datum::<A, Option<R>>(arg0) |a| { do DatumConv::from_datum::<B, R>(arg1) |b| { op(a, b) } }; match res { Some(Some(x)) => Ok(DatumConv::move_datum(x)), _ => Err(TypeError), } }, _ => Err(ArgNumError(2, Some(2), args.len())), } } priv fn call_vargs<A: DatumConv, R: DatumConv> (args: &[@RDatum], op: &fn(&[A]) -> R) -> Result<@RDatum, RuntimeError> { let mut idx = 0u; let mut vec = vec::with_capacity(args.len()); while idx < args.len() { match DatumConv::from_datum(args[idx], |&a| {a}) { 
Some(x) => vec.push(x), None => return Err(TypeError), }; idx += 1; } let res:@RDatum = DatumConv::move_datum(op(vec)); Ok(res) } priv fn call_err2<A: DatumConv, B: DatumConv, R: DatumConv> ( args: &[@RDatum], op: &fn(&A, &B) -> Result<R, RuntimeError> ) -> Result<@RDatum, RuntimeError> { match args { [arg0, arg1] => { let r = do DatumConv::from_datum::<A, Result<R, RuntimeError>>(arg0) |a| { let res = do DatumConv::from_datum::<B, Result<R, RuntimeError>>(arg1) |b| { op(a, b) }; match res { Some(x) => x, None => Err(TypeError), } }; match r { Some(Ok(x)) => Ok(DatumConv::move_datum(x)), Some(Err(e)) => Err(e), None => Err(TypeError), } }, _ => Err(ArgNumError(2, Some(2), args.len())), } } priv fn call_err3<A: DatumConv, B: DatumConv, C: DatumConv, R: DatumConv> ( args: &[@RDatum], op: &fn(&A, &B, &C) -> Result<R, RuntimeError> ) -> Result<@RDatum, RuntimeError> { match args { [arg0, arg1, arg2] => { let r0 = do DatumConv::from_datum::<A, Result<R, RuntimeError>>(arg0) |a| { let r1 = do DatumConv::from_datum::<B, Result<R, RuntimeError>>(arg1) |b| { let r2 = do DatumConv::from_datum::<C, Result<R, RuntimeError>>(arg2) |c| { op(a, b, c) }; match r2 { Some(x) => x, None => Err(TypeError), } }; match r1 { Some(x) => x, None => Err(TypeError), } }; match r0 { Some(Ok(x)) => Ok(DatumConv::move_datum(x)), Some(Err(e)) => Err(e), None => Err(TypeError), } }, _ => Err(ArgNumError(3, Some(3), args.len())), } } priv fn call_num_foldl(args: &[@RDatum], a0: &LNumeric, op: &fn(&LNumeric, &LNumeric) -> Result<LNumeric, RuntimeError>) -> Result<@RDatum, RuntimeError> { let mut res:LNumeric = a0.clone(); let mut err = false; do args.each |&arg| { match arg { @LNum(ref a) => { match op(&res, a) { Ok(n) => { res = n; err = false; }, _ => { err = true; } } }, _ => { err = true; } } !err }; if err { Err(TypeError) } else { Ok(@LNum(res)) } } priv fn call_real_bfoldl(args: &[@RDatum], op: &fn(&LReal, &LReal) -> bool) -> Result<@RDatum, RuntimeError> { let n = args.len(); if n < 2 { 
return Err(ArgNumError(2, None, n)); } let mut a = match args[0] { @LNum(ref n) => match get_real(n) { None => return Err(TypeError), Some(r) => r, }, _ => return Err(TypeError), }; let mut idx = 1; while idx < n { let b = match args[idx] { @LNum(ref n) => match get_real(n) { None => return Err(TypeError), Some(r) => r, }, _ => return Err(TypeError), }; if !op(&a, &b) { return Ok(@LBool(false)); } a = b; idx += 1; } return Ok(@LBool(true)); } priv fn get_bindings(arg: &RDatum) -> Result<~[(@str, @RDatum)], ~str> { match arg.to_list() { None => Err(~"non-list bindings"), Some(bindings) => do result::map_vec(bindings) |datum| { match datum.to_list() { Some([@LIdent(name), expr]) => Ok((name, expr)), Some(_) | None => Err(~"invalid binding") } } } } priv fn get_syms(&arg: &@RDatum) -> Result<(~[@str], Option<@str>), ~str> { let mut iter = arg; let mut args : ~[@str] = ~[]; let mut varargs : Option<@str> = None; loop { match *iter { LCons(h, t) => match *h { LIdent(name) => { args.push(name); iter = t; }, _ => { return Err(~"non-symbol argument"); } }, LIdent(name) => { varargs = Some(name); break; }, LNil => { break; }, _ => { return Err(~"non-list argument"); }, } } Ok((args, varargs)) } impl Runtime { fn get_syntax(&self, val: &RDatum) -> Option<PrimSyntax> { match *val { LIdent(name) => match self.global.find(&name) { Some(&Right(syn)) => Some(syn), _ => None, }, _ => None, } } fn find_var(&self, name: &@str) -> Result<@RDatum, RuntimeError> { let mut val: Option<@RDatum> = None; do self.env.each |frame| { match frame.find(name) { None => true, Some(v) => { val = Some(*v); false } } }; match val { None => match self.global.find(name) { Some(&Left(v)) => Ok(v), Some(&Right(_)) => Err(RefMacro(*name)), None => Err(UnboundVariable(*name)), }, Some(v) => Ok(v), } } fn syn_let(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match get_bindings(bindings) { Err(e) => Err(BadSyntax(SynLet, e)), Ok(b) => { let mut arg_frame = 
HashMap::new(); let mut err:Option<RuntimeError> = None; do b.each |&(name, expr)| { match self.eval(expr) { Ok(val) => { arg_frame.insert(name, val); true } Err(e) => { err = Some(e); false } } }; match err { Some(e) => Err(e), None => self.local_eval(arg_frame, self.env, body) } } } } fn syn_letstar(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match get_bindings(bindings) { Err(e) => Err(BadSyntax(SynLet, e)), Ok(b) => { let old_frame = self.env; let mut err:Option<RuntimeError> = None; do b.each |&(name, expr)| { match self.eval(expr) { Ok(val) => { let mut arg_frame = HashMap::new(); arg_frame.insert(name, val); self.env = @mut push(self.env, arg_frame); true }, Err(e) => { err = Some(e); false }, } }; let mut res:Result<@RDatum, RuntimeError> = Err(NilEval); match err { Some(e) => { res = Err(e); }, None => { do body.each |&val| { res = self.eval(val); res.is_ok() }; } }; self.env = old_frame; return res } } } fn syn_letrec(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match get_bindings(bindings) { Err(e) => Err(BadSyntax(SynLet, e)), Ok(b) => { let old_frame = self.env; let mut arg_frame = HashMap::new(); let (names, exprs) = vec::unzip(b); for names.each |&name| { arg_frame.insert(name, @LExt(RUndef)); } self.env = @mut push(old_frame, arg_frame); let mut res:Result<@RDatum, RuntimeError> = Err(NilEval); match result::map_vec(exprs, |&expr| { self.eval(expr) }) { Ok(vals) => { do self.env.mut_top |frame| { for uint::range(0, names.len()) |i| { frame.insert(names[i], vals[i]); } }; do body.each |&val| { res = self.eval(val); res.is_ok() }; }, Err(e) => { res = Err(e); }, } self.env = old_frame; res } } } fn cond(&mut self, conds: &[@RDatum]) -> Result<@RDatum, RuntimeError> { let mut i = 0u; let mut exprs = vec::with_capacity(conds.len()); let mut else_opt = None; while i < conds.len() { match conds[i].to_list() { Some([@LIdent(els), expr]) if els.as_slice() == "else" => if i == 
conds.len()-1 { else_opt = Some(expr); } else { return Err(BadSyntax(SynCond, ~"trailing conditions after else")); }, Some([pred, expr]) => exprs.push((pred, expr)), _ => return Err(BadSyntax(SynCond, ~"invalid conditional expression")), } i += 1; } let mut res = Ok(@LExt(RUndef)); let expr_end = do exprs.each |&(pred, expr)| { match self.eval(pred) { Err(e) => { res = Err(e); false }, Ok(@LBool(false)) => true, _ => { res = self.eval(expr); false }, } }; match else_opt { Some(else_expr) if expr_end => self.eval(else_expr), _ => res } } fn define(&mut self, args: ~[@RDatum]) -> Result<(@str, @RDatum), RuntimeError> { match get_syms(&args[0]) { Err(e) => Err(BadSyntax(SynDefine, e)), Ok((anames, varargs)) => if anames.is_empty() { match varargs { None => Err(BadSyntax(SynDefine, ~"name not given")), Some(name) => if args.len() != 2 { Err(BadSyntax(SynDefine, ~"multiple expressions")) } else { do self.eval(args[1]).map |&val| { (name, val) } } } } else { let name = anames[0]; let anames = anames.slice(1, anames.len()).to_owned(); let seq = args.slice(1, args.len()).to_owned(); let proc = @LExt(RProc(anames, varargs, seq, self.env)); Ok((name, proc)) } } } fn run_syntax(&mut self, syn: PrimSyntax, args: ~[@RDatum]) -> Result<@RDatum, RuntimeError> { match syn { SynIf => if args.len() == 3 { do self.eval(args[0]).chain |cond| { match *cond { LBool(false) => self.eval(args[2]), _ => self.eval(args[1]), } } } else { Err(BadSyntax(SynIf, ~"bad number of arguments")) }, SynCond => self.cond(args), SynLambda => if args.len() < 2 { Err(BadSyntax(SynLambda, ~"no body given")) } else { match get_syms(&args[0]) { Err(e) => Err(BadSyntax(SynLambda, e)), Ok((anames, varargs)) => { let seq = args.slice(1, args.len()).to_owned(); Ok(@LExt(RProc(anames, varargs, seq, self.env))) }, } }, SynLet => if args.len() < 2 { Err(BadSyntax(SynLet, ~"no body given")) } else { self.syn_let(args[0], args.slice(1, args.len())) }, SynLetRec => if args.len() < 2 { Err(BadSyntax(SynLetRec, ~"no body 
given")) } else { self.syn_letrec(args[0], args.slice(1, args.len())) }, SynLetStar => if args.len() < 2 { Err(BadSyntax(SynLetRec, ~"no body given")) } else { self.syn_letstar(args[0], args.slice(1, args.len())) }, SynDefine => if args.len() < 2 { Err(BadSyntax(SynDefine, ~"no body given")) } else { let definition = self.define(args); match definition { Err(e) => Err(e), Ok((name, val)) => { if self.env.size_hint() == Some(0) { // this is the top-level context // just bind the definition in global self.global.insert(name, Left(val)); } else { // this is not the top-level context // create a new frame let mut frame = HashMap::new(); frame.insert(name, val); self.env = @mut push(self.env, frame); }; Ok(@LNil) }, } }, SynSet => if args.len() != 2 { Err(BadSyntax(SynSet, ~"bad number of arguments")) } else { match *args[0] { LIdent(name) => do self.eval(args[1]).chain |val| { if set_var(self.env, &name, val) { Ok(@LNil) } else { Err(BadSyntax(SynSet, ~"unbound variable")) } }, _ => Err(BadSyntax(SynSet, ~"cannot set non-variable")) } }, SynQuote => if args.len() == 1 { Ok(args[0]) } else { Err(BadSyntax(SynQuote, ~"bad number of arguments")) }, SynQQuote => if args.len() == 1 { self.quasiquote(&args[0]) } else { Err(BadSyntax(SynQQuote, ~"bad number of arguments")) }, SynUnquote => if args.len() == 1 { self.unquote(&args[0]) } else { Err(BadSyntax(SynUnquote, ~"bad number of arguments")) }, SynAnd => self.syn_and(args), SynOr => self.syn_or(args), } } priv fn syn_and(&mut self, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { let mut res = @LBool(true); let mut i = 0u; while i < args.len() { match self.eval(args[i]) { Ok(@LBool(false)) => return Ok(@LBool(false)), Ok(x) => { res = x }, Err(e) => return Err(e), }; i += 1; } return Ok(res) } priv fn syn_or(&mut self, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { let mut i = 0u; while i < args.len() { match self.eval(args[i]) { Ok(@LBool(false)) => (), Ok(x) => return Ok(x), Err(e) => return Err(e), }; i += 1; 
} return Ok(@LBool(false)) } fn call_proc(&mut self, anames: &[@str], vargs: Option<@str>, code: &[@RDatum], frame: @mut Stack<HashMap<@str, @RDatum>>, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { // create new frame to store args let mut arg_frame = HashMap::new(); match vargs { None => if args.len() != anames.len() { return Err(ArgNumError(anames.len(), Some(anames.len()), args.len())); }, Some(vname) => if args.len() < anames.len() { return Err(ArgNumError(anames.len(), None, args.len())); } else { let vslice = args.slice(anames.len(), args.len()); let va = do vslice.rev_iter().fold(@LNil) |a, &l| { @LCons(l, a) }; arg_frame.insert(vname, va); }, } for uint::range(0, anames.len()) |i| { arg_frame.insert(anames[i], args[i]); } self.local_eval(arg_frame, frame, code) } fn local_eval(&mut self, arg_frame: HashMap<@str, @RDatum>, frame: @mut Stack<HashMap<@str, @RDatum>>, code: &[@RDatum]) -> Result<@RDatum, RuntimeError> { // store current env let old_env = self.env; // create new local env self.env = @mut push(frame, arg_frame); let mut res:Result<@RDatum, RuntimeError> = Err(NilEval); do code.each() |&val| { res = self.eval(val); res.is_ok() }; // restore env self.env = old_env; res } fn call_prim(&mut self, f: PFunc, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match f { PEval => match args { [arg] => self.eval(arg), _ => Err(ArgNumError(1, Some(1), args.len())), }, PApply => do call_err2::<RuntimeData, GetList, @RDatum>(args) |f, l| { self.apply(f, l.list) }, PBegin => if args.len() == 0 { Ok(@LExt(RUndef)) } else { Ok(*args.last()) }, PAdd => do call_num_foldl(args, &Zero::zero()) |&lhs, &rhs| { Ok(lhs + rhs) }, PSub => match args { [] => Err(ArgNumError(1, None, 0)), [@LNum(ref x)] => Ok(@LNum(-*x)), [@LNum(ref x), ..tail] => do call_num_foldl(tail, x) |&lhs, &rhs| { Ok(lhs - rhs) }, _ => Err(TypeError), }, PMul => do call_num_foldl(args, &One::one()) |&lhs, &rhs| { Ok(lhs * rhs) }, PDiv => match args { [] => Err(ArgNumError(1, None, 0)), 
[@LNum(ref x)] => if x.is_zero() { Err(DivideByZeroError) } else { Ok(@LNum(x.recip())) }, [@LNum(ref x), ..tail] => do call_num_foldl(tail, x) |&lhs, &rhs| { if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(lhs / rhs) } }, _ => Err(TypeError), }, PQuotient => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| { if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(lhs / rhs) } }, PRemainder => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| { if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(lhs % rhs) } }, PModulo => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| { if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(modulo(lhs, rhs)) } }, PFloor => do call_tc1::<LReal, LReal>(args) |&x| { x.floor() }, PCeiling => do call_tc1::<LReal, LReal>(args) |&x| { x.ceil() }, PRound => do call_tc1::<LReal, LReal>(args) |&x| { x.round() }, PTruncate => do call_tc1::<LReal, LReal>(args) |&x| { x.trunc() }, PExp => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.exp() }, PLog => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.ln() }, PSin => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.sin() }, PCos => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.cos() }, PTan => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.tan() }, PAsin => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.asin() }, PAcos => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.acos() }, PAtan => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.atan() }, PSqrt => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.sqrt() }, PExpt => do call_tc2::<LNumeric, LNumeric, LNumeric>(args) |x, r| { x.pow(r) }, PMakeRectangular => do call_tc2::<LReal, LReal, LNumeric>(args) |rx, ry| { coerce(rx, ry, |&a, &b| { exact(a, b) }, |a, b| { inexact(a, b) }) }, PMakePolar => do call_tc2::<LReal, LReal, LNumeric>(args) |rx, ry| { polar(rx.to_inexact(), ry.to_inexact()) }, PRealPart => do call_tc1::<LNumeric, LNumeric>(args) |&x| { match x { NExact( Cmplx { re: ref re, im: _ } ) => 
from_rational(re), NInexact( Cmplx { re: re, im: _ } ) => from_f64(re), } }, PImagPart => do call_tc1::<LNumeric, LNumeric>(args) |&x| { match x { NExact( Cmplx { re: _, im: ref im } ) => from_rational(im), NInexact( Cmplx { re: _, im: im } ) => from_f64(im), } }, PMagnitude => do call_tc1::<LNumeric, f64>(args) |x| { x.to_inexact().to_polar().first() }, PAngle => do call_tc1::<LNumeric, f64>(args) |x| { x.to_inexact().to_polar().second() }, PNumerator => do call_tc1::<Rational, BigInt>(args) |x| { x.numerator().clone() }, PDenominator => do call_tc1::<Rational, BigInt>(args) |x| { x.denominator().clone() }, PCar => do call_tc1::<(@RDatum, @RDatum), @RDatum>(args) |&(h, _)| { h }, PCdr => do call_tc1::<(@RDatum, @RDatum), @RDatum>(args) |&(_, t)| { t }, PCons => do call_prim2(args) |arg1, arg2| { Ok(@LCons(arg1, arg2)) }, PEqv => do call_prim2(args) |arg1, arg2| { let b = match (arg1, arg2) { (@LCons(_, _), @LCons(_, _)) => managed::ptr_eq(arg1, arg2), (@LString(_), @LString(_)) => managed::ptr_eq(arg1, arg2), (@LExt(_), @LExt(_)) => managed::ptr_eq(arg1, arg2), _ => arg1 == arg2, }; Ok(@LBool(b)) }, PEqual => do call_tc2::<@RDatum, @RDatum, bool>(args) |&a, &b| { a == b }, PNumber => typecheck::<LNumeric>(args), PReal => typecheck::<LReal>(args), PInteger => do call_tc1::<@RDatum, bool>(args) |&arg| { match arg { @LNum(NExact(Cmplx { re: ref re, im: ref im })) => *re.numerator() == One::one() && *im.numerator() == One::one(), @LNum(NInexact(Cmplx { re: re, im: im })) => re.round() == re && im.round() == im, _ => false, } }, PExact => typecheck::<Cmplx<Rational>>(args), PInexact => typecheck::<Cmplx<f64>>(args), PExactInexact => do call_tc1::<LNumeric, Cmplx<f64>>(args) |x| { x.to_inexact() }, PNumberString => match args.len() { 1 => do call_tc1::<LNumeric, ~str>(args) |x| { x.to_str() }, 2 => do call_err2::<LNumeric, uint, ~str>(args) |&x, &radix| { match x { NExact(ref n) => Ok(n.to_str_radix(radix)), _ => if radix == 10 { Ok(x.to_str()) } else { Err(TypeError) 
}, } }, n => Err(ArgNumError(1, Some(2), n)) }, PEQ => do call_real_bfoldl(args) |&lhs, &rhs| { lhs == rhs }, PGT => do call_real_bfoldl(args) |&lhs, &rhs| { lhs > rhs }, PLT => do call_real_bfoldl(args) |&lhs, &rhs| { lhs < rhs }, PGE => do call_real_bfoldl(args) |&lhs, &rhs| { lhs >= rhs }, PLE => do call_real_bfoldl(args) |&lhs, &rhs| { lhs <= rhs }, PNot => do call_tc1::<@RDatum, bool>(args) |&arg| { match arg { @LBool(false) => true, _ => false, } }, PBoolean => typecheck::<bool>(args), PChar => typecheck::<char>(args), PProcedure => match args { [@LExt(RUndef)] => Ok(@LBool(false)), [@LExt(_)] => Ok(@LBool(true)), [_] => Ok(@LBool(false)), _ => Err(ArgNumError(1, Some(1), args.len())), }, PIsVector => match args { [@LVector(_)] => Ok(@LBool(true)), [_] => Ok(@LBool(false)), _ => Err(ArgNumError(1, Some(1), args.len())), }, PMakeVector => match args { [@LNum(ref x)] => match get_uint(x) { Some(k) => { let mut v = ~[]; v.grow(k, &@LExt(RUndef)); Ok(@LVector(v)) }, None => Err(TypeError), }, [@LNum(ref x), ref y] => match get_uint(x) { Some(k) => { let mut v = ~[]; v.grow(k, y); Ok(@LVector(v)) }, None => Err(TypeError), }, [_] | [_, _] => Err(TypeError), _ => Err(ArgNumError(1, Some(2), args.len())), }, PVector => Ok(@LVector(args.to_owned())), PVectorLength => match args { [@LVector(ref v)] => Ok(@LNum(from_uint(v.len()))), [_] => Err(TypeError), _ => Err(ArgNumError(1, Some(1), args.len())), }, PVectorRef => match args { [@LVector(ref v), @LNum(ref k)] => match get_uint(k) { Some(i) => if i < v.len() { Ok(v[i]) } else { Err(RangeError) }, None => Err(TypeError), }, [_, _] => Err(TypeError), _ => Err(ArgNumError(2, Some(2), args.len())), }, PVectorList => match args { [@LVector(ref v)] => Ok(LDatum::from_list(*v)), [_] => Err(TypeError), _ => Err(ArgNumError(1, Some(1), args.len())), }, PListVector => match args { [arg] => match arg.to_list() { Some(v) => Ok(@LVector(v)), None => Err(TypeError), }, _ => Err(ArgNumError(1, Some(1), args.len())), }, PNull => 
typecheck::<()>(args), PPair => typecheck::<(@RDatum, @RDatum)>(args), PIsString => typecheck::<~str>(args), PString => do call_vargs::<char, ~str>(args) |chars| { str::from_chars(chars) }, PStringLength => do call_tc1::<~str, uint>(args) |s| { s.len() }, PStringRef => do call_err2::<~str, uint, char>(args) |s, &idx| { if idx <= s.len() { Ok(s.char_at(idx)) } else { Err(RangeError) } }, PSubstring => match args.len() { 2 => do call_err2::<~str, uint, ~str>(args) |s, &start| { if start <= s.len() { Ok(s.slice(start, s.len()).to_owned()) } else { Err(RangeError) } }, 3 => do call_err3::<~str, uint, uint, ~str>(args) |s, &start, &end| { if start <= end && end <= s.len() { Ok(s.slice(start, end).to_owned()) } else { Err(RangeError) } }, n => Err(ArgNumError(2, Some(3), n)), }, PSymbol => do call_prim1(args) |arg| { match arg { @LIdent(_) => Ok(@LBool(true)), _ => Ok(@LBool(false)), } }, PSymbolString => do call_prim1(args) |arg| { match arg { @LIdent(ref s) => Ok(@LString(s.to_owned())), _ => Err(TypeError), } }, PStringSymbol => do call_prim1(args) |arg| { match arg { @LString(ref s) => Ok(@LIdent(s.to_managed())), _ => Err(TypeError), } }, } } fn recursive_qq(&mut self, val: &@RDatum) -> Result<@RDatum, RuntimeError> { match *val { @LCons(ref h, ref t) => match is_quote(h,t) { Some((QuasiQuote, ref v)) => do self.quasiquote(v).map |&qv| { @LCons(@LIdent(@"quasiquote"), @LCons(qv, @LNil)) }, Some((Unquote, ref v)) => self.unquote(v), _ => do self.recursive_qq(h).chain |qh| { do self.recursive_qq(t).map |&qt| { @LCons(qh, qt) } }, }, @LVector(ref v) => { match result::map_vec(*v, |x| { self.recursive_qq(x) }) { Ok(qmap) => Ok(@LVector(qmap)), Err(e) => Err(e), } }, _ => Ok(*val), } } fn quasiquote(&mut self, val: &@RDatum) -> Result<@RDatum, RuntimeError> { self.qq_lvl += 1; let res = self.recursive_qq(val); self.qq_lvl -= 1; res } fn unquote(&mut self, val: &@RDatum) -> Result<@RDatum, RuntimeError> { if self.qq_lvl == 0 { Err(BadSyntax(SynUnquote, ~"unquote not 
nested in quasiquote")) } else { self.qq_lvl -= 1; let res = if self.qq_lvl == 0 { self.eval(*val) } else { do self.recursive_qq(val).map |&qval| { @LCons(@LIdent(@"unquote"), @LCons(qval, @LNil)) } }; self.qq_lvl += 1; res } } fn apply(&mut self, proc: &RuntimeData, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> { match proc { &RUndef => Err(NotCallable), &RPrim(f) => self.call_prim(f, args), &RProc(ref anames, ref vargs, ref code, ref env) => self.call_proc(*anames, *vargs, *code, *env, args), } } fn call(&mut self, proc: &RuntimeData, aexprs: ~[@RDatum]) -> Result<@RDatum, RuntimeError> { match result::map_vec(aexprs, |&expr| self.eval(expr)) { Ok(args) => self.apply(proc, args), Err(e) => Err(e), } } pub fn new_std() -> Runtime { Runtime { stdin: io::stdin(), stdout: io::stdout(), stderr: io::stderr(), env: @mut Stack::new(), global: load_prelude(), qq_lvl: 0, } } pub fn eval(&mut self, val: @RDatum) -> Result<@RDatum, RuntimeError> { match *val { LIdent(name) => self.find_var(&name), LCons(fexpr, aexpr) => match aexpr.to_list() { None => Err(NotList), Some(aexprs) => { match self.get_syntax(fexpr) { Some(syntax) => self.run_syntax(syntax, aexprs), None => match self.eval(fexpr) { Ok(@LExt(ref proc)) => self.call(proc, aexprs), Ok(_) => Err(NotCallable), Err(e) => Err(e), }, } }, }, LNil => Err(NilEval), _ => Ok(val), } } pub fn load(&mut self, rdr: @io::Reader) -> Result<@RDatum, RuntimeError> { let mut parser = Parser(rdr); match parser.parse() { Ok(datum) => self.eval(@datum), Err(e) => { let (line, col) = parser.pos(); Err(ParseError(line, col, e)) }, } } } priv fn set_var(env: @mut Stack<HashMap<@str, @RDatum>>, name: &@str, val: @RDatum) -> bool { let mut success = false; do env.each_mut |frame| { match frame.find_mut(name) { None => (), Some(v) => { success = true; *v = val; } } !success }; success } impl DatumConv for vector use std::io; use std::borrow; use std::uint; use std::result; use std::str; use std::vec; use std::num::{One, Zero, 
// NOTE(review): pre-1.0 Rust (~0.7 era): `@`/`~` pointers, `do`-blocks, `&fn`
// closure types, `fmt!`, `priv`, `extra::*`. Reformatted for readability only;
// code tokens are unchanged. (The first line is the tail of a `use` list that
// begins before this chunk.)
ToStrRadix, IntConvertible};
use std::hashmap::HashMap;
use std::managed;
use bounded_iterator::BoundedIterator;
use extra::bigint::BigInt;
use extra::complex::Cmplx;
use datum::*;
use primitive::*;
use numeric::*;
use rational::Rational;
use stack::*;
use parser::Parser;

// Runtime payload attached to an LDatum: the "undefined" placeholder, a
// primitive function, or a closure (formal names, optional vararg name, body
// expressions, captured environment stack).
enum RuntimeData {
    RUndef,
    RPrim(PFunc),
    RProc(~[@str], Option<@str>, ~[@LDatum<RuntimeData>], @mut Stack<HashMap<@str, @LDatum<RuntimeData>>>),
}

impl Clone for RuntimeData {
    fn clone(&self) -> RuntimeData {
        match self {
            &RUndef => RUndef,
            &RPrim(f) => RPrim(f),
            &RProc(ref args, ref vargs, ref body, ref env) => {
                let cloneargs = do args.map |&arg| { arg };
                RProc(cloneargs, *vargs, body.clone(), *env)
            },
        }
    }
}

// Equality on runtime data. NOTE(review): the RProc arm compares `lhs == rhs`,
// which dispatches back into this same `eq` via the Eq impl below — verify
// this is intended (it looks like it would recurse rather than compare
// closures by identity).
fn eq(lhs: &RuntimeData, rhs: &RuntimeData) -> bool {
    match (lhs, rhs) {
        (&RPrim(l), &RPrim(r)) => l == r,
        (&RProc(_,_,_,_), &RProc(_,_,_,_)) => lhs == rhs,
        _ => false,
    }
}

impl Eq for RuntimeData {
    fn eq(&self, other: &RuntimeData) -> bool { eq(self, other) }
    fn ne(&self, other: &RuntimeData) -> bool { !eq(self, other) }
}

// Display form for runtime data; procedures print their managed-box address.
fn data_to_str(data: &RuntimeData) -> ~str {
    match *data {
        RUndef => ~"<undefined>",
        RPrim(f) => fmt!("<primitive:%s>", f.to_str()),
        RProc(_, _, _, _) => fmt!("<procedure 0x%08x>", borrow::to_uint(data)),
    }
}

impl ToStr for RuntimeData {
    fn to_str(&self) -> ~str { data_to_str(self) }
}

type RDatum = LDatum<RuntimeData>;

// Conversion between Scheme datums and native values. `from_datum` is in CPS
// style (applies `op` to a borrowed view, None if the datum has the wrong
// shape); `move_datum` wraps a native value back into a datum; `typename` is
// the human-readable name used in diagnostics.
pub trait DatumConv {
    fn from_datum<R>(@RDatum, &fn(&Self) -> R) -> Option<R>;
    fn move_datum(Self) -> @RDatum;
    fn typename() -> ~str;
}

// Identity conversion: any datum.
impl DatumConv for @RDatum {
    fn from_datum<R>(datum: @RDatum, op: &fn(&@RDatum) -> R) -> Option<R> { Some(op(&datum)) }
    fn move_datum(x: @RDatum) -> @RDatum { x }
    fn typename() -> ~str { ~"datum" }
}

impl DatumConv for RuntimeData {
    fn from_datum<R>(datum: @RDatum, op: &fn(&RuntimeData) -> R) -> Option<R> {
        match datum {
            @LExt(ref r) => Some(op(r)),
            _ => None,
        }
    }
    fn move_datum(x: RuntimeData) -> @RDatum { @LExt(x) }
    fn typename() -> ~str { ~"procedure" }
}

impl DatumConv for LNumeric {
    fn from_datum<R>(datum: @RDatum, op: &fn(&LNumeric) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: LNumeric) -> @RDatum { @LNum(x) }
    fn typename() -> ~str { ~"number" }
}

impl DatumConv for Cmplx<f64> {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Cmplx<f64>) -> R) -> Option<R> {
        match datum {
            @LNum(NInexact(ref n)) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: Cmplx<f64>) -> @RDatum { @LNum(NInexact(x)) }
    fn typename() -> ~str { ~"inexact number" }
}

// Inexact real: inexact complex with zero imaginary part.
impl DatumConv for f64 {
    fn from_datum<R>(datum: @RDatum, op: &fn(&f64) -> R) -> Option<R> {
        match datum {
            @LNum(NInexact(ref n)) if n.im.is_zero() => Some(op(&n.re)),
            _ => None,
        }
    }
    fn move_datum(x: f64) -> @RDatum { @LNum(from_f64(x)) }
    fn typename() -> ~str { ~"inexact real number" }
}

impl DatumConv for Cmplx<Rational> {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Cmplx<Rational>) -> R) -> Option<R> {
        match datum {
            @LNum(NExact(ref n)) => Some(op(n)),
            _ => None,
        }
    }
    fn move_datum(x: Cmplx<Rational>) -> @RDatum { @LNum(NExact(x)) }
    fn typename() -> ~str { ~"exact number" }
}

impl DatumConv for LReal {
    fn from_datum<R>(datum: @RDatum, op: &fn(&LReal) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => match get_real(n) {
                Some(ref r) => Some(op(r)),
                None => None,
            },
            _ => None,
        }
    }
    fn move_datum(x: LReal) -> @RDatum { @LNum(from_real(&x)) }
    fn typename() -> ~str { ~"real number" }
}

// Exact rational: exact complex with zero imaginary part.
impl DatumConv for Rational {
    fn from_datum<R>(datum: @RDatum, op: &fn(&Rational) -> R) -> Option<R> {
        match datum {
            @LNum(NExact(ref n)) if n.im.is_zero() => { Some(op(&n.re)) },
            _ => None,
        }
    }
    fn move_datum(x: Rational) -> @RDatum { @LNum(from_rational(&x)) }
    fn typename() -> ~str { ~"rational number" }
}

impl DatumConv for BigInt {
    // NOTE(review): the integer test reads "numerator == 1, yield denominator",
    // the reverse of the usual rational convention — presumably this project's
    // `Rational` stores its fields swapped; confirm against the rational module.
    fn from_datum<R>(datum: @RDatum, op: &fn(&BigInt) -> R) -> Option<R> {
        match datum {
            @LNum(ref n) => match *n {
                NExact( Cmplx{ re: ref re, im: ref im } ) =>
                    if im.is_zero() && *re.numerator() == One::one() {
                        Some(op(re.denominator()))
                    } else {
                        None
                    },
                NInexact(_) => None,
            },
            _ => None,
        }
    }
    fn move_datum(x: BigInt) -> @RDatum { @LNum(from_bigint(x)) }
    fn typename() -> ~str { ~"integer" }
}

impl DatumConv for uint {
    // Accepts exact, non-negative integers that fit in a native `int`.
    fn from_datum<R>(datum: @RDatum, op: &fn(&uint) -> R) -> Option<R> {
        let max_int:BigInt = IntConvertible::from_int(Bounded::max_value::<int>());
        match datum {
            @LNum(NExact( Cmplx{ re: ref re, im: ref im } )) =>
                if im.is_zero() && !re.is_negative() && *re.numerator() == One::one() {
                    let d = re.denominator();
                    if *d <= max_int {
                        Some(op(&(d.to_int() as uint)))
                    } else {
                        None
                    }
                } else {
                    None
                },
            _ => None,
        }
    }
    fn move_datum(x: uint) -> @RDatum { @LNum(from_uint(x)) }
    fn typename() -> ~str { ~"unsigned integer" }
}

impl DatumConv for (@RDatum, @RDatum) {
    fn from_datum<R>(datum: @RDatum, op: &fn(&(@RDatum, @RDatum)) -> R) -> Option<R> {
        match datum {
            @LCons(a, b) => Some(op(&(a, b))),
            _ => None,
        }
    }
    fn move_datum((x, y): (@RDatum, @RDatum)) -> @RDatum { @LCons(x, y) }
    fn typename() -> ~str { ~"cons" }
}

impl DatumConv for bool {
    fn from_datum<R>(datum: @RDatum, op: &fn(&bool) -> R) -> Option<R> {
        match datum {
            @LBool(ref b) => Some(op(b)),
            _ => None,
        }
    }
    fn move_datum(x: bool) -> @RDatum { @LBool(x) }
    fn typename() -> ~str { ~"boolean" }
}

impl DatumConv for char {
    fn from_datum<R>(datum: @RDatum, op: &fn(&char) -> R) -> Option<R> {
        match datum {
            @LChar(ref c) => Some(op(c)),
            _ => None,
        }
    }
    fn move_datum(x: char) -> @RDatum { @LChar(x) }
    fn typename() -> ~str { ~"character" }
}

// The empty list, ().
impl DatumConv for () {
    fn from_datum<R>(datum: @RDatum, op: &fn(&()) -> R) -> Option<R> {
        match datum {
            @LNil => Some(op(&())),
            _ => None,
        }
    }
    fn move_datum(_: ()) -> @RDatum { @LNil }
    fn typename() -> ~str { ~"()" }
}

// Newtype so proper lists get a DatumConv instance distinct from vectors.
struct GetList { list: ~[@RDatum] }

impl DatumConv for GetList {
    #[inline]
    fn from_datum<R>(datum: @RDatum, op: &fn(&GetList) -> R) -> Option<R> {
        match datum.to_list() {
            Some(l) => Some(op(&GetList{ list: l })),
            _ => None,
        }
    }
    #[inline]
    fn move_datum(x: GetList) -> @RDatum { LDatum::from_list(x.list) }
    fn typename() -> ~str { ~"list" }
}

impl DatumConv for ~[@RDatum] {
    #[inline]
    fn from_datum<R>(datum: @RDatum, op: &fn(&~[@RDatum]) -> R) -> Option<R> {
        match datum {
            @LVector(ref v) => Some(op(v)),
            _ => None,
        }
    }
    #[inline]
    fn move_datum(x: ~[@RDatum]) -> @RDatum { @LVector(x) }
    fn typename() -> ~str { ~"vector" }
}

impl DatumConv for ~str {
    fn from_datum<R>(datum: @RDatum, op: &fn(&~str) -> R) -> Option<R> {
        match datum {
            @LString(ref s) => Some(op(s)),
            _ => None,
        }
    }
    fn move_datum(x: ~str) -> @RDatum { @LString(x) }
    fn typename() -> ~str { ~"string" }
}

// Interpreter state: std streams, lexical environment stack, the global table
// (a value or a special form per name), and current quasiquote nesting depth.
struct Runtime {
    stdin: @Reader,
    stdout: @Writer,
    stderr: @Writer,
    env: @mut Stack<HashMap<@str, @RDatum>>,
    global: HashMap<@str, Either<@RDatum, PrimSyntax>>,
    qq_lvl: uint,
}

// Everything evaluation can fail with.
#[deriving(Eq)]
enum RuntimeError {
    UnboundVariable(@str),
    RefMacro(@str),
    NotCallable,
    NotList,
    ArgNumError(uint, Option<uint>, uint),
    TypeError,
    DivideByZeroError,
    NilEval,
    BadSyntax(PrimSyntax, ~str),
    ParseError(uint, uint, ~str),
    RangeError,
}

impl ToStr for RuntimeError {
    fn to_str(&self) -> ~str { err_to_str(self) }
}

priv fn err_to_str(&err: &RuntimeError) -> ~str {
    match err {
        UnboundVariable(name) => ~"unbound variable: " + name,
        RefMacro(name) => ~"cannot reference macro name: " + name,
        NotCallable => ~"not callable",
        NotList => ~"not list",
        ArgNumError(min, Some(max), argnum) => {
            if min == max {
                fmt!("expected %u arguments, but found %u arguments", min, argnum)
            } else {
                fmt!("expected %u-%u arguments, but found %u arguments", min, max, argnum)
            }
        },
        ArgNumError(expected, None, argnum) => {
            fmt!("expected %u or more arguments, but found %u arguments", expected, argnum)
        },
        TypeError => ~"type error",
        DivideByZeroError => ~"divide by zero",
        NilEval => ~"() cannot be evaluated",
        BadSyntax(syn, reason) => ~"bad syntax for " + syn.to_str() + ": " + reason,
        ParseError(line, col, reason) => fmt!("failed to parse: %u:%u: %s", line, col, reason),
        RangeError => ~"index out of range",
    }
}

// Build the global table: every primitive and special form, plus `pi`.
// (Continues on the next chunk line.)
fn load_prelude() -> HashMap<@str, Either<@RDatum, PrimSyntax>> {
    let mut map = HashMap::new();
    let mut
prim_iter = BoundedIterator::new::<PFunc>();
    for prim_iter.advance |prim:PFunc| {
        let key = prim.to_str();
        map.insert(key.to_managed(), Left(@LExt(RPrim(prim))));
    }
    let mut syntax_iter = BoundedIterator::new::<PrimSyntax>();
    for syntax_iter.advance |syntax:PrimSyntax| {
        let key = syntax.to_str();
        map.insert(key.to_managed(), Right(syntax));
    }
    map.insert("pi".to_managed(), Left(@LNum(inexact(Real::pi(), 0f64))));
    map
}

// Call a primitive expecting exactly one argument.
priv fn call_prim1(args: &[@RDatum], op: &fn(@RDatum) -> Result<@RDatum, RuntimeError>)
    -> Result<@RDatum, RuntimeError>
{
    if args.len() == 1 {
        op(args[0])
    } else {
        Err(ArgNumError(1, Some(1), args.len()))
    }
}

// Call a primitive expecting exactly two arguments.
priv fn call_prim2(args: &[@RDatum], op: &fn(@RDatum, @RDatum) -> Result<@RDatum, RuntimeError>)
    -> Result<@RDatum, RuntimeError>
{
    if args.len() == 2 {
        op(args[0], args[1])
    } else {
        Err(ArgNumError(2, Some(2), args.len()))
    }
}

// Type predicate primitive: #t iff the single argument converts to A.
priv fn typecheck<A: DatumConv>(args: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
    match args {
        [arg] => {
            let res = do DatumConv::from_datum::<A, ()>(arg) |_| { () };
            match res {
                Some(_) => Ok(@LBool(true)),
                None => Ok(@LBool(false)),
            }
        },
        _ => Err(ArgNumError(1, Some(1), args.len())),
    }
}

// Typed unary call: convert the argument to A, apply `op`, wrap the R result.
priv fn call_tc1<A: DatumConv, R: DatumConv> (
    args: &[@RDatum],
    op: &fn(&A) -> R
) -> Result<@RDatum, RuntimeError>
{
    match args {
        [arg] => {
            let res = DatumConv::from_datum::<A, R>(arg, op);
            match res {
                Some(x) => Ok(DatumConv::move_datum(x)),
                None => Err(TypeError),
            }
        },
        _ => Err(ArgNumError(1, Some(1), args.len())),
    }
}

// Typed binary call.
priv fn call_tc2<A: DatumConv, B:DatumConv, R: DatumConv> (
    args: &[@RDatum],
    op: &fn(&A, &B) -> R
) -> Result<@RDatum, RuntimeError>
{
    match args {
        [arg0, arg1] => {
            let res = do DatumConv::from_datum::<A, Option<R>>(arg0) |a| {
                do DatumConv::from_datum::<B, R>(arg1) |b| { op(a, b) }
            };
            match res {
                Some(Some(x)) => Ok(DatumConv::move_datum(x)),
                _ => Err(TypeError),
            }
        },
        _ => Err(ArgNumError(2, Some(2), args.len())),
    }
}

// Variadic typed call: every argument must convert to A.
priv fn call_vargs<A: DatumConv, R: DatumConv> (args: &[@RDatum], op: &fn(&[A]) -> R)
    -> Result<@RDatum, RuntimeError>
{
    let mut idx = 0u;
    let mut vec = vec::with_capacity(args.len());
    while idx < args.len() {
        match DatumConv::from_datum(args[idx], |&a| {a}) {
            Some(x) => vec.push(x),
            None => return Err(TypeError),
        };
        idx += 1;
    }
    let res:@RDatum = DatumConv::move_datum(op(vec));
    Ok(res)
}

// Typed unary call whose operation may itself fail.
priv fn call_err1<A: DatumConv, R: DatumConv> (
    args: &[@RDatum],
    op: &fn(&A) -> Result<R, RuntimeError>
) -> Result<@RDatum, RuntimeError>
{
    match args {
        [arg] => {
            match DatumConv::from_datum(arg, op) {
                Some(Ok(x)) => Ok(DatumConv::move_datum(x)),
                Some(Err(e)) => Err(e),
                None => Err(TypeError),
            }
        },
        _ => Err(ArgNumError(1, Some(1), args.len())),
    }
}

// Typed binary fallible call.
priv fn call_err2<A: DatumConv, B: DatumConv, R: DatumConv> (
    args: &[@RDatum],
    op: &fn(&A, &B) -> Result<R, RuntimeError>
) -> Result<@RDatum, RuntimeError>
{
    match args {
        [arg0, arg1] => {
            let r = do DatumConv::from_datum::<A, Result<R, RuntimeError>>(arg0) |a| {
                let res = do DatumConv::from_datum::<B, Result<R, RuntimeError>>(arg1) |b| {
                    op(a, b)
                };
                match res {
                    Some(x) => x,
                    None => Err(TypeError),
                }
            };
            match r {
                Some(Ok(x)) => Ok(DatumConv::move_datum(x)),
                Some(Err(e)) => Err(e),
                None => Err(TypeError),
            }
        },
        _ => Err(ArgNumError(2, Some(2), args.len())),
    }
}

// Typed ternary fallible call.
priv fn call_err3<A: DatumConv, B: DatumConv, C: DatumConv, R: DatumConv> (
    args: &[@RDatum],
    op: &fn(&A, &B, &C) -> Result<R, RuntimeError>
) -> Result<@RDatum, RuntimeError>
{
    match args {
        [arg0, arg1, arg2] => {
            let r0 = do DatumConv::from_datum::<A, Result<R, RuntimeError>>(arg0) |a| {
                let r1 = do DatumConv::from_datum::<B, Result<R, RuntimeError>>(arg1) |b| {
                    let r2 = do DatumConv::from_datum::<C, Result<R, RuntimeError>>(arg2) |c| {
                        op(a, b, c)
                    };
                    match r2 {
                        Some(x) => x,
                        None => Err(TypeError),
                    }
                };
                match r1 {
                    Some(x) => x,
                    None => Err(TypeError),
                }
            };
            match r0 {
                Some(Ok(x)) => Ok(DatumConv::move_datum(x)),
                Some(Err(e)) => Err(e),
                None => Err(TypeError),
            }
        },
        _ => Err(ArgNumError(3, Some(3), args.len())),
    }
}

// Numeric left fold over the argument list, seeded with a0; iteration stops at
// the first non-number or failing op(). NOTE(review): any Err returned by op()
// (e.g. DivideByZeroError from division) is collapsed into TypeError here.
priv fn call_num_foldl(args: &[@RDatum], a0: &LNumeric,
                       op: &fn(&LNumeric, &LNumeric) -> Result<LNumeric, RuntimeError>)
    -> Result<@RDatum, RuntimeError>
{
    let mut res:LNumeric = a0.clone();
    let mut err = false;
    do args.each |&arg| {
        match arg {
            @LNum(ref a) => {
                match op(&res, a) {
                    Ok(n) => { res = n; err = false; },
                    _ => { err = true; }
                }
            },
            _ => { err = true; }
        }
        !err
    };
    if err { Err(TypeError) } else { Ok(@LNum(res)) }
}

// Boolean fold over adjacent real pairs: (op a b) && (op b c) && ...; needs >= 2 args.
priv fn call_real_bfoldl(args: &[@RDatum], op: &fn(&LReal, &LReal) -> bool)
    -> Result<@RDatum, RuntimeError>
{
    let n = args.len();
    if n < 2 {
        return Err(ArgNumError(2, None, n));
    }
    let mut a = match args[0] {
        @LNum(ref n) => match get_real(n) {
            None => return Err(TypeError),
            Some(r) => r,
        },
        _ => return Err(TypeError),
    };
    let mut idx = 1;
    while idx < n {
        let b = match args[idx] {
            @LNum(ref n) => match get_real(n) {
                None => return Err(TypeError),
                Some(r) => r,
            },
            _ => return Err(TypeError),
        };
        if !op(&a, &b) {
            return Ok(@LBool(false));
        }
        a = b;
        idx += 1;
    }
    return Ok(@LBool(true));
}

// Parse a let-style binding list into (name, expr) pairs.
priv fn get_bindings(arg: &RDatum) -> Result<~[(@str, @RDatum)], ~str> {
    match arg.to_list() {
        None => Err(~"non-list bindings"),
        Some(bindings) => do result::map_vec(bindings) |datum| {
            match datum.to_list() {
                Some([@LIdent(name), expr]) => Ok((name, expr)),
                Some(_) | None => Err(~"invalid binding")
            }
        }
    }
}

// Parse a lambda formals list: proper list, dotted rest tail, or bare symbol.
priv fn get_syms(&arg: &@RDatum) -> Result<(~[@str], Option<@str>), ~str> {
    let mut iter = arg;
    let mut args : ~[@str] = ~[];
    let mut varargs : Option<@str> = None;
    loop {
        match *iter {
            LCons(h, t) => match *h {
                LIdent(name) => { args.push(name); iter = t; },
                _ => { return Err(~"non-symbol argument"); }
            },
            LIdent(name) => { varargs = Some(name); break; },
            LNil => { break; },
            _ => { return Err(~"non-list argument"); },
        }
    }
    Ok((args, varargs))
}

impl Runtime {
    // Resolve a head-position identifier to a special form, if it names one.
    fn get_syntax(&self, val: &RDatum) -> Option<PrimSyntax> {
        match *val {
            LIdent(name) => match self.global.find(&name) {
                Some(&Right(syn)) => Some(syn),
                _ => None,
            },
            _ => None,
        }
    }

    // Variable lookup: innermost env frame first, then globals.
    // (Continues on the next chunk line.)
    fn find_var(&self, name: &@str) -> Result<@RDatum, RuntimeError> {
        let
mut val: Option<@RDatum> = None;
        do self.env.each |frame| {
            match frame.find(name) {
                None => true,
                Some(v) => { val = Some(*v); false }
            }
        };
        match val {
            None => match self.global.find(name) {
                Some(&Left(v)) => Ok(v),
                Some(&Right(_)) => Err(RefMacro(*name)),
                None => Err(UnboundVariable(*name)),
            },
            Some(v) => Ok(v),
        }
    }

    // (let ((name expr) ...) body...): evaluate every expr in the current env,
    // then run the body in a single new frame.
    fn syn_let(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match get_bindings(bindings) {
            Err(e) => Err(BadSyntax(SynLet, e)),
            Ok(b) => {
                let mut arg_frame = HashMap::new();
                let mut err:Option<RuntimeError> = None;
                do b.each |&(name, expr)| {
                    match self.eval(expr) {
                        Ok(val) => { arg_frame.insert(name, val); true }
                        Err(e) => { err = Some(e); false }
                    }
                };
                match err {
                    Some(e) => Err(e),
                    None => self.local_eval(arg_frame, self.env, body)
                }
            }
        }
    }

    // (let* ...): each binding gets its own frame so later exprs see earlier names.
    // NOTE(review): errors are reported as BadSyntax(SynLet, ...), not SynLetStar.
    fn syn_letstar(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match get_bindings(bindings) {
            Err(e) => Err(BadSyntax(SynLet, e)),
            Ok(b) => {
                let old_frame = self.env;
                let mut err:Option<RuntimeError> = None;
                do b.each |&(name, expr)| {
                    match self.eval(expr) {
                        Ok(val) => {
                            let mut arg_frame = HashMap::new();
                            arg_frame.insert(name, val);
                            self.env = @mut push(self.env, arg_frame);
                            true
                        },
                        Err(e) => { err = Some(e); false },
                    }
                };
                let mut res:Result<@RDatum, RuntimeError> = Err(NilEval);
                match err {
                    Some(e) => { res = Err(e); },
                    None => {
                        do body.each |&val| {
                            res = self.eval(val);
                            res.is_ok()
                        };
                    }
                };
                self.env = old_frame;
                return res
            }
        }
    }

    // (letrec ...): pre-bind every name to <undefined>, evaluate all exprs,
    // then patch the frame so the bound values can refer to each other.
    fn syn_letrec(&mut self, bindings: &RDatum, body: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match get_bindings(bindings) {
            Err(e) => Err(BadSyntax(SynLet, e)),
            Ok(b) => {
                let old_frame = self.env;
                let mut arg_frame = HashMap::new();
                let (names, exprs) = vec::unzip(b);
                for names.each |&name| {
                    arg_frame.insert(name, @LExt(RUndef));
                }
                self.env = @mut push(old_frame, arg_frame);
                let mut res:Result<@RDatum, RuntimeError> = Err(NilEval);
                match result::map_vec(exprs, |&expr| { self.eval(expr) }) {
                    Ok(vals) => {
                        do self.env.mut_top |frame| {
                            for uint::range(0, names.len()) |i| {
                                frame.insert(names[i], vals[i]);
                            }
                        };
                        do body.each |&val| {
                            res = self.eval(val);
                            res.is_ok()
                        };
                    },
                    Err(e) => { res = Err(e); },
                }
                self.env = old_frame;
                res
            }
        }
    }

    // (cond (pred expr)... [(else expr)]): first non-#f predicate wins;
    // `else` is only legal as the final clause.
    fn cond(&mut self, conds: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        let mut i = 0u;
        let mut exprs = vec::with_capacity(conds.len());
        let mut else_opt = None;
        while i < conds.len() {
            match conds[i].to_list() {
                Some([@LIdent(els), expr]) if els.as_slice() == "else" =>
                    if i == conds.len()-1 {
                        else_opt = Some(expr);
                    } else {
                        return Err(BadSyntax(SynCond, ~"trailing conditions after else"));
                    },
                Some([pred, expr]) => exprs.push((pred, expr)),
                _ => return Err(BadSyntax(SynCond, ~"invalid conditional expression")),
            }
            i += 1;
        }
        let mut res = Ok(@LExt(RUndef));
        // expr_end is true iff every predicate evaluated to #f (no clause fired).
        let expr_end = do exprs.each |&(pred, expr)| {
            match self.eval(pred) {
                Err(e) => { res = Err(e); false },
                Ok(@LBool(false)) => true,
                _ => { res = self.eval(expr); false },
            }
        };
        match else_opt {
            Some(else_expr) if expr_end => self.eval(else_expr),
            _ => res
        }
    }

    // (define name expr) or (define (name args...) body...); yields the binding
    // without installing it — run_syntax decides global vs. local placement.
    fn define(&mut self, args: ~[@RDatum]) -> Result<(@str, @RDatum), RuntimeError> {
        match get_syms(&args[0]) {
            Err(e) => Err(BadSyntax(SynDefine, e)),
            Ok((anames, varargs)) => if anames.is_empty() {
                match varargs {
                    None => Err(BadSyntax(SynDefine, ~"name not given")),
                    Some(name) => if args.len() != 2 {
                        Err(BadSyntax(SynDefine, ~"multiple expressions"))
                    } else {
                        do self.eval(args[1]).map |&val| { (name, val) }
                    }
                }
            } else {
                let name = anames[0];
                let anames = anames.slice(1, anames.len()).to_owned();
                let seq = args.slice(1, args.len()).to_owned();
                let proc = @LExt(RProc(anames, varargs, seq, self.env));
                Ok((name, proc))
            }
        }
    }

    // Dispatch a special form on its (unevaluated) argument expressions.
    fn run_syntax(&mut self, syn: PrimSyntax, args: ~[@RDatum])
        -> Result<@RDatum, RuntimeError>
    {
        match syn {
            SynIf => if args.len() == 3 {
                do self.eval(args[0]).chain |cond| {
                    match *cond {
                        LBool(false) => self.eval(args[2]),
                        _ => self.eval(args[1]),
                    }
                }
            } else {
                Err(BadSyntax(SynIf, ~"bad number of arguments"))
            },
            SynCond => self.cond(args),
            SynLambda => if args.len() < 2 {
                Err(BadSyntax(SynLambda, ~"no body given"))
            } else {
                match get_syms(&args[0]) {
                    Err(e) => Err(BadSyntax(SynLambda, e)),
                    Ok((anames, varargs)) => {
                        let seq = args.slice(1, args.len()).to_owned();
                        Ok(@LExt(RProc(anames, varargs, seq, self.env)))
                    },
                }
            },
            SynLet => if args.len() < 2 {
                Err(BadSyntax(SynLet, ~"no body given"))
            } else {
                self.syn_let(args[0], args.slice(1, args.len()))
            },
            SynLetRec => if args.len() < 2 {
                Err(BadSyntax(SynLetRec, ~"no body given"))
            } else {
                self.syn_letrec(args[0], args.slice(1, args.len()))
            },
            // NOTE(review): this arm reports BadSyntax(SynLetRec, ...) for let* —
            // looks like a copy/paste slip; confirm before relying on messages.
            SynLetStar => if args.len() < 2 {
                Err(BadSyntax(SynLetRec, ~"no body given"))
            } else {
                self.syn_letstar(args[0], args.slice(1, args.len()))
            },
            SynDefine => if args.len() < 2 {
                Err(BadSyntax(SynDefine, ~"no body given"))
            } else {
                let definition = self.define(args);
                match definition {
                    Err(e) => Err(e),
                    Ok((name, val)) => {
                        if self.env.size_hint() == Some(0) {
                            // this is the top-level context
                            // just bind the definition in global
                            self.global.insert(name, Left(val));
                        } else {
                            // this is not the top-level context
                            // create a new frame
                            let mut frame = HashMap::new();
                            frame.insert(name, val);
                            self.env = @mut push(self.env, frame);
                        };
                        Ok(@LNil)
                    },
                }
            },
            SynSet => if args.len() != 2 {
                Err(BadSyntax(SynSet, ~"bad number of arguments"))
            } else {
                match *args[0] {
                    LIdent(name) => do self.eval(args[1]).chain |val| {
                        if set_var(self.env, &name, val) {
                            Ok(@LNil)
                        } else {
                            Err(BadSyntax(SynSet, ~"unbound variable"))
                        }
                    },
                    _ => Err(BadSyntax(SynSet, ~"cannot set non-variable"))
                }
            },
            SynQuote => if args.len() == 1 {
                Ok(args[0])
            } else {
                Err(BadSyntax(SynQuote, ~"bad number of arguments"))
            },
            SynQQuote => if args.len() == 1 {
                self.quasiquote(&args[0])
            } else {
                Err(BadSyntax(SynQQuote, ~"bad number of arguments"))
            },
            SynUnquote => if args.len() == 1 {
                self.unquote(&args[0])
            } else {
                Err(BadSyntax(SynUnquote, ~"bad number of arguments"))
            },
            SynAnd => self.syn_and(args),
            SynOr => self.syn_or(args),
        }
    }

    // (and ...): short-circuits on #f; yields the last truthy value.
    // (Signature continues on the next chunk line.)
    priv fn
syn_and(&mut self, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        let mut res = @LBool(true);
        let mut i = 0u;
        while i < args.len() {
            match self.eval(args[i]) {
                Ok(@LBool(false)) => return Ok(@LBool(false)),
                Ok(x) => { res = x },
                Err(e) => return Err(e),
            };
            i += 1;
        }
        return Ok(res)
    }

    // (or ...): first non-#f value wins; #f when all were #f (or no args).
    priv fn syn_or(&mut self, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        let mut i = 0u;
        while i < args.len() {
            match self.eval(args[i]) {
                Ok(@LBool(false)) => (),
                Ok(x) => return Ok(x),
                Err(e) => return Err(e),
            };
            i += 1;
        }
        return Ok(@LBool(false))
    }

    // Apply a user closure: check arity, bind formals (and the dotted rest as a
    // list, if any) in a fresh frame on the captured env, then run the body.
    fn call_proc(&mut self,
                 anames: &[@str],
                 vargs: Option<@str>,
                 code: &[@RDatum],
                 frame: @mut Stack<HashMap<@str, @RDatum>>,
                 args: &[@RDatum])
        -> Result<@RDatum, RuntimeError>
    {
        // create new frame to store args
        let mut arg_frame = HashMap::new();
        match vargs {
            None => if args.len() != anames.len() {
                return Err(ArgNumError(anames.len(), Some(anames.len()), args.len()));
            },
            Some(vname) => if args.len() < anames.len() {
                return Err(ArgNumError(anames.len(), None, args.len()));
            } else {
                // Build the rest-argument list right-to-left.
                let vslice = args.slice(anames.len(), args.len());
                let va = do vslice.rev_iter().fold(@LNil) |a, &l| { @LCons(l, a) };
                arg_frame.insert(vname, va);
            },
        }
        for uint::range(0, anames.len()) |i| {
            arg_frame.insert(anames[i], args[i]);
        }
        self.local_eval(arg_frame, frame, code)
    }

    // Evaluate `code` under `frame` extended with `arg_frame`; the previous
    // environment is restored afterwards. Result is the last expression's.
    fn local_eval(&mut self,
                  arg_frame: HashMap<@str, @RDatum>,
                  frame: @mut Stack<HashMap<@str, @RDatum>>,
                  code: &[@RDatum])
        -> Result<@RDatum, RuntimeError>
    {
        // store current env
        let old_env = self.env;
        // create new local env
        self.env = @mut push(frame, arg_frame);
        let mut res:Result<@RDatum, RuntimeError> = Err(NilEval);
        do code.each() |&val| {
            res = self.eval(val);
            res.is_ok()
        };
        // restore env
        self.env = old_env;
        res
    }

    // Dispatch a built-in primitive on already-evaluated arguments.
    fn call_prim(&mut self, f: PFunc, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match f {
            PEval => match args {
                [arg] => self.eval(arg),
                _ => Err(ArgNumError(1, Some(1), args.len())),
            },
            PApply => do call_err2::<RuntimeData, GetList, @RDatum>(args) |f, l| {
                self.apply(f, l.list)
            },
            PBegin => if args.len() == 0 {
                Ok(@LExt(RUndef))
            } else {
                Ok(*args.last())
            },
            PAdd => do call_num_foldl(args, &Zero::zero()) |&lhs, &rhs| { Ok(lhs + rhs) },
            PSub => match args {
                [] => Err(ArgNumError(1, None, 0)),
                [@LNum(ref x)] => Ok(@LNum(-*x)),
                [@LNum(ref x), ..tail] => do call_num_foldl(tail, x) |&lhs, &rhs| { Ok(lhs - rhs) },
                _ => Err(TypeError),
            },
            PMul => do call_num_foldl(args, &One::one()) |&lhs, &rhs| { Ok(lhs * rhs) },
            PDiv => match args {
                [] => Err(ArgNumError(1, None, 0)),
                [@LNum(ref x)] => if x.is_zero() {
                    Err(DivideByZeroError)
                } else {
                    Ok(@LNum(x.recip()))
                },
                [@LNum(ref x), ..tail] => do call_num_foldl(tail, x) |&lhs, &rhs| {
                    if rhs.is_zero() {
                        Err(DivideByZeroError)
                    } else {
                        Ok(lhs / rhs)
                    }
                },
                _ => Err(TypeError),
            },
            PQuotient => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| {
                if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(lhs / rhs) }
            },
            PRemainder => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| {
                if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(lhs % rhs) }
            },
            PModulo => do call_err2::<BigInt, BigInt, BigInt>(args) |&lhs, &rhs| {
                if rhs.is_zero() { Err(DivideByZeroError) } else { Ok(modulo(lhs, rhs)) }
            },
            PFloor => do call_tc1::<LReal, LReal>(args) |&x| { x.floor() },
            PCeiling => do call_tc1::<LReal, LReal>(args) |&x| { x.ceil() },
            PRound => do call_tc1::<LReal, LReal>(args) |&x| { x.round() },
            PTruncate => do call_tc1::<LReal, LReal>(args) |&x| { x.trunc() },
            PExp => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.exp() },
            PLog => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.ln() },
            PSin => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.sin() },
            PCos => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.cos() },
            PTan => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.tan() },
            PAsin => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.asin() },
            PAcos => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.acos() },
            PAtan => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.atan() },
            PSqrt => do call_tc1::<LNumeric, LNumeric>(args) |&x| { x.sqrt() },
            PExpt => do call_tc2::<LNumeric, LNumeric, LNumeric>(args) |x, r| { x.pow(r) },
            PMakeRectangular => do call_tc2::<LReal, LReal, LNumeric>(args) |rx, ry| {
                coerce(rx, ry, |&a, &b| { exact(a, b) }, |a, b| { inexact(a, b) })
            },
            PMakePolar => do call_tc2::<LReal, LReal, LNumeric>(args) |rx, ry| {
                polar(rx.to_inexact(), ry.to_inexact())
            },
            PRealPart => do call_tc1::<LNumeric, LNumeric>(args) |&x| {
                match x {
                    NExact( Cmplx { re: ref re, im: _ } ) => from_rational(re),
                    NInexact( Cmplx { re: re, im: _ } ) => from_f64(re),
                }
            },
            PImagPart => do call_tc1::<LNumeric, LNumeric>(args) |&x| {
                match x {
                    NExact( Cmplx { re: _, im: ref im } ) => from_rational(im),
                    NInexact( Cmplx { re: _, im: im } ) => from_f64(im),
                }
            },
            PMagnitude => do call_tc1::<LNumeric, f64>(args) |x| {
                x.to_inexact().to_polar().first()
            },
            PAngle => do call_tc1::<LNumeric, f64>(args) |x| {
                x.to_inexact().to_polar().second()
            },
            PNumerator => do call_tc1::<Rational, BigInt>(args) |x| { x.numerator().clone() },
            PDenominator => do call_tc1::<Rational, BigInt>(args) |x| { x.denominator().clone() },
            PCar => do call_tc1::<(@RDatum, @RDatum), @RDatum>(args) |&(h, _)| { h },
            PCdr => do call_tc1::<(@RDatum, @RDatum), @RDatum>(args) |&(_, t)| { t },
            PCons => do call_prim2(args) |arg1, arg2| { Ok(@LCons(arg1, arg2)) },
            // eqv?: pointer identity for pairs/strings/procedures, structural
            // equality otherwise.
            PEqv => do call_prim2(args) |arg1, arg2| {
                let b = match (arg1, arg2) {
                    (@LCons(_, _), @LCons(_, _)) => managed::ptr_eq(arg1, arg2),
                    (@LString(_), @LString(_)) => managed::ptr_eq(arg1, arg2),
                    (@LExt(_), @LExt(_)) => managed::ptr_eq(arg1, arg2),
                    _ => arg1 == arg2,
                };
                Ok(@LBool(b))
            },
            PEqual => do call_tc2::<@RDatum, @RDatum, bool>(args) |&a, &b| { a == b },
            PNumber => typecheck::<LNumeric>(args),
            PReal => typecheck::<LReal>(args),
            // NOTE(review): the exact-integer test checks numerator == 1 on both
            // parts — mirrors the (possibly swapped) convention seen in the
            // BigInt DatumConv impl; confirm against the rational module.
            PInteger => do call_tc1::<@RDatum, bool>(args) |&arg| {
                match arg {
                    @LNum(NExact(Cmplx { re: ref re, im: ref im })) =>
                        *re.numerator() == One::one() && *im.numerator() == One::one(),
                    @LNum(NInexact(Cmplx { re: re, im: im })) =>
                        re.round() == re && im.round() == im,
                    _ => false,
                }
            },
            PExact => typecheck::<Cmplx<Rational>>(args),
            PInexact => typecheck::<Cmplx<f64>>(args),
            PExactInexact => do call_tc1::<LNumeric, Cmplx<f64>>(args) |x| { x.to_inexact() },
            PNumberString => match args.len() {
                1 => do call_tc1::<LNumeric, ~str>(args) |x| { x.to_str() },
                2 => do call_err2::<LNumeric, uint, ~str>(args) |&x, &radix| {
                    match x {
                        NExact(ref n) => Ok(n.to_str_radix(radix)),
                        _ => if radix == 10 { Ok(x.to_str()) } else { Err(TypeError) },
                    }
                },
                n => Err(ArgNumError(1, Some(2), n))
            },
            PEQ => do call_real_bfoldl(args) |&lhs, &rhs| { lhs == rhs },
            PGT => do call_real_bfoldl(args) |&lhs, &rhs| { lhs > rhs },
            PLT => do call_real_bfoldl(args) |&lhs, &rhs| { lhs < rhs },
            PGE => do call_real_bfoldl(args) |&lhs, &rhs| { lhs >= rhs },
            PLE => do call_real_bfoldl(args) |&lhs, &rhs| { lhs <= rhs },
            PNot => do call_tc1::<@RDatum, bool>(args) |&arg| {
                match arg {
                    @LBool(false) => true,
                    _ => false,
                }
            },
            PBoolean => typecheck::<bool>(args),
            PChar => typecheck::<char>(args),
            // procedure?: the RUndef placeholder does not count as callable.
            PProcedure => match args {
                [@LExt(RUndef)] => Ok(@LBool(false)),
                [@LExt(_)] => Ok(@LBool(true)),
                [_] => Ok(@LBool(false)),
                _ => Err(ArgNumError(1, Some(1), args.len())),
            },
            PIsVector => match args {
                [@LVector(_)] => Ok(@LBool(true)),
                [_] => Ok(@LBool(false)),
                _ => Err(ArgNumError(1, Some(1), args.len())),
            },
            // make-vector: optional fill value; defaults to <undefined>.
            PMakeVector => match args {
                [@LNum(ref x)] => match get_uint(x) {
                    Some(k) => {
                        let mut v = ~[];
                        v.grow(k, &@LExt(RUndef));
                        Ok(@LVector(v))
                    },
                    None => Err(TypeError),
                },
                [@LNum(ref x), ref y] => match get_uint(x) {
                    Some(k) => {
                        let mut v = ~[];
                        v.grow(k, y);
                        Ok(@LVector(v))
                    },
                    None => Err(TypeError),
                },
                [_] | [_, _] => Err(TypeError),
                _ => Err(ArgNumError(1, Some(2), args.len())),
            },
            PVector => Ok(@LVector(args.to_owned())),
            PVectorLength => do call_tc1::<~[@RDatum], uint>(args) |v| { v.len() },
            PVectorRef => do call_err2::<~[@RDatum], uint, @RDatum>(args) |v, &idx| {
                if idx < v.len() { Ok(v[idx]) } else { Err(RangeError) }
            },
            PVectorList => do call_tc1::<~[@RDatum], @RDatum>(args) |&v| { LDatum::from_list(v) },
            PListVector => do call_err1::<@RDatum, ~[@RDatum]>(args) |&l| {
                match l.to_list() {
                    Some(v) => Ok(v),
                    None => Err(TypeError),
                }
            },
            PNull => typecheck::<()>(args),
            PPair => typecheck::<(@RDatum, @RDatum)>(args),
            PIsString => typecheck::<~str>(args),
            PString => do call_vargs::<char, ~str>(args) |chars| { str::from_chars(chars) },
            PStringLength => do call_tc1::<~str, uint>(args) |s| { s.len() },
            // NOTE(review): bound is `idx <= s.len()` — idx == len passes the
            // check yet indexes one past the end; likely an off-by-one (`<` intended).
            PStringRef => do call_err2::<~str, uint, char>(args) |s, &idx| {
                if idx <= s.len() { Ok(s.char_at(idx)) } else { Err(RangeError) }
            },
            PSubstring => match args.len() {
                2 => do call_err2::<~str, uint, ~str>(args) |s, &start| {
                    if start <= s.len() {
                        Ok(s.slice(start, s.len()).to_owned())
                    } else {
                        Err(RangeError)
                    }
                },
                3 => do call_err3::<~str, uint, uint, ~str>(args) |s, &start, &end| {
                    if start <= end && end <= s.len() {
                        Ok(s.slice(start, end).to_owned())
                    } else {
                        Err(RangeError)
                    }
                },
                n => Err(ArgNumError(2, Some(3), n)),
            },
            PSymbol => do call_prim1(args) |arg| {
                match arg {
                    @LIdent(_) => Ok(@LBool(true)),
                    _ => Ok(@LBool(false)),
                }
            },
            PSymbolString => do call_prim1(args) |arg| {
                match arg {
                    @LIdent(ref s) => Ok(@LString(s.to_owned())),
                    _ => Err(TypeError),
                }
            },
            PStringSymbol => do call_prim1(args) |arg| {
                match arg {
                    @LString(ref s) => Ok(@LIdent(s.to_managed())),
                    _ => Err(TypeError),
                }
            },
        }
    }

    // Walk a quasiquoted datum, expanding nested quasiquote/unquote markers;
    // anything that is not a pair or vector is returned unchanged.
    fn recursive_qq(&mut self, val: &@RDatum) -> Result<@RDatum, RuntimeError> {
        match *val {
            @LCons(ref h, ref t) => match is_quote(h,t) {
                Some((QuasiQuote, ref v)) => do self.quasiquote(v).map |&qv| {
                    @LCons(@LIdent(@"quasiquote"), @LCons(qv, @LNil))
                },
                Some((Unquote, ref v)) => self.unquote(v),
                _ => do self.recursive_qq(h).chain |qh| {
                    do self.recursive_qq(t).map |&qt| { @LCons(qh, qt) }
                },
            },
            @LVector(ref v) => {
                match result::map_vec(*v, |x| { self.recursive_qq(x) }) {
                    Ok(qmap) => Ok(@LVector(qmap)),
                    Err(e) => Err(e),
                }
            },
            _ => Ok(*val),
        }
    }

    // Track quasiquote nesting depth around the recursive walk.
    // (Signature continues on the next chunk line.)
    fn quasiquote(&mut self, val: &@RDatum) -> Result<@RDatum,
RuntimeError> {
        self.qq_lvl += 1;
        let res = self.recursive_qq(val);
        self.qq_lvl -= 1;
        res
    }

    // Evaluate an unquote: only legal inside quasiquote. At depth 1 the datum
    // is actually evaluated; deeper, it is rebuilt with one nesting level
    // stripped (depth is restored before returning).
    fn unquote(&mut self, val: &@RDatum) -> Result<@RDatum, RuntimeError> {
        if self.qq_lvl == 0 {
            Err(BadSyntax(SynUnquote, ~"unquote not nested in quasiquote"))
        } else {
            self.qq_lvl -= 1;
            let res = if self.qq_lvl == 0 {
                self.eval(*val)
            } else {
                do self.recursive_qq(val).map |&qval| {
                    @LCons(@LIdent(@"unquote"), @LCons(qval, @LNil))
                }
            };
            self.qq_lvl += 1;
            res
        }
    }

    // Apply an already-evaluated callee to already-evaluated arguments.
    fn apply(&mut self, proc: &RuntimeData, args: &[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match proc {
            &RUndef => Err(NotCallable),
            &RPrim(f) => self.call_prim(f, args),
            &RProc(ref anames, ref vargs, ref code, ref env) =>
                self.call_proc(*anames, *vargs, *code, *env, args),
        }
    }

    // Evaluate argument expressions left-to-right, then apply the callee.
    fn call(&mut self, proc: &RuntimeData, aexprs: ~[@RDatum]) -> Result<@RDatum, RuntimeError> {
        match result::map_vec(aexprs, |&expr| self.eval(expr)) {
            Ok(args) => self.apply(proc, args),
            Err(e) => Err(e),
        }
    }

    // Fresh interpreter wired to the process's standard streams, with the
    // prelude preloaded and no quasiquote nesting.
    pub fn new_std() -> Runtime {
        Runtime {
            stdin: io::stdin(),
            stdout: io::stdout(),
            stderr: io::stderr(),
            env: @mut Stack::new(),
            global: load_prelude(),
            qq_lvl: 0,
        }
    }

    // Core eval: symbols resolve through find_var; pairs dispatch to either a
    // special form or a procedure application; () is an error; everything else
    // is self-evaluating.
    pub fn eval(&mut self, val: @RDatum) -> Result<@RDatum, RuntimeError> {
        match *val {
            LIdent(name) => self.find_var(&name),
            LCons(fexpr, aexpr) => match aexpr.to_list() {
                None => Err(NotList),
                Some(aexprs) => {
                    match self.get_syntax(fexpr) {
                        Some(syntax) => self.run_syntax(syntax, aexprs),
                        None => match self.eval(fexpr) {
                            Ok(@LExt(ref proc)) => self.call(proc, aexprs),
                            Ok(_) => Err(NotCallable),
                            Err(e) => Err(e),
                        },
                    }
                },
            },
            LNil => Err(NilEval),
            _ => Ok(val),
        }
    }

    // Parse one datum from the reader and evaluate it; parse failures carry
    // the parser's position.
    pub fn load(&mut self, rdr: @io::Reader) -> Result<@RDatum, RuntimeError> {
        let mut parser = Parser(rdr);
        match parser.parse() {
            Ok(datum) => self.eval(@datum),
            Err(e) => {
                let (line, col) = parser.pos();
                Err(ParseError(line, col, e))
            },
        }
    }
}

// set!: overwrite the innermost existing binding of `name`; true on success.
// The `each_mut` traversal stops as soon as a frame matched.
priv fn set_var(env: @mut Stack<HashMap<@str, @RDatum>>, name: &@str, val: @RDatum) -> bool {
    let mut success = false;
    do env.each_mut |frame| {
        match frame.find_mut(name) {
            None => (),
            Some(v) => { success = true; *v = val; }
        }
        !success
    };
    success
}
// Test for <https://github.com/rust-lang/rust/issues/66756> // check-pass #![feature(const_if_match)] enum E { A, B, C } const fn f(e: E) { match e { E::A => {} E::B => {} E::C => {} } } const fn g(e: E) { match e { _ => {} } } fn main() {} Remove test for #66758 // Test for <https://github.com/rust-lang/rust/issues/66756> // check-pass #![feature(const_if_match)] enum E { A, B, C } const fn f(e: E) { match e { E::A => {} E::B => {} E::C => {} } } fn main() {}
#![feature(proc_macro)] #[macro_use] extern crate clap; #[macro_use] extern crate serde_derive; extern crate mio; extern crate mio_uds; extern crate nix; extern crate libc; extern crate time; extern crate toml; extern crate serde; extern crate serde_json; extern crate sozu_lib as sozu; mod config; use mio_uds::UnixStream; use std::net::{UdpSocket,ToSocketAddrs}; use std::collections::HashMap; use clap::{App,Arg,SubCommand}; use sozu::messages::Command; use sozu::command::CommandChannel; #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize,Deserialize)] pub enum ConfigMessageStatus { Ok, Processing, Error } #[derive(Debug,Clone,PartialEq,Eq,Hash, Serialize)] pub enum ConfigCommand { ProxyConfiguration(Command), SaveState(String), LoadState(String), DumpState, } //FIXME: maybe need a custom serialize here #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize)] pub struct ConfigMessage { pub id: String, pub data: ConfigCommand, pub listener: Option<String>, } #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize,Deserialize)] pub struct ConfigMessageAnswer { pub id: String, pub status: ConfigMessageStatus, pub message: String } fn main() { let matches = App::new("sozuctl") .version(crate_version!()) .about("hot reconfigurable proxy") .arg(Arg::with_name("config") .short("c") .long("config") .value_name("FILE") .help("Sets a custom config file") .takes_value(true) .required(true)) .subcommand(SubCommand::with_name("shutdown") .about("shuts down the proxy") .arg(Arg::with_name("hard").long("hard") .help("shuts down the proxy without waiting for connections to finish"))) .subcommand(SubCommand::with_name("state") .about("state management") .subcommand(SubCommand::with_name("save") .arg(Arg::with_name("file") .short("f") .long("file") .value_name("state file") .help("Save state to that file") .takes_value(true) .required(true))) .subcommand(SubCommand::with_name("load") .arg(Arg::with_name("file") .short("f") .long("file") .value_name("state file") .help("Save state to that file") 
.takes_value(true))) .subcommand(SubCommand::with_name("dump"))) .get_matches(); if let Some(matches) = matches.subcommand_matches("worker") { let fd = matches.value_of("fd").expect("needs a file descriptor") .parse::<i32>().expect("the file descriptor must be a number"); let id = matches.value_of("id").expect("needs a worker id"); let tag = matches.value_of("tag").expect("needs a configuration tag"); return; } let config_file = matches.value_of("config").expect("required config file"); let config = config::Config::load_from_path(config_file).expect("could not parse configuration file"); let stream = UnixStream::connect(config.command_socket).expect("could not connect to the command unix socket"); let mut channel: CommandChannel<ConfigMessage,ConfigMessageAnswer> = CommandChannel::new(stream, 10000, 20000); channel.set_nonblocking(false); match matches.subcommand() { ("shutdown", Some(sub)) => { let hard_shutdown = sub.is_present("hard"); }, ("state", Some(sub)) => { match sub.subcommand() { ("save", Some(state_sub)) => { let file = state_sub.value_of("file").expect("missing target file"); }, ("load", Some(state_sub)) => { let file = state_sub.value_of("file").expect("missing target file"); }, ("dump", _) => { }, _ => println!("unknown state management command") } }, _ => println!("unknown subcommand") } } add an example of sending a message to the proxy #![feature(proc_macro)] #[macro_use] extern crate clap; #[macro_use] extern crate serde_derive; extern crate mio; extern crate mio_uds; extern crate nix; extern crate libc; extern crate time; extern crate toml; extern crate serde; extern crate serde_json; extern crate sozu_lib as sozu; mod config; use mio_uds::UnixStream; use std::net::{UdpSocket,ToSocketAddrs}; use std::collections::HashMap; use clap::{App,Arg,SubCommand}; use sozu::messages::Command; use sozu::command::CommandChannel; #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize,Deserialize)] pub enum ConfigMessageStatus { Ok, Processing, Error } 
#[derive(Debug,Clone,PartialEq,Eq,Hash, Serialize)] pub enum ConfigCommand { ProxyConfiguration(Command), SaveState(String), LoadState(String), DumpState, } //FIXME: maybe need a custom serialize here #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize)] pub struct ConfigMessage { pub id: String, pub data: ConfigCommand, pub listener: Option<String>, } #[derive(Debug,Clone,PartialEq,Eq,Hash,Serialize,Deserialize)] pub struct ConfigMessageAnswer { pub id: String, pub status: ConfigMessageStatus, pub message: String } fn main() { let matches = App::new("sozuctl") .version(crate_version!()) .about("hot reconfigurable proxy") .arg(Arg::with_name("config") .short("c") .long("config") .value_name("FILE") .help("Sets a custom config file") .takes_value(true) .required(true)) .subcommand(SubCommand::with_name("shutdown") .about("shuts down the proxy") .arg(Arg::with_name("hard").long("hard") .help("shuts down the proxy without waiting for connections to finish"))) .subcommand(SubCommand::with_name("state") .about("state management") .subcommand(SubCommand::with_name("save") .arg(Arg::with_name("file") .short("f") .long("file") .value_name("state file") .help("Save state to that file") .takes_value(true) .required(true))) .subcommand(SubCommand::with_name("load") .arg(Arg::with_name("file") .short("f") .long("file") .value_name("state file") .help("Save state to that file") .takes_value(true))) .subcommand(SubCommand::with_name("dump"))) .get_matches(); if let Some(matches) = matches.subcommand_matches("worker") { let fd = matches.value_of("fd").expect("needs a file descriptor") .parse::<i32>().expect("the file descriptor must be a number"); let id = matches.value_of("id").expect("needs a worker id"); let tag = matches.value_of("tag").expect("needs a configuration tag"); return; } let config_file = matches.value_of("config").expect("required config file"); let config = config::Config::load_from_path(config_file).expect("could not parse configuration file"); let stream = 
UnixStream::connect(config.command_socket).expect("could not connect to the command unix socket"); let mut channel: CommandChannel<ConfigMessage,ConfigMessageAnswer> = CommandChannel::new(stream, 10000, 20000); channel.set_nonblocking(false); match matches.subcommand() { ("shutdown", Some(sub)) => { let hard_shutdown = sub.is_present("hard"); }, ("state", Some(sub)) => { match sub.subcommand() { ("save", Some(state_sub)) => { let file = state_sub.value_of("file").expect("missing target file"); }, ("load", Some(state_sub)) => { let file = state_sub.value_of("file").expect("missing target file"); }, ("dump", _) => { channel.write_message(&ConfigMessage { //FIXME: make a random id generator id: "hello".to_string(), data: ConfigCommand::DumpState, listener: None, }); match channel.read_message() { None => println!("the proxy didn't answer"), Some(message) => { //FIXME: verify that the message id is correct match message.status { ConfigMessageStatus::Processing => { // do nothing here // for other messages, we would loop over read_message // until an error or ok message was sent }, ConfigMessageStatus::Error => { println!("could not dump proxy state: {}", message.message); }, ConfigMessageStatus::Ok => { println!("Proxy state:\n{}", message.message); } } } } }, _ => println!("unknown state management command") } }, _ => println!("unknown subcommand") } }
#![crate_name = "rust-hl-lua"] #![crate_type = "lib"] #![comment = "Lua bindings for Rust"] #![license = "MIT"] #![allow(visible_private_types)] #![feature(macro_rules)] #![feature(unsafe_destructor)] extern crate libc; extern crate collections; use std::kinds::marker::ContravariantLifetime; pub use lua_tables::LuaTable; pub use functions_read::LuaFunction; pub mod any; pub mod functions_read; pub mod lua_tables; pub mod userdata; mod ffi; mod functions_write; mod rust_tables; mod tuples; mod values; /// Main object of the library. /// The lifetime parameter corresponds to the lifetime of the Lua object itself. #[unstable] pub struct Lua<'lua> { lua: *mut ffi::lua_State, marker: ContravariantLifetime<'lua>, must_be_closed: bool, inside_callback: bool // if true, we are inside a callback } /// Trait for objects that have access to a Lua context. /// The lifetime parameter is the lifetime of the Lua context. pub trait HasLua<'lua> { fn use_lua(&mut self) -> *mut ffi::lua_State; } impl<'lua> HasLua<'lua> for Lua<'lua> { fn use_lua(&mut self) -> *mut ffi::lua_State { self.lua } } /// Object which allows access to a Lua variable. struct LoadedVariable<'var, L> { lua: &'var mut L, size: uint, // number of elements over "lua" } impl<'var, 'lua, L: HasLua<'lua>> HasLua<'lua> for LoadedVariable<'var, L> { fn use_lua(&mut self) -> *mut ffi::lua_State { self.lua.use_lua() } } /// Should be implemented by whatever type is pushable on the Lua stack. #[unstable] pub trait Push<L> { /// Pushes the value on the top of the stack. /// Must return the number of elements pushed. /// /// You can implement this for any type you want by redirecting to call to /// another implementation (for example `5.push_to_lua`) or by calling `userdata::push_userdata` fn push_to_lua(self, lua: &mut L) -> uint; } /// Should be implemented by types that can be read by consomming a LoadedVariable. #[unstable] pub trait ConsumeRead<'a, L> { /// Returns the LoadedVariable in case of failure. 
fn read_from_variable(var: LoadedVariable<'a, L>) -> Result<Self, LoadedVariable<'a, L>>; } /// Should be implemented by whatever type can be read by copy from the Lua stack. #[unstable] pub trait CopyRead<L> { /// Reads an object from the Lua stack. /// /// Similar to Push, you can implement this trait for your own types either by /// redirecting the calls to another implementation or by calling userdata::read_copy_userdata /// /// # Arguments /// * `lua` - The Lua object to read from /// * `index` - The index on the stack to read from fn read_from_lua(lua: &mut L, index: i32) -> Option<Self>; } /// Types that can be indices in Lua tables. #[unstable] pub trait Index<L>: Push<L> + CopyRead<L> { } /// Error that can happen when executing Lua code. #[deriving(Show)] #[unstable] pub enum LuaError { /// There was a syntax error when parsing the Lua code. SyntaxError(String), /// There was an error during execution of the Lua code /// (for example not enough parameters for a function call). ExecutionError(String), /// The call to `execute` has requested the wrong type of data. WrongType } // this alloc function is required to create a lua state. extern "C" fn alloc(_ud: *mut libc::c_void, ptr: *mut libc::c_void, _osize: libc::size_t, nsize: libc::size_t) -> *mut libc::c_void { unsafe { if nsize == 0 { libc::free(ptr as *mut libc::c_void); std::ptr::mut_null() } else { libc::realloc(ptr, nsize) } } } // called whenever lua encounters an unexpected error. extern "C" fn panic(lua: *mut ffi::lua_State) -> libc::c_int { let err = unsafe { ffi::lua_tostring(lua, -1) }; fail!("PANIC: unprotected error in call to Lua API ({})\n", err); } impl<'lua> Lua<'lua> { /// Builds a new Lua context. /// /// # Failure /// The function fails if lua_newstate fails (which indicates lack of memory). 
#[stable] pub fn new() -> Lua { let lua = unsafe { ffi::lua_newstate(alloc, std::ptr::mut_null()) }; if lua.is_null() { fail!("lua_newstate failed"); } unsafe { ffi::lua_atpanic(lua, panic) }; Lua { lua: lua, marker: ContravariantLifetime, must_be_closed: true, inside_callback: false } } /// Takes an existing lua_State and build a Lua object from it. /// /// # Arguments /// * close_at_the_end: if true, lua_close will be called on the lua_State on the destructor #[unstable] pub unsafe fn from_existing_state<T>(lua: *mut T, close_at_the_end: bool) -> Lua { Lua { lua: std::mem::transmute(lua), marker: ContravariantLifetime, must_be_closed: close_at_the_end, inside_callback: false } } /// Opens all standard Lua libraries. /// This is done by calling `luaL_openlibs`. #[unstable] pub fn openlibs(&mut self) { unsafe { ffi::luaL_openlibs(self.lua) } } /// Executes some Lua code on the context. #[unstable] pub fn execute<'a, T: CopyRead<LoadedVariable<'a, Lua<'lua>>>>(&'a mut self, code: &str) -> Result<T, LuaError> { let mut f = try!(functions_read::LuaFunction::load(self, code)); f.call() } /// Executes some Lua code on the context. #[unstable] pub fn execute_from_reader<'a, T: CopyRead<LoadedVariable<'a, Lua<'lua>>>, R: std::io::Reader + 'static>(&'a mut self, code: R) -> Result<T, LuaError> { let mut f = try!(functions_read::LuaFunction::load_from_reader(self, code)); f.call() } /// Loads the value of a global variable. #[unstable] pub fn load<'a, I: Str, V: ConsumeRead<'a, Lua<'lua>>>(&'a mut self, index: I) -> Option<V> { unsafe { ffi::lua_getglobal(self.lua, index.as_slice().to_c_str().unwrap()); } ConsumeRead::read_from_variable(LoadedVariable { lua: self, size: 1 }).ok() } /// Reads the value of a global variable by copying it. 
#[unstable] pub fn get<I: Str, V: CopyRead<Lua<'lua>>>(&mut self, index: I) -> Option<V> { unsafe { ffi::lua_getglobal(self.lua, index.as_slice().to_c_str().unwrap()); } CopyRead::read_from_lua(self, -1) } /// Modifies the value of a global variable. #[unstable] pub fn set<I: Str, V: Push<Lua<'lua>>>(&mut self, index: I, value: V) { value.push_to_lua(self); unsafe { ffi::lua_setglobal(self.lua, index.as_slice().to_c_str().unwrap()); } } #[unstable] pub fn load_new_table<'var>(&'var mut self) -> LuaTable<'var, Lua<'lua>> { unsafe { ffi::lua_newtable(self.lua) }; ConsumeRead::read_from_variable(LoadedVariable { lua: self, size: 1 }).ok().unwrap() } } #[unsafe_destructor] impl<'lua> Drop for Lua<'lua> { fn drop(&mut self) { if self.must_be_closed { unsafe { ffi::lua_close(self.lua) } } } } // TODO: crashes the compiler /*#[unsafe_destructor] impl<'a, 'lua, L: HasLua<'lua>> Drop for LoadedVariable<'a, L> { fn drop(&mut self) { unsafe { ffi::lua_pop(self.use_lua(), self.size as libc::c_int) } } }*/ Update for Rust nightly #![crate_name = "rust-hl-lua"] #![crate_type = "lib"] #![comment = "Lua bindings for Rust"] #![license = "MIT"] #![allow(visible_private_types)] #![feature(macro_rules)] #![feature(unsafe_destructor)] extern crate libc; extern crate collections; use std::kinds::marker::ContravariantLifetime; pub use lua_tables::LuaTable; pub use functions_read::LuaFunction; pub mod any; pub mod functions_read; pub mod lua_tables; pub mod userdata; mod ffi; mod functions_write; mod rust_tables; mod tuples; mod values; /// Main object of the library. /// The lifetime parameter corresponds to the lifetime of the Lua object itself. #[unstable] pub struct Lua<'lua> { lua: *mut ffi::lua_State, marker: ContravariantLifetime<'lua>, must_be_closed: bool, inside_callback: bool // if true, we are inside a callback } /// Trait for objects that have access to a Lua context. /// The lifetime parameter is the lifetime of the Lua context. 
pub trait HasLua<'lua> { fn use_lua(&mut self) -> *mut ffi::lua_State; } impl<'lua> HasLua<'lua> for Lua<'lua> { fn use_lua(&mut self) -> *mut ffi::lua_State { self.lua } } /// Object which allows access to a Lua variable. struct LoadedVariable<'var, L> { lua: &'var mut L, size: uint, // number of elements over "lua" } impl<'var, 'lua, L: HasLua<'lua>> HasLua<'lua> for LoadedVariable<'var, L> { fn use_lua(&mut self) -> *mut ffi::lua_State { self.lua.use_lua() } } /// Should be implemented by whatever type is pushable on the Lua stack. #[unstable] pub trait Push<L> { /// Pushes the value on the top of the stack. /// Must return the number of elements pushed. /// /// You can implement this for any type you want by redirecting to call to /// another implementation (for example `5.push_to_lua`) or by calling `userdata::push_userdata` fn push_to_lua(self, lua: &mut L) -> uint; } /// Should be implemented by types that can be read by consomming a LoadedVariable. #[unstable] pub trait ConsumeRead<'a, L> { /// Returns the LoadedVariable in case of failure. fn read_from_variable(var: LoadedVariable<'a, L>) -> Result<Self, LoadedVariable<'a, L>>; } /// Should be implemented by whatever type can be read by copy from the Lua stack. #[unstable] pub trait CopyRead<L> { /// Reads an object from the Lua stack. /// /// Similar to Push, you can implement this trait for your own types either by /// redirecting the calls to another implementation or by calling userdata::read_copy_userdata /// /// # Arguments /// * `lua` - The Lua object to read from /// * `index` - The index on the stack to read from fn read_from_lua(lua: &mut L, index: i32) -> Option<Self>; } /// Types that can be indices in Lua tables. #[unstable] pub trait Index<L>: Push<L> + CopyRead<L> { } /// Error that can happen when executing Lua code. #[deriving(Show)] #[unstable] pub enum LuaError { /// There was a syntax error when parsing the Lua code. 
SyntaxError(String), /// There was an error during execution of the Lua code /// (for example not enough parameters for a function call). ExecutionError(String), /// The call to `execute` has requested the wrong type of data. WrongType } // this alloc function is required to create a lua state. extern "C" fn alloc(_ud: *mut libc::c_void, ptr: *mut libc::c_void, _osize: libc::size_t, nsize: libc::size_t) -> *mut libc::c_void { unsafe { if nsize == 0 { libc::free(ptr as *mut libc::c_void); std::ptr::mut_null() } else { libc::realloc(ptr, nsize) } } } // called whenever lua encounters an unexpected error. extern "C" fn panic(lua: *mut ffi::lua_State) -> libc::c_int { let err = unsafe { ffi::lua_tostring(lua, -1) }; fail!("PANIC: unprotected error in call to Lua API ({})\n", err); } impl<'lua> Lua<'lua> { /// Builds a new Lua context. /// /// # Failure /// The function fails if lua_newstate fails (which indicates lack of memory). #[stable] pub fn new() -> Lua<'lua> { let lua = unsafe { ffi::lua_newstate(alloc, std::ptr::mut_null()) }; if lua.is_null() { fail!("lua_newstate failed"); } unsafe { ffi::lua_atpanic(lua, panic) }; Lua { lua: lua, marker: ContravariantLifetime, must_be_closed: true, inside_callback: false } } /// Takes an existing lua_State and build a Lua object from it. /// /// # Arguments /// * close_at_the_end: if true, lua_close will be called on the lua_State on the destructor #[unstable] pub unsafe fn from_existing_state<T>(lua: *mut T, close_at_the_end: bool) -> Lua<'lua> { Lua { lua: std::mem::transmute(lua), marker: ContravariantLifetime, must_be_closed: close_at_the_end, inside_callback: false } } /// Opens all standard Lua libraries. /// This is done by calling `luaL_openlibs`. #[unstable] pub fn openlibs(&mut self) { unsafe { ffi::luaL_openlibs(self.lua) } } /// Executes some Lua code on the context. 
#[unstable] pub fn execute<'a, T: CopyRead<LoadedVariable<'a, Lua<'lua>>>>(&'a mut self, code: &str) -> Result<T, LuaError> { let mut f = try!(functions_read::LuaFunction::load(self, code)); f.call() } /// Executes some Lua code on the context. #[unstable] pub fn execute_from_reader<'a, T: CopyRead<LoadedVariable<'a, Lua<'lua>>>, R: std::io::Reader + 'static>(&'a mut self, code: R) -> Result<T, LuaError> { let mut f = try!(functions_read::LuaFunction::load_from_reader(self, code)); f.call() } /// Loads the value of a global variable. #[unstable] pub fn load<'a, I: Str, V: ConsumeRead<'a, Lua<'lua>>>(&'a mut self, index: I) -> Option<V> { unsafe { ffi::lua_getglobal(self.lua, index.as_slice().to_c_str().unwrap()); } ConsumeRead::read_from_variable(LoadedVariable { lua: self, size: 1 }).ok() } /// Reads the value of a global variable by copying it. #[unstable] pub fn get<I: Str, V: CopyRead<Lua<'lua>>>(&mut self, index: I) -> Option<V> { unsafe { ffi::lua_getglobal(self.lua, index.as_slice().to_c_str().unwrap()); } CopyRead::read_from_lua(self, -1) } /// Modifies the value of a global variable. #[unstable] pub fn set<I: Str, V: Push<Lua<'lua>>>(&mut self, index: I, value: V) { value.push_to_lua(self); unsafe { ffi::lua_setglobal(self.lua, index.as_slice().to_c_str().unwrap()); } } #[unstable] pub fn load_new_table<'var>(&'var mut self) -> LuaTable<'var, Lua<'lua>> { unsafe { ffi::lua_newtable(self.lua) }; ConsumeRead::read_from_variable(LoadedVariable { lua: self, size: 1 }).ok().unwrap() } } #[unsafe_destructor] impl<'lua> Drop for Lua<'lua> { fn drop(&mut self) { if self.must_be_closed { unsafe { ffi::lua_close(self.lua) } } } } // TODO: crashes the compiler /*#[unsafe_destructor] impl<'a, 'lua, L: HasLua<'lua>> Drop for LoadedVariable<'a, L> { fn drop(&mut self) { unsafe { ffi::lua_pop(self.use_lua(), self.size as libc::c_int) } } }*/
extern crate sfml; extern crate nalgebra as na; use sfml::system::Vector2f; use sfml::window::{ContextSettings, VideoMode, event, Close}; use sfml::graphics::{RenderWindow, Texture, Sprite, Color}; use na::{Vec2}; fn main() { let mut env = SFMLEnv::new(800, 600); while env.win.is_open() { for event in env.win.events() { match event { event::Closed => env.win.close(), _ => { } } } env.clear(); env.win.display(); } } struct SFMLEnv { win: RenderWindow, width: u32, height: u32, shipTexture: Texture } impl SFMLEnv { fn new(width: u32, height: u32) -> SFMLEnv { let mut window = RenderWindow::new(VideoMode::new_init(width, height, 32), "SFML Example", Close, &ContextSettings::default()) .expect("Cannot create a new Render Window."); window.set_vertical_sync_enabled(true); let mut t = Texture::new_from_file("plane.png"); t.set_smooth(true); SFMLEnv { win: window, width: width, height: height, shipTexture: t } } fn clear(&mut self) { self.win.clear(&Color::black()); } } struct Boid<'a> { pos: Vec2<f32>, vel: Vec2<f32>, acc: Vec2<f32>, sprite: Sprite<'a>, maxSpeed: f32, maxSteer: f32 } impl Boid { fn new(env: SFMLEnv) -> Boid<'a> { let mut s = Sprite::new_with_texture(env.shipTexture).expect("Cannot create ship sprite."); let rect = s.get_local_bounds(); s.set_origin2f(rect.width / 2.0, rect.height / 2.0); Boid { pos: Vec2::new(0, 0), vel: Vec2::new(0, 0), acc: Vec2::new(0, 0), sprite: s, maxSpeed: 5, maxSteer: 0.2 } } } fix lifetimes extern crate sfml; extern crate nalgebra as na; use sfml::system::Vector2f; use sfml::window::{ContextSettings, VideoMode, event, Close}; use sfml::graphics::{RenderWindow, Texture, Sprite, Color}; use na::{Vec2}; fn main() { let mut env = SFMLEnv::new(800, 600); while env.win.is_open() { for event in env.win.events() { match event { event::Closed => env.win.close(), _ => { } } } env.clear(); env.win.display(); } } struct SFMLEnv { win: RenderWindow, width: u32, height: u32, shipTexture: Texture } impl SFMLEnv { fn new(width: u32, height: u32) 
-> SFMLEnv { let mut window = RenderWindow::new(VideoMode::new_init(width, height, 32), "SFML Example", Close, &ContextSettings::default()) .expect("Cannot create a new Render Window."); window.set_vertical_sync_enabled(true); let mut t = Texture::new_from_file("plane.png") .expect("Cannot create Texture"); t.set_smooth(true); SFMLEnv { win: window, width: width, height: height, shipTexture: t } } fn clear(&mut self) { self.win.clear(&Color::black()); } } struct Boid<'a> { pos: Vec2<f32>, vel: Vec2<f32>, acc: Vec2<f32>, sprite: Sprite<'a>, maxSpeed: f32, maxSteer: f32 } impl<'a> Boid<'a> { fn new(env: SFMLEnv) -> Boid<'a> { let mut s = Sprite::new_with_texture(env.shipTexture).expect("Cannot create ship sprite."); let rect = s.get_local_bounds(); s.set_origin2f(rect.width / 2.0, rect.height / 2.0); Boid { pos: Vec2::new(0.0, 0.0), vel: Vec2::new(0.0, 0.0), acc: Vec2::new(0.0, 0.0), sprite: s, maxSpeed: 5.0, maxSteer: 0.2 } } }
use std::io::{BufReader, Read}; use utils::extensions::{Peeking, PeekingExt, FilteringScan, FilteringScanExt}; use self::Token::{ TokPassageName, TokTagStart, TokTagEnd, TokTag, TokMakroStart, TokMakroEnd, TokVariable, TokSet, TokAssign, TokInt, TokFloat, TokNumOp, TokCompOp, TokLogOp, TokText, TokFormatBoldStart, TokFormatBoldEnd, TokFormatItalicStart, TokFormatItalicEnd, TokFormatUnderStart, TokFormatUnderEnd, TokFormatStrikeStart, TokFormatStrikeEnd, TokFormatSubStart, TokFormatSubEnd, TokFormatSupStart, TokFormatSupEnd, TokFormatMonoStart, TokFormatMonoEnd, TokString, TokBracketOpen, TokBracketClose, TokIf, TokElse, TokEndIf, TokPassageLink, TokFormatBulList, TokFormatNumbList, TokFormatIndentBlock, TokFormatHeading, TokVarSetStart, TokVarSetEnd, TokSemiColon, TokPrint, TokDisplay, TokBoolean, TokFunction , TokColon, TokArgsEnd, TokSilently, TokEndSilently, TokArrayStart, TokArrayEnd, TokNewLine, TokFormatHorizontalLine, TokMakroVar }; pub struct ScanState { current_text: String, skip_next: bool, } pub fn lex<R: Read>(input: &mut R) -> FilteringScan<Peeking<TweeLexer<BufReader<&mut R>>, Token>, ScanState, fn(&mut ScanState, (Token, Option<Token>)) -> Option<Token>> { info!("Nicht in Tokens verarbeitete Zeichen: "); TweeLexer::new(BufReader::new(input)).peeking().scan_filter( ScanState { current_text: String::new(), skip_next: false, }, { fn scan_fn(state: &mut ScanState, elem: (Token, Option<Token>)) -> Option<Token> { if state.skip_next { state.skip_next = false; return None; } match elem { (TokText(text), Some(TokText(_))) => { state.current_text.push_str(&text); None } (TokText(text), _) => { state.current_text.push_str(&text); let val = TokText(state.current_text.clone()); state.current_text.clear(); Some(val) }, (TokVariable(var), Some(TokAssign(_, op))) => { state.skip_next = true; Some(TokAssign(var, op)) }, (x, _) => Some(x), } } scan_fn } ) } #[derive(PartialEq,Debug,Clone)] pub enum Token { TokPassageName (String), TokTagStart, TokTagEnd, 
TokVarSetStart, TokVarSetEnd, TokPassageLink (String, String), TokTag (String), TokText (String), TokFormatBoldStart, TokFormatBoldEnd, TokFormatItalicStart, TokFormatItalicEnd, TokFormatUnderStart, TokFormatUnderEnd, TokFormatStrikeStart, TokFormatStrikeEnd, TokFormatSubStart, TokFormatSubEnd, TokFormatSupStart, TokFormatSupEnd, TokFormatMonoStart, TokFormatMonoEnd, TokFormatBulList, TokFormatNumbList, TokFormatIndentBlock, TokFormatHorizontalLine, TokFormatHeading (usize), TokMakroStart, TokMakroEnd, TokBracketOpen, TokBracketClose, TokVariable (String), TokInt (i32), TokFloat (f32), TokString (String), TokBoolean (String), TokFunction (String), TokColon, TokArgsEnd, TokArrayStart, TokArrayEnd, TokSet, TokAssign (String, String), TokNumOp (String), TokCompOp (String), TokLogOp (String), TokSemiColon, TokIf, TokElse, TokEndIf, TokPrint, TokDisplay, TokSilently, TokEndSilently, TokMakroVar(String), TokNewLine, TokPseudo } rustlex! TweeLexer { // Properties property new_line:bool = true; property format_bold_open:bool = false; property format_italic_open:bool = false; property format_under_open:bool = false; property format_strike_open:bool = false; property format_sub_open:bool = false; property format_sup_open:bool = false; property function_brackets:usize = 0; // Regular Expressions let WHITESPACE = ' ' | '\t'; let UNDERSCORE = '_'; let NEWLINE = '\n'; let INITIAL_START_CHAR = [^":"'\n'] | ':' [^":"'\n']; let INITIAL_CHAR = [^'\n']; let TEXT_INITIAL = INITIAL_START_CHAR INITIAL_CHAR*; // If for example // is at a beginning of a line, then // is matched and not just / let TEXT_START_CHAR = "ä"|"Ä"|"ü"|"Ü"|"ö"|"Ö"|"ß"|"ẞ" | [^"*!>#"'\n']; // add chars longer than one byte let TEXT_CHAR = [^"/'_=~^{@<[" '\n']; let TEXT = TEXT_CHAR+ | ["/'_=^{@<["]; let TEXT_MONO_CHAR = [^"}"'\n']; let TEXT_MONO = TEXT_MONO_CHAR+ | "}" | "}}"; let PASSAGE_START = "::" ':'*; let PASSAGE_CHAR_NORMAL = [^"]$<>:|" '\n']; let PASSAGE_CHAR = PASSAGE_CHAR_NORMAL | ':' PASSAGE_CHAR_NORMAL; 
let PASSAGE_NAME = PASSAGE_CHAR_NORMAL PASSAGE_CHAR* ':'?; let TAG = ['a'-'z''A'-'Z''0'-'9''.''_']+; let TAG_START = '['; let TAG_END = ']'; let FORMAT_ITALIC = "//"; let FORMAT_BOLD = "''"; let FORMAT_UNDER = "__"; let FORMAT_STRIKE = "=="; let FORMAT_SUB = "~~"; let FORMAT_SUP = "^^"; let FORMAT_MONO_START = "{{{"; let FORMAT_MONO_END = "}}}"; //TODO ignore content let FORMAT_INLINE = "@@"; let FORMAT_BUL_LIST = "*" WHITESPACE*; let FORMAT_NUMB_LIST = "#" WHITESPACE*; let FORMAT_INDENT_BLOCK = "<<<" NEWLINE; let FORMAT_HORIZONTAL_LINE = "----" NEWLINE; let FORMAT_HEADING = ("!" | "!!" | "!!!" | "!!!!" | "!!!!!") WHITESPACE*; let MAKRO_START = "<<"; let MAKRO_END = ">>"; let BR_OPEN = '('; let BR_CLOSE = ')'; let DIGIT = ['0'-'9']; let LETTER = ['a'-'z''A'-'Z']; let VAR_CHAR = LETTER | DIGIT | UNDERSCORE; let VAR_NAME = '$' (LETTER | UNDERSCORE) VAR_CHAR*; let INT = "-"? DIGIT+; let FLOAT = "-"? (DIGIT+ "." DIGIT*) | "-"? (DIGIT* "." DIGIT+) | "-"? "Infinity"; let STRING = ('"' [^'"']* '"') | ("'" [^"'"]* "'"); let BOOL = "true" | "false"; let COLON = ','; let FUNCTION = LETTER+ '('; let MACRO_NAME = LETTER+ WHITESPACE*; let ASSIGN = "=" | "to" | "+=" | "-=" | "*=" | "/="; let SEMI_COLON = ';'; let NUM_OP = ["+-*/%"]; let COMP_OP = "is" | "==" | "eq" | "neq" | ">" | "gt" | ">=" | "gte" | "<" | "lt" | "<=" | "lte"; let LOG_OP = "and" | "or" | "not"; let LINK_OPEN = '['; let LINK_CLOSE = ']'; let LINK_TEXT = [^'\n'"|]"]+; let LINK_SIMPLE = "[[" (PASSAGE_NAME | VAR_NAME) "]"; let LINK_LABELED = "[[" LINK_TEXT "|" (PASSAGE_NAME | VAR_NAME) "]"; INITIAL { PASSAGE_START => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.PASSAGE(); None } TEXT_INITIAL => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.INITIAL_NON_NEWLINE(); None } } INITIAL_NON_NEWLINE { NEWLINE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.INITIAL(); None } } NEWLINE { PASSAGE_START => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.PASSAGE(); None } MAKRO_START => |lexer:&mut 
TweeLexer<R>| -> Option<Token>{ lexer.MAKRO(); lexer.new_line = true; None } LINK_SIMPLE => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let name = &trimmed.to_string(); Some(TokPassageLink(name.clone(), name.clone())) } LINK_LABELED => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let matches = &trimmed.split("|").collect::<Vec<&str>>(); assert_eq!(matches.len(), 2); let text = matches[0].to_string(); let name = matches[1].to_string(); Some(TokPassageLink(text, name)) } FORMAT_ITALIC => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_italic_open = !lexer.format_italic_open; if lexer.format_italic_open {Some(TokFormatItalicStart)} else {Some(TokFormatItalicEnd)} } FORMAT_BOLD => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_bold_open = !lexer.format_bold_open; if lexer.format_bold_open {Some(TokFormatBoldStart)} else {Some(TokFormatBoldEnd)} } FORMAT_UNDER => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_under_open = !lexer.format_under_open; if lexer.format_under_open {Some(TokFormatUnderStart)} else {Some(TokFormatUnderEnd)} } FORMAT_STRIKE => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_strike_open = !lexer.format_strike_open; if lexer.format_strike_open {Some(TokFormatStrikeStart)} else {Some(TokFormatStrikeEnd)} } FORMAT_SUB => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_sub_open = !lexer.format_sub_open; if lexer.format_sub_open {Some(TokFormatSubStart)} else {Some(TokFormatSubEnd)} } FORMAT_SUP => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_sup_open = !lexer.format_sup_open; if lexer.format_sup_open {Some(TokFormatSupStart)} else {Some(TokFormatSupEnd)} } FORMAT_MONO_START => |lexer:&mut TweeLexer<R>| { lexer.MONO_TEXT(); Some(TokFormatMonoStart) } FORMAT_BUL_LIST => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); 
Some(TokFormatBulList) } FORMAT_NUMB_LIST => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatNumbList) } FORMAT_HEADING => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatHeading(lexer.yystr().len())) } TEXT_START_CHAR => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokText(lexer.yystr())) } FORMAT_HORIZONTAL_LINE => |_:&mut TweeLexer<R>| Some(TokFormatHorizontalLine) FORMAT_INDENT_BLOCK => |_:&mut TweeLexer<R>| Some(TokFormatIndentBlock) NEWLINE => |_:&mut TweeLexer<R>| Some(TokNewLine) } NON_NEWLINE { MAKRO_START => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.MAKRO(); lexer.new_line = false; None } LINK_SIMPLE => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let name = &trimmed.to_string(); Some(TokPassageLink(name.clone(), name.clone())) } LINK_LABELED => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let matches = &trimmed.split("|").collect::<Vec<&str>>(); assert_eq!(matches.len(), 2); let text = matches[0].to_string(); let name = matches[1].to_string(); Some(TokPassageLink(text, name)) } FORMAT_ITALIC => |lexer:&mut TweeLexer<R>| { lexer.format_italic_open = !lexer.format_italic_open; if lexer.format_italic_open {Some(TokFormatItalicStart)} else {Some(TokFormatItalicEnd)} } FORMAT_BOLD => |lexer:&mut TweeLexer<R>| { lexer.format_bold_open = !lexer.format_bold_open; if lexer.format_bold_open {Some(TokFormatBoldStart)} else {Some(TokFormatBoldEnd)} } FORMAT_UNDER => |lexer:&mut TweeLexer<R>| { lexer.format_under_open = !lexer.format_under_open; if lexer.format_under_open {Some(TokFormatUnderStart)} else {Some(TokFormatUnderEnd)} } FORMAT_STRIKE => |lexer:&mut TweeLexer<R>| { lexer.format_strike_open = !lexer.format_strike_open; if lexer.format_strike_open {Some(TokFormatStrikeStart)} else {Some(TokFormatStrikeEnd)} } FORMAT_SUB => |lexer:&mut TweeLexer<R>| { lexer.format_sub_open = 
!lexer.format_sub_open; if lexer.format_sub_open {Some(TokFormatSubStart)} else {Some(TokFormatSubEnd)} } FORMAT_SUP => |lexer:&mut TweeLexer<R>| { lexer.format_sup_open = !lexer.format_sup_open; if lexer.format_sup_open {Some(TokFormatSupStart)} else {Some(TokFormatSupEnd)} } FORMAT_MONO_START => |lexer:&mut TweeLexer<R>| { lexer.MONO_TEXT(); Some(TokFormatMonoStart) } NEWLINE => |lexer:&mut TweeLexer<R>| { lexer.NEWLINE(); Some(TokNewLine) } TEXT => |lexer:&mut TweeLexer<R>| Some(TokText(lexer.yystr())) } PASSAGE { PASSAGE_NAME => |lexer:&mut TweeLexer<R>| Some(TokPassageName(lexer.yystr().trim().to_string())) TAG_START => |lexer:&mut TweeLexer<R>| { lexer.TAGS(); Some(TokTagStart) } NEWLINE => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.NEWLINE(); None } } TAGS { TAG => |lexer:&mut TweeLexer<R>| Some(TokTag(lexer.yystr())) WHITESPACE => |_:&mut TweeLexer<R>| -> Option<Token> { None } TAG_END => |lexer:&mut TweeLexer<R>| { lexer.PASSAGE(); Some(TokTagEnd) } } MONO_TEXT { TEXT_MONO => |lexer:&mut TweeLexer<R>| Some(TokText(lexer.yystr())) FORMAT_MONO_END => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatMonoEnd) } NEWLINE => |_:&mut TweeLexer<R>| -> Option<Token> { None } } MAKRO { WHITESPACE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } MACRO_NAME => |lexer:&mut TweeLexer<R>| { match lexer.yystr().trim().as_ref() { "set" => { lexer.MAKRO_CONTENT(); Some(TokSet) }, "if" => { lexer.MAKRO_CONTENT(); Some(TokIf) }, "else" => { lexer.MAKRO_CONTENT(); Some(TokElse) }, "endif" => { lexer.MAKRO_CONTENT(); Some(TokEndIf) }, "print" => { lexer.MAKRO_CONTENT(); Some(TokPrint) }, "display" => { lexer.DISPLAY_CONTENT(); Some(TokDisplay) }, "silently" => { lexer.MAKRO_CONTENT(); Some(TokSilently) }, "endsilently" => { lexer.MAKRO_CONTENT(); Some(TokEndSilently) }, _ => { panic!("Unknown macro: \"{}\"", lexer.yystr()); } } } VAR_NAME => |lexer:&mut TweeLexer<R>| { lexer.MAKRO_CONTENT(); Some(TokMakroVar(lexer.yystr())) } } 
MAKRO_CONTENT { MAKRO_END => |lexer:&mut TweeLexer<R>| { if lexer.new_line { lexer.NEWLINE() } else { lexer.NON_NEWLINE() }; Some(TokMakroEnd) } FUNCTION => |lexer:&mut TweeLexer<R>| { let s = lexer.yystr(); let trimmed = &s[0 .. s.len()-1]; let name = &trimmed.to_string(); lexer.function_brackets = 1; lexer.FUNCTION_ARGS(); Some(TokFunction(name.clone())) } // Expression Stuff VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |_:&mut TweeLexer<R>| Some(TokBracketOpen) BR_CLOSE => |_:&mut TweeLexer<R>| Some(TokBracketClose) SEMI_COLON => |_:&mut TweeLexer<R>| Some(TokSemiColon) ASSIGN => |lexer:&mut TweeLexer<R>| Some(TokAssign("".to_string(), lexer.yystr())) COLON => |_:&mut TweeLexer<R>| Some(TokColon) // Expression Stuff End } FUNCTION_ARGS { COLON => |_:&mut TweeLexer<R>| Some(TokColon) VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |lexer:&mut TweeLexer<R>| { lexer.function_brackets += 1; Some(TokBracketOpen) } 
BR_CLOSE => |lexer:&mut TweeLexer<R>| { lexer.function_brackets -= 1; if lexer.function_brackets == 0 { lexer.MAKRO_CONTENT(); Some(TokArgsEnd) } else { Some(TokBracketClose) } } } DISPLAY_CONTENT { MAKRO_END => |lexer:&mut TweeLexer<R>| { if lexer.new_line { lexer.NEWLINE() } else { lexer.NON_NEWLINE() }; Some(TokMakroEnd) } VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) PASSAGE_NAME => |lexer:&mut TweeLexer<R>| Some(TokPassageName(lexer.yystr().trim().to_string())) } LINK_VAR_CHECK { LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } LINK_OPEN => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.LINK_VAR_SET(); Some(TokVarSetStart) } } LINK_VAR_SET { // Expression Stuff VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |_:&mut TweeLexer<R>| Some(TokBracketOpen) BR_CLOSE => |_:&mut TweeLexer<R>| Some(TokBracketClose) SEMI_COLON => |_:&mut TweeLexer<R>| Some(TokSemiColon) ASSIGN => |lexer:&mut TweeLexer<R>| Some(TokAssign("".to_string(),lexer.yystr())) // Expression Stuff End LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.LINK_WAIT_CLOSE(); Some(TokVarSetEnd) } } LINK_WAIT_CLOSE { LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } } } // ================================ // test functions #[cfg(test)] use std::io::Cursor; #[cfg(test)] fn test_lex(input: &str) -> Vec<Token> { let mut cursor: Cursor<Vec<u8>> = 
Cursor::new(input.to_string().into_bytes()); lex(&mut cursor).collect() } #[test] fn passage_test() { // This should detect the ::Start passage let start_tokens = test_lex("::Start"); assert_eq!(start_tokens.len(), 1); if let TokPassageName(ref name) = start_tokens[0] { assert_eq!(name, "Start") } else { panic!("Expected TokPassageName, got {:?}", start_tokens[0]) }; // This should not return any tokens let fail_tokens = test_lex(":fail"); assert_eq!(fail_tokens.len(), 0); } #[test] fn text_test() { // This should return a passage with a body text let tokens = test_lex("::MyPassage\nTestText\nTestNextLine"); assert_eq!(tokens.len(), 4); if let TokPassageName(ref name) = tokens[0] { assert_eq!(name, "MyPassage"); } else { panic!("Expected TokPassageName, got {:?}", tokens[0]); } if let TokText(ref text) = tokens[1] { assert_eq!(text, "TestText"); } else { panic!("Expected TokText, got {:?}", tokens[1]); } if let TokNewLine = tokens[2] { // valid } else { panic!("Expected TokNewLine, got {:?}", tokens[2]); } if let TokText(ref text) = tokens[3] { assert_eq!(text, "TestNextLine"); } else { panic!("Expected TokText, got {:?}", tokens[3]); } } /// TODO Tags are broken. 
Uncomment when #89 is fixed /* #[test] fn tag_test() { // This should return a passage with tags let tokens = test_lex("::TagPassage [tag1 tag2]\nContent"); assert_eq!(tokens.len(), 6); if let TokPassageName(ref name) = tokens[0] { assert_eq!(name, "TagPassage"); } else { panic!("Expected TokPassageName, got {:?}", tokens[0]); } if let TokTagStart = tokens[1] { // valid } else { panic!("Expected TokTagStart, got {:?}", tokens[1]); } if let TokTag(ref name) = tokens[2] { assert_eq!(name, "tag1"); } else { panic!("Expected TokTag, got {:?}", tokens[2]); } if let TokTag(ref name) = tokens[3] { assert_eq!(name, "tag2"); } else { panic!("Expected TokTag, got {:?}", tokens[3]); } if let TokTagEnd = tokens[4] { // valid } else { panic!("Expected TokTagStart, got {:?}", tokens[4]); } if let TokText(ref text) = tokens[5] { assert_eq!(text, "Content"); } else { panic!("Expected TokText, got {:?}", tokens[5]); } } */ Make tests compile use std::io::{BufReader, Read}; use utils::extensions::{Peeking, PeekingExt, FilteringScan, FilteringScanExt}; use self::Token::{ TokPassageName, TokTagStart, TokTagEnd, TokTag, TokMakroStart, TokMakroEnd, TokVariable, TokSet, TokAssign, TokInt, TokFloat, TokNumOp, TokCompOp, TokLogOp, TokText, TokFormatBoldStart, TokFormatBoldEnd, TokFormatItalicStart, TokFormatItalicEnd, TokFormatUnderStart, TokFormatUnderEnd, TokFormatStrikeStart, TokFormatStrikeEnd, TokFormatSubStart, TokFormatSubEnd, TokFormatSupStart, TokFormatSupEnd, TokFormatMonoStart, TokFormatMonoEnd, TokString, TokBracketOpen, TokBracketClose, TokIf, TokElse, TokEndIf, TokPassageLink, TokFormatBulList, TokFormatNumbList, TokFormatIndentBlock, TokFormatHeading, TokVarSetStart, TokVarSetEnd, TokSemiColon, TokPrint, TokDisplay, TokBoolean, TokFunction , TokColon, TokArgsEnd, TokSilently, TokEndSilently, TokArrayStart, TokArrayEnd, TokNewLine, TokFormatHorizontalLine, TokMakroVar }; pub struct ScanState { current_text: String, skip_next: bool, } pub fn lex<R: Read>(input: &mut R) -> 
FilteringScan<Peeking<TweeLexer<BufReader<&mut R>>, Token>, ScanState, fn(&mut ScanState, (Token, Option<Token>)) -> Option<Token>> { info!("Nicht in Tokens verarbeitete Zeichen: "); TweeLexer::new(BufReader::new(input)).peeking().scan_filter( ScanState { current_text: String::new(), skip_next: false, }, { fn scan_fn(state: &mut ScanState, elem: (Token, Option<Token>)) -> Option<Token> { if state.skip_next { state.skip_next = false; return None; } match elem { (TokText(text), Some(TokText(_))) => { state.current_text.push_str(&text); None } (TokText(text), _) => { state.current_text.push_str(&text); let val = TokText(state.current_text.clone()); state.current_text.clear(); Some(val) }, (TokVariable(var), Some(TokAssign(_, op))) => { state.skip_next = true; Some(TokAssign(var, op)) }, (x, _) => Some(x), } } scan_fn } ) } #[derive(PartialEq,Debug,Clone)] pub enum Token { TokPassageName (String), TokTagStart, TokTagEnd, TokVarSetStart, TokVarSetEnd, TokPassageLink (String, String), TokTag (String), TokText (String), TokFormatBoldStart, TokFormatBoldEnd, TokFormatItalicStart, TokFormatItalicEnd, TokFormatUnderStart, TokFormatUnderEnd, TokFormatStrikeStart, TokFormatStrikeEnd, TokFormatSubStart, TokFormatSubEnd, TokFormatSupStart, TokFormatSupEnd, TokFormatMonoStart, TokFormatMonoEnd, TokFormatBulList, TokFormatNumbList, TokFormatIndentBlock, TokFormatHorizontalLine, TokFormatHeading (usize), TokMakroStart, TokMakroEnd, TokBracketOpen, TokBracketClose, TokVariable (String), TokInt (i32), TokFloat (f32), TokString (String), TokBoolean (String), TokFunction (String), TokColon, TokArgsEnd, TokArrayStart, TokArrayEnd, TokSet, TokAssign (String, String), TokNumOp (String), TokCompOp (String), TokLogOp (String), TokSemiColon, TokIf, TokElse, TokEndIf, TokPrint, TokDisplay, TokSilently, TokEndSilently, TokMakroVar(String), TokNewLine, TokPseudo } rustlex! 
TweeLexer { // Properties property new_line:bool = true; property format_bold_open:bool = false; property format_italic_open:bool = false; property format_under_open:bool = false; property format_strike_open:bool = false; property format_sub_open:bool = false; property format_sup_open:bool = false; property function_brackets:usize = 0; // Regular Expressions let WHITESPACE = ' ' | '\t'; let UNDERSCORE = '_'; let NEWLINE = '\n'; let INITIAL_START_CHAR = [^":"'\n'] | ':' [^":"'\n']; let INITIAL_CHAR = [^'\n']; let TEXT_INITIAL = INITIAL_START_CHAR INITIAL_CHAR*; // If for example // is at a beginning of a line, then // is matched and not just / let TEXT_START_CHAR = "ä"|"Ä"|"ü"|"Ü"|"ö"|"Ö"|"ß"|"ẞ" | [^"*!>#"'\n']; // add chars longer than one byte let TEXT_CHAR = [^"/'_=~^{@<[" '\n']; let TEXT = TEXT_CHAR+ | ["/'_=^{@<["]; let TEXT_MONO_CHAR = [^"}"'\n']; let TEXT_MONO = TEXT_MONO_CHAR+ | "}" | "}}"; let PASSAGE_START = "::" ':'*; let PASSAGE_CHAR_NORMAL = [^"]$<>:|" '\n']; let PASSAGE_CHAR = PASSAGE_CHAR_NORMAL | ':' PASSAGE_CHAR_NORMAL; let PASSAGE_NAME = PASSAGE_CHAR_NORMAL PASSAGE_CHAR* ':'?; let TAG = ['a'-'z''A'-'Z''0'-'9''.''_']+; let TAG_START = '['; let TAG_END = ']'; let FORMAT_ITALIC = "//"; let FORMAT_BOLD = "''"; let FORMAT_UNDER = "__"; let FORMAT_STRIKE = "=="; let FORMAT_SUB = "~~"; let FORMAT_SUP = "^^"; let FORMAT_MONO_START = "{{{"; let FORMAT_MONO_END = "}}}"; //TODO ignore content let FORMAT_INLINE = "@@"; let FORMAT_BUL_LIST = "*" WHITESPACE*; let FORMAT_NUMB_LIST = "#" WHITESPACE*; let FORMAT_INDENT_BLOCK = "<<<" NEWLINE; let FORMAT_HORIZONTAL_LINE = "----" NEWLINE; let FORMAT_HEADING = ("!" | "!!" | "!!!" | "!!!!" | "!!!!!") WHITESPACE*; let MAKRO_START = "<<"; let MAKRO_END = ">>"; let BR_OPEN = '('; let BR_CLOSE = ')'; let DIGIT = ['0'-'9']; let LETTER = ['a'-'z''A'-'Z']; let VAR_CHAR = LETTER | DIGIT | UNDERSCORE; let VAR_NAME = '$' (LETTER | UNDERSCORE) VAR_CHAR*; let INT = "-"? DIGIT+; let FLOAT = "-"? (DIGIT+ "." DIGIT*) | "-"? 
(DIGIT* "." DIGIT+) | "-"? "Infinity"; let STRING = ('"' [^'"']* '"') | ("'" [^"'"]* "'"); let BOOL = "true" | "false"; let COLON = ','; let FUNCTION = LETTER+ '('; let MACRO_NAME = LETTER+ WHITESPACE*; let ASSIGN = "=" | "to" | "+=" | "-=" | "*=" | "/="; let SEMI_COLON = ';'; let NUM_OP = ["+-*/%"]; let COMP_OP = "is" | "==" | "eq" | "neq" | ">" | "gt" | ">=" | "gte" | "<" | "lt" | "<=" | "lte"; let LOG_OP = "and" | "or" | "not"; let LINK_OPEN = '['; let LINK_CLOSE = ']'; let LINK_TEXT = [^'\n'"|]"]+; let LINK_SIMPLE = "[[" (PASSAGE_NAME | VAR_NAME) "]"; let LINK_LABELED = "[[" LINK_TEXT "|" (PASSAGE_NAME | VAR_NAME) "]"; INITIAL { PASSAGE_START => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.PASSAGE(); None } TEXT_INITIAL => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.INITIAL_NON_NEWLINE(); None } } INITIAL_NON_NEWLINE { NEWLINE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.INITIAL(); None } } NEWLINE { PASSAGE_START => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.PASSAGE(); None } MAKRO_START => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.MAKRO(); lexer.new_line = true; None } LINK_SIMPLE => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let name = &trimmed.to_string(); Some(TokPassageLink(name.clone(), name.clone())) } LINK_LABELED => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. 
s.len()-1]; let matches = &trimmed.split("|").collect::<Vec<&str>>(); assert_eq!(matches.len(), 2); let text = matches[0].to_string(); let name = matches[1].to_string(); Some(TokPassageLink(text, name)) } FORMAT_ITALIC => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_italic_open = !lexer.format_italic_open; if lexer.format_italic_open {Some(TokFormatItalicStart)} else {Some(TokFormatItalicEnd)} } FORMAT_BOLD => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_bold_open = !lexer.format_bold_open; if lexer.format_bold_open {Some(TokFormatBoldStart)} else {Some(TokFormatBoldEnd)} } FORMAT_UNDER => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_under_open = !lexer.format_under_open; if lexer.format_under_open {Some(TokFormatUnderStart)} else {Some(TokFormatUnderEnd)} } FORMAT_STRIKE => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_strike_open = !lexer.format_strike_open; if lexer.format_strike_open {Some(TokFormatStrikeStart)} else {Some(TokFormatStrikeEnd)} } FORMAT_SUB => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_sub_open = !lexer.format_sub_open; if lexer.format_sub_open {Some(TokFormatSubStart)} else {Some(TokFormatSubEnd)} } FORMAT_SUP => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); lexer.format_sup_open = !lexer.format_sup_open; if lexer.format_sup_open {Some(TokFormatSupStart)} else {Some(TokFormatSupEnd)} } FORMAT_MONO_START => |lexer:&mut TweeLexer<R>| { lexer.MONO_TEXT(); Some(TokFormatMonoStart) } FORMAT_BUL_LIST => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatBulList) } FORMAT_NUMB_LIST => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatNumbList) } FORMAT_HEADING => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatHeading(lexer.yystr().len())) } TEXT_START_CHAR => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokText(lexer.yystr())) } FORMAT_HORIZONTAL_LINE => |_:&mut TweeLexer<R>| Some(TokFormatHorizontalLine) 
FORMAT_INDENT_BLOCK => |_:&mut TweeLexer<R>| Some(TokFormatIndentBlock) NEWLINE => |_:&mut TweeLexer<R>| Some(TokNewLine) } NON_NEWLINE { MAKRO_START => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.MAKRO(); lexer.new_line = false; None } LINK_SIMPLE => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let name = &trimmed.to_string(); Some(TokPassageLink(name.clone(), name.clone())) } LINK_LABELED => |lexer:&mut TweeLexer<R>| { lexer.LINK_VAR_CHECK(); let s = lexer.yystr(); let trimmed = &s[2 .. s.len()-1]; let matches = &trimmed.split("|").collect::<Vec<&str>>(); assert_eq!(matches.len(), 2); let text = matches[0].to_string(); let name = matches[1].to_string(); Some(TokPassageLink(text, name)) } FORMAT_ITALIC => |lexer:&mut TweeLexer<R>| { lexer.format_italic_open = !lexer.format_italic_open; if lexer.format_italic_open {Some(TokFormatItalicStart)} else {Some(TokFormatItalicEnd)} } FORMAT_BOLD => |lexer:&mut TweeLexer<R>| { lexer.format_bold_open = !lexer.format_bold_open; if lexer.format_bold_open {Some(TokFormatBoldStart)} else {Some(TokFormatBoldEnd)} } FORMAT_UNDER => |lexer:&mut TweeLexer<R>| { lexer.format_under_open = !lexer.format_under_open; if lexer.format_under_open {Some(TokFormatUnderStart)} else {Some(TokFormatUnderEnd)} } FORMAT_STRIKE => |lexer:&mut TweeLexer<R>| { lexer.format_strike_open = !lexer.format_strike_open; if lexer.format_strike_open {Some(TokFormatStrikeStart)} else {Some(TokFormatStrikeEnd)} } FORMAT_SUB => |lexer:&mut TweeLexer<R>| { lexer.format_sub_open = !lexer.format_sub_open; if lexer.format_sub_open {Some(TokFormatSubStart)} else {Some(TokFormatSubEnd)} } FORMAT_SUP => |lexer:&mut TweeLexer<R>| { lexer.format_sup_open = !lexer.format_sup_open; if lexer.format_sup_open {Some(TokFormatSupStart)} else {Some(TokFormatSupEnd)} } FORMAT_MONO_START => |lexer:&mut TweeLexer<R>| { lexer.MONO_TEXT(); Some(TokFormatMonoStart) } NEWLINE => |lexer:&mut TweeLexer<R>| { 
lexer.NEWLINE(); Some(TokNewLine) } TEXT => |lexer:&mut TweeLexer<R>| Some(TokText(lexer.yystr())) } PASSAGE { PASSAGE_NAME => |lexer:&mut TweeLexer<R>| Some(TokPassageName(lexer.yystr().trim().to_string())) TAG_START => |lexer:&mut TweeLexer<R>| { lexer.TAGS(); Some(TokTagStart) } NEWLINE => |lexer:&mut TweeLexer<R>| -> Option<Token>{ lexer.NEWLINE(); None } } TAGS { TAG => |lexer:&mut TweeLexer<R>| Some(TokTag(lexer.yystr())) WHITESPACE => |_:&mut TweeLexer<R>| -> Option<Token> { None } TAG_END => |lexer:&mut TweeLexer<R>| { lexer.PASSAGE(); Some(TokTagEnd) } } MONO_TEXT { TEXT_MONO => |lexer:&mut TweeLexer<R>| Some(TokText(lexer.yystr())) FORMAT_MONO_END => |lexer:&mut TweeLexer<R>| { lexer.NON_NEWLINE(); Some(TokFormatMonoEnd) } NEWLINE => |_:&mut TweeLexer<R>| -> Option<Token> { None } } MAKRO { WHITESPACE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } MACRO_NAME => |lexer:&mut TweeLexer<R>| { match lexer.yystr().trim().as_ref() { "set" => { lexer.MAKRO_CONTENT(); Some(TokSet) }, "if" => { lexer.MAKRO_CONTENT(); Some(TokIf) }, "else" => { lexer.MAKRO_CONTENT(); Some(TokElse) }, "endif" => { lexer.MAKRO_CONTENT(); Some(TokEndIf) }, "print" => { lexer.MAKRO_CONTENT(); Some(TokPrint) }, "display" => { lexer.DISPLAY_CONTENT(); Some(TokDisplay) }, "silently" => { lexer.MAKRO_CONTENT(); Some(TokSilently) }, "endsilently" => { lexer.MAKRO_CONTENT(); Some(TokEndSilently) }, _ => { panic!("Unknown macro: \"{}\"", lexer.yystr()); } } } VAR_NAME => |lexer:&mut TweeLexer<R>| { lexer.MAKRO_CONTENT(); Some(TokMakroVar(lexer.yystr())) } } MAKRO_CONTENT { MAKRO_END => |lexer:&mut TweeLexer<R>| { if lexer.new_line { lexer.NEWLINE() } else { lexer.NON_NEWLINE() }; Some(TokMakroEnd) } FUNCTION => |lexer:&mut TweeLexer<R>| { let s = lexer.yystr(); let trimmed = &s[0 .. 
s.len()-1]; let name = &trimmed.to_string(); lexer.function_brackets = 1; lexer.FUNCTION_ARGS(); Some(TokFunction(name.clone())) } // Expression Stuff VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |_:&mut TweeLexer<R>| Some(TokBracketOpen) BR_CLOSE => |_:&mut TweeLexer<R>| Some(TokBracketClose) SEMI_COLON => |_:&mut TweeLexer<R>| Some(TokSemiColon) ASSIGN => |lexer:&mut TweeLexer<R>| Some(TokAssign("".to_string(), lexer.yystr())) COLON => |_:&mut TweeLexer<R>| Some(TokColon) // Expression Stuff End } FUNCTION_ARGS { COLON => |_:&mut TweeLexer<R>| Some(TokColon) VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |lexer:&mut TweeLexer<R>| { lexer.function_brackets += 1; Some(TokBracketOpen) } BR_CLOSE => |lexer:&mut TweeLexer<R>| { lexer.function_brackets -= 1; if lexer.function_brackets == 0 { lexer.MAKRO_CONTENT(); Some(TokArgsEnd) } else { Some(TokBracketClose) } } } DISPLAY_CONTENT { MAKRO_END => |lexer:&mut 
TweeLexer<R>| { if lexer.new_line { lexer.NEWLINE() } else { lexer.NON_NEWLINE() }; Some(TokMakroEnd) } VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) PASSAGE_NAME => |lexer:&mut TweeLexer<R>| Some(TokPassageName(lexer.yystr().trim().to_string())) } LINK_VAR_CHECK { LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } LINK_OPEN => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.LINK_VAR_SET(); Some(TokVarSetStart) } } LINK_VAR_SET { // Expression Stuff VAR_NAME => |lexer:&mut TweeLexer<R>| Some(TokVariable(lexer.yystr())) FLOAT => |lexer:&mut TweeLexer<R>| Some(TokFloat(lexer.yystr()[..].parse().unwrap())) INT => |lexer:&mut TweeLexer<R>| Some(TokInt(lexer.yystr()[..].parse().unwrap())) STRING => |lexer:&mut TweeLexer<R>| Some(TokString(lexer.yystr())) BOOL => |lexer:&mut TweeLexer<R>| Some(TokBoolean(lexer.yystr())) NUM_OP => |lexer:&mut TweeLexer<R>| Some(TokNumOp(lexer.yystr())) COMP_OP => |lexer:&mut TweeLexer<R>| Some(TokCompOp(lexer.yystr())) LOG_OP => |lexer:&mut TweeLexer<R>| Some(TokLogOp(lexer.yystr())) BR_OPEN => |_:&mut TweeLexer<R>| Some(TokBracketOpen) BR_CLOSE => |_:&mut TweeLexer<R>| Some(TokBracketClose) SEMI_COLON => |_:&mut TweeLexer<R>| Some(TokSemiColon) ASSIGN => |lexer:&mut TweeLexer<R>| Some(TokAssign("".to_string(),lexer.yystr())) // Expression Stuff End LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.LINK_WAIT_CLOSE(); Some(TokVarSetEnd) } } LINK_WAIT_CLOSE { LINK_CLOSE => |lexer:&mut TweeLexer<R>| -> Option<Token> { lexer.NON_NEWLINE(); None } } } // ================================ // test functions #[cfg(test)] use std::io::Cursor; #[cfg(test)] fn test_lex(input: &str) -> Vec<Token> { let mut cursor: Cursor<Vec<u8>> = Cursor::new(input.to_string().into_bytes()); lex(&mut cursor).collect() } #[test] fn passage_test() { // This should detect the ::Start passage let start_tokens = test_lex("::Start"); assert_eq!(start_tokens.len(), 1); if let TokPassageName(ref 
name) = start_tokens[0] { assert_eq!(name, "Start") } else { panic!("Expected TokPassageName, got {:?}", start_tokens[0]) }; // This should not return any tokens let fail_tokens = test_lex(":fail"); assert_eq!(fail_tokens.len(), 0); } #[test] fn text_test() { // This should return a passage with a body text let tokens = test_lex("::MyPassage\nTestText\nTestNextLine"); assert_eq!(tokens.len(), 4); if let TokPassageName(ref name) = tokens[0] { assert_eq!(name, "MyPassage"); } else { panic!("Expected TokPassageName, got {:?}", tokens[0]); } if let TokText(ref text) = tokens[1] { assert_eq!(text, "TestText"); } else { panic!("Expected TokText, got {:?}", tokens[1]); } if let TokNewLine = tokens[2] { // valid } else { panic!("Expected TokNewLine, got {:?}", tokens[2]); } if let TokText(ref text) = tokens[3] { assert_eq!(text, "TestNextLine"); } else { panic!("Expected TokText, got {:?}", tokens[3]); } } /* /// TODO Tags are broken. Uncomment when #89 is fixed #[test] fn tag_test() { // This should return a passage with tags let tokens = test_lex("::TagPassage [tag1 tag2]\nContent"); assert_eq!(tokens.len(), 6); if let TokPassageName(ref name) = tokens[0] { assert_eq!(name, "TagPassage"); } else { panic!("Expected TokPassageName, got {:?}", tokens[0]); } if let TokTagStart = tokens[1] { // valid } else { panic!("Expected TokTagStart, got {:?}", tokens[1]); } if let TokTag(ref name) = tokens[2] { assert_eq!(name, "tag1"); } else { panic!("Expected TokTag, got {:?}", tokens[2]); } if let TokTag(ref name) = tokens[3] { assert_eq!(name, "tag2"); } else { panic!("Expected TokTag, got {:?}", tokens[3]); } if let TokTagEnd = tokens[4] { // valid } else { panic!("Expected TokTagStart, got {:?}", tokens[4]); } if let TokText(ref text) = tokens[5] { assert_eq!(text, "Content"); } else { panic!("Expected TokText, got {:?}", tokens[5]); } } */
// Copyright 2014 Johannes Köster. // Licensed under the MIT license (http://opensource.org/licenses/MIT) // This file may not be copied, modified, or distributed // except according to those terms. use std::num::{Int, UnsignedInt, NumCast, cast, Float}; use std::collections; use std::slice; use std; use alphabets::{Alphabet, RankTransform}; struct QGrams<'a, Q: UnsignedInt + NumCast> { text: slice::Iter<'a, u8>, qgram: Q, bits: usize, mask: Q, ranks: RankTransform, } impl<'a, Q: UnsignedInt + NumCast> QGrams<'a, Q> { pub fn new(q: usize, text: &'a [u8], alphabet: &Alphabet) -> Self { let ranks = RankTransform::new(alphabet); let mut qgrams = QGrams { text: text.iter(), qgram: cast(0).unwrap(), ranks: ranks, bits: (alphabet.len() as f32).log2().ceil() as usize, mask: cast((1 << q) - 1).unwrap(), }; for _ in 0..q-1 { qgrams.next(); } qgrams } fn qgram_push(&mut self, a: u8) { self.qgram = self.qgram << self.bits; self.qgram = (self.qgram | cast(a).unwrap()) & self.mask; } } impl<'a, Q: UnsignedInt + NumCast> Iterator for QGrams<'a, Q> { type Item = Q; fn next(&mut self) -> Option<Q> { match self.text.next() { Some(a) => { let b = self.ranks.get(*a); self.qgram_push(b); Some(self.qgram) }, None => None } } } pub struct QGramIndex<'a> { q: usize, alphabet: &'a Alphabet, address: Vec<usize>, pos: Vec<usize>, } impl<'a> QGramIndex<'a> { pub fn new(q: usize, text: &[u8], alphabet: &'a Alphabet) -> Self { QGramIndex::with_max_count(q, text, alphabet, std::usize::MAX) } pub fn with_max_count(q: usize, text: &[u8], alphabet: &'a Alphabet, max_count: usize) -> Self { let qgram_count = alphabet.len().pow(q as u32); let mut address = vec![0; qgram_count + 1]; let mut pos = vec![0; text.len()]; for qgram in QGrams::<u32>::new(q, text, alphabet) { address[qgram as usize] += 1; } for g in 1..address.len() { if address[g] > max_count { // mask qgram address[g] = 0; } } for i in 1..address.len() { address[i] += address[i - 1]; } { let mut offset = vec![0; qgram_count]; for (i, 
qgram) in QGrams::<u32>::new(q, text, alphabet).enumerate() { let a = address[qgram as usize]; if address[qgram as usize + 1] - a != 0 { // if not masked, insert positions pos[a + offset[qgram as usize]] = i; offset[qgram as usize] += 1; } } } QGramIndex { q: q, alphabet: alphabet, address: address, pos: pos } } pub fn matches(&self, qgram: u32) -> &[usize] { &self.pos[self.address[qgram as usize]..self.address[qgram as usize + 1]] } pub fn diagonals(&self, pattern: &[u8]) -> Vec<Diagonal> { let mut diagonals = collections::HashMap::new(); for (i, qgram) in QGrams::<u32>::new(self.q, pattern, self.alphabet).enumerate() { for p in self.matches(qgram) { let diagonal = p - i; if diagonals.contains_key(&diagonal) { diagonals.insert(diagonal, 1); } else { *diagonals.get_mut(&diagonal).unwrap() += 1; } } } diagonals.into_iter().map(|(diagonal, count)| Diagonal { pos: diagonal, count: count }).collect() } pub fn exact_matches(&self, pattern: &[u8]) -> Vec<ExactMatch> { let mut diagonals: collections::HashMap<usize, ExactMatch> = collections::HashMap::new(); let mut intervals = Vec::new(); for (i, qgram) in QGrams::<u32>::new(self.q, pattern, self.alphabet).enumerate() { for &p in self.matches(qgram) { let diagonal = p - i; if !diagonals.contains_key(&diagonal) { // nothing yet, start new match diagonals.insert(diagonal, ExactMatch { pattern_start: i, pattern_stop: i + self.q, text_start: p, text_stop: p + self.q }); } else { let interval = diagonals.get_mut(&diagonal).unwrap(); if interval.pattern_stop == i { // extend exact match interval.pattern_stop = i + self.q; interval.text_stop = p + self.q; } else { // report previous match intervals.push(interval.clone()); // mismatch or indel, start new match interval.pattern_start = i; interval.pattern_stop = i + self.q; interval.text_start = p; interval.text_stop = p + self.q; } } } } // report remaining intervals for (_, interval) in diagonals.into_iter() { intervals.push(interval); } intervals } } pub struct Diagonal { pub 
pos: usize, pub count: usize, } #[derive(Clone)] pub struct ExactMatch { pub pattern_start: usize, pub pattern_stop: usize, pub text_start: usize, pub text_stop: usize, } Fixes. TODO make qgrams always u64. // Copyright 2014 Johannes Köster. // Licensed under the MIT license (http://opensource.org/licenses/MIT) // This file may not be copied, modified, or distributed // except according to those terms. use std::num::{Int, UnsignedInt, NumCast, cast, Float}; use std::collections; use std::slice; use std; use alphabets::{Alphabet, RankTransform}; use utils; /// Iterator over the q-grams of a given text. Q-grams are encoded as integers. /// The number of bits for encoding a single symbol is chosen as log2(A) with A being the alphabet /// size. /// /// The type Q has to be chosen such that the q-gram fits into it. pub struct QGrams<'a, Q: UnsignedInt + NumCast> { text: &'a [u8], bits: usize, mask: Q, ranks: RankTransform, } impl<'a, Q: UnsignedInt + NumCast> QGrams<'a, Q> { /// Create new instance. 
/// /// # Arguments /// /// * `q` - the length of the q-gram /// * `text` - the text /// * `alphabet` - the alphabet to use pub fn new(q: usize, text: &'a [u8], alphabet: &Alphabet) -> Self { let ranks = RankTransform::new(alphabet); let bits = (alphabet.len() as f32).log2().ceil() as usize; assert!(bits * q <= Q:: QGrams { text: text, ranks: ranks, bits: bits, mask: cast((1 << q * bits) - 1).unwrap(), } } pub fn iter(&self) -> QGramIter { let mut iter = QGramIter { qgrams: self, text: text.iter(), qgram: cast(0).unwrap() }; for _ in 0..q-1 { iter.next(); } iter } } pub struct QGramIter<'a, Q: UnsignedInt + NumCast> { qgrams: &'a QGrams, text: slice::Iter<'a, u8>, qgram: Q, } impl<'a, Q: UnsignedInt + NumCast> QGramIter<'a, Q> { fn qgram_push(&mut self, a: u8) { self.qgram = self.qgram << self.qgrams.bits; self.qgram = (self.qgram | cast(a).unwrap()) & self.qgrams.mask; } } impl<'a, Q: UnsignedInt + NumCast> Iterator for QGramIter<'a, Q> { type Item = Q; fn next(&mut self) -> Option<Q> { match self.text.next() { Some(a) => { let b = self.qgrams.ranks.get(*a); self.qgram_push(b); Some(self.qgram) }, None => None } } } pub struct QGramIndex<'a> { q: usize, alphabet: &'a Alphabet, address: Vec<usize>, pos: Vec<usize>, } impl<'a> QGramIndex<'a> { pub fn new(q: usize, text: &[u8], alphabet: &'a Alphabet) -> Self { QGramIndex::with_max_count(q, text, alphabet, std::usize::MAX) } pub fn with_max_count(q: usize, text: &[u8], alphabet: &'a Alphabet, max_count: usize) -> Self { let qgram_count = alphabet.len().pow(q as u32); let mut address = vec![0; qgram_count + 1]; let mut pos = vec![0; text.len()]; for qgram in QGrams::<u32>::new(q, text, alphabet) { address[qgram as usize] += 1; } for g in 1..address.len() { if address[g] > max_count { // mask qgram address[g] = 0; } } utils::prescan(&mut address, 0, |a, b| a + b); { let mut offset = vec![0; qgram_count]; for (i, qgram) in QGrams::<u32>::new(q, text, alphabet).enumerate() { let a = address[qgram as usize]; if 
address[qgram as usize + 1] - a != 0 { // if not masked, insert positions pos[a + offset[qgram as usize]] = i; offset[qgram as usize] += 1; } } } QGramIndex { q: q, alphabet: alphabet, address: address, pos: pos } } pub fn matches(&self, qgram: u32) -> &[usize] { &self.pos[self.address[qgram as usize]..self.address[qgram as usize + 1]] } pub fn diagonals(&self, pattern: &[u8]) -> Vec<Diagonal> { let mut diagonals = collections::HashMap::new(); for (i, qgram) in QGrams::<u32>::new(self.q, pattern, self.alphabet).enumerate() { for p in self.matches(qgram) { let diagonal = p - i; if !diagonals.contains_key(&diagonal) { diagonals.insert(diagonal, 1); } else { *diagonals.get_mut(&diagonal).unwrap() += 1; } } } diagonals.into_iter().map(|(diagonal, count)| Diagonal { pos: diagonal, count: count }).collect() } pub fn exact_matches(&self, pattern: &[u8]) -> Vec<ExactMatch> { let mut diagonals: collections::HashMap<usize, ExactMatch> = collections::HashMap::new(); let mut intervals = Vec::new(); for (i, qgram) in QGrams::<u32>::new(self.q, pattern, self.alphabet).enumerate() { for &p in self.matches(qgram) { let diagonal = p - i; if !diagonals.contains_key(&diagonal) { // nothing yet, start new match diagonals.insert(diagonal, ExactMatch { pattern: Interval{ start: i, stop: i + self.q}, text: Interval { start: p, stop: p + self.q }, }); } else { let interval = diagonals.get_mut(&diagonal).unwrap(); if interval.pattern.stop - self.q + 1 == i { // extend exact match interval.pattern.stop = i + self.q; interval.text.stop = p + self.q; } else { // report previous match intervals.push(interval.clone()); // mismatch or indel, start new match interval.pattern.start = i; interval.pattern.stop = i + self.q; interval.text.start = p; interval.text.stop = p + self.q; } } } } // report remaining intervals for (_, interval) in diagonals.into_iter() { intervals.push(interval); } intervals } } #[derive(PartialEq)] #[derive(Debug)] pub struct Diagonal { pub pos: usize, pub count: usize, } 
/// An interval, consisting of start and stop position (the latter exclusive). #[derive(Clone)] #[derive(PartialEq)] #[derive(Debug)] pub struct Interval { pub start: usize, pub stop: usize } impl Interval { pub fn get<'a>(&self, text: &'a [u8]) -> &'a [u8] { &text[self.start..self.stop] } } #[derive(Clone)] #[derive(PartialEq)] #[derive(Debug)] pub struct ExactMatch { pub pattern: Interval, pub text: Interval, } #[cfg(test)] mod tests { use super::*; use alphabets; fn setup() -> (&'static [u8], alphabets::Alphabet) { let text = b"ACGGCTGAGATGAT"; let alphabet = alphabets::dna::alphabet(); (text, alphabet) } #[test] fn test_matches() { let (text, alphabet) = setup(); let q = 3; let qgram_index = QGramIndex::new(q, text, &alphabet); let qgram = QGrams::new(q, b"TGA", &alphabet).next().unwrap(); let matches = qgram_index.matches(qgram); assert_eq!(matches, [5, 10]); } #[test] fn test_diagonals() { let (text, alphabet) = setup(); let q = 3; let qgram_index = QGramIndex::new(q, text, &alphabet); let pattern = b"GCTG"; let diagonals = qgram_index.diagonals(pattern); assert_eq!(diagonals, [Diagonal { pos: 3, count: 2 }]); } #[test] fn test_exact_matches() { let (text, alphabet) = setup(); let q = 3; let qgram_index = QGramIndex::new(q, text, &alphabet); let pattern = b"GCTGA"; let exact_matches = qgram_index.exact_matches(pattern); assert!(exact_matches.len() == 2); for m in exact_matches { assert_eq!(m.pattern.get(pattern), m.text.get(text)); } } }
Enable setting the triplet via an environment variable.
//! This provides the logic for the finalized and head chains. //! //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! this struct to simplify the logic of the other layers of sync. use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use slog::{crit, debug, error}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; /// The number of head syncing chains to sync at a time. const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. #[derive(Clone)] pub enum RangeSyncState { /// A finalized chain is being synced. Finalized(u64), /// There are no finalized chains and we are syncing one more head chains. Head(SmallVec<[u64; PARALLEL_HEAD_CHAINS]>), /// There are no head or finalized chains and no long range sync is in progress. Idle, } /// A collection of finalized and head chains currently being processed. pub struct ChainCollection<T: BeaconChainTypes> { /// The beacon chain for processing. beacon_chain: Arc<BeaconChain<T>>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The set of head chains being synced. head_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The current sync state of the process. state: RangeSyncState, /// Logger for the collection. 
log: slog::Logger, } impl<T: BeaconChainTypes> ChainCollection<T> { pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, log, } } /// Updates the Syncing state of the collection after a chain is removed. fn on_chain_removed(&mut self, id: &ChainId, was_syncing: bool, sync_type: RangeSyncType) { let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.dec()); match self.state { RangeSyncState::Finalized(ref syncing_id) => { if syncing_id == id { // the finalized chain that was syncing was removed debug_assert!(was_syncing); let syncing_head_ids: SmallVec<[u64; PARALLEL_HEAD_CHAINS]> = self .head_chains .iter() .filter(|(_id, chain)| chain.is_syncing()) .map(|(id, _)| *id) .collect(); self.state = if syncing_head_ids.is_empty() { RangeSyncState::Idle } else { RangeSyncState::Head(syncing_head_ids) }; } else { debug_assert!(!was_syncing); } } RangeSyncState::Head(ref mut syncing_head_ids) => { if let Some(index) = syncing_head_ids .iter() .enumerate() .find(|(_, &chain_id)| &chain_id == id) .map(|(i, _)| i) { // a syncing head chain was removed debug_assert!(was_syncing); syncing_head_ids.swap_remove(index); if syncing_head_ids.is_empty() { self.state = RangeSyncState::Idle; } } else { debug_assert!(!was_syncing); } } RangeSyncState::Idle => { // the removed chain should not be syncing debug_assert!(!was_syncing) } } } /// Calls `func` on every chain of the collection. If the result is /// `ProcessingResult::RemoveChain`, the chain is removed and returned. /// NOTE: `func` must not change the syncing state of a chain. 
pub fn call_all<F>(&mut self, mut func: F) -> Vec<(SyncingChain<T>, RangeSyncType, RemoveChain)> where F: FnMut(&mut SyncingChain<T>) -> ProcessingResult, { let mut to_remove = Vec::new(); for (id, chain) in self.finalized_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Finalized, remove_reason)); } } for (id, chain) in self.head_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Head, remove_reason)); } } let mut results = Vec::with_capacity(to_remove.len()); for (id, sync_type, reason) in to_remove.into_iter() { let chain = match sync_type { RangeSyncType::Finalized => self.finalized_chains.remove(&id), RangeSyncType::Head => self.head_chains.remove(&id), }; let chain = chain.expect("Chain exists"); self.on_chain_removed(&id, chain.is_syncing(), sync_type); results.push((chain, sync_type, reason)); } results } /// Executes a function on the chain with the given id. /// /// If the function returns `ProcessingResult::RemoveChain`, the chain is removed and returned. /// If the chain is found, its syncing type is returned, or an error otherwise. /// NOTE: `func` should not change the sync state of a chain. 
#[allow(clippy::type_complexity)] pub fn call_by_id<F>( &mut self, id: ChainId, func: F, ) -> Result<(Option<(SyncingChain<T>, RemoveChain)>, RangeSyncType), ()> where F: FnOnce(&mut SyncingChain<T>) -> ProcessingResult, { if let Entry::Occupied(mut entry) = self.finalized_chains.entry(id) { // Search in our finalized chains first if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Finalized); Ok((Some((chain, remove_reason)), RangeSyncType::Finalized)) } else { Ok((None, RangeSyncType::Finalized)) } } else if let Entry::Occupied(mut entry) = self.head_chains.entry(id) { // Search in our head chains next if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Head); Ok((Some((chain, remove_reason)), RangeSyncType::Head)) } else { Ok((None, RangeSyncType::Head)) } } else { // Chain was not found in the finalized collection, nor the head collection Err(()) } } /// Updates the state of the chain collection. /// /// This removes any out-dated chains, swaps to any higher priority finalized chains and /// updates the state of the collection. This starts head chains syncing if any are required to /// do so. pub fn update( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, ) { // Remove any outdated finalized/head chains self.purge_outdated_chains(local, awaiting_head_peers); let local_head_epoch = local.head_slot.epoch(T::EthSpec::slots_per_epoch()); // Choose the best finalized chain if one needs to be selected. self.update_finalized_chains(network, local.finalized_epoch, local_head_epoch); if !matches!(self.state, RangeSyncState::Finalized(_)) { // Handle head syncing chains if there are no finalized chains left. 
self.update_head_chains( network, local.finalized_epoch, local_head_epoch, awaiting_head_peers, beacon_processor_send, ); } } pub fn state( &self, ) -> Result<Option<(RangeSyncType, Slot /* from */, Slot /* to */)>, &'static str> { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self .finalized_chains .get(syncing_id) .ok_or("Finalized syncing chain not found")?; Ok(Some(( RangeSyncType::Finalized, chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), chain.target_head_slot, ))) } RangeSyncState::Head(ref syncing_head_ids) => { let mut range: Option<(Slot, Slot)> = None; for id in syncing_head_ids { let chain = self .head_chains .get(id) .ok_or("Head syncing chain not found")?; let start = chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()); let target = chain.target_head_slot; range = range .map(|(min_start, max_slot)| (min_start.min(start), max_slot.max(target))) .or(Some((start, target))); } let (start_slot, target_slot) = range.ok_or("Syncing head with empty head ids")?; Ok(Some((RangeSyncType::Head, start_slot, target_slot))) } RangeSyncState::Idle => Ok(None), } } /// This looks at all current finalized chains and decides if a new chain should be prioritised /// or not. 
fn update_finalized_chains( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local_epoch: Epoch, local_head_epoch: Epoch, ) { // Find the chain with most peers and check if it is already syncing if let Some((mut new_id, max_peers)) = self .finalized_chains .iter() .max_by_key(|(_, chain)| chain.available_peers()) .map(|(id, chain)| (*id, chain.available_peers())) { let mut old_id = None; if let RangeSyncState::Finalized(syncing_id) = self.state { if syncing_id == new_id { // best chain is already syncing old_id = Some(None); } else { // chains are different, check that they don't have the same number of peers if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) { if max_peers > syncing_chain.available_peers() && syncing_chain.validated_epochs() > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS { syncing_chain.stop_syncing(); old_id = Some(Some(syncing_id)); } else { // chains have the same number of peers, pick the currently syncing // chain to avoid unnecesary switchings and try to advance it new_id = syncing_id; old_id = Some(None); } } } } let chain = self .finalized_chains .get_mut(&new_id) .expect("Chain exists"); match old_id { Some(Some(old_id)) => debug!(self.log, "Switching finalized chains"; "old_id" => old_id, &chain), None => debug!(self.log, "Syncing new finalized chain"; &chain), Some(None) => { // this is the same chain. We try to advance it. 
} } // update the state to a new finalized state self.state = RangeSyncState::Finalized(new_id); if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { if remove_reason.is_critical() { crit!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); } else { // this happens only if sending a batch over the `network` fails a lot error!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason); } self.finalized_chains.remove(&new_id); self.on_chain_removed(&new_id, true, RangeSyncType::Finalized); } } } /// Start syncing any head chains if required. fn update_head_chains( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local_epoch: Epoch, local_head_epoch: Epoch, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, ) { // Include the awaiting head peers for (peer_id, peer_sync_info) in awaiting_head_peers.drain() { debug!(self.log, "including head peer"); self.add_peer_or_create_chain( local_epoch, peer_sync_info.head_root, peer_sync_info.head_slot, peer_id, RangeSyncType::Head, beacon_processor_send, network, ); } if self.head_chains.is_empty() { // There are no finalized chains, update the state. 
self.state = RangeSyncState::Idle; return; } // Order chains by available peers, if two chains have the same number of peers, prefer one // that is already syncing let mut preferred_ids = self .head_chains .iter() .map(|(id, chain)| (chain.available_peers(), !chain.is_syncing(), *id)) .collect::<Vec<_>>(); preferred_ids.sort_unstable(); let mut syncing_chains = SmallVec::<[u64; PARALLEL_HEAD_CHAINS]>::new(); for (_, _, id) in preferred_ids { let chain = self.head_chains.get_mut(&id).expect("known chain"); if syncing_chains.len() < PARALLEL_HEAD_CHAINS { // start this chain if it's not already syncing if !chain.is_syncing() { debug!(self.log, "New head chain started syncing"; &chain); } if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) { self.head_chains.remove(&id); if remove_reason.is_critical() { crit!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); } else { error!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason); } } else { syncing_chains.push(id); } } else { // stop any other chain chain.stop_syncing(); } } self.state = if syncing_chains.is_empty() { RangeSyncState::Idle } else { RangeSyncState::Head(syncing_chains) }; } /// Returns if `true` if any finalized chains exist, `false` otherwise. pub fn is_finalizing_sync(&self) -> bool { !self.finalized_chains.is_empty() } /// Removes any outdated finalized or head chains. /// This removes chains with no peers, or chains whose start block slot is less than our current /// finalized block slot. Peers that would create outdated chains are removed too. 
pub fn purge_outdated_chains( &mut self, local_info: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, ) { let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); let beacon_chain = &self.beacon_chain; let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { target_slot <= &local_finalized_slot || beacon_chain.fork_choice.read().contains_block(target_root) }; // Retain only head peers that remain relevant awaiting_head_peers.retain(|_peer_id, peer_sync_info| { !is_outdated(&peer_sync_info.head_slot, &peer_sync_info.head_root) }); // Remove chains that are out-dated let mut removed_chains = Vec::new(); self.finalized_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of finalized chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Finalized)); false } else { true } }); self.head_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of date head chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Head)); false } else { true } }); // update the state of the collection for (id, was_syncing, sync_type) in removed_chains { self.on_chain_removed(&id, was_syncing, sync_type); } } /// Adds a peer to a chain with the given target, or creates a new syncing chain if it doesn't /// exists. 
#[allow(clippy::too_many_arguments)] pub fn add_peer_or_create_chain( &mut self, start_epoch: Epoch, target_head_root: Hash256, target_head_slot: Slot, peer: PeerId, sync_type: RangeSyncType, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, network: &mut SyncNetworkContext<T::EthSpec>, ) { let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); let collection = if let RangeSyncType::Finalized = sync_type { &mut self.finalized_chains } else { &mut self.head_chains }; match collection.entry(id) { Entry::Occupied(mut entry) => { let chain = entry.get_mut(); debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); if let Err(remove_reason) = chain.add_peer(network, peer) { if remove_reason.is_critical() { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } else { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), sync_type); } } Entry::Vacant(entry) => { let peer_rpr = peer.to_string(); let new_chain = SyncingChain::new( start_epoch, target_head_slot, target_head_root, peer, beacon_processor_send.clone(), &self.log, ); debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); entry.insert(new_chain); let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.inc()); } } } } Tiny fix: wrong log level (#2720) ## Proposed Changes If the `RemoveChain` is critical log level should be crit. 🙂 //! This provides the logic for the finalized and head chains. //! //! Each chain type is stored in it's own map. A variety of helper functions are given along with //! 
this struct to simplify the logic of the other layers of sync. use super::chain::{ChainId, ProcessingResult, RemoveChain, SyncingChain}; use super::sync_type::RangeSyncType; use crate::beacon_processor::WorkEvent as BeaconWorkEvent; use crate::metrics; use crate::sync::network_context::SyncNetworkContext; use beacon_chain::{BeaconChain, BeaconChainTypes}; use fnv::FnvHashMap; use lighthouse_network::PeerId; use lighthouse_network::SyncInfo; use slog::{crit, debug, error}; use smallvec::SmallVec; use std::collections::hash_map::Entry; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::mpsc; use types::EthSpec; use types::{Epoch, Hash256, Slot}; /// The number of head syncing chains to sync at a time. const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. #[derive(Clone)] pub enum RangeSyncState { /// A finalized chain is being synced. Finalized(u64), /// There are no finalized chains and we are syncing one more head chains. Head(SmallVec<[u64; PARALLEL_HEAD_CHAINS]>), /// There are no head or finalized chains and no long range sync is in progress. Idle, } /// A collection of finalized and head chains currently being processed. pub struct ChainCollection<T: BeaconChainTypes> { /// The beacon chain for processing. beacon_chain: Arc<BeaconChain<T>>, /// The set of finalized chains being synced. finalized_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The set of head chains being synced. head_chains: FnvHashMap<ChainId, SyncingChain<T>>, /// The current sync state of the process. state: RangeSyncState, /// Logger for the collection. 
log: slog::Logger, } impl<T: BeaconChainTypes> ChainCollection<T> { pub fn new(beacon_chain: Arc<BeaconChain<T>>, log: slog::Logger) -> Self { ChainCollection { beacon_chain, finalized_chains: FnvHashMap::default(), head_chains: FnvHashMap::default(), state: RangeSyncState::Idle, log, } } /// Updates the Syncing state of the collection after a chain is removed. fn on_chain_removed(&mut self, id: &ChainId, was_syncing: bool, sync_type: RangeSyncType) { let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.dec()); match self.state { RangeSyncState::Finalized(ref syncing_id) => { if syncing_id == id { // the finalized chain that was syncing was removed debug_assert!(was_syncing); let syncing_head_ids: SmallVec<[u64; PARALLEL_HEAD_CHAINS]> = self .head_chains .iter() .filter(|(_id, chain)| chain.is_syncing()) .map(|(id, _)| *id) .collect(); self.state = if syncing_head_ids.is_empty() { RangeSyncState::Idle } else { RangeSyncState::Head(syncing_head_ids) }; } else { debug_assert!(!was_syncing); } } RangeSyncState::Head(ref mut syncing_head_ids) => { if let Some(index) = syncing_head_ids .iter() .enumerate() .find(|(_, &chain_id)| &chain_id == id) .map(|(i, _)| i) { // a syncing head chain was removed debug_assert!(was_syncing); syncing_head_ids.swap_remove(index); if syncing_head_ids.is_empty() { self.state = RangeSyncState::Idle; } } else { debug_assert!(!was_syncing); } } RangeSyncState::Idle => { // the removed chain should not be syncing debug_assert!(!was_syncing) } } } /// Calls `func` on every chain of the collection. If the result is /// `ProcessingResult::RemoveChain`, the chain is removed and returned. /// NOTE: `func` must not change the syncing state of a chain. 
pub fn call_all<F>(&mut self, mut func: F) -> Vec<(SyncingChain<T>, RangeSyncType, RemoveChain)> where F: FnMut(&mut SyncingChain<T>) -> ProcessingResult, { let mut to_remove = Vec::new(); for (id, chain) in self.finalized_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Finalized, remove_reason)); } } for (id, chain) in self.head_chains.iter_mut() { if let Err(remove_reason) = func(chain) { to_remove.push((*id, RangeSyncType::Head, remove_reason)); } } let mut results = Vec::with_capacity(to_remove.len()); for (id, sync_type, reason) in to_remove.into_iter() { let chain = match sync_type { RangeSyncType::Finalized => self.finalized_chains.remove(&id), RangeSyncType::Head => self.head_chains.remove(&id), }; let chain = chain.expect("Chain exists"); self.on_chain_removed(&id, chain.is_syncing(), sync_type); results.push((chain, sync_type, reason)); } results } /// Executes a function on the chain with the given id. /// /// If the function returns `ProcessingResult::RemoveChain`, the chain is removed and returned. /// If the chain is found, its syncing type is returned, or an error otherwise. /// NOTE: `func` should not change the sync state of a chain. 
#[allow(clippy::type_complexity)] pub fn call_by_id<F>( &mut self, id: ChainId, func: F, ) -> Result<(Option<(SyncingChain<T>, RemoveChain)>, RangeSyncType), ()> where F: FnOnce(&mut SyncingChain<T>) -> ProcessingResult, { if let Entry::Occupied(mut entry) = self.finalized_chains.entry(id) { // Search in our finalized chains first if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Finalized); Ok((Some((chain, remove_reason)), RangeSyncType::Finalized)) } else { Ok((None, RangeSyncType::Finalized)) } } else if let Entry::Occupied(mut entry) = self.head_chains.entry(id) { // Search in our head chains next if let Err(remove_reason) = func(entry.get_mut()) { let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), RangeSyncType::Head); Ok((Some((chain, remove_reason)), RangeSyncType::Head)) } else { Ok((None, RangeSyncType::Head)) } } else { // Chain was not found in the finalized collection, nor the head collection Err(()) } } /// Updates the state of the chain collection. /// /// This removes any out-dated chains, swaps to any higher priority finalized chains and /// updates the state of the collection. This starts head chains syncing if any are required to /// do so. pub fn update( &mut self, network: &mut SyncNetworkContext<T::EthSpec>, local: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, ) { // Remove any outdated finalized/head chains self.purge_outdated_chains(local, awaiting_head_peers); let local_head_epoch = local.head_slot.epoch(T::EthSpec::slots_per_epoch()); // Choose the best finalized chain if one needs to be selected. self.update_finalized_chains(network, local.finalized_epoch, local_head_epoch); if !matches!(self.state, RangeSyncState::Finalized(_)) { // Handle head syncing chains if there are no finalized chains left. 
self.update_head_chains( network, local.finalized_epoch, local_head_epoch, awaiting_head_peers, beacon_processor_send, ); } } pub fn state( &self, ) -> Result<Option<(RangeSyncType, Slot /* from */, Slot /* to */)>, &'static str> { match self.state { RangeSyncState::Finalized(ref syncing_id) => { let chain = self .finalized_chains .get(syncing_id) .ok_or("Finalized syncing chain not found")?; Ok(Some(( RangeSyncType::Finalized, chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()), chain.target_head_slot, ))) } RangeSyncState::Head(ref syncing_head_ids) => { let mut range: Option<(Slot, Slot)> = None; for id in syncing_head_ids { let chain = self .head_chains .get(id) .ok_or("Head syncing chain not found")?; let start = chain.start_epoch.start_slot(T::EthSpec::slots_per_epoch()); let target = chain.target_head_slot; range = range .map(|(min_start, max_slot)| (min_start.min(start), max_slot.max(target))) .or(Some((start, target))); } let (start_slot, target_slot) = range.ok_or("Syncing head with empty head ids")?; Ok(Some((RangeSyncType::Head, start_slot, target_slot))) } RangeSyncState::Idle => Ok(None), } } /// This looks at all current finalized chains and decides if a new chain should be prioritised /// or not. 
/// Selects (or keeps) the finalized chain to sync: the chain with the most
/// available peers wins, but the currently-syncing chain is kept unless the
/// challenger strictly exceeds it in peers AND the current chain has already
/// validated more than `MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS` (avoids churn).
fn update_finalized_chains(
    &mut self,
    network: &mut SyncNetworkContext<T::EthSpec>,
    local_epoch: Epoch,
    local_head_epoch: Epoch,
) {
    // Find the chain with most peers and check if it is already syncing
    if let Some((mut new_id, max_peers)) = self
        .finalized_chains
        .iter()
        .max_by_key(|(_, chain)| chain.available_peers())
        .map(|(id, chain)| (*id, chain.available_peers()))
    {
        // `old_id` encodes three outcomes for logging below:
        //   None             -> no chain was syncing before (brand new sync)
        //   Some(None)       -> same chain keeps syncing (advance it)
        //   Some(Some(id))   -> switched away from chain `id`
        let mut old_id = None;
        if let RangeSyncState::Finalized(syncing_id) = self.state {
            if syncing_id == new_id {
                // best chain is already syncing
                old_id = Some(None);
            } else {
                // chains are different, check that they don't have the same number of peers
                if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) {
                    if max_peers > syncing_chain.available_peers()
                        && syncing_chain.validated_epochs() > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS
                    {
                        syncing_chain.stop_syncing();
                        old_id = Some(Some(syncing_id));
                    } else {
                        // chains have the same number of peers, pick the currently syncing
                        // chain to avoid unnecesary switchings and try to advance it
                        new_id = syncing_id;
                        old_id = Some(None);
                    }
                }
            }
        }

        let chain = self
            .finalized_chains
            .get_mut(&new_id)
            .expect("Chain exists");

        match old_id {
            Some(Some(old_id)) => debug!(self.log, "Switching finalized chains";
                "old_id" => old_id, &chain),
            None => debug!(self.log, "Syncing new finalized chain"; &chain),
            Some(None) => {
                // this is the same chain. We try to advance it.
            }
        }
        // update the state to a new finalized state
        self.state = RangeSyncState::Finalized(new_id);

        if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) {
            if remove_reason.is_critical() {
                crit!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason);
            } else {
                // this happens only if sending a batch over the `network` fails a lot
                error!(self.log, "Chain removed while switching chains"; "chain" => new_id, "reason" => ?remove_reason);
            }
            self.finalized_chains.remove(&new_id);
            self.on_chain_removed(&new_id, true, RangeSyncType::Finalized);
        }
    }
}

/// Start syncing any head chains if required.
fn update_head_chains(
    &mut self,
    network: &mut SyncNetworkContext<T::EthSpec>,
    local_epoch: Epoch,
    local_head_epoch: Epoch,
    awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>,
    beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>,
) {
    // Include the awaiting head peers: drain them into (possibly new) head
    // chains keyed by their advertised head root/slot.
    for (peer_id, peer_sync_info) in awaiting_head_peers.drain() {
        debug!(self.log, "including head peer");
        self.add_peer_or_create_chain(
            local_epoch,
            peer_sync_info.head_root,
            peer_sync_info.head_slot,
            peer_id,
            RangeSyncType::Head,
            beacon_processor_send,
            network,
        );
    }

    if self.head_chains.is_empty() {
        // There are no finalized chains, update the state.
        self.state = RangeSyncState::Idle;
        return;
    }

    // Order chains by available peers, if two chains have the same number of peers, prefer one
    // that is already syncing
    // (tuple sort: peers asc, then `!is_syncing` so syncing chains sort first
    // within equal peer counts; iteration below caps at PARALLEL_HEAD_CHAINS).
    let mut preferred_ids = self
        .head_chains
        .iter()
        .map(|(id, chain)| (chain.available_peers(), !chain.is_syncing(), *id))
        .collect::<Vec<_>>();
    preferred_ids.sort_unstable();

    let mut syncing_chains = SmallVec::<[u64; PARALLEL_HEAD_CHAINS]>::new();
    for (_, _, id) in preferred_ids {
        let chain = self.head_chains.get_mut(&id).expect("known chain");
        if syncing_chains.len() < PARALLEL_HEAD_CHAINS {
            // start this chain if it's not already syncing
            if !chain.is_syncing() {
                debug!(self.log, "New head chain started syncing"; &chain);
            }
            if let Err(remove_reason) = chain.start_syncing(network, local_epoch, local_head_epoch) {
                self.head_chains.remove(&id);
                if remove_reason.is_critical() {
                    crit!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason);
                } else {
                    error!(self.log, "Chain removed while switching head chains"; "chain" => id, "reason" => ?remove_reason);
                }
            } else {
                syncing_chains.push(id);
            }
        } else {
            // stop any other chain
            chain.stop_syncing();
        }
    }

    self.state = if syncing_chains.is_empty() {
        RangeSyncState::Idle
    } else {
        RangeSyncState::Head(syncing_chains)
    };
}

/// Returns if `true` if any finalized chains exist, `false` otherwise.
pub fn is_finalizing_sync(&self) -> bool {
    !self.finalized_chains.is_empty()
}

/// Removes any outdated finalized or head chains.
/// This removes chains with no peers, or chains whose start block slot is less than our current
/// finalized block slot. Peers that would create outdated chains are removed too.
pub fn purge_outdated_chains( &mut self, local_info: &SyncInfo, awaiting_head_peers: &mut HashMap<PeerId, SyncInfo>, ) { let local_finalized_slot = local_info .finalized_epoch .start_slot(T::EthSpec::slots_per_epoch()); let beacon_chain = &self.beacon_chain; let log_ref = &self.log; let is_outdated = |target_slot: &Slot, target_root: &Hash256| { target_slot <= &local_finalized_slot || beacon_chain.fork_choice.read().contains_block(target_root) }; // Retain only head peers that remain relevant awaiting_head_peers.retain(|_peer_id, peer_sync_info| { !is_outdated(&peer_sync_info.head_slot, &peer_sync_info.head_root) }); // Remove chains that are out-dated let mut removed_chains = Vec::new(); self.finalized_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of finalized chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Finalized)); false } else { true } }); self.head_chains.retain(|id, chain| { if is_outdated(&chain.target_head_slot, &chain.target_head_root) || chain.available_peers() == 0 { debug!(log_ref, "Purging out of date head chain"; &chain); removed_chains.push((*id, chain.is_syncing(), RangeSyncType::Head)); false } else { true } }); // update the state of the collection for (id, was_syncing, sync_type) in removed_chains { self.on_chain_removed(&id, was_syncing, sync_type); } } /// Adds a peer to a chain with the given target, or creates a new syncing chain if it doesn't /// exists. 
#[allow(clippy::too_many_arguments)] pub fn add_peer_or_create_chain( &mut self, start_epoch: Epoch, target_head_root: Hash256, target_head_slot: Slot, peer: PeerId, sync_type: RangeSyncType, beacon_processor_send: &mpsc::Sender<BeaconWorkEvent<T>>, network: &mut SyncNetworkContext<T::EthSpec>, ) { let id = SyncingChain::<T>::id(&target_head_root, &target_head_slot); let collection = if let RangeSyncType::Finalized = sync_type { &mut self.finalized_chains } else { &mut self.head_chains }; match collection.entry(id) { Entry::Occupied(mut entry) => { let chain = entry.get_mut(); debug!(self.log, "Adding peer to known chain"; "peer_id" => %peer, "sync_type" => ?sync_type, &chain); debug_assert_eq!(chain.target_head_root, target_head_root); debug_assert_eq!(chain.target_head_slot, target_head_slot); if let Err(remove_reason) = chain.add_peer(network, peer) { if remove_reason.is_critical() { crit!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } else { error!(self.log, "Chain removed after adding peer"; "chain" => id, "reason" => ?remove_reason); } let chain = entry.remove(); self.on_chain_removed(&id, chain.is_syncing(), sync_type); } } Entry::Vacant(entry) => { let peer_rpr = peer.to_string(); let new_chain = SyncingChain::new( start_epoch, target_head_slot, target_head_root, peer, beacon_processor_send.clone(), &self.log, ); debug_assert_eq!(new_chain.get_id(), id); debug!(self.log, "New chain added to sync"; "peer_id" => peer_rpr, "sync_type" => ?sync_type, &new_chain); entry.insert(new_chain); let _ = metrics::get_int_gauge(&metrics::SYNCING_CHAINS_COUNT, &[sync_type.as_str()]) .map(|m| m.inc()); } } } }
// NOTE(review): this region contains TWO full copies of `atom.rs` separated by
// the bare text "Apply rustfmt to atom.rs" — this looks like a before/after
// commit pair with the commit message embedded. As written it is not a single
// compilable file; confirm which version is intended before relying on it.

#![allow(dead_code)]

use std::fmt;
use std::iter;

// A binary arithmetic operator symbol.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Sym {
    Add,
    Sub,
    Mul,
    Div,
}

impl fmt::Display for Sym {
    // Renders the operator as its conventional single-character token.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Sym::*;
        write!(f, "{}",
            match *self {
                Add => "+",
                Sub => "-",
                Mul => "*",
                Div => "/",
            }
        )
    }
}

// A leaf of an expression tree: either a number of type `T` or an operator.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Atom<T> {
    Num(T),
    Sym(Sym),
}

impl<T> From<Sym> for Atom<T> {
    fn from(s: Sym) -> Self {
        Atom::Sym(s)
    }
}

impl<T> fmt::Display for Atom<T> where T: fmt::Display {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Atom::*;
        match *self {
            Num(ref t) => write!(f, "{}", t),
            Sym(ref t) => write!(f, "{}", t),
        }
    }
}

// An S-expression: a single atom or a list of sub-expressions.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Expr<T> {
    Atom(Atom<T>),
    List(Vec<Expr<T>>),
}

impl<T> Expr<T> {
    fn is_atom(&self) -> bool {
        use self::Expr::*;
        match *self {
            Atom(_) => true,
            List(_) => false,
        }
    }

    fn is_list(&self) -> bool {
        use self::Expr::*;
        match *self {
            Atom(_) => false,
            List(_) => true,
        }
    }
}

impl<T> From<T> for Expr<T> {
    fn from(t: T) -> Self {
        Expr::Atom(Atom::Num(t))
    }
}

impl<T> From<Vec<T>> for Expr<T> {
    fn from(v: Vec<T>) -> Self {
        Expr::List(v.into_iter().map(Atom::Num).map(Expr::Atom).collect())
    }
}

impl<T> fmt::Display for Expr<T> where T: fmt::Display {
    // Lists render as "( elem elem ... )" with single-space separation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Expr::Atom(ref a) => write!(f, "{}", a),
            &Expr::List(ref l) => write!(f, "( {} )",
                l.iter().map(ToString::to_string).collect::<Vec<_>>().join(" "))
            // l.iter().fold("".to_string(), |a, ref e| format!("{} {}", a, e)))
        }
    }
}

impl<T> iter::FromIterator<Expr<T>> for Expr<T> {
    fn from_iter<U>(iterator: U) -> Self where U: IntoIterator<Item = Expr<T>> {
        Expr::List::<T>(iterator.into_iter().collect())
    }
}

impl<T> iter::FromIterator<Atom<T>> for Expr<T> {
    fn from_iter<U>(iterator: U) -> Self where U: IntoIterator<Item = Atom<T>> {
        Expr::List::<T>(iterator.into_iter().map(Expr::Atom).collect())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_display() {
        assert_eq!("1", Expr::from(1).to_string());
        assert_eq!("( 1 2 )", Expr::<i32>::from(vec![1, 2]).to_string());
    }
}

Apply rustfmt to atom.rs

// NOTE(review): second copy of the file begins here (post-commit version).
#![allow(dead_code)]

use std::fmt;
use std::iter;
use std::borrow::Borrow;

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Sym {
    Add,
    Sub,
    Mul,
    Div,
}

impl fmt::Display for Sym {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Sym::*;
        write!(f,
               "{}",
               match *self {
                   Add => "+",
                   Sub => "-",
                   Mul => "*",
                   Div => "/",
               })
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Atom<T> {
    Num(T),
    Sym(Sym),
}

impl<T> From<Sym> for Atom<T> {
    fn from(s: Sym) -> Self {
        Atom::Sym(s)
    }
}

impl<T> fmt::Display for Atom<T> where T: fmt::Display {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Atom::*;
        match *self {
            Num(ref t) => write!(f, "{}", t),
            Sym(ref t) => write!(f, "{}", t),
        }
    }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Expr<T> {
    Atom(Atom<T>),
    List(Vec<Expr<T>>),
}

impl<T> Expr<T> {
    fn is_atom(&self) -> bool {
        if let Expr::Atom(_) = *self { true } else { false }
    }

    fn is_list(&self) -> bool {
        if let Expr::List(_) = *self { true } else { false }
    }
}

impl<T> From<T> for Expr<T> {
    fn from(t: T) -> Self {
        Expr::Atom(Atom::Num(t))
    }
}

impl<T> From<Vec<T>> for Expr<T> {
    fn from(v: Vec<T>) -> Self {
        Expr::List(v.into_iter().map(Expr::from).collect())
    }
}

// NOTE(review): this impl does not compile — `v` is an immutable local
// (`push` needs `mut`), `*a.clone()` dereferences a non-pointer, and
// `v.as_slice()` returns a reference to a Vec that is dropped at the end of
// the block (borrow of local). Needs rework or removal.
impl<T> Borrow<[Expr<T>]> for Expr<T> {
    fn borrow(&self) -> &[Expr<T>] {
        use self::Expr::*;
        match *self {
            Atom(ref a) => {
                let v = Vec::new();
                v.push(Expr::from(*a.clone()));
                v.as_slice()
            },
            List(ref l) => l.as_slice(),
        }
    }
}

impl<T> fmt::Display for Expr<T> where T: fmt::Display {
    // NOTE(review): `l.join(" ")` on a `Vec<Expr<T>>` has no matching `Join`
    // impl in std — presumably relies on the `Borrow` impl above; verify this
    // version ever compiled.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Expr::*;
        match *self {
            Atom(ref a) => write!(f, "{}", a),
            List(ref l) => write!(f, "( {} )", l.join(" ")),
        }
    }
}

impl<T> iter::FromIterator<Expr<T>> for Expr<T> {
    fn from_iter<U>(iterator: U) -> Self where U: IntoIterator<Item = Expr<T>> {
        Expr::List::<T>(iterator.into_iter().collect())
    }
}

impl<T> iter::FromIterator<Atom<T>> for Expr<T> {
    fn from_iter<U>(iterator: U) -> Self where U: IntoIterator<Item = Atom<T>> {
        Expr::List::<T>(iterator.into_iter().map(Expr::Atom).collect())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_display() {
        assert_eq!("1", Expr::from(1).to_string());
        assert_eq!("( 1 2 )", Expr::<i32>::from(vec![1, 2]).to_string());
    }
}
use rustc::ty::adjustment::PointerCast;

use crate::prelude::*;

/// Translates the MIR of `instance` into a Cranelift function and defines it
/// in the module. Functions whose signature mentions `u128`/`i128` are
/// replaced by a body that traps, since 128-bit ints are not yet supported.
pub fn trans_fn<'a, 'clif, 'tcx: 'a, B: Backend + 'static>(
    cx: &mut crate::CodegenCx<'a, 'clif, 'tcx, B>,
    instance: Instance<'tcx>,
    linkage: Linkage,
) {
    let tcx = cx.tcx;

    let mir = tcx.instance_mir(instance.def);

    // Check fn sig for u128 and i128 and replace those functions with a trap.
    {
        // FIXME implement u128 and i128 support

        // Check sig for u128 and i128
        let fn_sig = tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &instance.fn_sig(tcx));

        // Type visitor that flags (in `.1`) whether a 128-bit int occurs
        // anywhere in the visited type.
        struct UI128Visitor<'a, 'tcx: 'a>(TyCtxt<'a, 'tcx, 'tcx>, bool);

        impl<'a, 'tcx: 'a> rustc::ty::fold::TypeVisitor<'tcx> for UI128Visitor<'a, 'tcx> {
            fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
                if t.sty == self.0.types.u128.sty || t.sty == self.0.types.i128.sty {
                    self.1 = true;
                    return false; // stop visiting
                }

                t.super_visit_with(self)
            }
        }

        let mut visitor = UI128Visitor(tcx, false);
        fn_sig.visit_with(&mut visitor);

        //If found replace function with a trap.
        if visitor.1 {
            tcx.sess.warn("u128 and i128 are not yet supported. \
            Functions using these as args will be replaced with a trap.");

            // Declare function with fake signature
            // (the real signature can't be lowered; `types::INVALID` marks the
            // single placeholder parameter).
            let sig = Signature {
                params: vec![AbiParam::new(types::INVALID)],
                returns: vec![],
                call_conv: CallConv::Fast,
            };
            let name = tcx.symbol_name(instance).as_str();
            let func_id = cx.module.declare_function(&*name, linkage, &sig).unwrap();

            // Create trapping function
            let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
            let mut func_ctx = FunctionBuilderContext::new();
            let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
            let start_ebb = bcx.create_ebb();
            bcx.append_ebb_params_for_function_params(start_ebb);
            bcx.switch_to_block(start_ebb);

            let mut fx = FunctionCx {
                tcx,
                module: cx.module,
                pointer_type: pointer_ty(tcx),

                instance,
                mir,

                bcx,
                ebb_map: HashMap::new(),
                local_map: HashMap::new(),

                clif_comments: crate::pretty_clif::CommentWriter::new(tcx, instance),
                constants: &mut cx.ccx,
                caches: &mut cx.caches,
                source_info_set: indexmap::IndexSet::new(),
            };

            crate::trap::trap_unreachable(&mut fx, "[unimplemented] Called function with u128 or i128 as argument.");
            fx.bcx.seal_all_blocks();
            fx.bcx.finalize();

            // Define function
            cx.caches.context.func = func;
            cx.module
                .define_function(func_id, &mut cx.caches.context)
                .unwrap();
            cx.caches.context.clear();
            return;
        }
    }

    // Declare function
    let (name, sig) = get_function_name_and_sig(tcx, instance, false);
    let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap();
    let mut debug_context = cx
        .debug_context
        .as_mut()
        .map(|debug_context| FunctionDebugContext::new(tcx, debug_context, mir, &name, &sig));

    // Make FunctionBuilder
    let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
    let mut func_ctx = FunctionBuilderContext::new();
    let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);

    // Predefine ebb's
    // (one Cranelift extended basic block per MIR basic block so terminators
    // can reference forward targets).
    let start_ebb = bcx.create_ebb();
    let mut ebb_map: HashMap<BasicBlock, Ebb> = HashMap::new();
    for (bb, _bb_data) in mir.basic_blocks().iter_enumerated() {
        ebb_map.insert(bb, bcx.create_ebb());
    }

    // Make FunctionCx
    let pointer_type = cx.module.target_config().pointer_type();
    let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);

    let mut fx = FunctionCx {
        tcx,
        module: cx.module,
        pointer_type,

        instance,
        mir,

        bcx,
        ebb_map,
        local_map: HashMap::new(),

        clif_comments,
        constants: &mut cx.ccx,
        caches: &mut cx.caches,
        source_info_set: indexmap::IndexSet::new(),
    };

    with_unimpl_span(fx.mir.span, || {
        crate::abi::codegen_fn_prelude(&mut fx, start_ebb);
        codegen_fn_content(&mut fx);
    });

    // Recover all necessary data from fx, before accessing func will prevent future access to it.
    let instance = fx.instance;
    let clif_comments = fx.clif_comments;
    let source_info_set = fx.source_info_set;

    #[cfg(debug_assertions)]
    crate::pretty_clif::write_clif_file(cx.tcx, "unopt", instance, &func, &clif_comments, None);

    // Verify function
    verify_func(tcx, &clif_comments, &func);

    // Define function
    let context = &mut cx.caches.context;
    context.func = func;
    cx.module
        .define_function(func_id, context)
        .unwrap();

    let value_ranges = context.build_value_labels_ranges(cx.module.isa()).expect("value location ranges");

    // Write optimized function to file for debugging
    #[cfg(debug_assertions)]
    crate::pretty_clif::write_clif_file(cx.tcx, "opt", instance, &context.func, &clif_comments, Some(&value_ranges));

    // Define debuginfo for function
    let isa = cx.module.isa();
    debug_context
        .as_mut()
        .map(|x| x.define(tcx, context, isa, &source_info_set));

    // Clear context to make it usable for the next function
    context.clear();
}

/// Runs the Cranelift IR verifier on `func`; on failure emits the error and
/// aborts compilation with a pretty-printed verifier report.
fn verify_func(tcx: TyCtxt, writer: &crate::pretty_clif::CommentWriter, func: &Function) {
    let flags = settings::Flags::new(settings::builder());
    match ::cranelift::codegen::verify_function(&func, &flags) {
        Ok(_) => {}
        Err(err) => {
            tcx.sess.err(&format!("{:?}", err));
            let pretty_error = ::cranelift::codegen::print_errors::pretty_verifier_error(
                &func,
                None,
                Some(Box::new(writer)),
                err,
            );
            tcx.sess
                .fatal(&format!("cranelift verify error:\n{}", pretty_error));
        }
    }
}

/// Lowers every reachable MIR basic block of `fx.mir`: statements via
/// `trans_stmt`, then the block terminator. Cleanup (unwind) blocks are
/// skipped — unwinding is not supported.
fn codegen_fn_content<'a, 'tcx: 'a>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>) {
    for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
        if bb_data.is_cleanup {
            // Unwinding after panicking is not supported
            continue;
        }

        let ebb = fx.get_ebb(bb);
        fx.bcx.switch_to_block(ebb);

        fx.bcx.ins().nop();
        for stmt in &bb_data.statements {
            fx.set_debug_loc(stmt.source_info);
            trans_stmt(fx, ebb, stmt);
        }

        // Attach the terminator's textual form as a clif comment (debug only).
        #[cfg(debug_assertions)]
        {
            let mut terminator_head = "\n".to_string();
            bb_data
                .terminator()
                .kind
                .fmt_head(&mut terminator_head)
                .unwrap();
            let inst = fx.bcx.func.layout.last_inst(ebb).unwrap();
            fx.add_comment(inst, terminator_head);
        }

        fx.set_debug_loc(bb_data.terminator().source_info);

        match &bb_data.terminator().kind {
            TerminatorKind::Goto { target } => {
                let ebb = fx.get_ebb(*target);
                fx.bcx.ins().jump(ebb, &[]);
            }
            TerminatorKind::Return => {
                crate::abi::codegen_return(fx);
            }
            TerminatorKind::Assert {
                cond,
                expected,
                msg,
                target,
                cleanup: _,
            } => {
                let cond = trans_operand(fx, cond).load_scalar(fx);
                // TODO HACK brz/brnz for i8/i16 is not yet implemented
                let cond = fx.bcx.ins().uextend(types::I32, cond);
                let target = fx.get_ebb(*target);
                // Branch to `target` when the condition matches expectation;
                // fall through into the panic trap otherwise.
                if *expected {
                    fx.bcx.ins().brnz(cond, target, &[]);
                } else {
                    fx.bcx.ins().brz(cond, target, &[]);
                };
                trap_panic(fx, format!("[panic] Assert {:?} failed.", msg));
            }

            TerminatorKind::SwitchInt {
                discr,
                switch_ty: _,
                values,
                targets,
            } => {
                // The last MIR target is the "otherwise" branch; the rest pair
                // with `values`.
                let discr = trans_operand(fx, discr).load_scalar(fx);
                let mut switch = ::cranelift::frontend::Switch::new();
                for (i, value) in values.iter().enumerate() {
                    let ebb = fx.get_ebb(targets[i]);
                    switch.set_entry(*value as u64, ebb);
                }
                let otherwise_ebb = fx.get_ebb(targets[targets.len() - 1]);
                switch.emit(&mut fx.bcx, discr, otherwise_ebb);
            }
            TerminatorKind::Call {
                func,
                args,
                destination,
                cleanup: _,
                from_hir_call: _,
            } => {
                crate::abi::codegen_terminator_call(fx, func, args, destination);
            }
            TerminatorKind::Resume | TerminatorKind::Abort => {
                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
            }
            TerminatorKind::Unreachable => {
                trap_unreachable(fx, "[corruption] Hit unreachable code.");
            }
            TerminatorKind::Yield { .. }
            | TerminatorKind::FalseEdges { .. }
            | TerminatorKind::FalseUnwind { .. }
            | TerminatorKind::DropAndReplace { .. }
            | TerminatorKind::GeneratorDrop => {
                bug!("shouldn't exist at trans {:?}", bb_data.terminator());
            }
            TerminatorKind::Drop {
                location,
                target,
                unwind: _,
            } => {
                let ty = location.ty(fx.mir, fx.tcx).ty;
                let ty = fx.monomorphize(&ty);
                let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty);

                if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                    // we don't actually need to drop anything
                } else {
                    let drop_place = trans_place(fx, location);
                    let drop_fn_ty = drop_fn.ty(fx.tcx);
                    match ty.sty {
                        ty::Dynamic(..) => {
                            // Trait objects: dispatch drop through the vtable.
                            crate::abi::codegen_drop(fx, drop_place, drop_fn_ty);
                        }
                        _ => {
                            // Sized types: call drop_in_place with a &mut to
                            // the value, staged through a stack slot.
                            let arg_place = CPlace::new_stack_slot(
                                fx,
                                fx.tcx.mk_ref(
                                    &ty::RegionKind::ReErased,
                                    TypeAndMut {
                                        ty,
                                        mutbl: crate::rustc::hir::Mutability::MutMutable,
                                    },
                                ),
                            );
                            drop_place.write_place_ref(fx, arg_place);
                            let arg_value = arg_place.to_cvalue(fx);
                            crate::abi::codegen_call_inner(
                                fx,
                                None,
                                drop_fn_ty,
                                vec![arg_value],
                                None,
                            );
                        }
                    }
                }
                let target_ebb = fx.get_ebb(*target);
                fx.bcx.ins().jump(target_ebb, &[]);
            }
        };
    }

    fx.bcx.seal_all_blocks();
    fx.bcx.finalize();
}

/// Lowers a single MIR statement into Cranelift instructions in the current
/// block. `cur_ebb` is only used to attach debug comments.
fn trans_stmt<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    cur_ebb: Ebb,
    stmt: &Statement<'tcx>,
) {
    let _print_guard = PrintOnPanic(|| format!("stmt {:?}", stmt));
    fx.set_debug_loc(stmt.source_info);

    #[cfg(debug_assertions)]
    match &stmt.kind {
        StatementKind::StorageLive(..) | StatementKind::StorageDead(..)
        => {} // Those are not very useful
        _ => {
            let inst = fx.bcx.func.layout.last_inst(cur_ebb).unwrap();
            fx.add_comment(inst, format!("{:?}", stmt));
        }
    }

    match &stmt.kind {
        StatementKind::SetDiscriminant {
            place,
            variant_index,
        } => {
            let place = trans_place(fx, place);
            let layout = place.layout();
            // Writing a discriminant for an uninhabited variant is a no-op.
            if layout.for_variant(&*fx, *variant_index).abi == layout::Abi::Uninhabited {
                return;
            }
            match layout.variants {
                layout::Variants::Single { index } => {
                    // Single-variant layouts have no stored discriminant.
                    assert_eq!(index, *variant_index);
                }
                layout::Variants::Multiple {
                    discr: _,
                    discr_index,
                    discr_kind: layout::DiscriminantKind::Tag,
                    variants: _,
                } => {
                    // Direct tag: store the variant's discriminant value into
                    // the tag field.
                    let ptr = place.place_field(fx, mir::Field::new(discr_index));
                    let to = layout
                        .ty
                        .ty_adt_def()
                        .unwrap()
                        .discriminant_for_variant(fx.tcx, *variant_index)
                        .val;
                    let discr = CValue::const_val(fx, ptr.layout().ty, to as u64 as i64);
                    ptr.write_cvalue(fx, discr);
                }
                layout::Variants::Multiple {
                    discr: _,
                    discr_index,
                    discr_kind:
                        layout::DiscriminantKind::Niche {
                            dataful_variant,
                            ref niche_variants,
                            niche_start,
                        },
                    variants: _,
                } => {
                    // Niche encoding: only non-dataful variants write a value;
                    // the dataful variant is implied by any other bit pattern.
                    if *variant_index != dataful_variant {
                        let niche = place.place_field(fx, mir::Field::new(discr_index));
                        //let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
                        let niche_value = ((variant_index.as_u32() - niche_variants.start().as_u32()) as u128)
                            .wrapping_add(niche_start);
                        // FIXME(eddyb) Check the actual primitive type here.
                        let niche_llval = if niche_value == 0 {
                            CValue::const_val(fx, niche.layout().ty, 0)
                        } else {
                            CValue::const_val(fx, niche.layout().ty, niche_value as u64 as i64)
                        };
                        niche.write_cvalue(fx, niche_llval);
                    }
                }
            }
        }
        StatementKind::Assign(to_place, rval) => {
            let lval = trans_place(fx, to_place);
            let dest_layout = lval.layout();
            match &**rval {
                Rvalue::Use(operand) => {
                    let val = trans_operand(fx, operand);
                    lval.write_cvalue(fx, val);
                }
                Rvalue::Ref(_, _, place) => {
                    let place = trans_place(fx, place);
                    place.write_place_ref(fx, lval);
                }
                Rvalue::BinaryOp(bin_op, lhs, rhs) => {
                    // Dispatch on the *operand* type; the result type comes
                    // from the destination layout.
                    let ty = fx.monomorphize(&lhs.ty(fx.mir, fx.tcx));
                    let lhs = trans_operand(fx, lhs);
                    let rhs = trans_operand(fx, rhs);

                    let res = match ty.sty {
                        ty::Bool => trans_bool_binop(fx, *bin_op, lhs, rhs, lval.layout().ty),
                        ty::Uint(_) => {
                            trans_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, false)
                        }
                        ty::Int(_) => {
                            trans_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, true)
                        }
                        ty::Float(_) => trans_float_binop(fx, *bin_op, lhs, rhs, lval.layout().ty),
                        ty::Char => trans_char_binop(fx, *bin_op, lhs, rhs, lval.layout().ty),
                        ty::RawPtr(..) => trans_ptr_binop(fx, *bin_op, lhs, rhs, lval.layout().ty),
                        ty::FnPtr(..) => trans_ptr_binop(fx, *bin_op, lhs, rhs, lval.layout().ty),
                        _ => unimplemented!("binop {:?} for {:?}", bin_op, ty),
                    };
                    lval.write_cvalue(fx, res);
                }
                Rvalue::CheckedBinaryOp(bin_op, lhs, rhs) => {
                    let ty = fx.monomorphize(&lhs.ty(fx.mir, fx.tcx));
                    let lhs = trans_operand(fx, lhs);
                    let rhs = trans_operand(fx, rhs);

                    let res = match ty.sty {
                        ty::Uint(_) => {
                            trans_checked_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, false)
                        }
                        ty::Int(_) => {
                            trans_checked_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, true)
                        }
                        _ => unimplemented!("checked binop {:?} for {:?}", bin_op, ty),
                    };
                    lval.write_cvalue(fx, res);
                }
                Rvalue::UnaryOp(un_op, operand) => {
                    let operand = trans_operand(fx, operand);
                    let layout = operand.layout();
                    let val = operand.load_scalar(fx);
                    let res = match un_op {
                        UnOp::Not => {
                            match layout.ty.sty {
                                ty::Bool => {
                                    // Logical not via compare-with-zero.
                                    let val = fx.bcx.ins().uextend(types::I32, val); // WORKAROUND for CraneStation/cranelift#466
                                    let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
                                    fx.bcx.ins().bint(types::I8, res)
                                }
                                ty::Uint(_) | ty::Int(_) => fx.bcx.ins().bnot(val),
                                _ => unimplemented!("un op Not for {:?}", layout.ty),
                            }
                        }
                        UnOp::Neg => match layout.ty.sty {
                            ty::Int(_) => {
                                // Integer negation as `0 - val`.
                                let clif_ty = fx.clif_type(layout.ty).unwrap();
                                let zero = fx.bcx.ins().iconst(clif_ty, 0);
                                fx.bcx.ins().isub(zero, val)
                            }
                            ty::Float(_) => fx.bcx.ins().fneg(val),
                            _ => unimplemented!("un op Neg for {:?}", layout.ty),
                        },
                    };
                    lval.write_cvalue(fx, CValue::by_val(res, layout));
                }
                Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, ty) => {
                    // fn item -> fn pointer: materialize the function address.
                    let layout = fx.layout_of(ty);
                    match fx
                        .monomorphize(&operand.ty(&fx.mir.local_decls, fx.tcx))
                        .sty
                    {
                        ty::FnDef(def_id, substs) => {
                            let func_ref = fx.get_function_ref(
                                Instance::resolve(fx.tcx, ParamEnv::reveal_all(), def_id, substs)
                                    .unwrap(),
                            );
                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
                            lval.write_cvalue(fx, CValue::by_val(func_addr, layout));
                        }
                        _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", ty),
                    }
                }
                Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), operand, ty)
                | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), operand, ty) => {
                    // Representation-preserving pointer casts.
                    let operand = trans_operand(fx, operand);
                    let layout = fx.layout_of(ty);
                    lval.write_cvalue(fx, operand.unchecked_cast_to(layout));
                }
                Rvalue::Cast(CastKind::Misc, operand, to_ty) => {
                    let operand = trans_operand(fx, operand);
                    let from_ty = operand.layout().ty;

                    // A pointer is "fat" when its pointee is unsized (carries
                    // length/vtable metadata alongside the data pointer).
                    fn is_fat_ptr<'a, 'tcx: 'a>(fx: &FunctionCx<'a, 'tcx, impl Backend>, ty: Ty<'tcx>) -> bool {
                        ty
                            .builtin_deref(true)
                            .map(|ty::TypeAndMut {ty: pointee_ty, mutbl: _ }| fx.layout_of(pointee_ty).is_unsized())
                            .unwrap_or(false)
                    }

                    if is_fat_ptr(fx, from_ty) {
                        if is_fat_ptr(fx, to_ty) {
                            // fat-ptr -> fat-ptr
                            lval.write_cvalue(fx, operand.unchecked_cast_to(dest_layout));
                        } else {
                            // fat-ptr -> thin-ptr
                            let (ptr, _extra) = operand.load_scalar_pair(fx);
                            lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
                        }
                    } else if let ty::Adt(adt_def, _substs) = from_ty.sty {
                        // enum -> discriminant value
                        assert!(adt_def.is_enum());
                        match to_ty.sty {
                            ty::Uint(_) | ty::Int(_) => {},
                            _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
                        }

                        // FIXME avoid forcing to stack
                        let place = CPlace::for_addr(operand.force_stack(fx), operand.layout());
                        let discr = trans_get_discriminant(fx, place, fx.layout_of(to_ty));
                        lval.write_cvalue(fx, discr);
                    } else {
                        let from_clif_ty = fx.clif_type(from_ty).unwrap();
                        let to_clif_ty = fx.clif_type(to_ty).unwrap();
                        let from = operand.load_scalar(fx);

                        let signed = match from_ty.sty {
                            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
                            ty::Int(..) => true,
                            ty::Float(..) => false, // `signed` is unused for floats
                            _ => panic!("{}", from_ty),
                        };

                        let res = if from_clif_ty.is_int() && to_clif_ty.is_int() {
                            // int-like -> int-like
                            crate::common::clif_intcast(
                                fx,
                                from,
                                to_clif_ty,
                                signed,
                            )
                        } else if from_clif_ty.is_int() && to_clif_ty.is_float() {
                            // int-like -> float
                            // FIXME missing encoding for fcvt_from_sint.f32.i8
                            let from = if from_clif_ty == types::I8 || from_clif_ty == types::I16 {
                                fx.bcx.ins().uextend(types::I32, from)
                            } else {
                                from
                            };
                            if signed {
                                fx.bcx.ins().fcvt_from_sint(to_clif_ty, from)
                            } else {
                                fx.bcx.ins().fcvt_from_uint(to_clif_ty, from)
                            }
                        } else if from_clif_ty.is_float() && to_clif_ty.is_int() {
                            // float -> int-like (saturating conversions)
                            let from = operand.load_scalar(fx);
                            if signed {
                                fx.bcx.ins().fcvt_to_sint_sat(to_clif_ty, from)
                            } else {
                                fx.bcx.ins().fcvt_to_uint_sat(to_clif_ty, from)
                            }
                        } else if from_clif_ty.is_float() && to_clif_ty.is_float() {
                            // float -> float
                            match (from_clif_ty, to_clif_ty) {
                                (types::F32, types::F64) => {
                                    fx.bcx.ins().fpromote(types::F64, from)
                                }
                                (types::F64, types::F32) => {
                                    fx.bcx.ins().fdemote(types::F32, from)
                                }
                                _ => from,
                            }
                        } else {
                            unimpl!("rval misc {:?} {:?}", from_ty, to_ty)
                        };
                        lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
                    }
                }
                Rvalue::Cast(CastKind::Pointer(PointerCast::ClosureFnPointer(_)), operand, _ty) => {
                    // Non-capturing closure -> fn pointer: resolve the
                    // closure's call shim and take its address.
                    let operand = trans_operand(fx, operand);
                    match operand.layout().ty.sty {
                        ty::Closure(def_id, substs) => {
                            let instance = Instance::resolve_closure(
                                fx.tcx,
                                def_id,
                                substs,
                                ty::ClosureKind::FnOnce,
                            );
                            let func_ref = fx.get_function_ref(instance);
                            let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
                            lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
                        }
                        _ => {
                            bug!("{} cannot be cast to a fn ptr", operand.layout().ty)
                        }
                    }
                }
                Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, _ty) => {
                    let operand = trans_operand(fx, operand);
                    operand.unsize_value(fx, lval);
                }
                Rvalue::Discriminant(place) => {
                    let place = trans_place(fx, place);
                    let discr = trans_get_discriminant(fx, place, dest_layout);
                    lval.write_cvalue(fx, discr);
                }
                Rvalue::Repeat(operand, times) => {
                    // [elem; N]: store the element N times, element-by-element.
                    let operand = trans_operand(fx, operand);
                    for i in 0..*times {
                        let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
                        let to = lval.place_index(fx, index);
                        to.write_cvalue(fx, operand);
                    }
                }
                Rvalue::Len(place) => {
                    let place = trans_place(fx, place);
                    let usize_layout = fx.layout_of(fx.tcx.types.usize);
                    let len = codegen_array_len(fx, place);
                    lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
                }
                Rvalue::NullaryOp(NullOp::Box, content_ty) => {
                    // `box expr`: call the exchange_malloc lang item with the
                    // content's size and alignment.
                    use rustc::middle::lang_items::ExchangeMallocFnLangItem;

                    let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
                    let layout = fx.layout_of(content_ty);
                    let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
                    let llalign = fx
                        .bcx
                        .ins()
                        .iconst(usize_type, layout.align.abi.bytes() as i64);
                    let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));

                    // Allocate space:
                    let def_id = match fx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
                        Ok(id) => id,
                        Err(s) => {
                            fx.tcx
                                .sess
                                .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                        }
                    };
                    let instance = ty::Instance::mono(fx.tcx, def_id);
                    let func_ref = fx.get_function_ref(instance);
                    let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
                    let ptr = fx.bcx.inst_results(call)[0];
                    lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
                }
                Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
                    assert!(lval
                        .layout()
                        .ty
                        .is_sized(fx.tcx.at(DUMMY_SP), ParamEnv::reveal_all()));
                    let ty_size = fx.layout_of(ty).size.bytes();
                    let val = CValue::const_val(fx, fx.tcx.types.usize, ty_size as i64);
                    lval.write_cvalue(fx, val);
                }
                Rvalue::Aggregate(kind, operands) => match **kind {
                    AggregateKind::Array(_ty) => {
                        for (i, operand) in operands.into_iter().enumerate() {
                            let operand = trans_operand(fx, operand);
                            let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
                            let to = lval.place_index(fx, index);
                            to.write_cvalue(fx, operand);
                        }
                    }
                    _ => unimpl!("shouldn't exist at trans {:?}", rval),
                },
            }
        }
        StatementKind::StorageLive(_)
        | StatementKind::StorageDead(_)
        | StatementKind::Nop
        | StatementKind::FakeRead(..)
        | StatementKind::Retag { .. }
        | StatementKind::AscribeUserType(..) => {}

        StatementKind::InlineAsm { .. } => unimpl!("Inline assembly is not supported"),
    }
}

/// Emits the length of an array/slice place: a constant for `[T; N]`, or the
/// metadata half of a fat pointer for `[T]`.
fn codegen_array_len<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    place: CPlace<'tcx>,
) -> Value {
    match place.layout().ty.sty {
        ty::Array(_elem_ty, len) => {
            let len = crate::constant::force_eval_const(fx, len).unwrap_usize(fx.tcx) as i64;
            fx.bcx.ins().iconst(fx.pointer_type, len)
        }
        ty::Slice(_elem_ty) => place
            .to_addr_maybe_unsized(fx)
            .1
            .expect("Length metadata for slice place"),
        _ => bug!("Rvalue::Len({:?})", place),
    }
}

/// Reads the discriminant of the enum at `place`, cast to `dest_layout`'s
/// type. Handles single-variant, tagged, and niche-encoded layouts.
pub fn trans_get_discriminant<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    place: CPlace<'tcx>,
    dest_layout: TyLayout<'tcx>,
) -> CValue<'tcx> {
    let layout = place.layout();

    if layout.abi == layout::Abi::Uninhabited {
        return trap_unreachable_ret_value(fx, dest_layout, "[panic] Tried to get discriminant for uninhabited type.");
    }

    let (discr_scalar, discr_index, discr_kind) = match &layout.variants {
        layout::Variants::Single { index } => {
            // No stored discriminant; the value is known statically.
            let discr_val = layout
                .ty
                .ty_adt_def()
                .map_or(index.as_u32() as u128, |def| {
                    def.discriminant_for_variant(fx.tcx, *index).val
                });
            return CValue::const_val(fx, dest_layout.ty, discr_val as u64 as i64);
        }
        layout::Variants::Multiple { discr, discr_index, discr_kind, variants: _ } => {
            (discr, *discr_index, discr_kind)
        }
    };

    let discr = place.place_field(fx, mir::Field::new(discr_index)).to_cvalue(fx);
    let discr_ty = discr.layout().ty;
    let lldiscr = discr.load_scalar(fx);
    match discr_kind {
        layout::DiscriminantKind::Tag => {
            // Plain tag: sign/zero-extend it to the destination type.
            let signed = match discr_scalar.value {
                layout::Int(_, signed) => signed,
                _ => false,
            };
            let val = clif_intcast(fx, lldiscr, fx.clif_type(dest_layout.ty).unwrap(), signed);
            return CValue::by_val(val, dest_layout);
        }
layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start, } => { let niche_llty = fx.clif_type(discr_ty).unwrap(); let dest_clif_ty = fx.clif_type(dest_layout.ty).unwrap(); if niche_variants.start() == niche_variants.end() { let b = fx .bcx .ins() .icmp_imm(IntCC::Equal, lldiscr, *niche_start as u64 as i64); let if_true = fx .bcx .ins() .iconst(dest_clif_ty, niche_variants.start().as_u32() as i64); let if_false = fx .bcx .ins() .iconst(dest_clif_ty, dataful_variant.as_u32() as i64); let val = fx.bcx.ins().select(b, if_true, if_false); return CValue::by_val(val, dest_layout); } else { // Rebase from niche values to discriminant values. let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); let delta = fx.bcx.ins().iconst(niche_llty, delta as u64 as i64); let lldiscr = fx.bcx.ins().isub(lldiscr, delta); let b = fx.bcx.ins().icmp_imm( IntCC::UnsignedLessThanOrEqual, lldiscr, niche_variants.end().as_u32() as i64, ); let if_true = clif_intcast(fx, lldiscr, fx.clif_type(dest_layout.ty).unwrap(), false); let if_false = fx .bcx .ins() .iconst(dest_clif_ty, dataful_variant.as_u32() as i64); let val = fx.bcx.ins().select(b, if_true, if_false); return CValue::by_val(val, dest_layout); } } } } macro_rules! 
binop_match { (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, bug) => { bug!("binop {} on {} lhs: {:?} rhs: {:?}", stringify!($var), $bug_fmt, $lhs, $rhs) }; (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, icmp($cc:ident)) => {{ assert_eq!($fx.tcx.types.bool, $ret_ty); let ret_layout = $fx.layout_of($ret_ty); let b = $fx.bcx.ins().icmp(IntCC::$cc, $lhs, $rhs); CValue::by_val($fx.bcx.ins().bint(types::I8, b), ret_layout) }}; (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, fcmp($cc:ident)) => {{ assert_eq!($fx.tcx.types.bool, $ret_ty); let ret_layout = $fx.layout_of($ret_ty); let b = $fx.bcx.ins().fcmp(FloatCC::$cc, $lhs, $rhs); CValue::by_val($fx.bcx.ins().bint(types::I8, b), ret_layout) }}; (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, custom(|| $body:expr)) => {{ $body }}; (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, $name:ident) => {{ let ret_layout = $fx.layout_of($ret_ty); CValue::by_val($fx.bcx.ins().$name($lhs, $rhs), ret_layout) }}; ( $fx:expr, $bin_op:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, $bug_fmt:expr; $( $var:ident ($sign:pat) $name:tt $( ( $($next:tt)* ) )? ; )* ) => {{ let lhs = $lhs.load_scalar($fx); let rhs = $rhs.load_scalar($fx); match ($bin_op, $signed) { $( (BinOp::$var, $sign) => binop_match!(@single $fx, $bug_fmt, $var, $signed, lhs, rhs, $ret_ty, $name $( ( $($next)* ) )?), )* } }} } fn trans_bool_binop<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, bin_op: BinOp, lhs: CValue<'tcx>, rhs: CValue<'tcx>, ty: Ty<'tcx>, ) -> CValue<'tcx> { let res = binop_match! 
{ fx, bin_op, false, lhs, rhs, ty, "bool"; Add (_) bug; Sub (_) bug; Mul (_) bug; Div (_) bug; Rem (_) bug; BitXor (_) bxor; BitAnd (_) band; BitOr (_) bor; Shl (_) bug; Shr (_) bug; Eq (_) icmp(Equal); Lt (_) icmp(UnsignedLessThan); Le (_) icmp(UnsignedLessThanOrEqual); Ne (_) icmp(NotEqual); Ge (_) icmp(UnsignedGreaterThanOrEqual); Gt (_) icmp(UnsignedGreaterThan); Offset (_) bug; }; res } pub fn trans_int_binop<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, bin_op: BinOp, lhs: CValue<'tcx>, rhs: CValue<'tcx>, out_ty: Ty<'tcx>, signed: bool, ) -> CValue<'tcx> { if bin_op != BinOp::Shl && bin_op != BinOp::Shr { assert_eq!( lhs.layout().ty, rhs.layout().ty, "int binop requires lhs and rhs of same type" ); } binop_match! { fx, bin_op, signed, lhs, rhs, out_ty, "int/uint"; Add (_) iadd; Sub (_) isub; Mul (_) imul; Div (false) udiv; Div (true) sdiv; Rem (false) urem; Rem (true) srem; BitXor (_) bxor; BitAnd (_) band; BitOr (_) bor; Shl (_) ishl; Shr (false) ushr; Shr (true) sshr; Eq (_) icmp(Equal); Lt (false) icmp(UnsignedLessThan); Lt (true) icmp(SignedLessThan); Le (false) icmp(UnsignedLessThanOrEqual); Le (true) icmp(SignedLessThanOrEqual); Ne (_) icmp(NotEqual); Ge (false) icmp(UnsignedGreaterThanOrEqual); Ge (true) icmp(SignedGreaterThanOrEqual); Gt (false) icmp(UnsignedGreaterThan); Gt (true) icmp(SignedGreaterThan); Offset (_) bug; } } pub fn trans_checked_int_binop<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, bin_op: BinOp, in_lhs: CValue<'tcx>, in_rhs: CValue<'tcx>, out_ty: Ty<'tcx>, signed: bool, ) -> CValue<'tcx> { if bin_op != BinOp::Shl && bin_op != BinOp::Shr { assert_eq!( in_lhs.layout().ty, in_rhs.layout().ty, "checked int binop requires lhs and rhs of same type" ); } let lhs = in_lhs.load_scalar(fx); let rhs = in_rhs.load_scalar(fx); let res = match bin_op { BinOp::Add => fx.bcx.ins().iadd(lhs, rhs), BinOp::Sub => fx.bcx.ins().isub(lhs, rhs), BinOp::Mul => fx.bcx.ins().imul(lhs, rhs), BinOp::Shl => fx.bcx.ins().ishl(lhs, 
rhs),
        BinOp::Shr => {
            if !signed {
                fx.bcx.ins().ushr(lhs, rhs)
            } else {
                fx.bcx.ins().sshr(lhs, rhs)
            }
        }
        _ => bug!(
            "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
            bin_op,
            in_lhs,
            in_rhs
        ),
    };

    // TODO: check for overflow
    // NOTE(review): the overflow flag is hard-coded to 0, so checked
    // arithmetic never reports overflow here — debug-mode overflow panics
    // will silently not fire. Known limitation; needs real flag computation.
    let has_overflow = fx.bcx.ins().iconst(types::I8, 0);

    // Checked ops return a (result, overflowed) pair; build it on the stack
    // and read it back as a CValue.
    let out_place = CPlace::new_stack_slot(fx, out_ty);
    let out_layout = out_place.layout();
    out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));

    out_place.to_cvalue(fx)
}

/// Lower a binary operation on floats. `Rem` has no Cranelift instruction,
/// so it is routed through the libm `fmod`/`fmodf` calls.
fn trans_float_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let res = binop_match! {
        fx, bin_op, false, lhs, rhs, ty, "float";
        Add (_) fadd;
        Sub (_) fsub;
        Mul (_) fmul;
        Div (_) fdiv;
        Rem (_) custom(|| {
            assert_eq!(lhs.layout().ty, ty);
            assert_eq!(rhs.layout().ty, ty);
            match ty.sty {
                ty::Float(FloatTy::F32) => fx.easy_call("fmodf", &[lhs, rhs], ty),
                ty::Float(FloatTy::F64) => fx.easy_call("fmod", &[lhs, rhs], ty),
                _ => bug!(),
            }
        });
        BitXor (_) bxor;
        BitAnd (_) band;
        BitOr (_) bor;
        Shl (_) bug;
        Shr (_) bug;
        Eq (_) fcmp(Equal);
        Lt (_) fcmp(LessThan);
        Le (_) fcmp(LessThanOrEqual);
        Ne (_) fcmp(NotEqual);
        Ge (_) fcmp(GreaterThanOrEqual);
        Gt (_) fcmp(GreaterThan);
        Offset (_) bug;
    };

    res
}

/// Lower a binary operation on `char`. Only comparisons are valid; `char`
/// compares as an unsigned integer (its scalar value).
fn trans_char_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let res = binop_match!
{ fx, bin_op, false, lhs, rhs, ty, "char"; Add (_) bug; Sub (_) bug; Mul (_) bug; Div (_) bug; Rem (_) bug; BitXor (_) bug; BitAnd (_) bug; BitOr (_) bug; Shl (_) bug; Shr (_) bug; Eq (_) icmp(Equal); Lt (_) icmp(UnsignedLessThan); Le (_) icmp(UnsignedLessThanOrEqual); Ne (_) icmp(NotEqual); Ge (_) icmp(UnsignedGreaterThanOrEqual); Gt (_) icmp(UnsignedGreaterThan); Offset (_) bug; }; res } fn trans_ptr_binop<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, bin_op: BinOp, lhs: CValue<'tcx>, rhs: CValue<'tcx>, ret_ty: Ty<'tcx>, ) -> CValue<'tcx> { let not_fat = match lhs.layout().ty.sty { ty::RawPtr(TypeAndMut { ty, mutbl: _ }) => { ty.is_sized(fx.tcx.at(DUMMY_SP), ParamEnv::reveal_all()) } ty::FnPtr(..) => true, _ => bug!("trans_ptr_binop on non ptr"), }; if not_fat { if let BinOp::Offset = bin_op { let (base, offset) = (lhs, rhs.load_scalar(fx)); let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty; let pointee_size = fx.layout_of(pointee_ty).size.bytes(); let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64); let base_val = base.load_scalar(fx); let res = fx.bcx.ins().iadd(base_val, ptr_diff); return CValue::by_val(res, base.layout()); } binop_match! 
{ fx, bin_op, false, lhs, rhs, ret_ty, "ptr"; Add (_) bug; Sub (_) bug; Mul (_) bug; Div (_) bug; Rem (_) bug; BitXor (_) bug; BitAnd (_) bug; BitOr (_) bug; Shl (_) bug; Shr (_) bug; Eq (_) icmp(Equal); Lt (_) icmp(UnsignedLessThan); Le (_) icmp(UnsignedLessThanOrEqual); Ne (_) icmp(NotEqual); Ge (_) icmp(UnsignedGreaterThanOrEqual); Gt (_) icmp(UnsignedGreaterThan); Offset (_) bug; // Handled above } } else { let (lhs_ptr, lhs_extra) = lhs.load_scalar_pair(fx); let (rhs_ptr, rhs_extra) = rhs.load_scalar_pair(fx); let res = match bin_op { BinOp::Eq => { let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr); let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra); fx.bcx.ins().band(ptr_eq, extra_eq) } BinOp::Ne => { let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr); let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra); fx.bcx.ins().bor(ptr_ne, extra_ne) } _ => unimplemented!( "trans_ptr_binop({:?}, <fat ptr>, <fat ptr>) not implemented", bin_op ), }; assert_eq!(fx.tcx.types.bool, ret_ty); let ret_layout = fx.layout_of(ret_ty); CValue::by_val(fx.bcx.ins().bint(types::I8, res), ret_layout) } } pub fn trans_place<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, place: &Place<'tcx>, ) -> CPlace<'tcx> { match place { Place::Base(base) => match base { PlaceBase::Local(local) => fx.get_local_place(*local), PlaceBase::Static(static_) => match static_.kind { StaticKind::Static(def_id) => { crate::constant::codegen_static_ref(fx, def_id, static_.ty) } StaticKind::Promoted(promoted) => { crate::constant::trans_promoted(fx, promoted, static_.ty) } } } Place::Projection(projection) => { let base = trans_place(fx, &projection.base); match projection.elem { ProjectionElem::Deref => base.place_deref(fx), ProjectionElem::Field(field, _ty) => base.place_field(fx, field), ProjectionElem::Index(local) => { let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx); base.place_index(fx, index) } 
ProjectionElem::ConstantIndex { offset, min_length: _, from_end, } => { let index = if !from_end { fx.bcx.ins().iconst(fx.pointer_type, offset as i64) } else { let len = codegen_array_len(fx, base); fx.bcx.ins().iadd_imm(len, -(offset as i64)) }; base.place_index(fx, index) } ProjectionElem::Subslice { from, to } => { // These indices are generated by slice patterns. // slice[from:-to] in Python terms. match base.layout().ty.sty { ty::Array(elem_ty, len) => { let elem_layout = fx.layout_of(elem_ty); let ptr = base.to_addr(fx); let len = crate::constant::force_eval_const(fx, len).unwrap_usize(fx.tcx); CPlace::for_addr( fx.bcx.ins().iadd_imm(ptr, elem_layout.size.bytes() as i64 * from as i64), fx.layout_of(fx.tcx.mk_array(elem_ty, len - from as u64 - to as u64)), ) } ty::Slice(elem_ty) => { let elem_layout = fx.layout_of(elem_ty); let (ptr, len) = base.to_addr_maybe_unsized(fx); let len = len.unwrap(); CPlace::for_addr_with_extra( fx.bcx.ins().iadd_imm(ptr, elem_layout.size.bytes() as i64 * from as i64), fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)), base.layout(), ) } _ => unreachable!(), } } ProjectionElem::Downcast(_adt_def, variant) => base.downcast_variant(fx, variant), } } } } pub fn trans_operand<'a, 'tcx>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, operand: &Operand<'tcx>, ) -> CValue<'tcx> { match operand { Operand::Move(place) | Operand::Copy(place) => { let cplace = trans_place(fx, place); cplace.to_cvalue(fx) } Operand::Constant(const_) => crate::constant::trans_constant(fx, const_), } } Remove workaround for previously missing encoding use rustc::ty::adjustment::PointerCast; use crate::prelude::*; pub fn trans_fn<'a, 'clif, 'tcx: 'a, B: Backend + 'static>( cx: &mut crate::CodegenCx<'a, 'clif, 'tcx, B>, instance: Instance<'tcx>, linkage: Linkage, ) { let tcx = cx.tcx; let mir = tcx.instance_mir(instance.def); // Check fn sig for u128 and i128 and replace those functions with a trap. 
{
        // FIXME implement u128 and i128 support
        // Check sig for u128 and i128: Cranelift cannot represent these yet,
        // so any function mentioning them in its signature is replaced by a
        // body that immediately traps, with a warning at compile time.
        let fn_sig =
            tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &instance.fn_sig(tcx));

        // Type-folder that records (in .1) whether u128/i128 occurs anywhere
        // in the signature's types.
        struct UI128Visitor<'a, 'tcx: 'a>(TyCtxt<'a, 'tcx, 'tcx>, bool);

        impl<'a, 'tcx: 'a> rustc::ty::fold::TypeVisitor<'tcx> for UI128Visitor<'a, 'tcx> {
            fn visit_ty(&mut self, t: Ty<'tcx>) -> bool {
                if t.sty == self.0.types.u128.sty || t.sty == self.0.types.i128.sty {
                    self.1 = true;
                    return false; // stop visiting
                }

                t.super_visit_with(self)
            }
        }

        let mut visitor = UI128Visitor(tcx, false);
        fn_sig.visit_with(&mut visitor);

        //If found replace function with a trap.
        if visitor.1 {
            tcx.sess.warn("u128 and i128 are not yet supported. \
            Functions using these as args will be replaced with a trap.");

            // Declare function with fake signature
            // NOTE(review): uses types::INVALID as the sole param type —
            // presumably intentional so the real (unrepresentable) signature
            // is never encoded; confirm Cranelift accepts this at declaration.
            let sig = Signature {
                params: vec![AbiParam::new(types::INVALID)],
                returns: vec![],
                call_conv: CallConv::Fast,
            };
            let name = tcx.symbol_name(instance).as_str();
            let func_id = cx.module.declare_function(&*name, linkage, &sig).unwrap();

            // Create trapping function
            let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig);
            let mut func_ctx = FunctionBuilderContext::new();
            let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
            let start_ebb = bcx.create_ebb();
            bcx.append_ebb_params_for_function_params(start_ebb);
            bcx.switch_to_block(start_ebb);

            // Minimal FunctionCx: only what trap_unreachable needs; maps are
            // empty because no MIR is actually translated.
            let mut fx = FunctionCx {
                tcx,
                module: cx.module,
                pointer_type: pointer_ty(tcx),

                instance,
                mir,

                bcx,
                ebb_map: HashMap::new(),
                local_map: HashMap::new(),

                clif_comments: crate::pretty_clif::CommentWriter::new(tcx, instance),
                constants: &mut cx.ccx,
                caches: &mut cx.caches,
                source_info_set: indexmap::IndexSet::new(),
            };

            crate::trap::trap_unreachable(&mut fx, "[unimplemented] Called function with u128 or i128 as argument.");
            fx.bcx.seal_all_blocks();
            fx.bcx.finalize();

            // Define function
            cx.caches.context.func = func;
            cx.module
                .define_function(func_id, &mut cx.caches.context)
                .unwrap();
            // Clear the shared context so the next function can reuse it.
            cx.caches.context.clear();
return; } } // Declare function let (name, sig) = get_function_name_and_sig(tcx, instance, false); let func_id = cx.module.declare_function(&name, linkage, &sig).unwrap(); let mut debug_context = cx .debug_context .as_mut() .map(|debug_context| FunctionDebugContext::new(tcx, debug_context, mir, &name, &sig)); // Make FunctionBuilder let mut func = Function::with_name_signature(ExternalName::user(0, 0), sig); let mut func_ctx = FunctionBuilderContext::new(); let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx); // Predefine ebb's let start_ebb = bcx.create_ebb(); let mut ebb_map: HashMap<BasicBlock, Ebb> = HashMap::new(); for (bb, _bb_data) in mir.basic_blocks().iter_enumerated() { ebb_map.insert(bb, bcx.create_ebb()); } // Make FunctionCx let pointer_type = cx.module.target_config().pointer_type(); let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance); let mut fx = FunctionCx { tcx, module: cx.module, pointer_type, instance, mir, bcx, ebb_map, local_map: HashMap::new(), clif_comments, constants: &mut cx.ccx, caches: &mut cx.caches, source_info_set: indexmap::IndexSet::new(), }; with_unimpl_span(fx.mir.span, || { crate::abi::codegen_fn_prelude(&mut fx, start_ebb); codegen_fn_content(&mut fx); }); // Recover all necessary data from fx, before accessing func will prevent future access to it. 
let instance = fx.instance; let clif_comments = fx.clif_comments; let source_info_set = fx.source_info_set; #[cfg(debug_assertions)] crate::pretty_clif::write_clif_file(cx.tcx, "unopt", instance, &func, &clif_comments, None); // Verify function verify_func(tcx, &clif_comments, &func); // Define function let context = &mut cx.caches.context; context.func = func; cx.module .define_function(func_id, context) .unwrap(); let value_ranges = context.build_value_labels_ranges(cx.module.isa()).expect("value location ranges"); // Write optimized function to file for debugging #[cfg(debug_assertions)] crate::pretty_clif::write_clif_file(cx.tcx, "opt", instance, &context.func, &clif_comments, Some(&value_ranges)); // Define debuginfo for function let isa = cx.module.isa(); debug_context .as_mut() .map(|x| x.define(tcx, context, isa, &source_info_set)); // Clear context to make it usable for the next function context.clear(); } fn verify_func(tcx: TyCtxt, writer: &crate::pretty_clif::CommentWriter, func: &Function) { let flags = settings::Flags::new(settings::builder()); match ::cranelift::codegen::verify_function(&func, &flags) { Ok(_) => {} Err(err) => { tcx.sess.err(&format!("{:?}", err)); let pretty_error = ::cranelift::codegen::print_errors::pretty_verifier_error( &func, None, Some(Box::new(writer)), err, ); tcx.sess .fatal(&format!("cranelift verify error:\n{}", pretty_error)); } } } fn codegen_fn_content<'a, 'tcx: 'a>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>) { for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() { if bb_data.is_cleanup { // Unwinding after panicking is not supported continue; } let ebb = fx.get_ebb(bb); fx.bcx.switch_to_block(ebb); fx.bcx.ins().nop(); for stmt in &bb_data.statements { fx.set_debug_loc(stmt.source_info); trans_stmt(fx, ebb, stmt); } #[cfg(debug_assertions)] { let mut terminator_head = "\n".to_string(); bb_data .terminator() .kind .fmt_head(&mut terminator_head) .unwrap(); let inst = fx.bcx.func.layout.last_inst(ebb).unwrap(); 
            fx.add_comment(inst, terminator_head);
        }

        fx.set_debug_loc(bb_data.terminator().source_info);

        // Lower the basic block's terminator into Cranelift control flow.
        match &bb_data.terminator().kind {
            TerminatorKind::Goto { target } => {
                let ebb = fx.get_ebb(*target);
                fx.bcx.ins().jump(ebb, &[]);
            }
            TerminatorKind::Return => {
                crate::abi::codegen_return(fx);
            }
            TerminatorKind::Assert {
                cond,
                expected,
                msg,
                target,
                cleanup: _,
            } => {
                let cond = trans_operand(fx, cond).load_scalar(fx);
                // TODO HACK brz/brnz for i8/i16 is not yet implemented
                // (widen the condition to i32 so the branch instructions accept it)
                let cond = fx.bcx.ins().uextend(types::I32, cond);
                let target = fx.get_ebb(*target);
                // Branch to `target` when the condition matches `expected`;
                // fall through into a panic trap otherwise.
                if *expected {
                    fx.bcx.ins().brnz(cond, target, &[]);
                } else {
                    fx.bcx.ins().brz(cond, target, &[]);
                };
                trap_panic(fx, format!("[panic] Assert {:?} failed.", msg));
            }

            TerminatorKind::SwitchInt {
                discr,
                switch_ty: _,
                values,
                targets,
            } => {
                let discr = trans_operand(fx, discr).load_scalar(fx);
                let mut switch = ::cranelift::frontend::Switch::new();
                for (i, value) in values.iter().enumerate() {
                    let ebb = fx.get_ebb(targets[i]);
                    switch.set_entry(*value as u64, ebb);
                }
                // MIR stores the otherwise-target as the last entry of `targets`.
                let otherwise_ebb = fx.get_ebb(targets[targets.len() - 1]);
                switch.emit(&mut fx.bcx, discr, otherwise_ebb);
            }
            TerminatorKind::Call {
                func,
                args,
                destination,
                cleanup: _,
                from_hir_call: _,
            } => {
                crate::abi::codegen_terminator_call(fx, func, args, destination);
            }
            TerminatorKind::Resume | TerminatorKind::Abort => {
                // Unwinding is unsupported; reaching these means state corruption.
                trap_unreachable(fx, "[corruption] Unwinding bb reached.");
            }
            TerminatorKind::Unreachable => {
                trap_unreachable(fx, "[corruption] Hit unreachable code.");
            }
            // These terminator kinds are removed by earlier MIR passes and
            // must never reach codegen.
            TerminatorKind::Yield { .. }
            | TerminatorKind::FalseEdges { .. }
            | TerminatorKind::FalseUnwind { .. }
            | TerminatorKind::DropAndReplace { ..
} | TerminatorKind::GeneratorDrop => { bug!("shouldn't exist at trans {:?}", bb_data.terminator()); } TerminatorKind::Drop { location, target, unwind: _, } => { let ty = location.ty(fx.mir, fx.tcx).ty; let ty = fx.monomorphize(&ty); let drop_fn = Instance::resolve_drop_in_place(fx.tcx, ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything } else { let drop_place = trans_place(fx, location); let drop_fn_ty = drop_fn.ty(fx.tcx); match ty.sty { ty::Dynamic(..) => { crate::abi::codegen_drop(fx, drop_place, drop_fn_ty); } _ => { let arg_place = CPlace::new_stack_slot( fx, fx.tcx.mk_ref( &ty::RegionKind::ReErased, TypeAndMut { ty, mutbl: crate::rustc::hir::Mutability::MutMutable, }, ), ); drop_place.write_place_ref(fx, arg_place); let arg_value = arg_place.to_cvalue(fx); crate::abi::codegen_call_inner( fx, None, drop_fn_ty, vec![arg_value], None, ); } } } let target_ebb = fx.get_ebb(*target); fx.bcx.ins().jump(target_ebb, &[]); } }; } fx.bcx.seal_all_blocks(); fx.bcx.finalize(); } fn trans_stmt<'a, 'tcx: 'a>( fx: &mut FunctionCx<'a, 'tcx, impl Backend>, cur_ebb: Ebb, stmt: &Statement<'tcx>, ) { let _print_guard = PrintOnPanic(|| format!("stmt {:?}", stmt)); fx.set_debug_loc(stmt.source_info); #[cfg(debug_assertions)] match &stmt.kind { StatementKind::StorageLive(..) | StatementKind::StorageDead(..) 
=> {} // Those are not very useful _ => { let inst = fx.bcx.func.layout.last_inst(cur_ebb).unwrap(); fx.add_comment(inst, format!("{:?}", stmt)); } } match &stmt.kind { StatementKind::SetDiscriminant { place, variant_index, } => { let place = trans_place(fx, place); let layout = place.layout(); if layout.for_variant(&*fx, *variant_index).abi == layout::Abi::Uninhabited { return; } match layout.variants { layout::Variants::Single { index } => { assert_eq!(index, *variant_index); } layout::Variants::Multiple { discr: _, discr_index, discr_kind: layout::DiscriminantKind::Tag, variants: _, } => { let ptr = place.place_field(fx, mir::Field::new(discr_index)); let to = layout .ty .ty_adt_def() .unwrap() .discriminant_for_variant(fx.tcx, *variant_index) .val; let discr = CValue::const_val(fx, ptr.layout().ty, to as u64 as i64); ptr.write_cvalue(fx, discr); } layout::Variants::Multiple { discr: _, discr_index, discr_kind: layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start, }, variants: _, } => { if *variant_index != dataful_variant { let niche = place.place_field(fx, mir::Field::new(discr_index)); //let niche_llty = niche.layout.immediate_llvm_type(bx.cx); let niche_value = ((variant_index.as_u32() - niche_variants.start().as_u32()) as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. 
let niche_llval = if niche_value == 0 { CValue::const_val(fx, niche.layout().ty, 0) } else { CValue::const_val(fx, niche.layout().ty, niche_value as u64 as i64) }; niche.write_cvalue(fx, niche_llval); } } } } StatementKind::Assign(to_place, rval) => { let lval = trans_place(fx, to_place); let dest_layout = lval.layout(); match &**rval { Rvalue::Use(operand) => { let val = trans_operand(fx, operand); lval.write_cvalue(fx, val); } Rvalue::Ref(_, _, place) => { let place = trans_place(fx, place); place.write_place_ref(fx, lval); } Rvalue::BinaryOp(bin_op, lhs, rhs) => { let ty = fx.monomorphize(&lhs.ty(fx.mir, fx.tcx)); let lhs = trans_operand(fx, lhs); let rhs = trans_operand(fx, rhs); let res = match ty.sty { ty::Bool => trans_bool_binop(fx, *bin_op, lhs, rhs, lval.layout().ty), ty::Uint(_) => { trans_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, false) } ty::Int(_) => { trans_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, true) } ty::Float(_) => trans_float_binop(fx, *bin_op, lhs, rhs, lval.layout().ty), ty::Char => trans_char_binop(fx, *bin_op, lhs, rhs, lval.layout().ty), ty::RawPtr(..) => trans_ptr_binop(fx, *bin_op, lhs, rhs, lval.layout().ty), ty::FnPtr(..) 
=> trans_ptr_binop(fx, *bin_op, lhs, rhs, lval.layout().ty), _ => unimplemented!("binop {:?} for {:?}", bin_op, ty), }; lval.write_cvalue(fx, res); } Rvalue::CheckedBinaryOp(bin_op, lhs, rhs) => { let ty = fx.monomorphize(&lhs.ty(fx.mir, fx.tcx)); let lhs = trans_operand(fx, lhs); let rhs = trans_operand(fx, rhs); let res = match ty.sty { ty::Uint(_) => { trans_checked_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, false) } ty::Int(_) => { trans_checked_int_binop(fx, *bin_op, lhs, rhs, lval.layout().ty, true) } _ => unimplemented!("checked binop {:?} for {:?}", bin_op, ty), }; lval.write_cvalue(fx, res); } Rvalue::UnaryOp(un_op, operand) => { let operand = trans_operand(fx, operand); let layout = operand.layout(); let val = operand.load_scalar(fx); let res = match un_op { UnOp::Not => { match layout.ty.sty { ty::Bool => { let val = fx.bcx.ins().uextend(types::I32, val); // WORKAROUND for CraneStation/cranelift#466 let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0); fx.bcx.ins().bint(types::I8, res) } ty::Uint(_) | ty::Int(_) => fx.bcx.ins().bnot(val), _ => unimplemented!("un op Not for {:?}", layout.ty), } } UnOp::Neg => match layout.ty.sty { ty::Int(_) => { let clif_ty = fx.clif_type(layout.ty).unwrap(); let zero = fx.bcx.ins().iconst(clif_ty, 0); fx.bcx.ins().isub(zero, val) } ty::Float(_) => fx.bcx.ins().fneg(val), _ => unimplemented!("un op Neg for {:?}", layout.ty), }, }; lval.write_cvalue(fx, CValue::by_val(res, layout)); } Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, ty) => { let layout = fx.layout_of(ty); match fx .monomorphize(&operand.ty(&fx.mir.local_decls, fx.tcx)) .sty { ty::FnDef(def_id, substs) => { let func_ref = fx.get_function_ref( Instance::resolve(fx.tcx, ParamEnv::reveal_all(), def_id, substs) .unwrap(), ); let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref); lval.write_cvalue(fx, CValue::by_val(func_addr, layout)); } _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", ty), } } 
Rvalue::Cast(CastKind::Pointer(PointerCast::UnsafeFnPointer), operand, ty) | Rvalue::Cast(CastKind::Pointer(PointerCast::MutToConstPointer), operand, ty) => { let operand = trans_operand(fx, operand); let layout = fx.layout_of(ty); lval.write_cvalue(fx, operand.unchecked_cast_to(layout)); } Rvalue::Cast(CastKind::Misc, operand, to_ty) => { let operand = trans_operand(fx, operand); let from_ty = operand.layout().ty; fn is_fat_ptr<'a, 'tcx: 'a>(fx: &FunctionCx<'a, 'tcx, impl Backend>, ty: Ty<'tcx>) -> bool { ty .builtin_deref(true) .map(|ty::TypeAndMut {ty: pointee_ty, mutbl: _ }| fx.layout_of(pointee_ty).is_unsized()) .unwrap_or(false) } if is_fat_ptr(fx, from_ty) { if is_fat_ptr(fx, to_ty) { // fat-ptr -> fat-ptr lval.write_cvalue(fx, operand.unchecked_cast_to(dest_layout)); } else { // fat-ptr -> thin-ptr let (ptr, _extra) = operand.load_scalar_pair(fx); lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout)) } } else if let ty::Adt(adt_def, _substs) = from_ty.sty { // enum -> discriminant value assert!(adt_def.is_enum()); match to_ty.sty { ty::Uint(_) | ty::Int(_) => {}, _ => unreachable!("cast adt {} -> {}", from_ty, to_ty), } // FIXME avoid forcing to stack let place = CPlace::for_addr(operand.force_stack(fx), operand.layout()); let discr = trans_get_discriminant(fx, place, fx.layout_of(to_ty)); lval.write_cvalue(fx, discr); } else { let from_clif_ty = fx.clif_type(from_ty).unwrap(); let to_clif_ty = fx.clif_type(to_ty).unwrap(); let from = operand.load_scalar(fx); let signed = match from_ty.sty { ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false, ty::Int(..) => true, ty::Float(..) 
=> false, // `signed` is unused for floats _ => panic!("{}", from_ty), }; let res = if from_clif_ty.is_int() && to_clif_ty.is_int() { // int-like -> int-like crate::common::clif_intcast( fx, from, to_clif_ty, signed, ) } else if from_clif_ty.is_int() && to_clif_ty.is_float() { // int-like -> float if signed { fx.bcx.ins().fcvt_from_sint(to_clif_ty, from) } else { fx.bcx.ins().fcvt_from_uint(to_clif_ty, from) } } else if from_clif_ty.is_float() && to_clif_ty.is_int() { // float -> int-like let from = operand.load_scalar(fx); if signed { fx.bcx.ins().fcvt_to_sint_sat(to_clif_ty, from) } else { fx.bcx.ins().fcvt_to_uint_sat(to_clif_ty, from) } } else if from_clif_ty.is_float() && to_clif_ty.is_float() { // float -> float match (from_clif_ty, to_clif_ty) { (types::F32, types::F64) => { fx.bcx.ins().fpromote(types::F64, from) } (types::F64, types::F32) => { fx.bcx.ins().fdemote(types::F32, from) } _ => from, } } else { unimpl!("rval misc {:?} {:?}", from_ty, to_ty) }; lval.write_cvalue(fx, CValue::by_val(res, dest_layout)); } } Rvalue::Cast(CastKind::Pointer(PointerCast::ClosureFnPointer(_)), operand, _ty) => { let operand = trans_operand(fx, operand); match operand.layout().ty.sty { ty::Closure(def_id, substs) => { let instance = Instance::resolve_closure( fx.tcx, def_id, substs, ty::ClosureKind::FnOnce, ); let func_ref = fx.get_function_ref(instance); let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref); lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout())); } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout().ty) } } } Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, _ty) => { let operand = trans_operand(fx, operand); operand.unsize_value(fx, lval); } Rvalue::Discriminant(place) => { let place = trans_place(fx, place); let discr = trans_get_discriminant(fx, place, dest_layout); lval.write_cvalue(fx, discr); } Rvalue::Repeat(operand, times) => { let operand = trans_operand(fx, operand); for i in 0..*times { let index = 
fx.bcx.ins().iconst(fx.pointer_type, i as i64);
                    let to = lval.place_index(fx, index);
                    to.write_cvalue(fx, operand);
                }
            }
            Rvalue::Len(place) => {
                let place = trans_place(fx, place);
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                let len = codegen_array_len(fx, place);
                lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
            }
            Rvalue::NullaryOp(NullOp::Box, content_ty) => {
                use rustc::middle::lang_items::ExchangeMallocFnLangItem;

                let usize_type = fx.clif_type(fx.tcx.types.usize).unwrap();
                let layout = fx.layout_of(content_ty);
                let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
                let llalign = fx
                    .bcx
                    .ins()
                    .iconst(usize_type, layout.align.abi.bytes() as i64);
                let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));

                // Allocate space:
                let def_id = match fx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
                    Ok(id) => id,
                    Err(s) => {
                        fx.tcx
                            .sess
                            .fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                    }
                };
                let instance = ty::Instance::mono(fx.tcx, def_id);
                let func_ref = fx.get_function_ref(instance);
                let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
                let ptr = fx.bcx.inst_results(call)[0];
                lval.write_cvalue(fx, CValue::by_val(ptr, box_layout));
            }
            Rvalue::NullaryOp(NullOp::SizeOf, ty) => {
                assert!(lval
                    .layout()
                    .ty
                    .is_sized(fx.tcx.at(DUMMY_SP), ParamEnv::reveal_all()));
                let ty_size = fx.layout_of(ty).size.bytes();
                let val = CValue::const_val(fx, fx.tcx.types.usize, ty_size as i64);
                lval.write_cvalue(fx, val);
            }
            Rvalue::Aggregate(kind, operands) => match **kind {
                AggregateKind::Array(_ty) => {
                    for (i, operand) in operands.into_iter().enumerate() {
                        let operand = trans_operand(fx, operand);
                        let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
                        let to = lval.place_index(fx, index);
                        to.write_cvalue(fx, operand);
                    }
                }
                _ => unimpl!("shouldn't exist at trans {:?}", rval),
            },
        }
    }
    StatementKind::StorageLive(_)
    | StatementKind::StorageDead(_)
    | StatementKind::Nop
    | StatementKind::FakeRead(..)
    | StatementKind::Retag { .. }
    | StatementKind::AscribeUserType(..) => {}

    StatementKind::InlineAsm { .. } => unimpl!("Inline assembly is not supported"),
    }
}

/// Emit code producing the length of `place`: a compile-time `iconst` for
/// fixed-size arrays, or the runtime length metadata for slices.
fn codegen_array_len<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    place: CPlace<'tcx>,
) -> Value {
    match place.layout().ty.sty {
        ty::Array(_elem_ty, len) => {
            let len = crate::constant::force_eval_const(fx, len).unwrap_usize(fx.tcx) as i64;
            fx.bcx.ins().iconst(fx.pointer_type, len)
        }
        ty::Slice(_elem_ty) => place
            .to_addr_maybe_unsized(fx)
            .1
            .expect("Length metadata for slice place"),
        _ => bug!("Rvalue::Len({:?})", place),
    }
}

/// Read the discriminant of the enum stored at `place` and cast it to
/// `dest_layout`'s type. Handles single-variant layouts (constant result),
/// tag-based layouts, and niche-filling layouts.
pub fn trans_get_discriminant<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    place: CPlace<'tcx>,
    dest_layout: TyLayout<'tcx>,
) -> CValue<'tcx> {
    let layout = place.layout();

    if layout.abi == layout::Abi::Uninhabited {
        return trap_unreachable_ret_value(fx, dest_layout, "[panic] Tried to get discriminant for uninhabited type.");
    }

    let (discr_scalar, discr_index, discr_kind) = match &layout.variants {
        layout::Variants::Single { index } => {
            // Only one variant: the discriminant is a compile-time constant.
            let discr_val = layout
                .ty
                .ty_adt_def()
                .map_or(index.as_u32() as u128, |def| {
                    def.discriminant_for_variant(fx.tcx, *index).val
                });
            return CValue::const_val(fx, dest_layout.ty, discr_val as u64 as i64);
        }
        layout::Variants::Multiple { discr, discr_index, discr_kind, variants: _ } => {
            (discr, *discr_index, discr_kind)
        }
    };

    let discr = place.place_field(fx, mir::Field::new(discr_index)).to_cvalue(fx);
    let discr_ty = discr.layout().ty;
    let lldiscr = discr.load_scalar(fx);
    match discr_kind {
        layout::DiscriminantKind::Tag => {
            let signed = match discr_scalar.value {
                layout::Int(_, signed) => signed,
                _ => false,
            };
            let val = clif_intcast(fx, lldiscr, fx.clif_type(dest_layout.ty).unwrap(), signed);
            return CValue::by_val(val, dest_layout);
        }
        layout::DiscriminantKind::Niche {
            dataful_variant,
            ref niche_variants,
            niche_start,
        } => {
            let niche_llty = fx.clif_type(discr_ty).unwrap();
            let dest_clif_ty = fx.clif_type(dest_layout.ty).unwrap();
            if niche_variants.start() == niche_variants.end() {
                // A single niche value: equality test against `niche_start`.
                let b = fx
                    .bcx
                    .ins()
                    .icmp_imm(IntCC::Equal, lldiscr, *niche_start as u64 as i64);
                let if_true = fx
                    .bcx
                    .ins()
                    .iconst(dest_clif_ty, niche_variants.start().as_u32() as i64);
                let if_false = fx
                    .bcx
                    .ins()
                    .iconst(dest_clif_ty, dataful_variant.as_u32() as i64);
                let val = fx.bcx.ins().select(b, if_true, if_false);
                return CValue::by_val(val, dest_layout);
            } else {
                // Rebase from niche values to discriminant values.
                let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                let delta = fx.bcx.ins().iconst(niche_llty, delta as u64 as i64);
                let lldiscr = fx.bcx.ins().isub(lldiscr, delta);
                let b = fx.bcx.ins().icmp_imm(
                    IntCC::UnsignedLessThanOrEqual,
                    lldiscr,
                    niche_variants.end().as_u32() as i64,
                );
                let if_true =
                    clif_intcast(fx, lldiscr, fx.clif_type(dest_layout.ty).unwrap(), false);
                let if_false = fx
                    .bcx
                    .ins()
                    .iconst(dest_clif_ty, dataful_variant.as_u32() as i64);
                let val = fx.bcx.ins().select(b, if_true, if_false);
                return CValue::by_val(val, dest_layout);
            }
        }
    }
}

// Dispatch table mapping a MIR `BinOp` (and signedness) to the Cranelift
// instruction that implements it. `bug` marks combinations that MIR typing
// rules out for the given operand category; `icmp`/`fcmp` produce a bool
// (`i8`) result; `custom` splices in an arbitrary expression.
macro_rules! binop_match {
    (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, bug) => {
        bug!("binop {} on {} lhs: {:?} rhs: {:?}", stringify!($var), $bug_fmt, $lhs, $rhs)
    };
    (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, icmp($cc:ident)) => {{
        assert_eq!($fx.tcx.types.bool, $ret_ty);
        let ret_layout = $fx.layout_of($ret_ty);
        let b = $fx.bcx.ins().icmp(IntCC::$cc, $lhs, $rhs);
        CValue::by_val($fx.bcx.ins().bint(types::I8, b), ret_layout)
    }};
    (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, fcmp($cc:ident)) => {{
        assert_eq!($fx.tcx.types.bool, $ret_ty);
        let ret_layout = $fx.layout_of($ret_ty);
        let b = $fx.bcx.ins().fcmp(FloatCC::$cc, $lhs, $rhs);
        CValue::by_val($fx.bcx.ins().bint(types::I8, b), ret_layout)
    }};
    (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, custom(|| $body:expr)) => {{
        $body
    }};
    (@single $fx:expr, $bug_fmt:expr, $var:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, $name:ident) => {{
        let ret_layout = $fx.layout_of($ret_ty);
        CValue::by_val($fx.bcx.ins().$name($lhs, $rhs), ret_layout)
    }};
    (
        $fx:expr, $bin_op:expr, $signed:expr, $lhs:expr, $rhs:expr, $ret_ty:expr, $bug_fmt:expr;
        $(
            $var:ident ($sign:pat) $name:tt $( ( $($next:tt)* ) )? ;
        )*
    ) => {{
        let lhs = $lhs.load_scalar($fx);
        let rhs = $rhs.load_scalar($fx);
        match ($bin_op, $signed) {
            $(
                (BinOp::$var, $sign) => binop_match!(@single $fx, $bug_fmt, $var, $signed, lhs, rhs, $ret_ty, $name $( ( $($next)* ) )?),
            )*
        }
    }}
}

/// Binary operations on `bool` operands: only the bitwise and comparison
/// operators are valid; arithmetic/shift variants are unreachable by typing.
fn trans_bool_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let res = binop_match! {
        fx, bin_op, false, lhs, rhs, ty, "bool";
        Add (_) bug;
        Sub (_) bug;
        Mul (_) bug;
        Div (_) bug;
        Rem (_) bug;
        BitXor (_) bxor;
        BitAnd (_) band;
        BitOr (_) bor;
        Shl (_) bug;
        Shr (_) bug;

        Eq (_) icmp(Equal);
        Lt (_) icmp(UnsignedLessThan);
        Le (_) icmp(UnsignedLessThanOrEqual);
        Ne (_) icmp(NotEqual);
        Ge (_) icmp(UnsignedGreaterThanOrEqual);
        Gt (_) icmp(UnsignedGreaterThan);

        Offset (_) bug;
    };

    res
}

/// Binary operations on integer operands, selecting signed or unsigned
/// Cranelift instructions based on `signed`. Shift operands may have
/// differing types, hence the `Shl`/`Shr` exemption from the type assert.
pub fn trans_int_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    out_ty: Ty<'tcx>,
    signed: bool,
) -> CValue<'tcx> {
    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
        assert_eq!(
            lhs.layout().ty,
            rhs.layout().ty,
            "int binop requires lhs and rhs of same type"
        );
    }

    binop_match! {
        fx, bin_op, signed, lhs, rhs, out_ty, "int/uint";
        Add (_) iadd;
        Sub (_) isub;
        Mul (_) imul;
        Div (false) udiv;
        Div (true) sdiv;
        Rem (false) urem;
        Rem (true) srem;
        BitXor (_) bxor;
        BitAnd (_) band;
        BitOr (_) bor;
        Shl (_) ishl;
        Shr (false) ushr;
        Shr (true) sshr;

        Eq (_) icmp(Equal);
        Lt (false) icmp(UnsignedLessThan);
        Lt (true) icmp(SignedLessThan);
        Le (false) icmp(UnsignedLessThanOrEqual);
        Le (true) icmp(SignedLessThanOrEqual);
        Ne (_) icmp(NotEqual);
        Ge (false) icmp(UnsignedGreaterThanOrEqual);
        Ge (true) icmp(SignedGreaterThanOrEqual);
        Gt (false) icmp(UnsignedGreaterThan);
        Gt (true) icmp(SignedGreaterThan);

        Offset (_) bug;
    }
}

/// Checked integer binops returning a `(result, overflow_flag)` pair written
/// through a stack slot. NOTE(review): the overflow flag is currently a
/// hard-coded `0` — overflow is NOT actually detected yet (see TODO below).
pub fn trans_checked_int_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    in_lhs: CValue<'tcx>,
    in_rhs: CValue<'tcx>,
    out_ty: Ty<'tcx>,
    signed: bool,
) -> CValue<'tcx> {
    if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
        assert_eq!(
            in_lhs.layout().ty,
            in_rhs.layout().ty,
            "checked int binop requires lhs and rhs of same type"
        );
    }

    let lhs = in_lhs.load_scalar(fx);
    let rhs = in_rhs.load_scalar(fx);
    let res = match bin_op {
        BinOp::Add => fx.bcx.ins().iadd(lhs, rhs),
        BinOp::Sub => fx.bcx.ins().isub(lhs, rhs),
        BinOp::Mul => fx.bcx.ins().imul(lhs, rhs),
        BinOp::Shl => fx.bcx.ins().ishl(lhs, rhs),
        BinOp::Shr => {
            if !signed {
                fx.bcx.ins().ushr(lhs, rhs)
            } else {
                fx.bcx.ins().sshr(lhs, rhs)
            }
        }
        _ => bug!(
            "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
            bin_op,
            in_lhs,
            in_rhs
        ),
    };

    // TODO: check for overflow
    let has_overflow = fx.bcx.ins().iconst(types::I8, 0);

    let out_place = CPlace::new_stack_slot(fx, out_ty);
    let out_layout = out_place.layout();
    out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));

    out_place.to_cvalue(fx)
}

/// Binary operations on floating point operands. `Rem` lowers to a libcall
/// (`fmodf`/`fmod`) because Cranelift has no float remainder instruction.
fn trans_float_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let res = binop_match! {
        fx, bin_op, false, lhs, rhs, ty, "float";
        Add (_) fadd;
        Sub (_) fsub;
        Mul (_) fmul;
        Div (_) fdiv;
        Rem (_) custom(|| {
            assert_eq!(lhs.layout().ty, ty);
            assert_eq!(rhs.layout().ty, ty);
            match ty.sty {
                ty::Float(FloatTy::F32) => fx.easy_call("fmodf", &[lhs, rhs], ty),
                ty::Float(FloatTy::F64) => fx.easy_call("fmod", &[lhs, rhs], ty),
                _ => bug!(),
            }
        });
        BitXor (_) bxor;
        BitAnd (_) band;
        BitOr (_) bor;
        Shl (_) bug;
        Shr (_) bug;

        Eq (_) fcmp(Equal);
        Lt (_) fcmp(LessThan);
        Le (_) fcmp(LessThanOrEqual);
        Ne (_) fcmp(NotEqual);
        Ge (_) fcmp(GreaterThanOrEqual);
        Gt (_) fcmp(GreaterThan);

        Offset (_) bug;
    };

    res
}

/// Binary operations on `char` operands: only unsigned comparisons are valid.
fn trans_char_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let res = binop_match! {
        fx, bin_op, false, lhs, rhs, ty, "char";
        Add (_) bug;
        Sub (_) bug;
        Mul (_) bug;
        Div (_) bug;
        Rem (_) bug;
        BitXor (_) bug;
        BitAnd (_) bug;
        BitOr (_) bug;
        Shl (_) bug;
        Shr (_) bug;

        Eq (_) icmp(Equal);
        Lt (_) icmp(UnsignedLessThan);
        Le (_) icmp(UnsignedLessThanOrEqual);
        Ne (_) icmp(NotEqual);
        Ge (_) icmp(UnsignedGreaterThanOrEqual);
        Gt (_) icmp(UnsignedGreaterThan);

        Offset (_) bug;
    };

    res
}

/// Binary operations on pointer operands. Thin pointers support `Offset`
/// (scaled by pointee size) and unsigned comparisons; fat pointers only
/// support `Eq`/`Ne`, comparing both the address and the metadata word.
fn trans_ptr_binop<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    bin_op: BinOp,
    lhs: CValue<'tcx>,
    rhs: CValue<'tcx>,
    ret_ty: Ty<'tcx>,
) -> CValue<'tcx> {
    let not_fat = match lhs.layout().ty.sty {
        ty::RawPtr(TypeAndMut { ty, mutbl: _ }) => {
            ty.is_sized(fx.tcx.at(DUMMY_SP), ParamEnv::reveal_all())
        }
        ty::FnPtr(..) => true,
        _ => bug!("trans_ptr_binop on non ptr"),
    };
    if not_fat {
        if let BinOp::Offset = bin_op {
            let (base, offset) = (lhs, rhs.load_scalar(fx));
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            return CValue::by_val(res, base.layout());
        }

        binop_match! {
            fx, bin_op, false, lhs, rhs, ret_ty, "ptr";
            Add (_) bug;
            Sub (_) bug;
            Mul (_) bug;
            Div (_) bug;
            Rem (_) bug;
            BitXor (_) bug;
            BitAnd (_) bug;
            BitOr (_) bug;
            Shl (_) bug;
            Shr (_) bug;

            Eq (_) icmp(Equal);
            Lt (_) icmp(UnsignedLessThan);
            Le (_) icmp(UnsignedLessThanOrEqual);
            Ne (_) icmp(NotEqual);
            Ge (_) icmp(UnsignedGreaterThanOrEqual);
            Gt (_) icmp(UnsignedGreaterThan);

            Offset (_) bug; // Handled above
        }
    } else {
        let (lhs_ptr, lhs_extra) = lhs.load_scalar_pair(fx);
        let (rhs_ptr, rhs_extra) = rhs.load_scalar_pair(fx);
        let res = match bin_op {
            BinOp::Eq => {
                let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
                let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
                fx.bcx.ins().band(ptr_eq, extra_eq)
            }
            BinOp::Ne => {
                let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
                let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
                fx.bcx.ins().bor(ptr_ne, extra_ne)
            }
            _ => unimplemented!(
                "trans_ptr_binop({:?}, <fat ptr>, <fat ptr>) not implemented",
                bin_op
            ),
        };

        assert_eq!(fx.tcx.types.bool, ret_ty);
        let ret_layout = fx.layout_of(ret_ty);
        CValue::by_val(fx.bcx.ins().bint(types::I8, res), ret_layout)
    }
}

/// Lower a MIR `Place` to a `CPlace`, recursing through projections
/// (deref, field, indexing, constant indexing, subslicing, downcast).
pub fn trans_place<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    place: &Place<'tcx>,
) -> CPlace<'tcx> {
    match place {
        Place::Base(base) => match base {
            PlaceBase::Local(local) => fx.get_local_place(*local),
            PlaceBase::Static(static_) => match static_.kind {
                StaticKind::Static(def_id) => {
                    crate::constant::codegen_static_ref(fx, def_id, static_.ty)
                }
                StaticKind::Promoted(promoted) => {
                    crate::constant::trans_promoted(fx, promoted, static_.ty)
                }
            }
        }
        Place::Projection(projection) => {
            let base = trans_place(fx, &projection.base);
            match projection.elem {
                ProjectionElem::Deref => base.place_deref(fx),
                ProjectionElem::Field(field, _ty) => base.place_field(fx, field),
                ProjectionElem::Index(local) => {
                    let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
                    base.place_index(fx, index)
                }
                ProjectionElem::ConstantIndex {
                    offset,
                    min_length: _,
                    from_end,
                } => {
                    // `from_end` counts back from the runtime length.
                    let index = if !from_end {
                        fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
                    } else {
                        let len = codegen_array_len(fx, base);
                        fx.bcx.ins().iadd_imm(len, -(offset as i64))
                    };
                    base.place_index(fx, index)
                }
                ProjectionElem::Subslice { from, to } => {
                    // These indices are generated by slice patterns.
                    // slice[from:-to] in Python terms.
                    match base.layout().ty.sty {
                        ty::Array(elem_ty, len) => {
                            let elem_layout = fx.layout_of(elem_ty);
                            let ptr = base.to_addr(fx);
                            let len = crate::constant::force_eval_const(fx, len)
                                .unwrap_usize(fx.tcx);
                            CPlace::for_addr(
                                fx.bcx.ins().iadd_imm(
                                    ptr,
                                    elem_layout.size.bytes() as i64 * from as i64,
                                ),
                                fx.layout_of(fx.tcx.mk_array(
                                    elem_ty,
                                    len - from as u64 - to as u64,
                                )),
                            )
                        }
                        ty::Slice(elem_ty) => {
                            let elem_layout = fx.layout_of(elem_ty);
                            let (ptr, len) = base.to_addr_maybe_unsized(fx);
                            let len = len.unwrap();
                            CPlace::for_addr_with_extra(
                                fx.bcx.ins().iadd_imm(
                                    ptr,
                                    elem_layout.size.bytes() as i64 * from as i64,
                                ),
                                fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
                                base.layout(),
                            )
                        }
                        _ => unreachable!(),
                    }
                }
                ProjectionElem::Downcast(_adt_def, variant) => {
                    base.downcast_variant(fx, variant)
                }
            }
        }
    }
}

/// Lower a MIR `Operand` to a `CValue`: move/copy reads the place, constants
/// go through the constant translator.
pub fn trans_operand<'a, 'tcx>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    operand: &Operand<'tcx>,
) -> CValue<'tcx> {
    match operand {
        Operand::Move(place) | Operand::Copy(place) => {
            let cplace = trans_place(fx, place);
            cplace.to_cvalue(fx)
        }
        Operand::Constant(const_) => crate::constant::trans_constant(fx, const_),
    }
}
/// A single playing card: a `Suit` paired with a `Rank`.
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct Card {
    pub suit: Suit,
    pub rank: Rank,
}

impl Card {}

/// The four French suits.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Suit {
    Club,
    Diamond,
    Heart,
    Spade,
}

impl Suit {
    /// Maps the numeric encoding `1..=4` back to a suit; the inverse of
    /// `From<Suit> for u32` (Heart = 1, Diamond = 2, Spade = 3, Club = 4).
    ///
    /// Returns `None` for any number outside that range.
    pub fn from_number(num: &u32) -> Option<Suit> {
        match *num {
            1 => Some(Suit::Heart),
            2 => Some(Suit::Diamond),
            3 => Some(Suit::Spade),
            4 => Some(Suit::Club),
            _ => None,
        }
    }
}

// Implementing `From` (rather than a hand-written `Into`) is the idiomatic
// direction; the standard blanket impl still provides `Suit: Into<u32>`,
// so existing `.into()` call sites keep working.
impl From<Suit> for u32 {
    /// Numeric encoding of a suit; inverse of `Suit::from_number`.
    fn from(suit: Suit) -> u32 {
        match suit {
            Suit::Heart => 1,
            Suit::Diamond => 2,
            Suit::Spade => 3,
            Suit::Club => 4,
        }
    }
}

/// The thirteen card ranks, Ace low.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Rank {
    Ace,
    Two,
    Three,
    Four,
    Five,
    Six,
    Seven,
    Eight,
    Nine,
    Ten,
    Jack,
    Queen,
    King,
}

impl Rank {
    /// Maps the numeric encoding `1..=13` back to a rank; the inverse of
    /// `From<Rank> for u32` (Ace = 1 … King = 13).
    ///
    /// Returns `None` for any number outside that range.
    pub fn from_number(num: &u32) -> Option<Rank> {
        match *num {
            1 => Some(Rank::Ace),
            2 => Some(Rank::Two),
            3 => Some(Rank::Three),
            4 => Some(Rank::Four),
            5 => Some(Rank::Five),
            6 => Some(Rank::Six),
            7 => Some(Rank::Seven),
            8 => Some(Rank::Eight),
            9 => Some(Rank::Nine),
            10 => Some(Rank::Ten),
            11 => Some(Rank::Jack),
            12 => Some(Rank::Queen),
            13 => Some(Rank::King),
            _ => None,
        }
    }
}

impl From<Rank> for u32 {
    /// Numeric encoding of a rank; inverse of `Rank::from_number`.
    fn from(rank: Rank) -> u32 {
        match rank {
            Rank::Ace => 1,
            Rank::Two => 2,
            Rank::Three => 3,
            Rank::Four => 4,
            Rank::Five => 5,
            Rank::Six => 6,
            Rank::Seven => 7,
            Rank::Eight => 8,
            Rank::Nine => 9,
            Rank::Ten => 10,
            Rank::Jack => 11,
            Rank::Queen => 12,
            Rank::King => 13,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn reversible_suit() {
        assert_eq!(Some(Suit::Heart), Suit::from_number(&Suit::Heart.into()));
    }

    #[test]
    fn reversible_rank() {
        assert_eq!(Some(Rank::Ace), Rank::from_number(&Rank::Ace.into()));
    }
}
use activation_function::ActivationFunction; use acyclic_network::{NodeType, Network}; pub use acyclic_network::NodeIndex as CppnNodeIndex; use fixedbitset::FixedBitSet; use std::fmt::Debug; pub trait CppnNodeType: NodeType + ActivationFunction { fn is_input_node(&self) -> bool; fn is_output_node(&self) -> bool; } #[derive(Clone, Copy, Debug)] pub enum CppnNodeKind { Bias, Input, Output, Hidden, } /// A concrete implementation of a CppnNodeType. #[derive(Clone, Debug)] pub struct CppnNode<A: ActivationFunction> { kind: CppnNodeKind, activation_function: A, } impl<A> CppnNode<A> where A: ActivationFunction { pub fn new(kind: CppnNodeKind, activation_function: A) -> Self { CppnNode { kind: kind, activation_function: activation_function } } pub fn input(activation_function: A) -> Self { Self::new(CppnNodeKind::Input, activation_function) } pub fn output(activation_function: A) -> Self { Self::new(CppnNodeKind::Output, activation_function) } pub fn hidden(activation_function: A) -> Self { Self::new(CppnNodeKind::Hidden, activation_function) } pub fn bias(activation_function: A) -> Self { Self::new(CppnNodeKind::Bias, activation_function) } } impl<A: ActivationFunction> ActivationFunction for CppnNode<A> { fn formula_gnuplot(&self, x: String) -> String { match self.kind { CppnNodeKind::Input | CppnNodeKind::Output | CppnNodeKind::Hidden | CppnNodeKind::Bias => { self.activation_function.formula_gnuplot(x) } } } fn calculate(&self, input: f64) -> f64 { self.activation_function.calculate(input) } } impl<A: ActivationFunction> NodeType for CppnNode<A> { fn accept_incoming_links(&self) -> bool { match self.kind { CppnNodeKind::Hidden | CppnNodeKind::Output => true, CppnNodeKind::Input | CppnNodeKind::Bias => false, } } fn accept_outgoing_links(&self) -> bool { match self.kind { CppnNodeKind::Hidden | CppnNodeKind::Input | CppnNodeKind::Bias => true, CppnNodeKind::Output => false, } } } impl<A: ActivationFunction> CppnNodeType for CppnNode<A> { fn is_input_node(&self) -> 
bool { match self.kind { CppnNodeKind::Input => true, _ => false, } } fn is_output_node(&self) -> bool { match self.kind { CppnNodeKind::Output => true, _ => false, } } } pub type CppnGraph<N, L, EXTID> where N: CppnNodeType, L: Copy + Debug + Send + Sized + Into<f64>, EXTID: Copy + Debug + Send + Sized + Ord = Network<N, L, EXTID>; /// Represents a Compositional Pattern Producing Network (CPPN) pub struct Cppn<'a, N, L, EXTID> where N: CppnNodeType + 'a, L: Copy + Debug + Send + Sized + Into<f64> + 'a, EXTID: Copy + Debug + Send + Sized + Ord + 'a { graph: &'a CppnGraph<N, L, EXTID>, inputs: Vec<CppnNodeIndex>, outputs: Vec<CppnNodeIndex>, // For each node in `graph` there exists a corresponding field in `incoming_signals` describing // the sum of all input signals for that node. We could store it inline in the `CppnNode`, but // this would require to make the whole CppnGraph mutable. incoming_signals: Vec<f64>, } impl<'a, N, L, EXTID> Cppn<'a, N, L, EXTID> where N: CppnNodeType + 'a, L: Copy + Debug + Send + Sized + Into<f64> + 'a, EXTID: Copy + Debug + Send + Sized + Ord + 'a { pub fn new(graph: &'a CppnGraph<N, L, EXTID>) -> Cppn<'a, N, L, EXTID> { let mut inputs = Vec::new(); let mut outputs = Vec::new(); graph.each_node_with_index(|node, index| { if node.node_type().is_input_node() { inputs.push(index); } if node.node_type().is_output_node() { outputs.push(index); } }); Cppn { graph: graph, inputs: inputs, outputs: outputs, incoming_signals: graph.nodes().iter().map(|_| 0.0).collect(), } } fn set_signal(&mut self, node_idx: CppnNodeIndex, value: f64) { self.incoming_signals[node_idx.index()] = value; } fn reset_signals(&mut self) { for value in self.incoming_signals.iter_mut() { *value = 0.0; } } /// Forward-propagate the signals starting from `from_nodes`. We use /// breadth-first-search (BFS). 
fn propagate_signals(&mut self, mut nodes: Vec<CppnNodeIndex>, mut seen: FixedBitSet) { while let Some(node_idx) = nodes.pop() { let input = self.incoming_signals[node_idx.index()]; let output = self.graph.node(node_idx).node_type().calculate(input); // propagate output signal to outgoing links. self.graph.each_active_forward_link_of_node(node_idx, |out_node_idx, weight| { let out_node = out_node_idx.index(); let weight: f64 = weight.into(); self.incoming_signals[out_node] += weight * output; if !seen.contains(out_node) { seen.insert(out_node); nodes.push(out_node_idx); } }); } } /// Calculate all outputs pub fn calculate(&mut self, inputs: &[&[f64]]) -> Vec<f64> { assert!(self.incoming_signals.len() == self.graph.nodes().len()); self.reset_signals(); // assign all inputs let mut i = 0; for input_list in inputs.iter() { for &input in input_list.iter() { let input_idx = self.inputs[i]; self.set_signal(input_idx, input); i += 1; } } assert!(i == self.inputs.len()); let mut nodes = Vec::new(); // XXX: worst case capacity let mut seen = FixedBitSet::with_capacity(self.incoming_signals.len()); // start from all nodes which have zero in_degree() self.graph.each_node_with_index(|node, index| { if node.in_degree() == 0 { nodes.push(index); seen.insert(index.index()); } }); // propagate the signals starting from the nodes with zero in degree. 
self.propagate_signals(nodes, seen); self.outputs .iter() .map(|&node_idx| self.incoming_signals[node_idx.index()]) .collect() } } #[cfg(test)] mod tests { use activation_function::GeometricActivationFunction as AF; use super::{Cppn, CppnGraph, CppnNode}; use acyclic_network::ExternalId; use rand; #[test] fn test_cycle() { let mut g = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let h1 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(2)); let h2 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(3)); assert_eq!(true, g.valid_link(i1, i1).is_err()); assert_eq!(true, g.valid_link(h1, h1).is_err()); assert_eq!(true, g.valid_link(h1, i1).is_err()); assert_eq!(Ok(()), g.valid_link(i1, h1)); assert_eq!(Ok(()), g.valid_link(i1, h2)); assert_eq!(Ok(()), g.valid_link(h1, h2)); g.add_link(i1, h1, 0.0, ExternalId(1)); assert_eq!(true, g.link_would_cycle(h1, i1)); assert_eq!(false, g.link_would_cycle(i1, h1)); assert_eq!(false, g.link_would_cycle(i1, h2)); assert_eq!(true, g.link_would_cycle(i1, i1)); assert_eq!(false, g.link_would_cycle(h1, h2)); assert_eq!(false, g.link_would_cycle(h2, h1)); assert_eq!(false, g.link_would_cycle(h2, i1)); g.add_link(h1, h2, 0.0, ExternalId(2)); assert_eq!(true, g.link_would_cycle(h2, i1)); assert_eq!(true, g.link_would_cycle(h1, i1)); assert_eq!(true, g.link_would_cycle(h2, h1)); assert_eq!(false, g.link_would_cycle(i1, h2)); } #[test] fn test_simple_cppn() { let mut g = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let h1 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(2)); let o1 = g.add_node(CppnNode::output(AF::Linear), ExternalId(3)); g.add_link(i1, h1, 0.5, ExternalId(1)); g.add_link(h1, o1, 1.0, ExternalId(2)); let mut cppn = Cppn::new(&g); let f = |x| 0.5 * x * 1.0; assert_eq!(vec![f(0.5)], cppn.calculate(&[&[0.5]])); assert_eq!(vec![f(4.0)], cppn.calculate(&[&[4.0]])); assert_eq!(vec![f(-4.0)], cppn.calculate(&[&[-4.0]])); } #[test] fn 
test_find_random_unconnected_link_no_cycle() { let mut g: CppnGraph<CppnNode<AF>, _, _> = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let o1 = g.add_node(CppnNode::output(AF::Linear), ExternalId(2)); let o2 = g.add_node(CppnNode::output(AF::Linear), ExternalId(3)); let mut rng = rand::thread_rng(); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(true, link.is_some()); let l = link.unwrap(); assert!((i1, o1) == l || (i1, o2) == l); g.add_link(i1, o2, 0.0, ExternalId(1)); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(true, link.is_some()); assert_eq!((i1, o1), link.unwrap()); g.add_link(i1, o1, 0.0, ExternalId(2)); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(false, link.is_some()); } } Apply the activation function to output when read use activation_function::ActivationFunction; use acyclic_network::{NodeType, Network}; pub use acyclic_network::NodeIndex as CppnNodeIndex; use fixedbitset::FixedBitSet; use std::fmt::Debug; pub trait CppnNodeType: NodeType + ActivationFunction { fn is_input_node(&self) -> bool; fn is_output_node(&self) -> bool; } #[derive(Clone, Copy, Debug)] pub enum CppnNodeKind { Bias, Input, Output, Hidden, } /// A concrete implementation of a CppnNodeType. 
#[derive(Clone, Debug)] pub struct CppnNode<A: ActivationFunction> { kind: CppnNodeKind, activation_function: A, } impl<A> CppnNode<A> where A: ActivationFunction { pub fn new(kind: CppnNodeKind, activation_function: A) -> Self { CppnNode { kind: kind, activation_function: activation_function, } } pub fn input(activation_function: A) -> Self { Self::new(CppnNodeKind::Input, activation_function) } pub fn output(activation_function: A) -> Self { Self::new(CppnNodeKind::Output, activation_function) } pub fn hidden(activation_function: A) -> Self { Self::new(CppnNodeKind::Hidden, activation_function) } pub fn bias(activation_function: A) -> Self { Self::new(CppnNodeKind::Bias, activation_function) } } impl<A: ActivationFunction> ActivationFunction for CppnNode<A> { fn formula_gnuplot(&self, x: String) -> String { match self.kind { CppnNodeKind::Input | CppnNodeKind::Output | CppnNodeKind::Hidden | CppnNodeKind::Bias => self.activation_function.formula_gnuplot(x), } } fn calculate(&self, input: f64) -> f64 { self.activation_function.calculate(input) } } impl<A: ActivationFunction> NodeType for CppnNode<A> { fn accept_incoming_links(&self) -> bool { match self.kind { CppnNodeKind::Hidden | CppnNodeKind::Output => true, CppnNodeKind::Input | CppnNodeKind::Bias => false, } } fn accept_outgoing_links(&self) -> bool { match self.kind { CppnNodeKind::Hidden | CppnNodeKind::Input | CppnNodeKind::Bias => true, CppnNodeKind::Output => false, } } } impl<A: ActivationFunction> CppnNodeType for CppnNode<A> { fn is_input_node(&self) -> bool { match self.kind { CppnNodeKind::Input => true, _ => false, } } fn is_output_node(&self) -> bool { match self.kind { CppnNodeKind::Output => true, _ => false, } } } pub type CppnGraph<N, L, EXTID> where N: CppnNodeType, L: Copy + Debug + Send + Sized + Into<f64>, EXTID: Copy + Debug + Send + Sized + Ord = Network<N, L, EXTID>; /// Represents a Compositional Pattern Producing Network (CPPN) pub struct Cppn<'a, N, L, EXTID> where N: CppnNodeType + 
'a, L: Copy + Debug + Send + Sized + Into<f64> + 'a, EXTID: Copy + Debug + Send + Sized + Ord + 'a { graph: &'a CppnGraph<N, L, EXTID>, inputs: Vec<CppnNodeIndex>, outputs: Vec<CppnNodeIndex>, // For each node in `graph` there exists a corresponding field in `incoming_signals` describing // the sum of all input signals for that node. We could store it inline in the `CppnNode`, but // this would require to make the whole CppnGraph mutable. incoming_signals: Vec<f64>, } impl<'a, N, L, EXTID> Cppn<'a, N, L, EXTID> where N: CppnNodeType + 'a, L: Copy + Debug + Send + Sized + Into<f64> + 'a, EXTID: Copy + Debug + Send + Sized + Ord + 'a { pub fn new(graph: &'a CppnGraph<N, L, EXTID>) -> Cppn<'a, N, L, EXTID> { let mut inputs = Vec::new(); let mut outputs = Vec::new(); graph.each_node_with_index(|node, index| { if node.node_type().is_input_node() { inputs.push(index); } if node.node_type().is_output_node() { outputs.push(index); } }); Cppn { graph: graph, inputs: inputs, outputs: outputs, incoming_signals: graph.nodes().iter().map(|_| 0.0).collect(), } } fn set_signal(&mut self, node_idx: CppnNodeIndex, value: f64) { self.incoming_signals[node_idx.index()] = value; } fn reset_signals(&mut self) { for value in self.incoming_signals.iter_mut() { *value = 0.0; } } /// Forward-propagate the signals starting from `from_nodes`. We use /// breadth-first-search (BFS). fn propagate_signals(&mut self, mut nodes: Vec<CppnNodeIndex>, mut seen: FixedBitSet) { while let Some(node_idx) = nodes.pop() { let input = self.incoming_signals[node_idx.index()]; let output = self.graph.node(node_idx).node_type().calculate(input); // propagate output signal to outgoing links. 
self.graph.each_active_forward_link_of_node(node_idx, |out_node_idx, weight| { let out_node = out_node_idx.index(); let weight: f64 = weight.into(); self.incoming_signals[out_node] += weight * output; if !seen.contains(out_node) { seen.insert(out_node); nodes.push(out_node_idx); } }); } } /// Calculate all outputs pub fn calculate(&mut self, inputs: &[&[f64]]) -> Vec<f64> { assert!(self.incoming_signals.len() == self.graph.nodes().len()); self.reset_signals(); // assign all inputs let mut i = 0; for input_list in inputs.iter() { for &input in input_list.iter() { let input_idx = self.inputs[i]; self.set_signal(input_idx, input); i += 1; } } assert!(i == self.inputs.len()); let mut nodes = Vec::new(); // XXX: worst case capacity let mut seen = FixedBitSet::with_capacity(self.incoming_signals.len()); // start from all nodes which have zero in_degree() self.graph.each_node_with_index(|node, index| { if node.in_degree() == 0 { nodes.push(index); seen.insert(index.index()); } }); // propagate the signals starting from the nodes with zero in degree. 
self.propagate_signals(nodes, seen); self.outputs .iter() .map(|&node_idx| { let input = self.incoming_signals[node_idx.index()]; let output = self.graph.node(node_idx).node_type().calculate(input); output }) .collect() } } #[cfg(test)] mod tests { use activation_function::GeometricActivationFunction as AF; use super::{Cppn, CppnGraph, CppnNode}; use acyclic_network::ExternalId; use rand; #[test] fn test_cycle() { let mut g = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let h1 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(2)); let h2 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(3)); assert_eq!(true, g.valid_link(i1, i1).is_err()); assert_eq!(true, g.valid_link(h1, h1).is_err()); assert_eq!(true, g.valid_link(h1, i1).is_err()); assert_eq!(Ok(()), g.valid_link(i1, h1)); assert_eq!(Ok(()), g.valid_link(i1, h2)); assert_eq!(Ok(()), g.valid_link(h1, h2)); g.add_link(i1, h1, 0.0, ExternalId(1)); assert_eq!(true, g.link_would_cycle(h1, i1)); assert_eq!(false, g.link_would_cycle(i1, h1)); assert_eq!(false, g.link_would_cycle(i1, h2)); assert_eq!(true, g.link_would_cycle(i1, i1)); assert_eq!(false, g.link_would_cycle(h1, h2)); assert_eq!(false, g.link_would_cycle(h2, h1)); assert_eq!(false, g.link_would_cycle(h2, i1)); g.add_link(h1, h2, 0.0, ExternalId(2)); assert_eq!(true, g.link_would_cycle(h2, i1)); assert_eq!(true, g.link_would_cycle(h1, i1)); assert_eq!(true, g.link_would_cycle(h2, h1)); assert_eq!(false, g.link_would_cycle(i1, h2)); } #[test] fn test_simple_cppn() { let mut g = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let h1 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(2)); let o1 = g.add_node(CppnNode::output(AF::Linear), ExternalId(3)); g.add_link(i1, h1, 0.5, ExternalId(1)); g.add_link(h1, o1, 1.0, ExternalId(2)); let mut cppn = Cppn::new(&g); let f = |x| 0.5 * x * 1.0; assert_eq!(vec![f(0.5)], cppn.calculate(&[&[0.5]])); assert_eq!(vec![f(4.0)], 
cppn.calculate(&[&[4.0]])); assert_eq!(vec![f(-4.0)], cppn.calculate(&[&[-4.0]])); } #[test] fn test_cppn_with_output_activation_function() { let mut g = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let h1 = g.add_node(CppnNode::hidden(AF::Linear), ExternalId(2)); let o1 = g.add_node(CppnNode::output(AF::Constant1), ExternalId(3)); g.add_link(i1, h1, 0.5, ExternalId(1)); g.add_link(h1, o1, 1.0, ExternalId(2)); let mut cppn = Cppn::new(&g); assert_eq!(vec![1.0], cppn.calculate(&[&[0.5]])); assert_eq!(vec![1.0], cppn.calculate(&[&[4.0]])); assert_eq!(vec![1.0], cppn.calculate(&[&[-4.0]])); } #[test] fn test_find_random_unconnected_link_no_cycle() { let mut g: CppnGraph<CppnNode<AF>, _, _> = CppnGraph::new(); let i1 = g.add_node(CppnNode::input(AF::Linear), ExternalId(1)); let o1 = g.add_node(CppnNode::output(AF::Linear), ExternalId(2)); let o2 = g.add_node(CppnNode::output(AF::Linear), ExternalId(3)); let mut rng = rand::thread_rng(); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(true, link.is_some()); let l = link.unwrap(); assert!((i1, o1) == l || (i1, o2) == l); g.add_link(i1, o2, 0.0, ExternalId(1)); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(true, link.is_some()); assert_eq!((i1, o1), link.unwrap()); g.add_link(i1, o1, 0.0, ExternalId(2)); let link = g.find_random_unconnected_link_no_cycle(&mut rng); assert_eq!(false, link.is_some()); } }
//! Bindings to the "easy" libcurl API. //! //! This module contains some simple types like `Easy` and `List` which are just //! wrappers around the corresponding libcurl types. There's also a few enums //! scattered about for various options here and there. //! //! Most simple usage of libcurl will likely use the `Easy` structure here, and //! you can find more docs about its usage on that struct. use std::cell::{RefCell, Cell}; use std::ffi::{CString, CStr}; use std::io::SeekFrom; use std::path::Path; use std::slice; use std::str; use std::time::Duration; use curl_sys; use libc::{self, c_long, c_int, c_char, c_void, size_t, c_double, c_ulong}; use {Error, FormError}; use panic; // TODO: checked casts everywhere /// Raw bindings to a libcurl "easy session". /// /// This type corresponds to the `CURL` type in libcurl, and is probably what /// you want for just sending off a simple HTTP request and fetching a response. /// Each easy handle can be thought of as a large builder before calling the /// final `perform` function. /// /// There are many many configuration options for each `Easy` handle, and they /// should all have their own documentation indicating what it affects and how /// it interacts with other options. Some implementations of libcurl can use /// this handle to interact with many different protocols, although by default /// this crate only guarantees the HTTP/HTTPS protocols working. /// /// Note that almost all methods on this structure which configure various /// properties return a `Result`. This is largely used to detect whether the /// underlying implementation of libcurl actually implements the option being /// requested. If you're linked to a version of libcurl which doesn't support /// the option, then an error will be returned. Some options also perform some /// validation when they're set, and the error is returned through this vector. 
/// /// ## Examples /// /// Creating a handle which can be used later /// /// ``` /// use curl::easy::Easy; /// /// let handle = Easy::new(); /// ``` /// /// Send an HTTP request, writing the response to stdout. /// /// ``` /// use std::io::{stdout, Write}; /// /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// handle.write_function(|data| { /// Ok(stdout().write(data).unwrap()) /// }).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Collect all output of an HTTP request to a vector. /// /// ``` /// use curl::easy::Easy; /// /// let mut data = Vec::new(); /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// { /// let mut transfer = handle.transfer(); /// transfer.write_function(|new_data| { /// data.extend_from_slice(new_data); /// Ok(new_data.len()) /// }).unwrap(); /// transfer.perform().unwrap(); /// } /// println!("{:?}", data); /// ``` /// /// More examples of various properties of an HTTP request can be found on the /// specific methods as well. pub struct Easy { handle: *mut curl_sys::CURL, data: Box<EasyData>, } /// A scoped transfer of information which borrows an `Easy` and allows /// referencing stack-local data of the lifetime `'data`. /// /// Usage of `Easy` requires the `'static` and `Send` bounds on all callbacks /// registered, but that's not often wanted if all you need is to collect a /// bunch of data in memory to a vector, for example. The `Transfer` structure, /// created by the `Easy::transfer` method, is used for this sort of request. /// /// The callbacks attached to a `Transfer` are only active for that one transfer /// object, and they're allows to elide both the `Send` and `'static` bounds to /// close over stack-local information. 
pub struct Transfer<'easy, 'data> { easy: &'easy mut Easy, data: Box<TransferData<'data>>, } #[derive(Default)] struct EasyData { running: Cell<bool>, write: Option<Box<FnMut(&[u8]) -> Result<usize, WriteError> + Send>>, read: Option<Box<FnMut(&mut [u8]) -> Result<usize, ReadError> + Send>>, seek: Option<Box<FnMut(SeekFrom) -> SeekResult + Send>>, debug: Option<Box<FnMut(InfoType, &[u8]) + Send>>, header: Option<Box<FnMut(&[u8]) -> bool + Send>>, progress: Option<Box<FnMut(f64, f64, f64, f64) -> bool + Send>>, header_list: Option<List>, form: Option<Form>, error_buf: RefCell<Vec<u8>>, } #[derive(Default)] struct TransferData<'a> { write: Option<Box<FnMut(&[u8]) -> Result<usize, WriteError> + 'a>>, read: Option<Box<FnMut(&mut [u8]) -> Result<usize, ReadError> + 'a>>, seek: Option<Box<FnMut(SeekFrom) -> SeekResult + 'a>>, debug: Option<Box<FnMut(InfoType, &[u8]) + 'a>>, header: Option<Box<FnMut(&[u8]) -> bool + 'a>>, progress: Option<Box<FnMut(f64, f64, f64, f64) -> bool + 'a>>, } // libcurl guarantees that a CURL handle is fine to be transferred so long as // it's not used concurrently, and we do that correctly ourselves. unsafe impl Send for Easy {} /// Multipart/formdata for an HTTP POST request. /// /// This structure is built up and then passed to the `Easy::httppost` method to /// be sent off with a request. pub struct Form { head: *mut curl_sys::curl_httppost, tail: *mut curl_sys::curl_httppost, headers: Vec<List>, buffers: Vec<Vec<u8>>, strings: Vec<CString>, } /// One part in a multipart upload, added to a `Form`. pub struct Part<'form, 'data> { form: &'form mut Form, name: &'data str, array: Vec<curl_sys::curl_forms>, error: Option<FormError>, } /// Possible proxy types that libcurl currently understands. 
#[allow(missing_docs)]
pub enum ProxyType {
    Http = curl_sys::CURLPROXY_HTTP as isize,
    Http1 = curl_sys::CURLPROXY_HTTP_1_0 as isize,
    Socks4 = curl_sys::CURLPROXY_SOCKS4 as isize,
    Socks5 = curl_sys::CURLPROXY_SOCKS5 as isize,
    Socks4a = curl_sys::CURLPROXY_SOCKS4A as isize,
    Socks5Hostname = curl_sys::CURLPROXY_SOCKS5_HOSTNAME as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible conditions for the `time_condition` method.
#[allow(missing_docs)]
pub enum TimeCondition {
    None = curl_sys::CURL_TIMECOND_NONE as isize,
    IfModifiedSince = curl_sys::CURL_TIMECOND_IFMODSINCE as isize,
    IfUnmodifiedSince = curl_sys::CURL_TIMECOND_IFUNMODSINCE as isize,
    LastModified = curl_sys::CURL_TIMECOND_LASTMOD as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible values to pass to the `ip_resolve` method.
#[allow(missing_docs)]
pub enum IpResolve {
    V4 = curl_sys::CURL_IPRESOLVE_V4 as isize,
    V6 = curl_sys::CURL_IPRESOLVE_V6 as isize,
    Any = curl_sys::CURL_IPRESOLVE_WHATEVER as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive = 500,
}

/// Possible values to pass to the `ssl_version` method.
// NOTE(review): doc previously said "the `ip_resolve` method" — a copy-paste
// from the enum above; these values configure the SSL/TLS protocol version.
#[allow(missing_docs)]
pub enum SslVersion {
    Default = curl_sys::CURL_SSLVERSION_DEFAULT as isize,
    Tlsv1 = curl_sys::CURL_SSLVERSION_TLSv1 as isize,
    Sslv2 = curl_sys::CURL_SSLVERSION_SSLv2 as isize,
    Sslv3 = curl_sys::CURL_SSLVERSION_SSLv3 as isize,
    // Tlsv10 = curl_sys::CURL_SSLVERSION_TLSv1_0 as isize,
    // Tlsv11 = curl_sys::CURL_SSLVERSION_TLSv1_1 as isize,
    // Tlsv12 = curl_sys::CURL_SSLVERSION_TLSv1_2 as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive = 500,
}

/// Possible return values from the `seek_function` callback.
pub enum SeekResult {
    /// Indicates that the seek operation was a success
    Ok = curl_sys::CURL_SEEKFUNC_OK as isize,

    /// Indicates that the seek operation failed, and the entire request should
    /// fail as a result.
    Fail = curl_sys::CURL_SEEKFUNC_FAIL as isize,

    /// Indicates that although the seek failed libcurl should attempt to keep
    /// working if possible (for example "seek" through reading).
    CantSeek = curl_sys::CURL_SEEKFUNC_CANTSEEK as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive = 500,
}

/// Possible data chunks that can be witnessed as part of the `debug_function`
/// callback.
pub enum InfoType {
    /// The data is informational text.
    Text,

    /// The data is header (or header-like) data received from the peer.
    HeaderIn,

    /// The data is header (or header-like) data sent to the peer.
    HeaderOut,

    /// The data is protocol data received from the peer.
    DataIn,

    /// The data is protocol data sent to the peer.
    DataOut,

    /// The data is SSL/TLS (binary) data received from the peer.
    SslDataIn,

    /// The data is SSL/TLS (binary) data sent to the peer.
    SslDataOut,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// A linked list of strings, wrapping libcurl's `curl_slist`.
pub struct List {
    // Owned pointer to the head of the libcurl-allocated list.
    raw: *mut curl_sys::curl_slist,
}

/// An iterator over the strings contained in a `List`.
pub struct Iter<'a> {
    // Borrow of the list, held only to tie the iterator's lifetime to it.
    _me: &'a List,
    // Cursor into the underlying linked list; null when exhausted.
    cur: *mut curl_sys::curl_slist,
}

// NOTE(review): presumably sound because the list owns its allocation and is
// only touched through `&self`/`&mut self` — confirm against libcurl's
// threading rules for `curl_slist`.
unsafe impl Send for List {}

/// Possible error codes that can be returned from the `read_function` callback.
pub enum ReadError {
    /// Indicates that the connection should be aborted immediately
    Abort,

    /// Indicates that reading should be paused until `unpause` is called.
    Pause,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible error codes that can be returned from the `write_function` callback.
pub enum WriteError { /// Indicates that reading should be paused until `unpause` is called. Pause, /// Hidden variant to indicate that this enum should not be matched on, it /// may grow over time. #[doc(hidden)] __Nonexhaustive, } /// Structure which stores possible authentication methods to get passed to /// `http_auth` and `proxy_auth`. #[derive(Clone, Debug)] pub struct Auth { bits: c_long, } impl Easy { /// Creates a new "easy" handle which is the core of almost all operations /// in libcurl. /// /// To use a handle, applications typically configure a number of options /// followed by a call to `perform`. Options are preserved across calls to /// `perform` and need to be reset manually (or via the `reset` method) if /// this is not desired. pub fn new() -> Easy { ::init(); unsafe { let handle = curl_sys::curl_easy_init(); assert!(!handle.is_null()); let mut ret = Easy { handle: handle, data: Default::default(), }; default_configure(&mut ret); return ret } } // ========================================================================= // Behavior options /// Configures this handle to have verbose output to help debug protocol /// information. /// /// By default output goes to stderr, but the `stderr` function on this type /// can configure that. You can also use the `debug_function` method to get /// all protocol data sent and received. /// /// By default, this option is `false`. pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_VERBOSE, verbose as c_long) } /// Indicates whether header information is streamed to the output body of /// this request. /// /// This option is only relevant for protocols which have header metadata /// (like http or ftp). It's not generally possible to extract headers /// from the body if using this method, that use case should be intended for /// the `header_function` method. /// /// To set HTTP headers, use the `http_header` method. 
/// /// By default, this option is `false` and corresponds to /// `CURLOPT_HEADER`. pub fn show_header(&mut self, show: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HEADER, show as c_long) } /// Indicates whether a progress meter will be shown for requests done with /// this handle. /// /// This will also prevent the `progress_function` from being called. /// /// By default this option is `false` and corresponds to /// `CURLOPT_NOPROGRESS`. pub fn progress(&mut self, progress: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_NOPROGRESS, (!progress) as c_long) } /// Inform libcurl whether or not it should install signal handlers or /// attempt to use signals to perform library functions. /// /// If this option is disabled then timeouts during name resolution will not /// work unless libcurl is built against c-ares. Note that enabling this /// option, however, may not cause libcurl to work with multiple threads. /// /// By default this option is `false` and corresponds to `CURLOPT_NOSIGNAL`. /// Note that this default is **different than libcurl** as it is intended /// that this library is threadsafe by default. See the [libcurl docs] for /// some more information. /// /// [libcurl docs]: https://curl.haxx.se/libcurl/c/threadsafe.html pub fn signal(&mut self, signal: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_NOSIGNAL, (!signal) as c_long) } /// Indicates whether multiple files will be transferred based on the file /// name pattern. /// /// The last part of a filename uses fnmatch-like pattern matching. /// /// By default this option is `false` and corresponds to /// `CURLOPT_WILDCARDMATCH`. pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long) } // ========================================================================= // Callback options /// Set callback for writing received data. 
/// /// This callback function gets called by libcurl as soon as there is data /// received that needs to be saved. /// /// The callback function will be passed as much data as possible in all /// invokes, but you must not make any assumptions. It may be one byte, it /// may be thousands. If `show_header` is enabled, which makes header data /// get passed to the write callback, you can get up to /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This /// usually means 100K. /// /// This function may be called with zero bytes data if the transferred file /// is empty. /// /// The callback should return the number of bytes actually taken care of. /// If that amount differs from the amount passed to your callback function, /// it'll signal an error condition to the library. This will cause the /// transfer to get aborted and the libcurl function used will return /// an error with `is_write_error`. /// /// If your callback function returns `Err(WriteError::Pause)` it will cause /// this transfer to become paused. See `unpause_write` for further details. /// /// By default data is sent into the void, and this corresponds to the /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `write_function` to configure a /// callback that can reference stack-local data. 
/// /// # Examples /// /// ``` /// use std::io::{stdout, Write}; /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// handle.write_function(|data| { /// Ok(stdout().write(data).unwrap()) /// }).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Writing to a stack-local buffer /// /// ``` /// use std::io::{stdout, Write}; /// use curl::easy::Easy; /// /// let mut buf = Vec::new(); /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// /// let mut transfer = handle.transfer(); /// transfer.write_function(|data| { /// buf.extend_from_slice(data); /// Ok(data.len()) /// }).unwrap(); /// transfer.perform().unwrap(); /// ``` pub fn write_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(&[u8]) -> Result<usize, WriteError> + Send + 'static { self.data.write = Some(Box::new(f)); unsafe { return self.set_write_function(easy_write_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_write_function(&self, cb: curl_sys::curl_write_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEDATA, ptr as *const _)); return Ok(()); } /// Read callback for data uploads. /// /// This callback function gets called by libcurl as soon as it needs to /// read data in order to send it to the peer - like if you ask it to upload /// or post data to the server. /// /// Your function must then return the actual number of bytes that it stored /// in that memory area. Returning 0 will signal end-of-file to the library /// and cause it to stop the current transfer. /// /// If you stop the current transfer by returning 0 "pre-maturely" (i.e /// before the server expected it, like when you've said you will upload N /// bytes and you upload less than N bytes), you may experience that the /// server "hangs" waiting for the rest of the data that won't come. 
/// /// The read callback may return `Err(ReadError::Abort)` to stop the /// current operation immediately, resulting in a `is_aborted_by_callback` /// error code from the transfer. /// /// The callback can return `Err(ReadError::Pause)` to cause reading from /// this connection to pause. See `unpause_read` for further details. /// /// By default data not input, and this corresponds to the /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `read_function` to configure a /// callback that can reference stack-local data. /// /// # Examples /// /// Read input from stdin /// /// ```no_run /// use std::io::{stdin, Read}; /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://example.com/login").unwrap(); /// handle.read_function(|into| { /// Ok(stdin().read(into).unwrap()) /// }).unwrap(); /// handle.post(true).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Reading from stack-local data: /// /// ```no_run /// use std::io::{stdin, Read}; /// use curl::easy::Easy; /// /// let mut data_to_upload = &b"foobar"[..]; /// let mut handle = Easy::new(); /// handle.url("https://example.com/login").unwrap(); /// handle.post(true).unwrap(); /// /// let mut transfer = handle.transfer(); /// transfer.read_function(|into| { /// Ok(data_to_upload.read(into).unwrap()) /// }).unwrap(); /// transfer.perform().unwrap(); /// ``` pub fn read_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(&mut [u8]) -> Result<usize, ReadError> + Send + 'static { self.data.read = Some(Box::new(f)); unsafe { self.set_read_function(easy_read_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_read_function(&self, cb: curl_sys::curl_read_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_READFUNCTION, cb as *const _)); 
try!(self.setopt_ptr(curl_sys::CURLOPT_READDATA, ptr as *const _)); return Ok(()); } /// User callback for seeking in input stream. /// /// This function gets called by libcurl to seek to a certain position in /// the input stream and can be used to fast forward a file in a resumed /// upload (instead of reading all uploaded bytes with the normal read /// function/callback). It is also called to rewind a stream when data has /// already been sent to the server and needs to be sent again. This may /// happen when doing a HTTP PUT or POST with a multi-pass authentication /// method, or when an existing HTTP connection is reused too late and the /// server closes the connection. /// /// The callback function must return `SeekResult::Ok` on success, /// `SeekResult::Fail` to cause the upload operation to fail or /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl /// is free to work around the problem if possible. The latter can sometimes /// be done by instead reading from the input or similar. /// /// By default data this option is not set, and this corresponds to the /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `seek_function` to configure a /// callback that can reference stack-local data. 
pub fn seek_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(SeekFrom) -> SeekResult + Send + 'static { self.data.seek = Some(Box::new(f)); unsafe { self.set_seek_function(easy_seek_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_seek_function(&self, cb: curl_sys::curl_seek_callback, ptr: *mut c_void) -> Result<(), Error> { let cb = cb as curl_sys::curl_seek_callback; try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKDATA, ptr as *const _)); Ok(()) } /// Callback to progress meter function /// /// This function gets called by libcurl instead of its internal equivalent /// with a frequent interval. While data is being transferred it will be /// called very frequently, and during slow periods like when nothing is /// being transferred it can slow down to about one call per second. /// /// The callback gets told how much data libcurl will transfer and has /// transferred, in number of bytes. The first argument is the total number /// of bytes libcurl expects to download in this transfer. The second /// argument is the number of bytes downloaded so far. The third argument is /// the total number of bytes libcurl expects to upload in this transfer. /// The fourth argument is the number of bytes uploaded so far. /// /// Unknown/unused argument values passed to the callback will be set to /// zero (like if you only download data, the upload size will remain 0). /// Many times the callback will be called one or more times first, before /// it knows the data sizes so a program must be made to handle that. /// /// Returning `false` from this callback will cause libcurl to abort the /// transfer and return `is_aborted_by_callback`. /// /// If you transfer data with the multi interface, this function will not be /// called during periods of idleness unless you call the appropriate /// libcurl function that performs transfers. 
/// /// `noprogress` must be set to 0 to make this function actually get /// called. /// /// By default this function calls an internal method and corresponds to /// `CURLOPT_XFERINFOFUNCTION` and `CURLOPT_XFERINFODATA`. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `progress_function` to configure a /// callback that can reference stack-local data. pub fn progress_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(f64, f64, f64, f64) -> bool + Send + 'static { self.data.progress = Some(Box::new(f)); unsafe { self.set_progress_function(easy_progress_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_progress_function(&self, cb: curl_sys::curl_progress_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSDATA, ptr as *const _)); Ok(()) } /// Specify a debug callback /// /// `debug_function` replaces the standard debug function used when /// `verbose` is in effect. This callback receives debug information, /// as specified in the type argument. /// /// By default this option is not set and corresponds to the /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `debug_function` to configure a /// callback that can reference stack-local data. 
pub fn debug_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(InfoType, &[u8]) + Send + 'static { self.data.debug = Some(Box::new(f)); unsafe { self.set_debug_function(easy_debug_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_debug_function(&self, cb: curl_sys::curl_debug_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGDATA, ptr as *const _)); return Ok(()); } /// Callback that receives header data /// /// This function gets called by libcurl as soon as it has received header /// data. The header callback will be called once for each header and only /// complete header lines are passed on to the callback. Parsing headers is /// very easy using this. If this callback returns `false` it'll signal an /// error to the library. This will cause the transfer to get aborted and /// the libcurl function in progress will return `is_write_error`. /// /// A complete HTTP header that is passed to this function can be up to /// CURL_MAX_HTTP_HEADER (100K) bytes. /// /// It's important to note that the callback will be invoked for the headers /// of all responses received after initiating a request and not just the /// final response. This includes all responses which occur during /// authentication negotiation. If you need to operate on only the headers /// from the final response, you will need to collect headers in the /// callback yourself and use HTTP status lines, for example, to delimit /// response boundaries. /// /// When a server sends a chunked encoded transfer, it may contain a /// trailer. That trailer is identical to a HTTP header and if such a /// trailer is received it is passed to the application using this callback /// as well. There are several ways to detect it being a trailer and not an /// ordinary header: 1) it comes after the response-body. 
2) it comes after /// the final header line (CR LF) 3) a Trailer: header among the regular /// response-headers mention what header(s) to expect in the trailer. /// /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will /// get called with the server responses to the commands that libcurl sends. /// /// By default this option is not set and corresponds to the /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `header_function` to configure a /// callback that can reference stack-local data. /// /// # Examples /// /// ``` /// use std::str; /// /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// handle.header_function(|header| { /// print!("header: {}", str::from_utf8(header).unwrap()); /// true /// }).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Collecting headers to a stack local vector /// /// ``` /// use std::str; /// /// use curl::easy::Easy; /// /// let mut headers = Vec::new(); /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// /// { /// let mut transfer = handle.transfer(); /// transfer.header_function(|header| { /// headers.push(str::from_utf8(header).unwrap().to_string()); /// true /// }).unwrap(); /// transfer.perform().unwrap(); /// } /// /// println!("{:?}", headers); /// ``` pub fn header_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(&[u8]) -> bool + Send + 'static { self.data.header = Some(Box::new(f)); unsafe { self.set_header_function(easy_header_cb, &*self.data as *const _ as *mut _) } } // TODO: shouldn't there be a libcurl typedef for this? 
unsafe fn set_header_function(&self, cb: extern fn(*mut c_char, size_t, size_t, *mut c_void) -> size_t, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERDATA, ptr as *const _)); Ok(()) } // ========================================================================= // Error options // TODO: error buffer and stderr /// Indicates whether this library will fail on HTTP response codes >= 400. /// /// This method is not fail-safe especially when authentication is involved. /// /// By default this option is `false` and corresponds to /// `CURLOPT_FAILONERROR`. pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FAILONERROR, fail as c_long) } // ========================================================================= // Network options /// Provides the URL which this handle will work with. /// /// The string provided must be URL-encoded with the format: /// /// ```text /// scheme://host:port/path /// ``` /// /// The syntax is not validated as part of this function and that is /// deferred until later. /// /// By default this option is not set and `perform` will not work until it /// is set. This option corresponds to `CURLOPT_URL`. pub fn url(&mut self, url: &str) -> Result<(), Error> { let url = try!(CString::new(url)); self.setopt_str(curl_sys::CURLOPT_URL, &url) } /// Configures the port number to connect to, instead of the one specified /// in the URL or the default of the protocol. pub fn port(&mut self, port: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) } // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. // /// // /// By default this option is `false` and corresponds to // /// `CURLOPT_PATH_AS_IS`. // pub fn path_as_is(&mut self, as_is: bool) -> Result<(), Error> { // } /// Provide the URL of a proxy to use. 
/// /// By default this option is not set and corresponds to `CURLOPT_PROXY`. pub fn proxy(&mut self, url: &str) -> Result<(), Error> { let url = try!(CString::new(url)); self.setopt_str(curl_sys::CURLOPT_PROXY, &url) } /// Provide port number the proxy is listening on. /// /// By default this option is not set (the default port for the proxy /// protocol is used) and corresponds to `CURLOPT_PROXYPORT`. pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) } /// Indicates the type of proxy being used. /// /// By default this option is `ProxyType::Http` and corresponds to /// `CURLOPT_PROXYTYPE`. pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYTYPE, kind as c_long) } /// Provide a list of hosts that should not be proxied to. /// /// This string is a comma-separated list of hosts which should not use the /// proxy specified for connections. A single `*` character is also accepted /// as a wildcard for all hosts. /// /// By default this option is not set and corresponds to /// `CURLOPT_NOPROXY`. pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> { let skip = try!(CString::new(skip)); self.setopt_str(curl_sys::CURLOPT_PROXYTYPE, &skip) } /// Inform curl whether it should tunnel all operations through the proxy. /// /// This essentially means that a `CONNECT` is sent to the proxy for all /// outbound requests. /// /// By default this option is `false` and corresponds to /// `CURLOPT_HTTPPROXYTUNNEL`. pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HTTPPROXYTUNNEL, tunnel as c_long) } /// Tell curl which interface to bind to for an outgoing network interface. /// /// The interface name, IP address, or host name can be specified here. /// /// By default this option is not set and corresponds to /// `CURLOPT_INTERFACE`. 
    pub fn interface(&mut self, interface: &str) -> Result<(), Error> {
        let s = try!(CString::new(interface));
        self.setopt_str(curl_sys::CURLOPT_INTERFACE, &s)
    }

    /// Indicate which port should be bound to locally for this connection.
    ///
    /// By default this option is 0 (any port) and corresponds to
    /// `CURLOPT_LOCALPORT`.
    pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_LOCALPORT, port as c_long)
    }

    /// Indicates the number of attempts libcurl will perform to find a working
    /// port number.
    ///
    /// By default this option is 1 and corresponds to
    /// `CURLOPT_LOCALPORTRANGE`.
    pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_LOCALPORTRANGE, range as c_long)
    }

    /// Sets the timeout of how long name resolves will be kept in memory.
    ///
    /// This is distinct from DNS TTL options and is entirely speculative.
    ///
    /// By default this option is 60s and corresponds to
    /// `CURLOPT_DNS_CACHE_TIMEOUT`.
    pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> {
        // Sub-second precision of `dur` is discarded; libcurl takes whole
        // seconds for this option.
        self.setopt_long(curl_sys::CURLOPT_DNS_CACHE_TIMEOUT,
                         dur.as_secs() as c_long)
    }

    /// Specify the preferred receive buffer size, in bytes.
    ///
    /// This is treated as a request, not an order, and the main point of this
    /// is that the write callback may get called more often with smaller
    /// chunks.
    ///
    /// By default this option is the maximum write size and corresponds to
    /// `CURLOPT_BUFFERSIZE`.
    pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> {
        // NOTE(review): `size as c_long` truncates on platforms where
        // usize is wider than c_long — acceptable for realistic buffer
        // sizes, but worth confirming.
        self.setopt_long(curl_sys::CURLOPT_BUFFERSIZE, size as c_long)
    }

    // /// Enable or disable TCP Fast Open
    // ///
    // /// By default this options defaults to `false` and corresponds to
    // /// `CURLOPT_TCP_FASTOPEN`
    // pub fn fast_open(&mut self, enable: bool) -> Result<(), Error> {
    // }

    /// Configures whether the TCP_NODELAY option is set, or Nagle's algorithm
    /// is disabled.
/// /// The purpose of Nagle's algorithm is to minimize the number of small /// packet's on the network, and disabling this may be less efficient in /// some situations. /// /// By default this option is `false` and corresponds to /// `CURLOPT_TCP_NODELAY`. pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_TCP_NODELAY, enable as c_long) } // /// Configures whether TCP keepalive probes will be sent. // /// // /// The delay and frequency of these probes is controlled by `tcp_keepidle` // /// and `tcp_keepintvl`. // /// // /// By default this option is `false` and corresponds to // /// `CURLOPT_TCP_KEEPALIVE`. // pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPALIVE, enable as c_long) // } // /// Configures the TCP keepalive idle time wait. // /// // /// This is the delay, after which the connection is idle, keepalive probes // /// will be sent. Not all operating systems support this. // /// // /// By default this corresponds to `CURLOPT_TCP_KEEPIDLE`. // pub fn tcp_keepidle(&mut self, amt: Duration) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPIDLE, // amt.as_secs() as c_long) // } // // /// Configures the delay between keepalive probes. // /// // /// By default this corresponds to `CURLOPT_TCP_KEEPINTVL`. // pub fn tcp_keepintvl(&mut self, amt: Duration) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPINTVL, // amt.as_secs() as c_long) // } /// Configures the scope for local IPv6 addresses. /// /// Sets the scope_id value to use when connecting to IPv6 or link-local /// addresses. 
/// /// By default this value is 0 and corresponds to `CURLOPT_ADDRESS_SCOPE` pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_ADDRESS_SCOPE, scope as c_long) } // ========================================================================= // Names and passwords /// Configures the username to pass as authentication for this connection. /// /// By default this value is not set and corresponds to `CURLOPT_USERNAME`. pub fn username(&mut self, user: &str) -> Result<(), Error> { let user = try!(CString::new(user)); self.setopt_str(curl_sys::CURLOPT_USERNAME, &user) } /// Configures the password to pass as authentication for this connection. /// /// By default this value is not set and corresponds to `CURLOPT_PASSWORD`. pub fn password(&mut self, pass: &str) -> Result<(), Error> { let pass = try!(CString::new(pass)); self.setopt_str(curl_sys::CURLOPT_PASSWORD, &pass) } /// Set HTTP server authentication methods to try /// /// If more than one method is set, libcurl will first query the site to see /// which authentication methods it supports and then pick the best one you /// allow it to use. For some methods, this will induce an extra network /// round-trip. Set the actual name and password with the `password` and /// `username` methods. /// /// For authentication with a proxy, see `proxy_auth`. /// /// By default this value is basic and corresponds to `CURLOPT_HTTPAUTH`. pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HTTPAUTH, auth.bits) } /// Configures the proxy username to pass as authentication for this /// connection. /// /// By default this value is not set and corresponds to /// `CURLOPT_PROXYUSERNAME`. pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> { let user = try!(CString::new(user)); self.setopt_str(curl_sys::CURLOPT_PROXYUSERNAME, &user) } /// Configures the proxy password to pass as authentication for this /// connection. 
/// /// By default this value is not set and corresponds to /// `CURLOPT_PROXYPASSWORD`. pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> { let pass = try!(CString::new(pass)); self.setopt_str(curl_sys::CURLOPT_PROXYPASSWORD, &pass) } /// Set HTTP proxy authentication methods to try /// /// If more than one method is set, libcurl will first query the site to see /// which authentication methods it supports and then pick the best one you /// allow it to use. For some methods, this will induce an extra network /// round-trip. Set the actual name and password with the `proxy_password` /// and `proxy_username` methods. /// /// By default this value is basic and corresponds to `CURLOPT_PROXYAUTH`. pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYAUTH, auth.bits) } // ========================================================================= // HTTP Options /// Indicates whether the referer header is automatically updated /// /// By default this option is `false` and corresponds to /// `CURLOPT_AUTOREFERER`. pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_AUTOREFERER, enable as c_long) } /// Enables automatic decompression of HTTP downloads. /// /// Sets the contents of the Accept-Encoding header sent in an HTTP request. /// This enables decoding of a response with Content-Encoding. /// /// Currently supported encoding are `identity`, `zlib`, and `gzip`. A /// zero-length string passed in will send all accepted encodings. /// /// By default this option is not set and corresponds to /// `CURLOPT_ACCEPT_ENCODING`. pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> { let encoding = try!(CString::new(encoding)); self.setopt_str(curl_sys::CURLOPT_ACCEPT_ENCODING, &encoding) } /// Request the HTTP Transfer Encoding. /// /// By default this option is `false` and corresponds to /// `CURLOPT_TRANSFER_ENCODING`. 
pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_TRANSFER_ENCODING, enable as c_long) } /// Follow HTTP 3xx redirects. /// /// Indicates whether any `Location` headers in the response should get /// followed. /// /// By default this option is `false` and corresponds to /// `CURLOPT_FOLLOWLOCATION`. pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FOLLOWLOCATION, enable as c_long) } /// Send credentials to hosts other than the first as well. /// /// Sends username/password credentials even when the host changes as part /// of a redirect. /// /// By default this option is `false` and corresponds to /// `CURLOPT_UNRESTRICTED_AUTH`. pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_UNRESTRICTED_AUTH, enable as c_long) } /// Set the maximum number of redirects allowed. /// /// A value of 0 will refuse any redirect. /// /// By default this option is `-1` (unlimited) and corresponds to /// `CURLOPT_MAXREDIRS`. pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_MAXREDIRS, max as c_long) } // TODO: post_redirections /// Make an HTTP PUT request. /// /// By default this option is `false` and corresponds to `CURLOPT_PUT`. pub fn put(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PUT, enable as c_long) } /// Make an HTTP POST request. /// /// This will also make the library use the /// `Content-Type: application/x-www-form-urlencoded` header. /// /// POST data can be specified through `post_fields` or by specifying a read /// function. /// /// By default this option is `false` and corresponds to `CURLOPT_POST`. pub fn post(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_POST, enable as c_long) } /// Configures the data that will be uploaded as part of a POST. 
///
/// Note that the data is copied into this handle and if that's not desired
/// then the read callbacks can be used instead.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_COPYPOSTFIELDS`.
pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> {
    // Set the length before the pointer so libcurl knows how much to read
    try!(self.post_field_size(data.len() as u64));
    self.setopt_ptr(curl_sys::CURLOPT_COPYPOSTFIELDS,
                    data.as_ptr() as *const _)
}

/// Configures the size of data that's going to be uploaded as part of a
/// POST operation.
///
/// This is called automatically as part of `post_fields` and should only
/// be called if data is being provided in a read callback (and even then
/// it's optional).
///
/// By default this option is not set and corresponds to
/// `CURLOPT_POSTFIELDSIZE_LARGE`.
pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> {
    // Clear anything previous to ensure we don't read past a buffer
    try!(self.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS, 0 as *const _));
    self.setopt_off_t(curl_sys::CURLOPT_POSTFIELDSIZE_LARGE,
                      size as curl_sys::curl_off_t)
}

/// Tells libcurl you want a multipart/formdata HTTP POST to be made and you
/// instruct what data to pass on to the server in the `form` argument.
///
/// By default this option is set to null and corresponds to
/// `CURLOPT_HTTPPOST`.
pub fn httppost(&mut self, form: Form) -> Result<(), Error> {
    try!(self.setopt_ptr(curl_sys::CURLOPT_HTTPPOST,
                         form.head as *const _));
    self.data.form = Some(form);
    Ok(())
}

/// Sets the HTTP referer header
///
/// By default this option is not set and corresponds to `CURLOPT_REFERER`.
pub fn referer(&mut self, referer: &str) -> Result<(), Error> {
    let referer = try!(CString::new(referer));
    self.setopt_str(curl_sys::CURLOPT_REFERER, &referer)
}

/// Sets the HTTP user-agent header
///
/// By default this option is not set and corresponds to
/// `CURLOPT_USERAGENT`.
pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> {
    let useragent = try!(CString::new(useragent));
    self.setopt_str(curl_sys::CURLOPT_USERAGENT, &useragent)
}

/// Add some headers to this HTTP request.
///
/// If you add a header that is otherwise used internally, the value here
/// takes precedence. If a header is added with no content (like `Accept:`)
/// then internally the header will get disabled. To add a header with no
/// content, use the form `MyHeader;` (note the trailing semicolon).
///
/// Headers must not be CRLF terminated. Many replaced headers have common
/// shortcuts which should be preferred.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_HTTPHEADER`
///
/// # Examples
///
/// ```
/// use curl::easy::{Easy, List};
///
/// let mut list = List::new();
/// list.append("Foo: bar").unwrap();
/// list.append("Bar: baz").unwrap();
///
/// let mut handle = Easy::new();
/// handle.url("https://www.rust-lang.org/").unwrap();
/// handle.http_headers(list).unwrap();
/// handle.perform().unwrap();
/// ```
pub fn http_headers(&mut self, list: List) -> Result<(), Error> {
    let ptr = list.raw;
    self.data.header_list = Some(list);
    self.setopt_ptr(curl_sys::CURLOPT_HTTPHEADER, ptr as *const _)
}

// /// Add some headers to send to the HTTP proxy.
// ///
// /// This function is essentially the same as `http_headers`.
// ///
// /// By default this option is not set and corresponds to
// /// `CURLOPT_PROXYHEADER`
// pub fn proxy_headers(&mut self, list: &'a List) -> Result<(), Error> {
//     self.setopt_ptr(curl_sys::CURLOPT_PROXYHEADER, list.raw as *const _)
// }

/// Set the contents of the HTTP Cookie header.
///
/// Pass a string of the form `name=contents` for one cookie value or
/// `name1=val1; name2=val2` for multiple values.
///
/// Using this option multiple times will only make the latest string
/// override the previous ones. This option will not enable the cookie
/// engine, use `cookie_file` or `cookie_jar` to do that.
/// /// By default this option is not set and corresponds to `CURLOPT_COOKIE`. pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> { let cookie = try!(CString::new(cookie)); self.setopt_str(curl_sys::CURLOPT_COOKIE, &cookie) } /// Set the file name to read cookies from. /// /// The cookie data can be in either the old Netscape / Mozilla cookie data /// format or just regular HTTP headers (Set-Cookie style) dumped to a file. /// /// This also enables the cookie engine, making libcurl parse and send /// cookies on subsequent requests with this handle. /// /// Given an empty or non-existing file or by passing the empty string ("") /// to this option, you can enable the cookie engine without reading any /// initial cookies. /// /// If you use this option multiple times, you just add more files to read. /// Subsequent files will add more cookies. /// /// By default this option is not set and corresponds to /// `CURLOPT_COOKIEFILE`. pub fn cookie_file<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_COOKIEFILE, file.as_ref()) } /// Set the file name to store cookies to. /// /// This will make libcurl write all internally known cookies to the file /// when this handle is dropped. If no cookies are known, no file will be /// created. Specify "-" as filename to instead have the cookies written to /// stdout. Using this option also enables cookies for this session, so if /// you for example follow a location it will make matching cookies get sent /// accordingly. /// /// Note that libcurl doesn't read any cookies from the cookie jar. If you /// want to read cookies from a file, use `cookie_file`. /// /// By default this option is not set and corresponds to /// `CURLOPT_COOKIEJAR`. pub fn cookie_jar<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_COOKIEJAR, file.as_ref()) } /// Start a new cookie session /// /// Marks this as a new cookie "session". 
/// It will force libcurl to ignore
/// all cookies it is about to load that are "session cookies" from the
/// previous session. By default, libcurl always stores and loads all
/// cookies, independent if they are session cookies or not. Session cookies
/// are cookies without expiry date and they are meant to be alive and
/// existing for this "session" only.
///
/// By default this option is `false` and corresponds to
/// `CURLOPT_COOKIESESSION`.
pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_COOKIESESSION, session as c_long)
}

/// Add to or manipulate cookies held in memory.
///
/// Such a cookie can be either a single line in Netscape / Mozilla format
/// or just regular HTTP-style header (Set-Cookie: ...) format. This will
/// also enable the cookie engine. This adds that single cookie to the
/// internal cookie store.
///
/// Exercise caution if you are using this option and multiple transfers may
/// occur. If you use the Set-Cookie format and don't specify a domain then
/// the cookie is sent for any domain (even after redirects are followed)
/// and cannot be modified by a server-set cookie. If a server sets a cookie
/// of the same name (or maybe you've imported one) then both will be sent
/// on a future transfer to that server, likely not what you intended.
/// To address these issues set a domain in Set-Cookie or use the Netscape
/// format.
///
/// Additionally, there are commands available that perform actions if you
/// pass in these exact strings:
///
/// * "ALL" - erases all cookies held in memory
/// * "SESS" - erases all session cookies held in memory
/// * "FLUSH" - write all known cookies to the specified cookie jar
/// * "RELOAD" - reread all cookies from the cookie file
///
/// By default this option corresponds to `CURLOPT_COOKIELIST`
pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> {
    let cookie = try!(CString::new(cookie));
    self.setopt_str(curl_sys::CURLOPT_COOKIELIST, &cookie)
}

/// Ask for an HTTP GET request.
///
/// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`.
pub fn get(&mut self, enable: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long)
}

// NOTE(review): the stub below still carries the doc text and body
// copy-pasted from `get`; it presumably should use `CURLOPT_HTTP_VERSION`
// — fix before enabling.
// /// Ask for a HTTP GET request.
// ///
// /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`.
// pub fn http_version(&mut self, vers: &str) -> Result<(), Error> {
//     self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long)
// }

/// Ignore the content-length header.
///
/// By default this option is `false` and corresponds to
/// `CURLOPT_IGNORE_CONTENT_LENGTH`.
pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_IGNORE_CONTENT_LENGTH,
                     ignore as c_long)
}

/// Enable or disable HTTP content decoding.
///
/// By default this option is `true` and corresponds to
/// `CURLOPT_HTTP_CONTENT_DECODING`.
pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_HTTP_CONTENT_DECODING,
                     enable as c_long)
}

/// Enable or disable HTTP transfer decoding.
///
/// By default this option is `true` and corresponds to
/// `CURLOPT_HTTP_TRANSFER_DECODING`.
pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, enable as c_long) } // /// Timeout for the Expect: 100-continue response // /// // /// By default this option is 1s and corresponds to // /// `CURLOPT_EXPECT_100_TIMEOUT_MS`. // pub fn expect_100_timeout(&mut self, enable: bool) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING, // enable as c_long) // } // /// Wait for pipelining/multiplexing. // /// // /// Tells libcurl to prefer to wait for a connection to confirm or deny that // /// it can do pipelining or multiplexing before continuing. // /// // /// When about to perform a new transfer that allows pipelining or // /// multiplexing, libcurl will check for existing connections to re-use and // /// pipeline on. If no such connection exists it will immediately continue // /// and create a fresh new connection to use. // /// // /// By setting this option to `true` - having `pipeline` enabled for the // /// multi handle this transfer is associated with - libcurl will instead // /// wait for the connection to reveal if it is possible to // /// pipeline/multiplex on before it continues. This enables libcurl to much // /// better keep the number of connections to a minimum when using pipelining // /// or multiplexing protocols. // /// // /// The effect thus becomes that with this option set, libcurl prefers to // /// wait and re-use an existing connection for pipelining rather than the // /// opposite: prefer to open a new connection rather than waiting. // /// // /// The waiting time is as long as it takes for the connection to get up and // /// for libcurl to get the necessary response back that informs it about its // /// protocol and support level. 
// pub fn http_pipewait(&mut self, enable: bool) -> Result<(), Error> { // } // ========================================================================= // Protocol Options /// Indicates the range that this request should retrieve. /// /// The string provided should be of the form `N-M` where either `N` or `M` /// can be left out. For HTTP transfers multiple ranges separated by commas /// are also accepted. /// /// By default this option is not set and corresponds to `CURLOPT_RANGE`. pub fn range(&mut self, range: &str) -> Result<(), Error> { let range = try!(CString::new(range)); self.setopt_str(curl_sys::CURLOPT_RANGE, &range) } /// Set a point to resume transfer from /// /// Specify the offset in bytes you want the transfer to start from. /// /// By default this option is 0 and corresponds to /// `CURLOPT_RESUME_FROM_LARGE`. pub fn resume_from(&mut self, from: u64) -> Result<(), Error> { self.setopt_off_t(curl_sys::CURLOPT_RESUME_FROM_LARGE, from as curl_sys::curl_off_t) } /// Set a custom request string /// /// Specifies that a custom request will be made (e.g. a custom HTTP /// method). This does not change how libcurl performs internally, just /// changes the string sent to the server. /// /// By default this option is not set and corresponds to /// `CURLOPT_CUSTOMREQUEST`. pub fn custom_request(&mut self, request: &str) -> Result<(), Error> { let request = try!(CString::new(request)); self.setopt_str(curl_sys::CURLOPT_CUSTOMREQUEST, &request) } /// Get the modification time of the remote resource /// /// If true, libcurl will attempt to get the modification time of the /// remote document in this operation. This requires that the remote server /// sends the time or replies to a time querying command. The `filetime` /// function can be used after a transfer to extract the received time (if /// any). 
/// /// By default this option is `false` and corresponds to `CURLOPT_FILETIME` pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FILETIME, fetch as c_long) } /// Indicate whether to download the request without getting the body /// /// This is useful, for example, for doing a HEAD request. /// /// By default this option is `false` and corresponds to `CURLOPT_NOBODY`. pub fn nobody(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_NOBODY, enable as c_long) } /// Set the size of the input file to send off. /// /// By default this option is not set and corresponds to /// `CURLOPT_INFILESIZE_LARGE`. pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> { self.setopt_off_t(curl_sys::CURLOPT_INFILESIZE_LARGE, size as curl_sys::curl_off_t) } /// Enable or disable data upload. /// /// This means that a PUT request will be made for HTTP and probably wants /// to be combined with the read callback as well as the `in_filesize` /// method. /// /// By default this option is `false` and corresponds to `CURLOPT_UPLOAD`. pub fn upload(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_UPLOAD, enable as c_long) } /// Configure the maximum file size to download. /// /// By default this option is not set and corresponds to /// `CURLOPT_MAXFILESIZE_LARGE`. pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> { self.setopt_off_t(curl_sys::CURLOPT_MAXFILESIZE_LARGE, size as curl_sys::curl_off_t) } /// Selects a condition for a time request. /// /// This value indicates how the `time_value` option is interpreted. /// /// By default this option is not set and corresponds to /// `CURLOPT_TIMECONDITION`. pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_TIMECONDITION, cond as c_long) } /// Sets the time value for a conditional request. 
/// /// The value here should be the number of seconds elapsed since January 1, /// 1970. To pass how to interpret this value, use `time_condition`. /// /// By default this option is not set and corresponds to /// `CURLOPT_TIMEVALUE`. pub fn time_value(&mut self, val: i64) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_TIMEVALUE, val as c_long) } // ========================================================================= // Connection Options /// Set maximum time the request is allowed to take. /// /// Normally, name lookups can take a considerable time and limiting /// operations to less than a few minutes risk aborting perfectly normal /// operations. /// /// If libcurl is built to use the standard system name resolver, that /// portion of the transfer will still use full-second resolution for /// timeouts with a minimum timeout allowed of one second. /// /// In unix-like systems, this might cause signals to be used unless /// `nosignal` is set. /// /// Since this puts a hard limit for how long time a request is allowed to /// take, it has limited use in dynamic use cases with varying transfer /// times. You are then advised to explore `low_speed_limit`, /// `low_speed_time` or using `progress_function` to implement your own /// timeout logic. /// /// By default this option is not set and corresponds to /// `CURLOPT_TIMEOUT_MS`. pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> { // TODO: checked arithmetic and casts // TODO: use CURLOPT_TIMEOUT if the timeout is too great let ms = timeout.as_secs() * 1000 + (timeout.subsec_nanos() / 1_000_000) as u64; self.setopt_long(curl_sys::CURLOPT_TIMEOUT_MS, ms as c_long) } /// Set the low speed limit in bytes per second. /// /// This specifies the average transfer speed in bytes per second that the /// transfer should be below during `low_speed_time` for libcurl to consider /// it to be too slow and abort. 
/// /// By default this option is not set and corresponds to /// `CURLOPT_LOW_SPEED_LIMIT`. pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_LIMIT, limit as c_long) } /// Set the low speed time period. /// /// Specifies the window of time for which if the transfer rate is below /// `low_speed_limit` the request will be aborted. /// /// By default this option is not set and corresponds to /// `CURLOPT_LOW_SPEED_TIME`. pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_TIME, dur.as_secs() as c_long) } /// Rate limit data upload speed /// /// If an upload exceeds this speed (counted in bytes per second) on /// cumulative average during the transfer, the transfer will pause to keep /// the average rate less than or equal to the parameter value. /// /// By default this option is not set (unlimited speed) and corresponds to /// `CURLOPT_MAX_SEND_SPEED_LARGE`. pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> { self.setopt_off_t(curl_sys::CURLOPT_MAX_SEND_SPEED_LARGE, speed as curl_sys::curl_off_t) } /// Rate limit data download speed /// /// If a download exceeds this speed (counted in bytes per second) on /// cumulative average during the transfer, the transfer will pause to keep /// the average rate less than or equal to the parameter value. /// /// By default this option is not set (unlimited speed) and corresponds to /// `CURLOPT_MAX_RECV_SPEED_LARGE`. pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> { self.setopt_off_t(curl_sys::CURLOPT_MAX_RECV_SPEED_LARGE, speed as curl_sys::curl_off_t) } /// Set the maximum connection cache size. /// /// The set amount will be the maximum number of simultaneously open /// persistent connections that libcurl may cache in the pool associated /// with this handle. 
The default is 5, and there isn't much point in /// changing this value unless you are perfectly aware of how this works and /// changes libcurl's behaviour. This concerns connections using any of the /// protocols that support persistent connections. /// /// When reaching the maximum limit, curl closes the oldest one in the cache /// to prevent increasing the number of open connections. /// /// By default this option is set to 5 and corresponds to /// `CURLOPT_MAXCONNECTS` pub fn max_connects(&mut self, max: u32) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_MAXCONNECTS, max as c_long) } /// Force a new connection to be used. /// /// Makes the next transfer use a new (fresh) connection by force instead of /// trying to re-use an existing one. This option should be used with /// caution and only if you understand what it does as it may seriously /// impact performance. /// /// By default this option is `false` and corresponds to /// `CURLOPT_FRESH_CONNECT`. pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FRESH_CONNECT, enable as c_long) } /// Make connection get closed at once after use. /// /// Makes libcurl explicitly close the connection when done with the /// transfer. Normally, libcurl keeps all connections alive when done with /// one transfer in case a succeeding one follows that can re-use them. /// This option should be used with caution and only if you understand what /// it does as it can seriously impact performance. /// /// By default this option is `false` and corresponds to /// `CURLOPT_FORBID_REUSE`. pub fn forbid_reuse(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FORBID_REUSE, enable as c_long) } /// Timeout for the connect phase /// /// This is the maximum time that you allow the connection phase to the /// server to take. This only limits the connection phase, it has no impact /// once it has connected. 
/// /// By default this value is 300 seconds and corresponds to /// `CURLOPT_CONNECTTIMEOUT_MS`. pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> { let ms = timeout.as_secs() * 1000 + (timeout.subsec_nanos() / 1_000_000) as u64; self.setopt_long(curl_sys::CURLOPT_CONNECTTIMEOUT_MS, ms as c_long) } /// Specify which IP protocol version to use /// /// Allows an application to select what kind of IP addresses to use when /// resolving host names. This is only interesting when using host names /// that resolve addresses using more than one version of IP. /// /// By default this value is "any" and corresponds to `CURLOPT_IPRESOLVE`. pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_IPRESOLVE, resolve as c_long) } /// Configure whether to stop when connected to target server /// /// When enabled it tells the library to perform all the required proxy /// authentication and connection setup, but no data transfer, and then /// return. /// /// The option can be used to simply test a connection to a server. /// /// By default this value is `false` and corresponds to /// `CURLOPT_CONNECT_ONLY`. pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_CONNECT_ONLY, enable as c_long) } // /// Set interface to speak DNS over. // /// // /// Set the name of the network interface that the DNS resolver should bind // /// to. This must be an interface name (not an address). // /// // /// By default this option is not set and corresponds to // /// `CURLOPT_DNS_INTERFACE`. // pub fn dns_interface(&mut self, interface: &str) -> Result<(), Error> { // let interface = try!(CString::new(interface)); // self.setopt_str(curl_sys::CURLOPT_DNS_INTERFACE, &interface) // } // // /// IPv4 address to bind DNS resolves to // /// // /// Set the local IPv4 address that the resolver should bind to. 
The // /// argument should be of type char * and contain a single numerical IPv4 // /// address as a string. // /// // /// By default this option is not set and corresponds to // /// `CURLOPT_DNS_LOCAL_IP4`. // pub fn dns_local_ip4(&mut self, ip: &str) -> Result<(), Error> { // let ip = try!(CString::new(ip)); // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP4, &ip) // } // // /// IPv6 address to bind DNS resolves to // /// // /// Set the local IPv6 address that the resolver should bind to. The // /// argument should be of type char * and contain a single numerical IPv6 // /// address as a string. // /// // /// By default this option is not set and corresponds to // /// `CURLOPT_DNS_LOCAL_IP6`. // pub fn dns_local_ip6(&mut self, ip: &str) -> Result<(), Error> { // let ip = try!(CString::new(ip)); // self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP6, &ip) // } // // /// Set preferred DNS servers. // /// // /// Provides a list of DNS servers to be used instead of the system default. // /// The format of the dns servers option is: // /// // /// ```text // /// host[:port],[host[:port]]... // /// ``` // /// // /// By default this option is not set and corresponds to // /// `CURLOPT_DNS_SERVERS`. // pub fn dns_servers(&mut self, servers: &str) -> Result<(), Error> { // let servers = try!(CString::new(servers)); // self.setopt_str(curl_sys::CURLOPT_DNS_SERVERS, &servers) // } // ========================================================================= // SSL/Security Options /// Sets the SSL client certificate. /// /// The string should be the file name of your client certificate. The /// default format is "P12" on Secure Transport and "PEM" on other engines, /// and can be changed with `ssl_cert_type`. /// /// With NSS or Secure Transport, this can also be the nickname of the /// certificate you wish to authenticate with as it is named in the security /// database. 
If you want to use a file from the current directory, please /// precede it with "./" prefix, in order to avoid confusion with a /// nickname. /// /// When using a client certificate, you most likely also need to provide a /// private key with `ssl_key`. /// /// By default this option is not set and corresponds to `CURLOPT_SSLCERT`. pub fn ssl_cert<P: AsRef<Path>>(&mut self, cert: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_SSLCERT, cert.as_ref()) } /// Specify type of the client SSL certificate. /// /// The string should be the format of your certificate. Supported formats /// are "PEM" and "DER", except with Secure Transport. OpenSSL (versions /// 0.9.3 and later) and Secure Transport (on iOS 5 or later, or OS X 10.7 /// or later) also support "P12" for PKCS#12-encoded files. /// /// By default this option is "PEM" and corresponds to /// `CURLOPT_SSLCERTTYPE`. pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> { let kind = try!(CString::new(kind)); self.setopt_str(curl_sys::CURLOPT_SSLCERTTYPE, &kind) } /// Specify private keyfile for TLS and SSL client cert. /// /// The string should be the file name of your private key. The default /// format is "PEM" and can be changed with `ssl_key_type`. /// /// (iOS and Mac OS X only) This option is ignored if curl was built against /// Secure Transport. Secure Transport expects the private key to be already /// present in the keychain or PKCS#12 file containing the certificate. /// /// By default this option is not set and corresponds to `CURLOPT_SSLKEY`. pub fn ssl_key<P: AsRef<Path>>(&mut self, key: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_SSLKEY, key.as_ref()) } /// Set type of the private key file. /// /// The string should be the format of your private key. Supported formats /// are "PEM", "DER" and "ENG". /// /// The format "ENG" enables you to load the private key from a crypto /// engine. In this case `ssl_key` is used as an identifier passed to /// the engine. 
/// You have to set the crypto engine with `ssl_engine`.
/// "DER" format key file currently does not work because of a bug in
/// OpenSSL.
///
/// By default this option is "PEM" and corresponds to
/// `CURLOPT_SSLKEYTYPE`.
pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> {
    let kind = try!(CString::new(kind));
    self.setopt_str(curl_sys::CURLOPT_SSLKEYTYPE, &kind)
}

/// Set passphrase to private key.
///
/// This will be used as the password required to use the `ssl_key`.
/// You never needed a pass phrase to load a certificate but you need one to
/// load your private key.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_KEYPASSWD`.
pub fn key_password(&mut self, password: &str) -> Result<(), Error> {
    let password = try!(CString::new(password));
    self.setopt_str(curl_sys::CURLOPT_KEYPASSWD, &password)
}

/// Set the SSL engine identifier.
///
/// This will be used as the identifier for the crypto engine you want to
/// use for your private key.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_SSLENGINE`.
pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> {
    let engine = try!(CString::new(engine));
    self.setopt_str(curl_sys::CURLOPT_SSLENGINE, &engine)
}

/// Make this handle's SSL engine the default.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_SSLENGINE_DEFAULT`.
pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long)
}

// /// Enable TLS false start.
// ///
// /// This option determines whether libcurl should use false start during the
// /// TLS handshake. False start is a mode where a TLS client will start
// /// sending application data before verifying the server's Finished message,
// /// thus saving a round trip when performing a full handshake.
// ///
// /// By default this option is not set and corresponds to
// /// `CURLOPT_SSL_FALSESTART`.
// pub fn ssl_false_start(&mut self, enable: bool) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long) // } /// Set preferred TLS/SSL version. /// /// By default this option is not set and corresponds to /// `CURLOPT_SSLVERSION`. pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_SSLVERSION, version as c_long) } /// Verify the certificate's name against host. /// /// This should be disabled with great caution! It basically disables the /// security features of SSL if it is disabled. /// /// By default this option is set to `true` and corresponds to /// `CURLOPT_SSL_VERIFYHOST`. pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> { let val = if verify {2} else {0}; self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYHOST, val) } /// Verify the peer's SSL certificate. /// /// This should be disabled with great caution! It basically disables the /// security features of SSL if it is disabled. /// /// By default this option is set to `true` and corresponds to /// `CURLOPT_SSL_VERIFYPEER`. pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYPEER, verify as c_long) } // /// Verify the certificate's status. // /// // /// This option determines whether libcurl verifies the status of the server // /// cert using the "Certificate Status Request" TLS extension (aka. OCSP // /// stapling). // /// // /// By default this option is set to `false` and corresponds to // /// `CURLOPT_SSL_VERIFYSTATUS`. // pub fn ssl_verify_status(&mut self, verify: bool) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYSTATUS, verify as c_long) // } /// Specify the path to Certificate Authority (CA) bundle /// /// The file referenced should hold one or more certificates to verify the /// peer with. 
/// /// This option is by default set to the system path where libcurl's cacert /// bundle is assumed to be stored, as established at build time. /// /// If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module /// (libnsspem.so) needs to be available for this option to work properly. /// /// By default this option is the system defaults, and corresponds to /// `CURLOPT_CAINFO`. pub fn cainfo<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_CAINFO, path.as_ref()) } /// Set the issuer SSL certificate filename /// /// Specifies a file holding a CA certificate in PEM format. If the option /// is set, an additional check against the peer certificate is performed to /// verify the issuer is indeed the one associated with the certificate /// provided by the option. This additional check is useful in multi-level /// PKI where one needs to enforce that the peer certificate is from a /// specific branch of the tree. /// /// This option makes sense only when used in combination with the /// `ssl_verify_peer` option. Otherwise, the result of the check is not /// considered as failure. /// /// By default this option is not set and corresponds to /// `CURLOPT_ISSUERCERT`. pub fn issuer_cert<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_ISSUERCERT, path.as_ref()) } /// Specify directory holding CA certificates /// /// Names a directory holding multiple CA certificates to verify the peer /// with. If libcurl is built against OpenSSL, the certificate directory /// must be prepared using the openssl c_rehash utility. This makes sense /// only when used in combination with the `ssl_verify_peer` option. /// /// By default this option is not set and corresponds to `CURLOPT_CAPATH`. 
pub fn capath<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
    self.setopt_path(curl_sys::CURLOPT_CAPATH, path.as_ref())
}

/// Specify a Certificate Revocation List file
///
/// Names a file with the concatenation of CRL (in PEM format) to use in the
/// certificate validation that occurs during the SSL exchange.
///
/// When curl is built to use NSS or GnuTLS, there is no way to influence
/// the use of CRL passed to help in the verification process. When libcurl
/// is built with OpenSSL support, X509_V_FLAG_CRL_CHECK and
/// X509_V_FLAG_CRL_CHECK_ALL are both set, requiring CRL check against all
/// the elements of the certificate chain if a CRL file is passed.
///
/// This option makes sense only when used in combination with the
/// `ssl_verify_peer` option.
///
/// A specific error code (`is_ssl_crl_badfile`) is defined with the
/// option. It is returned when the SSL exchange fails because the CRL file
/// cannot be loaded. A failure in certificate verification due to a
/// revocation information found in the CRL does not trigger this specific
/// error.
///
/// By default this option is not set and corresponds to `CURLOPT_CRLFILE`.
pub fn crlfile<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
    self.setopt_path(curl_sys::CURLOPT_CRLFILE, path.as_ref())
}

/// Request SSL certificate information
///
/// Enable libcurl's certificate chain info gatherer. With this enabled,
/// libcurl will extract lots of information and data about the certificates
/// in the certificate chain used in the SSL connection.
///
/// By default this option is `false` and corresponds to
/// `CURLOPT_CERTINFO`.
pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
}

// /// Set pinned public key.
// ///
// /// Pass a pointer to a zero terminated string as parameter. The string can
// /// be the file name of your pinned public key. The file format expected is
// /// "PEM" or "DER". The string can also be any number of base64 encoded
// /// sha256 hashes preceded by "sha256//" and separated by ";"
// ///
// /// When negotiating a TLS or SSL connection, the server sends a certificate
// /// indicating its identity. A public key is extracted from this certificate
// /// and if it does not exactly match the public key provided to this option,
// /// curl will abort the connection before sending or receiving any data.
// ///
// /// By default this option is not set and corresponds to
// /// `CURLOPT_PINNEDPUBLICKEY`.
// NOTE(review): the commented-out body below reuses `CURLOPT_CERTINFO` with a
// `bool` argument; if this is ever enabled it should accept the key location
// and set `CURLOPT_PINNEDPUBLICKEY` instead.
// pub fn pinned_public_key(&mut self, enable: bool) -> Result<(), Error> {
//     self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
// }

/// Specify a source for random data
///
/// The file will be used to read from to seed the random engine for SSL and
/// more.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_RANDOM_FILE`.
pub fn random_file<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
    self.setopt_path(curl_sys::CURLOPT_RANDOM_FILE, p.as_ref())
}

/// Specify EGD socket path.
///
/// Indicates the path name to the Entropy Gathering Daemon socket. It will
/// be used to seed the random engine for SSL.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_EGDSOCKET`.
pub fn egd_socket<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
    self.setopt_path(curl_sys::CURLOPT_EGDSOCKET, p.as_ref())
}

/// Specify ciphers to use for TLS.
///
/// Holds the list of ciphers to use for the SSL connection. The list must
/// be syntactically correct, it consists of one or more cipher strings
/// separated by colons. Commas or spaces are also acceptable separators
/// but colons are normally used, !, - and + can be used as operators.
///
/// For OpenSSL and GnuTLS valid examples of cipher lists include 'RC4-SHA',
/// ´SHA1+DES´, 'TLSv1' and 'DEFAULT'. The default list is normally set when
/// you compile OpenSSL.
///
/// You'll find more details about cipher lists on this URL:
///
/// https://www.openssl.org/docs/apps/ciphers.html
///
/// For NSS, valid examples of cipher lists include 'rsa_rc4_128_md5',
/// ´rsa_aes_128_sha´, etc. With NSS you don't add/remove ciphers. If one
/// uses this option then all known ciphers are disabled and only those
/// passed in are enabled.
///
/// You'll find more details about the NSS cipher lists on this URL:
///
/// http://git.fedorahosted.org/cgit/mod_nss.git/plain/docs/mod_nss.html#Directives
///
/// By default this option is not set and corresponds to
/// `CURLOPT_SSL_CIPHER_LIST`.
pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> {
    let ciphers = try!(CString::new(ciphers));
    self.setopt_str(curl_sys::CURLOPT_SSL_CIPHER_LIST, &ciphers)
}

/// Enable or disable use of the SSL session-ID cache
///
/// By default all transfers are done using the cache enabled. While nothing
/// ever should get hurt by attempting to reuse SSL session-IDs, there seem
/// to be or have been broken SSL implementations in the wild that may
/// require you to disable this in order for you to succeed.
///
/// This corresponds to the `CURLOPT_SSL_SESSIONID_CACHE` option.
pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_SSL_SESSIONID_CACHE, enable as c_long)
}

// /// Stores a private pointer-sized piece of data.
// ///
// /// This can be retrieved through the `private` function and otherwise
// /// libcurl does not tamper with this value. This corresponds to
// /// `CURLOPT_PRIVATE` and defaults to 0.
// pub fn set_private(&mut self, private: usize) -> Result<(), Error> {
//     self.setopt_ptr(curl_sys::CURLOPT_PRIVATE, private as *const _)
// }
//
// /// Fetches this handle's private pointer-sized piece of data.
// ///
// /// This corresponds to `CURLINFO_PRIVATE` and defaults to 0.
// pub fn private(&mut self) -> Result<usize, Error> {
//     self.getopt_ptr(curl_sys::CURLINFO_PRIVATE).map(|p| p as usize)
// }

// =========================================================================
// getters

/// Get the last used URL
///
/// In cases when you've asked libcurl to follow redirects, it may
/// not be the same value you set with `url`.
///
/// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option.
///
/// Returns `Ok(None)` if no effective url is listed or `Err` if an error
/// happens or the underlying bytes aren't valid utf-8.
pub fn effective_url(&mut self) -> Result<Option<&str>, Error> {
    self.getopt_str(curl_sys::CURLINFO_EFFECTIVE_URL)
}

/// Get the last used URL, in bytes
///
/// In cases when you've asked libcurl to follow redirects, it may
/// not be the same value you set with `url`.
///
/// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option.
///
/// Returns `Ok(None)` if no effective url is listed or `Err` if an error
/// happens or the underlying bytes aren't valid utf-8.
pub fn effective_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
    self.getopt_bytes(curl_sys::CURLINFO_EFFECTIVE_URL)
}

/// Get the last response code
///
/// The stored value will be zero if no server response code has been
/// received. Note that a proxy's CONNECT response should be read with
/// `http_connectcode` and not this.
///
/// Corresponds to `CURLINFO_RESPONSE_CODE` and returns an error if this
/// option is not supported.
pub fn response_code(&mut self) -> Result<u32, Error> {
    self.getopt_long(curl_sys::CURLINFO_RESPONSE_CODE).map(|c| c as u32)
}

/// Get the CONNECT response code
///
/// Returns the last received HTTP proxy response code to a CONNECT request.
/// The returned value will be zero if no such response code was available.
///
/// Corresponds to `CURLINFO_HTTP_CONNECTCODE` and returns an error if this
/// option is not supported.
pub fn http_connectcode(&mut self) -> Result<u32, Error> {
    self.getopt_long(curl_sys::CURLINFO_HTTP_CONNECTCODE).map(|c| c as u32)
}

/// Get the remote time of the retrieved document
///
/// Returns the remote time of the retrieved document (in number of seconds
/// since 1 Jan 1970 in the GMT/UTC time zone). If you get `None`, it can be
/// because of many reasons (it might be unknown, the server might hide it
/// or the server doesn't support the command that tells document time etc)
/// and the time of the document is unknown.
///
/// Note that you must tell the server to collect this information before
/// the transfer is made, by using the `filetime` method to
/// or you will unconditionally get a `None` back.
///
/// This corresponds to `CURLINFO_FILETIME` and may return an error if the
/// option is not supported
pub fn filetime(&mut self) -> Result<Option<i64>, Error> {
    self.getopt_long(curl_sys::CURLINFO_FILETIME).map(|r| {
        // libcurl reports -1 when the document time is unknown/unavailable.
        if r == -1 {
            None
        } else {
            Some(r as i64)
        }
    })
}

/// Get the number of redirects
///
/// Corresponds to `CURLINFO_REDIRECT_COUNT` and may return an error if the
/// option isn't supported.
pub fn redirect_count(&mut self) -> Result<u32, Error> {
    self.getopt_long(curl_sys::CURLINFO_REDIRECT_COUNT).map(|c| c as u32)
}

/// Get the URL a redirect would go to
///
/// Returns the URL a redirect would take you to if you would enable
/// `follow_location`. This can come very handy if you think using the
/// built-in libcurl redirect logic isn't good enough for you but you would
/// still prefer to avoid implementing all the magic of figuring out the new
/// URL.
///
/// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error if the
/// url isn't valid utf-8 or an error happens.
pub fn redirect_url(&mut self) -> Result<Option<&str>, Error> {
    self.getopt_str(curl_sys::CURLINFO_REDIRECT_URL)
}

/// Get the URL a redirect would go to, in bytes
///
/// Returns the URL a redirect would take you to if you would enable
/// `follow_location`. This can come very handy if you think using the
/// built-in libcurl redirect logic isn't good enough for you but you would
/// still prefer to avoid implementing all the magic of figuring out the new
/// URL.
///
/// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error.
pub fn redirect_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
    self.getopt_bytes(curl_sys::CURLINFO_REDIRECT_URL)
}

/// Get size of retrieved headers
///
/// Corresponds to `CURLINFO_HEADER_SIZE` and may return an error if the
/// option isn't supported.
pub fn header_size(&mut self) -> Result<u64, Error> {
    self.getopt_long(curl_sys::CURLINFO_HEADER_SIZE).map(|c| c as u64)
}

/// Get size of sent request.
///
/// Corresponds to `CURLINFO_REQUEST_SIZE` and may return an error if the
/// option isn't supported.
pub fn request_size(&mut self) -> Result<u64, Error> {
    self.getopt_long(curl_sys::CURLINFO_REQUEST_SIZE).map(|c| c as u64)
}

/// Get Content-Type
///
/// Returns the content-type of the downloaded object. This is the value
/// read from the Content-Type: field. If you get `None`, it means that the
/// server didn't send a valid Content-Type header or that the protocol
/// used doesn't support this.
///
/// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the
/// option isn't supported.
pub fn content_type(&mut self) -> Result<Option<&str>, Error> {
    self.getopt_str(curl_sys::CURLINFO_CONTENT_TYPE)
}

/// Get Content-Type, in bytes
///
/// Returns the content-type of the downloaded object. This is the value
/// read from the Content-Type: field. If you get `None`, it means that the
/// server didn't send a valid Content-Type header or that the protocol
/// used doesn't support this.
///
/// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the
/// option isn't supported.
pub fn content_type_bytes(&mut self) -> Result<Option<&[u8]>, Error> {
    self.getopt_bytes(curl_sys::CURLINFO_CONTENT_TYPE)
}

/// Get errno number from last connect failure.
///
/// Note that the value is only set on failure, it is not reset upon a
/// successful operation. The number is OS and system specific.
///
/// Corresponds to `CURLINFO_OS_ERRNO` and may return an error if the
/// option isn't supported.
pub fn os_errno(&mut self) -> Result<i32, Error> {
    self.getopt_long(curl_sys::CURLINFO_OS_ERRNO).map(|c| c as i32)
}

/// Get IP address of last connection.
///
/// Returns a string holding the IP address of the most recent connection
/// done with this curl handle. This string may be IPv6 when that is
/// enabled.
///
/// Corresponds to `CURLINFO_PRIMARY_IP` and may return an error if the
/// option isn't supported.
pub fn primary_ip(&mut self) -> Result<Option<&str>, Error> {
    self.getopt_str(curl_sys::CURLINFO_PRIMARY_IP)
}

/// Get the latest destination port number
///
/// Corresponds to `CURLINFO_PRIMARY_PORT` and may return an error if the
/// option isn't supported.
pub fn primary_port(&mut self) -> Result<u16, Error> {
    self.getopt_long(curl_sys::CURLINFO_PRIMARY_PORT).map(|c| c as u16)
}

/// Get local IP address of last connection
///
/// Returns a string holding the IP address of the local end of most recent
/// connection done with this curl handle. This string may be IPv6 when that
/// is enabled.
///
/// Corresponds to `CURLINFO_LOCAL_IP` and may return an error if the
/// option isn't supported.
pub fn local_ip(&mut self) -> Result<Option<&str>, Error> {
    self.getopt_str(curl_sys::CURLINFO_LOCAL_IP)
}

/// Get the latest local port number
///
/// Corresponds to `CURLINFO_LOCAL_PORT` and may return an error if the
/// option isn't supported.
pub fn local_port(&mut self) -> Result<u16, Error> {
    self.getopt_long(curl_sys::CURLINFO_LOCAL_PORT).map(|c| c as u16)
}

/// Get all known cookies
///
/// Returns a linked-list of all cookies cURL knows (expired ones, too).
///
/// Corresponds to the `CURLINFO_COOKIELIST` option and may return an error
/// if the option isn't supported.
pub fn cookies(&mut self) -> Result<List, Error> {
    unsafe {
        let mut list = 0 as *mut _;
        let rc = curl_sys::curl_easy_getinfo(self.handle,
                                             curl_sys::CURLINFO_COOKIELIST,
                                             &mut list);
        try!(self.cvt(rc));
        Ok(List { raw: list })
    }
}

// =========================================================================
// Other methods

/// After options have been set, this will perform the transfer described by
/// the options.
///
/// This performs the request in a synchronous fashion. This can be used
/// multiple times for one easy handle and libcurl will attempt to re-use
/// the same connection for all transfers.
///
/// This method will preserve all options configured in this handle for the
/// next request, and if that is not desired then the options can be
/// manually reset or the `reset` method can be called.
///
/// Note that this method takes `&self`, which is quite important! This
/// allows applications to close over the handle in various callbacks to
/// call methods like `unpause_write` and `unpause_read` while a transfer is
/// in progress.
pub fn perform(&self) -> Result<(), Error> {
    // Discard any scoped (Transfer-local) callbacks before running; see the
    // long note in `transfer` below for why this must happen first.
    unsafe {
        self.reset_scoped_configuration();
    }
    self.do_perform()
}

// Runs the actual transfer, guarding against re-entrant calls via the
// `running` flag and re-raising any panic that escaped a callback.
fn do_perform(&self) -> Result<(), Error> {
    if self.data.running.get() {
        return Err(Error::new(curl_sys::CURLE_FAILED_INIT))
    }

    self.data.running.set(true);
    let ret = unsafe {
        self.cvt(curl_sys::curl_easy_perform(self.handle))
    };
    self.data.running.set(false);
    panic::propagate();
    return ret
}

/// Creates a new scoped transfer which can be used to set callbacks and
/// data which only live for the scope of the returned object.
///
/// An `Easy` handle is often reused between different requests to cache
/// connections to servers, but often the lifetime of the data as part of
/// each transfer is unique. This function serves as an ability to share an
/// `Easy` across many transfers while ergonomically using possibly
/// stack-local data as part of each transfer.
///
/// Configuration can be set on the `Easy` and then a `Transfer` can be
/// created to set scoped configuration (like callbacks). Finally, the
/// `perform` method on the `Transfer` function can be used.
///
/// When the `Transfer` option is dropped then all configuration set on the
/// transfer itself will be reset.
pub fn transfer<'data, 'easy>(&'easy mut self) -> Transfer<'easy, 'data> {
    // NB: We need to be *very* careful here about how we treat the
    //     callbacks set on a `Transfer`! It may be possible for that type
    //     to leak, and if we were to continue using the callbacks there
    //     there could possibly be use-after-free as they reference
    //     stack-local data. As a result, we attempt to be robust in the
    //     face of leaking a `Transfer` (one that didn't drop).
    //
    //     What this basically amounts to is that whenever we poke libcurl
    //     that *might* call one of those callbacks or use some of that data
    //     we clear out everything that would have been set on a `Transfer`
    //     and instead start fresh. This call to
    //     `reset_scoped_configuration` will reset all callbacks based on
    //     the state in *this* handle which we know is still alive, so it's
    //     safe to configure.
    //
    //     Also note that because we have to be resilient in the face of
    //     `Transfer` leaks anyway we just don't bother with a `Drop` impl
    //     and instead rely on this always running to reset any
    //     configuration.
    assert!(!self.data.running.get());
    unsafe {
        self.reset_scoped_configuration();
    }
    Transfer {
        data: Box::new(TransferData::default()),
        easy: self,
    }
}

// See note above in `transfer` for what this is doing.
// Re-points every libcurl callback at this handle's own (`EasyData`)
// callbacks, wiping out anything a (possibly leaked) `Transfer` installed.
unsafe fn reset_scoped_configuration(&self) {
    // Exhaustively destructure so adding a field to `EasyData` forces this
    // function to be revisited.
    let EasyData {
        ref write,
        ref read,
        ref seek,
        ref debug,
        ref header,
        ref progress,
        ref running,
        header_list: _,
        form: _,
        error_buf: _,
    } = *self.data;

    // Can't reset while running, we'll detect this elsewhere
    if running.get() {
        return
    }

    // Pass a pointer to our data only when the corresponding callback is
    // actually set; a null user pointer makes the trampolines no-ops.
    let ptr = |set| {
        if set {
            &*self.data as *const _ as *mut c_void
        } else {
            0 as *mut _
        }
    };

    let write = ptr(write.is_some());
    let read = ptr(read.is_some());
    let seek = ptr(seek.is_some());
    let debug = ptr(debug.is_some());
    let header = ptr(header.is_some());
    let progress = ptr(progress.is_some());

    let _ = self.set_write_function(easy_write_cb, write);
    let _ = self.set_read_function(easy_read_cb, read);
    let _ = self.set_seek_function(easy_seek_cb, seek);
    let _ = self.set_debug_function(easy_debug_cb, debug);
    let _ = self.set_header_function(easy_header_cb, header);
    let _ = self.set_progress_function(easy_progress_cb, progress);

    // Clear out the post fields which may be referencing stale data.
    // curl_sys::curl_easy_setopt(easy,
    //                            curl_sys::CURLOPT_POSTFIELDS,
    //                            0 as *const i32);
}

/// Unpause reading on a connection.
///
/// Using this function, you can explicitly unpause a connection that was
/// previously paused.
///
/// A connection can be paused by letting the read or the write callbacks
/// return `ReadError::Pause` or `WriteError::Pause`.
///
/// To unpause, you may for example call this from the progress callback
/// which gets called at least once per second, even if the connection is
/// paused.
///
/// The chance is high that you will get your write callback called before
/// this function returns.
pub fn unpause_read(&self) -> Result<(), Error> {
    unsafe {
        let rc = curl_sys::curl_easy_pause(self.handle,
                                           curl_sys::CURLPAUSE_RECV_CONT);
        self.cvt(rc)
    }
}

/// Unpause writing on a connection.
///
/// Using this function, you can explicitly unpause a connection that was
/// previously paused.
///
/// A connection can be paused by letting the read or the write callbacks
/// return `ReadError::Pause` or `WriteError::Pause`. A write callback that
/// returns pause signals to the library that it couldn't take care of any
/// data at all, and that data will then be delivered again to the callback
/// when the writing is later unpaused.
///
/// To unpause, you may for example call this from the progress callback
/// which gets called at least once per second, even if the connection is
/// paused.
pub fn unpause_write(&self) -> Result<(), Error> {
    unsafe {
        let rc = curl_sys::curl_easy_pause(self.handle,
                                           curl_sys::CURLPAUSE_SEND_CONT);
        self.cvt(rc)
    }
}

/// URL encodes a string `s`
pub fn url_encode(&mut self, s: &[u8]) -> String {
    if s.len() == 0 {
        return String::new()
    }
    unsafe {
        let p = curl_sys::curl_easy_escape(self.handle,
                                           s.as_ptr() as *const _,
                                           s.len() as c_int);
        assert!(!p.is_null());
        let ret = str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap();
        let ret = String::from(ret);
        // The returned buffer is owned by libcurl and must be freed with
        // curl_free after we've copied it out.
        curl_sys::curl_free(p as *mut _);
        return ret
    }
}

/// URL decodes a string `s`, returning `None` if it fails
pub fn url_decode(&mut self, s: &str) -> Vec<u8> {
    if s.len() == 0 {
        return Vec::new();
    }

    // Work around https://curl.haxx.se/docs/adv_20130622.html, a bug where
    // if the last few characters are a bad escape then curl will have a
    // buffer overrun.
    let mut iter = s.chars().rev();
    let orig_len = s.len();
    let mut data;
    let mut s = s;
    // Each `iter.next()` advances, so the short-circuit below checks whether
    // *any* of the last three characters is '%' (a possibly truncated
    // escape); if so we append a NUL so curl's over-read stays in-bounds.
    if iter.next() == Some('%') ||
       iter.next() == Some('%') ||
       iter.next() == Some('%') {
        data = s.to_string();
        data.push(0u8 as char);
        s = &data[..];
    }
    unsafe {
        let mut len = 0;
        let p = curl_sys::curl_easy_unescape(self.handle,
                                             s.as_ptr() as *const _,
                                             orig_len as c_int,
                                             &mut len);
        assert!(!p.is_null());
        let slice = slice::from_raw_parts(p as *const u8, len as usize);
        let ret = slice.to_vec();
        curl_sys::curl_free(p as *mut _);
        return ret
    }
}

// TODO: I don't think this is safe, you can drop this which has all the
//       callback data and then the next is use-after-free
//
// /// Attempts to clone this handle, returning a new session handle with the
// /// same options set for this handle.
// ///
// /// Internal state info and things like persistent connections ccannot be
// /// transferred.
// ///
// /// # Errors
// ///
// /// If a new handle could not be allocated or another error happens, `None`
// /// is returned.
// pub fn try_clone<'b>(&mut self) -> Option<Easy<'b>> {
//     unsafe {
//         let handle = curl_sys::curl_easy_duphandle(self.handle);
//         if handle.is_null() {
//             None
//         } else {
//             Some(Easy {
//                 handle: handle,
//                 data: blank_data(),
//                 _marker: marker::PhantomData,
//             })
//         }
//     }
// }

/// Re-initializes this handle to the default values.
///
/// This puts the handle to the same state as it was in when it was just
/// created. This does, however, keep live connections, the session id
/// cache, the dns cache, and cookies.
pub fn reset(&mut self) {
    unsafe {
        curl_sys::curl_easy_reset(self.handle);
    }
    default_configure(self);
}

/// Receives data from a connected socket.
///
/// Only useful after a successful `perform` with the `connect_only` option
/// set as well.
pub fn recv(&mut self, data: &mut [u8]) -> Result<usize, Error> {
    unsafe {
        let mut n = 0;
        let r = curl_sys::curl_easy_recv(self.handle,
                                         data.as_mut_ptr() as *mut _,
                                         data.len(),
                                         &mut n);
        if r == curl_sys::CURLE_OK {
            Ok(n)
        } else {
            Err(Error::new(r))
        }
    }
}

/// Sends data over the connected socket.
///
/// Only useful after a successful `perform` with the `connect_only` option
/// set as well.
pub fn send(&mut self, data: &[u8]) -> Result<usize, Error> {
    unsafe {
        let mut n = 0;
        let rc = curl_sys::curl_easy_send(self.handle,
                                          data.as_ptr() as *const _,
                                          data.len(),
                                          &mut n);
        try!(self.cvt(rc));
        Ok(n)
    }
}

/// Get a pointer to the raw underlying CURL handle.
pub fn raw(&self) -> *mut curl_sys::CURL {
    self.handle
}

// On Unix, paths are arbitrary bytes and can be handed to CString directly.
#[cfg(unix)]
fn setopt_path(&mut self, opt: curl_sys::CURLoption, val: &Path)
               -> Result<(), Error> {
    use std::os::unix::prelude::*;
    let s = try!(CString::new(val.as_os_str().as_bytes()));
    self.setopt_str(opt, &s)
}

// On Windows, only valid-UTF-8 paths can be passed through to libcurl.
#[cfg(windows)]
fn setopt_path(&mut self, opt: curl_sys::CURLoption, val: &Path)
               -> Result<(), Error> {
    match val.to_str() {
        Some(s) => self.setopt_str(opt, &try!(CString::new(s))),
        None => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
    }
}

fn setopt_long(&mut self, opt: curl_sys::CURLoption, val: c_long)
               -> Result<(), Error> {
    unsafe {
        self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val))
    }
}

fn setopt_str(&mut self, opt: curl_sys::CURLoption, val: &CStr)
              -> Result<(), Error> {
    self.setopt_ptr(opt, val.as_ptr())
}

fn setopt_ptr(&self, opt: curl_sys::CURLoption, val: *const c_char)
              -> Result<(), Error> {
    unsafe {
        self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val))
    }
}

fn setopt_off_t(&mut self,
                opt: curl_sys::CURLoption,
                val: curl_sys::curl_off_t) -> Result<(), Error> {
    unsafe {
        let rc = curl_sys::curl_easy_setopt(self.handle, opt, val);
        self.cvt(rc)
    }
}

fn getopt_bytes(&mut self, opt: curl_sys::CURLINFO)
                -> Result<Option<&[u8]>, Error> {
    unsafe {
        let p = try!(self.getopt_ptr(opt));
        if p.is_null() {
            Ok(None)
        } else {
            Ok(Some(CStr::from_ptr(p).to_bytes()))
        }
    }
}

fn getopt_ptr(&mut self, opt: curl_sys::CURLINFO)
              -> Result<*const c_char, Error> {
    unsafe {
        let mut p = 0 as *const c_char;
        let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p);
        try!(self.cvt(rc));
        Ok(p)
    }
}

fn getopt_str(&mut self, opt: curl_sys::CURLINFO)
              -> Result<Option<&str>, Error> {
    match self.getopt_bytes(opt) {
        Ok(None) => Ok(None),
        Err(e) => Err(e),
        Ok(Some(bytes)) => {
            match str::from_utf8(bytes) {
                Ok(s) => Ok(Some(s)),
                Err(_) => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
            }
        }
    }
}

fn getopt_long(&mut self, opt: curl_sys::CURLINFO) -> Result<c_long, Error> {
    unsafe {
        let mut p = 0;
        let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p);
        try!(self.cvt(rc));
        Ok(p)
    }
}

// Converts a libcurl return code into a `Result`, attaching the message
// libcurl wrote into our error buffer (CURLOPT_ERRORBUFFER) if present, and
// clearing the buffer for the next operation.
fn cvt(&self, rc: curl_sys::CURLcode) -> Result<(), Error> {
    if rc == curl_sys::CURLE_OK {
        return Ok(())
    }
    let mut buf = self.data.error_buf.borrow_mut();
    if buf[0] == 0 {
        return Err(Error::new(rc))
    }
    let pos = buf.iter().position(|i| *i == 0).unwrap_or(buf.len());
    let msg = str::from_utf8(&buf[..pos]).expect("non-utf8 error").to_owned();
    buf[0] = 0;
    Err(::error::error_with_extra(rc, msg.into_boxed_str()))
}
}

// The `easy_*`/`transfer_*` trampolines below only differ in which user-data
// struct (`EasyData` vs `TransferData`) the opaque pointer is cast to; the
// shared logic lives in the corresponding `*_cb` helpers.

extern fn easy_write_cb(ptr: *mut c_char,
                        size: size_t,
                        nmemb: size_t,
                        data: *mut c_void) -> size_t {
    write_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut EasyData)).write.as_mut().map(|f| f(buf))
    })
}

extern fn transfer_write_cb(ptr: *mut c_char,
                            size: size_t,
                            nmemb: size_t,
                            data: *mut c_void) -> size_t {
    write_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut TransferData)).write.as_mut().map(|f| f(buf))
    })
}

fn write_cb<F>(ptr: *mut c_char, size: size_t, nmemb: size_t,
               data: *mut c_void, f: F) -> size_t
    where F: FnOnce(&[u8]) -> Option<Result<usize, WriteError>>
{
    // A null user pointer means no callback is installed; accept everything.
    if data.is_null() {
        return size * nmemb
    }
    // `!0` (an impossible byte count) signals an error to libcurl; it's also
    // what we return if the user callback panicked.
    panic::catch(|| unsafe {
        let input = slice::from_raw_parts(ptr as *const u8,
                                          size * nmemb);
        match f(input) {
            Some(Ok(s)) => s,
            Some(Err(WriteError::Pause)) |
            Some(Err(WriteError::__Nonexhaustive)) => {
                curl_sys::CURL_WRITEFUNC_PAUSE
            }
            None => !0,
        }
    }).unwrap_or(!0)
}

extern fn easy_read_cb(ptr: *mut c_char,
                       size: size_t,
                       nmemb: size_t,
                       data: *mut c_void) -> size_t {
    read_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut EasyData)).read.as_mut().map(|f| f(buf))
    })
}

extern fn transfer_read_cb(ptr: *mut c_char,
                           size: size_t,
                           nmemb: size_t,
                           data: *mut c_void) -> size_t {
    read_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut TransferData)).read.as_mut().map(|f| f(buf))
    })
}

fn read_cb<F>(ptr: *mut c_char, size: size_t, nmemb: size_t,
              data: *mut c_void, f: F) -> size_t
    where F: FnOnce(&mut [u8]) -> Option<Result<usize, ReadError>>
{
    unsafe {
        // No callback installed: report EOF (0 bytes read).
        if data.is_null() {
            return 0
        }
        let input = slice::from_raw_parts_mut(ptr as *mut u8,
                                              size * nmemb);
        panic::catch(|| {
            match f(input) {
                Some(Ok(s)) => s,
                Some(Err(ReadError::Pause)) => {
                    curl_sys::CURL_READFUNC_PAUSE
                }
                Some(Err(ReadError::__Nonexhaustive)) |
                Some(Err(ReadError::Abort)) => {
                    curl_sys::CURL_READFUNC_ABORT
                }
                None => !0,
            }
        }).unwrap_or(!0)
    }
}

extern fn easy_seek_cb(data: *mut c_void,
                       offset: curl_sys::curl_off_t,
                       origin: c_int) -> c_int {
    seek_cb(data, offset, origin, |s| unsafe {
        (*(data as *mut EasyData)).seek.as_mut().map(|f| f(s))
    })
}

extern fn transfer_seek_cb(data: *mut c_void,
                           offset: curl_sys::curl_off_t,
                           origin: c_int) -> c_int {
    seek_cb(data, offset, origin, |s| unsafe {
        (*(data as *mut TransferData)).seek.as_mut().map(|f| f(s))
    })
}

fn seek_cb<F>(data: *mut c_void,
              offset: curl_sys::curl_off_t,
              origin: c_int,
              f: F) -> c_int
    where F: FnOnce(SeekFrom) -> Option<SeekResult>
{
    if data.is_null() {
        return -1
    }
    panic::catch(|| {
        // libcurl is only documented to seek with SEEK_SET; anything else is
        // treated as a broken invariant (caught and mapped to an error).
        let from = if origin == libc::SEEK_SET {
            SeekFrom::Start(offset as u64)
        } else {
            panic!("unknown origin from libcurl: {}", origin);
        };
        match f(from) {
            Some(to) => to as c_int,
            None => -1,
        }
    }).unwrap_or(!0)
}

extern fn easy_progress_cb(data: *mut c_void,
                           dltotal: c_double,
                           dlnow: c_double,
                           ultotal: c_double,
                           ulnow: c_double) -> c_int {
    progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe {
        (*(data as *mut EasyData)).progress.as_mut().map(|f| f(a, b, c, d))
    })
}

extern fn transfer_progress_cb(data: *mut c_void,
                               dltotal: c_double,
                               dlnow: c_double,
                               ultotal: c_double,
                               ulnow: c_double) -> c_int {
    progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe {
        (*(data as *mut TransferData)).progress.as_mut().map(|f| f(a, b, c, d))
    })
}

fn progress_cb<F>(data: *mut c_void,
                  dltotal: c_double,
                  dlnow: c_double,
                  ultotal: c_double,
                  ulnow: c_double,
                  f: F) -> c_int
    where F: FnOnce(f64, f64, f64, f64) -> Option<bool>,
{
    if data.is_null() {
        return 0
    }
    let keep_going = panic::catch(|| {
        f(dltotal, dlnow, ultotal, ulnow).unwrap_or(false)
    }).unwrap_or(false);
    // Returning non-zero from a progress callback aborts the transfer.
    if keep_going {
        0
    } else {
        1
    }
}

extern fn easy_debug_cb(handle: *mut curl_sys::CURL,
                        kind: curl_sys::curl_infotype,
                        data: *mut c_char,
                        size: size_t,
                        userptr: *mut c_void) -> c_int {
    debug_cb(handle, kind, data, size, userptr, |a, b| unsafe {
        (*(userptr as *mut EasyData)).debug.as_mut().map(|f| f(a, b))
    })
}

extern fn transfer_debug_cb(handle: *mut curl_sys::CURL,
                            kind: curl_sys::curl_infotype,
                            data: *mut c_char,
                            size: size_t,
                            userptr: *mut c_void) -> c_int {
    debug_cb(handle, kind, data, size, userptr, |a, b| unsafe {
        (*(userptr as *mut TransferData)).debug.as_mut().map(|f| f(a, b))
    })
}

// TODO: expose `handle`? is that safe?
// Shared implementation backing both `easy_debug_cb` and `transfer_debug_cb`.
// Translates libcurl's raw `curl_infotype` + byte buffer into the safe
// `InfoType`/`&[u8]` callback signature. libcurl ignores the return value of
// CURLOPT_DEBUGFUNCTION, so 0 is always returned.
fn debug_cb<F>(_handle: *mut curl_sys::CURL,
               kind: curl_sys::curl_infotype,
               data: *mut c_char,
               size: size_t,
               userptr: *mut c_void,
               f: F) -> c_int
    where F: FnOnce(InfoType, &[u8]) -> Option<()>
{
    // No user data registered: nothing to do.
    if userptr.is_null() {
        return 0
    }
    // `panic::catch` prevents a user panic from unwinding across the FFI
    // boundary (which would be undefined behavior).
    panic::catch(|| unsafe {
        let data = slice::from_raw_parts(data as *const u8, size);
        let kind = match kind {
            curl_sys::CURLINFO_TEXT => InfoType::Text,
            curl_sys::CURLINFO_HEADER_IN => InfoType::HeaderIn,
            curl_sys::CURLINFO_HEADER_OUT => InfoType::HeaderOut,
            curl_sys::CURLINFO_DATA_IN => InfoType::DataIn,
            curl_sys::CURLINFO_DATA_OUT => InfoType::DataOut,
            curl_sys::CURLINFO_SSL_DATA_IN => InfoType::SslDataIn,
            curl_sys::CURLINFO_SSL_DATA_OUT => InfoType::SslDataOut,
            // Unknown info type from a newer libcurl: skip invoking `f`.
            _ => return,
        };
        f(kind, data);
    });
    return 0
}

// CURLOPT_HEADERFUNCTION trampoline for `Easy` handles: forwards to the
// closure stored in `EasyData` (the userdata pointer libcurl hands back).
extern fn easy_header_cb(buffer: *mut c_char,
                         size: size_t,
                         nitems: size_t,
                         userptr: *mut c_void) -> size_t {
    header_cb(buffer, size, nitems, userptr, |buf| unsafe {
        (*(userptr as *mut EasyData)).header.as_mut().map(|f| f(buf))
    })
}

// Same trampoline for scoped `Transfer` handles (`TransferData`).
extern fn transfer_header_cb(buffer: *mut c_char,
                             size: size_t,
                             nitems: size_t,
                             userptr: *mut c_void) -> size_t {
    header_cb(buffer, size, nitems, userptr, |buf| unsafe {
        (*(userptr as *mut TransferData)).header.as_mut().map(|f| f(buf))
    })
}

// Shared header-callback implementation. libcurl delivers one complete
// header of `size * nitems` bytes per invocation; returning any other value
// (here `!0`) aborts the transfer, which is how a user callback returning
// `false` (or panicking) is surfaced.
fn header_cb<F>(buffer: *mut c_char,
                size: size_t,
                nitems: size_t,
                userptr: *mut c_void,
                f: F) -> size_t
    where F: FnOnce(&[u8]) -> Option<bool>,
{
    if userptr.is_null() {
        return size * nitems
    }
    let keep_going = panic::catch(|| unsafe {
        let data = slice::from_raw_parts(buffer as *const u8, size * nitems);
        // `None` (no callback registered) is treated as "stop".
        f(data).unwrap_or(false)
    }).unwrap_or(false);
    if keep_going {
        size * nitems
    } else {
        !0
    }
}

impl<'easy, 'data> Transfer<'easy, 'data> {
    /// Same as `Easy::write_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn write_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&[u8]) -> Result<usize, WriteError> + 'data
    {
        // Store the closure in `TransferData` first so the raw pointer
        // handed to libcurl below stays valid for the whole transfer.
        self.data.write = Some(Box::new(f));
        unsafe {
            self.easy.set_write_function(transfer_write_cb,
                                         &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::read_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn read_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&mut [u8]) -> Result<usize, ReadError> + 'data
    {
        self.data.read = Some(Box::new(f));
        unsafe {
            self.easy.set_read_function(transfer_read_cb,
                                        &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::seek_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn seek_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(SeekFrom) -> SeekResult + 'data
    {
        self.data.seek = Some(Box::new(f));
        unsafe {
            self.easy.set_seek_function(transfer_seek_cb,
                                        &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::progress_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn progress_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(f64, f64, f64, f64) -> bool + 'data
    {
        self.data.progress = Some(Box::new(f));
        unsafe {
            self.easy.set_progress_function(transfer_progress_cb,
                                            &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::debug_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn debug_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(InfoType, &[u8]) + 'data
    {
        self.data.debug = Some(Box::new(f));
        unsafe {
            self.easy.set_debug_function(transfer_debug_cb,
                                         &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::header_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn header_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&[u8]) -> bool + 'data
    {
        // Store the closure first so the pointer registered with libcurl
        // remains valid for the lifetime of this transfer.
        self.data.header = Some(Box::new(f));
        unsafe {
            self.easy.set_header_function(transfer_header_cb,
                                          &*self.data as *const _ as *mut _)
        }
    }

    // TODO: need to figure out how to expose this, but it also needs to be
    //       reset as part of `reset_scoped_configuration` above. Unfortunately
    //       setting `CURLOPT_POSTFIELDS` to null will switch the request to
    //       POST, which is not what we want.
    //
    // /// Configures the data that will be uploaded as part of a POST.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_POSTFIELDS`.
    // pub fn post_fields(&mut self, data: &'data [u8]) -> Result<(), Error> {
    //     // Set the length before the pointer so libcurl knows how much to read
    //     try!(self.easy.post_field_size(data.len() as u64));
    //     self.easy.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS,
    //                          data.as_ptr() as *const _)
    // }

    /// Same as `Easy::perform`.
    pub fn perform(&self) -> Result<(), Error> {
        self.easy.do_perform()
    }

    /// Same as `Easy::unpause_read`.
    pub fn unpause_read(&self) -> Result<(), Error> {
        self.easy.unpause_read()
    }

    /// Same as `Easy::unpause_write`.
    pub fn unpause_write(&self) -> Result<(), Error> {
        self.easy.unpause_write()
    }
}

// One-time setup applied to every freshly created `Easy` handle.
fn default_configure(handle: &mut Easy) {
    // Register a buffer for libcurl to write detailed error messages into.
    // The Vec is owned by `EasyData` so the pointer stays valid for the
    // handle's lifetime.
    handle.data.error_buf = RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]);
    handle.setopt_ptr(curl_sys::CURLOPT_ERRORBUFFER,
                      handle.data.error_buf.borrow().as_ptr() as *const _)
          .expect("failed to set error buffer");
    // Disable signal handlers by default so the handle is thread-safe;
    // failure here is deliberately ignored (best effort).
    let _ = handle.signal(false);
    ssl_configure(handle);
}

// On non-macOS unix, probe the system for CA certificate locations and point
// libcurl at them (best effort; failures ignored).
#[cfg(all(unix, not(target_os = "macos")))]
fn ssl_configure(handle: &mut Easy) {
    let probe = ::openssl_sys::probe::probe();
    if let Some(ref path) = probe.cert_file {
        let _ = handle.cainfo(path);
    }
    if let Some(ref path) = probe.cert_dir {
        let _ = handle.capath(path);
    }
}

// Elsewhere the platform's default certificate store is used as-is.
#[cfg(not(all(unix, not(target_os = "macos"))))]
fn ssl_configure(_handle: &mut Easy) {}

impl Drop for Easy {
    fn drop(&mut self) {
        unsafe {
            curl_sys::curl_easy_cleanup(self.handle);
        }
    }
}

impl List {
    /// Creates a new empty list of strings.
    pub fn new() -> List {
        List { raw: 0 as *mut _ }
    }

    /// Appends some data into this list.
    pub fn append(&mut self, data: &str) -> Result<(), Error> {
        let data = try!(CString::new(data));
        unsafe {
            // `curl_slist_append` copies `data` and returns the (possibly
            // new) head of the list; null indicates allocation failure.
            let raw = curl_sys::curl_slist_append(self.raw, data.as_ptr());
            assert!(!raw.is_null());
            self.raw = raw;
            Ok(())
        }
    }

    /// Returns an iterator over the nodes in this list.
    pub fn iter(&self) -> Iter {
        Iter { _me: self, cur: self.raw }
    }
}

impl Drop for List {
    fn drop(&mut self) {
        unsafe {
            curl_sys::curl_slist_free_all(self.raw)
        }
    }
}

impl<'a> Iterator for Iter<'a> {
    type Item = &'a [u8];

    fn next(&mut self) -> Option<&'a [u8]> {
        if self.cur.is_null() {
            return None
        }

        unsafe {
            // The yielded slice excludes the trailing nul terminator.
            let ret = Some(CStr::from_ptr((*self.cur).data).to_bytes());
            self.cur = (*self.cur).next;
            return ret
        }
    }
}

impl Form {
    /// Creates a new blank form ready for the addition of new data.
    pub fn new() -> Form {
        Form {
            head: 0 as *mut _,
            tail: 0 as *mut _,
            headers: Vec::new(),
            buffers: Vec::new(),
            strings: Vec::new(),
        }
    }

    /// Prepares adding a new part to this `Form`
    ///
    /// Note that the part is not actually added to the form until the `add`
    /// method is called on `Part`, which may or may not fail.
    pub fn part<'a, 'data>(&'a mut self, name: &'data str) -> Part<'a, 'data> {
        Part {
            error: None,
            form: self,
            name: name,
            // The option array is kept terminated with CURLFORM_END at all
            // times; new options are inserted just before this sentinel.
            array: vec![curl_sys::curl_forms {
                option: curl_sys::CURLFORM_END,
                value: 0 as *mut _,
            }],
        }
    }
}

impl Drop for Form {
    fn drop(&mut self) {
        unsafe {
            curl_sys::curl_formfree(self.head);
        }
    }
}

impl<'form, 'data> Part<'form, 'data> {
    /// A pointer to the contents of this part, the actual data to send away.
    pub fn contents(&mut self, contents: &'data [u8]) -> &mut Self {
        // Insert just before the trailing CURLFORM_END sentinel.
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_COPYCONTENTS,
            value: contents.as_ptr() as *mut _,
        });
        // Length must accompany the pointer since the data may contain nuls.
        self.array.insert(pos + 1, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_CONTENTSLENGTH,
            value: contents.len() as *mut _,
        });
        self
    }

    /// Causes this file to be read and its contents used as data in this part
    ///
    /// This part does not automatically become a file upload part simply
    /// because its data was read from a file.
    ///
    /// # Errors
    ///
    /// If the filename has any internal nul bytes or if on Windows it does not
    /// contain a unicode filename then the `add` function will eventually
    /// return an error.
    pub fn file_content<P>(&mut self, file: P) -> &mut Self
        where P: AsRef<Path>
    {
        self._file_content(file.as_ref())
    }

    // Monomorphization-friendly inner implementation of `file_content`.
    fn _file_content(&mut self, file: &Path) -> &mut Self {
        // On conversion failure `path2cstr` records a deferred error that
        // `add` will report; the option is silently skipped here.
        if let Some(bytes) = self.path2cstr(file) {
            let pos = self.array.len() - 1;
            self.array.insert(pos, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_FILECONTENT,
                value: bytes.as_ptr() as *mut _,
            });
            // Keep the CString alive as long as the form references it.
            self.form.strings.push(bytes);
        }
        self
    }

    /// Makes this part a file upload part of the given file.
    ///
    /// Sets the filename field to the basename of the provided file name, and
    /// it reads the contents of the file and passes them as data and sets the
    /// content type if the given file matches one of the internally known file
    /// extensions.
    ///
    /// The given upload file must exist entirely on the filesystem before the
    /// upload is started because libcurl needs to read the size of it
    /// beforehand.
    ///
    /// Multiple files can be uploaded by calling this method multiple times and
    /// content types can also be configured for each file (by calling that
    /// next).
    ///
    /// # Errors
    ///
    /// If the filename has any internal nul bytes or if on Windows it does not
    /// contain a unicode filename then this function will cause `add` to return
    /// an error when called.
    pub fn file<P: ?Sized>(&mut self, file: &'data P) -> &mut Self
        where P: AsRef<Path>
    {
        self._file(file.as_ref())
    }

    // Monomorphization-friendly inner implementation of `file`.
    fn _file(&mut self, file: &'data Path) -> &mut Self {
        // Conversion failure is recorded by `path2cstr` and reported by
        // `add`; the option is skipped here.
        if let Some(bytes) = self.path2cstr(file) {
            let pos = self.array.len() - 1;
            self.array.insert(pos, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_FILE,
                value: bytes.as_ptr() as *mut _,
            });
            // Keep the CString alive for as long as the form needs it.
            self.form.strings.push(bytes);
        }
        self
    }

    /// Used in combination with `Part::file`, provides the content-type for
    /// this part, possibly instead of choosing an internal one.
    ///
    /// # Errors
    ///
    /// If `content_type` contains an internal nul byte this function will
    /// cause `add` to return an error when called (the conversion failure is
    /// recorded and deferred, it does not panic here).
    pub fn content_type(&mut self, content_type: &'data str) -> &mut Self {
        if let Some(bytes) = self.bytes2cstr(content_type.as_bytes()) {
            let pos = self.array.len() - 1;
            self.array.insert(pos, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_CONTENTTYPE,
                value: bytes.as_ptr() as *mut _,
            });
            self.form.strings.push(bytes);
        }
        self
    }

    /// Used in combination with `Part::file`, provides the filename for
    /// this part instead of the actual one.
    ///
    /// # Errors
    ///
    /// If `name` contains an internal nul byte, or if on Windows the path is
    /// not valid unicode then this function will return an error when `add` is
    /// called.
    pub fn filename<P: ?Sized>(&mut self, name: &'data P) -> &mut Self
        where P: AsRef<Path>
    {
        self._filename(name.as_ref())
    }

    // Monomorphization-friendly inner implementation of `filename`.
    fn _filename(&mut self, name: &'data Path) -> &mut Self {
        if let Some(bytes) = self.path2cstr(name) {
            let pos = self.array.len() - 1;
            self.array.insert(pos, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_FILENAME,
                value: bytes.as_ptr() as *mut _,
            });
            self.form.strings.push(bytes);
        }
        self
    }

    /// This is used to provide a custom file upload part without using the
    /// `file` method above.
    ///
    /// The first parameter is for the filename field and the second is the
    /// in-memory contents.
    ///
    /// # Errors
    ///
    /// If `name` contains an internal nul byte, or if on Windows the path is
    /// not valid unicode then this function will return an error when `add` is
    /// called.
    pub fn buffer<P: ?Sized>(&mut self, name: &'data P, data: Vec<u8>)
                             -> &mut Self
        where P: AsRef<Path>
    {
        self._buffer(name.as_ref(), data)
    }

    // Monomorphization-friendly inner implementation of `buffer`.
    fn _buffer(&mut self, name: &'data Path, data: Vec<u8>) -> &mut Self {
        if let Some(bytes) = self.path2cstr(name) {
            let pos = self.array.len() - 1;
            self.array.insert(pos, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_BUFFER,
                value: bytes.as_ptr() as *mut _,
            });
            self.form.strings.push(bytes);
            // Pointer + length pair for the in-memory payload; the Vec is
            // moved into `form.buffers` below so the pointer stays valid
            // (moving a Vec does not move its heap allocation).
            self.array.insert(pos + 1, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_BUFFERPTR,
                value: data.as_ptr() as *mut _,
            });
            self.array.insert(pos + 2, curl_sys::curl_forms {
                option: curl_sys::CURLFORM_BUFFERLENGTH,
                value: data.len() as *mut _,
            });
            self.form.buffers.push(data);
        }
        self
    }

    /// Specifies extra headers for the form POST section.
    ///
    /// Appends the list of headers to those libcurl automatically generates.
    pub fn content_header(&mut self, headers: List) -> &mut Self {
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_CONTENTHEADER,
            value: headers.raw as *mut _,
        });
        // The `List` owns the slist; keep it alive with the form.
        self.form.headers.push(headers);
        self
    }

    /// Attempts to add this part to the `Form` that it was created from.
    ///
    /// If any error happens while adding that error is returned, otherwise if
    /// the part was successfully appended then `Ok(())` is returned.
    pub fn add(&mut self) -> Result<(), FormError> {
        // Report any conversion error deferred by earlier builder calls.
        if let Some(err) = self.error.clone() {
            return Err(err)
        }
        let rc = unsafe {
            // The name is passed as a pointer + explicit length
            // (CURLFORM_NAMELENGTH) since `&str` is not nul-terminated.
            curl_sys::curl_formadd(&mut self.form.head,
                                   &mut self.form.tail,
                                   curl_sys::CURLFORM_COPYNAME,
                                   self.name.as_ptr(),
                                   curl_sys::CURLFORM_NAMELENGTH,
                                   self.name.len(),
                                   curl_sys::CURLFORM_ARRAY,
                                   self.array.as_ptr(),
                                   curl_sys::CURLFORM_END)
        };
        if rc == curl_sys::CURL_FORMADD_OK {
            Ok(())
        } else {
            Err(FormError::new(rc))
        }
    }

    // Converts a path to a CString; on failure records a deferred error
    // (reported by `add`) and returns None.
    #[cfg(unix)]
    fn path2cstr(&mut self, p: &Path) -> Option<CString> {
        use std::os::unix::prelude::*;
        self.bytes2cstr(p.as_os_str().as_bytes())
    }

    // Windows paths must additionally be valid unicode to be converted.
    #[cfg(windows)]
    fn path2cstr(&mut self, p: &Path) -> Option<CString> {
        match p.to_str() {
            Some(bytes) => self.bytes2cstr(bytes.as_bytes()),
            None if self.error.is_none() => {
                // TODO: better error code
                self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
                None
            }
            None => None,
        }
    }

    // Converts raw bytes to a CString; an interior nul byte records a
    // deferred error (only the first such error is kept).
    fn bytes2cstr(&mut self, bytes: &[u8]) -> Option<CString> {
        match CString::new(bytes) {
            Ok(c) => Some(c),
            Err(..) if self.error.is_none() => {
                // TODO: better error code
                self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
                None
            }
            Err(..) => None,
        }
    }
}

impl Auth {
    /// Creates a new set of authentications with no members.
    ///
    /// An `Auth` structure is used to configure which forms of authentication
    /// are attempted when negotiating connections with servers.
    pub fn new() -> Auth {
        Auth { bits: 0 }
    }

    /// HTTP Basic authentication.
/// /// This is the default choice, and the only method that is in wide-spread /// use and supported virtually everywhere. This sends the user name and /// password over the network in plain text, easily captured by others. pub fn basic(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_BASIC, on) } /// HTTP Digest authentication. /// /// Digest authentication is defined in RFC 2617 and is a more secure way to /// do authentication over public networks than the regular old-fashioned /// Basic method. pub fn digest(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_DIGEST, on) } /// HTTP Digest authentication with an IE flavor. /// /// Digest authentication is defined in RFC 2617 and is a more secure way to /// do authentication over public networks than the regular old-fashioned /// Basic method. The IE flavor is simply that libcurl will use a special /// "quirk" that IE is known to have used before version 7 and that some /// servers require the client to use. pub fn digest_ie(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_DIGEST_IE, on) } /// HTTP Negotiate (SPNEGO) authentication. /// /// Negotiate authentication is defined in RFC 4559 and is the most secure /// way to perform authentication over HTTP. /// /// You need to build libcurl with a suitable GSS-API library or SSPI on /// Windows for this to work. pub fn gssnegotiate(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_GSSNEGOTIATE, on) } /// HTTP NTLM authentication. /// /// A proprietary protocol invented and used by Microsoft. It uses a /// challenge-response and hash concept similar to Digest, to prevent the /// password from being eavesdropped. /// /// You need to build libcurl with either OpenSSL, GnuTLS or NSS support for /// this option to work, or build libcurl on Windows with SSPI support. pub fn ntlm(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_NTLM, on) } /// NTLM delegating to winbind helper. 
/// /// Authentication is performed by a separate binary application that is /// executed when needed. The name of the application is specified at /// compile time but is typically /usr/bin/ntlm_auth /// /// Note that libcurl will fork when necessary to run the winbind /// application and kill it when complete, calling waitpid() to await its /// exit when done. On POSIX operating systems, killing the process will /// cause a SIGCHLD signal to be raised (regardless of whether /// CURLOPT_NOSIGNAL is set), which must be handled intelligently by the /// application. In particular, the application must not unconditionally /// call wait() in its SIGCHLD signal handler to avoid being subject to a /// race condition. This behavior is subject to change in future versions of /// libcurl. /// /// A proprietary protocol invented and used by Microsoft. It uses a /// challenge-response and hash concept similar to Digest, to prevent the /// password from being eavesdropped. pub fn ntlm_wb(&mut self, on: bool) -> &mut Auth { self.flag(curl_sys::CURLAUTH_NTLM_WB, on) } fn flag(&mut self, bit: c_ulong, on: bool) -> &mut Auth { if on { self.bits |= bit as c_long; } else { self.bits &= !bit as c_long; } self } } Implement a safe interface to CURLOPT_SSL_CTX_FUNCTION and _DATA. //! Bindings to the "easy" libcurl API. //! //! This module contains some simple types like `Easy` and `List` which are just //! wrappers around the corresponding libcurl types. There's also a few enums //! scattered about for various options here and there. //! //! Most simple usage of libcurl will likely use the `Easy` structure here, and //! you can find more docs about its usage on that struct. 
use std::cell::{RefCell, Cell};
use std::ffi::{CString, CStr};
use std::io::SeekFrom;
use std::path::Path;
use std::slice;
use std::str;
use std::time::Duration;

use curl_sys;
use libc::{self, c_long, c_int, c_char, c_void, size_t, c_double, c_ulong};

use {Error, FormError};
use panic;

// TODO: checked casts everywhere

/// Raw bindings to a libcurl "easy session".
///
/// This type corresponds to the `CURL` type in libcurl, and is probably what
/// you want for just sending off a simple HTTP request and fetching a response.
/// Each easy handle can be thought of as a large builder before calling the
/// final `perform` function.
///
/// There are many many configuration options for each `Easy` handle, and they
/// should all have their own documentation indicating what it affects and how
/// it interacts with other options. Some implementations of libcurl can use
/// this handle to interact with many different protocols, although by default
/// this crate only guarantees the HTTP/HTTPS protocols working.
///
/// Note that almost all methods on this structure which configure various
/// properties return a `Result`. This is largely used to detect whether the
/// underlying implementation of libcurl actually implements the option being
/// requested. If you're linked to a version of libcurl which doesn't support
/// the option, then an error will be returned. Some options also perform some
/// validation when they're set, and the error is returned through this vector.
///
/// ## Examples
///
/// Creating a handle which can be used later
///
/// ```
/// use curl::easy::Easy;
///
/// let handle = Easy::new();
/// ```
///
/// Send an HTTP request, writing the response to stdout.
///
/// ```
/// use std::io::{stdout, Write};
///
/// use curl::easy::Easy;
///
/// let mut handle = Easy::new();
/// handle.url("https://www.rust-lang.org/").unwrap();
/// handle.write_function(|data| {
///     Ok(stdout().write(data).unwrap())
/// }).unwrap();
/// handle.perform().unwrap();
/// ```
///
/// Collect all output of an HTTP request to a vector.
///
/// ```
/// use curl::easy::Easy;
///
/// let mut data = Vec::new();
/// let mut handle = Easy::new();
/// handle.url("https://www.rust-lang.org/").unwrap();
/// {
///     let mut transfer = handle.transfer();
///     transfer.write_function(|new_data| {
///         data.extend_from_slice(new_data);
///         Ok(new_data.len())
///     }).unwrap();
///     transfer.perform().unwrap();
/// }
/// println!("{:?}", data);
/// ```
///
/// More examples of various properties of an HTTP request can be found on the
/// specific methods as well.
pub struct Easy {
    // Raw `CURL *` owned by this handle; freed in `Drop`.
    handle: *mut curl_sys::CURL,
    // Boxed so the address registered with libcurl as callback userdata
    // stays stable even if the `Easy` itself moves.
    data: Box<EasyData>,
}

/// A scoped transfer of information which borrows an `Easy` and allows
/// referencing stack-local data of the lifetime `'data`.
///
/// Usage of `Easy` requires the `'static` and `Send` bounds on all callbacks
/// registered, but that's not often wanted if all you need is to collect a
/// bunch of data in memory to a vector, for example. The `Transfer` structure,
/// created by the `Easy::transfer` method, is used for this sort of request.
///
/// The callbacks attached to a `Transfer` are only active for that one transfer
/// object, and they're allowed to elide both the `Send` and `'static` bounds to
/// close over stack-local information.
pub struct Transfer<'easy, 'data> {
    easy: &'easy mut Easy,
    // Boxed for a stable address: libcurl holds a raw pointer to this data
    // for the duration of the transfer.
    data: Box<TransferData<'data>>,
}

// Callback/ownership storage for an `Easy` handle; closures here must be
// `Send + 'static` since the handle itself may move across threads.
#[derive(Default)]
struct EasyData {
    // Whether a transfer is currently in progress on this handle.
    running: Cell<bool>,
    write: Option<Box<FnMut(&[u8]) -> Result<usize, WriteError> + Send>>,
    read: Option<Box<FnMut(&mut [u8]) -> Result<usize, ReadError> + Send>>,
    seek: Option<Box<FnMut(SeekFrom) -> SeekResult + Send>>,
    debug: Option<Box<FnMut(InfoType, &[u8]) + Send>>,
    header: Option<Box<FnMut(&[u8]) -> bool + Send>>,
    progress: Option<Box<FnMut(f64, f64, f64, f64) -> bool + Send>>,
    ssl_ctx: Option<Box<FnMut(*mut c_void) -> Result<(), Error> + Send>>,
    // Owned data that libcurl holds raw pointers into; kept alive here.
    header_list: Option<List>,
    form: Option<Form>,
    error_buf: RefCell<Vec<u8>>,
}

// Scoped counterpart of `EasyData`: the closures may borrow stack data of
// lifetime `'a` and need not be `Send`.
#[derive(Default)]
struct TransferData<'a> {
    write: Option<Box<FnMut(&[u8]) -> Result<usize, WriteError> + 'a>>,
    read: Option<Box<FnMut(&mut [u8]) -> Result<usize, ReadError> + 'a>>,
    seek: Option<Box<FnMut(SeekFrom) -> SeekResult + 'a>>,
    debug: Option<Box<FnMut(InfoType, &[u8]) + 'a>>,
    header: Option<Box<FnMut(&[u8]) -> bool + 'a>>,
    progress: Option<Box<FnMut(f64, f64, f64, f64) -> bool + 'a>>,
    ssl_ctx: Option<Box<FnMut(*mut c_void) -> Result<(), Error> + 'a>>,
}

// libcurl guarantees that a CURL handle is fine to be transferred so long as
// it's not used concurrently, and we do that correctly ourselves.
unsafe impl Send for Easy {}

/// Multipart/formdata for an HTTP POST request.
///
/// This structure is built up and then passed to the `Easy::httppost` method to
/// be sent off with a request.
pub struct Form {
    head: *mut curl_sys::curl_httppost,
    tail: *mut curl_sys::curl_httppost,
    // Owned data that the raw `curl_httppost` list points into; kept alive
    // for the lifetime of the form.
    headers: Vec<List>,
    buffers: Vec<Vec<u8>>,
    strings: Vec<CString>,
}

/// One part in a multipart upload, added to a `Form`.
pub struct Part<'form, 'data> {
    form: &'form mut Form,
    name: &'data str,
    // CURLFORM_ARRAY options, always terminated by a CURLFORM_END entry.
    array: Vec<curl_sys::curl_forms>,
    // First conversion error recorded by builder methods, reported by `add`.
    error: Option<FormError>,
}

/// Possible proxy types that libcurl currently understands.
#[allow(missing_docs)]
pub enum ProxyType {
    Http = curl_sys::CURLPROXY_HTTP as isize,
    Http1 = curl_sys::CURLPROXY_HTTP_1_0 as isize,
    Socks4 = curl_sys::CURLPROXY_SOCKS4 as isize,
    Socks5 = curl_sys::CURLPROXY_SOCKS5 as isize,
    Socks4a = curl_sys::CURLPROXY_SOCKS4A as isize,
    Socks5Hostname = curl_sys::CURLPROXY_SOCKS5_HOSTNAME as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible conditions for the `time_condition` method.
#[allow(missing_docs)]
pub enum TimeCondition {
    None = curl_sys::CURL_TIMECOND_NONE as isize,
    IfModifiedSince = curl_sys::CURL_TIMECOND_IFMODSINCE as isize,
    IfUnmodifiedSince = curl_sys::CURL_TIMECOND_IFUNMODSINCE as isize,
    LastModified = curl_sys::CURL_TIMECOND_LASTMOD as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible values to pass to the `ip_resolve` method.
#[allow(missing_docs)]
pub enum IpResolve {
    V4 = curl_sys::CURL_IPRESOLVE_V4 as isize,
    V6 = curl_sys::CURL_IPRESOLVE_V6 as isize,
    Any = curl_sys::CURL_IPRESOLVE_WHATEVER as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    // Out-of-band discriminant to avoid clashing with future libcurl values.
    __Nonexhaustive = 500,
}

/// Possible values to pass to the `ssl_version` method.
#[allow(missing_docs)]
pub enum SslVersion {
    Default = curl_sys::CURL_SSLVERSION_DEFAULT as isize,
    Tlsv1 = curl_sys::CURL_SSLVERSION_TLSv1 as isize,
    Sslv2 = curl_sys::CURL_SSLVERSION_SSLv2 as isize,
    Sslv3 = curl_sys::CURL_SSLVERSION_SSLv3 as isize,
    // Tlsv10 = curl_sys::CURL_SSLVERSION_TLSv1_0 as isize,
    // Tlsv11 = curl_sys::CURL_SSLVERSION_TLSv1_1 as isize,
    // Tlsv12 = curl_sys::CURL_SSLVERSION_TLSv1_2 as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive = 500,
}

/// Possible return values from the `seek_function` callback.
pub enum SeekResult {
    /// Indicates that the seek operation was a success
    Ok = curl_sys::CURL_SEEKFUNC_OK as isize,

    /// Indicates that the seek operation failed, and the entire request should
    /// fail as a result.
    Fail = curl_sys::CURL_SEEKFUNC_FAIL as isize,

    /// Indicates that although the seek failed libcurl should attempt to keep
    /// working if possible (for example "seek" through reading).
    CantSeek = curl_sys::CURL_SEEKFUNC_CANTSEEK as isize,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive = 500,
}

/// Possible data chunks that can be witnessed as part of the `debug_function`
/// callback.
pub enum InfoType {
    /// The data is informational text.
    Text,

    /// The data is header (or header-like) data received from the peer.
    HeaderIn,

    /// The data is header (or header-like) data sent to the peer.
    HeaderOut,

    /// The data is protocol data received from the peer.
    DataIn,

    /// The data is protocol data sent to the peer.
    DataOut,

    /// The data is SSL/TLS (binary) data received from the peer.
    SslDataIn,

    /// The data is SSL/TLS (binary) data sent to the peer.
    SslDataOut,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// A linked list of strings, wrapping libcurl's `curl_slist`.
pub struct List {
    raw: *mut curl_sys::curl_slist,
}

/// An iterator over `List`
pub struct Iter<'a> {
    _me: &'a List,
    // Current node; null once the end of the list is reached.
    cur: *mut curl_sys::curl_slist,
}

unsafe impl Send for List {}

/// Possible error codes that can be returned from the `read_function` callback.
pub enum ReadError {
    /// Indicates that the connection should be aborted immediately
    Abort,

    /// Indicates that reading should be paused until `unpause` is called.
    Pause,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Possible error codes that can be returned from the `write_function` callback.
pub enum WriteError {
    /// Indicates that writing should be paused until `unpause` is called.
    Pause,

    /// Hidden variant to indicate that this enum should not be matched on, it
    /// may grow over time.
    #[doc(hidden)]
    __Nonexhaustive,
}

/// Structure which stores possible authentication methods to get passed to
/// `http_auth` and `proxy_auth`.
#[derive(Clone, Debug)]
pub struct Auth {
    // Bitmask of CURLAUTH_* flags, manipulated via `Auth::flag`.
    bits: c_long,
}

impl Easy {
    /// Creates a new "easy" handle which is the core of almost all operations
    /// in libcurl.
    ///
    /// To use a handle, applications typically configure a number of options
    /// followed by a call to `perform`. Options are preserved across calls to
    /// `perform` and need to be reset manually (or via the `reset` method) if
    /// this is not desired.
    pub fn new() -> Easy {
        // Global libcurl initialization (performed at most once).
        ::init();
        unsafe {
            let handle = curl_sys::curl_easy_init();
            assert!(!handle.is_null());
            let mut ret = Easy {
                handle: handle,
                data: Default::default(),
            };
            // Apply crate defaults (error buffer, no signals, SSL certs).
            default_configure(&mut ret);
            return ret
        }
    }

    // =========================================================================
    // Behavior options

    /// Configures this handle to have verbose output to help debug protocol
    /// information.
    ///
    /// By default output goes to stderr, but the `stderr` function on this type
    /// can configure that. You can also use the `debug_function` method to get
    /// all protocol data sent and received.
    ///
    /// By default, this option is `false`.
    pub fn verbose(&mut self, verbose: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_VERBOSE, verbose as c_long)
    }

    /// Indicates whether header information is streamed to the output body of
    /// this request.
    ///
    /// This option is only relevant for protocols which have header metadata
    /// (like http or ftp). It's not generally possible to extract headers
    /// from the body if using this method, that use case should be intended for
    /// the `header_function` method.
    ///
    /// To set HTTP headers, use the `http_header` method.
    ///
    /// By default, this option is `false` and corresponds to
    /// `CURLOPT_HEADER`.
    pub fn show_header(&mut self, show: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_HEADER, show as c_long)
    }

    /// Indicates whether a progress meter will be shown for requests done with
    /// this handle.
    ///
    /// This will also prevent the `progress_function` from being called.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_NOPROGRESS`.
    pub fn progress(&mut self, progress: bool) -> Result<(), Error> {
        // Note the inversion: libcurl's option is "no progress".
        self.setopt_long(curl_sys::CURLOPT_NOPROGRESS,
                         (!progress) as c_long)
    }

    /// Inform libcurl whether or not it should install signal handlers or
    /// attempt to use signals to perform library functions.
    ///
    /// If this option is disabled then timeouts during name resolution will not
    /// work unless libcurl is built against c-ares. Note that enabling this
    /// option, however, may not cause libcurl to work with multiple threads.
    ///
    /// By default this option is `false` and corresponds to `CURLOPT_NOSIGNAL`.
    /// Note that this default is **different than libcurl** as it is intended
    /// that this library is threadsafe by default. See the [libcurl docs] for
    /// some more information.
    ///
    /// [libcurl docs]: https://curl.haxx.se/libcurl/c/threadsafe.html
    pub fn signal(&mut self, signal: bool) -> Result<(), Error> {
        // Note the inversion: libcurl's option is "no signal".
        self.setopt_long(curl_sys::CURLOPT_NOSIGNAL,
                         (!signal) as c_long)
    }

    /// Indicates whether multiple files will be transferred based on the file
    /// name pattern.
    ///
    /// The last part of a filename uses fnmatch-like pattern matching.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_WILDCARDMATCH`.
    pub fn wildcard_match(&mut self, m: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_WILDCARDMATCH, m as c_long)
    }

    // =========================================================================
    // Callback options

    /// Set callback for writing received data.
    ///
    /// This callback function gets called by libcurl as soon as there is data
    /// received that needs to be saved.
    ///
    /// The callback function will be passed as much data as possible in all
    /// invokes, but you must not make any assumptions. It may be one byte, it
    /// may be thousands. If `show_header` is enabled, which makes header data
    /// get passed to the write callback, you can get up to
    /// `CURL_MAX_HTTP_HEADER` bytes of header data passed into it. This
    /// usually means 100K.
    ///
    /// This function may be called with zero bytes data if the transferred file
    /// is empty.
    ///
    /// The callback should return the number of bytes actually taken care of.
    /// If that amount differs from the amount passed to your callback function,
    /// it'll signal an error condition to the library. This will cause the
    /// transfer to get aborted and the libcurl function used will return
    /// an error with `is_write_error`.
    ///
    /// If your callback function returns `Err(WriteError::Pause)` it will cause
    /// this transfer to become paused. See `unpause_write` for further details.
    ///
    /// By default data is sent into the void, and this corresponds to the
    /// `CURLOPT_WRITEFUNCTION` and `CURLOPT_WRITEDATA` options.
    ///
    /// Note that the lifetime bound on this function is `'static`, but that
    /// is often too restrictive. To use stack data consider calling the
    /// `transfer` method and then using `write_function` to configure a
    /// callback that can reference stack-local data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::io::{stdout, Write};
    /// use curl::easy::Easy;
    ///
    /// let mut handle = Easy::new();
    /// handle.url("https://www.rust-lang.org/").unwrap();
    /// handle.write_function(|data| {
    ///     Ok(stdout().write(data).unwrap())
    /// }).unwrap();
    /// handle.perform().unwrap();
    /// ```
    ///
    /// Writing to a stack-local buffer
    ///
    /// ```
    /// use std::io::{stdout, Write};
    /// use curl::easy::Easy;
    ///
    /// let mut buf = Vec::new();
    /// let mut handle = Easy::new();
    /// handle.url("https://www.rust-lang.org/").unwrap();
    ///
    /// let mut transfer = handle.transfer();
    /// transfer.write_function(|data| {
    ///     buf.extend_from_slice(data);
    ///     Ok(data.len())
    /// }).unwrap();
    /// transfer.perform().unwrap();
    /// ```
    pub fn write_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&[u8]) -> Result<usize, WriteError> + Send + 'static
    {
        // Store the closure first so the pointer registered below stays
        // valid for the handle's lifetime.
        self.data.write = Some(Box::new(f));
        unsafe {
            return self.set_write_function(easy_write_cb,
                                           &*self.data as *const _ as *mut _)
        }
    }

    // Registers the raw write callback + userdata pair with libcurl; shared
    // with `Transfer::write_function` which passes its own trampoline.
    unsafe fn set_write_function(&self,
                                 cb: curl_sys::curl_write_callback,
                                 ptr: *mut c_void) -> Result<(), Error> {
        try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEFUNCTION, cb as *const _));
        try!(self.setopt_ptr(curl_sys::CURLOPT_WRITEDATA, ptr as *const _));
        return Ok(());
    }

    /// Read callback for data uploads.
    ///
    /// This callback function gets called by libcurl as soon as it needs to
    /// read data in order to send it to the peer - like if you ask it to upload
    /// or post data to the server.
    ///
    /// Your function must then return the actual number of bytes that it stored
    /// in that memory area. Returning 0 will signal end-of-file to the library
    /// and cause it to stop the current transfer.
    ///
    /// If you stop the current transfer by returning 0 "pre-maturely" (i.e
    /// before the server expected it, like when you've said you will upload N
    /// bytes and you upload less than N bytes), you may experience that the
    /// server "hangs" waiting for the rest of the data that won't come.
/// /// The read callback may return `Err(ReadError::Abort)` to stop the /// current operation immediately, resulting in a `is_aborted_by_callback` /// error code from the transfer. /// /// The callback can return `Err(ReadError::Pause)` to cause reading from /// this connection to pause. See `unpause_read` for further details. /// /// By default data not input, and this corresponds to the /// `CURLOPT_READFUNCTION` and `CURLOPT_READDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `read_function` to configure a /// callback that can reference stack-local data. /// /// # Examples /// /// Read input from stdin /// /// ```no_run /// use std::io::{stdin, Read}; /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://example.com/login").unwrap(); /// handle.read_function(|into| { /// Ok(stdin().read(into).unwrap()) /// }).unwrap(); /// handle.post(true).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Reading from stack-local data: /// /// ```no_run /// use std::io::{stdin, Read}; /// use curl::easy::Easy; /// /// let mut data_to_upload = &b"foobar"[..]; /// let mut handle = Easy::new(); /// handle.url("https://example.com/login").unwrap(); /// handle.post(true).unwrap(); /// /// let mut transfer = handle.transfer(); /// transfer.read_function(|into| { /// Ok(data_to_upload.read(into).unwrap()) /// }).unwrap(); /// transfer.perform().unwrap(); /// ``` pub fn read_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(&mut [u8]) -> Result<usize, ReadError> + Send + 'static { self.data.read = Some(Box::new(f)); unsafe { self.set_read_function(easy_read_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_read_function(&self, cb: curl_sys::curl_read_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_READFUNCTION, cb as *const _)); 
try!(self.setopt_ptr(curl_sys::CURLOPT_READDATA, ptr as *const _)); return Ok(()); } /// User callback for seeking in input stream. /// /// This function gets called by libcurl to seek to a certain position in /// the input stream and can be used to fast forward a file in a resumed /// upload (instead of reading all uploaded bytes with the normal read /// function/callback). It is also called to rewind a stream when data has /// already been sent to the server and needs to be sent again. This may /// happen when doing a HTTP PUT or POST with a multi-pass authentication /// method, or when an existing HTTP connection is reused too late and the /// server closes the connection. /// /// The callback function must return `SeekResult::Ok` on success, /// `SeekResult::Fail` to cause the upload operation to fail or /// `SeekResult::CantSeek` to indicate that while the seek failed, libcurl /// is free to work around the problem if possible. The latter can sometimes /// be done by instead reading from the input or similar. /// /// By default data this option is not set, and this corresponds to the /// `CURLOPT_SEEKFUNCTION` and `CURLOPT_SEEKDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `seek_function` to configure a /// callback that can reference stack-local data. 
pub fn seek_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(SeekFrom) -> SeekResult + Send + 'static { self.data.seek = Some(Box::new(f)); unsafe { self.set_seek_function(easy_seek_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_seek_function(&self, cb: curl_sys::curl_seek_callback, ptr: *mut c_void) -> Result<(), Error> { let cb = cb as curl_sys::curl_seek_callback; try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_SEEKDATA, ptr as *const _)); Ok(()) } /// Callback to progress meter function /// /// This function gets called by libcurl instead of its internal equivalent /// with a frequent interval. While data is being transferred it will be /// called very frequently, and during slow periods like when nothing is /// being transferred it can slow down to about one call per second. /// /// The callback gets told how much data libcurl will transfer and has /// transferred, in number of bytes. The first argument is the total number /// of bytes libcurl expects to download in this transfer. The second /// argument is the number of bytes downloaded so far. The third argument is /// the total number of bytes libcurl expects to upload in this transfer. /// The fourth argument is the number of bytes uploaded so far. /// /// Unknown/unused argument values passed to the callback will be set to /// zero (like if you only download data, the upload size will remain 0). /// Many times the callback will be called one or more times first, before /// it knows the data sizes so a program must be made to handle that. /// /// Returning `false` from this callback will cause libcurl to abort the /// transfer and return `is_aborted_by_callback`. /// /// If you transfer data with the multi interface, this function will not be /// called during periods of idleness unless you call the appropriate /// libcurl function that performs transfers. 
/// /// `noprogress` must be set to 0 to make this function actually get /// called. /// /// By default this function calls an internal method and corresponds to /// `CURLOPT_XFERINFOFUNCTION` and `CURLOPT_XFERINFODATA`. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `progress_function` to configure a /// callback that can reference stack-local data. pub fn progress_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(f64, f64, f64, f64) -> bool + Send + 'static { self.data.progress = Some(Box::new(f)); unsafe { self.set_progress_function(easy_progress_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_progress_function(&self, cb: curl_sys::curl_progress_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_PROGRESSDATA, ptr as *const _)); Ok(()) } /// Callback to SSL context /// /// This callback function gets called by libcurl just before the /// initialization of an SSL connection after having processed all /// other SSL related options to give a last chance to an /// application to modify the behaviour of the SSL /// initialization. The `ssl_ctx` parameter is actually a pointer /// to the SSL library's SSL_CTX. If an error is returned from the /// callback no attempt to establish a connection is made and the /// perform operation will return the callback's error code. /// /// This function will get called on all new connections made to a /// server, during the SSL negotiation. The SSL_CTX pointer will /// be a new one every time. /// /// To use this properly, a non-trivial amount of knowledge of /// your SSL library is necessary. 
For example, you can use this /// function to call library-specific callbacks to add additional /// validation code for certificates, and even to change the /// actual URI of a HTTPS request. /// /// By default this function calls an internal method and /// corresponds to `CURLOPT_SSL_CTX_FUNCTION` and /// `CURLOPT_SSL_CTX_DATA`. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `progress_function` to configure a /// callback that can reference stack-local data. pub fn ssl_ctx_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'static { self.data.ssl_ctx = Some(Box::new(f)); unsafe { self.set_ssl_ctx_function(easy_ssl_ctx_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_ssl_ctx_function(&self, cb: curl_sys::curl_ssl_ctx_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_FUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_SSL_CTX_DATA, ptr as *const _)); Ok(()) } /// Specify a debug callback /// /// `debug_function` replaces the standard debug function used when /// `verbose` is in effect. This callback receives debug information, /// as specified in the type argument. /// /// By default this option is not set and corresponds to the /// `CURLOPT_DEBUGFUNCTION` and `CURLOPT_DEBUGDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `debug_function` to configure a /// callback that can reference stack-local data. 
pub fn debug_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(InfoType, &[u8]) + Send + 'static { self.data.debug = Some(Box::new(f)); unsafe { self.set_debug_function(easy_debug_cb, &*self.data as *const _ as *mut _) } } unsafe fn set_debug_function(&self, cb: curl_sys::curl_debug_callback, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_DEBUGDATA, ptr as *const _)); return Ok(()); } /// Callback that receives header data /// /// This function gets called by libcurl as soon as it has received header /// data. The header callback will be called once for each header and only /// complete header lines are passed on to the callback. Parsing headers is /// very easy using this. If this callback returns `false` it'll signal an /// error to the library. This will cause the transfer to get aborted and /// the libcurl function in progress will return `is_write_error`. /// /// A complete HTTP header that is passed to this function can be up to /// CURL_MAX_HTTP_HEADER (100K) bytes. /// /// It's important to note that the callback will be invoked for the headers /// of all responses received after initiating a request and not just the /// final response. This includes all responses which occur during /// authentication negotiation. If you need to operate on only the headers /// from the final response, you will need to collect headers in the /// callback yourself and use HTTP status lines, for example, to delimit /// response boundaries. /// /// When a server sends a chunked encoded transfer, it may contain a /// trailer. That trailer is identical to a HTTP header and if such a /// trailer is received it is passed to the application using this callback /// as well. There are several ways to detect it being a trailer and not an /// ordinary header: 1) it comes after the response-body. 
2) it comes after /// the final header line (CR LF) 3) a Trailer: header among the regular /// response-headers mention what header(s) to expect in the trailer. /// /// For non-HTTP protocols like FTP, POP3, IMAP and SMTP this function will /// get called with the server responses to the commands that libcurl sends. /// /// By default this option is not set and corresponds to the /// `CURLOPT_HEADERFUNCTION` and `CURLOPT_HEADERDATA` options. /// /// Note that the lifetime bound on this function is `'static`, but that /// is often too restrictive. To use stack data consider calling the /// `transfer` method and then using `header_function` to configure a /// callback that can reference stack-local data. /// /// # Examples /// /// ``` /// use std::str; /// /// use curl::easy::Easy; /// /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// handle.header_function(|header| { /// print!("header: {}", str::from_utf8(header).unwrap()); /// true /// }).unwrap(); /// handle.perform().unwrap(); /// ``` /// /// Collecting headers to a stack local vector /// /// ``` /// use std::str; /// /// use curl::easy::Easy; /// /// let mut headers = Vec::new(); /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// /// { /// let mut transfer = handle.transfer(); /// transfer.header_function(|header| { /// headers.push(str::from_utf8(header).unwrap().to_string()); /// true /// }).unwrap(); /// transfer.perform().unwrap(); /// } /// /// println!("{:?}", headers); /// ``` pub fn header_function<F>(&mut self, f: F) -> Result<(), Error> where F: FnMut(&[u8]) -> bool + Send + 'static { self.data.header = Some(Box::new(f)); unsafe { self.set_header_function(easy_header_cb, &*self.data as *const _ as *mut _) } } // TODO: shouldn't there be a libcurl typedef for this? 
unsafe fn set_header_function(&self, cb: extern fn(*mut c_char, size_t, size_t, *mut c_void) -> size_t, ptr: *mut c_void) -> Result<(), Error> { try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERFUNCTION, cb as *const _)); try!(self.setopt_ptr(curl_sys::CURLOPT_HEADERDATA, ptr as *const _)); Ok(()) } // ========================================================================= // Error options // TODO: error buffer and stderr /// Indicates whether this library will fail on HTTP response codes >= 400. /// /// This method is not fail-safe especially when authentication is involved. /// /// By default this option is `false` and corresponds to /// `CURLOPT_FAILONERROR`. pub fn fail_on_error(&mut self, fail: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_FAILONERROR, fail as c_long) } // ========================================================================= // Network options /// Provides the URL which this handle will work with. /// /// The string provided must be URL-encoded with the format: /// /// ```text /// scheme://host:port/path /// ``` /// /// The syntax is not validated as part of this function and that is /// deferred until later. /// /// By default this option is not set and `perform` will not work until it /// is set. This option corresponds to `CURLOPT_URL`. pub fn url(&mut self, url: &str) -> Result<(), Error> { let url = try!(CString::new(url)); self.setopt_str(curl_sys::CURLOPT_URL, &url) } /// Configures the port number to connect to, instead of the one specified /// in the URL or the default of the protocol. pub fn port(&mut self, port: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PORT, port as c_long) } // /// Indicates whether sequences of `/../` and `/./` will be squashed or not. // /// // /// By default this option is `false` and corresponds to // /// `CURLOPT_PATH_AS_IS`. // pub fn path_as_is(&mut self, as_is: bool) -> Result<(), Error> { // } /// Provide the URL of a proxy to use. 
/// /// By default this option is not set and corresponds to `CURLOPT_PROXY`. pub fn proxy(&mut self, url: &str) -> Result<(), Error> { let url = try!(CString::new(url)); self.setopt_str(curl_sys::CURLOPT_PROXY, &url) } /// Provide port number the proxy is listening on. /// /// By default this option is not set (the default port for the proxy /// protocol is used) and corresponds to `CURLOPT_PROXYPORT`. pub fn proxy_port(&mut self, port: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYPORT, port as c_long) } /// Indicates the type of proxy being used. /// /// By default this option is `ProxyType::Http` and corresponds to /// `CURLOPT_PROXYTYPE`. pub fn proxy_type(&mut self, kind: ProxyType) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYTYPE, kind as c_long) } /// Provide a list of hosts that should not be proxied to. /// /// This string is a comma-separated list of hosts which should not use the /// proxy specified for connections. A single `*` character is also accepted /// as a wildcard for all hosts. /// /// By default this option is not set and corresponds to /// `CURLOPT_NOPROXY`. pub fn noproxy(&mut self, skip: &str) -> Result<(), Error> { let skip = try!(CString::new(skip)); self.setopt_str(curl_sys::CURLOPT_PROXYTYPE, &skip) } /// Inform curl whether it should tunnel all operations through the proxy. /// /// This essentially means that a `CONNECT` is sent to the proxy for all /// outbound requests. /// /// By default this option is `false` and corresponds to /// `CURLOPT_HTTPPROXYTUNNEL`. pub fn http_proxy_tunnel(&mut self, tunnel: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HTTPPROXYTUNNEL, tunnel as c_long) } /// Tell curl which interface to bind to for an outgoing network interface. /// /// The interface name, IP address, or host name can be specified here. /// /// By default this option is not set and corresponds to /// `CURLOPT_INTERFACE`. 
pub fn interface(&mut self, interface: &str) -> Result<(), Error> { let s = try!(CString::new(interface)); self.setopt_str(curl_sys::CURLOPT_INTERFACE, &s) } /// Indicate which port should be bound to locally for this connection. /// /// By default this option is 0 (any port) and corresponds to /// `CURLOPT_LOCALPORT`. pub fn set_local_port(&mut self, port: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_LOCALPORT, port as c_long) } /// Indicates the number of attempts libcurl will perform to find a working /// port number. /// /// By default this option is 1 and corresponds to /// `CURLOPT_LOCALPORTRANGE`. pub fn local_port_range(&mut self, range: u16) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_LOCALPORTRANGE, range as c_long) } /// Sets the timeout of how long name resolves will be kept in memory. /// /// This is distinct from DNS TTL options and is entirely speculative. /// /// By default this option is 60s and corresponds to /// `CURLOPT_DNS_CACHE_TIMEOUT`. pub fn dns_cache_timeout(&mut self, dur: Duration) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_DNS_CACHE_TIMEOUT, dur.as_secs() as c_long) } /// Specify the preferred receive buffer size, in bytes. /// /// This is treated as a request, not an order, and the main point of this /// is that the write callback may get called more often with smaller /// chunks. /// /// By default this option is the maximum write size and corresopnds to /// `CURLOPT_BUFFERSIZE`. pub fn buffer_size(&mut self, size: usize) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_BUFFERSIZE, size as c_long) } // /// Enable or disable TCP Fast Open // /// // /// By default this options defaults to `false` and corresponds to // /// `CURLOPT_TCP_FASTOPEN` // pub fn fast_open(&mut self, enable: bool) -> Result<(), Error> { // } /// Configures whether the TCP_NODELAY option is set, or Nagle's algorithm /// is disabled. 
/// /// The purpose of Nagle's algorithm is to minimize the number of small /// packet's on the network, and disabling this may be less efficient in /// some situations. /// /// By default this option is `false` and corresponds to /// `CURLOPT_TCP_NODELAY`. pub fn tcp_nodelay(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_TCP_NODELAY, enable as c_long) } // /// Configures whether TCP keepalive probes will be sent. // /// // /// The delay and frequency of these probes is controlled by `tcp_keepidle` // /// and `tcp_keepintvl`. // /// // /// By default this option is `false` and corresponds to // /// `CURLOPT_TCP_KEEPALIVE`. // pub fn tcp_keepalive(&mut self, enable: bool) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPALIVE, enable as c_long) // } // /// Configures the TCP keepalive idle time wait. // /// // /// This is the delay, after which the connection is idle, keepalive probes // /// will be sent. Not all operating systems support this. // /// // /// By default this corresponds to `CURLOPT_TCP_KEEPIDLE`. // pub fn tcp_keepidle(&mut self, amt: Duration) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPIDLE, // amt.as_secs() as c_long) // } // // /// Configures the delay between keepalive probes. // /// // /// By default this corresponds to `CURLOPT_TCP_KEEPINTVL`. // pub fn tcp_keepintvl(&mut self, amt: Duration) -> Result<(), Error> { // self.setopt_long(curl_sys::CURLOPT_TCP_KEEPINTVL, // amt.as_secs() as c_long) // } /// Configures the scope for local IPv6 addresses. /// /// Sets the scope_id value to use when connecting to IPv6 or link-local /// addresses. 
/// /// By default this value is 0 and corresponds to `CURLOPT_ADDRESS_SCOPE` pub fn address_scope(&mut self, scope: u32) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_ADDRESS_SCOPE, scope as c_long) } // ========================================================================= // Names and passwords /// Configures the username to pass as authentication for this connection. /// /// By default this value is not set and corresponds to `CURLOPT_USERNAME`. pub fn username(&mut self, user: &str) -> Result<(), Error> { let user = try!(CString::new(user)); self.setopt_str(curl_sys::CURLOPT_USERNAME, &user) } /// Configures the password to pass as authentication for this connection. /// /// By default this value is not set and corresponds to `CURLOPT_PASSWORD`. pub fn password(&mut self, pass: &str) -> Result<(), Error> { let pass = try!(CString::new(pass)); self.setopt_str(curl_sys::CURLOPT_PASSWORD, &pass) } /// Set HTTP server authentication methods to try /// /// If more than one method is set, libcurl will first query the site to see /// which authentication methods it supports and then pick the best one you /// allow it to use. For some methods, this will induce an extra network /// round-trip. Set the actual name and password with the `password` and /// `username` methods. /// /// For authentication with a proxy, see `proxy_auth`. /// /// By default this value is basic and corresponds to `CURLOPT_HTTPAUTH`. pub fn http_auth(&mut self, auth: &Auth) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_HTTPAUTH, auth.bits) } /// Configures the proxy username to pass as authentication for this /// connection. /// /// By default this value is not set and corresponds to /// `CURLOPT_PROXYUSERNAME`. pub fn proxy_username(&mut self, user: &str) -> Result<(), Error> { let user = try!(CString::new(user)); self.setopt_str(curl_sys::CURLOPT_PROXYUSERNAME, &user) } /// Configures the proxy password to pass as authentication for this /// connection. 
/// /// By default this value is not set and corresponds to /// `CURLOPT_PROXYPASSWORD`. pub fn proxy_password(&mut self, pass: &str) -> Result<(), Error> { let pass = try!(CString::new(pass)); self.setopt_str(curl_sys::CURLOPT_PROXYPASSWORD, &pass) } /// Set HTTP proxy authentication methods to try /// /// If more than one method is set, libcurl will first query the site to see /// which authentication methods it supports and then pick the best one you /// allow it to use. For some methods, this will induce an extra network /// round-trip. Set the actual name and password with the `proxy_password` /// and `proxy_username` methods. /// /// By default this value is basic and corresponds to `CURLOPT_PROXYAUTH`. pub fn proxy_auth(&mut self, auth: &Auth) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_PROXYAUTH, auth.bits) } // ========================================================================= // HTTP Options /// Indicates whether the referer header is automatically updated /// /// By default this option is `false` and corresponds to /// `CURLOPT_AUTOREFERER`. pub fn autoreferer(&mut self, enable: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_AUTOREFERER, enable as c_long) } /// Enables automatic decompression of HTTP downloads. /// /// Sets the contents of the Accept-Encoding header sent in an HTTP request. /// This enables decoding of a response with Content-Encoding. /// /// Currently supported encoding are `identity`, `zlib`, and `gzip`. A /// zero-length string passed in will send all accepted encodings. /// /// By default this option is not set and corresponds to /// `CURLOPT_ACCEPT_ENCODING`. pub fn accept_encoding(&mut self, encoding: &str) -> Result<(), Error> { let encoding = try!(CString::new(encoding)); self.setopt_str(curl_sys::CURLOPT_ACCEPT_ENCODING, &encoding) } /// Request the HTTP Transfer Encoding. /// /// By default this option is `false` and corresponds to /// `CURLOPT_TRANSFER_ENCODING`. 
pub fn transfer_encoding(&mut self, enable: bool) -> Result<(), Error> {
    let on: c_long = if enable { 1 } else { 0 };
    self.setopt_long(curl_sys::CURLOPT_TRANSFER_ENCODING, on)
}

/// Follow HTTP 3xx redirects.
///
/// Indicates whether any `Location` headers in the response should get
/// followed.
///
/// By default this option is `false` and corresponds to
/// `CURLOPT_FOLLOWLOCATION`.
pub fn follow_location(&mut self, enable: bool) -> Result<(), Error> {
    let on: c_long = if enable { 1 } else { 0 };
    self.setopt_long(curl_sys::CURLOPT_FOLLOWLOCATION, on)
}

/// Send credentials to hosts other than the first as well.
///
/// Sends username/password credentials even when the host changes as part
/// of a redirect.
///
/// By default this option is `false` and corresponds to
/// `CURLOPT_UNRESTRICTED_AUTH`.
pub fn unrestricted_auth(&mut self, enable: bool) -> Result<(), Error> {
    let on: c_long = if enable { 1 } else { 0 };
    self.setopt_long(curl_sys::CURLOPT_UNRESTRICTED_AUTH, on)
}

/// Set the maximum number of redirects allowed.
///
/// A value of 0 will refuse any redirect.
///
/// By default this option is `-1` (unlimited) and corresponds to
/// `CURLOPT_MAXREDIRS`.
pub fn max_redirections(&mut self, max: u32) -> Result<(), Error> {
    self.setopt_long(curl_sys::CURLOPT_MAXREDIRS, max as c_long)
}

// TODO: post_redirections

/// Make an HTTP PUT request.
///
/// By default this option is `false` and corresponds to `CURLOPT_PUT`.
pub fn put(&mut self, enable: bool) -> Result<(), Error> {
    let on: c_long = if enable { 1 } else { 0 };
    self.setopt_long(curl_sys::CURLOPT_PUT, on)
}

/// Make an HTTP POST request.
///
/// This will also make the library use the
/// `Content-Type: application/x-www-form-urlencoded` header.
///
/// POST data can be specified through `post_fields` or by specifying a read
/// function.
///
/// By default this option is `false` and corresponds to `CURLOPT_POST`.
pub fn post(&mut self, enable: bool) -> Result<(), Error> {
    let on: c_long = if enable { 1 } else { 0 };
    self.setopt_long(curl_sys::CURLOPT_POST, on)
}

/// Configures the data that will be uploaded as part of a POST.
///
/// Note that the data is copied into this handle and if that's not desired
/// then the read callbacks can be used instead.
///
/// By default this option is not set and corresponds to
/// `CURLOPT_COPYPOSTFIELDS`.
pub fn post_fields_copy(&mut self, data: &[u8]) -> Result<(), Error> {
    // Set the length before the pointer so libcurl knows how much to read
    try!(self.post_field_size(data.len() as u64));
    self.setopt_ptr(curl_sys::CURLOPT_COPYPOSTFIELDS,
                    data.as_ptr() as *const _)
}

/// Configures the size of data that's going to be uploaded as part of a
/// POST operation.
///
/// This is called automatically as part of `post_fields` and should only
/// be called if data is being provided in a read callback (and even then
/// it's optional).
///
/// By default this option is not set and corresponds to
/// `CURLOPT_POSTFIELDSIZE_LARGE`.
pub fn post_field_size(&mut self, size: u64) -> Result<(), Error> {
    // Clear anything previous to ensure we don't read past a buffer
    try!(self.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS,
                         0 as *const _));
    self.setopt_off_t(curl_sys::CURLOPT_POSTFIELDSIZE_LARGE,
                      size as curl_sys::curl_off_t)
}

/// Tells libcurl you want a multipart/formdata HTTP POST to be made and you
/// instruct what data to pass on to the server in the `form` argument.
///
/// By default this option is set to null and corresponds to
/// `CURLOPT_HTTPPOST`.
pub fn httppost(&mut self, form: Form) -> Result<(), Error> {
    try!(self.setopt_ptr(curl_sys::CURLOPT_HTTPPOST,
                         form.head as *const _));
    // Keep the `Form` alive for as long as this handle may reference it;
    // libcurl only stored a raw pointer to its list above.
    self.data.form = Some(form);
    Ok(())
}

/// Sets the HTTP referer header
///
/// By default this option is not set and corresponds to `CURLOPT_REFERER`.
pub fn referer(&mut self, referer: &str) -> Result<(), Error> {
    let referer = try!(CString::new(referer));
    self.setopt_str(curl_sys::CURLOPT_REFERER, &referer)
}

/// Sets the HTTP user-agent header
///
/// By default this option is not set and corresponds to
/// `CURLOPT_USERAGENT`.
pub fn useragent(&mut self, useragent: &str) -> Result<(), Error> { let useragent = try!(CString::new(useragent)); self.setopt_str(curl_sys::CURLOPT_USERAGENT, &useragent) } /// Add some headers to this HTTP request. /// /// If you add a header that is otherwise used internally, the value here /// takes precedence. If a header is added with no content (like `Accept:`) /// the internally the header will get disabled. To add a header with no /// content, use the form `MyHeader;` (not the trailing semicolon). /// /// Headers must not be CRLF terminated. Many replaced headers have common /// shortcuts which should be prefered. /// /// By default this option is not set and corresponds to /// `CURLOPT_HTTPHEADER` /// /// # Examples /// /// ``` /// use curl::easy::{Easy, List}; /// /// let mut list = List::new(); /// list.append("Foo: bar").unwrap(); /// list.append("Bar: baz").unwrap(); /// /// let mut handle = Easy::new(); /// handle.url("https://www.rust-lang.org/").unwrap(); /// handle.http_headers(list).unwrap(); /// handle.perform().unwrap(); /// ``` pub fn http_headers(&mut self, list: List) -> Result<(), Error> { let ptr = list.raw; self.data.header_list = Some(list); self.setopt_ptr(curl_sys::CURLOPT_HTTPHEADER, ptr as *const _) } // /// Add some headers to send to the HTTP proxy. // /// // /// This function is essentially the same as `http_headers`. // /// // /// By default this option is not set and corresponds to // /// `CURLOPT_PROXYHEADER` // pub fn proxy_headers(&mut self, list: &'a List) -> Result<(), Error> { // self.setopt_ptr(curl_sys::CURLOPT_PROXYHEADER, list.raw as *const _) // } /// Set the contents of the HTTP Cookie header. /// /// Pass a string of the form `name=contents` for one cookie value or /// `name1=val1; name2=val2` for multiple values. /// /// Using this option multiple times will only make the latest string /// override the previous ones. This option will not enable the cookie /// engine, use `cookie_file` or `cookie_jar` to do that. 
/// /// By default this option is not set and corresponds to `CURLOPT_COOKIE`. pub fn cookie(&mut self, cookie: &str) -> Result<(), Error> { let cookie = try!(CString::new(cookie)); self.setopt_str(curl_sys::CURLOPT_COOKIE, &cookie) } /// Set the file name to read cookies from. /// /// The cookie data can be in either the old Netscape / Mozilla cookie data /// format or just regular HTTP headers (Set-Cookie style) dumped to a file. /// /// This also enables the cookie engine, making libcurl parse and send /// cookies on subsequent requests with this handle. /// /// Given an empty or non-existing file or by passing the empty string ("") /// to this option, you can enable the cookie engine without reading any /// initial cookies. /// /// If you use this option multiple times, you just add more files to read. /// Subsequent files will add more cookies. /// /// By default this option is not set and corresponds to /// `CURLOPT_COOKIEFILE`. pub fn cookie_file<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_COOKIEFILE, file.as_ref()) } /// Set the file name to store cookies to. /// /// This will make libcurl write all internally known cookies to the file /// when this handle is dropped. If no cookies are known, no file will be /// created. Specify "-" as filename to instead have the cookies written to /// stdout. Using this option also enables cookies for this session, so if /// you for example follow a location it will make matching cookies get sent /// accordingly. /// /// Note that libcurl doesn't read any cookies from the cookie jar. If you /// want to read cookies from a file, use `cookie_file`. /// /// By default this option is not set and corresponds to /// `CURLOPT_COOKIEJAR`. pub fn cookie_jar<P: AsRef<Path>>(&mut self, file: P) -> Result<(), Error> { self.setopt_path(curl_sys::CURLOPT_COOKIEJAR, file.as_ref()) } /// Start a new cookie session /// /// Marks this as a new cookie "session". 
It will force libcurl to ignore /// all cookies it is about to load that are "session cookies" from the /// previous session. By default, libcurl always stores and loads all /// cookies, independent if they are session cookies or not. Session cookies /// are cookies without expiry date and they are meant to be alive and /// existing for this "session" only. /// /// By default this option is `false` and corresponds to /// `CURLOPT_COOKIESESSION`. pub fn cookie_session(&mut self, session: bool) -> Result<(), Error> { self.setopt_long(curl_sys::CURLOPT_COOKIESESSION, session as c_long) } /// Add to or manipulate cookies held in memory. /// /// Such a cookie can be either a single line in Netscape / Mozilla format /// or just regular HTTP-style header (Set-Cookie: ...) format. This will /// also enable the cookie engine. This adds that single cookie to the /// internal cookie store. /// /// Exercise caution if you are using this option and multiple transfers may /// occur. If you use the Set-Cookie format and don't specify a domain then /// the cookie is sent for any domain (even after redirects are followed) /// and cannot be modified by a server-set cookie. If a server sets a cookie /// of the same name (or maybe you've imported one) then both will be sent /// on a future transfer to that server, likely not what you intended. /// address these issues set a domain in Set-Cookie or use the Netscape /// format. 
    ///
    /// Additionally, there are commands available that perform actions if you
    /// pass in these exact strings:
    ///
    /// * "ALL" - erases all cookies held in memory
    /// * "SESS" - erases all session cookies held in memory
    /// * "FLUSH" - write all known cookies to the specified cookie jar
    /// * "RELOAD" - reread all cookies from the cookie file
    ///
    /// By default this option corresponds to `CURLOPT_COOKIELIST`
    pub fn cookie_list(&mut self, cookie: &str) -> Result<(), Error> {
        let cookie = try!(CString::new(cookie));
        self.setopt_str(curl_sys::CURLOPT_COOKIELIST, &cookie)
    }

    /// Ask for a HTTP GET request.
    ///
    /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`.
    pub fn get(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long)
    }

    // /// Ask for a HTTP GET request.
    // ///
    // /// By default this option is `false` and corresponds to `CURLOPT_HTTPGET`.
    // pub fn http_version(&mut self, vers: &str) -> Result<(), Error> {
    //     self.setopt_long(curl_sys::CURLOPT_HTTPGET, enable as c_long)
    // }

    /// Ignore the content-length header.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_IGNORE_CONTENT_LENGTH`.
    pub fn ignore_content_length(&mut self, ignore: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_IGNORE_CONTENT_LENGTH,
                         ignore as c_long)
    }

    /// Enable or disable HTTP content decoding.
    ///
    /// By default this option is `true` and corresponds to
    /// `CURLOPT_HTTP_CONTENT_DECODING`.
    pub fn http_content_decoding(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_HTTP_CONTENT_DECODING,
                         enable as c_long)
    }

    /// Enable or disable HTTP transfer decoding.
    ///
    /// By default this option is `true` and corresponds to
    /// `CURLOPT_HTTP_TRANSFER_DECODING`.
    pub fn http_transfer_decoding(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING,
                         enable as c_long)
    }

    // /// Timeout for the Expect: 100-continue response
    // ///
    // /// By default this option is 1s and corresponds to
    // /// `CURLOPT_EXPECT_100_TIMEOUT_MS`.
    // pub fn expect_100_timeout(&mut self, enable: bool) -> Result<(), Error> {
    //     self.setopt_long(curl_sys::CURLOPT_HTTP_TRANSFER_DECODING,
    //                      enable as c_long)
    // }

    // /// Wait for pipelining/multiplexing.
    // ///
    // /// Tells libcurl to prefer to wait for a connection to confirm or deny that
    // /// it can do pipelining or multiplexing before continuing.
    // ///
    // /// When about to perform a new transfer that allows pipelining or
    // /// multiplexing, libcurl will check for existing connections to re-use and
    // /// pipeline on. If no such connection exists it will immediately continue
    // /// and create a fresh new connection to use.
    // ///
    // /// By setting this option to `true` - having `pipeline` enabled for the
    // /// multi handle this transfer is associated with - libcurl will instead
    // /// wait for the connection to reveal if it is possible to
    // /// pipeline/multiplex on before it continues. This enables libcurl to much
    // /// better keep the number of connections to a minimum when using pipelining
    // /// or multiplexing protocols.
    // ///
    // /// The effect thus becomes that with this option set, libcurl prefers to
    // /// wait and re-use an existing connection for pipelining rather than the
    // /// opposite: prefer to open a new connection rather than waiting.
    // ///
    // /// The waiting time is as long as it takes for the connection to get up and
    // /// for libcurl to get the necessary response back that informs it about its
    // /// protocol and support level.
    // pub fn http_pipewait(&mut self, enable: bool) -> Result<(), Error> {
    // }

    // =========================================================================
    // Protocol Options

    /// Indicates the range that this request should retrieve.
    ///
    /// The string provided should be of the form `N-M` where either `N` or `M`
    /// can be left out. For HTTP transfers multiple ranges separated by commas
    /// are also accepted.
    ///
    /// By default this option is not set and corresponds to `CURLOPT_RANGE`.
    pub fn range(&mut self, range: &str) -> Result<(), Error> {
        let range = try!(CString::new(range));
        self.setopt_str(curl_sys::CURLOPT_RANGE, &range)
    }

    /// Set a point to resume transfer from
    ///
    /// Specify the offset in bytes you want the transfer to start from.
    ///
    /// By default this option is 0 and corresponds to
    /// `CURLOPT_RESUME_FROM_LARGE`.
    pub fn resume_from(&mut self, from: u64) -> Result<(), Error> {
        self.setopt_off_t(curl_sys::CURLOPT_RESUME_FROM_LARGE,
                          from as curl_sys::curl_off_t)
    }

    /// Set a custom request string
    ///
    /// Specifies that a custom request will be made (e.g. a custom HTTP
    /// method). This does not change how libcurl performs internally, just
    /// changes the string sent to the server.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_CUSTOMREQUEST`.
    pub fn custom_request(&mut self, request: &str) -> Result<(), Error> {
        let request = try!(CString::new(request));
        self.setopt_str(curl_sys::CURLOPT_CUSTOMREQUEST, &request)
    }

    /// Get the modification time of the remote resource
    ///
    /// If true, libcurl will attempt to get the modification time of the
    /// remote document in this operation. This requires that the remote server
    /// sends the time or replies to a time querying command. The `filetime`
    /// function can be used after a transfer to extract the received time (if
    /// any).
    ///
    /// By default this option is `false` and corresponds to `CURLOPT_FILETIME`
    pub fn fetch_filetime(&mut self, fetch: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_FILETIME, fetch as c_long)
    }

    /// Indicate whether to download the request without getting the body
    ///
    /// This is useful, for example, for doing a HEAD request.
    ///
    /// By default this option is `false` and corresponds to `CURLOPT_NOBODY`.
    pub fn nobody(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_NOBODY, enable as c_long)
    }

    /// Set the size of the input file to send off.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_INFILESIZE_LARGE`.
    pub fn in_filesize(&mut self, size: u64) -> Result<(), Error> {
        self.setopt_off_t(curl_sys::CURLOPT_INFILESIZE_LARGE,
                          size as curl_sys::curl_off_t)
    }

    /// Enable or disable data upload.
    ///
    /// This means that a PUT request will be made for HTTP and probably wants
    /// to be combined with the read callback as well as the `in_filesize`
    /// method.
    ///
    /// By default this option is `false` and corresponds to `CURLOPT_UPLOAD`.
    pub fn upload(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_UPLOAD, enable as c_long)
    }

    /// Configure the maximum file size to download.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_MAXFILESIZE_LARGE`.
    pub fn max_filesize(&mut self, size: u64) -> Result<(), Error> {
        self.setopt_off_t(curl_sys::CURLOPT_MAXFILESIZE_LARGE,
                          size as curl_sys::curl_off_t)
    }

    /// Selects a condition for a time request.
    ///
    /// This value indicates how the `time_value` option is interpreted.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_TIMECONDITION`.
    pub fn time_condition(&mut self, cond: TimeCondition) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_TIMECONDITION, cond as c_long)
    }

    /// Sets the time value for a conditional request.
    ///
    /// The value here should be the number of seconds elapsed since January 1,
    /// 1970. To pass how to interpret this value, use `time_condition`.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_TIMEVALUE`.
    pub fn time_value(&mut self, val: i64) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_TIMEVALUE, val as c_long)
    }

    // =========================================================================
    // Connection Options

    /// Set maximum time the request is allowed to take.
    ///
    /// Normally, name lookups can take a considerable time and limiting
    /// operations to less than a few minutes risk aborting perfectly normal
    /// operations.
    ///
    /// If libcurl is built to use the standard system name resolver, that
    /// portion of the transfer will still use full-second resolution for
    /// timeouts with a minimum timeout allowed of one second.
    ///
    /// In unix-like systems, this might cause signals to be used unless
    /// `nosignal` is set.
    ///
    /// Since this puts a hard limit for how long time a request is allowed to
    /// take, it has limited use in dynamic use cases with varying transfer
    /// times. You are then advised to explore `low_speed_limit`,
    /// `low_speed_time` or using `progress_function` to implement your own
    /// timeout logic.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_TIMEOUT_MS`.
    pub fn timeout(&mut self, timeout: Duration) -> Result<(), Error> {
        // TODO: checked arithmetic and casts
        // TODO: use CURLOPT_TIMEOUT if the timeout is too great
        let ms = timeout.as_secs() * 1000 +
                 (timeout.subsec_nanos() / 1_000_000) as u64;
        self.setopt_long(curl_sys::CURLOPT_TIMEOUT_MS, ms as c_long)
    }

    /// Set the low speed limit in bytes per second.
    ///
    /// This specifies the average transfer speed in bytes per second that the
    /// transfer should be below during `low_speed_time` for libcurl to consider
    /// it to be too slow and abort.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_LOW_SPEED_LIMIT`.
    pub fn low_speed_limit(&mut self, limit: u32) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_LIMIT, limit as c_long)
    }

    /// Set the low speed time period.
    ///
    /// Specifies the window of time for which if the transfer rate is below
    /// `low_speed_limit` the request will be aborted.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_LOW_SPEED_TIME`.
    pub fn low_speed_time(&mut self, dur: Duration) -> Result<(), Error> {
        // NOTE(review): sub-second precision of `dur` is discarded here
        self.setopt_long(curl_sys::CURLOPT_LOW_SPEED_TIME,
                         dur.as_secs() as c_long)
    }

    /// Rate limit data upload speed
    ///
    /// If an upload exceeds this speed (counted in bytes per second) on
    /// cumulative average during the transfer, the transfer will pause to keep
    /// the average rate less than or equal to the parameter value.
    ///
    /// By default this option is not set (unlimited speed) and corresponds to
    /// `CURLOPT_MAX_SEND_SPEED_LARGE`.
    pub fn max_send_speed(&mut self, speed: u64) -> Result<(), Error> {
        self.setopt_off_t(curl_sys::CURLOPT_MAX_SEND_SPEED_LARGE,
                          speed as curl_sys::curl_off_t)
    }

    /// Rate limit data download speed
    ///
    /// If a download exceeds this speed (counted in bytes per second) on
    /// cumulative average during the transfer, the transfer will pause to keep
    /// the average rate less than or equal to the parameter value.
    ///
    /// By default this option is not set (unlimited speed) and corresponds to
    /// `CURLOPT_MAX_RECV_SPEED_LARGE`.
    pub fn max_recv_speed(&mut self, speed: u64) -> Result<(), Error> {
        self.setopt_off_t(curl_sys::CURLOPT_MAX_RECV_SPEED_LARGE,
                          speed as curl_sys::curl_off_t)
    }

    /// Set the maximum connection cache size.
    ///
    /// The set amount will be the maximum number of simultaneously open
    /// persistent connections that libcurl may cache in the pool associated
    /// with this handle. The default is 5, and there isn't much point in
    /// changing this value unless you are perfectly aware of how this works and
    /// changes libcurl's behaviour. This concerns connections using any of the
    /// protocols that support persistent connections.
    ///
    /// When reaching the maximum limit, curl closes the oldest one in the cache
    /// to prevent increasing the number of open connections.
    ///
    /// By default this option is set to 5 and corresponds to
    /// `CURLOPT_MAXCONNECTS`
    pub fn max_connects(&mut self, max: u32) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_MAXCONNECTS, max as c_long)
    }

    /// Force a new connection to be used.
    ///
    /// Makes the next transfer use a new (fresh) connection by force instead of
    /// trying to re-use an existing one. This option should be used with
    /// caution and only if you understand what it does as it may seriously
    /// impact performance.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_FRESH_CONNECT`.
    pub fn fresh_connect(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_FRESH_CONNECT, enable as c_long)
    }

    /// Make connection get closed at once after use.
    ///
    /// Makes libcurl explicitly close the connection when done with the
    /// transfer. Normally, libcurl keeps all connections alive when done with
    /// one transfer in case a succeeding one follows that can re-use them.
    /// This option should be used with caution and only if you understand what
    /// it does as it can seriously impact performance.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_FORBID_REUSE`.
    pub fn forbid_reuse(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_FORBID_REUSE, enable as c_long)
    }

    /// Timeout for the connect phase
    ///
    /// This is the maximum time that you allow the connection phase to the
    /// server to take. This only limits the connection phase, it has no impact
    /// once it has connected.
    ///
    /// By default this value is 300 seconds and corresponds to
    /// `CURLOPT_CONNECTTIMEOUT_MS`.
    pub fn connect_timeout(&mut self, timeout: Duration) -> Result<(), Error> {
        let ms = timeout.as_secs() * 1000 +
                 (timeout.subsec_nanos() / 1_000_000) as u64;
        self.setopt_long(curl_sys::CURLOPT_CONNECTTIMEOUT_MS, ms as c_long)
    }

    /// Specify which IP protocol version to use
    ///
    /// Allows an application to select what kind of IP addresses to use when
    /// resolving host names. This is only interesting when using host names
    /// that resolve addresses using more than one version of IP.
    ///
    /// By default this value is "any" and corresponds to `CURLOPT_IPRESOLVE`.
    pub fn ip_resolve(&mut self, resolve: IpResolve) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_IPRESOLVE, resolve as c_long)
    }

    /// Configure whether to stop when connected to target server
    ///
    /// When enabled it tells the library to perform all the required proxy
    /// authentication and connection setup, but no data transfer, and then
    /// return.
    ///
    /// The option can be used to simply test a connection to a server.
    ///
    /// By default this value is `false` and corresponds to
    /// `CURLOPT_CONNECT_ONLY`.
    pub fn connect_only(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_CONNECT_ONLY, enable as c_long)
    }

    // /// Set interface to speak DNS over.
    // ///
    // /// Set the name of the network interface that the DNS resolver should bind
    // /// to. This must be an interface name (not an address).
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_DNS_INTERFACE`.
    // pub fn dns_interface(&mut self, interface: &str) -> Result<(), Error> {
    //     let interface = try!(CString::new(interface));
    //     self.setopt_str(curl_sys::CURLOPT_DNS_INTERFACE, &interface)
    // }
    //
    // /// IPv4 address to bind DNS resolves to
    // ///
    // /// Set the local IPv4 address that the resolver should bind to.
    // /// The argument should be of type char * and contain a single numerical
    // /// IPv4 address as a string.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_DNS_LOCAL_IP4`.
    // pub fn dns_local_ip4(&mut self, ip: &str) -> Result<(), Error> {
    //     let ip = try!(CString::new(ip));
    //     self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP4, &ip)
    // }
    //
    // /// IPv6 address to bind DNS resolves to
    // ///
    // /// Set the local IPv6 address that the resolver should bind to. The
    // /// argument should be of type char * and contain a single numerical IPv6
    // /// address as a string.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_DNS_LOCAL_IP6`.
    // pub fn dns_local_ip6(&mut self, ip: &str) -> Result<(), Error> {
    //     let ip = try!(CString::new(ip));
    //     self.setopt_str(curl_sys::CURLOPT_DNS_LOCAL_IP6, &ip)
    // }
    //
    // /// Set preferred DNS servers.
    // ///
    // /// Provides a list of DNS servers to be used instead of the system default.
    // /// The format of the dns servers option is:
    // ///
    // /// ```text
    // /// host[:port],[host[:port]]...
    // /// ```
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_DNS_SERVERS`.
    // pub fn dns_servers(&mut self, servers: &str) -> Result<(), Error> {
    //     let servers = try!(CString::new(servers));
    //     self.setopt_str(curl_sys::CURLOPT_DNS_SERVERS, &servers)
    // }

    // =========================================================================
    // SSL/Security Options

    /// Sets the SSL client certificate.
    ///
    /// The string should be the file name of your client certificate. The
    /// default format is "P12" on Secure Transport and "PEM" on other engines,
    /// and can be changed with `ssl_cert_type`.
    ///
    /// With NSS or Secure Transport, this can also be the nickname of the
    /// certificate you wish to authenticate with as it is named in the security
    /// database. If you want to use a file from the current directory, please
    /// precede it with "./" prefix, in order to avoid confusion with a
    /// nickname.
    ///
    /// When using a client certificate, you most likely also need to provide a
    /// private key with `ssl_key`.
    ///
    /// By default this option is not set and corresponds to `CURLOPT_SSLCERT`.
    pub fn ssl_cert<P: AsRef<Path>>(&mut self, cert: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_SSLCERT, cert.as_ref())
    }

    /// Specify type of the client SSL certificate.
    ///
    /// The string should be the format of your certificate. Supported formats
    /// are "PEM" and "DER", except with Secure Transport. OpenSSL (versions
    /// 0.9.3 and later) and Secure Transport (on iOS 5 or later, or OS X 10.7
    /// or later) also support "P12" for PKCS#12-encoded files.
    ///
    /// By default this option is "PEM" and corresponds to
    /// `CURLOPT_SSLCERTTYPE`.
    pub fn ssl_cert_type(&mut self, kind: &str) -> Result<(), Error> {
        let kind = try!(CString::new(kind));
        self.setopt_str(curl_sys::CURLOPT_SSLCERTTYPE, &kind)
    }

    /// Specify private keyfile for TLS and SSL client cert.
    ///
    /// The string should be the file name of your private key. The default
    /// format is "PEM" and can be changed with `ssl_key_type`.
    ///
    /// (iOS and Mac OS X only) This option is ignored if curl was built against
    /// Secure Transport. Secure Transport expects the private key to be already
    /// present in the keychain or PKCS#12 file containing the certificate.
    ///
    /// By default this option is not set and corresponds to `CURLOPT_SSLKEY`.
    pub fn ssl_key<P: AsRef<Path>>(&mut self, key: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_SSLKEY, key.as_ref())
    }

    /// Set type of the private key file.
    ///
    /// The string should be the format of your private key. Supported formats
    /// are "PEM", "DER" and "ENG".
    ///
    /// The format "ENG" enables you to load the private key from a crypto
    /// engine. In this case `ssl_key` is used as an identifier passed to
    /// the engine. You have to set the crypto engine with `ssl_engine`.
    /// "DER" format key file currently does not work because of a bug in
    /// OpenSSL.
    ///
    /// By default this option is "PEM" and corresponds to
    /// `CURLOPT_SSLKEYTYPE`.
    pub fn ssl_key_type(&mut self, kind: &str) -> Result<(), Error> {
        let kind = try!(CString::new(kind));
        self.setopt_str(curl_sys::CURLOPT_SSLKEYTYPE, &kind)
    }

    /// Set passphrase to private key.
    ///
    /// This will be used as the password required to use the `ssl_key`.
    /// You never needed a pass phrase to load a certificate but you need one to
    /// load your private key.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_KEYPASSWD`.
    pub fn key_password(&mut self, password: &str) -> Result<(), Error> {
        let password = try!(CString::new(password));
        self.setopt_str(curl_sys::CURLOPT_KEYPASSWD, &password)
    }

    /// Set the SSL engine identifier.
    ///
    /// This will be used as the identifier for the crypto engine you want to
    /// use for your private key.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_SSLENGINE`.
    pub fn ssl_engine(&mut self, engine: &str) -> Result<(), Error> {
        let engine = try!(CString::new(engine));
        self.setopt_str(curl_sys::CURLOPT_SSLENGINE, &engine)
    }

    /// Make this handle's SSL engine the default.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_SSLENGINE_DEFAULT`.
    pub fn ssl_engine_default(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long)
    }

    // /// Enable TLS false start.
    // ///
    // /// This option determines whether libcurl should use false start during the
    // /// TLS handshake. False start is a mode where a TLS client will start
    // /// sending application data before verifying the server's Finished message,
    // /// thus saving a round trip when performing a full handshake.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_SSL_FALSESTART`.
    // pub fn ssl_false_start(&mut self, enable: bool) -> Result<(), Error> {
    //     self.setopt_long(curl_sys::CURLOPT_SSLENGINE_DEFAULT, enable as c_long)
    // }

    /// Set preferred TLS/SSL version.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_SSLVERSION`.
    pub fn ssl_version(&mut self, version: SslVersion) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_SSLVERSION, version as c_long)
    }

    /// Verify the certificate's name against host.
    ///
    /// This should be disabled with great caution! It basically disables the
    /// security features of SSL if it is disabled.
    ///
    /// By default this option is set to `true` and corresponds to
    /// `CURLOPT_SSL_VERIFYHOST`.
    pub fn ssl_verify_host(&mut self, verify: bool) -> Result<(), Error> {
        // The "enabled" value for this libcurl option is 2, not 1
        let val = if verify {2} else {0};
        self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYHOST, val)
    }

    /// Verify the peer's SSL certificate.
    ///
    /// This should be disabled with great caution! It basically disables the
    /// security features of SSL if it is disabled.
    ///
    /// By default this option is set to `true` and corresponds to
    /// `CURLOPT_SSL_VERIFYPEER`.
    pub fn ssl_verify_peer(&mut self, verify: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYPEER, verify as c_long)
    }

    // /// Verify the certificate's status.
    // ///
    // /// This option determines whether libcurl verifies the status of the server
    // /// cert using the "Certificate Status Request" TLS extension (aka. OCSP
    // /// stapling).
    // ///
    // /// By default this option is set to `false` and corresponds to
    // /// `CURLOPT_SSL_VERIFYSTATUS`.
    // pub fn ssl_verify_status(&mut self, verify: bool) -> Result<(), Error> {
    //     self.setopt_long(curl_sys::CURLOPT_SSL_VERIFYSTATUS, verify as c_long)
    // }

    /// Specify the path to Certificate Authority (CA) bundle
    ///
    /// The file referenced should hold one or more certificates to verify the
    /// peer with.
    ///
    /// This option is by default set to the system path where libcurl's cacert
    /// bundle is assumed to be stored, as established at build time.
    ///
    /// If curl is built against the NSS SSL library, the NSS PEM PKCS#11 module
    /// (libnsspem.so) needs to be available for this option to work properly.
    ///
    /// By default this option is the system defaults, and corresponds to
    /// `CURLOPT_CAINFO`.
    pub fn cainfo<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_CAINFO, path.as_ref())
    }

    /// Set the issuer SSL certificate filename
    ///
    /// Specifies a file holding a CA certificate in PEM format. If the option
    /// is set, an additional check against the peer certificate is performed to
    /// verify the issuer is indeed the one associated with the certificate
    /// provided by the option. This additional check is useful in multi-level
    /// PKI where one needs to enforce that the peer certificate is from a
    /// specific branch of the tree.
    ///
    /// This option makes sense only when used in combination with the
    /// `ssl_verify_peer` option. Otherwise, the result of the check is not
    /// considered as failure.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_ISSUERCERT`.
    pub fn issuer_cert<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_ISSUERCERT, path.as_ref())
    }

    /// Specify directory holding CA certificates
    ///
    /// Names a directory holding multiple CA certificates to verify the peer
    /// with. If libcurl is built against OpenSSL, the certificate directory
    /// must be prepared using the openssl c_rehash utility. This makes sense
    /// only when used in combination with the `ssl_verify_peer` option.
    ///
    /// By default this option is not set and corresponds to `CURLOPT_CAPATH`.
    pub fn capath<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_CAPATH, path.as_ref())
    }

    /// Specify a Certificate Revocation List file
    ///
    /// Names a file with the concatenation of CRL (in PEM format) to use in the
    /// certificate validation that occurs during the SSL exchange.
    ///
    /// When curl is built to use NSS or GnuTLS, there is no way to influence
    /// the use of CRL passed to help in the verification process. When libcurl
    /// is built with OpenSSL support, X509_V_FLAG_CRL_CHECK and
    /// X509_V_FLAG_CRL_CHECK_ALL are both set, requiring CRL check against all
    /// the elements of the certificate chain if a CRL file is passed.
    ///
    /// This option makes sense only when used in combination with the
    /// `ssl_verify_peer` option.
    ///
    /// A specific error code (`is_ssl_crl_badfile`) is defined with the
    /// option. It is returned when the SSL exchange fails because the CRL file
    /// cannot be loaded. A failure in certificate verification due to a
    /// revocation information found in the CRL does not trigger this specific
    /// error.
    ///
    /// By default this option is not set and corresponds to `CURLOPT_CRLFILE`.
    pub fn crlfile<P: AsRef<Path>>(&mut self, path: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_CRLFILE, path.as_ref())
    }

    /// Request SSL certificate information
    ///
    /// Enable libcurl's certificate chain info gatherer. With this enabled,
    /// libcurl will extract lots of information and data about the certificates
    /// in the certificate chain used in the SSL connection.
    ///
    /// By default this option is `false` and corresponds to
    /// `CURLOPT_CERTINFO`.
    pub fn certinfo(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
    }

    // /// Set pinned public key.
    // ///
    // /// Pass a pointer to a zero terminated string as parameter. The string can
    // /// be the file name of your pinned public key. The file format expected is
    // /// "PEM" or "DER". The string can also be any number of base64 encoded
    // /// sha256 hashes preceded by "sha256//" and separated by ";"
    // ///
    // /// When negotiating a TLS or SSL connection, the server sends a certificate
    // /// indicating its identity. A public key is extracted from this certificate
    // /// and if it does not exactly match the public key provided to this option,
    // /// curl will abort the connection before sending or receiving any data.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_PINNEDPUBLICKEY`.
    // pub fn pinned_public_key(&mut self, enable: bool) -> Result<(), Error> {
    //     self.setopt_long(curl_sys::CURLOPT_CERTINFO, enable as c_long)
    // }

    /// Specify a source for random data
    ///
    /// The file will be used to read from to seed the random engine for SSL and
    /// more.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_RANDOM_FILE`.
    pub fn random_file<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_RANDOM_FILE, p.as_ref())
    }

    /// Specify EGD socket path.
    ///
    /// Indicates the path name to the Entropy Gathering Daemon socket. It will
    /// be used to seed the random engine for SSL.
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_EGDSOCKET`.
    pub fn egd_socket<P: AsRef<Path>>(&mut self, p: P) -> Result<(), Error> {
        self.setopt_path(curl_sys::CURLOPT_EGDSOCKET, p.as_ref())
    }

    /// Specify ciphers to use for TLS.
    ///
    /// Holds the list of ciphers to use for the SSL connection. The list must
    /// be syntactically correct, it consists of one or more cipher strings
    /// separated by colons. Commas or spaces are also acceptable separators
    /// but colons are normally used, !, - and + can be used as operators.
    ///
    /// For OpenSSL and GnuTLS valid examples of cipher lists include 'RC4-SHA',
    /// 'SHA1+DES', 'TLSv1' and 'DEFAULT'. The default list is normally set when
    /// you compile OpenSSL.
    ///
    /// You'll find more details about cipher lists on this URL:
    ///
    /// https://www.openssl.org/docs/apps/ciphers.html
    ///
    /// For NSS, valid examples of cipher lists include 'rsa_rc4_128_md5',
    /// 'rsa_aes_128_sha', etc. With NSS you don't add/remove ciphers. If one
    /// uses this option then all known ciphers are disabled and only those
    /// passed in are enabled.
    ///
    /// You'll find more details about the NSS cipher lists on this URL:
    ///
    /// http://git.fedorahosted.org/cgit/mod_nss.git/plain/docs/mod_nss.html#Directives
    ///
    /// By default this option is not set and corresponds to
    /// `CURLOPT_SSL_CIPHER_LIST`.
    pub fn ssl_cipher_list(&mut self, ciphers: &str) -> Result<(), Error> {
        let ciphers = try!(CString::new(ciphers));
        self.setopt_str(curl_sys::CURLOPT_SSL_CIPHER_LIST, &ciphers)
    }

    /// Enable or disable use of the SSL session-ID cache
    ///
    /// By default all transfers are done using the cache enabled. While nothing
    /// ever should get hurt by attempting to reuse SSL session-IDs, there seem
    /// to be or have been broken SSL implementations in the wild that may
    /// require you to disable this in order for you to succeed.
    ///
    /// This corresponds to the `CURLOPT_SSL_SESSIONID_CACHE` option.
    pub fn ssl_sessionid_cache(&mut self, enable: bool) -> Result<(), Error> {
        self.setopt_long(curl_sys::CURLOPT_SSL_SESSIONID_CACHE,
                         enable as c_long)
    }

    // /// Stores a private pointer-sized piece of data.
    // ///
    // /// This can be retrieved through the `private` function and otherwise
    // /// libcurl does not tamper with this value. This corresponds to
    // /// `CURLOPT_PRIVATE` and defaults to 0.
    // pub fn set_private(&mut self, private: usize) -> Result<(), Error> {
    //     self.setopt_ptr(curl_sys::CURLOPT_PRIVATE, private as *const _)
    // }
    //
    // /// Fetches this handle's private pointer-sized piece of data.
    // ///
    // /// This corresponds to `CURLINFO_PRIVATE` and defaults to 0.
// pub fn private(&mut self) -> Result<usize, Error> { // self.getopt_ptr(curl_sys::CURLINFO_PRIVATE).map(|p| p as usize) // } // ========================================================================= // getters /// Get the last used URL /// /// In cases when you've asked libcurl to follow redirects, it may /// not be the same value you set with `url`. /// /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. /// /// Returns `Ok(None)` if no effective url is listed or `Err` if an error /// happens or the underlying bytes aren't valid utf-8. pub fn effective_url(&mut self) -> Result<Option<&str>, Error> { self.getopt_str(curl_sys::CURLINFO_EFFECTIVE_URL) } /// Get the last used URL, in bytes /// /// In cases when you've asked libcurl to follow redirects, it may /// not be the same value you set with `url`. /// /// This methods corresponds to the `CURLINFO_EFFECTIVE_URL` option. /// /// Returns `Ok(None)` if no effective url is listed or `Err` if an error /// happens or the underlying bytes aren't valid utf-8. pub fn effective_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> { self.getopt_bytes(curl_sys::CURLINFO_EFFECTIVE_URL) } /// Get the last response code /// /// The stored value will be zero if no server response code has been /// received. Note that a proxy's CONNECT response should be read with /// `http_connectcode` and not this. /// /// Corresponds to `CURLINFO_RESPONSE_CODE` and returns an error if this /// option is not supported. pub fn response_code(&mut self) -> Result<u32, Error> { self.getopt_long(curl_sys::CURLINFO_RESPONSE_CODE).map(|c| c as u32) } /// Get the CONNECT response code /// /// Returns the last received HTTP proxy response code to a CONNECT request. /// The returned value will be zero if no such response code was available. /// /// Corresponds to `CURLINFO_HTTP_CONNECTCODE` and returns an error if this /// option is not supported. 
pub fn http_connectcode(&mut self) -> Result<u32, Error> { self.getopt_long(curl_sys::CURLINFO_HTTP_CONNECTCODE).map(|c| c as u32) } /// Get the remote time of the retrieved document /// /// Returns the remote time of the retrieved document (in number of seconds /// since 1 Jan 1970 in the GMT/UTC time zone). If you get `None`, it can be /// because of many reasons (it might be unknown, the server might hide it /// or the server doesn't support the command that tells document time etc) /// and the time of the document is unknown. /// /// Note that you must tell the server to collect this information before /// the transfer is made, by using the `filetime` method to /// or you will unconditionally get a `None` back. /// /// This corresponds to `CURLINFO_FILETIME` and may return an error if the /// option is not supported pub fn filetime(&mut self) -> Result<Option<i64>, Error> { self.getopt_long(curl_sys::CURLINFO_FILETIME).map(|r| { if r == -1 { None } else { Some(r as i64) } }) } /// Get the number of redirects /// /// Corresponds to `CURLINFO_REDIRECT_COUNT` and may return an error if the /// option isn't supported. pub fn redirect_count(&mut self) -> Result<u32, Error> { self.getopt_long(curl_sys::CURLINFO_REDIRECT_COUNT).map(|c| c as u32) } /// Get the URL a redirect would go to /// /// Returns the URL a redirect would take you to if you would enable /// `follow_location`. This can come very handy if you think using the /// built-in libcurl redirect logic isn't good enough for you but you would /// still prefer to avoid implementing all the magic of figuring out the new /// URL. /// /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error if the /// url isn't valid utf-8 or an error happens. pub fn redirect_url(&mut self) -> Result<Option<&str>, Error> { self.getopt_str(curl_sys::CURLINFO_REDIRECT_URL) } /// Get the URL a redirect would go to, in bytes /// /// Returns the URL a redirect would take you to if you would enable /// `follow_location`. 
This can come very handy if you think using the /// built-in libcurl redirect logic isn't good enough for you but you would /// still prefer to avoid implementing all the magic of figuring out the new /// URL. /// /// Corresponds to `CURLINFO_REDIRECT_URL` and may return an error. pub fn redirect_url_bytes(&mut self) -> Result<Option<&[u8]>, Error> { self.getopt_bytes(curl_sys::CURLINFO_REDIRECT_URL) } /// Get size of retrieved headers /// /// Corresponds to `CURLINFO_HEADER_SIZE` and may return an error if the /// option isn't supported. pub fn header_size(&mut self) -> Result<u64, Error> { self.getopt_long(curl_sys::CURLINFO_HEADER_SIZE).map(|c| c as u64) } /// Get size of sent request. /// /// Corresponds to `CURLINFO_REQUEST_SIZE` and may return an error if the /// option isn't supported. pub fn request_size(&mut self) -> Result<u64, Error> { self.getopt_long(curl_sys::CURLINFO_REQUEST_SIZE).map(|c| c as u64) } /// Get Content-Type /// /// Returns the content-type of the downloaded object. This is the value /// read from the Content-Type: field. If you get `None`, it means that the /// server didn't send a valid Content-Type header or that the protocol /// used doesn't support this. /// /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the /// option isn't supported. pub fn content_type(&mut self) -> Result<Option<&str>, Error> { self.getopt_str(curl_sys::CURLINFO_CONTENT_TYPE) } /// Get Content-Type, in bytes /// /// Returns the content-type of the downloaded object. This is the value /// read from the Content-Type: field. If you get `None`, it means that the /// server didn't send a valid Content-Type header or that the protocol /// used doesn't support this. /// /// Corresponds to `CURLINFO_CONTENT_TYPE` and may return an error if the /// option isn't supported. pub fn content_type_bytes(&mut self) -> Result<Option<&[u8]>, Error> { self.getopt_bytes(curl_sys::CURLINFO_CONTENT_TYPE) } /// Get errno number from last connect failure. 
/// /// Note that the value is only set on failure, it is not reset upon a /// successful operation. The number is OS and system specific. /// /// Corresponds to `CURLINFO_OS_ERRNO` and may return an error if the /// option isn't supported. pub fn os_errno(&mut self) -> Result<i32, Error> { self.getopt_long(curl_sys::CURLINFO_OS_ERRNO).map(|c| c as i32) } /// Get IP address of last connection. /// /// Returns a string holding the IP address of the most recent connection /// done with this curl handle. This string may be IPv6 when that is /// enabled. /// /// Corresponds to `CURLINFO_PRIMARY_IP` and may return an error if the /// option isn't supported. pub fn primary_ip(&mut self) -> Result<Option<&str>, Error> { self.getopt_str(curl_sys::CURLINFO_PRIMARY_IP) } /// Get the latest destination port number /// /// Corresponds to `CURLINFO_PRIMARY_PORT` and may return an error if the /// option isn't supported. pub fn primary_port(&mut self) -> Result<u16, Error> { self.getopt_long(curl_sys::CURLINFO_PRIMARY_PORT).map(|c| c as u16) } /// Get local IP address of last connection /// /// Returns a string holding the IP address of the local end of most recent /// connection done with this curl handle. This string may be IPv6 when that /// is enabled. /// /// Corresponds to `CURLINFO_LOCAL_IP` and may return an error if the /// option isn't supported. pub fn local_ip(&mut self) -> Result<Option<&str>, Error> { self.getopt_str(curl_sys::CURLINFO_LOCAL_IP) } /// Get the latest local port number /// /// Corresponds to `CURLINFO_LOCAL_PORT` and may return an error if the /// option isn't supported. pub fn local_port(&mut self) -> Result<u16, Error> { self.getopt_long(curl_sys::CURLINFO_LOCAL_PORT).map(|c| c as u16) } /// Get all known cookies /// /// Returns a linked-list of all cookies cURL knows (expired ones, too). /// /// Corresponds to the `CURLINFO_COOKIELIST` option and may return an error /// if the option isn't supported. 
    pub fn cookies(&mut self) -> Result<List, Error> {
        unsafe {
            let mut list = 0 as *mut _;
            let rc = curl_sys::curl_easy_getinfo(self.handle,
                                                 curl_sys::CURLINFO_COOKIELIST,
                                                 &mut list);
            try!(self.cvt(rc));
            // Ownership of the slist passes to the returned `List`; its
            // `Drop` impl frees it with `curl_slist_free_all`.
            Ok(List { raw: list })
        }
    }

    // =========================================================================
    // Other methods

    /// After options have been set, this will perform the transfer described by
    /// the options.
    ///
    /// This performs the request in a synchronous fashion. This can be used
    /// multiple times for one easy handle and libcurl will attempt to re-use
    /// the same connection for all transfers.
    ///
    /// This method will preserve all options configured in this handle for the
    /// next request, and if that is not desired then the options can be
    /// manually reset or the `reset` method can be called.
    ///
    /// Note that this method takes `&self`, which is quite important! This
    /// allows applications to close over the handle in various callbacks to
    /// call methods like `unpause_write` and `unpause_read` while a transfer is
    /// in progress.
    pub fn perform(&self) -> Result<(), Error> {
        // Drop any callbacks that may have been registered by a leaked
        // `Transfer` before running; see the comment in `transfer` below.
        unsafe {
            self.reset_scoped_configuration();
        }
        self.do_perform()
    }

    // Shared implementation of `perform` used both here and by
    // `Transfer::perform` (which must NOT reset scoped configuration).
    fn do_perform(&self) -> Result<(), Error> {
        // Reject re-entrant performs (e.g. from within a callback).
        if self.data.running.get() {
            return Err(Error::new(curl_sys::CURLE_FAILED_INIT))
        }

        self.data.running.set(true);
        let ret = unsafe {
            self.cvt(curl_sys::curl_easy_perform(self.handle))
        };
        self.data.running.set(false);
        // Re-raise any panic that was caught inside a callback.
        panic::propagate();
        return ret
    }

    /// Creates a new scoped transfer which can be used to set callbacks and
    /// data which only live for the scope of the returned object.
    ///
    /// An `Easy` handle is often reused between different requests to cache
    /// connections to servers, but often the lifetime of the data as part of
    /// each transfer is unique. This function serves as an ability to share an
    /// `Easy` across many transfers while ergonomically using possibly
    /// stack-local data as part of each transfer.
    ///
    /// Configuration can be set on the `Easy` and then a `Transfer` can be
    /// created to set scoped configuration (like callbacks). Finally, the
    /// `perform` method on the `Transfer` function can be used.
    ///
    /// When the `Transfer` option is dropped then all configuration set on the
    /// transfer itself will be reset.
    pub fn transfer<'data, 'easy>(&'easy mut self) -> Transfer<'easy, 'data> {
        // NB: We need to be *very* careful here about how we treat the
        //     callbacks set on a `Transfer`! It may be possible for that type
        //     to leak, and if we were to continue using the callbacks there
        //     there could possibly be use-after-free as they reference
        //     stack-local data.  As a result, we attempt to be robust in the
        //     face of leaking a `Transfer` (one that didn't drop).
        //
        //     What this basically amounts to is that whenever we poke libcurl
        //     that *might* call one of those callbacks or use some of that
        //     data we clear out everything that would have been set on a
        //     `Transfer` and instead start fresh. This call to
        //     `reset_scoped_configuration` will reset all callbacks based on
        //     the state in *this* handle which we know is still alive, so it's
        //     safe to configure.
        //
        //     Also note that because we have to be resilient in the face of
        //     `Transfer` leaks anyway we just don't bother with a `Drop` impl
        //     and instead rely on this always running to reset any
        //     configuration.
        assert!(!self.data.running.get());
        unsafe {
            self.reset_scoped_configuration();
        }
        Transfer {
            data: Box::new(TransferData::default()),
            easy: self,
        }
    }

    // See note above in `transfer` for what this is doing.
    unsafe fn reset_scoped_configuration(&self) {
        // Exhaustive destructure so a new `EasyData` field forces this
        // function to be revisited.
        let EasyData {
            ref write,
            ref read,
            ref seek,
            ref debug,
            ref header,
            ref progress,
            ref ssl_ctx,
            ref running,
            header_list: _,
            form: _,
            error_buf: _,
        } = *self.data;

        // Can't reset while running, we'll detect this elsewhere
        if running.get() {
            return
        }

        // Pass `EasyData` as the userdata pointer only for callbacks that are
        // actually set; otherwise register a null pointer so the trampoline
        // falls back to its default behavior.
        let ptr = |set| {
            if set {
                &*self.data as *const _ as *mut c_void
            } else {
                0 as *mut _
            }
        };

        let write = ptr(write.is_some());
        let read = ptr(read.is_some());
        let seek = ptr(seek.is_some());
        let debug = ptr(debug.is_some());
        let header = ptr(header.is_some());
        let progress = ptr(progress.is_some());
        let ssl_ctx = ptr(ssl_ctx.is_some());

        let _ = self.set_write_function(easy_write_cb, write);
        let _ = self.set_read_function(easy_read_cb, read);
        let _ = self.set_seek_function(easy_seek_cb, seek);
        let _ = self.set_debug_function(easy_debug_cb, debug);
        let _ = self.set_header_function(easy_header_cb, header);
        let _ = self.set_progress_function(easy_progress_cb, progress);
        let _ = self.set_ssl_ctx_function(easy_ssl_ctx_cb, ssl_ctx);

        // Clear out the post fields which may be referencing stale data.
        // curl_sys::curl_easy_setopt(easy,
        //                            curl_sys::CURLOPT_POSTFIELDS,
        //                            0 as *const i32);
    }

    /// Unpause reading on a connection.
    ///
    /// Using this function, you can explicitly unpause a connection that was
    /// previously paused.
    ///
    /// A connection can be paused by letting the read or the write callbacks
    /// return `ReadError::Pause` or `WriteError::Pause`.
    ///
    /// To unpause, you may for example call this from the progress callback
    /// which gets called at least once per second, even if the connection is
    /// paused.
    ///
    /// The chance is high that you will get your write callback called before
    /// this function returns.
    pub fn unpause_read(&self) -> Result<(), Error> {
        unsafe {
            let rc = curl_sys::curl_easy_pause(self.handle,
                                               curl_sys::CURLPAUSE_RECV_CONT);
            self.cvt(rc)
        }
    }

    /// Unpause writing on a connection.
    ///
    /// Using this function, you can explicitly unpause a connection that was
    /// previously paused.
    ///
    /// A connection can be paused by letting the read or the write callbacks
    /// return `ReadError::Pause` or `WriteError::Pause`. A write callback that
    /// returns pause signals to the library that it couldn't take care of any
    /// data at all, and that data will then be delivered again to the callback
    /// when the writing is later unpaused.
    ///
    /// To unpause, you may for example call this from the progress callback
    /// which gets called at least once per second, even if the connection is
    /// paused.
    pub fn unpause_write(&self) -> Result<(), Error> {
        unsafe {
            let rc = curl_sys::curl_easy_pause(self.handle,
                                               curl_sys::CURLPAUSE_SEND_CONT);
            self.cvt(rc)
        }
    }

    /// URL encodes a string `s`
    pub fn url_encode(&mut self, s: &[u8]) -> String {
        if s.len() == 0 {
            return String::new()
        }
        unsafe {
            let p = curl_sys::curl_easy_escape(self.handle,
                                               s.as_ptr() as *const _,
                                               s.len() as c_int);
            assert!(!p.is_null());
            // curl_easy_escape output is percent-encoded ASCII, so the utf-8
            // conversion here is expected to succeed.
            let ret = str::from_utf8(CStr::from_ptr(p).to_bytes()).unwrap();
            let ret = String::from(ret);
            curl_sys::curl_free(p as *mut _);
            return ret
        }
    }

    /// URL decodes a string `s`, returning `None` if it fails
    pub fn url_decode(&mut self, s: &str) -> Vec<u8> {
        if s.len() == 0 {
            return Vec::new();
        }

        // Work around https://curl.haxx.se/docs/adv_20130622.html, a bug where
        // if the last few characters are a bad escape then curl will have a
        // buffer overrun.
        //
        // The `||` chain below is deliberate: short-circuiting means each
        // `iter.next()` examines the next character from the end, so this
        // tests whether any of the last three characters is a '%'. In that
        // case a NUL byte is appended as scratch space for curl's overrun
        // (orig_len keeps it out of the decoded range).
        let mut iter = s.chars().rev();
        let orig_len = s.len();
        let mut data;
        let mut s = s;
        if iter.next() == Some('%') ||
           iter.next() == Some('%') ||
           iter.next() == Some('%') {
            data = s.to_string();
            data.push(0u8 as char);
            s = &data[..];
        }
        unsafe {
            let mut len = 0;
            let p = curl_sys::curl_easy_unescape(self.handle,
                                                 s.as_ptr() as *const _,
                                                 orig_len as c_int,
                                                 &mut len);
            assert!(!p.is_null());
            let slice = slice::from_raw_parts(p as *const u8, len as usize);
            let ret = slice.to_vec();
            curl_sys::curl_free(p as *mut _);
            return ret
        }
    }

    // TODO: I don't think this is safe, you can drop this which has all the
    //       callback data and then the next is use-after-free
    //
    // /// Attempts to clone this handle, returning a new session handle with
    // /// the same options set for this handle.
    // ///
    // /// Internal state info and things like persistent connections ccannot be
    // /// transferred.
    // ///
    // /// # Errors
    // ///
    // /// If a new handle could not be allocated or another error happens,
    // /// `None` is returned.
    // pub fn try_clone<'b>(&mut self) -> Option<Easy<'b>> {
    //     unsafe {
    //         let handle = curl_sys::curl_easy_duphandle(self.handle);
    //         if handle.is_null() {
    //             None
    //         } else {
    //             Some(Easy {
    //                 handle: handle,
    //                 data: blank_data(),
    //                 _marker: marker::PhantomData,
    //             })
    //         }
    //     }
    // }

    /// Re-initializes this handle to the default values.
    ///
    /// This puts the handle to the same state as it was in when it was just
    /// created. This does, however, keep live connections, the session id
    /// cache, the dns cache, and cookies.
    pub fn reset(&mut self) {
        unsafe {
            curl_sys::curl_easy_reset(self.handle);
        }
        default_configure(self);
    }

    /// Receives data from a connected socket.
    ///
    /// Only useful after a successful `perform` with the `connect_only` option
    /// set as well.
    pub fn recv(&mut self, data: &mut [u8]) -> Result<usize, Error> {
        unsafe {
            let mut n = 0;
            let r = curl_sys::curl_easy_recv(self.handle,
                                             data.as_mut_ptr() as *mut _,
                                             data.len(),
                                             &mut n);
            if r == curl_sys::CURLE_OK {
                Ok(n)
            } else {
                Err(Error::new(r))
            }
        }
    }

    /// Sends data over the connected socket.
    ///
    /// Only useful after a successful `perform` with the `connect_only` option
    /// set as well.
    pub fn send(&mut self, data: &[u8]) -> Result<usize, Error> {
        unsafe {
            let mut n = 0;
            let rc = curl_sys::curl_easy_send(self.handle,
                                              data.as_ptr() as *const _,
                                              data.len(),
                                              &mut n);
            try!(self.cvt(rc));
            Ok(n)
        }
    }

    /// Get a pointer to the raw underlying CURL handle.
    pub fn raw(&self) -> *mut curl_sys::CURL {
        self.handle
    }

    // Set a path-valued option; on unix the raw OS bytes are used directly.
    #[cfg(unix)]
    fn setopt_path(&mut self,
                   opt: curl_sys::CURLoption,
                   val: &Path) -> Result<(), Error> {
        use std::os::unix::prelude::*;
        let s = try!(CString::new(val.as_os_str().as_bytes()));
        self.setopt_str(opt, &s)
    }

    // Set a path-valued option; on windows only unicode paths are accepted.
    #[cfg(windows)]
    fn setopt_path(&mut self, opt: curl_sys::CURLoption, val: &Path)
                   -> Result<(), Error> {
        match val.to_str() {
            Some(s) => self.setopt_str(opt, &try!(CString::new(s))),
            None => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
        }
    }

    fn setopt_long(&mut self,
                   opt: curl_sys::CURLoption,
                   val: c_long) -> Result<(), Error> {
        unsafe {
            self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val))
        }
    }

    fn setopt_str(&mut self,
                  opt: curl_sys::CURLoption,
                  val: &CStr) -> Result<(), Error> {
        self.setopt_ptr(opt, val.as_ptr())
    }

    fn setopt_ptr(&self,
                  opt: curl_sys::CURLoption,
                  val: *const c_char) -> Result<(), Error> {
        unsafe {
            self.cvt(curl_sys::curl_easy_setopt(self.handle, opt, val))
        }
    }

    fn setopt_off_t(&mut self,
                    opt: curl_sys::CURLoption,
                    val: curl_sys::curl_off_t) -> Result<(), Error> {
        unsafe {
            let rc = curl_sys::curl_easy_setopt(self.handle, opt, val);
            self.cvt(rc)
        }
    }

    // Fetch a string-valued piece of info as raw bytes (None for a null
    // pointer). The returned slice borrows libcurl-owned memory tied to
    // `self`'s lifetime.
    fn getopt_bytes(&mut self, opt: curl_sys::CURLINFO)
                    -> Result<Option<&[u8]>, Error> {
        unsafe {
            let p = try!(self.getopt_ptr(opt));
            if p.is_null() {
                Ok(None)
            } else {
                Ok(Some(CStr::from_ptr(p).to_bytes()))
            }
        }
    }

    fn getopt_ptr(&mut self, opt: curl_sys::CURLINFO)
                  -> Result<*const c_char, Error> {
        unsafe {
            let mut p = 0 as *const c_char;
            let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p);
            try!(self.cvt(rc));
            Ok(p)
        }
    }

    // Like `getopt_bytes` but additionally validates utf-8, mapping invalid
    // data to `CURLE_CONV_FAILED`.
    fn getopt_str(&mut self, opt: curl_sys::CURLINFO)
                  -> Result<Option<&str>, Error> {
        match self.getopt_bytes(opt) {
            Ok(None) => Ok(None),
            Err(e) => Err(e),
            Ok(Some(bytes)) => {
                match str::from_utf8(bytes) {
                    Ok(s) => Ok(Some(s)),
                    Err(_) => Err(Error::new(curl_sys::CURLE_CONV_FAILED)),
                }
            }
        }
    }

    fn getopt_long(&mut self, opt: curl_sys::CURLINFO) -> Result<c_long, Error> {
        unsafe {
            let mut p = 0;
            let rc = curl_sys::curl_easy_getinfo(self.handle, opt, &mut p);
            try!(self.cvt(rc));
            Ok(p)
        }
    }

    // Convert a CURLcode into a `Result`, attaching any message libcurl left
    // in the registered error buffer (and clearing the buffer afterwards).
    fn cvt(&self, rc: curl_sys::CURLcode) -> Result<(), Error> {
        if rc == curl_sys::CURLE_OK {
            return Ok(())
        }
        let mut buf = self.data.error_buf.borrow_mut();
        if buf[0] == 0 {
            return Err(Error::new(rc))
        }
        let pos = buf.iter().position(|i| *i == 0).unwrap_or(buf.len());
        let msg = str::from_utf8(&buf[..pos]).expect("non-utf8 error").to_owned();
        buf[0] = 0;
        Err(::error::error_with_extra(rc, msg.into_boxed_str()))
    }
}

// The `easy_*` trampolines below recover an `EasyData` from the userdata
// pointer, the `transfer_*` ones a `TransferData`; the shared `*_cb` helpers
// hold the actual logic.

extern fn easy_write_cb(ptr: *mut c_char,
                        size: size_t,
                        nmemb: size_t,
                        data: *mut c_void) -> size_t {
    write_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut EasyData)).write.as_mut().map(|f| f(buf))
    })
}

extern fn transfer_write_cb(ptr: *mut c_char,
                            size: size_t,
                            nmemb: size_t,
                            data: *mut c_void) -> size_t {
    write_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut TransferData)).write.as_mut().map(|f| f(buf))
    })
}

fn write_cb<F>(ptr: *mut c_char, size: size_t, nmemb: size_t,
               data: *mut c_void, f: F) -> size_t
    where F: FnOnce(&[u8]) -> Option<Result<usize, WriteError>>
{
    // A null userdata means no callback is registered: accept everything.
    if data.is_null() {
        return size * nmemb
    }
    // Panics in the user callback must not unwind into C; `panic::catch`
    // stashes them and `!0` signals an error to libcurl.
    panic::catch(|| unsafe {
        let input = slice::from_raw_parts(ptr as *const u8, size * nmemb);
        match f(input) {
            Some(Ok(s)) => s,
            Some(Err(WriteError::Pause)) |
            Some(Err(WriteError::__Nonexhaustive)) => {
                curl_sys::CURL_WRITEFUNC_PAUSE
            }
            None => !0,
        }
    }).unwrap_or(!0)
}

extern fn easy_read_cb(ptr: *mut c_char,
                       size: size_t,
                       nmemb: size_t,
                       data: *mut c_void) -> size_t {
    read_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut EasyData)).read.as_mut().map(|f| f(buf))
    })
}

extern fn transfer_read_cb(ptr: *mut c_char,
                           size: size_t,
                           nmemb: size_t,
                           data: *mut c_void) -> size_t {
    read_cb(ptr, size, nmemb, data, |buf| unsafe {
        (*(data as *mut TransferData)).read.as_mut().map(|f| f(buf))
    })
}

fn read_cb<F>(ptr: *mut c_char, size: size_t, nmemb: size_t, data: *mut c_void,
              f: F) -> size_t
    where F: FnOnce(&mut [u8]) -> Option<Result<usize, ReadError>>
{
    unsafe {
        // No callback registered: report EOF (0 bytes read).
        if data.is_null() {
            return 0
        }
        let input = slice::from_raw_parts_mut(ptr as *mut u8,
                                              size * nmemb);
        panic::catch(|| {
            match f(input) {
                Some(Ok(s)) => s,
                Some(Err(ReadError::Pause)) => {
                    curl_sys::CURL_READFUNC_PAUSE
                }
                Some(Err(ReadError::__Nonexhaustive)) |
                Some(Err(ReadError::Abort)) => {
                    curl_sys::CURL_READFUNC_ABORT
                }
                None => !0,
            }
        }).unwrap_or(!0)
    }
}

extern fn easy_seek_cb(data: *mut c_void,
                       offset: curl_sys::curl_off_t,
                       origin: c_int) -> c_int {
    seek_cb(data, offset, origin, |s| unsafe {
        (*(data as *mut EasyData)).seek.as_mut().map(|f| f(s))
    })
}

extern fn transfer_seek_cb(data: *mut c_void,
                           offset: curl_sys::curl_off_t,
                           origin: c_int) -> c_int {
    seek_cb(data, offset, origin, |s| unsafe {
        (*(data as *mut TransferData)).seek.as_mut().map(|f| f(s))
    })
}

fn seek_cb<F>(data: *mut c_void,
              offset: curl_sys::curl_off_t,
              origin: c_int,
              f: F) -> c_int
    where F: FnOnce(SeekFrom) -> Option<SeekResult>
{
    if data.is_null() {
        return -1
    }
    panic::catch(|| {
        // Only SEEK_SET is translated; any other origin is treated as a bug
        // (the panic is caught just below and reported as failure).
        let from = if origin == libc::SEEK_SET {
            SeekFrom::Start(offset as u64)
        } else {
            panic!("unknown origin from libcurl: {}", origin);
        };
        match f(from) {
            Some(to) => to as c_int,
            None => -1,
        }
    }).unwrap_or(!0)
}

extern fn easy_progress_cb(data: *mut c_void,
                           dltotal: c_double,
                           dlnow: c_double,
                           ultotal: c_double,
                           ulnow: c_double) -> c_int {
    progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe {
        (*(data as *mut EasyData)).progress.as_mut().map(|f| f(a, b, c, d))
    })
}

extern fn transfer_progress_cb(data: *mut c_void,
                               dltotal: c_double,
                               dlnow: c_double,
                               ultotal: c_double,
                               ulnow: c_double) -> c_int {
    progress_cb(data, dltotal, dlnow, ultotal, ulnow, |a, b, c, d| unsafe {
        (*(data as *mut TransferData)).progress.as_mut().map(|f| f(a, b, c, d))
    })
}

fn progress_cb<F>(data: *mut c_void,
                  dltotal: c_double,
                  dlnow: c_double,
                  ultotal: c_double,
                  ulnow: c_double,
                  f: F) -> c_int
    where F: FnOnce(f64, f64, f64, f64) -> Option<bool>,
{
    if data.is_null() {
        return 0
    }
    // Nonzero return aborts the transfer, so panics and `false` both abort.
    let keep_going = panic::catch(|| {
        f(dltotal, dlnow, ultotal, ulnow).unwrap_or(false)
    }).unwrap_or(false);
    if keep_going {
        0
    } else {
        1
    }
}

extern fn easy_ssl_ctx_cb(handle: *mut curl_sys::CURL,
                          ssl_ctx: *mut c_void,
                          data: *mut c_void) -> curl_sys::CURLcode {
    ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe {
        match (*(data as *mut EasyData)).ssl_ctx.as_mut() {
            Some(f) => f(ssl_ctx),

            // If the callback isn't set we just tell CURL to
            // continue.
            None => Ok(()),
        }
    })
}

extern fn transfer_ssl_ctx_cb(handle: *mut curl_sys::CURL,
                              ssl_ctx: *mut c_void,
                              data: *mut c_void) -> curl_sys::CURLcode {
    ssl_ctx_cb(handle, ssl_ctx, data, |ssl_ctx| unsafe {
        match (*(data as *mut TransferData)).ssl_ctx.as_mut() {
            Some(f) => f(ssl_ctx),

            // If the callback isn't set we just tell CURL to
            // continue.
            None => Ok(()),
        }
    })
}

// TODO: same thing as `debug_cb`: can we expose `handle`?
fn ssl_ctx_cb<F>(_handle: *mut curl_sys::CURL,
                 ssl_ctx: *mut c_void,
                 data: *mut c_void,
                 f: F) -> curl_sys::CURLcode
    where F: FnOnce(*mut c_void) -> Result<(), Error>
{
    if data.is_null() {
        return curl_sys::CURLE_OK;
    }
    let result = panic::catch(|| {
        f(ssl_ctx)
    });
    match result {
        Some(Ok(())) => curl_sys::CURLE_OK,
        Some(Err(e)) => e.code(),
        // Default to a generic SSL error in case of panic. This
        // shouldn't really matter since the error should be
        // propagated later on but better safe than sorry...
        None => curl_sys::CURLE_SSL_CONNECT_ERROR,
    }
}

extern fn easy_debug_cb(handle: *mut curl_sys::CURL,
                        kind: curl_sys::curl_infotype,
                        data: *mut c_char,
                        size: size_t,
                        userptr: *mut c_void) -> c_int {
    debug_cb(handle, kind, data, size, userptr, |a, b| unsafe {
        (*(userptr as *mut EasyData)).debug.as_mut().map(|f| f(a, b))
    })
}

extern fn transfer_debug_cb(handle: *mut curl_sys::CURL,
                            kind: curl_sys::curl_infotype,
                            data: *mut c_char,
                            size: size_t,
                            userptr: *mut c_void) -> c_int {
    debug_cb(handle, kind, data, size, userptr, |a, b| unsafe {
        (*(userptr as *mut TransferData)).debug.as_mut().map(|f| f(a, b))
    })
}

// TODO: expose `handle`? is that safe?
fn debug_cb<F>(_handle: *mut curl_sys::CURL,
               kind: curl_sys::curl_infotype,
               data: *mut c_char,
               size: size_t,
               userptr: *mut c_void,
               f: F) -> c_int
    where F: FnOnce(InfoType, &[u8]) -> Option<()>
{
    if userptr.is_null() {
        return 0
    }
    panic::catch(|| unsafe {
        let data = slice::from_raw_parts(data as *const u8, size);
        let kind = match kind {
            curl_sys::CURLINFO_TEXT => InfoType::Text,
            curl_sys::CURLINFO_HEADER_IN => InfoType::HeaderIn,
            curl_sys::CURLINFO_HEADER_OUT => InfoType::HeaderOut,
            curl_sys::CURLINFO_DATA_IN => InfoType::DataIn,
            curl_sys::CURLINFO_DATA_OUT => InfoType::DataOut,
            curl_sys::CURLINFO_SSL_DATA_IN => InfoType::SslDataIn,
            curl_sys::CURLINFO_SSL_DATA_OUT => InfoType::SslDataOut,
            // Unknown info types are silently ignored.
            _ => return,
        };
        f(kind, data);
    });
    return 0
}

extern fn easy_header_cb(buffer: *mut c_char,
                         size: size_t,
                         nitems: size_t,
                         userptr: *mut c_void) -> size_t {
    header_cb(buffer, size, nitems, userptr, |buf| unsafe {
        (*(userptr as *mut EasyData)).header.as_mut().map(|f| f(buf))
    })
}

extern fn transfer_header_cb(buffer: *mut c_char,
                             size: size_t,
                             nitems: size_t,
                             userptr: *mut c_void) -> size_t {
    header_cb(buffer, size, nitems, userptr, |buf| unsafe {
        (*(userptr as *mut TransferData)).header.as_mut().map(|f| f(buf))
    })
}

fn header_cb<F>(buffer: *mut c_char,
                size: size_t,
                nitems: size_t,
                userptr: *mut c_void,
                f: F) -> size_t
    where F: FnOnce(&[u8]) -> Option<bool>,
{
    if userptr.is_null() {
        return size * nitems
    }
    let keep_going = panic::catch(|| unsafe {
        let data = slice::from_raw_parts(buffer as *const u8, size * nitems);
        f(data).unwrap_or(false)
    }).unwrap_or(false);
    // Returning anything other than the full size aborts the transfer.
    if keep_going {
        size * nitems
    } else {
        !0
    }
}

impl<'easy, 'data> Transfer<'easy, 'data> {
    /// Same as `Easy::write_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn write_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&[u8]) -> Result<usize, WriteError> + 'data
    {
        self.data.write = Some(Box::new(f));
        unsafe {
            self.easy.set_write_function(transfer_write_cb,
                                         &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::read_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn read_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&mut [u8]) -> Result<usize, ReadError> + 'data
    {
        self.data.read = Some(Box::new(f));
        unsafe {
            self.easy.set_read_function(transfer_read_cb,
                                        &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::seek_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn seek_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(SeekFrom) -> SeekResult + 'data
    {
        self.data.seek = Some(Box::new(f));
        unsafe {
            self.easy.set_seek_function(transfer_seek_cb,
                                        &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::progress_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn progress_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(f64, f64, f64, f64) -> bool + 'data
    {
        self.data.progress = Some(Box::new(f));
        unsafe {
            self.easy.set_progress_function(transfer_progress_cb,
                                            &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::ssl_ctx_function`, just takes a non `'static`
    /// lifetime corresponding to the lifetime of this transfer.
    pub fn ssl_ctx_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(*mut c_void) -> Result<(), Error> + Send + 'data
    {
        self.data.ssl_ctx = Some(Box::new(f));
        unsafe {
            self.easy.set_ssl_ctx_function(transfer_ssl_ctx_cb,
                                           &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::debug_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn debug_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(InfoType, &[u8]) + 'data
    {
        self.data.debug = Some(Box::new(f));
        unsafe {
            self.easy.set_debug_function(transfer_debug_cb,
                                         &*self.data as *const _ as *mut _)
        }
    }

    /// Same as `Easy::header_function`, just takes a non `'static` lifetime
    /// corresponding to the lifetime of this transfer.
    pub fn header_function<F>(&mut self, f: F) -> Result<(), Error>
        where F: FnMut(&[u8]) -> bool + 'data
    {
        self.data.header = Some(Box::new(f));
        unsafe {
            self.easy.set_header_function(transfer_header_cb,
                                          &*self.data as *const _ as *mut _)
        }
    }

    // TODO: need to figure out how to expose this, but it also needs to be
    //       reset as part of `reset_scoped_configuration` above. Unfortunately
    //       setting `CURLOPT_POSTFIELDS` to null will switch the request to
    //       POST, which is not what we want.
    //
    // /// Configures the data that will be uploaded as part of a POST.
    // ///
    // /// By default this option is not set and corresponds to
    // /// `CURLOPT_POSTFIELDS`.
    // pub fn post_fields(&mut self, data: &'data [u8]) -> Result<(), Error> {
    //     // Set the length before the pointer so libcurl knows how much to
    //     // read
    //     try!(self.easy.post_field_size(data.len() as u64));
    //     self.easy.setopt_ptr(curl_sys::CURLOPT_POSTFIELDS,
    //                          data.as_ptr() as *const _)
    // }

    /// Same as `Easy::transfer`.
    pub fn perform(&self) -> Result<(), Error> {
        self.easy.do_perform()
    }

    /// Same as `Easy::unpause_read`.
    pub fn unpause_read(&self) -> Result<(), Error> {
        self.easy.unpause_read()
    }

    /// Same as `Easy::unpause_write`
    pub fn unpause_write(&self) -> Result<(), Error> {
        self.easy.unpause_write()
    }
}

// Applied to every fresh/reset handle: registers the error buffer, disables
// signals, and configures platform SSL certificate locations.
fn default_configure(handle: &mut Easy) {
    handle.data.error_buf = RefCell::new(vec![0; curl_sys::CURL_ERROR_SIZE]);
    // NOTE(review): libcurl keeps this raw pointer for the handle's lifetime;
    // this assumes `error_buf`'s allocation is never resized afterwards (the
    // only other user, `cvt`, just reads/writes in place) — confirm if that
    // invariant changes.
    handle.setopt_ptr(curl_sys::CURLOPT_ERRORBUFFER,
                      handle.data.error_buf.borrow().as_ptr() as *const _)
          .expect("failed to set error buffer");
    let _ = handle.signal(false);
    ssl_configure(handle);
}

// On non-macOS unix, probe the system for CA bundle locations and configure
// them; failures are intentionally best-effort and ignored.
#[cfg(all(unix, not(target_os = "macos")))]
fn ssl_configure(handle: &mut Easy) {
    let probe = ::openssl_sys::probe::probe();
    if let Some(ref path) = probe.cert_file {
        let _ = handle.cainfo(path);
    }
    if let Some(ref path) = probe.cert_dir {
        let _ = handle.capath(path);
    }
}

#[cfg(not(all(unix, not(target_os = "macos"))))]
fn ssl_configure(_handle: &mut Easy) {}

impl Drop for Easy {
    fn drop(&mut self) {
        unsafe {
            curl_sys::curl_easy_cleanup(self.handle);
        }
    }
}

impl List {
    /// Creates a new empty list of strings.
    pub fn new() -> List {
        List { raw: 0 as *mut _ }
    }

    /// Appends some data into this list.
    pub fn append(&mut self, data: &str) -> Result<(), Error> {
        let data = try!(CString::new(data));
        unsafe {
            // curl_slist_append copies the string and returns the (possibly
            // new) head pointer; null signals allocation failure.
            let raw = curl_sys::curl_slist_append(self.raw, data.as_ptr());
            assert!(!raw.is_null());
            self.raw = raw;
            Ok(())
        }
    }

    /// Returns an iterator over the nodes in this list.
    pub fn iter(&self) -> Iter {
        Iter { _me: self, cur: self.raw }
    }
}

impl Drop for List {
    fn drop(&mut self) {
        unsafe {
            curl_sys::curl_slist_free_all(self.raw)
        }
    }
}

impl<'a> Iterator for Iter<'a> {
    type Item = &'a [u8];

    fn next(&mut self) -> Option<&'a [u8]> {
        if self.cur.is_null() {
            return None
        }

        unsafe {
            let ret = Some(CStr::from_ptr((*self.cur).data).to_bytes());
            self.cur = (*self.cur).next;
            return ret
        }
    }
}

impl Form {
    /// Creates a new blank form ready for the addition of new data.
pub fn new() -> Form { Form { head: 0 as *mut _, tail: 0 as *mut _, headers: Vec::new(), buffers: Vec::new(), strings: Vec::new(), } } /// Prepares adding a new part to this `Form` /// /// Note that the part is not actually added to the form until the `add` /// method is called on `Part`, which may or may not fail. pub fn part<'a, 'data>(&'a mut self, name: &'data str) -> Part<'a, 'data> { Part { error: None, form: self, name: name, array: vec![curl_sys::curl_forms { option: curl_sys::CURLFORM_END, value: 0 as *mut _, }], } } } impl Drop for Form { fn drop(&mut self) { unsafe { curl_sys::curl_formfree(self.head); } } } impl<'form, 'data> Part<'form, 'data> { /// A pointer to the contents of this part, the actual data to send away. pub fn contents(&mut self, contents: &'data [u8]) -> &mut Self { let pos = self.array.len() - 1; self.array.insert(pos, curl_sys::curl_forms { option: curl_sys::CURLFORM_COPYCONTENTS, value: contents.as_ptr() as *mut _, }); self.array.insert(pos + 1, curl_sys::curl_forms { option: curl_sys::CURLFORM_CONTENTSLENGTH, value: contents.len() as *mut _, }); self } /// Causes this file to be read and its contents used as data in this part /// /// This part does not automatically become a file upload part simply /// because its data was read from a file. /// /// # Errors /// /// If the filename has any internal nul bytes or if on Windows it does not /// contain a unicode filename then the `add` function will eventually /// return an error. pub fn file_content<P>(&mut self, file: P) -> &mut Self where P: AsRef<Path> { self._file_content(file.as_ref()) } fn _file_content(&mut self, file: &Path) -> &mut Self { if let Some(bytes) = self.path2cstr(file) { let pos = self.array.len() - 1; self.array.insert(pos, curl_sys::curl_forms { option: curl_sys::CURLFORM_FILECONTENT, value: bytes.as_ptr() as *mut _, }); self.form.strings.push(bytes); } self } /// Makes this part a file upload part of the given file. 
///
/// Sets the filename field to the basename of the provided file name, and
/// it reads the contents of the file and passes them as data and sets the
/// content type if the given file matches one of the internally known file
/// extensions.
///
/// The given upload file must exist entirely on the filesystem before the
/// upload is started because libcurl needs to read the size of it
/// beforehand.
///
/// Multiple files can be uploaded by calling this method multiple times and
/// content types can also be configured for each file (by calling that
/// next).
///
/// # Errors
///
/// If the filename has any internal nul bytes or if on Windows it does not
/// contain a unicode filename then this function will cause `add` to return
/// an error when called.
pub fn file<P: ?Sized>(&mut self, file: &'data P) -> &mut Self
    where P: AsRef<Path>
{
    self._file(file.as_ref())
}

// Non-generic inner function; the generic wrapper above only converts
// its argument to `&Path`.
fn _file(&mut self, file: &'data Path) -> &mut Self {
    // Conversion failure is recorded for `add`; the option is skipped here.
    if let Some(bytes) = self.path2cstr(file) {
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_FILE,
            value: bytes.as_ptr() as *mut _,
        });
        // Keep the CString alive for as long as the form is.
        self.form.strings.push(bytes);
    }
    self
}

/// Used in combination with `Part::file`, provides the content-type for
/// this part, possibly instead of choosing an internal one.
///
/// # Errors
///
/// If `content_type` contains an internal nul byte then `add` will return
/// an error when called. (The original docs claimed a panic here, but
/// `bytes2cstr` records the error for `add` instead of panicking.)
pub fn content_type(&mut self, content_type: &'data str) -> &mut Self {
    if let Some(bytes) = self.bytes2cstr(content_type.as_bytes()) {
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_CONTENTTYPE,
            value: bytes.as_ptr() as *mut _,
        });
        // Keep the CString alive for as long as the form is.
        self.form.strings.push(bytes);
    }
    self
}

/// Used in combination with `Part::file`, provides the filename for
/// this part instead of the actual one.
///
/// # Errors
///
/// If `name` contains an internal nul byte, or if on Windows the path is
/// not valid unicode then this function will return an error when `add` is
/// called.
pub fn filename<P: ?Sized>(&mut self, name: &'data P) -> &mut Self
    where P: AsRef<Path>
{
    self._filename(name.as_ref())
}

// Non-generic inner function; the generic wrapper above only converts
// its argument to `&Path`.
fn _filename(&mut self, name: &'data Path) -> &mut Self {
    if let Some(bytes) = self.path2cstr(name) {
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_FILENAME,
            value: bytes.as_ptr() as *mut _,
        });
        // Keep the CString alive for as long as the form is.
        self.form.strings.push(bytes);
    }
    self
}

/// This is used to provide a custom file upload part without using the
/// `file` method above.
///
/// The first parameter is for the filename field and the second is the
/// in-memory contents.
///
/// # Errors
///
/// If `name` contains an internal nul byte, or if on Windows the path is
/// not valid unicode then this function will return an error when `add` is
/// called.
pub fn buffer<P: ?Sized>(&mut self, name: &'data P, data: Vec<u8>) -> &mut Self
    where P: AsRef<Path>
{
    self._buffer(name.as_ref(), data)
}

fn _buffer(&mut self, name: &'data Path, data: Vec<u8>) -> &mut Self {
    if let Some(bytes) = self.path2cstr(name) {
        let pos = self.array.len() - 1;
        self.array.insert(pos, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_BUFFER,
            value: bytes.as_ptr() as *mut _,
        });
        self.form.strings.push(bytes);
        // The pointer is captured before `data` is moved into
        // `form.buffers` below; a `Vec`'s heap storage is stable across
        // moves, so the pointer remains valid for the form's lifetime.
        self.array.insert(pos + 1, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_BUFFERPTR,
            value: data.as_ptr() as *mut _,
        });
        self.array.insert(pos + 2, curl_sys::curl_forms {
            option: curl_sys::CURLFORM_BUFFERLENGTH,
            value: data.len() as *mut _,
        });
        // Keep the backing buffer alive for as long as the form is.
        self.form.buffers.push(data);
    }
    self
}

/// Specifies extra headers for the form POST section.
///
/// Appends the list of headers to those libcurl automatically generates.
pub fn content_header(&mut self, headers: List) -> &mut Self {
    // Insert just before the trailing CURLFORM_END terminator.
    let pos = self.array.len() - 1;
    self.array.insert(pos, curl_sys::curl_forms {
        option: curl_sys::CURLFORM_CONTENTHEADER,
        value: headers.raw as *mut _,
    });
    // Keep the slist alive for as long as the form is.
    self.form.headers.push(headers);
    self
}

/// Attempts to add this part to the `Form` that it was created from.
///
/// If any error happens while adding that error is returned, otherwise if
/// the part was successfully appended then `Ok(())` is returned.
pub fn add(&mut self) -> Result<(), FormError> {
    // Report any conversion error recorded by earlier builder calls.
    if let Some(err) = self.error.clone() {
        return Err(err)
    }
    let rc = unsafe {
        // The part name is passed with an explicit length (NAMELENGTH), so
        // it does not need to be nul-terminated; all remaining options were
        // accumulated in the CURLFORM_END-terminated `array`.
        curl_sys::curl_formadd(&mut self.form.head,
                               &mut self.form.tail,
                               curl_sys::CURLFORM_COPYNAME,
                               self.name.as_ptr(),
                               curl_sys::CURLFORM_NAMELENGTH,
                               self.name.len(),
                               curl_sys::CURLFORM_ARRAY,
                               self.array.as_ptr(),
                               curl_sys::CURLFORM_END)
    };
    if rc == curl_sys::CURL_FORMADD_OK {
        Ok(())
    } else {
        Err(FormError::new(rc))
    }
}

// Unix paths are arbitrary bytes, so convert via the raw OsStr bytes.
#[cfg(unix)]
fn path2cstr(&mut self, p: &Path) -> Option<CString> {
    use std::os::unix::prelude::*;
    self.bytes2cstr(p.as_os_str().as_bytes())
}

// Windows paths must be valid unicode to be converted; otherwise the
// failure is recorded so that `add` reports it later.
#[cfg(windows)]
fn path2cstr(&mut self, p: &Path) -> Option<CString> {
    match p.to_str() {
        Some(bytes) => self.bytes2cstr(bytes.as_bytes()),
        None if self.error.is_none() => {
            // TODO: better error code
            self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
            None
        }
        None => None,
    }
}

// Converts bytes to a CString, recording the first failure (interior nul
// byte) in `self.error` for `add` to report; never panics.
fn bytes2cstr(&mut self, bytes: &[u8]) -> Option<CString> {
    match CString::new(bytes) {
        Ok(c) => Some(c),
        Err(..) if self.error.is_none() => {
            // TODO: better error code
            self.error = Some(FormError::new(curl_sys::CURL_FORMADD_INCOMPLETE));
            None
        }
        Err(..) => None,
    }
}
}

impl Auth {
    /// Creates a new set of authentications with no members.
    ///
    /// An `Auth` structure is used to configure which forms of authentication
    /// are attempted when negotiating connections with servers.
    pub fn new() -> Auth {
        Auth { bits: 0 }
    }

    /// HTTP Basic authentication.
///
/// This is the default choice, and the only method that is in wide-spread
/// use and supported virtually everywhere. This sends the user name and
/// password over the network in plain text, easily captured by others.
pub fn basic(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_BASIC, on)
}

/// HTTP Digest authentication.
///
/// Digest authentication is defined in RFC 2617 and is a more secure way to
/// do authentication over public networks than the regular old-fashioned
/// Basic method.
pub fn digest(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_DIGEST, on)
}

/// HTTP Digest authentication with an IE flavor.
///
/// Digest authentication is defined in RFC 2617 and is a more secure way to
/// do authentication over public networks than the regular old-fashioned
/// Basic method. The IE flavor is simply that libcurl will use a special
/// "quirk" that IE is known to have used before version 7 and that some
/// servers require the client to use.
pub fn digest_ie(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_DIGEST_IE, on)
}

/// HTTP Negotiate (SPNEGO) authentication.
///
/// Negotiate authentication is defined in RFC 4559 and is the most secure
/// way to perform authentication over HTTP.
///
/// You need to build libcurl with a suitable GSS-API library or SSPI on
/// Windows for this to work.
pub fn gssnegotiate(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_GSSNEGOTIATE, on)
}

/// HTTP NTLM authentication.
///
/// A proprietary protocol invented and used by Microsoft. It uses a
/// challenge-response and hash concept similar to Digest, to prevent the
/// password from being eavesdropped.
///
/// You need to build libcurl with either OpenSSL, GnuTLS or NSS support for
/// this option to work, or build libcurl on Windows with SSPI support.
pub fn ntlm(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_NTLM, on)
}

/// NTLM delegating to winbind helper.
///
/// Authentication is performed by a separate binary application that is
/// executed when needed. The name of the application is specified at
/// compile time but is typically /usr/bin/ntlm_auth
///
/// Note that libcurl will fork when necessary to run the winbind
/// application and kill it when complete, calling waitpid() to await its
/// exit when done. On POSIX operating systems, killing the process will
/// cause a SIGCHLD signal to be raised (regardless of whether
/// CURLOPT_NOSIGNAL is set), which must be handled intelligently by the
/// application. In particular, the application must not unconditionally
/// call wait() in its SIGCHLD signal handler to avoid being subject to a
/// race condition. This behavior is subject to change in future versions of
/// libcurl.
///
/// A proprietary protocol invented and used by Microsoft. It uses a
/// challenge-response and hash concept similar to Digest, to prevent the
/// password from being eavesdropped.
pub fn ntlm_wb(&mut self, on: bool) -> &mut Auth {
    self.flag(curl_sys::CURLAUTH_NTLM_WB, on)
}

// Sets or clears `bit` in the accumulated CURLAUTH bitmask.
fn flag(&mut self, bit: c_ulong, on: bool) -> &mut Auth {
    if on {
        self.bits |= bit as c_long;
    } else {
        // Precedence: the negation applies to `bit` before the cast,
        // i.e. this is `(!bit) as c_long`.
        self.bits &= !bit as c_long;
    }
    self
}
}
use std::fmt::{Display, Formatter, Error}; #[derive(Debug, PartialEq)] pub struct PerElem { pub name: String, pub coef: u32, pub pos: usize, pub len: usize, } pub type Molecule = Vec<PerElem>; /// Sorts the PerElems and groups those with the same name field. /// /// Grouping of two (or more) PerElems means adding the coef field of the /// duplicate to the one already found, and then throwing away the duplicate. /// E.g. CH3CH3 would turn into C2H6. pub fn group_elems(mut molecule: Molecule) -> Molecule { let mut out = Vec::<PerElem>::new(); molecule.as_mut_slice().sort_by(|a, b| a.name.cmp(&b.name)); // since the elements are now sorted, if the current elem does not match the // last element in out (i.e. what we previously pushed), then it won't match // anything in out for elem in molecule.into_iter() { if out.last().and_then(|e| Some(e.name == elem.name)).unwrap_or(false) { out.last_mut().unwrap().coef += elem.coef; } else { out.push(elem); } } out } impl Display for Molecule { fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { for elem in self.iter() { fmt.write_str(elem.name.as_slice()); // TODO: Use a proper conversion function -- if it exists if elem.coef > 1 { fmt.write_str(format!("{}", elem.coef).as_slice()); } } Ok(()) } } #[cfg(test)] mod test { use super::*; macro_rules! 
dummy_elem( ($name:expr) => ( PerElem { name: $name.to_string(), coef: 1, pos: 0, len: 1 } ); ($name:expr, $coef:expr) => ( PerElem { name: $name.to_string(), coef: $coef, pos: 0, len: 1 } ); ); #[test] fn group() { let result = group_elems(vec!(dummy_elem!("C"), dummy_elem!("H"), dummy_elem!("C"))); let expected = vec!(dummy_elem!("C", 2), dummy_elem!("H", 1)); assert_eq!(result, expected); } #[test] fn molecule_display() { let molecule = vec!(dummy_elem!("C", 2), dummy_elem!("H", 3)); let result = format!("{}", molecule); let expected = "C2H3"; assert_eq!(result, expected); } } Add 1 more test to Molecule's prettyprinter We now check that coef which are equal to 1 are skipped use std::fmt::{Display, Formatter, Error}; #[derive(Debug, PartialEq)] pub struct PerElem { pub name: String, pub coef: u32, pub pos: usize, pub len: usize, } pub type Molecule = Vec<PerElem>; /// Sorts the PerElems and groups those with the same name field. /// /// Grouping of two (or more) PerElems means adding the coef field of the /// duplicate to the one already found, and then throwing away the duplicate. /// E.g. CH3CH3 would turn into C2H6. pub fn group_elems(mut molecule: Molecule) -> Molecule { let mut out = Vec::<PerElem>::new(); molecule.as_mut_slice().sort_by(|a, b| a.name.cmp(&b.name)); // since the elements are now sorted, if the current elem does not match the // last element in out (i.e. what we previously pushed), then it won't match // anything in out for elem in molecule.into_iter() { if out.last().and_then(|e| Some(e.name == elem.name)).unwrap_or(false) { out.last_mut().unwrap().coef += elem.coef; } else { out.push(elem); } } out } impl Display for Molecule { fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { for elem in self.iter() { fmt.write_str(elem.name.as_slice()); // TODO: Use a proper conversion function -- if it exists if elem.coef > 1 { fmt.write_str(format!("{}", elem.coef).as_slice()); } } Ok(()) } } #[cfg(test)] mod test { use super::*; macro_rules! 
dummy_elem( ($name:expr) => ( PerElem { name: $name.to_string(), coef: 1, pos: 0, len: 1 } ); ($name:expr, $coef:expr) => ( PerElem { name: $name.to_string(), coef: $coef, pos: 0, len: 1 } ); ); #[test] fn group() { let result = group_elems(vec!(dummy_elem!("C"), dummy_elem!("H"), dummy_elem!("C"))); let expected = vec!(dummy_elem!("C", 2), dummy_elem!("H", 1)); assert_eq!(result, expected); } #[test] fn molecule_display() { let molecule = vec!(dummy_elem!("C", 2), dummy_elem!("H", 3)); let result = format!("{}", molecule); let expected = "C2H3"; assert_eq!(result, expected); } #[test] fn molecule_display_coefs() { let molecule = vec!(dummy_elem!("C", 1), dummy_elem!("H", 3)); let result = format!("{}", molecule); let expected = "CH3"; assert_eq!(result, expected); } }
use super::*;

/// An expression together with the attributes attached to it.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Expr {
    // Which kind of expression this is, plus its payload.
    node: ExprKind,
    // Attributes attached to the expression.
    attrs: Vec<Attribute>,
}

impl From<ExprKind> for Expr {
    // Wraps a bare kind into an `Expr` with no attributes.
    fn from(node: ExprKind) -> Expr {
        Expr {
            node: node,
            attrs: Vec::new(),
        }
    }
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum ExprKind {
    /// A `box x` expression.
    Box(Box<Expr>),
    /// First expr is the place; second expr is the value.
    InPlace(Box<Expr>, Box<Expr>),
    /// An array (`[a, b, c, d]`)
    Vec(Vec<Expr>),
    /// A function call
    ///
    /// The first field resolves to the function itself,
    /// and the second field is the list of arguments
    Call(Box<Expr>, Vec<Expr>),
    /// A method call (`x.foo::<Bar, Baz>(a, b, c, d)`)
    ///
    /// The `Ident` is the identifier for the method name.
    /// The vector of `Ty`s are the ascripted type parameters for the method
    /// (within the angle brackets).
    ///
    /// The first element of the vector of `Expr`s is the expression that evaluates
    /// to the object on which the method is being called on (the receiver),
    /// and the remaining elements are the rest of the arguments.
    ///
    /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
    /// `ExprKind::MethodCall(foo, [Bar, Baz], [x, a, b, c, d])`.
    MethodCall(Ident, Vec<Ty>, Vec<Expr>),
    /// A tuple (`(a, b, c, d)`)
    Tup(Vec<Expr>),
    /// A binary operation (For example: `a + b`, `a * b`)
    Binary(BinOp, Box<Expr>, Box<Expr>),
    /// A unary operation (For example: `!x`, `*x`)
    Unary(UnOp, Box<Expr>),
    /// A literal (For example: `1`, `"foo"`)
    Lit(Lit),
    /// A cast (`foo as f64`)
    Cast(Box<Expr>, Box<Ty>),
    /// Type ascription (`foo: f64`)
    Type(Box<Expr>, Box<Ty>),
    /// An `if` block, with an optional else block
    ///
    /// `if expr { block } else { expr }`
    If(Box<Expr>, Block, Option<Box<Expr>>),
    /// An `if let` expression with an optional else block
    ///
    /// `if let pat = expr { block } else { expr }`
    ///
    /// This is desugared to a `match` expression.
IfLet(Box<Pat>, Box<Expr>, Block, Option<Box<Expr>>),
    // (continuation of the `ExprKind` enum, opened earlier in this file)
    /// A while loop, with an optional label
    ///
    /// `'label: while expr { block }`
    While(Box<Expr>, Block, Option<Ident>),
    /// A while-let loop, with an optional label
    ///
    /// `'label: while let pat = expr { block }`
    ///
    /// This is desugared to a combination of `loop` and `match` expressions.
    WhileLet(Box<Pat>, Box<Expr>, Block, Option<Ident>),
    /// A for loop, with an optional label
    ///
    /// `'label: for pat in expr { block }`
    ///
    /// This is desugared to a combination of `loop` and `match` expressions.
    ForLoop(Box<Pat>, Box<Expr>, Block, Option<Ident>),
    /// Conditionless loop (can be exited with break, continue, or return)
    ///
    /// `'label: loop { block }`
    Loop(Block, Option<Ident>),
    /// A `match` block.
    Match(Box<Expr>, Vec<Arm>),
    /// A closure (for example, `move |a, b, c| {a + b + c}`)
    Closure(CaptureBy, Box<FnDecl>, Block),
    /// A block (`{ ... }` or `unsafe { ... }`)
    Block(BlockCheckMode, Block),
    /// An assignment (`a = foo()`)
    Assign(Box<Expr>, Box<Expr>),
    /// An assignment with an operator
    ///
    /// For example, `a += 1`.
    AssignOp(BinOp, Box<Expr>, Box<Expr>),
    /// Access of a named struct field (`obj.foo`)
    Field(Box<Expr>, Ident),
    /// Access of an unnamed field of a struct or tuple-struct
    ///
    /// For example, `foo.0`.
    TupField(Box<Expr>, usize),
    /// An indexing operation (`foo[2]`)
    Index(Box<Expr>, Box<Expr>),
    /// A range (`1..2`, `1..`, `..2`, `1...2`, `1...`, `...2`)
    Range(Option<Box<Expr>>, Option<Box<Expr>>, RangeLimits),
    /// Variable reference, possibly containing `::` and/or type
    /// parameters, e.g. foo::bar::<baz>.
    ///
    /// Optionally "qualified",
    /// E.g. `<Vec<T> as SomeTrait>::SomeType`.
Path(Option<QSelf>, Path),
    // (continuation of the `ExprKind` enum, opened earlier in this file)
    /// A referencing operation (`&a` or `&mut a`)
    AddrOf(Mutability, Box<Expr>),
    /// A `break`, with an optional label to break
    Break(Option<Ident>),
    /// A `continue`, with an optional label
    Continue(Option<Ident>),
    /// A `return`, with an optional value to be returned
    Ret(Option<Box<Expr>>),
    /// A macro invocation; pre-expansion
    Mac(Mac),
    /// A struct literal expression.
    ///
    /// For example, `Foo {x: 1, y: 2}`, or
    /// `Foo {x: 1, .. base}`, where `base` is the `Option<Expr>`.
    Struct(Path, Vec<FieldValue>, Option<Box<Expr>>),
    /// An array literal constructed from one repeated element.
    ///
    /// For example, `[1; 5]`. The first expression is the element
    /// to be repeated; the second is the number of times to repeat it.
    Repeat(Box<Expr>, Box<Expr>),
    /// No-op: used solely so we can pretty-print faithfully
    Paren(Box<Expr>),
    /// `expr?`
    Try(Box<Expr>),
}

/// A single `name: value` (or shorthand `name`) field of a struct literal.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct FieldValue {
    pub ident: Ident,
    pub expr: Expr,
    pub is_shorthand: bool,
}

/// A Block (`{ .. }`).
///
/// E.g. `{ .. }` as in `fn foo() { .. }`
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Block {
    /// Statements in a block
    pub stmts: Vec<Stmt>,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BlockCheckMode {
    Default,
    Unsafe,
}

#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Stmt {
    /// A local (let) binding.
    Local(Box<Local>),
    /// An item definition.
    Item(Box<Item>),
    /// Expr without trailing semi-colon.
    Expr(Box<Expr>),
    /// Expr with a trailing semi-colon.
    Semi(Box<Expr>),
    /// A macro invocation statement, with its style and attributes.
    Mac(Box<(Mac, MacStmtStyle, Vec<Attribute>)>),
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum MacStmtStyle {
    /// The macro statement had a trailing semicolon, e.g. `foo! { ... };`
    /// `foo!(...);`, `foo![...];`
    Semicolon,
    /// The macro statement had braces; e.g. foo! { ... }
    Braces,
    /// The macro statement had parentheses or brackets and no semicolon; e.g.
    /// `foo!(...)`. All of these will end up being converted into macro
    /// expressions.
NoBraces,
}

/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Local {
    pub pat: Box<Pat>,
    pub ty: Option<Box<Ty>>,
    /// Initializer expression to set the value, if any
    pub init: Option<Box<Expr>>,
    pub attrs: Vec<Attribute>,
}

#[derive(Debug, Clone, Eq, PartialEq)]
// Clippy false positive
// https://github.com/Manishearth/rust-clippy/issues/1241
#[cfg_attr(feature = "clippy", allow(enum_variant_names))]
pub enum Pat {
    /// Represents a wildcard pattern (`_`)
    Wild,
    /// A `Pat::Ident` may either be a new bound variable (`ref mut binding @ OPT_SUBPATTERN`),
    /// or a unit struct/variant pattern, or a const pattern (in the last two cases the third
    /// field must be `None`). Disambiguation cannot be done with parser alone, so it happens
    /// during name resolution.
    Ident(BindingMode, Ident, Option<Box<Pat>>),
    /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`.
    /// The `bool` is `true` in the presence of a `..`.
    Struct(Path, Vec<FieldPat>, bool),
    /// A tuple struct/variant pattern `Variant(x, y, .., z)`.
    /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
    /// 0 <= position <= subpats.len()
    TupleStruct(Path, Vec<Pat>, Option<usize>),
    /// A possibly qualified path pattern.
    /// Unqualified path patterns `A::B::C` can legally refer to variants, structs, constants
    /// or associated constants. Qualified path patterns `<A>::B::C`/`<A as Trait>::B::C` can
    /// only legally refer to associated constants.
    Path(Option<QSelf>, Path),
    /// A tuple pattern `(a, b)`.
    /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
    /// 0 <= position <= subpats.len()
    Tuple(Vec<Pat>, Option<usize>),
    /// A `box` pattern
    Box(Box<Pat>),
    /// A reference pattern, e.g. `&mut (a, b)`
    Ref(Box<Pat>, Mutability),
    /// A literal
    Lit(Box<Expr>),
    /// A range pattern, e.g.
/// `1...2`
    Range(Box<Expr>, Box<Expr>),
    /// `[a, b, ..i, y, z]` is represented as:
    /// `Pat::Slice(box [a, b], Some(i), box [y, z])`
    Slice(Vec<Pat>, Option<Box<Pat>>, Vec<Pat>),
    /// A macro pattern; pre-expansion
    Mac(Mac),
}

/// An arm of a 'match'.
///
/// E.g. `0...10 => { println!("match!") }` as in
///
/// ```rust,ignore
/// match n {
///     0...10 => { println!("match!") },
///     // ..
/// }
/// ```
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Arm {
    pub attrs: Vec<Attribute>,
    // One or more `|`-separated patterns.
    pub pats: Vec<Pat>,
    // Optional `if` guard expression.
    pub guard: Option<Box<Expr>>,
    pub body: Box<Expr>,
}

/// A capture clause
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum CaptureBy {
    Value,
    Ref,
}

/// Limit types of a range (inclusive or exclusive)
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum RangeLimits {
    /// Inclusive at the beginning, exclusive at the end
    HalfOpen,
    /// Inclusive at the beginning and end
    Closed,
}

/// A single field in a struct pattern
///
/// Patterns like the fields of Foo `{ x, ref y, ref mut z }`
/// are treated the same as `x: x, y: ref y, z: ref mut z`,
/// except `is_shorthand` is true
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct FieldPat {
    /// The identifier for the field
    pub ident: Ident,
    /// The pattern the field is destructured to
    pub pat: Box<Pat>,
    pub is_shorthand: bool,
}

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BindingMode {
    ByRef(Mutability),
    ByValue(Mutability),
}

#[cfg(feature = "parsing")]
pub mod parsing {
    use super::*;
    use {BinOp, Delimited, DelimToken, FnArg, FnDecl, FunctionRetTy, Ident,
         Lifetime, Mac, TokenTree, Ty, UnOp};
    use attr::parsing::outer_attr;
    use generics::parsing::lifetime;
    use ident::parsing::{ident, wordlike};
    use item::parsing::item;
    use lit::parsing::{digits, lit};
    use mac::parsing::{mac, token_trees};
    use nom::IResult::{self, Error};
    use op::parsing::{assign_op, binop, unop};
    use ty::parsing::{mutability, path, qpath, ty};

    // Struct literals are ambiguous in certain positions
    // https://github.com/rust-lang/rfcs/pull/92
macro_rules! named_ambiguous_expr { ($name:ident -> $o:ty, $allow_struct:ident, $submac:ident!( $($args:tt)* )) => { fn $name(i: &str, $allow_struct: bool) -> $crate::nom::IResult<&str, $o> { $submac!(i, $($args)*) } }; } macro_rules! ambiguous_expr { ($i:expr, $allow_struct:ident) => { ambiguous_expr($i, $allow_struct, true) }; } named!(pub expr -> Expr, ambiguous_expr!(true)); named!(expr_no_struct -> Expr, ambiguous_expr!(false)); fn ambiguous_expr(i: &str, allow_struct: bool, allow_block: bool) -> IResult<&str, Expr> { do_parse!( i, mut e: alt!( expr_lit // must be before expr_struct | cond_reduce!(allow_struct, expr_struct) // must be before expr_path | expr_paren // must be before expr_tup | expr_mac // must be before expr_path | expr_break // must be before expr_path | expr_continue // must be before expr_path | call!(expr_ret, allow_struct) // must be before expr_path | call!(expr_box, allow_struct) | expr_in_place | expr_vec | expr_tup | call!(expr_unary, allow_struct) | expr_if | expr_while | expr_for_loop | expr_loop | expr_match | call!(expr_closure, allow_struct) | cond_reduce!(allow_block, expr_block) | call!(expr_range, allow_struct) | expr_path | call!(expr_addr_of, allow_struct) | expr_repeat ) >> many0!(alt!( tap!(args: and_call => { e = ExprKind::Call(Box::new(e.into()), args); }) | tap!(more: and_method_call => { let (method, ascript, mut args) = more; args.insert(0, e.into()); e = ExprKind::MethodCall(method, ascript, args); }) | tap!(more: call!(and_binary, allow_struct) => { let (op, other) = more; e = ExprKind::Binary(op, Box::new(e.into()), Box::new(other)); }) | tap!(ty: and_cast => { e = ExprKind::Cast(Box::new(e.into()), Box::new(ty)); }) | tap!(ty: and_ascription => { e = ExprKind::Type(Box::new(e.into()), Box::new(ty)); }) | tap!(v: call!(and_assign, allow_struct) => { e = ExprKind::Assign(Box::new(e.into()), Box::new(v)); }) | tap!(more: call!(and_assign_op, allow_struct) => { let (op, v) = more; e = ExprKind::AssignOp(op, 
Box::new(e.into()), Box::new(v)); }) | tap!(field: and_field => { e = ExprKind::Field(Box::new(e.into()), field); }) | tap!(field: and_tup_field => { e = ExprKind::TupField(Box::new(e.into()), field as usize); }) | tap!(i: and_index => { e = ExprKind::Index(Box::new(e.into()), Box::new(i)); }) | tap!(more: call!(and_range, allow_struct) => { let (limits, hi) = more; e = ExprKind::Range(Some(Box::new(e.into())), hi.map(Box::new), limits); }) | tap!(_try: punct!("?") => { e = ExprKind::Try(Box::new(e.into())); }) )) >> (e.into()) ) } named!(expr_mac -> ExprKind, map!(mac, ExprKind::Mac)); named!(expr_paren -> ExprKind, do_parse!( punct!("(") >> e: expr >> punct!(")") >> (ExprKind::Paren(Box::new(e))) )); named_ambiguous_expr!(expr_box -> ExprKind, allow_struct, do_parse!( keyword!("box") >> inner: ambiguous_expr!(allow_struct) >> (ExprKind::Box(Box::new(inner))) )); named!(expr_in_place -> ExprKind, do_parse!( keyword!("in") >> place: expr_no_struct >> punct!("{") >> value: within_block >> punct!("}") >> (ExprKind::InPlace( Box::new(place), Box::new(ExprKind::Block(BlockCheckMode::Default, Block { stmts: value, }).into()), )) )); named!(expr_vec -> ExprKind, do_parse!( punct!("[") >> elems: terminated_list!(punct!(","), expr) >> punct!("]") >> (ExprKind::Vec(elems)) )); named!(and_call -> Vec<Expr>, do_parse!( punct!("(") >> args: terminated_list!(punct!(","), expr) >> punct!(")") >> (args) )); named!(and_method_call -> (Ident, Vec<Ty>, Vec<Expr>), do_parse!( punct!(".") >> method: ident >> ascript: opt_vec!(preceded!( punct!("::"), delimited!( punct!("<"), terminated_list!(punct!(","), ty), punct!(">") ) )) >> punct!("(") >> args: terminated_list!(punct!(","), expr) >> punct!(")") >> (method, ascript, args) )); named!(expr_tup -> ExprKind, do_parse!( punct!("(") >> elems: terminated_list!(punct!(","), expr) >> punct!(")") >> (ExprKind::Tup(elems)) )); named_ambiguous_expr!(and_binary -> (BinOp, Expr), allow_struct, tuple!( binop, ambiguous_expr!(allow_struct) )); 
named_ambiguous_expr!(expr_unary -> ExprKind, allow_struct, do_parse!( operator: unop >> operand: ambiguous_expr!(allow_struct) >> (ExprKind::Unary(operator, Box::new(operand))) )); named!(expr_lit -> ExprKind, map!(lit, ExprKind::Lit)); named!(and_cast -> Ty, do_parse!( keyword!("as") >> ty: ty >> (ty) )); named!(and_ascription -> Ty, preceded!(punct!(":"), ty)); enum Cond { Let(Pat, Expr), Expr(Expr), } named!(cond -> Cond, alt!( do_parse!( keyword!("let") >> pat: pat >> punct!("=") >> value: expr_no_struct >> (Cond::Let(pat, value)) ) | map!(expr_no_struct, Cond::Expr) )); named!(expr_if -> ExprKind, do_parse!( keyword!("if") >> cond: cond >> punct!("{") >> then_block: within_block >> punct!("}") >> else_block: option!(preceded!( keyword!("else"), alt!( expr_if | do_parse!( punct!("{") >> else_block: within_block >> punct!("}") >> (ExprKind::Block(BlockCheckMode::Default, Block { stmts: else_block, }).into()) ) ) )) >> (match cond { Cond::Let(pat, expr) => ExprKind::IfLet( Box::new(pat), Box::new(expr), Block { stmts: then_block, }, else_block.map(|els| Box::new(els.into())), ), Cond::Expr(cond) => ExprKind::If( Box::new(cond), Block { stmts: then_block, }, else_block.map(|els| Box::new(els.into())), ), }) )); named!(expr_for_loop -> ExprKind, do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("for") >> pat: pat >> keyword!("in") >> expr: expr_no_struct >> loop_block: block >> (ExprKind::ForLoop(Box::new(pat), Box::new(expr), loop_block, lbl)) )); named!(expr_loop -> ExprKind, do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("loop") >> loop_block: block >> (ExprKind::Loop(loop_block, lbl)) )); named!(expr_match -> ExprKind, do_parse!( keyword!("match") >> obj: expr_no_struct >> punct!("{") >> mut arms: many0!(do_parse!( arm: match_arm >> cond!(arm_requires_comma(&arm), punct!(",")) >> cond!(!arm_requires_comma(&arm), option!(punct!(","))) >> (arm) )) >> last_arm: option!(match_arm) >> punct!("}") >> 
(ExprKind::Match(Box::new(obj), { arms.extend(last_arm); arms })) )); fn arm_requires_comma(arm: &Arm) -> bool { if let ExprKind::Block(BlockCheckMode::Default, _) = arm.body.node { false } else { true } } named!(match_arm -> Arm, do_parse!( attrs: many0!(outer_attr) >> pats: separated_nonempty_list!(punct!("|"), pat) >> guard: option!(preceded!(keyword!("if"), expr)) >> punct!("=>") >> body: alt!( map!(block, |blk| ExprKind::Block(BlockCheckMode::Default, blk).into()) | expr ) >> (Arm { attrs: attrs, pats: pats, guard: guard.map(Box::new), body: Box::new(body), }) )); named_ambiguous_expr!(expr_closure -> ExprKind, allow_struct, do_parse!( capture: capture_by >> punct!("|") >> inputs: terminated_list!(punct!(","), closure_arg) >> punct!("|") >> ret_and_body: alt!( do_parse!( punct!("->") >> ty: ty >> body: block >> ((FunctionRetTy::Ty(ty), body)) ) | map!(ambiguous_expr!(allow_struct), |e| ( FunctionRetTy::Default, Block { stmts: vec![Stmt::Expr(Box::new(e))], }, )) ) >> (ExprKind::Closure( capture, Box::new(FnDecl { inputs: inputs, output: ret_and_body.0, variadic: false, }), ret_and_body.1, )) )); named!(closure_arg -> FnArg, do_parse!( pat: pat >> ty: option!(preceded!(punct!(":"), ty)) >> (FnArg::Captured(pat, ty.unwrap_or(Ty::Infer))) )); named!(expr_while -> ExprKind, do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("while") >> cond: cond >> while_block: block >> (match cond { Cond::Let(pat, expr) => ExprKind::WhileLet( Box::new(pat), Box::new(expr), while_block, lbl, ), Cond::Expr(cond) => ExprKind::While( Box::new(cond), while_block, lbl, ), }) )); named!(expr_continue -> ExprKind, do_parse!( keyword!("continue") >> lbl: option!(label) >> (ExprKind::Continue(lbl)) )); named!(expr_break -> ExprKind, do_parse!( keyword!("break") >> lbl: option!(label) >> (ExprKind::Break(lbl)) )); named_ambiguous_expr!(expr_ret -> ExprKind, allow_struct, do_parse!( keyword!("return") >> ret_value: option!(ambiguous_expr!(allow_struct)) >> 
(ExprKind::Ret(ret_value.map(Box::new))) )); named!(expr_struct -> ExprKind, do_parse!( path: path >> punct!("{") >> fields: separated_list!(punct!(","), field_value) >> base: option!(do_parse!( cond!(!fields.is_empty(), punct!(",")) >> punct!("..") >> base: expr >> (base) )) >> cond!(!fields.is_empty() && base.is_none(), option!(punct!(","))) >> punct!("}") >> (ExprKind::Struct(path, fields, base.map(Box::new))) )); named!(field_value -> FieldValue, alt!( do_parse!( name: wordlike >> punct!(":") >> value: expr >> (FieldValue { ident: name, expr: value, is_shorthand: false, }) ) | map!(ident, |name: Ident| FieldValue { ident: name.clone(), expr: ExprKind::Path(None, name.into()).into(), is_shorthand: true, }) )); named!(expr_repeat -> ExprKind, do_parse!( punct!("[") >> value: expr >> punct!(";") >> times: expr >> punct!("]") >> (ExprKind::Repeat(Box::new(value), Box::new(times))) )); named!(expr_block -> ExprKind, do_parse!( rules: block_check_mode >> b: block >> (ExprKind::Block(rules, Block { stmts: b.stmts, })) )); named_ambiguous_expr!(expr_range -> ExprKind, allow_struct, do_parse!( limits: range_limits >> hi: option!(ambiguous_expr!(allow_struct)) >> (ExprKind::Range(None, hi.map(Box::new), limits)) )); named!(range_limits -> RangeLimits, alt!( punct!("...") => { |_| RangeLimits::Closed } | punct!("..") => { |_| RangeLimits::HalfOpen } )); named!(expr_path -> ExprKind, map!(qpath, |(qself, path)| ExprKind::Path(qself, path))); named_ambiguous_expr!(expr_addr_of -> ExprKind, allow_struct, do_parse!( punct!("&") >> mutability: mutability >> expr: ambiguous_expr!(allow_struct) >> (ExprKind::AddrOf(mutability, Box::new(expr))) )); named_ambiguous_expr!(and_assign -> Expr, allow_struct, preceded!( punct!("="), ambiguous_expr!(allow_struct) )); named_ambiguous_expr!(and_assign_op -> (BinOp, Expr), allow_struct, tuple!( assign_op, ambiguous_expr!(allow_struct) )); named!(and_field -> Ident, preceded!(punct!("."), ident)); named!(and_tup_field -> u64, 
preceded!(punct!("."), digits)); named!(and_index -> Expr, delimited!(punct!("["), expr, punct!("]"))); named_ambiguous_expr!(and_range -> (RangeLimits, Option<Expr>), allow_struct, tuple!( range_limits, option!(call!(ambiguous_expr, allow_struct, false)) )); named!(pub block -> Block, do_parse!( punct!("{") >> stmts: within_block >> punct!("}") >> (Block { stmts: stmts, }) )); named!(block_check_mode -> BlockCheckMode, alt!( keyword!("unsafe") => { |_| BlockCheckMode::Unsafe } | epsilon!() => { |_| BlockCheckMode::Default } )); named!(pub within_block -> Vec<Stmt>, do_parse!( many0!(punct!(";")) >> mut standalone: many0!(terminated!(standalone_stmt, many0!(punct!(";")))) >> last: option!(expr) >> (match last { None => standalone, Some(last) => { standalone.push(Stmt::Expr(Box::new(last))); standalone } }) )); named!(standalone_stmt -> Stmt, alt!( stmt_mac | stmt_local | stmt_item | stmt_expr )); named!(stmt_mac -> Stmt, do_parse!( attrs: many0!(outer_attr) >> name: ident >> punct!("!") >> // Only parse braces here; paren and bracket will get parsed as // expression statements punct!("{") >> tts: token_trees >> punct!("}") >> semi: option!(punct!(";")) >> (Stmt::Mac(Box::new(( Mac { path: name.into(), tts: vec![TokenTree::Delimited(Delimited { delim: DelimToken::Brace, tts: tts, })], }, if semi.is_some() { MacStmtStyle::Semicolon } else { MacStmtStyle::Braces }, attrs, )))) )); named!(stmt_local -> Stmt, do_parse!( attrs: many0!(outer_attr) >> keyword!("let") >> pat: pat >> ty: option!(preceded!(punct!(":"), ty)) >> init: option!(preceded!(punct!("="), expr)) >> punct!(";") >> (Stmt::Local(Box::new(Local { pat: Box::new(pat), ty: ty.map(Box::new), init: init.map(Box::new), attrs: attrs, }))) )); named!(stmt_item -> Stmt, map!(item, |i| Stmt::Item(Box::new(i)))); fn requires_semi(e: &Expr) -> bool { match e.node { ExprKind::If(_, _, _) | ExprKind::IfLet(_, _, _, _) | ExprKind::While(_, _, _) | ExprKind::WhileLet(_, _, _, _) | ExprKind::ForLoop(_, _, _, _) | 
ExprKind::Loop(_, _) | ExprKind::Match(_, _) | ExprKind::Block(_, _) => false, _ => true, } } named!(stmt_expr -> Stmt, do_parse!( attrs: many0!(outer_attr) >> mut e: expr >> semi: option!(punct!(";")) >> ({ e.attrs = attrs; if semi.is_some() { Stmt::Semi(Box::new(e)) } else if requires_semi(&e) { return Error; } else { Stmt::Expr(Box::new(e)) } }) )); named!(pub pat -> Pat, alt!( pat_wild // must be before pat_ident | pat_box // must be before pat_ident | pat_range // must be before pat_lit | pat_tuple_struct // must be before pat_ident | pat_struct // must be before pat_ident | pat_mac // must be before pat_ident | pat_lit // must be before pat_ident | pat_ident // must be before pat_path | pat_path | pat_tuple | pat_ref | pat_slice )); named!(pat_mac -> Pat, map!(mac, Pat::Mac)); named!(pat_wild -> Pat, map!(keyword!("_"), |_| Pat::Wild)); named!(pat_box -> Pat, do_parse!( keyword!("box") >> pat: pat >> (Pat::Box(Box::new(pat))) )); named!(pat_ident -> Pat, do_parse!( mode: option!(keyword!("ref")) >> mutability: mutability >> name: alt!( ident | keyword!("self") => { Into::into } ) >> not!(peek!(punct!("<"))) >> not!(peek!(punct!("::"))) >> subpat: option!(preceded!(punct!("@"), pat)) >> (Pat::Ident( if mode.is_some() { BindingMode::ByRef(mutability) } else { BindingMode::ByValue(mutability) }, name, subpat.map(Box::new), )) )); named!(pat_tuple_struct -> Pat, do_parse!( path: path >> tuple: pat_tuple_helper >> (Pat::TupleStruct(path, tuple.0, tuple.1)) )); named!(pat_struct -> Pat, do_parse!( path: path >> punct!("{") >> fields: separated_list!(punct!(","), field_pat) >> more: option!(preceded!( cond!(!fields.is_empty(), punct!(",")), punct!("..") )) >> cond!(!fields.is_empty() && more.is_none(), option!(punct!(","))) >> punct!("}") >> (Pat::Struct(path, fields, more.is_some())) )); named!(field_pat -> FieldPat, alt!( do_parse!( ident: wordlike >> punct!(":") >> pat: pat >> (FieldPat { ident: ident, pat: Box::new(pat), is_shorthand: false, }) ) | do_parse!( 
boxed: option!(keyword!("box")) >> mode: option!(keyword!("ref")) >> mutability: mutability >> ident: ident >> ({ let mut pat = Pat::Ident( if mode.is_some() { BindingMode::ByRef(mutability) } else { BindingMode::ByValue(mutability) }, ident.clone(), None, ); if boxed.is_some() { pat = Pat::Box(Box::new(pat)); } FieldPat { ident: ident, pat: Box::new(pat), is_shorthand: true, } }) ) )); named!(pat_path -> Pat, map!(qpath, |(qself, path)| Pat::Path(qself, path))); named!(pat_tuple -> Pat, map!( pat_tuple_helper, |(pats, dotdot)| Pat::Tuple(pats, dotdot) )); named!(pat_tuple_helper -> (Vec<Pat>, Option<usize>), do_parse!( punct!("(") >> mut elems: separated_list!(punct!(","), pat) >> dotdot: option!(do_parse!( cond!(!elems.is_empty(), punct!(",")) >> punct!("..") >> rest: many0!(preceded!(punct!(","), pat)) >> cond!(!rest.is_empty(), option!(punct!(","))) >> (rest) )) >> cond!(!elems.is_empty() && dotdot.is_none(), option!(punct!(","))) >> punct!(")") >> (match dotdot { Some(rest) => { let pos = elems.len(); elems.extend(rest); (elems, Some(pos)) } None => (elems, None), }) )); named!(pat_ref -> Pat, do_parse!( punct!("&") >> mutability: mutability >> pat: pat >> (Pat::Ref(Box::new(pat), mutability)) )); named!(pat_lit -> Pat, do_parse!( lit: pat_lit_expr >> (if let ExprKind::Path(_, _) = lit.node { return IResult::Error; // these need to be parsed by pat_path } else { Pat::Lit(Box::new(lit)) }) )); named!(pat_range -> Pat, do_parse!( lo: pat_lit_expr >> punct!("...") >> hi: pat_lit_expr >> (Pat::Range(Box::new(lo), Box::new(hi))) )); named!(pat_lit_expr -> Expr, do_parse!( neg: option!(punct!("-")) >> v: alt!( lit => { ExprKind::Lit } | path => { |p| ExprKind::Path(None, p) } ) >> (if neg.is_some() { ExprKind::Unary(UnOp::Neg, Box::new(v.into())).into() } else { v.into() }) )); named!(pat_slice -> Pat, do_parse!( punct!("[") >> mut before: separated_list!(punct!(","), pat) >> after: option!(do_parse!( comma_before_dots: option!(cond_reduce!(!before.is_empty(), 
punct!(","))) >> punct!("..") >> after: many0!(preceded!(punct!(","), pat)) >> cond!(!after.is_empty(), option!(punct!(","))) >> (comma_before_dots.is_some(), after) )) >> cond!(after.is_none(), option!(punct!(","))) >> punct!("]") >> (match after { None => Pat::Slice(before, None, Vec::new()), Some((true, after)) => { if before.is_empty() { return IResult::Error; } Pat::Slice(before, Some(Box::new(Pat::Wild)), after) } Some((false, after)) => { let rest = before.pop().unwrap_or(Pat::Wild); Pat::Slice(before, Some(Box::new(rest)), after) } }) )); named!(capture_by -> CaptureBy, alt!( keyword!("move") => { |_| CaptureBy::Value } | epsilon!() => { |_| CaptureBy::Ref } )); named!(label -> Ident, map!(lifetime, |lt: Lifetime| lt.ident)); } #[cfg(feature = "printing")] mod printing { use super::*; use {FnArg, FunctionRetTy, Mutability, Ty}; use attr::FilterAttrs; use quote::{Tokens, ToTokens}; impl ToTokens for Expr { fn to_tokens(&self, tokens: &mut Tokens) { tokens.append_all(self.attrs.outer()); match self.node { ExprKind::Box(ref inner) => { tokens.append("box"); inner.to_tokens(tokens); } ExprKind::InPlace(ref place, ref value) => { tokens.append("in"); place.to_tokens(tokens); value.to_tokens(tokens); } ExprKind::Vec(ref tys) => { tokens.append("["); tokens.append_separated(tys, ","); tokens.append("]"); } ExprKind::Call(ref func, ref args) => { func.to_tokens(tokens); tokens.append("("); tokens.append_separated(args, ","); tokens.append(")"); } ExprKind::MethodCall(ref ident, ref ascript, ref args) => { args[0].to_tokens(tokens); tokens.append("."); ident.to_tokens(tokens); if !ascript.is_empty() { tokens.append("::"); tokens.append("<"); tokens.append_separated(ascript, ","); tokens.append(">"); } tokens.append("("); tokens.append_separated(&args[1..], ","); tokens.append(")"); } ExprKind::Tup(ref fields) => { tokens.append("("); tokens.append_separated(fields, ","); if fields.len() == 1 { tokens.append(","); } tokens.append(")"); } ExprKind::Binary(op, ref 
left, ref right) => { left.to_tokens(tokens); op.to_tokens(tokens); right.to_tokens(tokens); } ExprKind::Unary(op, ref expr) => { op.to_tokens(tokens); expr.to_tokens(tokens); } ExprKind::Lit(ref lit) => lit.to_tokens(tokens), ExprKind::Cast(ref expr, ref ty) => { expr.to_tokens(tokens); tokens.append("as"); ty.to_tokens(tokens); } ExprKind::Type(ref expr, ref ty) => { expr.to_tokens(tokens); tokens.append(":"); ty.to_tokens(tokens); } ExprKind::If(ref cond, ref then_block, ref else_block) => { tokens.append("if"); cond.to_tokens(tokens); then_block.to_tokens(tokens); if let Some(ref else_block) = *else_block { tokens.append("else"); else_block.to_tokens(tokens); } } ExprKind::IfLet(ref pat, ref expr, ref then_block, ref else_block) => { tokens.append("if"); tokens.append("let"); pat.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); then_block.to_tokens(tokens); if let Some(ref else_block) = *else_block { tokens.append("else"); else_block.to_tokens(tokens); } } ExprKind::While(ref cond, ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("while"); cond.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::WhileLet(ref pat, ref expr, ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("while"); tokens.append("let"); pat.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::ForLoop(ref pat, ref expr, ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("for"); pat.to_tokens(tokens); tokens.append("in"); expr.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::Loop(ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("loop"); body.to_tokens(tokens); } ExprKind::Match(ref expr, ref arms) => { tokens.append("match"); expr.to_tokens(tokens); 
tokens.append("{"); tokens.append_all(arms); tokens.append("}"); } ExprKind::Closure(capture, ref decl, ref body) => { capture.to_tokens(tokens); tokens.append("|"); for (i, input) in decl.inputs.iter().enumerate() { if i > 0 { tokens.append(","); } match *input { FnArg::Captured(ref pat, Ty::Infer) => { pat.to_tokens(tokens); } _ => input.to_tokens(tokens), } } tokens.append("|"); match decl.output { FunctionRetTy::Default => { if body.stmts.len() == 1 { if let Stmt::Expr(ref expr) = body.stmts[0] { expr.to_tokens(tokens); } else { body.to_tokens(tokens); } } else { body.to_tokens(tokens); } } FunctionRetTy::Ty(ref ty) => { tokens.append("->"); ty.to_tokens(tokens); body.to_tokens(tokens); } } } ExprKind::Block(rules, ref block) => { rules.to_tokens(tokens); block.to_tokens(tokens); } ExprKind::Assign(ref var, ref expr) => { var.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); } ExprKind::AssignOp(op, ref var, ref expr) => { var.to_tokens(tokens); tokens.append(op.assign_op().unwrap()); expr.to_tokens(tokens); } ExprKind::Field(ref expr, ref field) => { expr.to_tokens(tokens); tokens.append("."); field.to_tokens(tokens); } ExprKind::TupField(ref expr, field) => { expr.to_tokens(tokens); tokens.append("."); tokens.append(&field.to_string()); } ExprKind::Index(ref expr, ref index) => { expr.to_tokens(tokens); tokens.append("["); index.to_tokens(tokens); tokens.append("]"); } ExprKind::Range(ref from, ref to, limits) => { from.to_tokens(tokens); match limits { RangeLimits::HalfOpen => tokens.append(".."), RangeLimits::Closed => tokens.append("..."), } to.to_tokens(tokens); } ExprKind::Path(None, ref path) => path.to_tokens(tokens), ExprKind::Path(Some(ref qself), ref path) => { tokens.append("<"); qself.ty.to_tokens(tokens); if qself.position > 0 { tokens.append("as"); for (i, segment) in path.segments .iter() .take(qself.position) .enumerate() { if i > 0 || path.global { tokens.append("::"); } segment.to_tokens(tokens); } } tokens.append(">"); for 
segment in path.segments.iter().skip(qself.position) { tokens.append("::"); segment.to_tokens(tokens); } } ExprKind::AddrOf(mutability, ref expr) => { tokens.append("&"); mutability.to_tokens(tokens); expr.to_tokens(tokens); } ExprKind::Break(ref opt_label) => { tokens.append("break"); opt_label.to_tokens(tokens); } ExprKind::Continue(ref opt_label) => { tokens.append("continue"); opt_label.to_tokens(tokens); } ExprKind::Ret(ref opt_expr) => { tokens.append("return"); opt_expr.to_tokens(tokens); } ExprKind::Mac(ref mac) => mac.to_tokens(tokens), ExprKind::Struct(ref path, ref fields, ref base) => { path.to_tokens(tokens); tokens.append("{"); tokens.append_separated(fields, ","); if let Some(ref base) = *base { if !fields.is_empty() { tokens.append(","); } tokens.append(".."); base.to_tokens(tokens); } tokens.append("}"); } ExprKind::Repeat(ref expr, ref times) => { tokens.append("["); expr.to_tokens(tokens); tokens.append(";"); times.to_tokens(tokens); tokens.append("]"); } ExprKind::Paren(ref expr) => { tokens.append("("); expr.to_tokens(tokens); tokens.append(")"); } ExprKind::Try(ref expr) => { expr.to_tokens(tokens); tokens.append("?"); } } } } impl ToTokens for FieldValue { fn to_tokens(&self, tokens: &mut Tokens) { self.ident.to_tokens(tokens); if !self.is_shorthand { tokens.append(":"); self.expr.to_tokens(tokens); } } } impl ToTokens for Arm { fn to_tokens(&self, tokens: &mut Tokens) { for attr in &self.attrs { attr.to_tokens(tokens); } tokens.append_separated(&self.pats, "|"); if let Some(ref guard) = self.guard { tokens.append("if"); guard.to_tokens(tokens); } tokens.append("=>"); self.body.to_tokens(tokens); match self.body.node { ExprKind::Block(BlockCheckMode::Default, _) => { // no comma } _ => tokens.append(","), } } } impl ToTokens for Pat { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Pat::Wild => tokens.append("_"), Pat::Ident(mode, ref ident, ref subpat) => { mode.to_tokens(tokens); ident.to_tokens(tokens); if let Some(ref subpat) = 
*subpat { tokens.append("@"); subpat.to_tokens(tokens); } } Pat::Struct(ref path, ref fields, dots) => { path.to_tokens(tokens); tokens.append("{"); tokens.append_separated(fields, ","); if dots { if !fields.is_empty() { tokens.append(","); } tokens.append(".."); } tokens.append("}"); } Pat::TupleStruct(ref path, ref pats, dotpos) => { path.to_tokens(tokens); tokens.append("("); match dotpos { Some(pos) => { if pos > 0 { tokens.append_separated(&pats[..pos], ","); tokens.append(","); } tokens.append(".."); if pos < pats.len() { tokens.append(","); tokens.append_separated(&pats[pos..], ","); } } None => tokens.append_separated(pats, ","), } tokens.append(")"); } Pat::Path(None, ref path) => path.to_tokens(tokens), Pat::Path(Some(ref qself), ref path) => { tokens.append("<"); qself.ty.to_tokens(tokens); if qself.position > 0 { tokens.append("as"); for (i, segment) in path.segments .iter() .take(qself.position) .enumerate() { if i > 0 || path.global { tokens.append("::"); } segment.to_tokens(tokens); } } tokens.append(">"); for segment in path.segments.iter().skip(qself.position) { tokens.append("::"); segment.to_tokens(tokens); } } Pat::Tuple(ref pats, dotpos) => { tokens.append("("); match dotpos { Some(pos) => { if pos > 0 { tokens.append_separated(&pats[..pos], ","); tokens.append(","); } tokens.append(".."); if pos < pats.len() { tokens.append(","); tokens.append_separated(&pats[pos..], ","); } } None => { tokens.append_separated(pats, ","); if pats.len() == 1 { tokens.append(","); } } } tokens.append(")"); } Pat::Box(ref inner) => { tokens.append("box"); inner.to_tokens(tokens); } Pat::Ref(ref target, mutability) => { tokens.append("&"); mutability.to_tokens(tokens); target.to_tokens(tokens); } Pat::Lit(ref lit) => lit.to_tokens(tokens), Pat::Range(ref lo, ref hi) => { lo.to_tokens(tokens); tokens.append("..."); hi.to_tokens(tokens); } Pat::Slice(ref before, ref rest, ref after) => { tokens.append("["); tokens.append_separated(before, ","); if let Some(ref rest) 
= *rest { if !before.is_empty() { tokens.append(","); } if **rest != Pat::Wild { rest.to_tokens(tokens); } tokens.append(".."); if !after.is_empty() { tokens.append(","); } tokens.append_separated(after, ","); } tokens.append("]"); } Pat::Mac(ref mac) => mac.to_tokens(tokens), } } } impl ToTokens for FieldPat { fn to_tokens(&self, tokens: &mut Tokens) { if !self.is_shorthand { self.ident.to_tokens(tokens); tokens.append(":"); } self.pat.to_tokens(tokens); } } impl ToTokens for BindingMode { fn to_tokens(&self, tokens: &mut Tokens) { match *self { BindingMode::ByRef(Mutability::Immutable) => { tokens.append("ref"); } BindingMode::ByRef(Mutability::Mutable) => { tokens.append("ref"); tokens.append("mut"); } BindingMode::ByValue(Mutability::Immutable) => {} BindingMode::ByValue(Mutability::Mutable) => { tokens.append("mut"); } } } } impl ToTokens for CaptureBy { fn to_tokens(&self, tokens: &mut Tokens) { match *self { CaptureBy::Value => tokens.append("move"), CaptureBy::Ref => { // nothing } } } } impl ToTokens for Block { fn to_tokens(&self, tokens: &mut Tokens) { tokens.append("{"); tokens.append_all(&self.stmts); tokens.append("}"); } } impl ToTokens for BlockCheckMode { fn to_tokens(&self, tokens: &mut Tokens) { match *self { BlockCheckMode::Default => { // nothing } BlockCheckMode::Unsafe => tokens.append("unsafe"), } } } impl ToTokens for Stmt { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Stmt::Local(ref local) => local.to_tokens(tokens), Stmt::Item(ref item) => item.to_tokens(tokens), Stmt::Expr(ref expr) => expr.to_tokens(tokens), Stmt::Semi(ref expr) => { expr.to_tokens(tokens); tokens.append(";"); } Stmt::Mac(ref mac) => { let (ref mac, style, ref attrs) = **mac; tokens.append_all(attrs.outer()); mac.to_tokens(tokens); match style { MacStmtStyle::Semicolon => tokens.append(";"), MacStmtStyle::Braces | MacStmtStyle::NoBraces => { // no semicolon } } } } } } impl ToTokens for Local { fn to_tokens(&self, tokens: &mut Tokens) { 
tokens.append_all(self.attrs.outer()); tokens.append("let"); self.pat.to_tokens(tokens); if let Some(ref ty) = self.ty { tokens.append(":"); ty.to_tokens(tokens); } if let Some(ref init) = self.init { tokens.append("="); init.to_tokens(tokens); } tokens.append(";"); } } } /* Public fields for struct Expr */ use super::*; #[derive(Debug, Clone, Eq, PartialEq)] pub struct Expr { pub node: ExprKind, pub attrs: Vec<Attribute>, } impl From<ExprKind> for Expr { fn from(node: ExprKind) -> Expr { Expr { node: node, attrs: Vec::new(), } } } #[derive(Debug, Clone, Eq, PartialEq)] pub enum ExprKind { /// A `box x` expression. Box(Box<Expr>), /// First expr is the place; second expr is the value. InPlace(Box<Expr>, Box<Expr>), /// An array (`[a, b, c, d]`) Vec(Vec<Expr>), /// A function call /// /// The first field resolves to the function itself, /// and the second field is the list of arguments Call(Box<Expr>, Vec<Expr>), /// A method call (`x.foo::<Bar, Baz>(a, b, c, d)`) /// /// The `Ident` is the identifier for the method name. /// The vector of `Ty`s are the ascripted type parameters for the method /// (within the angle brackets). /// /// The first element of the vector of `Expr`s is the expression that evaluates /// to the object on which the method is being called on (the receiver), /// and the remaining elements are the rest of the arguments. /// /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as /// `ExprKind::MethodCall(foo, [Bar, Baz], [x, a, b, c, d])`.
MethodCall(Ident, Vec<Ty>, Vec<Expr>), /// A tuple (`(a, b, c, d)`) Tup(Vec<Expr>), /// A binary operation (For example: `a + b`, `a * b`) Binary(BinOp, Box<Expr>, Box<Expr>), /// A unary operation (For example: `!x`, `*x`) Unary(UnOp, Box<Expr>), /// A literal (For example: `1`, `"foo"`) Lit(Lit), /// A cast (`foo as f64`) Cast(Box<Expr>, Box<Ty>), /// Type ascription (`foo: f64`) Type(Box<Expr>, Box<Ty>), /// An `if` block, with an optional else block /// /// `if expr { block } else { expr }` If(Box<Expr>, Block, Option<Box<Expr>>), /// An `if let` expression with an optional else block /// /// `if let pat = expr { block } else { expr }` /// /// This is desugared to a `match` expression. IfLet(Box<Pat>, Box<Expr>, Block, Option<Box<Expr>>), /// A while loop, with an optional label /// /// `'label: while expr { block }` While(Box<Expr>, Block, Option<Ident>), /// A while-let loop, with an optional label /// /// `'label: while let pat = expr { block }` /// /// This is desugared to a combination of `loop` and `match` expressions. WhileLet(Box<Pat>, Box<Expr>, Block, Option<Ident>), /// A for loop, with an optional label /// /// `'label: for pat in expr { block }` /// /// This is desugared to a combination of `loop` and `match` expressions. ForLoop(Box<Pat>, Box<Expr>, Block, Option<Ident>), /// Conditionless loop (can be exited with break, continue, or return) /// /// `'label: loop { block }` Loop(Block, Option<Ident>), /// A `match` block. Match(Box<Expr>, Vec<Arm>), /// A closure (for example, `move |a, b, c| {a + b + c}`) Closure(CaptureBy, Box<FnDecl>, Block), /// A block (`{ ... }` or `unsafe { ... }`) Block(BlockCheckMode, Block), /// An assignment (`a = foo()`) Assign(Box<Expr>, Box<Expr>), /// An assignment with an operator /// /// For example, `a += 1`. AssignOp(BinOp, Box<Expr>, Box<Expr>), /// Access of a named struct field (`obj.foo`) Field(Box<Expr>, Ident), /// Access of an unnamed field of a struct or tuple-struct /// /// For example, `foo.0`. 
TupField(Box<Expr>, usize), /// An indexing operation (`foo[2]`) Index(Box<Expr>, Box<Expr>), /// A range (`1..2`, `1..`, `..2`, `1...2`, `1...`, `...2`) Range(Option<Box<Expr>>, Option<Box<Expr>>, RangeLimits), /// Variable reference, possibly containing `::` and/or type /// parameters, e.g. foo::bar::<baz>. /// /// Optionally "qualified", /// E.g. `<Vec<T> as SomeTrait>::SomeType`. Path(Option<QSelf>, Path), /// A referencing operation (`&a` or `&mut a`) AddrOf(Mutability, Box<Expr>), /// A `break`, with an optional label to break Break(Option<Ident>), /// A `continue`, with an optional label Continue(Option<Ident>), /// A `return`, with an optional value to be returned Ret(Option<Box<Expr>>), /// A macro invocation; pre-expansion Mac(Mac), /// A struct literal expression. /// /// For example, `Foo {x: 1, y: 2}`, or /// `Foo {x: 1, .. base}`, where `base` is the `Option<Expr>`. Struct(Path, Vec<FieldValue>, Option<Box<Expr>>), /// An array literal constructed from one repeated element. /// /// For example, `[1; 5]`. The first expression is the element /// to be repeated; the second is the number of times to repeat it. Repeat(Box<Expr>, Box<Expr>), /// No-op: used solely so we can pretty-print faithfully Paren(Box<Expr>), /// `expr?` Try(Box<Expr>), } #[derive(Debug, Clone, Eq, PartialEq)] pub struct FieldValue { pub ident: Ident, pub expr: Expr, pub is_shorthand: bool, } /// A Block (`{ .. }`). /// /// E.g. `{ .. }` as in `fn foo() { .. }` #[derive(Debug, Clone, Eq, PartialEq)] pub struct Block { /// Statements in a block pub stmts: Vec<Stmt>, } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum BlockCheckMode { Default, Unsafe, } #[derive(Debug, Clone, Eq, PartialEq)] pub enum Stmt { /// A local (let) binding. Local(Box<Local>), /// An item definition. Item(Box<Item>), /// Expr without trailing semi-colon. 
Expr(Box<Expr>), Semi(Box<Expr>), Mac(Box<(Mac, MacStmtStyle, Vec<Attribute>)>), } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum MacStmtStyle { /// The macro statement had a trailing semicolon, e.g. `foo! { ... };` /// `foo!(...);`, `foo![...];` Semicolon, /// The macro statement had braces; e.g. foo! { ... } Braces, /// The macro statement had parentheses or brackets and no semicolon; e.g. /// `foo!(...)`. All of these will end up being converted into macro /// expressions. NoBraces, } /// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;` #[derive(Debug, Clone, Eq, PartialEq)] pub struct Local { pub pat: Box<Pat>, pub ty: Option<Box<Ty>>, /// Initializer expression to set the value, if any pub init: Option<Box<Expr>>, pub attrs: Vec<Attribute>, } #[derive(Debug, Clone, Eq, PartialEq)] // Clippy false positive // https://github.com/Manishearth/rust-clippy/issues/1241 #[cfg_attr(feature = "clippy", allow(enum_variant_names))] pub enum Pat { /// Represents a wildcard pattern (`_`) Wild, /// A `Pat::Ident` may either be a new bound variable (`ref mut binding @ OPT_SUBPATTERN`), /// or a unit struct/variant pattern, or a const pattern (in the last two cases the third /// field must be `None`). Disambiguation cannot be done with parser alone, so it happens /// during name resolution. Ident(BindingMode, Ident, Option<Box<Pat>>), /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`. /// The `bool` is `true` in the presence of a `..`. Struct(Path, Vec<FieldPat>, bool), /// A tuple struct/variant pattern `Variant(x, y, .., z)`. /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position. /// 0 <= position <= subpats.len() TupleStruct(Path, Vec<Pat>, Option<usize>), /// A possibly qualified path pattern. /// Unqualified path patterns `A::B::C` can legally refer to variants, structs, constants /// or associated constants.
Qualified path patterns `<A>::B::C`/`<A as Trait>::B::C` can /// only legally refer to associated constants. Path(Option<QSelf>, Path), /// A tuple pattern `(a, b)`. /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position. /// 0 <= position <= subpats.len() Tuple(Vec<Pat>, Option<usize>), /// A `box` pattern Box(Box<Pat>), /// A reference pattern, e.g. `&mut (a, b)` Ref(Box<Pat>, Mutability), /// A literal Lit(Box<Expr>), /// A range pattern, e.g. `1...2` Range(Box<Expr>, Box<Expr>), /// `[a, b, ..i, y, z]` is represented as: /// `Pat::Slice(box [a, b], Some(i), box [y, z])` Slice(Vec<Pat>, Option<Box<Pat>>, Vec<Pat>), /// A macro pattern; pre-expansion Mac(Mac), } /// An arm of a 'match'. /// /// E.g. `0...10 => { println!("match!") }` as in /// /// ```rust,ignore /// match n { /// 0...10 => { println!("match!") }, /// // .. /// } /// ``` #[derive(Debug, Clone, Eq, PartialEq)] pub struct Arm { pub attrs: Vec<Attribute>, pub pats: Vec<Pat>, pub guard: Option<Box<Expr>>, pub body: Box<Expr>, } /// A capture clause #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum CaptureBy { Value, Ref, } /// Limit types of a range (inclusive or exclusive) #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RangeLimits { /// Inclusive at the beginning, exclusive at the end HalfOpen, /// Inclusive at the beginning and end Closed, } /// A single field in a struct pattern /// /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` /// are treated the same as `x: x, y: ref y, z: ref mut z`, /// except `is_shorthand` is true #[derive(Debug, Clone, Eq, PartialEq)] pub struct FieldPat { /// The identifier for the field pub ident: Ident, /// The pattern the field is destructured to pub pat: Box<Pat>, pub is_shorthand: bool, } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum BindingMode { ByRef(Mutability), ByValue(Mutability), } #[cfg(feature = "parsing")] pub mod parsing { use super::*; use {BinOp, Delimited, DelimToken, FnArg, FnDecl,
FunctionRetTy, Ident, Lifetime, Mac, TokenTree, Ty, UnOp}; use attr::parsing::outer_attr; use generics::parsing::lifetime; use ident::parsing::{ident, wordlike}; use item::parsing::item; use lit::parsing::{digits, lit}; use mac::parsing::{mac, token_trees}; use nom::IResult::{self, Error}; use op::parsing::{assign_op, binop, unop}; use ty::parsing::{mutability, path, qpath, ty}; // Struct literals are ambiguous in certain positions // https://github.com/rust-lang/rfcs/pull/92 macro_rules! named_ambiguous_expr { ($name:ident -> $o:ty, $allow_struct:ident, $submac:ident!( $($args:tt)* )) => { fn $name(i: &str, $allow_struct: bool) -> $crate::nom::IResult<&str, $o> { $submac!(i, $($args)*) } }; } macro_rules! ambiguous_expr { ($i:expr, $allow_struct:ident) => { ambiguous_expr($i, $allow_struct, true) }; } named!(pub expr -> Expr, ambiguous_expr!(true)); named!(expr_no_struct -> Expr, ambiguous_expr!(false)); fn ambiguous_expr(i: &str, allow_struct: bool, allow_block: bool) -> IResult<&str, Expr> { do_parse!( i, mut e: alt!( expr_lit // must be before expr_struct | cond_reduce!(allow_struct, expr_struct) // must be before expr_path | expr_paren // must be before expr_tup | expr_mac // must be before expr_path | expr_break // must be before expr_path | expr_continue // must be before expr_path | call!(expr_ret, allow_struct) // must be before expr_path | call!(expr_box, allow_struct) | expr_in_place | expr_vec | expr_tup | call!(expr_unary, allow_struct) | expr_if | expr_while | expr_for_loop | expr_loop | expr_match | call!(expr_closure, allow_struct) | cond_reduce!(allow_block, expr_block) | call!(expr_range, allow_struct) | expr_path | call!(expr_addr_of, allow_struct) | expr_repeat ) >> many0!(alt!( tap!(args: and_call => { e = ExprKind::Call(Box::new(e.into()), args); }) | tap!(more: and_method_call => { let (method, ascript, mut args) = more; args.insert(0, e.into()); e = ExprKind::MethodCall(method, ascript, args); }) | tap!(more: call!(and_binary, allow_struct) 
// NOTE(review): minified chunk of a syn-0.x-style `src/expr.rs`: nom-style
// `named!` parser combinators for expressions, statements and patterns,
// followed by the `printing` module's `ToTokens` impls. The code below is
// byte-identical to the original; only these `//` review comments were added
// at the original physical-line seams.
// -- continuation of the expression-trailer parser (binary operators, casts,
//    type ascription, assignment, field/tuple-field access, indexing, ranges,
//    and the `?` operator), whose head lies before this chunk.
=> { let (op, other) = more; e = ExprKind::Binary(op, Box::new(e.into()), Box::new(other)); }) | tap!(ty: and_cast => { e = ExprKind::Cast(Box::new(e.into()), Box::new(ty)); }) | tap!(ty: and_ascription => { e = ExprKind::Type(Box::new(e.into()), Box::new(ty)); }) | tap!(v: call!(and_assign, allow_struct) => { e = ExprKind::Assign(Box::new(e.into()), Box::new(v)); }) | tap!(more: call!(and_assign_op, allow_struct) => { let (op, v) = more; e = ExprKind::AssignOp(op, Box::new(e.into()), Box::new(v)); }) | tap!(field: and_field => { e = ExprKind::Field(Box::new(e.into()), field); }) | tap!(field: and_tup_field => { e = ExprKind::TupField(Box::new(e.into()), field as usize); }) | tap!(i: and_index => { e = ExprKind::Index(Box::new(e.into()), Box::new(i)); }) | tap!(more: call!(and_range, allow_struct) => { let (limits, hi) = more; e = ExprKind::Range(Some(Box::new(e.into())), hi.map(Box::new), limits); }) | tap!(_try: punct!("?") => { e = ExprKind::Try(Box::new(e.into())); }) )) >> (e.into()) ) } named!(expr_mac -> ExprKind, map!(mac, ExprKind::Mac)); named!(expr_paren -> ExprKind, do_parse!( punct!("(") >> e: expr >> punct!(")") >> (ExprKind::Paren(Box::new(e))) )); named_ambiguous_expr!(expr_box -> ExprKind, allow_struct, do_parse!( keyword!("box") >> inner: ambiguous_expr!(allow_struct) >> (ExprKind::Box(Box::new(inner))) )); named!(expr_in_place -> ExprKind, do_parse!( keyword!("in") >> place: expr_no_struct >> punct!("{") >> value: within_block >> punct!("}") >> (ExprKind::InPlace( Box::new(place), Box::new(ExprKind::Block(BlockCheckMode::Default, Block { stmts: value, }).into()), )) )); named!(expr_vec -> ExprKind, do_parse!( punct!("[") >> elems: terminated_list!(punct!(","), expr) >> punct!("]") >> (ExprKind::Vec(elems)) )); named!(and_call -> Vec<Expr>, do_parse!( punct!("(") >> args: terminated_list!(punct!(","), expr) >> punct!(")") >> (args) )); named!(and_method_call -> (Ident, Vec<Ty>, Vec<Expr>), do_parse!( punct!(".") >> method: ident >> ascript:
// Method-call trailer `.method::<T,...>(args)`; then tuple expressions,
// binary/unary operator parsers, literals, `as` casts, `:` ascription, the
// shared `cond` rule (`let pat = expr` or plain expr), `if`/`if let` with
// optional `else`/`else if`, and labelled `for` loops.
opt_vec!(preceded!( punct!("::"), delimited!( punct!("<"), terminated_list!(punct!(","), ty), punct!(">") ) )) >> punct!("(") >> args: terminated_list!(punct!(","), expr) >> punct!(")") >> (method, ascript, args) )); named!(expr_tup -> ExprKind, do_parse!( punct!("(") >> elems: terminated_list!(punct!(","), expr) >> punct!(")") >> (ExprKind::Tup(elems)) )); named_ambiguous_expr!(and_binary -> (BinOp, Expr), allow_struct, tuple!( binop, ambiguous_expr!(allow_struct) )); named_ambiguous_expr!(expr_unary -> ExprKind, allow_struct, do_parse!( operator: unop >> operand: ambiguous_expr!(allow_struct) >> (ExprKind::Unary(operator, Box::new(operand))) )); named!(expr_lit -> ExprKind, map!(lit, ExprKind::Lit)); named!(and_cast -> Ty, do_parse!( keyword!("as") >> ty: ty >> (ty) )); named!(and_ascription -> Ty, preceded!(punct!(":"), ty)); enum Cond { Let(Pat, Expr), Expr(Expr), } named!(cond -> Cond, alt!( do_parse!( keyword!("let") >> pat: pat >> punct!("=") >> value: expr_no_struct >> (Cond::Let(pat, value)) ) | map!(expr_no_struct, Cond::Expr) )); named!(expr_if -> ExprKind, do_parse!( keyword!("if") >> cond: cond >> punct!("{") >> then_block: within_block >> punct!("}") >> else_block: option!(preceded!( keyword!("else"), alt!( expr_if | do_parse!( punct!("{") >> else_block: within_block >> punct!("}") >> (ExprKind::Block(BlockCheckMode::Default, Block { stmts: else_block, }).into()) ) ) )) >> (match cond { Cond::Let(pat, expr) => ExprKind::IfLet( Box::new(pat), Box::new(expr), Block { stmts: then_block, }, else_block.map(|els| Box::new(els.into())), ), Cond::Expr(cond) => ExprKind::If( Box::new(cond), Block { stmts: then_block, }, else_block.map(|els| Box::new(els.into())), ), }) )); named!(expr_for_loop -> ExprKind, do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("for") >> pat: pat >> keyword!("in") >> expr: expr_no_struct >> loop_block: block >> (ExprKind::ForLoop(Box::new(pat), Box::new(expr), loop_block, lbl)) )); named!(expr_loop -> ExprKind,
// `loop`, `match` (arms whose bodies are plain blocks may omit the trailing
// comma — see `arm_requires_comma`), individual match arms, and closures:
// an explicit `-> Ty` return type requires a block body, otherwise any
// expression becomes a single-statement block. Then labelled `while`/
// `while let`.
do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("loop") >> loop_block: block >> (ExprKind::Loop(loop_block, lbl)) )); named!(expr_match -> ExprKind, do_parse!( keyword!("match") >> obj: expr_no_struct >> punct!("{") >> mut arms: many0!(do_parse!( arm: match_arm >> cond!(arm_requires_comma(&arm), punct!(",")) >> cond!(!arm_requires_comma(&arm), option!(punct!(","))) >> (arm) )) >> last_arm: option!(match_arm) >> punct!("}") >> (ExprKind::Match(Box::new(obj), { arms.extend(last_arm); arms })) )); fn arm_requires_comma(arm: &Arm) -> bool { if let ExprKind::Block(BlockCheckMode::Default, _) = arm.body.node { false } else { true } } named!(match_arm -> Arm, do_parse!( attrs: many0!(outer_attr) >> pats: separated_nonempty_list!(punct!("|"), pat) >> guard: option!(preceded!(keyword!("if"), expr)) >> punct!("=>") >> body: alt!( map!(block, |blk| ExprKind::Block(BlockCheckMode::Default, blk).into()) | expr ) >> (Arm { attrs: attrs, pats: pats, guard: guard.map(Box::new), body: Box::new(body), }) )); named_ambiguous_expr!(expr_closure -> ExprKind, allow_struct, do_parse!( capture: capture_by >> punct!("|") >> inputs: terminated_list!(punct!(","), closure_arg) >> punct!("|") >> ret_and_body: alt!( do_parse!( punct!("->") >> ty: ty >> body: block >> ((FunctionRetTy::Ty(ty), body)) ) | map!(ambiguous_expr!(allow_struct), |e| ( FunctionRetTy::Default, Block { stmts: vec![Stmt::Expr(Box::new(e))], }, )) ) >> (ExprKind::Closure( capture, Box::new(FnDecl { inputs: inputs, output: ret_and_body.0, variadic: false, }), ret_and_body.1, )) )); named!(closure_arg -> FnArg, do_parse!( pat: pat >> ty: option!(preceded!(punct!(":"), ty)) >> (FnArg::Captured(pat, ty.unwrap_or(Ty::Infer))) )); named!(expr_while -> ExprKind, do_parse!( lbl: option!(terminated!(label, punct!(":"))) >> keyword!("while") >> cond: cond >> while_block: block >> (match cond { Cond::Let(pat, expr) => ExprKind::WhileLet( Box::new(pat), Box::new(expr), while_block, lbl, ), Cond::Expr(cond) =>
// `continue`/`break` (label only), `return` (optional value), struct
// literals with functional-update `..base` and shorthand fields,
// `[value; times]` repeats, `{ ... }`/`unsafe { ... }` blocks, range
// expressions, (qualified) paths, and `&`/`&mut` borrows.
ExprKind::While( Box::new(cond), while_block, lbl, ), }) )); named!(expr_continue -> ExprKind, do_parse!( keyword!("continue") >> lbl: option!(label) >> (ExprKind::Continue(lbl)) )); named!(expr_break -> ExprKind, do_parse!( keyword!("break") >> lbl: option!(label) >> (ExprKind::Break(lbl)) )); named_ambiguous_expr!(expr_ret -> ExprKind, allow_struct, do_parse!( keyword!("return") >> ret_value: option!(ambiguous_expr!(allow_struct)) >> (ExprKind::Ret(ret_value.map(Box::new))) )); named!(expr_struct -> ExprKind, do_parse!( path: path >> punct!("{") >> fields: separated_list!(punct!(","), field_value) >> base: option!(do_parse!( cond!(!fields.is_empty(), punct!(",")) >> punct!("..") >> base: expr >> (base) )) >> cond!(!fields.is_empty() && base.is_none(), option!(punct!(","))) >> punct!("}") >> (ExprKind::Struct(path, fields, base.map(Box::new))) )); named!(field_value -> FieldValue, alt!( do_parse!( name: wordlike >> punct!(":") >> value: expr >> (FieldValue { ident: name, expr: value, is_shorthand: false, }) ) | map!(ident, |name: Ident| FieldValue { ident: name.clone(), expr: ExprKind::Path(None, name.into()).into(), is_shorthand: true, }) )); named!(expr_repeat -> ExprKind, do_parse!( punct!("[") >> value: expr >> punct!(";") >> times: expr >> punct!("]") >> (ExprKind::Repeat(Box::new(value), Box::new(times))) )); named!(expr_block -> ExprKind, do_parse!( rules: block_check_mode >> b: block >> (ExprKind::Block(rules, Block { stmts: b.stmts, })) )); named_ambiguous_expr!(expr_range -> ExprKind, allow_struct, do_parse!( limits: range_limits >> hi: option!(ambiguous_expr!(allow_struct)) >> (ExprKind::Range(None, hi.map(Box::new), limits)) )); named!(range_limits -> RangeLimits, alt!( punct!("...") => { |_| RangeLimits::Closed } | punct!("..") => { |_| RangeLimits::HalfOpen } )); named!(expr_path -> ExprKind, map!(qpath, |(qself, path)| ExprKind::Path(qself, path))); named_ambiguous_expr!(expr_addr_of -> ExprKind, allow_struct, do_parse!( punct!("&") >> mutability:
// Assignment (`=`, `op=`), `.field`, `.0`, `[index]` and range trailers;
// then block/statement parsing. `within_block` tolerates stray semicolons
// and a trailing expression; `stmt_mac` only accepts brace-delimited macro
// statements (paren/bracket forms parse as expression statements instead).
mutability >> expr: ambiguous_expr!(allow_struct) >> (ExprKind::AddrOf(mutability, Box::new(expr))) )); named_ambiguous_expr!(and_assign -> Expr, allow_struct, preceded!( punct!("="), ambiguous_expr!(allow_struct) )); named_ambiguous_expr!(and_assign_op -> (BinOp, Expr), allow_struct, tuple!( assign_op, ambiguous_expr!(allow_struct) )); named!(and_field -> Ident, preceded!(punct!("."), ident)); named!(and_tup_field -> u64, preceded!(punct!("."), digits)); named!(and_index -> Expr, delimited!(punct!("["), expr, punct!("]"))); named_ambiguous_expr!(and_range -> (RangeLimits, Option<Expr>), allow_struct, tuple!( range_limits, option!(call!(ambiguous_expr, allow_struct, false)) )); named!(pub block -> Block, do_parse!( punct!("{") >> stmts: within_block >> punct!("}") >> (Block { stmts: stmts, }) )); named!(block_check_mode -> BlockCheckMode, alt!( keyword!("unsafe") => { |_| BlockCheckMode::Unsafe } | epsilon!() => { |_| BlockCheckMode::Default } )); named!(pub within_block -> Vec<Stmt>, do_parse!( many0!(punct!(";")) >> mut standalone: many0!(terminated!(standalone_stmt, many0!(punct!(";")))) >> last: option!(expr) >> (match last { None => standalone, Some(last) => { standalone.push(Stmt::Expr(Box::new(last))); standalone } }) )); named!(standalone_stmt -> Stmt, alt!( stmt_mac | stmt_local | stmt_item | stmt_expr )); named!(stmt_mac -> Stmt, do_parse!( attrs: many0!(outer_attr) >> name: ident >> punct!("!") >> // Only parse braces here; paren and bracket will get parsed as // expression statements punct!("{") >> tts: token_trees >> punct!("}") >> semi: option!(punct!(";")) >> (Stmt::Mac(Box::new(( Mac { path: name.into(), tts: vec![TokenTree::Delimited(Delimited { delim: DelimToken::Brace, tts: tts, })], }, if semi.is_some() { MacStmtStyle::Semicolon } else { MacStmtStyle::Braces }, attrs, )))) )); named!(stmt_local -> Stmt, do_parse!( attrs: many0!(outer_attr) >> keyword!("let") >> pat: pat >> ty: option!(preceded!(punct!(":"), ty)) >> init:
// `let` statements, items-as-statements, and expression statements:
// block-like expressions (if/while/for/loop/match/block) may stand alone
// without a semicolon (`requires_semi`); anything else without `;` must be
// the block's trailing expression or parsing fails. Then the pattern
// grammar — the order of `alt!` branches is load-bearing (see inline
// "must be before" comments).
option!(preceded!(punct!("="), expr)) >> punct!(";") >> (Stmt::Local(Box::new(Local { pat: Box::new(pat), ty: ty.map(Box::new), init: init.map(Box::new), attrs: attrs, }))) )); named!(stmt_item -> Stmt, map!(item, |i| Stmt::Item(Box::new(i)))); fn requires_semi(e: &Expr) -> bool { match e.node { ExprKind::If(_, _, _) | ExprKind::IfLet(_, _, _, _) | ExprKind::While(_, _, _) | ExprKind::WhileLet(_, _, _, _) | ExprKind::ForLoop(_, _, _, _) | ExprKind::Loop(_, _) | ExprKind::Match(_, _) | ExprKind::Block(_, _) => false, _ => true, } } named!(stmt_expr -> Stmt, do_parse!( attrs: many0!(outer_attr) >> mut e: expr >> semi: option!(punct!(";")) >> ({ e.attrs = attrs; if semi.is_some() { Stmt::Semi(Box::new(e)) } else if requires_semi(&e) { return Error; } else { Stmt::Expr(Box::new(e)) } }) )); named!(pub pat -> Pat, alt!( pat_wild // must be before pat_ident | pat_box // must be before pat_ident | pat_range // must be before pat_lit | pat_tuple_struct // must be before pat_ident | pat_struct // must be before pat_ident | pat_mac // must be before pat_ident | pat_lit // must be before pat_ident | pat_ident // must be before pat_path | pat_path | pat_tuple | pat_ref | pat_slice )); named!(pat_mac -> Pat, map!(mac, Pat::Mac)); named!(pat_wild -> Pat, map!(keyword!("_"), |_| Pat::Wild)); named!(pat_box -> Pat, do_parse!( keyword!("box") >> pat: pat >> (Pat::Box(Box::new(pat))) )); named!(pat_ident -> Pat, do_parse!( mode: option!(keyword!("ref")) >> mutability: mutability >> name: alt!( ident | keyword!("self") => { Into::into } ) >> not!(peek!(punct!("<"))) >> not!(peek!(punct!("::"))) >> subpat: option!(preceded!(punct!("@"), pat)) >> (Pat::Ident( if mode.is_some() { BindingMode::ByRef(mutability) } else { BindingMode::ByValue(mutability) }, name, subpat.map(Box::new), )) )); named!(pat_tuple_struct -> Pat, do_parse!( path: path >> tuple: pat_tuple_helper >> (Pat::TupleStruct(path, tuple.0, tuple.1)) )); named!(pat_struct -> Pat, do_parse!( path: path >> punct!("{") >>
// Struct patterns with optional `..` rest, field patterns including the
// `box`/`ref`/`mut` shorthand form, (qualified-)path patterns, tuple
// patterns tracking the position of a `..` gap, `&pat`, literal patterns
// (bare paths are rejected here so `pat_path` can claim them), and
// `lo...hi` range patterns.
fields: separated_list!(punct!(","), field_pat) >> more: option!(preceded!( cond!(!fields.is_empty(), punct!(",")), punct!("..") )) >> cond!(!fields.is_empty() && more.is_none(), option!(punct!(","))) >> punct!("}") >> (Pat::Struct(path, fields, more.is_some())) )); named!(field_pat -> FieldPat, alt!( do_parse!( ident: wordlike >> punct!(":") >> pat: pat >> (FieldPat { ident: ident, pat: Box::new(pat), is_shorthand: false, }) ) | do_parse!( boxed: option!(keyword!("box")) >> mode: option!(keyword!("ref")) >> mutability: mutability >> ident: ident >> ({ let mut pat = Pat::Ident( if mode.is_some() { BindingMode::ByRef(mutability) } else { BindingMode::ByValue(mutability) }, ident.clone(), None, ); if boxed.is_some() { pat = Pat::Box(Box::new(pat)); } FieldPat { ident: ident, pat: Box::new(pat), is_shorthand: true, } }) ) )); named!(pat_path -> Pat, map!(qpath, |(qself, path)| Pat::Path(qself, path))); named!(pat_tuple -> Pat, map!( pat_tuple_helper, |(pats, dotdot)| Pat::Tuple(pats, dotdot) )); named!(pat_tuple_helper -> (Vec<Pat>, Option<usize>), do_parse!( punct!("(") >> mut elems: separated_list!(punct!(","), pat) >> dotdot: option!(do_parse!( cond!(!elems.is_empty(), punct!(",")) >> punct!("..") >> rest: many0!(preceded!(punct!(","), pat)) >> cond!(!rest.is_empty(), option!(punct!(","))) >> (rest) )) >> cond!(!elems.is_empty() && dotdot.is_none(), option!(punct!(","))) >> punct!(")") >> (match dotdot { Some(rest) => { let pos = elems.len(); elems.extend(rest); (elems, Some(pos)) } None => (elems, None), }) )); named!(pat_ref -> Pat, do_parse!( punct!("&") >> mutability: mutability >> pat: pat >> (Pat::Ref(Box::new(pat), mutability)) )); named!(pat_lit -> Pat, do_parse!( lit: pat_lit_expr >> (if let ExprKind::Path(_, _) = lit.node { return IResult::Error; // these need to be parsed by pat_path } else { Pat::Lit(Box::new(lit)) }) )); named!(pat_range -> Pat, do_parse!( lo: pat_lit_expr >> punct!("...") >> hi: pat_lit_expr >> (Pat::Range(Box::new(lo), Box::new(hi)))
// Range/literal pattern operands (optional leading `-`), slice patterns
// with a `..` rest binding (distinguishing `[a, b..]` from `[a, ..]`),
// closure capture mode (`move` vs by-ref), loop labels; then the
// `printing` module: ToTokens impls that re-emit the AST as token streams.
)); named!(pat_lit_expr -> Expr, do_parse!( neg: option!(punct!("-")) >> v: alt!( lit => { ExprKind::Lit } | path => { |p| ExprKind::Path(None, p) } ) >> (if neg.is_some() { ExprKind::Unary(UnOp::Neg, Box::new(v.into())).into() } else { v.into() }) )); named!(pat_slice -> Pat, do_parse!( punct!("[") >> mut before: separated_list!(punct!(","), pat) >> after: option!(do_parse!( comma_before_dots: option!(cond_reduce!(!before.is_empty(), punct!(","))) >> punct!("..") >> after: many0!(preceded!(punct!(","), pat)) >> cond!(!after.is_empty(), option!(punct!(","))) >> (comma_before_dots.is_some(), after) )) >> cond!(after.is_none(), option!(punct!(","))) >> punct!("]") >> (match after { None => Pat::Slice(before, None, Vec::new()), Some((true, after)) => { if before.is_empty() { return IResult::Error; } Pat::Slice(before, Some(Box::new(Pat::Wild)), after) } Some((false, after)) => { let rest = before.pop().unwrap_or(Pat::Wild); Pat::Slice(before, Some(Box::new(rest)), after) } }) )); named!(capture_by -> CaptureBy, alt!( keyword!("move") => { |_| CaptureBy::Value } | epsilon!() => { |_| CaptureBy::Ref } )); named!(label -> Ident, map!(lifetime, |lt: Lifetime| lt.ident)); } #[cfg(feature = "printing")] mod printing { use super::*; use {FnArg, FunctionRetTy, Mutability, Ty}; use attr::FilterAttrs; use quote::{Tokens, ToTokens}; impl ToTokens for Expr { fn to_tokens(&self, tokens: &mut Tokens) { tokens.append_all(self.attrs.outer()); match self.node { ExprKind::Box(ref inner) => { tokens.append("box"); inner.to_tokens(tokens); } ExprKind::InPlace(ref place, ref value) => { tokens.append("in"); place.to_tokens(tokens); value.to_tokens(tokens); } ExprKind::Vec(ref tys) => { tokens.append("["); tokens.append_separated(tys, ","); tokens.append("]"); } ExprKind::Call(ref func, ref args) => { func.to_tokens(tokens); tokens.append("("); tokens.append_separated(args, ","); tokens.append(")"); } ExprKind::MethodCall(ref ident, ref ascript, ref args) => { args[0].to_tokens(tokens);
// Expr printing continued: method calls (receiver is args[0]), tuples
// (trailing comma for 1-tuples), binary/unary ops, casts, ascription,
// `if`/`if let` with `else`, labelled `while`/`while let`/`for` loops.
tokens.append("."); ident.to_tokens(tokens); if !ascript.is_empty() { tokens.append("::"); tokens.append("<"); tokens.append_separated(ascript, ","); tokens.append(">"); } tokens.append("("); tokens.append_separated(&args[1..], ","); tokens.append(")"); } ExprKind::Tup(ref fields) => { tokens.append("("); tokens.append_separated(fields, ","); if fields.len() == 1 { tokens.append(","); } tokens.append(")"); } ExprKind::Binary(op, ref left, ref right) => { left.to_tokens(tokens); op.to_tokens(tokens); right.to_tokens(tokens); } ExprKind::Unary(op, ref expr) => { op.to_tokens(tokens); expr.to_tokens(tokens); } ExprKind::Lit(ref lit) => lit.to_tokens(tokens), ExprKind::Cast(ref expr, ref ty) => { expr.to_tokens(tokens); tokens.append("as"); ty.to_tokens(tokens); } ExprKind::Type(ref expr, ref ty) => { expr.to_tokens(tokens); tokens.append(":"); ty.to_tokens(tokens); } ExprKind::If(ref cond, ref then_block, ref else_block) => { tokens.append("if"); cond.to_tokens(tokens); then_block.to_tokens(tokens); if let Some(ref else_block) = *else_block { tokens.append("else"); else_block.to_tokens(tokens); } } ExprKind::IfLet(ref pat, ref expr, ref then_block, ref else_block) => { tokens.append("if"); tokens.append("let"); pat.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); then_block.to_tokens(tokens); if let Some(ref else_block) = *else_block { tokens.append("else"); else_block.to_tokens(tokens); } } ExprKind::While(ref cond, ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("while"); cond.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::WhileLet(ref pat, ref expr, ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("while"); tokens.append("let"); pat.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::ForLoop(ref pat, ref expr, ref body, ref label) => { if let Some(ref
// `loop`, `match`, and closures: args with inferred types print the pat
// alone (no `: _`); a default-return closure whose body is a single
// expression statement prints without braces. Then blocks, assignments
// (`op.assign_op()` yields e.g. `+=`), field/index access, ranges.
label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("for"); pat.to_tokens(tokens); tokens.append("in"); expr.to_tokens(tokens); body.to_tokens(tokens); } ExprKind::Loop(ref body, ref label) => { if let Some(ref label) = *label { label.to_tokens(tokens); tokens.append(":"); } tokens.append("loop"); body.to_tokens(tokens); } ExprKind::Match(ref expr, ref arms) => { tokens.append("match"); expr.to_tokens(tokens); tokens.append("{"); tokens.append_all(arms); tokens.append("}"); } ExprKind::Closure(capture, ref decl, ref body) => { capture.to_tokens(tokens); tokens.append("|"); for (i, input) in decl.inputs.iter().enumerate() { if i > 0 { tokens.append(","); } match *input { FnArg::Captured(ref pat, Ty::Infer) => { pat.to_tokens(tokens); } _ => input.to_tokens(tokens), } } tokens.append("|"); match decl.output { FunctionRetTy::Default => { if body.stmts.len() == 1 { if let Stmt::Expr(ref expr) = body.stmts[0] { expr.to_tokens(tokens); } else { body.to_tokens(tokens); } } else { body.to_tokens(tokens); } } FunctionRetTy::Ty(ref ty) => { tokens.append("->"); ty.to_tokens(tokens); body.to_tokens(tokens); } } } ExprKind::Block(rules, ref block) => { rules.to_tokens(tokens); block.to_tokens(tokens); } ExprKind::Assign(ref var, ref expr) => { var.to_tokens(tokens); tokens.append("="); expr.to_tokens(tokens); } ExprKind::AssignOp(op, ref var, ref expr) => { var.to_tokens(tokens); tokens.append(op.assign_op().unwrap()); expr.to_tokens(tokens); } ExprKind::Field(ref expr, ref field) => { expr.to_tokens(tokens); tokens.append("."); field.to_tokens(tokens); } ExprKind::TupField(ref expr, field) => { expr.to_tokens(tokens); tokens.append("."); tokens.append(&field.to_string()); } ExprKind::Index(ref expr, ref index) => { expr.to_tokens(tokens); tokens.append("["); index.to_tokens(tokens); tokens.append("]"); } ExprKind::Range(ref from, ref to, limits) => { from.to_tokens(tokens); match limits { RangeLimits::HalfOpen => tokens.append(".."),
// Qualified-path printing (`<Ty as Trait>::seg::...` honoring
// `qself.position`), borrows, `break`/`continue`/`return`, macro calls,
// struct literals with `..base`, `[expr; times]` repeats, parens, `?`.
RangeLimits::Closed => tokens.append("..."), } to.to_tokens(tokens); } ExprKind::Path(None, ref path) => path.to_tokens(tokens), ExprKind::Path(Some(ref qself), ref path) => { tokens.append("<"); qself.ty.to_tokens(tokens); if qself.position > 0 { tokens.append("as"); for (i, segment) in path.segments .iter() .take(qself.position) .enumerate() { if i > 0 || path.global { tokens.append("::"); } segment.to_tokens(tokens); } } tokens.append(">"); for segment in path.segments.iter().skip(qself.position) { tokens.append("::"); segment.to_tokens(tokens); } } ExprKind::AddrOf(mutability, ref expr) => { tokens.append("&"); mutability.to_tokens(tokens); expr.to_tokens(tokens); } ExprKind::Break(ref opt_label) => { tokens.append("break"); opt_label.to_tokens(tokens); } ExprKind::Continue(ref opt_label) => { tokens.append("continue"); opt_label.to_tokens(tokens); } ExprKind::Ret(ref opt_expr) => { tokens.append("return"); opt_expr.to_tokens(tokens); } ExprKind::Mac(ref mac) => mac.to_tokens(tokens), ExprKind::Struct(ref path, ref fields, ref base) => { path.to_tokens(tokens); tokens.append("{"); tokens.append_separated(fields, ","); if let Some(ref base) = *base { if !fields.is_empty() { tokens.append(","); } tokens.append(".."); base.to_tokens(tokens); } tokens.append("}"); } ExprKind::Repeat(ref expr, ref times) => { tokens.append("["); expr.to_tokens(tokens); tokens.append(";"); times.to_tokens(tokens); tokens.append("]"); } ExprKind::Paren(ref expr) => { tokens.append("("); expr.to_tokens(tokens); tokens.append(")"); } ExprKind::Try(ref expr) => { expr.to_tokens(tokens); tokens.append("?"); } } } } impl ToTokens for FieldValue { fn to_tokens(&self, tokens: &mut Tokens) { self.ident.to_tokens(tokens); if !self.is_shorthand { tokens.append(":"); self.expr.to_tokens(tokens); } } } impl ToTokens for Arm { fn to_tokens(&self, tokens: &mut Tokens) { for attr in &self.attrs { attr.to_tokens(tokens); } tokens.append_separated(&self.pats, "|"); if let Some(ref guard) = self.guard
// Arm printing mirrors the parser: block-bodied arms omit the trailing
// comma. Then Pat printing: wildcard, bindings with `@` subpatterns,
// struct/tuple-struct patterns emitting `..` at the recorded gap position,
// and qualified-path patterns (same scheme as Expr paths).
{ tokens.append("if"); guard.to_tokens(tokens); } tokens.append("=>"); self.body.to_tokens(tokens); match self.body.node { ExprKind::Block(BlockCheckMode::Default, _) => { // no comma } _ => tokens.append(","), } } } impl ToTokens for Pat { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Pat::Wild => tokens.append("_"), Pat::Ident(mode, ref ident, ref subpat) => { mode.to_tokens(tokens); ident.to_tokens(tokens); if let Some(ref subpat) = *subpat { tokens.append("@"); subpat.to_tokens(tokens); } } Pat::Struct(ref path, ref fields, dots) => { path.to_tokens(tokens); tokens.append("{"); tokens.append_separated(fields, ","); if dots { if !fields.is_empty() { tokens.append(","); } tokens.append(".."); } tokens.append("}"); } Pat::TupleStruct(ref path, ref pats, dotpos) => { path.to_tokens(tokens); tokens.append("("); match dotpos { Some(pos) => { if pos > 0 { tokens.append_separated(&pats[..pos], ","); tokens.append(","); } tokens.append(".."); if pos < pats.len() { tokens.append(","); tokens.append_separated(&pats[pos..], ","); } } None => tokens.append_separated(pats, ","), } tokens.append(")"); } Pat::Path(None, ref path) => path.to_tokens(tokens), Pat::Path(Some(ref qself), ref path) => { tokens.append("<"); qself.ty.to_tokens(tokens); if qself.position > 0 { tokens.append("as"); for (i, segment) in path.segments .iter() .take(qself.position) .enumerate() { if i > 0 || path.global { tokens.append("::"); } segment.to_tokens(tokens); } } tokens.append(">"); for segment in path.segments.iter().skip(qself.position) { tokens.append("::"); segment.to_tokens(tokens); } } Pat::Tuple(ref pats, dotpos) => { tokens.append("("); match dotpos { Some(pos) => { if pos > 0 { tokens.append_separated(&pats[..pos], ","); tokens.append(","); } tokens.append(".."); if pos < pats.len() { tokens.append(","); tokens.append_separated(&pats[pos..], ","); } } None => { tokens.append_separated(pats, ","); if pats.len() == 1 { tokens.append(","); } } } tokens.append(")"); }
// Pat printing continued: `box`, `&`/`&mut`, literals, `lo...hi` ranges,
// slice patterns (a `Pat::Wild` rest binding prints as bare `..`); then
// FieldPat, BindingMode, CaptureBy, Block, BlockCheckMode, and Stmt.
Pat::Box(ref inner) => { tokens.append("box"); inner.to_tokens(tokens); } Pat::Ref(ref target, mutability) => { tokens.append("&"); mutability.to_tokens(tokens); target.to_tokens(tokens); } Pat::Lit(ref lit) => lit.to_tokens(tokens), Pat::Range(ref lo, ref hi) => { lo.to_tokens(tokens); tokens.append("..."); hi.to_tokens(tokens); } Pat::Slice(ref before, ref rest, ref after) => { tokens.append("["); tokens.append_separated(before, ","); if let Some(ref rest) = *rest { if !before.is_empty() { tokens.append(","); } if **rest != Pat::Wild { rest.to_tokens(tokens); } tokens.append(".."); if !after.is_empty() { tokens.append(","); } tokens.append_separated(after, ","); } tokens.append("]"); } Pat::Mac(ref mac) => mac.to_tokens(tokens), } } } impl ToTokens for FieldPat { fn to_tokens(&self, tokens: &mut Tokens) { if !self.is_shorthand { self.ident.to_tokens(tokens); tokens.append(":"); } self.pat.to_tokens(tokens); } } impl ToTokens for BindingMode { fn to_tokens(&self, tokens: &mut Tokens) { match *self { BindingMode::ByRef(Mutability::Immutable) => { tokens.append("ref"); } BindingMode::ByRef(Mutability::Mutable) => { tokens.append("ref"); tokens.append("mut"); } BindingMode::ByValue(Mutability::Immutable) => {} BindingMode::ByValue(Mutability::Mutable) => { tokens.append("mut"); } } } } impl ToTokens for CaptureBy { fn to_tokens(&self, tokens: &mut Tokens) { match *self { CaptureBy::Value => tokens.append("move"), CaptureBy::Ref => { // nothing } } } } impl ToTokens for Block { fn to_tokens(&self, tokens: &mut Tokens) { tokens.append("{"); tokens.append_all(&self.stmts); tokens.append("}"); } } impl ToTokens for BlockCheckMode { fn to_tokens(&self, tokens: &mut Tokens) { match *self { BlockCheckMode::Default => { // nothing } BlockCheckMode::Unsafe => tokens.append("unsafe"), } } } impl ToTokens for Stmt { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Stmt::Local(ref local) => local.to_tokens(tokens), Stmt::Item(ref item) => item.to_tokens(tokens),
// Stmt printing: macro statements append `;` only for Semicolon style;
// Local prints `let pat[: ty] [= init];` with its outer attributes.
Stmt::Expr(ref expr) => expr.to_tokens(tokens), Stmt::Semi(ref expr) => { expr.to_tokens(tokens); tokens.append(";"); } Stmt::Mac(ref mac) => { let (ref mac, style, ref attrs) = **mac; tokens.append_all(attrs.outer()); mac.to_tokens(tokens); match style { MacStmtStyle::Semicolon => tokens.append(";"), MacStmtStyle::Braces | MacStmtStyle::NoBraces => { // no semicolon } } } } } } impl ToTokens for Local { fn to_tokens(&self, tokens: &mut Tokens) { tokens.append_all(self.attrs.outer()); tokens.append("let"); self.pat.to_tokens(tokens); if let Some(ref ty) = self.ty { tokens.append(":"); ty.to_tokens(tokens); } if let Some(ref init) = self.init { tokens.append("="); init.to_tokens(tokens); } tokens.append(";"); } } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::cmp::{Ordering, min}; use std::iter::ExactSizeIterator; use std::fmt::Write; use {Indent, Shape, Spanned}; use codemap::SpanUtils; use rewrite::{Rewrite, RewriteContext}; use lists::{write_list, itemize_list, ListFormatting, SeparatorTactic, ListTactic, DefinitiveListTactic, definitive_tactic, ListItem, format_item_list, struct_lit_shape, struct_lit_tactic, shape_for_tactic, struct_lit_formatting}; use string::{StringFormat, rewrite_string}; use utils::{extra_offset, last_line_width, wrap_str, binary_search, first_line_width, semicolon_for_stmt, trimmed_last_line_width, left_most_sub_expr, stmt_expr, colon_spaces, contains_skip, mk_sp}; use visitor::FmtVisitor; use config::{Config, IndentStyle, MultilineStyle, ControlBraceStyle, Style}; use comment::{FindUncommented, rewrite_comment, contains_comment, recover_comment_removed}; use types::{rewrite_path, PathContext, can_be_overflowed_type}; use items::{span_lo_for_arg, span_hi_for_arg}; use chains::rewrite_chain; use macros::{rewrite_macro, MacroPosition}; use patterns::{TuplePatField, can_be_overflowed_pat}; use syntax::{ast, ptr}; use syntax::codemap::{CodeMap, Span, BytePos}; use syntax::parse::classify; impl Rewrite for ast::Expr { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { format_expr(self, ExprType::SubExpression, context, shape) } } #[derive(PartialEq)] enum ExprType { Statement, SubExpression, } fn combine_attr_and_expr( context: &RewriteContext, shape: Shape, attr_str: &str, expr_str: &str, ) -> String { 
let separator = if attr_str.is_empty() { String::new() } else { if expr_str.contains('\n') || attr_str.contains('\n') || attr_str.len() + expr_str.len() > shape.width { format!("\n{}", shape.indent.to_string(context.config)) } else { String::from(" ") } }; format!("{}{}{}", attr_str, separator, expr_str) } fn format_expr( expr: &ast::Expr, expr_type: ExprType, context: &RewriteContext, shape: Shape, ) -> Option<String> { let attr_rw = (&*expr.attrs).rewrite(context, shape); if contains_skip(&*expr.attrs) { if let Some(attr_str) = attr_rw { return Some(combine_attr_and_expr( context, shape, &attr_str, &context.snippet(expr.span), )); } else { return Some(context.snippet(expr.span)); } } let expr_rw = match expr.node { ast::ExprKind::Array(ref expr_vec) => { rewrite_array( expr_vec.iter().map(|e| &**e), mk_sp(context.codemap.span_after(expr.span, "["), expr.span.hi), context, shape, ) } ast::ExprKind::Lit(ref l) => { match l.node { ast::LitKind::Str(_, ast::StrStyle::Cooked) => { rewrite_string_lit(context, l.span, shape) } _ => { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) } } } ast::ExprKind::Call(ref callee, ref args) => { let inner_span = mk_sp(callee.span.hi, expr.span.hi); rewrite_call_with_binary_search( context, &**callee, &args.iter().map(|x| &**x).collect::<Vec<_>>()[..], inner_span, shape, ) } ast::ExprKind::Paren(ref subexpr) => rewrite_paren(context, subexpr, shape), ast::ExprKind::Binary(ref op, ref lhs, ref rhs) => { // FIXME: format comments between operands and operator rewrite_pair( &**lhs, &**rhs, "", &format!(" {} ", context.snippet(op.span)), "", context, shape, ) } ast::ExprKind::Unary(ref op, ref subexpr) => rewrite_unary_op(context, op, subexpr, shape), ast::ExprKind::Struct(ref path, ref fields, ref base) => { rewrite_struct_lit( context, path, fields, base.as_ref().map(|e| &**e), expr.span, shape, ) } ast::ExprKind::Tup(ref items) => { rewrite_tuple( context, &items.iter().map(|x| &**x).collect::<Vec<_>>()[..], 
expr.span, shape, ) } ast::ExprKind::If(..) | ast::ExprKind::IfLet(..) | ast::ExprKind::ForLoop(..) | ast::ExprKind::Loop(..) | ast::ExprKind::While(..) | ast::ExprKind::WhileLet(..) => { to_control_flow(expr, expr_type) .and_then(|control_flow| control_flow.rewrite(context, shape)) } ast::ExprKind::Block(ref block) => block.rewrite(context, shape), ast::ExprKind::Match(ref cond, ref arms) => { rewrite_match(context, cond, arms, shape, expr.span) } ast::ExprKind::Path(ref qself, ref path) => { rewrite_path(context, PathContext::Expr, qself.as_ref(), path, shape) } ast::ExprKind::Assign(ref lhs, ref rhs) => { rewrite_assignment(context, lhs, rhs, None, shape) } ast::ExprKind::AssignOp(ref op, ref lhs, ref rhs) => { rewrite_assignment(context, lhs, rhs, Some(op), shape) } ast::ExprKind::Continue(ref opt_ident) => { let id_str = match *opt_ident { Some(ident) => format!(" {}", ident.node), None => String::new(), }; wrap_str( format!("continue{}", id_str), context.config.max_width(), shape, ) } ast::ExprKind::Break(ref opt_ident, ref opt_expr) => { let id_str = match *opt_ident { Some(ident) => format!(" {}", ident.node), None => String::new(), }; if let Some(ref expr) = *opt_expr { rewrite_unary_prefix(context, &format!("break{} ", id_str), &**expr, shape) } else { wrap_str( format!("break{}", id_str), context.config.max_width(), shape, ) } } ast::ExprKind::Closure(capture, ref fn_decl, ref body, _) => { rewrite_closure(capture, fn_decl, body, expr.span, context, shape) } ast::ExprKind::Try(..) | ast::ExprKind::Field(..) | ast::ExprKind::TupField(..) | ast::ExprKind::MethodCall(..) => rewrite_chain(expr, context, shape), ast::ExprKind::Mac(ref mac) => { // Failure to rewrite a marco should not imply failure to // rewrite the expression. 
rewrite_macro(mac, None, context, shape, MacroPosition::Expression).or_else(|| { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) }) } ast::ExprKind::Ret(None) => { wrap_str("return".to_owned(), context.config.max_width(), shape) } ast::ExprKind::Ret(Some(ref expr)) => { rewrite_unary_prefix(context, "return ", &**expr, shape) } ast::ExprKind::Box(ref expr) => rewrite_unary_prefix(context, "box ", &**expr, shape), ast::ExprKind::AddrOf(mutability, ref expr) => { rewrite_expr_addrof(context, mutability, expr, shape) } ast::ExprKind::Cast(ref expr, ref ty) => { rewrite_pair(&**expr, &**ty, "", " as ", "", context, shape) } ast::ExprKind::Type(ref expr, ref ty) => { rewrite_pair(&**expr, &**ty, "", ": ", "", context, shape) } ast::ExprKind::Index(ref expr, ref index) => { rewrite_index(&**expr, &**index, context, shape) } ast::ExprKind::Repeat(ref expr, ref repeats) => { let (lbr, rbr) = if context.config.spaces_within_square_brackets() { ("[ ", " ]") } else { ("[", "]") }; rewrite_pair(&**expr, &**repeats, lbr, "; ", rbr, context, shape) } ast::ExprKind::Range(ref lhs, ref rhs, limits) => { let delim = match limits { ast::RangeLimits::HalfOpen => "..", ast::RangeLimits::Closed => "...", }; match (lhs.as_ref().map(|x| &**x), rhs.as_ref().map(|x| &**x)) { (Some(ref lhs), Some(ref rhs)) => { let sp_delim = if context.config.spaces_around_ranges() { format!(" {} ", delim) } else { delim.into() }; rewrite_pair(&**lhs, &**rhs, "", &sp_delim, "", context, shape) } (None, Some(ref rhs)) => { let sp_delim = if context.config.spaces_around_ranges() { format!("{} ", delim) } else { delim.into() }; rewrite_unary_prefix(context, &sp_delim, &**rhs, shape) } (Some(ref lhs), None) => { let sp_delim = if context.config.spaces_around_ranges() { format!(" {}", delim) } else { delim.into() }; rewrite_unary_suffix(context, &sp_delim, &**lhs, shape) } (None, None) => wrap_str(delim.into(), context.config.max_width(), shape), } } // We do not format these 
expressions yet, but they should still // satisfy our width restrictions. ast::ExprKind::InPlace(..) | ast::ExprKind::InlineAsm(..) => { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) } ast::ExprKind::Catch(ref block) => { if let rewrite @ Some(_) = try_one_line_block(context, shape, "do catch ", block) { return rewrite; } // 9 = `do catch ` let budget = shape.width.checked_sub(9).unwrap_or(0); Some(format!( "{}{}", "do catch ", try_opt!( block.rewrite(&context, Shape::legacy(budget, shape.indent)) ) )) } }; match (attr_rw, expr_rw) { (Some(attr_str), Some(expr_str)) => { recover_comment_removed( combine_attr_and_expr(context, shape, &attr_str, &expr_str), expr.span, context, shape, ) } _ => None, } } fn try_one_line_block( context: &RewriteContext, shape: Shape, prefix: &str, block: &ast::Block, ) -> Option<String> { if is_simple_block(block, context.codemap) { let expr_shape = Shape::legacy(shape.width - prefix.len(), shape.indent); let expr_str = try_opt!(block.stmts[0].rewrite(context, expr_shape)); let result = format!("{}{{ {} }}", prefix, expr_str); if result.len() <= shape.width && !result.contains('\n') { return Some(result); } } None } pub fn rewrite_pair<LHS, RHS>( lhs: &LHS, rhs: &RHS, prefix: &str, infix: &str, suffix: &str, context: &RewriteContext, shape: Shape, ) -> Option<String> where LHS: Rewrite, RHS: Rewrite, { // Get "full width" rhs and see if it fits on the current line. This // usually works fairly well since it tends to place operands of // operations with high precendence close together. // Note that this is non-conservative, but its just to see if it's even // worth trying to put everything on one line. let rhs_shape = try_opt!(shape.sub_width(suffix.len())); let rhs_result = rhs.rewrite(context, rhs_shape); if let Some(rhs_result) = rhs_result { // This is needed in case of line break not caused by a // shortage of space, but by end-of-line comments, for example. 
if !rhs_result.contains('\n') { let lhs_shape = try_opt!(try_opt!(shape.offset_left(prefix.len())).sub_width(infix.len())); let lhs_result = lhs.rewrite(context, lhs_shape); if let Some(lhs_result) = lhs_result { let mut result = format!("{}{}{}", prefix, lhs_result, infix); let remaining_width = shape .width .checked_sub(last_line_width(&result) + suffix.len()) .unwrap_or(0); if rhs_result.len() <= remaining_width { result.push_str(&rhs_result); result.push_str(suffix); return Some(result); } // Try rewriting the rhs into the remaining space. let rhs_shape = shape.shrink_left(last_line_width(&result) + suffix.len()); if let Some(rhs_shape) = rhs_shape { if let Some(rhs_result) = rhs.rewrite(context, rhs_shape) { // FIXME this should always hold. if rhs_result.len() <= remaining_width { result.push_str(&rhs_result); result.push_str(suffix); return Some(result); } } } } } } // We have to use multiple lines. // Re-evaluate the rhs because we have more space now: let infix = infix.trim_right(); let rhs_shape = match context.config.control_style() { Style::Legacy => { try_opt!(shape.sub_width(suffix.len() + prefix.len())).visual_indent(prefix.len()) } Style::Rfc => { // Try to calculate the initial constraint on the right hand side. 
            // Width the rhs would overflow past max_width if kept at this indent.
            let rhs_overhead = context
                .config
                .max_width()
                .checked_sub(shape.used_width() + shape.width)
                .unwrap_or(0);
            try_opt!(
                Shape::indented(shape.indent.block_indent(context.config), context.config)
                    .sub_width(rhs_overhead)
            )
        }
    };
    let rhs_result = try_opt!(rhs.rewrite(context, rhs_shape));
    let lhs_overhead = shape.used_width() + prefix.len() + infix.len();
    let lhs_shape = Shape {
        width: try_opt!(context.config.max_width().checked_sub(lhs_overhead)),
        ..shape
    };
    let lhs_result = try_opt!(lhs.rewrite(context, lhs_shape));
    // Break after the infix; the rhs goes on its own indented line.
    Some(format!(
        "{}{}{}\n{}{}{}",
        prefix,
        lhs_result,
        infix,
        rhs_shape.indent.to_string(context.config),
        rhs_result,
        suffix
    ))
}

/// Rewrites an array/slice literal `[a, b, ...]`, choosing between
/// horizontal, mixed, and vertical layouts per the `array_layout` config.
pub fn rewrite_array<'a, I>(
    expr_iter: I,
    span: Span,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String>
where
    I: Iterator<Item = &'a ast::Expr>,
{
    let bracket_size = if context.config.spaces_within_square_brackets() {
        2 // "[ "
    } else {
        1 // "["
    };

    let nested_shape = match context.config.array_layout() {
        IndentStyle::Block => shape.block().block_indent(context.config.tab_spaces()),
        IndentStyle::Visual => {
            try_opt!(
                shape
                    .visual_indent(bracket_size)
                    .sub_width(bracket_size * 2)
            )
        }
    };

    let items = itemize_list(
        context.codemap,
        expr_iter,
        "]",
        |item| item.span.lo,
        |item| item.span.hi,
        |item| item.rewrite(context, nested_shape),
        span.lo,
        span.hi,
    ).collect::<Vec<_>>();

    if items.is_empty() {
        if context.config.spaces_within_square_brackets() {
            return Some("[ ]".to_string());
        } else {
            return Some("[]".to_string());
        }
    }

    // Any element longer than 10 chars pushes us toward vertical layout.
    let has_long_item = items
        .iter()
        .any(|li| li.item.as_ref().map(|s| s.len() > 10).unwrap_or(false));

    let tactic = match context.config.array_layout() {
        IndentStyle::Block => {
            // FIXME wrong shape in one-line case
            match shape.width.checked_sub(2 * bracket_size) {
                Some(width) => {
                    let tactic =
                        ListTactic::LimitedHorizontalVertical(context.config.array_width());
                    definitive_tactic(&items, tactic, width)
                }
                None => DefinitiveListTactic::Vertical,
            }
        }
        IndentStyle::Visual => {
            if has_long_item ||
                items.iter().any(ListItem::is_multiline)
            {
                definitive_tactic(
                    &items,
                    ListTactic::LimitedHorizontalVertical(context.config.array_width()),
                    nested_shape.width,
                )
            } else {
                DefinitiveListTactic::Mixed
            }
        }
    };

    let fmt = ListFormatting {
        tactic: tactic,
        separator: ",",
        trailing_separator: SeparatorTactic::Never,
        shape: nested_shape,
        ends_with_newline: false,
        config: context.config,
    };
    let list_str = try_opt!(write_list(&items, &fmt));

    let result = if context.config.array_layout() == IndentStyle::Visual ||
        tactic != DefinitiveListTactic::Vertical
    {
        if context.config.spaces_within_square_brackets() && list_str.len() > 0 {
            format!("[ {} ]", list_str)
        } else {
            format!("[{}]", list_str)
        }
    } else {
        format!(
            "[\n{}{},\n{}]",
            nested_shape.indent.to_string(context.config),
            list_str,
            shape.block().indent.to_string(context.config)
        )
    };

    Some(result)
}

// Rewrites the `move? |args| -> ret` part of a closure.
// Return type is (prefix, extra_offset): the rendered prefix plus the column
// offset at which the closure body should start.
fn rewrite_closure_fn_decl(
    capture: ast::CaptureBy,
    fn_decl: &ast::FnDecl,
    body: &ast::Expr,
    span: Span,
    context: &RewriteContext,
    shape: Shape,
) -> Option<(String, usize)> {
    let mover = if capture == ast::CaptureBy::Value {
        "move "
    } else {
        ""
    };
    // 4 = "|| {".len(), which is overconservative when the closure consists of
    // a single expression.
    let nested_shape = try_opt!(try_opt!(shape.shrink_left(mover.len())).sub_width(4));

    // 1 = |
    let argument_offset = nested_shape.indent + 1;
    let arg_shape = try_opt!(nested_shape.shrink_left(1)).visual_indent(0);
    let ret_str = try_opt!(fn_decl.output.rewrite(context, arg_shape));

    let arg_items = itemize_list(
        context.codemap,
        fn_decl.inputs.iter(),
        "|",
        |arg| span_lo_for_arg(arg),
        |arg| span_hi_for_arg(arg),
        |arg| arg.rewrite(context, arg_shape),
        context.codemap.span_after(span, "|"),
        body.span.lo,
    );
    let item_vec = arg_items.collect::<Vec<_>>();
    // 1 = space between arguments and return type.
    let horizontal_budget = nested_shape
        .width
        .checked_sub(ret_str.len() + 1)
        .unwrap_or(0);
    let tactic = definitive_tactic(&item_vec, ListTactic::HorizontalVertical, horizontal_budget);
    let arg_shape = match tactic {
        DefinitiveListTactic::Horizontal => try_opt!(arg_shape.sub_width(ret_str.len() + 1)),
        _ => arg_shape,
    };

    let fmt = ListFormatting {
        tactic: tactic,
        separator: ",",
        trailing_separator: SeparatorTactic::Never,
        shape: arg_shape,
        ends_with_newline: false,
        config: context.config,
    };
    let list_str = try_opt!(write_list(&item_vec, &fmt));
    let mut prefix = format!("{}|{}|", mover, list_str);
    // 1 = space between `|...|` and body.
    let extra_offset = extra_offset(&prefix, shape) + 1;

    if !ret_str.is_empty() {
        if prefix.contains('\n') {
            prefix.push('\n');
            prefix.push_str(&argument_offset.to_string(context.config));
        } else {
            prefix.push(' ');
        }
        prefix.push_str(&ret_str);
    }

    Some((prefix, extra_offset))
}

// This function is pretty messy because of the rules around closures and blocks:
// FIXME - the below is probably no longer true in full.
// * if there is a return type, then there must be braces,
// * given a closure with braces, whether that is parsed to give an inner block
//   or not depends on if there is a return type and if there are statements
//   in that block,
// * if the first expression in the body ends with a block (i.e., is a
//   statement without needing a semi-colon), then adding or removing braces
//   can change whether it is treated as an expression or statement.
fn rewrite_closure(
    capture: ast::CaptureBy,
    fn_decl: &ast::FnDecl,
    body: &ast::Expr,
    span: Span,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String> {
    let (prefix, extra_offset) = try_opt!(rewrite_closure_fn_decl(
        capture,
        fn_decl,
        body,
        span,
        context,
        shape,
    ));
    // 1 = space between `|...|` and body.
    let body_shape = try_opt!(shape.offset_left(extra_offset));

    if let ast::ExprKind::Block(ref block) = body.node {
        // The body of the closure is an empty block.
        if block.stmts.is_empty() && !block_contains_comment(block, context.codemap) {
            return Some(format!("{} {{}}", prefix));
        }

        // Figure out if the block is necessary.
        let needs_block = block.rules != ast::BlockCheckMode::Default || block.stmts.len() > 1 ||
            context.inside_macro ||
            block_contains_comment(block, context.codemap) ||
            prefix.contains('\n');

        let no_return_type = if let ast::FunctionRetTy::Default(_) = fn_decl.output {
            true
        } else {
            false
        };
        if no_return_type && !needs_block {
            // block.stmts.len() == 1
            if let Some(ref expr) = stmt_expr(&block.stmts[0]) {
                if let Some(rw) = rewrite_closure_expr(expr, &prefix, context, body_shape) {
                    return Some(rw);
                }
            }
        }

        if !needs_block {
            // We need braces, but we might still prefer a one-liner.
            let stmt = &block.stmts[0];
            // 4 = braces and spaces.
            if let Some(body_shape) = body_shape.sub_width(4) {
                // Checks if rewrite succeeded and fits on a single line.
                if let Some(rewrite) = and_one_line(stmt.rewrite(context, body_shape)) {
                    return Some(format!("{} {{ {} }}", prefix, rewrite));
                }
            }
        }

        // Either we require a block, or tried without and failed.
        rewrite_closure_block(&block, &prefix, context, body_shape)
    } else {
        rewrite_closure_expr(body, &prefix, context, body_shape).or_else(|| {
            // The closure originally had a non-block expression, but we can't fit on
            // one line, so we'll insert a block.
            rewrite_closure_with_block(context, body_shape, &prefix, body)
        })
    }
}

// Rewrite closure with a single expression wrapping its body with block.
fn rewrite_closure_with_block(
    context: &RewriteContext,
    shape: Shape,
    prefix: &str,
    body: &ast::Expr,
) -> Option<String> {
    // Synthesize an AST block holding the single expression; NodeId 0 is a
    // dummy since this node is never resolved, only pretty-printed.
    let block = ast::Block {
        stmts: vec![
            ast::Stmt {
                id: ast::NodeId::new(0),
                node: ast::StmtKind::Expr(ptr::P(body.clone())),
                span: body.span,
            },
        ],
        id: ast::NodeId::new(0),
        rules: ast::BlockCheckMode::Default,
        span: body.span,
    };
    rewrite_closure_block(&block, prefix, context, shape)
}

// Rewrite closure with a single expression without wrapping its body with block.
fn rewrite_closure_expr(
    expr: &ast::Expr,
    prefix: &str,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String> {
    let mut rewrite = expr.rewrite(context, shape);
    // Expressions that would need a `;` to be statements must stay on one
    // line here, otherwise the closure body becomes ambiguous.
    if classify::expr_requires_semi_to_be_stmt(left_most_sub_expr(expr)) {
        rewrite = and_one_line(rewrite);
    }
    rewrite.map(|rw| format!("{} {}", prefix, rw))
}

// Rewrite closure whose body is block.
fn rewrite_closure_block(
    block: &ast::Block,
    prefix: &str,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String> {
    // Start with visual indent, then fall back to block indent if the
    // closure is large.
    let block_threshold = context.config.closure_block_indent_threshold();
    if block_threshold >= 0 {
        if let Some(block_str) = block.rewrite(&context, shape) {
            if block_str.matches('\n').count() <= block_threshold as usize &&
                !need_block_indent(&block_str, shape)
            {
                if let Some(block_str) = block_str.rewrite(context, shape) {
                    return Some(format!("{} {}", prefix, block_str));
                }
            }
        }
    }

    // The body of the closure is big enough to be block indented, that
    // means we must re-format.
    let block_shape = shape.block().with_max_width(context.config);
    let block_str = try_opt!(block.rewrite(&context, block_shape));
    Some(format!("{} {}", prefix, block_str))
}

// Passes the rewrite through only when it fits on one line.
fn and_one_line(x: Option<String>) -> Option<String> {
    x.and_then(|x| if x.contains('\n') { None } else { Some(x) })
}

// Collapses a rewritten block that contains only whitespace to `{}`.
fn nop_block_collapse(block_str: Option<String>, budget: usize) -> Option<String> {
    debug!("nop_block_collapse {:?} {}", block_str, budget);
    block_str.map(|block_str| {
        if block_str.starts_with('{') && budget >= 2 &&
            (block_str[1..].find(|c: char| !c.is_whitespace()).unwrap() == block_str.len() - 2)
        {
            "{}".to_owned()
        } else {
            block_str.to_owned()
        }
    })
}

impl Rewrite for ast::Block {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        // shape.width is used only for the single line case: either the empty block `{}`,
        // or an unsafe expression `unsafe { e }`.
        if self.stmts.is_empty() && !block_contains_comment(self, context.codemap) &&
            shape.width >= 2
        {
            return Some("{}".to_owned());
        }

        // If a block contains only a single-line comment, then leave it on one line.
        let user_str = context.snippet(self.span);
        let user_str = user_str.trim();
        if user_str.starts_with('{') && user_str.ends_with('}') {
            let comment_str = user_str[1..user_str.len() - 1].trim();
            // Only block comments qualify: a `//` comment would swallow the
            // closing brace if rendered on one line.
            if self.stmts.is_empty() && !comment_str.contains('\n') &&
                !comment_str.starts_with("//") &&
                comment_str.len() + 4 <= shape.width
            {
                return Some(format!("{{ {} }}", comment_str));
            }
        }

        let mut visitor = FmtVisitor::from_codemap(context.parse_session, context.config);
        visitor.block_indent = shape.indent;
        visitor.is_if_else_block = context.is_if_else_block;

        let prefix = match self.rules {
            ast::BlockCheckMode::Unsafe(..) => {
                let snippet = context.snippet(self.span);
                let open_pos = try_opt!(snippet.find_uncommented("{"));
                visitor.last_pos = self.span.lo + BytePos(open_pos as u32);

                // Extract comment between unsafe and block start.
                // 6 = "unsafe".len(): skip the keyword itself.
                let trimmed = &snippet[6..open_pos].trim();

                let prefix = if !trimmed.is_empty() {
                    // 9 = "unsafe {".len(), 7 = "unsafe ".len()
                    let budget = try_opt!(shape.width.checked_sub(9));
                    format!(
                        "unsafe {} ",
                        try_opt!(rewrite_comment(
                            trimmed,
                            true,
                            Shape::legacy(budget, shape.indent + 7),
                            context.config,
                        ))
                    )
                } else {
                    "unsafe ".to_owned()
                };
                if let result @ Some(_) = try_one_line_block(context, shape, &prefix, self) {
                    return result;
                }
                prefix
            }
            ast::BlockCheckMode::Default => {
                visitor.last_pos = self.span.lo;
                String::new()
            }
        };

        visitor.visit_block(self);
        if visitor.failed && shape.indent.alignment != 0 {
            // Visual alignment failed; retry at plain block indentation.
            self.rewrite(
                context,
                Shape::indented(shape.indent.block_only(), context.config),
            )
        } else {
            Some(format!("{}{}", prefix, visitor.buffer))
        }
    }
}

impl Rewrite for ast::Stmt {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        let result = match self.node {
            ast::StmtKind::Local(ref local) => local.rewrite(context, shape),
            ast::StmtKind::Expr(ref ex) | ast::StmtKind::Semi(ref ex) => {
                let suffix = if semicolon_for_stmt(self) { ";" } else { "" };

                format_expr(
                    ex,
                    match self.node {
                        ast::StmtKind::Expr(_) => ExprType::SubExpression,
                        ast::StmtKind::Semi(_) => ExprType::Statement,
                        _ => unreachable!(),
                    },
                    context,
                    try_opt!(shape.sub_width(suffix.len())),
                ).map(|s| s + suffix)
            }
            ast::StmtKind::Mac(..) | ast::StmtKind::Item(..) => None,
        };
        result.and_then(|res| {
            recover_comment_removed(res, self.span, context, shape)
        })
    }
}

// Rewrite condition if the given expression has one.
fn rewrite_cond(context: &RewriteContext, expr: &ast::Expr, shape: Shape) -> Option<String> {
    match expr.node {
        ast::ExprKind::Match(ref cond, _) => {
            // `match `cond` {`
            let cond_shape = match context.config.control_style() {
                Style::Legacy => try_opt!(shape.shrink_left(6).and_then(|s| s.sub_width(2))),
                Style::Rfc => try_opt!(shape.offset_left(8)),
            };
            cond.rewrite(context, cond_shape)
        }
        ast::ExprKind::Block(ref block) if block.stmts.len() == 1 => {
            // Unwrap a single-statement block and recurse on its expression.
            stmt_expr(&block.stmts[0]).and_then(|e| rewrite_cond(context, e, shape))
        }
        _ => {
            to_control_flow(expr, ExprType::SubExpression).and_then(|control_flow| {
                let alt_block_sep = String::from("\n") +
                    &shape.indent.block_only().to_string(context.config);
                control_flow
                    .rewrite_cond(context, shape, &alt_block_sep)
                    .and_then(|rw| Some(rw.0))
            })
        }
    }
}

// Abstraction over control flow expressions
#[derive(Debug)]
struct ControlFlow<'a> {
    cond: Option<&'a ast::Expr>,       // `None` for plain `loop`
    block: &'a ast::Block,
    else_block: Option<&'a ast::Expr>,
    label: Option<ast::SpannedIdent>,
    pat: Option<&'a ast::Pat>,         // pattern of `if let` / `while let` / `for`
    keyword: &'a str,                  // "if", "for", "loop", "while"
    matcher: &'a str,                  // e.g. "let" for the `let` forms
    connector: &'a str,                // e.g. " =" or " in"
    allow_single_line: bool,
    // True if this is an `if` expression in an `else if` :-( hacky
    nested_if: bool,
    span: Span,
}

// Converts a control-flow expression into a `ControlFlow`; `None` for
// anything that is not `if`/`if let`/`for`/`loop`/`while`/`while let`.
fn to_control_flow<'a>(expr: &'a ast::Expr, expr_type: ExprType) -> Option<ControlFlow<'a>> {
    match expr.node {
        ast::ExprKind::If(ref cond, ref if_block, ref else_block) => {
            Some(ControlFlow::new_if(
                cond,
                None,
                if_block,
                else_block.as_ref().map(|e| &**e),
                expr_type == ExprType::SubExpression,
                false,
                expr.span,
            ))
        }
        ast::ExprKind::IfLet(ref pat, ref cond, ref if_block, ref else_block) => {
            Some(ControlFlow::new_if(
                cond,
                Some(pat),
                if_block,
                else_block.as_ref().map(|e| &**e),
                expr_type == ExprType::SubExpression,
                false,
                expr.span,
            ))
        }
        ast::ExprKind::ForLoop(ref pat, ref cond, ref block, label) => {
            Some(ControlFlow::new_for(pat, cond, block, label, expr.span))
        }
        ast::ExprKind::Loop(ref block, label) => Some(
            ControlFlow::new_loop(block, label, expr.span),
        ),
        ast::ExprKind::While(ref cond, ref block, label) => Some(ControlFlow::new_while(
            None,
            cond,
            block,
            label,
            expr.span,
        )),
        ast::ExprKind::WhileLet(ref pat, ref cond, ref block, label) => {
            Some(ControlFlow::new_while(
                Some(pat),
                cond,
                block,
                label,
                expr.span,
            ))
        }
        _ => None,
    }
}

impl<'a> ControlFlow<'a> {
    // Constructor for `if` / `if let` (pat is `Some` for the `let` form).
    fn new_if(
        cond: &'a ast::Expr,
        pat: Option<&'a ast::Pat>,
        block: &'a ast::Block,
        else_block: Option<&'a ast::Expr>,
        allow_single_line: bool,
        nested_if: bool,
        span: Span,
    ) -> ControlFlow<'a> {
        ControlFlow {
            cond: Some(cond),
            block: block,
            else_block: else_block,
            label: None,
            pat: pat,
            keyword: "if",
            matcher: match pat {
                Some(..) => "let",
                None => "",
            },
            connector: " =",
            allow_single_line: allow_single_line,
            nested_if: nested_if,
            span: span,
        }
    }

    // Constructor for `loop`.
    fn new_loop(
        block: &'a ast::Block,
        label: Option<ast::SpannedIdent>,
        span: Span,
    ) -> ControlFlow<'a> {
        ControlFlow {
            cond: None,
            block: block,
            else_block: None,
            label: label,
            pat: None,
            keyword: "loop",
            matcher: "",
            connector: "",
            allow_single_line: false,
            nested_if: false,
            span: span,
        }
    }

    // Constructor for `while` / `while let`.
    fn new_while(
        pat: Option<&'a ast::Pat>,
        cond: &'a ast::Expr,
        block: &'a ast::Block,
        label: Option<ast::SpannedIdent>,
        span: Span,
    ) -> ControlFlow<'a> {
        ControlFlow {
            cond: Some(cond),
            block: block,
            else_block: None,
            label: label,
            pat: pat,
            keyword: "while",
            matcher: match pat {
                Some(..)
                => "let",
                None => "",
            },
            connector: " =",
            allow_single_line: false,
            nested_if: false,
            span: span,
        }
    }

    // Constructor for `for ... in ...`.
    fn new_for(
        pat: &'a ast::Pat,
        cond: &'a ast::Expr,
        block: &'a ast::Block,
        label: Option<ast::SpannedIdent>,
        span: Span,
    ) -> ControlFlow<'a> {
        ControlFlow {
            cond: Some(cond),
            block: block,
            else_block: None,
            label: label,
            pat: Some(pat),
            keyword: "for",
            matcher: "",
            connector: " in",
            allow_single_line: false,
            nested_if: false,
            span: span,
        }
    }

    // Tries to render the whole `if c { a } else { b }` on one line.
    // Only callable when `allow_single_line` is set; requires both branches
    // to be simple single-expression blocks.
    fn rewrite_single_line(
        &self,
        pat_expr_str: &str,
        context: &RewriteContext,
        width: usize,
    ) -> Option<String> {
        assert!(self.allow_single_line);
        let else_block = try_opt!(self.else_block);
        let fixed_cost = self.keyword.len() + "  {  } else {  }".len();

        if let ast::ExprKind::Block(ref else_node) = else_block.node {
            if !is_simple_block(self.block, context.codemap) ||
                !is_simple_block(else_node, context.codemap) ||
                pat_expr_str.contains('\n')
            {
                return None;
            }

            let new_width = try_opt!(width.checked_sub(pat_expr_str.len() + fixed_cost));
            let expr = &self.block.stmts[0];
            let if_str = try_opt!(expr.rewrite(
                context,
                Shape::legacy(new_width, Indent::empty()),
            ));

            let new_width = try_opt!(new_width.checked_sub(if_str.len()));
            let else_expr = &else_node.stmts[0];
            let else_str = try_opt!(else_expr.rewrite(
                context,
                Shape::legacy(new_width, Indent::empty()),
            ));

            if if_str.contains('\n') || else_str.contains('\n') {
                return None;
            }

            let result = format!(
                "{} {} {{ {} }} else {{ {} }}",
                self.keyword,
                pat_expr_str,
                if_str,
                else_str
            );

            if result.len() <= width {
                return Some(result);
            }
        }

        None
    }
}

impl<'a> ControlFlow<'a> {
    // Rewrites everything up to (and including) the separator before the
    // opening brace. Returns the string plus the width used on its last line.
    fn rewrite_cond(
        &self,
        context: &RewriteContext,
        shape: Shape,
        alt_block_sep: &str,
    ) -> Option<(String, usize)> {
        let constr_shape = if self.nested_if {
            // We are part of an if-elseif-else chain. Our constraints are tightened.
            // 7 = "} else " .len()
            try_opt!(shape.shrink_left(7))
        } else {
            shape
        };

        let label_string = rewrite_label(self.label);
        // 1 = space after keyword.
        let offset = self.keyword.len() + label_string.len() + 1;

        let pat_expr_string = match self.cond {
            Some(cond) => {
                let mut cond_shape = match context.config.control_style() {
                    Style::Legacy => try_opt!(constr_shape.shrink_left(offset)),
                    Style::Rfc => try_opt!(constr_shape.offset_left(offset)),
                };
                if context.config.control_brace_style() != ControlBraceStyle::AlwaysNextLine {
                    // 2 = " {".len()
                    cond_shape = try_opt!(cond_shape.sub_width(2));
                }

                try_opt!(rewrite_pat_expr(
                    context,
                    self.pat,
                    cond,
                    self.matcher,
                    self.connector,
                    self.keyword,
                    cond_shape,
                ))
            }
            None => String::new(),
        };

        let force_newline_brace = context.config.control_style() == Style::Rfc &&
            pat_expr_string.contains('\n');

        // Try to format if-else on single line.
        if self.allow_single_line && context.config.single_line_if_else_max_width() > 0 {
            let trial = self.rewrite_single_line(&pat_expr_string, context, shape.width);

            if let Some(cond_str) = trial {
                if cond_str.len() <= context.config.single_line_if_else_max_width() {
                    return Some((cond_str, 0));
                }
            }
        }

        let cond_span = if let Some(cond) = self.cond {
            cond.span
        } else {
            mk_sp(self.block.span.lo, self.block.span.lo)
        };

        // Span between the keyword and the pattern/condition; it may contain
        // a comment that must be preserved.
        let between_kwd_cond = mk_sp(
            context.codemap.span_after(self.span, self.keyword.trim()),
            self.pat.map_or(
                cond_span.lo,
                |p| if self.matcher.is_empty() {
                    p.span.lo
                } else {
                    context.codemap.span_before(self.span, self.matcher.trim())
                },
            ),
        );

        let between_kwd_cond_comment = extract_comment(between_kwd_cond, context, shape);

        let after_cond_comment =
            extract_comment(mk_sp(cond_span.hi, self.block.span.lo), context, shape);

        let block_sep = if self.cond.is_none() && between_kwd_cond_comment.is_some() {
            ""
        } else if context.config.control_brace_style() == ControlBraceStyle::AlwaysNextLine ||
                   force_newline_brace
        {
            alt_block_sep
        } else {
            " "
        };

        let used_width = if pat_expr_string.contains('\n') {
            last_line_width(&pat_expr_string)
        } else {
            // 2 = spaces after keyword and condition.
            label_string.len() + self.keyword.len() + pat_expr_string.len() + 2
        };

        Some((
            format!(
                "{}{}{}{}{}",
                label_string,
                self.keyword,
                between_kwd_cond_comment.as_ref().map_or(
                    if pat_expr_string.is_empty() ||
                        pat_expr_string.starts_with('\n')
                    {
                        ""
                    } else {
                        " "
                    },
                    |s| &**s,
                ),
                pat_expr_string,
                after_cond_comment.as_ref().map_or(block_sep, |s| &**s)
            ),
            used_width,
        ))
    }
}

impl<'a> Rewrite for ControlFlow<'a> {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        debug!("ControlFlow::rewrite {:?} {:?}", self, shape);

        let alt_block_sep = String::from("\n") +
            &shape.indent.block_only().to_string(context.config);
        let (cond_str, used_width) = try_opt!(self.rewrite_cond(context, shape, &alt_block_sep));
        // If `used_width` is 0, it indicates that whole control flow is written in a single line.
        if used_width == 0 {
            return Some(cond_str);
        }

        let block_width = shape.width.checked_sub(used_width).unwrap_or(0);
        // This is used only for the empty block case: `{}`. So, we use 1 if we know
        // we should avoid the single line case.
        let block_width = if self.else_block.is_some() || self.nested_if {
            min(1, block_width)
        } else {
            block_width
        };

        let block_shape = Shape {
            width: block_width,
            ..shape
        };
        let mut block_context = context.clone();
        block_context.is_if_else_block = self.else_block.is_some();
        let block_str = try_opt!(self.block.rewrite(&block_context, block_shape));

        let mut result = format!("{}{}", cond_str, block_str);

        if let Some(else_block) = self.else_block {
            let shape = Shape::indented(shape.indent, context.config);
            let mut last_in_chain = false;
            let rewrite = match else_block.node {
                // If the else expression is another if-else expression, prevent it
                // from being formatted on a single line.
                // Note how we're passing the original shape, as the
                // cost of "else" should not cascade.
                ast::ExprKind::IfLet(ref pat, ref cond, ref if_block, ref next_else_block) => {
                    ControlFlow::new_if(
                        cond,
                        Some(pat),
                        if_block,
                        next_else_block.as_ref().map(|e| &**e),
                        false,
                        true,
                        mk_sp(else_block.span.lo, self.span.hi),
                    ).rewrite(context, shape)
                }
                ast::ExprKind::If(ref cond, ref if_block, ref next_else_block) => {
                    ControlFlow::new_if(
                        cond,
                        None,
                        if_block,
                        next_else_block.as_ref().map(|e| &**e),
                        false,
                        true,
                        mk_sp(else_block.span.lo, self.span.hi),
                    ).rewrite(context, shape)
                }
                _ => {
                    last_in_chain = true;
                    // When rewriting a block, the width is only used for single line
                    // blocks, passing 1 lets us avoid that.
                    let else_shape = Shape {
                        width: min(1, shape.width),
                        ..shape
                    };
                    else_block.rewrite(context, else_shape)
                }
            };

            // Preserve any comments sitting between `}` and `else`, and
            // between `else` and the following block/if.
            let between_kwd_else_block = mk_sp(
                self.block.span.hi,
                context.codemap.span_before(
                    mk_sp(self.block.span.hi, else_block.span.lo),
                    "else",
                ),
            );
            let between_kwd_else_block_comment =
                extract_comment(between_kwd_else_block, context, shape);

            let after_else = mk_sp(
                context.codemap.span_after(
                    mk_sp(self.block.span.hi, else_block.span.lo),
                    "else",
                ),
                else_block.span.lo,
            );
            let after_else_comment = extract_comment(after_else, context, shape);

            let between_sep = match context.config.control_brace_style() {
                ControlBraceStyle::AlwaysNextLine |
                ControlBraceStyle::ClosingNextLine => &*alt_block_sep,
                ControlBraceStyle::AlwaysSameLine => " ",
            };
            let after_sep = match context.config.control_brace_style() {
                ControlBraceStyle::AlwaysNextLine if last_in_chain => &*alt_block_sep,
                _ => " ",
            };
            try_opt!(
                write!(
                    &mut result,
                    "{}else{}",
                    between_kwd_else_block_comment.as_ref().map_or(
                        between_sep,
                        |s| &**s,
                    ),
                    after_else_comment.as_ref().map_or(after_sep, |s| &**s)
                ).ok()
            );
            result.push_str(&try_opt!(rewrite));
        }

        Some(result)
    }
}

// Renders a loop label as `'name: ` (with trailing space), or "".
fn rewrite_label(label: Option<ast::SpannedIdent>) -> String {
    match label {
        Some(ident) => format!("{}: ", ident.node),
        None => "".to_owned(),
    }
}

// If `span` contains a comment, rewrites it surrounded by newlines and the
// current indent; otherwise returns `None`.
fn extract_comment(span: Span, context: &RewriteContext, shape: Shape) -> Option<String> {
    let comment_str = context.snippet(span);
    if contains_comment(&comment_str) {
        let comment = try_opt!(rewrite_comment(
            comment_str.trim(),
            false,
            shape,
            context.config,
        ));
        Some(format!(
            "\n{indent}{}\n{indent}",
            comment,
            indent = shape.indent.to_string(context.config)
        ))
    } else {
        None
    }
}

fn block_contains_comment(block: &ast::Block, codemap: &CodeMap) -> bool {
    let snippet = codemap.span_to_snippet(block.span).unwrap();
    contains_comment(&snippet)
}

// Checks that a block contains no statements, an expression and no comments.
// FIXME: incorrectly returns false when comment is contained completely within
// the expression.
pub fn is_simple_block(block: &ast::Block, codemap: &CodeMap) -> bool {
    (block.stmts.len() == 1 && stmt_is_expr(&block.stmts[0]) &&
         !block_contains_comment(block, codemap))
}

/// Checks whether a block contains at most one statement or expression, and no comments.
pub fn is_simple_block_stmt(block: &ast::Block, codemap: &CodeMap) -> bool {
    block.stmts.len() <= 1 && !block_contains_comment(block, codemap)
}

/// Checks whether a block contains no statements, expressions, or comments.
pub fn is_empty_block(block: &ast::Block, codemap: &CodeMap) -> bool {
    block.stmts.is_empty() && !block_contains_comment(block, codemap)
}

pub fn stmt_is_expr(stmt: &ast::Stmt) -> bool {
    match stmt.node {
        ast::StmtKind::Expr(..) => true,
        _ => false,
    }
}

fn is_unsafe_block(block: &ast::Block) -> bool {
    if let ast::BlockCheckMode::Unsafe(..)
    = block.rules
    {
        true
    } else {
        false
    }
}

// inter-match-arm-comment-rules:
//  - all comments following a match arm before the start of the next arm
//    are about the second arm
fn rewrite_match_arm_comment(
    context: &RewriteContext,
    missed_str: &str,
    shape: Shape,
    arm_indent_str: &str,
) -> Option<String> {
    // The leading "," is not part of the arm-comment
    let missed_str = match missed_str.find_uncommented(",") {
        Some(n) => &missed_str[n + 1..],
        None => &missed_str[..],
    };

    let mut result = String::new();
    // any text not preceded by a newline is pushed unmodified to the block
    let first_brk = missed_str.find(|c: char| c == '\n').unwrap_or(0);
    result.push_str(&missed_str[..first_brk]);
    let missed_str = &missed_str[first_brk..]; // If missed_str had one newline, it starts with it

    let first = missed_str.find(|c: char| !c.is_whitespace()).unwrap_or(
        missed_str
            .len(),
    );
    if missed_str[..first].chars().filter(|c| c == &'\n').count() >= 2 {
        // Excessive vertical whitespace before comment should be preserved
        // FIXME handle vertical whitespace better
        result.push('\n');
    }
    let missed_str = missed_str[first..].trim();
    if !missed_str.is_empty() {
        let comment = try_opt!(rewrite_comment(&missed_str, false, shape, context.config));
        result.push('\n');
        result.push_str(arm_indent_str);
        result.push_str(&comment);
    }

    Some(result)
}

// Rewrites a whole `match` expression: condition, arms, and any comments
// between arms. Falls back to the source snippet for arms it cannot format.
fn rewrite_match(
    context: &RewriteContext,
    cond: &ast::Expr,
    arms: &[ast::Arm],
    shape: Shape,
    span: Span,
) -> Option<String> {
    if arms.is_empty() {
        return None;
    }

    // `match `cond` {`
    let cond_shape = match context.config.control_style() {
        Style::Legacy => try_opt!(shape.shrink_left(6).and_then(|s| s.sub_width(2))),
        Style::Rfc => try_opt!(shape.offset_left(8)),
    };
    let cond_str = try_opt!(cond.rewrite(context, cond_shape));
    let alt_block_sep = String::from("\n") +
        &shape.indent.block_only().to_string(context.config);
    let block_sep = match context.config.control_brace_style() {
        ControlBraceStyle::AlwaysSameLine => " ",
        _ => alt_block_sep.as_str(),
    };
    let mut result = format!("match {}{}{{", cond_str, block_sep);

    let arm_shape = if context.config.indent_match_arms() {
        shape.block_indent(context.config.tab_spaces())
    } else {
        shape.block_indent(0)
    };

    let arm_indent_str = arm_shape.indent.to_string(context.config);

    let open_brace_pos = context.codemap.span_after(
        mk_sp(cond.span.hi, arm_start_pos(&arms[0])),
        "{",
    );

    for (i, arm) in arms.iter().enumerate() {
        // Make sure we get the stuff between arms.
        let missed_str = if i == 0 {
            context.snippet(mk_sp(open_brace_pos, arm_start_pos(arm)))
        } else {
            context.snippet(mk_sp(arm_end_pos(&arms[i - 1]), arm_start_pos(arm)))
        };
        let comment = try_opt!(rewrite_match_arm_comment(
            context,
            &missed_str,
            arm_shape,
            &arm_indent_str,
        ));
        result.push_str(&comment);
        result.push('\n');
        result.push_str(&arm_indent_str);

        let arm_str = arm.rewrite(&context, arm_shape.with_max_width(context.config));
        if let Some(ref arm_str) = arm_str {
            result.push_str(arm_str);
        } else {
            // We couldn't format the arm, just reproduce the source.
            let snippet = context.snippet(mk_sp(arm_start_pos(arm), arm_end_pos(arm)));
            result.push_str(&snippet);
            result.push_str(arm_comma(context.config, &arm.body));
        }
    }
    // BytePos(1) = closing match brace.
    let last_span = mk_sp(arm_end_pos(&arms[arms.len() - 1]), span.hi - BytePos(1));
    let last_comment = context.snippet(last_span);
    let comment = try_opt!(rewrite_match_arm_comment(
        context,
        &last_comment,
        arm_shape,
        &arm_indent_str,
    ));
    result.push_str(&comment);
    result.push('\n');
    result.push_str(&shape.indent.to_string(context.config));
    result.push('}');
    Some(result)
}

// Position where an arm's source begins (first attribute, else first pattern).
fn arm_start_pos(arm: &ast::Arm) -> BytePos {
    let &ast::Arm {
        ref attrs,
        ref pats,
        ..
    } = arm;
    if !attrs.is_empty() {
        return attrs[0].span.lo;
    }

    pats[0].span.lo
}

fn arm_end_pos(arm: &ast::Arm) -> BytePos {
    arm.body.span.hi
}

// Trailing comma for an arm body: omitted only for a plain (non-unsafe)
// block body, unless `match_block_trailing_comma` forces it.
fn arm_comma(config: &Config, body: &ast::Expr) -> &'static str {
    if config.match_block_trailing_comma() {
        ","
    } else if let ast::ExprKind::Block(ref block) = body.node {
        if let ast::BlockCheckMode::Default = block.rules {
            ""
        } else {
            ","
        }
    } else {
        ","
    }
}

// Match arms.
impl Rewrite for ast::Arm {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        debug!("Arm::rewrite {:?} {:?}", self, shape);
        let &ast::Arm {
            ref attrs,
            ref pats,
            ref guard,
            ref body,
        } = self;

        let attr_str = if !attrs.is_empty() {
            if contains_skip(attrs) {
                return None;
            }
            format!(
                "{}\n{}",
                try_opt!(attrs.rewrite(context, shape)),
                shape.indent.to_string(context.config)
            )
        } else {
            String::new()
        };

        // Patterns
        // 5 = ` => {`
        let pat_shape = try_opt!(shape.sub_width(5));

        let pat_strs = try_opt!(
            pats.iter()
                .map(|p| p.rewrite(context, pat_shape))
                .collect::<Option<Vec<_>>>()
        );

        let all_simple = pat_strs.iter().all(|p| pat_is_simple(p));
        let items: Vec<_> = pat_strs.into_iter().map(ListItem::from_str).collect();
        let fmt = ListFormatting {
            tactic: if all_simple {
                DefinitiveListTactic::Mixed
            } else {
                DefinitiveListTactic::Vertical
            },
            separator: " |",
            trailing_separator: SeparatorTactic::Never,
            shape: pat_shape,
            ends_with_newline: false,
            config: context.config,
        };
        let pats_str = try_opt!(write_list(items, &fmt));

        // A multiline pattern gets the full max_width for its guard.
        let guard_shape = if pats_str.contains('\n') {
            shape.with_max_width(context.config)
        } else {
            shape
        };

        let guard_str = try_opt!(rewrite_guard(
            context,
            guard,
            guard_shape,
            trimmed_last_line_width(&pats_str),
        ));

        let pats_str = format!("{}{}", pats_str, guard_str);

        // `extend` marks bodies that may extend past the `=>` on the same
        // line even when multiline (single-arg calls, closures, structs, tuples).
        let (mut extend, body) = match body.node {
            ast::ExprKind::Block(ref block)
                if !is_unsafe_block(block) && is_simple_block(block, context.codemap) &&
                       context.config.wrap_match_arms() => {
                if let ast::StmtKind::Expr(ref expr) = block.stmts[0].node {
                    (false, &**expr)
                } else {
                    (false, &**body)
                }
            }
            ast::ExprKind::Call(_, ref args) => (args.len() == 1, &**body),
            ast::ExprKind::Closure(..) |
            ast::ExprKind::Struct(..) |
            ast::ExprKind::Tup(..) => (true, &**body),
            _ => (false, &**body),
        };
        extend &= context.use_block_indent();

        let comma = arm_comma(&context.config, body);
        let alt_block_sep = String::from("\n") +
            &shape.indent.block_only().to_string(context.config);

        let pat_width = extra_offset(&pats_str, shape);
        // Let's try and get the arm body on the same line as the condition.
        // 4 = ` => `.len()
        if shape.width > pat_width + comma.len() + 4 {
            // Safe unwraps: the surrounding `if` guarantees enough width.
            let arm_shape = shape
                .offset_left(pat_width + 4)
                .unwrap()
                .sub_width(comma.len())
                .unwrap();
            let rewrite = nop_block_collapse(body.rewrite(context, arm_shape), arm_shape.width);
            let is_block = if let ast::ExprKind::Block(..) = body.node {
                true
            } else {
                false
            };

            match rewrite {
                Some(ref body_str)
                    if (!body_str.contains('\n') && body_str.len() <= arm_shape.width) ||
                           !context.config.wrap_match_arms() ||
                           (extend && first_line_width(body_str) <= arm_shape.width) ||
                           is_block => {
                    let block_sep = match context.config.control_brace_style() {
                        ControlBraceStyle::AlwaysNextLine if is_block => alt_block_sep.as_str(),
                        _ => " ",
                    };

                    return Some(format!(
                        "{}{} =>{}{}{}",
                        attr_str.trim_left(),
                        pats_str,
                        block_sep,
                        body_str,
                        comma
                    ));
                }
                _ => {}
            }
        }

        // FIXME: we're doing a second rewrite of the expr; This may not be
        // necessary.
        // The body goes on the next line, block-indented.
        let body_shape = try_opt!(shape.block_left(context.config.tab_spaces()));
        let next_line_body = try_opt!(nop_block_collapse(
            body.rewrite(context, body_shape),
            body_shape.width,
        ));
        let indent_str = shape
            .indent
            .block_indent(context.config)
            .to_string(context.config);
        let (body_prefix, body_suffix) = if context.config.wrap_match_arms() {
            if context.config.match_block_trailing_comma() {
                ("{", "},")
            } else {
                ("{", "}")
            }
        } else {
            ("", ",")
        };

        let block_sep = match context.config.control_brace_style() {
            ControlBraceStyle::AlwaysNextLine => alt_block_sep + body_prefix + "\n",
            _ if body_prefix.is_empty() => "\n".to_owned(),
            _ => " ".to_owned() + body_prefix + "\n",
        };

        if context.config.wrap_match_arms() {
            Some(format!(
                "{}{} =>{}{}{}\n{}{}",
                attr_str.trim_left(),
                pats_str,
                block_sep,
                indent_str,
                next_line_body,
                shape.indent.to_string(context.config),
                body_suffix
            ))
        } else {
            Some(format!(
                "{}{} =>{}{}{}{}",
                attr_str.trim_left(),
                pats_str,
                block_sep,
                indent_str,
                next_line_body,
                body_suffix
            ))
        }
    }
}

// A pattern is simple if it is very short or it is short-ish and just a path.
// E.g. `Foo::Bar` is simple, but `Foo(..)` is not.
fn pat_is_simple(pat_str: &str) -> bool {
    pat_str.len() <= 16 ||
        (pat_str.len() <= 24 && pat_str.chars().all(|c| c.is_alphabetic() || c == ':'))
}

// The `if ...` guard on a match arm.
fn rewrite_guard(
    context: &RewriteContext,
    guard: &Option<ptr::P<ast::Expr>>,
    shape: Shape,
    // The amount of space used up on this line for the pattern in
    // the arm (excludes offset).
    pattern_width: usize,
) -> Option<String> {
    if let Some(ref guard) = *guard {
        // First try to fit the guard string on the same line as the pattern.
// 4 = ` if `, 5 = ` => {` if let Some(cond_shape) = shape .shrink_left(pattern_width + 4) .and_then(|s| s.sub_width(5)) { if let Some(cond_str) = guard .rewrite(context, cond_shape) .and_then(|s| s.rewrite(context, cond_shape)) { if !cond_str.contains('\n') { return Some(format!(" if {}", cond_str)); } } } // Not enough space to put the guard after the pattern, try a newline. // 3 == `if ` if let Some(cond_shape) = Shape::indented( shape.indent.block_indent(context.config) + 3, context.config, ).sub_width(3) { if let Some(cond_str) = guard.rewrite(context, cond_shape) { return Some(format!( "\n{}if {}", shape .indent .block_indent(context.config) .to_string(context.config), cond_str )); } } None } else { Some(String::new()) } } fn rewrite_pat_expr( context: &RewriteContext, pat: Option<&ast::Pat>, expr: &ast::Expr, matcher: &str, // Connecting piece between pattern and expression, // *without* trailing space. connector: &str, keyword: &str, shape: Shape, ) -> Option<String> { debug!("rewrite_pat_expr {:?} {:?} {:?}", shape, pat, expr); let mut pat_string = String::new(); let mut result = match pat { Some(pat) => { let matcher = if matcher.is_empty() { matcher.to_owned() } else { format!("{} ", matcher) }; let pat_shape = try_opt!(try_opt!(shape.offset_left(matcher.len())).sub_width(connector.len())); pat_string = try_opt!(pat.rewrite(context, pat_shape)); format!("{}{}{}", matcher, pat_string, connector) } None => String::new(), }; // Consider only the last line of the pat string. let extra_offset = extra_offset(&result, shape); // The expression may (partially) fit on the current line. 
if shape.width > extra_offset + 1 { let spacer = if pat.is_some() { " " } else { "" }; let expr_shape = try_opt!(shape.offset_left(extra_offset + spacer.len())); let expr_rewrite = expr.rewrite(context, expr_shape); if let Some(expr_string) = expr_rewrite { if pat.is_none() || pat_is_simple(&pat_string) || !expr_string.contains('\n') { result.push_str(spacer); result.push_str(&expr_string); return Some(result); } } } if pat.is_none() && keyword == "if" { return None; } let nested_indent = shape.indent.block_only().block_indent(context.config); // The expression won't fit on the current line, jump to next. result.push('\n'); result.push_str(&nested_indent.to_string(context.config)); let expr_rewrite = expr.rewrite(&context, Shape::indented(nested_indent, context.config)); result.push_str(&try_opt!(expr_rewrite)); Some(result) } fn rewrite_string_lit(context: &RewriteContext, span: Span, shape: Shape) -> Option<String> { let string_lit = context.snippet(span); if !context.config.format_strings() && !context.config.force_format_strings() { return Some(string_lit); } if !context.config.force_format_strings() && !string_requires_rewrite(context, span, &string_lit, shape) { return Some(string_lit); } let fmt = StringFormat { opener: "\"", closer: "\"", line_start: " ", line_end: "\\", shape: shape, trim_end: false, config: context.config, }; // Remove the quote characters. 
    // Strip the surrounding quote characters before reflowing the literal.
    let str_lit = &string_lit[1..string_lit.len() - 1];
    rewrite_string(str_lit, &fmt)
}

/// Decides whether a string literal needs reformatting at all.
///
/// Returns `true` when the literal does not start at the expected indent
/// column, or when any of its lines exceeds the width budget.
fn string_requires_rewrite(
    context: &RewriteContext,
    span: Span,
    string: &str,
    shape: Shape,
) -> bool {
    // Literal not at the indent column => its layout is nonstandard; rewrite.
    if context.codemap.lookup_char_pos(span.lo).col.0 != shape.indent.width() {
        return true;
    }

    for (i, line) in string.lines().enumerate() {
        if i == 0 {
            // First line starts at the indent, so only `shape.width` remains.
            if line.len() > shape.width {
                return true;
            }
        } else {
            // NOTE(review): continuation lines are allowed
            // `shape.width + shape.indent.width()` — asymmetric with the first
            // line; presumably because they start at column 0 in the snippet.
            // Confirm intended.
            if line.len() > shape.width + shape.indent.width() {
                return true;
            }
        }
    }

    false
}

/// Rewrites a call whose callee itself needs rewriting, binary-searching over
/// the width granted to the callee. The closure signals "too narrow" /
/// "too wide" via the `Ordering` error so `binary_search` can converge.
pub fn rewrite_call_with_binary_search<R>(
    context: &RewriteContext,
    callee: &R,
    args: &[&ast::Expr],
    span: Span,
    shape: Shape,
) -> Option<String>
where
    R: Rewrite,
{
    let closure = |callee_max_width| {
        // FIXME using byte lens instead of char lens (and probably all over the
        // place too)
        let callee_shape = Shape {
            width: callee_max_width,
            ..shape
        };
        let callee_str = callee
            .rewrite(context, callee_shape)
            .ok_or(Ordering::Greater)?;

        rewrite_call_inner(
            context,
            &callee_str,
            args,
            span,
            shape,
            context.config.fn_call_width(),
            false,
        )
    };

    binary_search(1, shape.width, closure)
}

/// Rewrites a call whose callee is already a fixed string (e.g. a path).
/// Thin wrapper over [`rewrite_call_inner`] that discards the `Ordering`
/// bisection signal.
pub fn rewrite_call(
    context: &RewriteContext,
    callee: &str,
    args: &[ptr::P<ast::Expr>],
    span: Span,
    shape: Shape,
) -> Option<String> {
    rewrite_call_inner(
        context,
        &callee,
        &args.iter().map(|x| &**x).collect::<Vec<_>>(),
        span,
        shape,
        context.config.fn_call_width(),
        false,
    ).ok()
}

/// Core of call formatting: lays out `callee_str(args…)` within `shape`.
///
/// Errors carry an `Ordering` used by the binary search above:
/// `Greater` means the callee consumed too much width. `args_max_width`
/// caps the one-line argument list; `force_trailing_comma` is used by
/// tuple formatting.
pub fn rewrite_call_inner<'a, T>(
    context: &RewriteContext,
    callee_str: &str,
    args: &[&T],
    span: Span,
    shape: Shape,
    args_max_width: usize,
    force_trailing_comma: bool,
) -> Result<String, Ordering>
where
    T: Rewrite + Spanned + ToExpr + 'a,
{
    // 2 = `( `, 1 = `(`
    let paren_overhead = if context.config.spaces_within_parens() {
        2
    } else {
        1
    };
    let used_width = extra_offset(&callee_str, shape);
    // Budget for the whole argument list on one line (both parens counted).
    let one_line_width = shape
        .width
        .checked_sub(used_width + 2 * paren_overhead)
        .ok_or(Ordering::Greater)?;

    let nested_shape = shape_from_fn_call_style(
        context,
        shape,
        used_width + 2 * paren_overhead,
        used_width + paren_overhead,
    ).ok_or(Ordering::Greater)?;

    let
span_lo = context.codemap.span_after(span, "("); let args_span = mk_sp(span_lo, span.hi); let (extendable, list_str) = rewrite_call_args( context, args, args_span, nested_shape, one_line_width, args_max_width, force_trailing_comma, ).or_else(|| if context.use_block_indent() { rewrite_call_args( context, args, args_span, Shape::indented( shape.block().indent.block_indent(context.config), context.config, ), 0, 0, force_trailing_comma, ) } else { None }) .ok_or(Ordering::Less)?; if !context.use_block_indent() && need_block_indent(&list_str, nested_shape) && !extendable { let mut new_context = context.clone(); new_context.use_block = true; return rewrite_call_inner( &new_context, callee_str, args, span, shape, args_max_width, force_trailing_comma, ); } let args_shape = shape .sub_width(last_line_width(&callee_str)) .ok_or(Ordering::Less)?; Ok(format!( "{}{}", callee_str, wrap_args_with_parens( context, &list_str, extendable, args_shape, nested_shape, ) )) } fn need_block_indent(s: &str, shape: Shape) -> bool { s.lines().skip(1).any(|s| { s.find(|c| !char::is_whitespace(c)) .map_or(false, |w| w + 1 < shape.indent.width()) }) } fn rewrite_call_args<'a, T>( context: &RewriteContext, args: &[&T], span: Span, shape: Shape, one_line_width: usize, args_max_width: usize, force_trailing_comma: bool, ) -> Option<(bool, String)> where T: Rewrite + Spanned + ToExpr + 'a, { let mut item_context = context.clone(); item_context.inside_macro = false; let items = itemize_list( context.codemap, args.iter(), ")", |item| item.span().lo, |item| item.span().hi, |item| item.rewrite(&item_context, shape), span.lo, span.hi, ); let mut item_vec: Vec<_> = items.collect(); // Try letting the last argument overflow to the next line with block // indentation. If its first line fits on one line with the other arguments, // we format the function arguments horizontally. 
let tactic = try_overflow_last_arg( &item_context, &mut item_vec, &args[..], shape, one_line_width, args_max_width, ); let fmt = ListFormatting { tactic: tactic, separator: ",", trailing_separator: if force_trailing_comma { SeparatorTactic::Always } else if context.inside_macro || !context.use_block_indent() { SeparatorTactic::Never } else { context.config.trailing_comma() }, shape: shape, ends_with_newline: false, config: context.config, }; write_list(&item_vec, &fmt).map(|args_str| { (tactic != DefinitiveListTactic::Vertical, args_str) }) } fn try_overflow_last_arg<'a, T>( context: &RewriteContext, item_vec: &mut Vec<ListItem>, args: &[&T], shape: Shape, one_line_width: usize, args_max_width: usize, ) -> DefinitiveListTactic where T: Rewrite + Spanned + ToExpr + 'a, { let overflow_last = can_be_overflowed(&context, args); // Replace the last item with its first line to see if it fits with // first arguments. let (orig_last, placeholder) = if overflow_last { let mut context = context.clone(); if let Some(expr) = args[args.len() - 1].to_expr() { match expr.node { ast::ExprKind::MethodCall(..) => context.force_one_line_chain = true, _ => (), } } last_arg_shape(&context, &item_vec, shape, args_max_width) .map_or((None, None), |arg_shape| { rewrite_last_arg_with_overflow( &context, args[args.len() - 1], &mut item_vec[args.len() - 1], arg_shape, ) }) } else { (None, None) }; let tactic = definitive_tactic( &*item_vec, ListTactic::LimitedHorizontalVertical(args_max_width), one_line_width, ); // Replace the stub with the full overflowing last argument if the rewrite // succeeded and its first line fits with the other arguments. 
match (overflow_last, tactic, placeholder) { (true, DefinitiveListTactic::Horizontal, placeholder @ Some(..)) => { item_vec[args.len() - 1].item = placeholder; } (true, _, _) => { item_vec[args.len() - 1].item = orig_last; } (false, _, _) => {} } tactic } fn last_arg_shape( context: &RewriteContext, items: &Vec<ListItem>, shape: Shape, args_max_width: usize, ) -> Option<Shape> { let overhead = items.iter().rev().skip(1).fold(0, |acc, i| { acc + i.item.as_ref().map_or(0, |s| first_line_width(&s)) }); let max_width = min(args_max_width, shape.width); let arg_indent = if context.use_block_indent() { shape.block().indent.block_unindent(context.config) } else { shape.block().indent }; Some(Shape { width: try_opt!(max_width.checked_sub(overhead)), indent: arg_indent, offset: 0, }) } fn rewrite_last_arg_with_overflow<'a, T>( context: &RewriteContext, last_arg: &T, last_item: &mut ListItem, shape: Shape, ) -> (Option<String>, Option<String>) where T: Rewrite + Spanned + ToExpr + 'a, { let rewrite = if let Some(expr) = last_arg.to_expr() { match expr.node { // When overflowing the closure which consists of a single control flow expression, // force to use block if its condition uses multi line. 
ast::ExprKind::Closure(capture, ref fn_decl, ref body, _) => { let try_closure_with_block = || { let body = match body.node { ast::ExprKind::Block(ref block) if block.stmts.len() == 1 => { try_opt!(stmt_expr(&block.stmts[0])) } _ => body, }; let (prefix, extra_offset) = try_opt!(rewrite_closure_fn_decl( capture, fn_decl, body, expr.span, context, shape, )); let shape = try_opt!(shape.offset_left(extra_offset)); rewrite_cond(context, body, shape).map_or(None, |cond| if cond.contains('\n') { rewrite_closure_with_block(context, shape, &prefix, body) } else { None }) }; try_closure_with_block().or_else(|| expr.rewrite(context, shape)) } _ => expr.rewrite(context, shape), } } else { last_arg.rewrite(context, shape) }; let orig_last = last_item.item.clone(); if let Some(rewrite) = rewrite { let rewrite_first_line = Some(rewrite[..first_line_width(&rewrite)].to_owned()); last_item.item = rewrite_first_line; (orig_last, Some(rewrite)) } else { (orig_last, None) } } fn can_be_overflowed<'a, T>(context: &RewriteContext, args: &[&T]) -> bool where T: Rewrite + Spanned + ToExpr + 'a, { args.last().map_or( false, |x| x.can_be_overflowed(context, args.len()), ) } pub fn can_be_overflowed_expr(context: &RewriteContext, expr: &ast::Expr, args_len: usize) -> bool { match expr.node { ast::ExprKind::Match(..) => { (context.use_block_indent() && args_len == 1) || (context.config.fn_call_style() == IndentStyle::Visual && args_len > 1) } ast::ExprKind::If(..) | ast::ExprKind::IfLet(..) | ast::ExprKind::ForLoop(..) | ast::ExprKind::Loop(..) | ast::ExprKind::While(..) | ast::ExprKind::WhileLet(..) => { context.config.combine_control_expr() && context.use_block_indent() && args_len == 1 } ast::ExprKind::Block(..) | ast::ExprKind::Closure(..) => { context.use_block_indent() || context.config.fn_call_style() == IndentStyle::Visual && args_len > 1 } ast::ExprKind::Call(..) | ast::ExprKind::MethodCall(..) | ast::ExprKind::Mac(..) | ast::ExprKind::Struct(..) 
        => context.use_block_indent() && args_len == 1,
        ast::ExprKind::Tup(..) => context.use_block_indent(),
        // Prefix/suffix wrappers: overflowability is decided by the inner
        // expression.
        ast::ExprKind::AddrOf(_, ref expr) |
        ast::ExprKind::Box(ref expr) |
        ast::ExprKind::Try(ref expr) |
        ast::ExprKind::Unary(_, ref expr) |
        ast::ExprKind::Cast(ref expr, _) => can_be_overflowed_expr(context, expr, args_len),
        _ => false,
    }
}

/// Total width taken by a pair of parentheses: 2 for `()`, 4 for `( … )`
/// when `spaces_within_parens` is enabled.
fn paren_overhead(context: &RewriteContext) -> usize {
    if context.config.spaces_within_parens() {
        4
    } else {
        2
    }
}

/// Wraps a rewritten argument list in parentheses, either on one line
/// (`(args)` / `( args )`) or in block style with the list on its own
/// indented lines.
pub fn wrap_args_with_parens(
    context: &RewriteContext,
    args_str: &str,
    is_extendable: bool,
    shape: Shape,
    nested_shape: Shape,
) -> String {
    // One-line parens when not block-indenting, when an extendable last arg
    // was used, or inside a macro whose single-line args still fit.
    if !context.use_block_indent() ||
        (context.inside_macro && !args_str.contains('\n') &&
             args_str.len() + paren_overhead(context) <= shape.width) || is_extendable
    {
        if context.config.spaces_within_parens() && args_str.len() > 0 {
            format!("( {} )", args_str)
        } else {
            format!("({})", args_str)
        }
    } else {
        format!(
            "(\n{}{}\n{})",
            nested_shape.indent.to_string(context.config),
            args_str,
            shape.block().indent.to_string(context.config)
        )
    }
}

/// Rewrites a parenthesized expression `(subexpr)`, retrying with a narrower
/// shape when the one-line result would not fit.
fn rewrite_paren(context: &RewriteContext, subexpr: &ast::Expr, shape: Shape) -> Option<String> {
    debug!("rewrite_paren, shape: {:?}", shape);
    let paren_overhead = paren_overhead(context);
    // Half the overhead on each side of the subexpression.
    let sub_shape = try_opt!(shape.sub_width(paren_overhead / 2)).visual_indent(paren_overhead / 2);

    let paren_wrapper = |s: &str| if context.config.spaces_within_parens() && s.len() > 0 {
        format!("( {} )", s)
    } else {
        format!("({})", s)
    };

    let subexpr_str = try_opt!(subexpr.rewrite(context, sub_shape));
    debug!("rewrite_paren, subexpr_str: `{:?}`", subexpr_str);

    if subexpr_str.contains('\n') {
        // Already multi-line; wrapping cannot make the first line wider.
        Some(paren_wrapper(&subexpr_str))
    } else {
        if subexpr_str.len() + paren_overhead <= shape.width {
            Some(paren_wrapper(&subexpr_str))
        } else {
            // Too wide on one line: re-rewrite with 2 columns reserved.
            let sub_shape = try_opt!(shape.offset_left(2));
            let subexpr_str = try_opt!(subexpr.rewrite(context, sub_shape));
            Some(paren_wrapper(&subexpr_str))
        }
    }
}

fn rewrite_index(
    expr: &ast::Expr,
    index: &ast::Expr,
    context: &RewriteContext,
    shape:
Shape, ) -> Option<String> { let expr_str = try_opt!(expr.rewrite(context, shape)); let (lbr, rbr) = if context.config.spaces_within_square_brackets() { ("[ ", " ]") } else { ("[", "]") }; let offset = expr_str.len() + lbr.len(); if let Some(index_shape) = shape.visual_indent(offset).sub_width(offset + rbr.len()) { if let Some(index_str) = index.rewrite(context, index_shape) { return Some(format!("{}{}{}{}", expr_str, lbr, index_str, rbr)); } } let indent = shape.indent.block_indent(&context.config); let indent = indent.to_string(&context.config); // FIXME this is not right, since we don't take into account that shape.width // might be reduced from max_width by something on the right. let budget = try_opt!(context.config.max_width().checked_sub( indent.len() + lbr.len() + rbr.len(), )); let index_str = try_opt!(index.rewrite(context, Shape::legacy(budget, shape.indent))); Some(format!( "{}\n{}{}{}{}", expr_str, indent, lbr, index_str, rbr )) } fn rewrite_struct_lit<'a>( context: &RewriteContext, path: &ast::Path, fields: &'a [ast::Field], base: Option<&'a ast::Expr>, span: Span, shape: Shape, ) -> Option<String> { debug!("rewrite_struct_lit: shape {:?}", shape); enum StructLitField<'a> { Regular(&'a ast::Field), Base(&'a ast::Expr), } // 2 = " {".len() let path_shape = try_opt!(shape.sub_width(2)); let path_str = try_opt!(rewrite_path( context, PathContext::Expr, None, path, path_shape, )); if fields.len() == 0 && base.is_none() { return Some(format!("{} {{}}", path_str)); } let field_iter = fields .into_iter() .map(StructLitField::Regular) .chain(base.into_iter().map(StructLitField::Base)); // Foo { a: Foo } - indent is +3, width is -5. 
let (h_shape, v_shape) = try_opt!(struct_lit_shape(shape, context, path_str.len() + 3, 2)); let span_lo = |item: &StructLitField| match *item { StructLitField::Regular(field) => field.span.lo, StructLitField::Base(expr) => { let last_field_hi = fields.last().map_or(span.lo, |field| field.span.hi); let snippet = context.snippet(mk_sp(last_field_hi, expr.span.lo)); let pos = snippet.find_uncommented("..").unwrap(); last_field_hi + BytePos(pos as u32) } }; let span_hi = |item: &StructLitField| match *item { StructLitField::Regular(field) => field.span.hi, StructLitField::Base(expr) => expr.span.hi, }; let rewrite = |item: &StructLitField| match *item { StructLitField::Regular(field) => { // The 1 taken from the v_budget is for the comma. rewrite_field(context, field, try_opt!(v_shape.sub_width(1))) } StructLitField::Base(expr) => { // 2 = .. expr.rewrite(context, try_opt!(v_shape.shrink_left(2))) .map(|s| format!("..{}", s)) } }; let items = itemize_list( context.codemap, field_iter, "}", span_lo, span_hi, rewrite, context.codemap.span_after(span, "{"), span.hi, ); let item_vec = items.collect::<Vec<_>>(); let tactic = struct_lit_tactic(h_shape, context, &item_vec); let nested_shape = shape_for_tactic(tactic, h_shape, v_shape); let fmt = struct_lit_formatting(nested_shape, tactic, context, base.is_some()); let fields_str = try_opt!(write_list(&item_vec, &fmt)); let fields_str = if context.config.struct_lit_style() == IndentStyle::Block && (fields_str.contains('\n') || context.config.struct_lit_multiline_style() == MultilineStyle::ForceMulti || fields_str.len() > h_shape.map(|s| s.width).unwrap_or(0)) { format!( "\n{}{}\n{}", v_shape.indent.to_string(context.config), fields_str, shape.indent.to_string(context.config) ) } else { // One liner or visual indent. format!(" {} ", fields_str) }; Some(format!("{} {{{}}}", path_str, fields_str)) // FIXME if context.config.struct_lit_style() == Visual, but we run out // of space, we should fall back to BlockIndent. 
}

/// Returns the `:` separator for struct-literal fields, padded according to
/// the space-before/after-colon config options.
pub fn struct_lit_field_separator(config: &Config) -> &str {
    colon_spaces(
        config.space_before_struct_lit_field_colon(),
        config.space_after_struct_lit_field_colon(),
    )
}

/// Rewrites one struct-literal field, `name` or `name: expr`, prefixing the
/// field's attributes on their own line when present. Falls back to putting
/// the value on the next (block-indented) line when it does not fit after
/// the name.
fn rewrite_field(context: &RewriteContext, field: &ast::Field, shape: Shape) -> Option<String> {
    let name = &field.ident.node.to_string();
    if field.is_shorthand {
        // Shorthand field: just the name, no `: expr`.
        Some(name.to_string())
    } else {
        let separator = struct_lit_field_separator(context.config);
        let overhead = name.len() + separator.len();
        let mut expr_shape = try_opt!(shape.sub_width(overhead));
        expr_shape.offset += overhead;
        let expr = field.expr.rewrite(context, expr_shape);

        let mut attrs_str = try_opt!((*field.attrs).rewrite(context, shape));
        if !attrs_str.is_empty() {
            attrs_str.push_str(&format!("\n{}", shape.indent.to_string(context.config)));
        };

        match expr {
            Some(e) => Some(format!("{}{}{}{}", attrs_str, name, separator, e)),
            None => {
                // Value didn't fit inline: indent it on its own line.
                let expr_offset = shape.indent.block_indent(context.config);
                let expr = field.expr.rewrite(
                    context,
                    Shape::indented(expr_offset, context.config),
                );
                expr.map(|s| {
                    format!(
                        "{}{}:\n{}{}",
                        attrs_str,
                        name,
                        expr_offset.to_string(&context.config),
                        s
                    )
                })
            }
        }
    }
}

/// Shape for a call's argument list, depending on fn_call_style: block style
/// indents one level, visual style aligns under the opening paren.
fn shape_from_fn_call_style(
    context: &RewriteContext,
    shape: Shape,
    overhead: usize,
    offset: usize,
) -> Option<Shape> {
    if context.use_block_indent() {
        // 1 = ","
        shape
            .block()
            .block_indent(context.config.tab_spaces())
            .with_max_width(context.config)
            .sub_width(1)
    } else {
        shape.visual_indent(offset).sub_width(overhead)
    }
}

/// Rewrites a tuple in visual-indent style, aligning elements under the
/// opening paren.
fn rewrite_tuple_in_visual_indent_style<'a, T>(
    context: &RewriteContext,
    items: &[&T],
    span: Span,
    shape: Shape,
) -> Option<String>
where
    T: Rewrite + Spanned + ToExpr + 'a,
{
    let mut items = items.iter();
    debug!("rewrite_tuple_in_visual_indent_style {:?}", shape);
    // In case of length 1, need a trailing comma
    if items.len() == 1 {
        // 3 = "(" + ",)"
        let nested_shape = try_opt!(shape.sub_width(3)).visual_indent(1);
        return items.next().unwrap().rewrite(context, nested_shape).map(
            |s| {
                if
context.config.spaces_within_parens() { format!("( {}, )", s) } else { format!("({},)", s) } }, ); } let list_lo = context.codemap.span_after(span, "("); let nested_shape = try_opt!(shape.sub_width(2)).visual_indent(1); let items = itemize_list( context.codemap, items, ")", |item| item.span().lo, |item| item.span().hi, |item| item.rewrite(context, nested_shape), list_lo, span.hi - BytePos(1), ); let list_str = try_opt!(format_item_list(items, nested_shape, context.config)); if context.config.spaces_within_parens() && list_str.len() > 0 { Some(format!("( {} )", list_str)) } else { Some(format!("({})", list_str)) } } pub fn rewrite_tuple<'a, T>( context: &RewriteContext, items: &[&T], span: Span, shape: Shape, ) -> Option<String> where T: Rewrite + Spanned + ToExpr + 'a, { debug!("rewrite_tuple {:?}", shape); if context.use_block_indent() { // We use the same rule as funcation call for rewriting tuple. rewrite_call_inner( context, &String::new(), items, span, shape, context.config.fn_call_width(), items.len() == 1, ).ok() } else { rewrite_tuple_in_visual_indent_style(context, items, span, shape) } } pub fn rewrite_unary_prefix<R: Rewrite>( context: &RewriteContext, prefix: &str, rewrite: &R, shape: Shape, ) -> Option<String> { rewrite .rewrite(context, try_opt!(shape.offset_left(prefix.len()))) .map(|r| format!("{}{}", prefix, r)) } // FIXME: this is probably not correct for multi-line Rewrites. we should // subtract suffix.len() from the last line budget, not the first! pub fn rewrite_unary_suffix<R: Rewrite>( context: &RewriteContext, suffix: &str, rewrite: &R, shape: Shape, ) -> Option<String> { rewrite .rewrite(context, try_opt!(shape.sub_width(suffix.len()))) .map(|mut r| { r.push_str(suffix); r }) } fn rewrite_unary_op( context: &RewriteContext, op: &ast::UnOp, expr: &ast::Expr, shape: Shape, ) -> Option<String> { // For some reason, an UnOp is not spanned like BinOp! 
let operator_str = match *op { ast::UnOp::Deref => "*", ast::UnOp::Not => "!", ast::UnOp::Neg => "-", }; rewrite_unary_prefix(context, operator_str, expr, shape) } fn rewrite_assignment( context: &RewriteContext, lhs: &ast::Expr, rhs: &ast::Expr, op: Option<&ast::BinOp>, shape: Shape, ) -> Option<String> { let operator_str = match op { Some(op) => context.snippet(op.span), None => "=".to_owned(), }; // 1 = space between lhs and operator. let lhs_shape = try_opt!(shape.sub_width(operator_str.len() + 1)); let lhs_str = format!( "{} {}", try_opt!(lhs.rewrite(context, lhs_shape)), operator_str ); rewrite_assign_rhs(context, lhs_str, rhs, shape) } // The left hand side must contain everything up to, and including, the // assignment operator. pub fn rewrite_assign_rhs<S: Into<String>>( context: &RewriteContext, lhs: S, ex: &ast::Expr, shape: Shape, ) -> Option<String> { let mut result = lhs.into(); let last_line_width = last_line_width(&result) - if result.contains('\n') { shape.indent.width() } else { 0 }; // 1 = space between operator and rhs. let orig_shape = try_opt!(shape.block_indent(0).offset_left(last_line_width + 1)); let rhs = match ex.node { ast::ExprKind::Mac(ref mac) => { match rewrite_macro(mac, None, context, orig_shape, MacroPosition::Expression) { None if !context.snippet(ex.span).contains("\n") => { context.snippet(ex.span).rewrite(context, orig_shape) } rhs @ _ => rhs, } } _ => ex.rewrite(context, orig_shape), }; fn count_line_breaks(src: &str) -> usize { src.chars().filter(|&x| x == '\n').count() } match rhs { Some(ref new_str) if count_line_breaks(new_str) < 2 => { result.push(' '); result.push_str(new_str); } _ => { // Expression did not fit on the same line as the identifier or is // at least three lines big. Try splitting the line and see // if that works better. let new_shape = try_opt!(shape.block_left(context.config.tab_spaces())); let new_rhs = ex.rewrite(context, new_shape); // FIXME: DRY! 
match (rhs, new_rhs) { (Some(ref orig_rhs), Some(ref replacement_rhs)) if count_line_breaks(orig_rhs) > count_line_breaks(replacement_rhs) + 1 || (orig_rhs.rewrite(context, shape).is_none() && replacement_rhs.rewrite(context, new_shape).is_some()) => { result.push_str(&format!("\n{}", new_shape.indent.to_string(context.config))); result.push_str(replacement_rhs); } (None, Some(ref final_rhs)) => { result.push_str(&format!("\n{}", new_shape.indent.to_string(context.config))); result.push_str(final_rhs); } (None, None) => return None, (Some(ref orig_rhs), _) => { result.push(' '); result.push_str(orig_rhs); } } } } Some(result) } fn rewrite_expr_addrof( context: &RewriteContext, mutability: ast::Mutability, expr: &ast::Expr, shape: Shape, ) -> Option<String> { let operator_str = match mutability { ast::Mutability::Immutable => "&", ast::Mutability::Mutable => "&mut ", }; rewrite_unary_prefix(context, operator_str, expr, shape) } pub trait ToExpr { fn to_expr(&self) -> Option<&ast::Expr>; fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool; } impl ToExpr for ast::Expr { fn to_expr(&self) -> Option<&ast::Expr> { Some(self) } fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool { can_be_overflowed_expr(context, self, len) } } impl ToExpr for ast::Ty { fn to_expr(&self) -> Option<&ast::Expr> { None } fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool { can_be_overflowed_type(context, self, len) } } impl<'a> ToExpr for TuplePatField<'a> { fn to_expr(&self) -> Option<&ast::Expr> { None } fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool { can_be_overflowed_pat(context, self, len) } } Use special rules when overflowing the last argument When overflowing the last argument of function call, if it is a closure, we apply some special rules in order to avoid weird formatting. // Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::cmp::{Ordering, min}; use std::iter::ExactSizeIterator; use std::fmt::Write; use {Indent, Shape, Spanned}; use codemap::SpanUtils; use rewrite::{Rewrite, RewriteContext}; use lists::{write_list, itemize_list, ListFormatting, SeparatorTactic, ListTactic, DefinitiveListTactic, definitive_tactic, ListItem, format_item_list, struct_lit_shape, struct_lit_tactic, shape_for_tactic, struct_lit_formatting}; use string::{StringFormat, rewrite_string}; use utils::{extra_offset, last_line_width, wrap_str, binary_search, first_line_width, semicolon_for_stmt, trimmed_last_line_width, left_most_sub_expr, stmt_expr, colon_spaces, contains_skip, mk_sp}; use visitor::FmtVisitor; use config::{Config, IndentStyle, MultilineStyle, ControlBraceStyle, Style}; use comment::{FindUncommented, rewrite_comment, contains_comment, recover_comment_removed}; use types::{rewrite_path, PathContext, can_be_overflowed_type}; use items::{span_lo_for_arg, span_hi_for_arg}; use chains::rewrite_chain; use macros::{rewrite_macro, MacroPosition}; use patterns::{TuplePatField, can_be_overflowed_pat}; use syntax::{ast, ptr}; use syntax::codemap::{CodeMap, Span, BytePos}; use syntax::parse::classify; impl Rewrite for ast::Expr { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { format_expr(self, ExprType::SubExpression, context, shape) } } #[derive(PartialEq)] enum ExprType { Statement, SubExpression, } fn combine_attr_and_expr( context: &RewriteContext, shape: Shape, attr_str: &str, expr_str: &str, ) -> String { let separator = if attr_str.is_empty() { 
String::new() } else { if expr_str.contains('\n') || attr_str.contains('\n') || attr_str.len() + expr_str.len() > shape.width { format!("\n{}", shape.indent.to_string(context.config)) } else { String::from(" ") } }; format!("{}{}{}", attr_str, separator, expr_str) } fn format_expr( expr: &ast::Expr, expr_type: ExprType, context: &RewriteContext, shape: Shape, ) -> Option<String> { let attr_rw = (&*expr.attrs).rewrite(context, shape); if contains_skip(&*expr.attrs) { if let Some(attr_str) = attr_rw { return Some(combine_attr_and_expr( context, shape, &attr_str, &context.snippet(expr.span), )); } else { return Some(context.snippet(expr.span)); } } let expr_rw = match expr.node { ast::ExprKind::Array(ref expr_vec) => { rewrite_array( expr_vec.iter().map(|e| &**e), mk_sp(context.codemap.span_after(expr.span, "["), expr.span.hi), context, shape, ) } ast::ExprKind::Lit(ref l) => { match l.node { ast::LitKind::Str(_, ast::StrStyle::Cooked) => { rewrite_string_lit(context, l.span, shape) } _ => { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) } } } ast::ExprKind::Call(ref callee, ref args) => { let inner_span = mk_sp(callee.span.hi, expr.span.hi); rewrite_call_with_binary_search( context, &**callee, &args.iter().map(|x| &**x).collect::<Vec<_>>()[..], inner_span, shape, ) } ast::ExprKind::Paren(ref subexpr) => rewrite_paren(context, subexpr, shape), ast::ExprKind::Binary(ref op, ref lhs, ref rhs) => { // FIXME: format comments between operands and operator rewrite_pair( &**lhs, &**rhs, "", &format!(" {} ", context.snippet(op.span)), "", context, shape, ) } ast::ExprKind::Unary(ref op, ref subexpr) => rewrite_unary_op(context, op, subexpr, shape), ast::ExprKind::Struct(ref path, ref fields, ref base) => { rewrite_struct_lit( context, path, fields, base.as_ref().map(|e| &**e), expr.span, shape, ) } ast::ExprKind::Tup(ref items) => { rewrite_tuple( context, &items.iter().map(|x| &**x).collect::<Vec<_>>()[..], expr.span, shape, ) } 
ast::ExprKind::If(..) | ast::ExprKind::IfLet(..) | ast::ExprKind::ForLoop(..) | ast::ExprKind::Loop(..) | ast::ExprKind::While(..) | ast::ExprKind::WhileLet(..) => { to_control_flow(expr, expr_type) .and_then(|control_flow| control_flow.rewrite(context, shape)) } ast::ExprKind::Block(ref block) => block.rewrite(context, shape), ast::ExprKind::Match(ref cond, ref arms) => { rewrite_match(context, cond, arms, shape, expr.span) } ast::ExprKind::Path(ref qself, ref path) => { rewrite_path(context, PathContext::Expr, qself.as_ref(), path, shape) } ast::ExprKind::Assign(ref lhs, ref rhs) => { rewrite_assignment(context, lhs, rhs, None, shape) } ast::ExprKind::AssignOp(ref op, ref lhs, ref rhs) => { rewrite_assignment(context, lhs, rhs, Some(op), shape) } ast::ExprKind::Continue(ref opt_ident) => { let id_str = match *opt_ident { Some(ident) => format!(" {}", ident.node), None => String::new(), }; wrap_str( format!("continue{}", id_str), context.config.max_width(), shape, ) } ast::ExprKind::Break(ref opt_ident, ref opt_expr) => { let id_str = match *opt_ident { Some(ident) => format!(" {}", ident.node), None => String::new(), }; if let Some(ref expr) = *opt_expr { rewrite_unary_prefix(context, &format!("break{} ", id_str), &**expr, shape) } else { wrap_str( format!("break{}", id_str), context.config.max_width(), shape, ) } } ast::ExprKind::Closure(capture, ref fn_decl, ref body, _) => { rewrite_closure(capture, fn_decl, body, expr.span, context, shape) } ast::ExprKind::Try(..) | ast::ExprKind::Field(..) | ast::ExprKind::TupField(..) | ast::ExprKind::MethodCall(..) => rewrite_chain(expr, context, shape), ast::ExprKind::Mac(ref mac) => { // Failure to rewrite a marco should not imply failure to // rewrite the expression. 
rewrite_macro(mac, None, context, shape, MacroPosition::Expression).or_else(|| { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) }) } ast::ExprKind::Ret(None) => { wrap_str("return".to_owned(), context.config.max_width(), shape) } ast::ExprKind::Ret(Some(ref expr)) => { rewrite_unary_prefix(context, "return ", &**expr, shape) } ast::ExprKind::Box(ref expr) => rewrite_unary_prefix(context, "box ", &**expr, shape), ast::ExprKind::AddrOf(mutability, ref expr) => { rewrite_expr_addrof(context, mutability, expr, shape) } ast::ExprKind::Cast(ref expr, ref ty) => { rewrite_pair(&**expr, &**ty, "", " as ", "", context, shape) } ast::ExprKind::Type(ref expr, ref ty) => { rewrite_pair(&**expr, &**ty, "", ": ", "", context, shape) } ast::ExprKind::Index(ref expr, ref index) => { rewrite_index(&**expr, &**index, context, shape) } ast::ExprKind::Repeat(ref expr, ref repeats) => { let (lbr, rbr) = if context.config.spaces_within_square_brackets() { ("[ ", " ]") } else { ("[", "]") }; rewrite_pair(&**expr, &**repeats, lbr, "; ", rbr, context, shape) } ast::ExprKind::Range(ref lhs, ref rhs, limits) => { let delim = match limits { ast::RangeLimits::HalfOpen => "..", ast::RangeLimits::Closed => "...", }; match (lhs.as_ref().map(|x| &**x), rhs.as_ref().map(|x| &**x)) { (Some(ref lhs), Some(ref rhs)) => { let sp_delim = if context.config.spaces_around_ranges() { format!(" {} ", delim) } else { delim.into() }; rewrite_pair(&**lhs, &**rhs, "", &sp_delim, "", context, shape) } (None, Some(ref rhs)) => { let sp_delim = if context.config.spaces_around_ranges() { format!("{} ", delim) } else { delim.into() }; rewrite_unary_prefix(context, &sp_delim, &**rhs, shape) } (Some(ref lhs), None) => { let sp_delim = if context.config.spaces_around_ranges() { format!(" {}", delim) } else { delim.into() }; rewrite_unary_suffix(context, &sp_delim, &**lhs, shape) } (None, None) => wrap_str(delim.into(), context.config.max_width(), shape), } } // We do not format these 
expressions yet, but they should still // satisfy our width restrictions. ast::ExprKind::InPlace(..) | ast::ExprKind::InlineAsm(..) => { wrap_str( context.snippet(expr.span), context.config.max_width(), shape, ) } ast::ExprKind::Catch(ref block) => { if let rewrite @ Some(_) = try_one_line_block(context, shape, "do catch ", block) { return rewrite; } // 9 = `do catch ` let budget = shape.width.checked_sub(9).unwrap_or(0); Some(format!( "{}{}", "do catch ", try_opt!( block.rewrite(&context, Shape::legacy(budget, shape.indent)) ) )) } }; match (attr_rw, expr_rw) { (Some(attr_str), Some(expr_str)) => { recover_comment_removed( combine_attr_and_expr(context, shape, &attr_str, &expr_str), expr.span, context, shape, ) } _ => None, } } fn try_one_line_block( context: &RewriteContext, shape: Shape, prefix: &str, block: &ast::Block, ) -> Option<String> { if is_simple_block(block, context.codemap) { let expr_shape = Shape::legacy(shape.width - prefix.len(), shape.indent); let expr_str = try_opt!(block.stmts[0].rewrite(context, expr_shape)); let result = format!("{}{{ {} }}", prefix, expr_str); if result.len() <= shape.width && !result.contains('\n') { return Some(result); } } None } pub fn rewrite_pair<LHS, RHS>( lhs: &LHS, rhs: &RHS, prefix: &str, infix: &str, suffix: &str, context: &RewriteContext, shape: Shape, ) -> Option<String> where LHS: Rewrite, RHS: Rewrite, { // Get "full width" rhs and see if it fits on the current line. This // usually works fairly well since it tends to place operands of // operations with high precendence close together. // Note that this is non-conservative, but its just to see if it's even // worth trying to put everything on one line. let rhs_shape = try_opt!(shape.sub_width(suffix.len())); let rhs_result = rhs.rewrite(context, rhs_shape); if let Some(rhs_result) = rhs_result { // This is needed in case of line break not caused by a // shortage of space, but by end-of-line comments, for example. 
if !rhs_result.contains('\n') { let lhs_shape = try_opt!(try_opt!(shape.offset_left(prefix.len())).sub_width(infix.len())); let lhs_result = lhs.rewrite(context, lhs_shape); if let Some(lhs_result) = lhs_result { let mut result = format!("{}{}{}", prefix, lhs_result, infix); let remaining_width = shape .width .checked_sub(last_line_width(&result) + suffix.len()) .unwrap_or(0); if rhs_result.len() <= remaining_width { result.push_str(&rhs_result); result.push_str(suffix); return Some(result); } // Try rewriting the rhs into the remaining space. let rhs_shape = shape.shrink_left(last_line_width(&result) + suffix.len()); if let Some(rhs_shape) = rhs_shape { if let Some(rhs_result) = rhs.rewrite(context, rhs_shape) { // FIXME this should always hold. if rhs_result.len() <= remaining_width { result.push_str(&rhs_result); result.push_str(suffix); return Some(result); } } } } } } // We have to use multiple lines. // Re-evaluate the rhs because we have more space now: let infix = infix.trim_right(); let rhs_shape = match context.config.control_style() { Style::Legacy => { try_opt!(shape.sub_width(suffix.len() + prefix.len())).visual_indent(prefix.len()) } Style::Rfc => { // Try to calculate the initial constraint on the right hand side. 
let rhs_overhead = context .config .max_width() .checked_sub(shape.used_width() + shape.width) .unwrap_or(0); try_opt!( Shape::indented(shape.indent.block_indent(context.config), context.config) .sub_width(rhs_overhead) ) } }; let rhs_result = try_opt!(rhs.rewrite(context, rhs_shape)); let lhs_overhead = shape.used_width() + prefix.len() + infix.len(); let lhs_shape = Shape { width: try_opt!(context.config.max_width().checked_sub(lhs_overhead)), ..shape }; let lhs_result = try_opt!(lhs.rewrite(context, lhs_shape)); Some(format!( "{}{}{}\n{}{}{}", prefix, lhs_result, infix, rhs_shape.indent.to_string(context.config), rhs_result, suffix )) } pub fn rewrite_array<'a, I>( expr_iter: I, span: Span, context: &RewriteContext, shape: Shape, ) -> Option<String> where I: Iterator<Item = &'a ast::Expr>, { let bracket_size = if context.config.spaces_within_square_brackets() { 2 // "[ " } else { 1 // "[" }; let nested_shape = match context.config.array_layout() { IndentStyle::Block => shape.block().block_indent(context.config.tab_spaces()), IndentStyle::Visual => { try_opt!( shape .visual_indent(bracket_size) .sub_width(bracket_size * 2) ) } }; let items = itemize_list( context.codemap, expr_iter, "]", |item| item.span.lo, |item| item.span.hi, |item| item.rewrite(context, nested_shape), span.lo, span.hi, ).collect::<Vec<_>>(); if items.is_empty() { if context.config.spaces_within_square_brackets() { return Some("[ ]".to_string()); } else { return Some("[]".to_string()); } } let has_long_item = items .iter() .any(|li| li.item.as_ref().map(|s| s.len() > 10).unwrap_or(false)); let tactic = match context.config.array_layout() { IndentStyle::Block => { // FIXME wrong shape in one-line case match shape.width.checked_sub(2 * bracket_size) { Some(width) => { let tactic = ListTactic::LimitedHorizontalVertical(context.config.array_width()); definitive_tactic(&items, tactic, width) } None => DefinitiveListTactic::Vertical, } } IndentStyle::Visual => { if has_long_item || 
items.iter().any(ListItem::is_multiline) { definitive_tactic( &items, ListTactic::LimitedHorizontalVertical(context.config.array_width()), nested_shape.width, ) } else { DefinitiveListTactic::Mixed } } }; let fmt = ListFormatting { tactic: tactic, separator: ",", trailing_separator: SeparatorTactic::Never, shape: nested_shape, ends_with_newline: false, config: context.config, }; let list_str = try_opt!(write_list(&items, &fmt)); let result = if context.config.array_layout() == IndentStyle::Visual || tactic != DefinitiveListTactic::Vertical { if context.config.spaces_within_square_brackets() && list_str.len() > 0 { format!("[ {} ]", list_str) } else { format!("[{}]", list_str) } } else { format!( "[\n{}{},\n{}]", nested_shape.indent.to_string(context.config), list_str, shape.block().indent.to_string(context.config) ) }; Some(result) } // Return type is (prefix, extra_offset) fn rewrite_closure_fn_decl( capture: ast::CaptureBy, fn_decl: &ast::FnDecl, body: &ast::Expr, span: Span, context: &RewriteContext, shape: Shape, ) -> Option<(String, usize)> { let mover = if capture == ast::CaptureBy::Value { "move " } else { "" }; // 4 = "|| {".len(), which is overconservative when the closure consists of // a single expression. let nested_shape = try_opt!(try_opt!(shape.shrink_left(mover.len())).sub_width(4)); // 1 = | let argument_offset = nested_shape.indent + 1; let arg_shape = try_opt!(nested_shape.shrink_left(1)).visual_indent(0); let ret_str = try_opt!(fn_decl.output.rewrite(context, arg_shape)); let arg_items = itemize_list( context.codemap, fn_decl.inputs.iter(), "|", |arg| span_lo_for_arg(arg), |arg| span_hi_for_arg(arg), |arg| arg.rewrite(context, arg_shape), context.codemap.span_after(span, "|"), body.span.lo, ); let item_vec = arg_items.collect::<Vec<_>>(); // 1 = space between arguments and return type. 
let horizontal_budget = nested_shape .width .checked_sub(ret_str.len() + 1) .unwrap_or(0); let tactic = definitive_tactic(&item_vec, ListTactic::HorizontalVertical, horizontal_budget); let arg_shape = match tactic { DefinitiveListTactic::Horizontal => try_opt!(arg_shape.sub_width(ret_str.len() + 1)), _ => arg_shape, }; let fmt = ListFormatting { tactic: tactic, separator: ",", trailing_separator: SeparatorTactic::Never, shape: arg_shape, ends_with_newline: false, config: context.config, }; let list_str = try_opt!(write_list(&item_vec, &fmt)); let mut prefix = format!("{}|{}|", mover, list_str); // 1 = space between `|...|` and body. let extra_offset = extra_offset(&prefix, shape) + 1; if !ret_str.is_empty() { if prefix.contains('\n') { prefix.push('\n'); prefix.push_str(&argument_offset.to_string(context.config)); } else { prefix.push(' '); } prefix.push_str(&ret_str); } Some((prefix, extra_offset)) } // This functions is pretty messy because of the rules around closures and blocks: // FIXME - the below is probably no longer true in full. // * if there is a return type, then there must be braces, // * given a closure with braces, whether that is parsed to give an inner block // or not depends on if there is a return type and if there are statements // in that block, // * if the first expression in the body ends with a block (i.e., is a // statement without needing a semi-colon), then adding or removing braces // can change whether it is treated as an expression or statement. fn rewrite_closure( capture: ast::CaptureBy, fn_decl: &ast::FnDecl, body: &ast::Expr, span: Span, context: &RewriteContext, shape: Shape, ) -> Option<String> { let (prefix, extra_offset) = try_opt!(rewrite_closure_fn_decl( capture, fn_decl, body, span, context, shape, )); // 1 = space between `|...|` and body. let body_shape = try_opt!(shape.offset_left(extra_offset)); if let ast::ExprKind::Block(ref block) = body.node { // The body of the closure is an empty block. 
if block.stmts.is_empty() && !block_contains_comment(block, context.codemap) { return Some(format!("{} {{}}", prefix)); } // Figure out if the block is necessary. let needs_block = block.rules != ast::BlockCheckMode::Default || block.stmts.len() > 1 || context.inside_macro || block_contains_comment(block, context.codemap) || prefix.contains('\n'); let no_return_type = if let ast::FunctionRetTy::Default(_) = fn_decl.output { true } else { false }; if no_return_type && !needs_block { // lock.stmts.len() == 1 if let Some(ref expr) = stmt_expr(&block.stmts[0]) { if let Some(rw) = rewrite_closure_expr(expr, &prefix, context, body_shape) { return Some(rw); } } } if !needs_block { // We need braces, but we might still prefer a one-liner. let stmt = &block.stmts[0]; // 4 = braces and spaces. if let Some(body_shape) = body_shape.sub_width(4) { // Checks if rewrite succeeded and fits on a single line. if let Some(rewrite) = and_one_line(stmt.rewrite(context, body_shape)) { return Some(format!("{} {{ {} }}", prefix, rewrite)); } } } // Either we require a block, or tried without and failed. rewrite_closure_block(&block, &prefix, context, body_shape) } else { rewrite_closure_expr(body, &prefix, context, body_shape).or_else(|| { // The closure originally had a non-block expression, but we can't fit on // one line, so we'll insert a block. rewrite_closure_with_block(context, body_shape, &prefix, body) }) } } // Rewrite closure with a single expression wrapping its body with block. fn rewrite_closure_with_block( context: &RewriteContext, shape: Shape, prefix: &str, body: &ast::Expr, ) -> Option<String> { let block = ast::Block { stmts: vec![ ast::Stmt { id: ast::NodeId::new(0), node: ast::StmtKind::Expr(ptr::P(body.clone())), span: body.span, }, ], id: ast::NodeId::new(0), rules: ast::BlockCheckMode::Default, span: body.span, }; rewrite_closure_block(&block, prefix, context, shape) } // Rewrite closure with a single expression without wrapping its body with block. 
fn rewrite_closure_expr(
    expr: &ast::Expr,
    prefix: &str,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String> {
    let mut rewrite = expr.rewrite(context, shape);
    // If the left-most sub-expression would need a semicolon to be a statement,
    // a multi-line rendering is rejected (forced back to the block form by the
    // caller), so collapse the rewrite to one line or nothing.
    if classify::expr_requires_semi_to_be_stmt(left_most_sub_expr(expr)) {
        rewrite = and_one_line(rewrite);
    }
    // Result is `<prefix> <body>`, e.g. `|x| x + 1`.
    rewrite.map(|rw| format!("{} {}", prefix, rw))
}

// Rewrite closure whose body is block.
// `prefix` is the already-rendered `move |args| -> Ret` header; the rendered
// block is appended after a single space.
fn rewrite_closure_block(
    block: &ast::Block,
    prefix: &str,
    context: &RewriteContext,
    shape: Shape,
) -> Option<String> {
    // Start with visual indent, then fall back to block indent if the
    // closure is large.
    // closure_block_indent_threshold < 0 disables the visual-indent attempt.
    let block_threshold = context.config.closure_block_indent_threshold();
    if block_threshold >= 0 {
        if let Some(block_str) = block.rewrite(&context, shape) {
            // Keep the visual-indent rendering only if it is short enough
            // (by newline count) and does not require block indentation.
            if block_str.matches('\n').count() <= block_threshold as usize &&
                !need_block_indent(&block_str, shape)
            {
                // NOTE(review): this calls `.rewrite()` on the already-rendered
                // String a second time — presumably relying on a `Rewrite` impl
                // for `String`; looks possibly redundant, TODO confirm intent.
                if let Some(block_str) = block_str.rewrite(context, shape) {
                    return Some(format!("{} {}", prefix, block_str));
                }
            }
        }
    }

    // The body of the closure is big enough to be block indented, that
    // means we must re-format.
    let block_shape = shape.block().with_max_width(context.config);
    let block_str = try_opt!(block.rewrite(&context, block_shape));
    Some(format!("{} {}", prefix, block_str))
}

// Pass the string through only if it fits on a single line; a rendering that
// contains a newline is discarded (maps to None).
fn and_one_line(x: Option<String>) -> Option<String> {
    x.and_then(|x| if x.contains('\n') { None } else { Some(x) })
}

// Collapse a rendered block that contains nothing but whitespace between its
// braces down to the literal `{}` (when at least 2 columns of budget remain);
// any other rendering is passed through unchanged.
fn nop_block_collapse(block_str: Option<String>, budget: usize) -> Option<String> {
    debug!("nop_block_collapse {:?} {}", block_str, budget);
    block_str.map(|block_str| {
        // The `.unwrap()` assumes that a string starting with '{' contains at
        // least one later non-whitespace char (the closing '}') — holds for
        // any well-formed rendered block.
        if block_str.starts_with('{') && budget >= 2 &&
            (block_str[1..].find(|c: char| !c.is_whitespace()).unwrap() ==
                 block_str.len() - 2)
        {
            "{}".to_owned()
        } else {
            block_str.to_owned()
        }
    })
}

impl Rewrite for ast::Block {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        // shape.width is used only for the single line case: either the empty block `{}`,
        // or an unsafe expression `unsafe { e }`.
        // Fast path: a truly empty, comment-free block renders as `{}`.
        if self.stmts.is_empty() && !block_contains_comment(self, context.codemap) &&
            shape.width >= 2
        {
            return Some("{}".to_owned());
        }

        // If a block contains only a single-line comment, then leave it on one line.
        let user_str = context.snippet(self.span);
        let user_str = user_str.trim();
        if user_str.starts_with('{') && user_str.ends_with('}') {
            let comment_str = user_str[1..user_str.len() - 1].trim();
            // 4 = `{ ` + ` }` surrounding the comment.
            // Line (`//`) comments are excluded: they would swallow the
            // closing brace if kept on one line.
            if self.stmts.is_empty() && !comment_str.contains('\n') &&
                !comment_str.starts_with("//") &&
                comment_str.len() + 4 <= shape.width
            {
                return Some(format!("{{ {} }}", comment_str));
            }
        }

        // General case: delegate the block body to a fresh visitor.
        let mut visitor = FmtVisitor::from_codemap(context.parse_session, context.config);
        visitor.block_indent = shape.indent;
        visitor.is_if_else_block = context.is_if_else_block;

        // `prefix` carries the `unsafe ` header (plus any comment between
        // `unsafe` and `{`) when applicable; empty for a default block.
        let prefix = match self.rules {
            ast::BlockCheckMode::Unsafe(..) => {
                let snippet = context.snippet(self.span);
                let open_pos = try_opt!(snippet.find_uncommented("{"));
                visitor.last_pos = self.span.lo + BytePos(open_pos as u32);

                // Extract comment between unsafe and block start.
                // 6 = "unsafe".len()
                let trimmed = &snippet[6..open_pos].trim();

                let prefix = if !trimmed.is_empty() {
                    // 9 = "unsafe {".len(), 7 = "unsafe ".len()
                    let budget = try_opt!(shape.width.checked_sub(9));
                    format!(
                        "unsafe {} ",
                        try_opt!(rewrite_comment(
                            trimmed,
                            true,
                            Shape::legacy(budget, shape.indent + 7),
                            context.config,
                        ))
                    )
                } else {
                    "unsafe ".to_owned()
                };
                // Prefer `unsafe { e }` on one line when the block is simple.
                if let result @ Some(_) = try_one_line_block(context, shape, &prefix, self) {
                    return result;
                }
                prefix
            }
            ast::BlockCheckMode::Default => {
                visitor.last_pos = self.span.lo;
                String::new()
            }
        };

        visitor.visit_block(self);
        // If formatting at an aligned (visual) indent failed, retry once from
        // the plain block indent; `alignment != 0` guards against infinite
        // recursion since the retry uses alignment 0.
        if visitor.failed && shape.indent.alignment != 0 {
            self.rewrite(
                context,
                Shape::indented(shape.indent.block_only(), context.config),
            )
        } else {
            Some(format!("{}{}", prefix, visitor.buffer))
        }
    }
}

impl Rewrite for ast::Stmt {
    fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> {
        let result = match self.node {
            ast::StmtKind::Local(ref local) => local.rewrite(context, shape),
            ast::StmtKind::Expr(ref ex) |
            ast::StmtKind::Semi(ref ex) => {
                // Trailing `;` is re-attached after formatting, so its width
                // is reserved up front via `sub_width`.
                let suffix = if semicolon_for_stmt(self) { ";" } else { "" };

                format_expr(
                    ex,
                    match self.node {
                        ast::StmtKind::Expr(_) => ExprType::SubExpression,
                        ast::StmtKind::Semi(_) => ExprType::Statement,
                        _ => unreachable!(),
                    },
                    context,
                    try_opt!(shape.sub_width(suffix.len())),
                ).map(|s| s + suffix)
            }
            // Macro and item statements are not formatted here; the caller
            // falls back to reproducing the original snippet.
            ast::StmtKind::Mac(..) |
            ast::StmtKind::Item(..) => None,
        };
        result.and_then(|res| {
            recover_comment_removed(res, self.span, context, shape)
        })
    }
}

// Rewrite condition if the given expression has one.
fn rewrite_cond(context: &RewriteContext, expr: &ast::Expr, shape: Shape) -> Option<String> { match expr.node { ast::ExprKind::Match(ref cond, _) => { // `match `cond` {` let cond_shape = match context.config.control_style() { Style::Legacy => try_opt!(shape.shrink_left(6).and_then(|s| s.sub_width(2))), Style::Rfc => try_opt!(shape.offset_left(8)), }; cond.rewrite(context, cond_shape) } ast::ExprKind::Block(ref block) if block.stmts.len() == 1 => { stmt_expr(&block.stmts[0]).and_then(|e| rewrite_cond(context, e, shape)) } _ => { to_control_flow(expr, ExprType::SubExpression).and_then(|control_flow| { let alt_block_sep = String::from("\n") + &shape.indent.block_only().to_string(context.config); control_flow .rewrite_cond(context, shape, &alt_block_sep) .and_then(|rw| Some(rw.0)) }) } } } // Abstraction over control flow expressions #[derive(Debug)] struct ControlFlow<'a> { cond: Option<&'a ast::Expr>, block: &'a ast::Block, else_block: Option<&'a ast::Expr>, label: Option<ast::SpannedIdent>, pat: Option<&'a ast::Pat>, keyword: &'a str, matcher: &'a str, connector: &'a str, allow_single_line: bool, // True if this is an `if` expression in an `else if` :-( hacky nested_if: bool, span: Span, } fn to_control_flow<'a>(expr: &'a ast::Expr, expr_type: ExprType) -> Option<ControlFlow<'a>> { match expr.node { ast::ExprKind::If(ref cond, ref if_block, ref else_block) => { Some(ControlFlow::new_if( cond, None, if_block, else_block.as_ref().map(|e| &**e), expr_type == ExprType::SubExpression, false, expr.span, )) } ast::ExprKind::IfLet(ref pat, ref cond, ref if_block, ref else_block) => { Some(ControlFlow::new_if( cond, Some(pat), if_block, else_block.as_ref().map(|e| &**e), expr_type == ExprType::SubExpression, false, expr.span, )) } ast::ExprKind::ForLoop(ref pat, ref cond, ref block, label) => { Some(ControlFlow::new_for(pat, cond, block, label, expr.span)) } ast::ExprKind::Loop(ref block, label) => Some( ControlFlow::new_loop(block, label, expr.span), ), 
ast::ExprKind::While(ref cond, ref block, label) => Some(ControlFlow::new_while( None, cond, block, label, expr.span, )), ast::ExprKind::WhileLet(ref pat, ref cond, ref block, label) => { Some(ControlFlow::new_while( Some(pat), cond, block, label, expr.span, )) } _ => None, } } impl<'a> ControlFlow<'a> { fn new_if( cond: &'a ast::Expr, pat: Option<&'a ast::Pat>, block: &'a ast::Block, else_block: Option<&'a ast::Expr>, allow_single_line: bool, nested_if: bool, span: Span, ) -> ControlFlow<'a> { ControlFlow { cond: Some(cond), block: block, else_block: else_block, label: None, pat: pat, keyword: "if", matcher: match pat { Some(..) => "let", None => "", }, connector: " =", allow_single_line: allow_single_line, nested_if: nested_if, span: span, } } fn new_loop( block: &'a ast::Block, label: Option<ast::SpannedIdent>, span: Span, ) -> ControlFlow<'a> { ControlFlow { cond: None, block: block, else_block: None, label: label, pat: None, keyword: "loop", matcher: "", connector: "", allow_single_line: false, nested_if: false, span: span, } } fn new_while( pat: Option<&'a ast::Pat>, cond: &'a ast::Expr, block: &'a ast::Block, label: Option<ast::SpannedIdent>, span: Span, ) -> ControlFlow<'a> { ControlFlow { cond: Some(cond), block: block, else_block: None, label: label, pat: pat, keyword: "while", matcher: match pat { Some(..) 
=> "let", None => "", }, connector: " =", allow_single_line: false, nested_if: false, span: span, } } fn new_for( pat: &'a ast::Pat, cond: &'a ast::Expr, block: &'a ast::Block, label: Option<ast::SpannedIdent>, span: Span, ) -> ControlFlow<'a> { ControlFlow { cond: Some(cond), block: block, else_block: None, label: label, pat: Some(pat), keyword: "for", matcher: "", connector: " in", allow_single_line: false, nested_if: false, span: span, } } fn rewrite_single_line( &self, pat_expr_str: &str, context: &RewriteContext, width: usize, ) -> Option<String> { assert!(self.allow_single_line); let else_block = try_opt!(self.else_block); let fixed_cost = self.keyword.len() + " { } else { }".len(); if let ast::ExprKind::Block(ref else_node) = else_block.node { if !is_simple_block(self.block, context.codemap) || !is_simple_block(else_node, context.codemap) || pat_expr_str.contains('\n') { return None; } let new_width = try_opt!(width.checked_sub(pat_expr_str.len() + fixed_cost)); let expr = &self.block.stmts[0]; let if_str = try_opt!(expr.rewrite( context, Shape::legacy(new_width, Indent::empty()), )); let new_width = try_opt!(new_width.checked_sub(if_str.len())); let else_expr = &else_node.stmts[0]; let else_str = try_opt!(else_expr.rewrite( context, Shape::legacy(new_width, Indent::empty()), )); if if_str.contains('\n') || else_str.contains('\n') { return None; } let result = format!( "{} {} {{ {} }} else {{ {} }}", self.keyword, pat_expr_str, if_str, else_str ); if result.len() <= width { return Some(result); } } None } } impl<'a> ControlFlow<'a> { fn rewrite_cond( &self, context: &RewriteContext, shape: Shape, alt_block_sep: &str, ) -> Option<(String, usize)> { let constr_shape = if self.nested_if { // We are part of an if-elseif-else chain. Our constraints are tightened. // 7 = "} else " .len() try_opt!(shape.shrink_left(7)) } else { shape }; let label_string = rewrite_label(self.label); // 1 = space after keyword. 
let offset = self.keyword.len() + label_string.len() + 1; let pat_expr_string = match self.cond { Some(cond) => { let mut cond_shape = match context.config.control_style() { Style::Legacy => try_opt!(constr_shape.shrink_left(offset)), Style::Rfc => try_opt!(constr_shape.offset_left(offset)), }; if context.config.control_brace_style() != ControlBraceStyle::AlwaysNextLine { // 2 = " {".len() cond_shape = try_opt!(cond_shape.sub_width(2)); } try_opt!(rewrite_pat_expr( context, self.pat, cond, self.matcher, self.connector, self.keyword, cond_shape, )) } None => String::new(), }; let force_newline_brace = context.config.control_style() == Style::Rfc && pat_expr_string.contains('\n'); // Try to format if-else on single line. if self.allow_single_line && context.config.single_line_if_else_max_width() > 0 { let trial = self.rewrite_single_line(&pat_expr_string, context, shape.width); if let Some(cond_str) = trial { if cond_str.len() <= context.config.single_line_if_else_max_width() { return Some((cond_str, 0)); } } } let cond_span = if let Some(cond) = self.cond { cond.span } else { mk_sp(self.block.span.lo, self.block.span.lo) }; // for event in event let between_kwd_cond = mk_sp( context.codemap.span_after(self.span, self.keyword.trim()), self.pat.map_or( cond_span.lo, |p| if self.matcher.is_empty() { p.span.lo } else { context.codemap.span_before(self.span, self.matcher.trim()) }, ), ); let between_kwd_cond_comment = extract_comment(between_kwd_cond, context, shape); let after_cond_comment = extract_comment(mk_sp(cond_span.hi, self.block.span.lo), context, shape); let block_sep = if self.cond.is_none() && between_kwd_cond_comment.is_some() { "" } else if context.config.control_brace_style() == ControlBraceStyle::AlwaysNextLine || force_newline_brace { alt_block_sep } else { " " }; let used_width = if pat_expr_string.contains('\n') { last_line_width(&pat_expr_string) } else { // 2 = spaces after keyword and condition. 
label_string.len() + self.keyword.len() + pat_expr_string.len() + 2 }; Some(( format!( "{}{}{}{}{}", label_string, self.keyword, between_kwd_cond_comment.as_ref().map_or( if pat_expr_string.is_empty() || pat_expr_string.starts_with('\n') { "" } else { " " }, |s| &**s, ), pat_expr_string, after_cond_comment.as_ref().map_or(block_sep, |s| &**s) ), used_width, )) } } impl<'a> Rewrite for ControlFlow<'a> { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { debug!("ControlFlow::rewrite {:?} {:?}", self, shape); let alt_block_sep = String::from("\n") + &shape.indent.block_only().to_string(context.config); let (cond_str, used_width) = try_opt!(self.rewrite_cond(context, shape, &alt_block_sep)); // If `used_width` is 0, it indicates that whole control flow is written in a single line. if used_width == 0 { return Some(cond_str); } let block_width = shape.width.checked_sub(used_width).unwrap_or(0); // This is used only for the empty block case: `{}`. So, we use 1 if we know // we should avoid the single line case. let block_width = if self.else_block.is_some() || self.nested_if { min(1, block_width) } else { block_width }; let block_shape = Shape { width: block_width, ..shape }; let mut block_context = context.clone(); block_context.is_if_else_block = self.else_block.is_some(); let block_str = try_opt!(self.block.rewrite(&block_context, block_shape)); let mut result = format!("{}{}", cond_str, block_str); if let Some(else_block) = self.else_block { let shape = Shape::indented(shape.indent, context.config); let mut last_in_chain = false; let rewrite = match else_block.node { // If the else expression is another if-else expression, prevent it // from being formatted on a single line. // Note how we're passing the original shape, as the // cost of "else" should not cascade. 
ast::ExprKind::IfLet(ref pat, ref cond, ref if_block, ref next_else_block) => { ControlFlow::new_if( cond, Some(pat), if_block, next_else_block.as_ref().map(|e| &**e), false, true, mk_sp(else_block.span.lo, self.span.hi), ).rewrite(context, shape) } ast::ExprKind::If(ref cond, ref if_block, ref next_else_block) => { ControlFlow::new_if( cond, None, if_block, next_else_block.as_ref().map(|e| &**e), false, true, mk_sp(else_block.span.lo, self.span.hi), ).rewrite(context, shape) } _ => { last_in_chain = true; // When rewriting a block, the width is only used for single line // blocks, passing 1 lets us avoid that. let else_shape = Shape { width: min(1, shape.width), ..shape }; else_block.rewrite(context, else_shape) } }; let between_kwd_else_block = mk_sp( self.block.span.hi, context.codemap.span_before( mk_sp(self.block.span.hi, else_block.span.lo), "else", ), ); let between_kwd_else_block_comment = extract_comment(between_kwd_else_block, context, shape); let after_else = mk_sp( context.codemap.span_after( mk_sp(self.block.span.hi, else_block.span.lo), "else", ), else_block.span.lo, ); let after_else_comment = extract_comment(after_else, context, shape); let between_sep = match context.config.control_brace_style() { ControlBraceStyle::AlwaysNextLine | ControlBraceStyle::ClosingNextLine => &*alt_block_sep, ControlBraceStyle::AlwaysSameLine => " ", }; let after_sep = match context.config.control_brace_style() { ControlBraceStyle::AlwaysNextLine if last_in_chain => &*alt_block_sep, _ => " ", }; try_opt!( write!( &mut result, "{}else{}", between_kwd_else_block_comment.as_ref().map_or( between_sep, |s| &**s, ), after_else_comment.as_ref().map_or(after_sep, |s| &**s) ).ok() ); result.push_str(&try_opt!(rewrite)); } Some(result) } } fn rewrite_label(label: Option<ast::SpannedIdent>) -> String { match label { Some(ident) => format!("{}: ", ident.node), None => "".to_owned(), } } fn extract_comment(span: Span, context: &RewriteContext, shape: Shape) -> Option<String> { let 
comment_str = context.snippet(span); if contains_comment(&comment_str) { let comment = try_opt!(rewrite_comment( comment_str.trim(), false, shape, context.config, )); Some(format!( "\n{indent}{}\n{indent}", comment, indent = shape.indent.to_string(context.config) )) } else { None } } fn block_contains_comment(block: &ast::Block, codemap: &CodeMap) -> bool { let snippet = codemap.span_to_snippet(block.span).unwrap(); contains_comment(&snippet) } // Checks that a block contains no statements, an expression and no comments. // FIXME: incorrectly returns false when comment is contained completely within // the expression. pub fn is_simple_block(block: &ast::Block, codemap: &CodeMap) -> bool { (block.stmts.len() == 1 && stmt_is_expr(&block.stmts[0]) && !block_contains_comment(block, codemap)) } /// Checks whether a block contains at most one statement or expression, and no comments. pub fn is_simple_block_stmt(block: &ast::Block, codemap: &CodeMap) -> bool { block.stmts.len() <= 1 && !block_contains_comment(block, codemap) } /// Checks whether a block contains no statements, expressions, or comments. pub fn is_empty_block(block: &ast::Block, codemap: &CodeMap) -> bool { block.stmts.is_empty() && !block_contains_comment(block, codemap) } pub fn stmt_is_expr(stmt: &ast::Stmt) -> bool { match stmt.node { ast::StmtKind::Expr(..) => true, _ => false, } } fn is_unsafe_block(block: &ast::Block) -> bool { if let ast::BlockCheckMode::Unsafe(..) 
= block.rules { true } else { false } } // inter-match-arm-comment-rules: // - all comments following a match arm before the start of the next arm // are about the second arm fn rewrite_match_arm_comment( context: &RewriteContext, missed_str: &str, shape: Shape, arm_indent_str: &str, ) -> Option<String> { // The leading "," is not part of the arm-comment let missed_str = match missed_str.find_uncommented(",") { Some(n) => &missed_str[n + 1..], None => &missed_str[..], }; let mut result = String::new(); // any text not preceeded by a newline is pushed unmodified to the block let first_brk = missed_str.find(|c: char| c == '\n').unwrap_or(0); result.push_str(&missed_str[..first_brk]); let missed_str = &missed_str[first_brk..]; // If missed_str had one newline, it starts with it let first = missed_str.find(|c: char| !c.is_whitespace()).unwrap_or( missed_str .len(), ); if missed_str[..first].chars().filter(|c| c == &'\n').count() >= 2 { // Excessive vertical whitespace before comment should be preserved // FIXME handle vertical whitespace better result.push('\n'); } let missed_str = missed_str[first..].trim(); if !missed_str.is_empty() { let comment = try_opt!(rewrite_comment(&missed_str, false, shape, context.config)); result.push('\n'); result.push_str(arm_indent_str); result.push_str(&comment); } Some(result) } fn rewrite_match( context: &RewriteContext, cond: &ast::Expr, arms: &[ast::Arm], shape: Shape, span: Span, ) -> Option<String> { if arms.is_empty() { return None; } // `match `cond` {` let cond_shape = match context.config.control_style() { Style::Legacy => try_opt!(shape.shrink_left(6).and_then(|s| s.sub_width(2))), Style::Rfc => try_opt!(shape.offset_left(8)), }; let cond_str = try_opt!(cond.rewrite(context, cond_shape)); let alt_block_sep = String::from("\n") + &shape.indent.block_only().to_string(context.config); let block_sep = match context.config.control_brace_style() { ControlBraceStyle::AlwaysSameLine => " ", _ => alt_block_sep.as_str(), }; let mut 
result = format!("match {}{}{{", cond_str, block_sep); let arm_shape = if context.config.indent_match_arms() { shape.block_indent(context.config.tab_spaces()) } else { shape.block_indent(0) }; let arm_indent_str = arm_shape.indent.to_string(context.config); let open_brace_pos = context.codemap.span_after( mk_sp(cond.span.hi, arm_start_pos(&arms[0])), "{", ); for (i, arm) in arms.iter().enumerate() { // Make sure we get the stuff between arms. let missed_str = if i == 0 { context.snippet(mk_sp(open_brace_pos, arm_start_pos(arm))) } else { context.snippet(mk_sp(arm_end_pos(&arms[i - 1]), arm_start_pos(arm))) }; let comment = try_opt!(rewrite_match_arm_comment( context, &missed_str, arm_shape, &arm_indent_str, )); result.push_str(&comment); result.push('\n'); result.push_str(&arm_indent_str); let arm_str = arm.rewrite(&context, arm_shape.with_max_width(context.config)); if let Some(ref arm_str) = arm_str { result.push_str(arm_str); } else { // We couldn't format the arm, just reproduce the source. let snippet = context.snippet(mk_sp(arm_start_pos(arm), arm_end_pos(arm))); result.push_str(&snippet); result.push_str(arm_comma(context.config, &arm.body)); } } // BytePos(1) = closing match brace. let last_span = mk_sp(arm_end_pos(&arms[arms.len() - 1]), span.hi - BytePos(1)); let last_comment = context.snippet(last_span); let comment = try_opt!(rewrite_match_arm_comment( context, &last_comment, arm_shape, &arm_indent_str, )); result.push_str(&comment); result.push('\n'); result.push_str(&shape.indent.to_string(context.config)); result.push('}'); Some(result) } fn arm_start_pos(arm: &ast::Arm) -> BytePos { let &ast::Arm { ref attrs, ref pats, .. 
} = arm; if !attrs.is_empty() { return attrs[0].span.lo; } pats[0].span.lo } fn arm_end_pos(arm: &ast::Arm) -> BytePos { arm.body.span.hi } fn arm_comma(config: &Config, body: &ast::Expr) -> &'static str { if config.match_block_trailing_comma() { "," } else if let ast::ExprKind::Block(ref block) = body.node { if let ast::BlockCheckMode::Default = block.rules { "" } else { "," } } else { "," } } // Match arms. impl Rewrite for ast::Arm { fn rewrite(&self, context: &RewriteContext, shape: Shape) -> Option<String> { debug!("Arm::rewrite {:?} {:?}", self, shape); let &ast::Arm { ref attrs, ref pats, ref guard, ref body, } = self; let attr_str = if !attrs.is_empty() { if contains_skip(attrs) { return None; } format!( "{}\n{}", try_opt!(attrs.rewrite(context, shape)), shape.indent.to_string(context.config) ) } else { String::new() }; // Patterns // 5 = ` => {` let pat_shape = try_opt!(shape.sub_width(5)); let pat_strs = try_opt!( pats.iter() .map(|p| p.rewrite(context, pat_shape)) .collect::<Option<Vec<_>>>() ); let all_simple = pat_strs.iter().all(|p| pat_is_simple(p)); let items: Vec<_> = pat_strs.into_iter().map(ListItem::from_str).collect(); let fmt = ListFormatting { tactic: if all_simple { DefinitiveListTactic::Mixed } else { DefinitiveListTactic::Vertical }, separator: " |", trailing_separator: SeparatorTactic::Never, shape: pat_shape, ends_with_newline: false, config: context.config, }; let pats_str = try_opt!(write_list(items, &fmt)); let guard_shape = if pats_str.contains('\n') { shape.with_max_width(context.config) } else { shape }; let guard_str = try_opt!(rewrite_guard( context, guard, guard_shape, trimmed_last_line_width(&pats_str), )); let pats_str = format!("{}{}", pats_str, guard_str); let (mut extend, body) = match body.node { ast::ExprKind::Block(ref block) if !is_unsafe_block(block) && is_simple_block(block, context.codemap) && context.config.wrap_match_arms() => { if let ast::StmtKind::Expr(ref expr) = block.stmts[0].node { (false, &**expr) } else { 
(false, &**body) } } ast::ExprKind::Call(_, ref args) => (args.len() == 1, &**body), ast::ExprKind::Closure(..) | ast::ExprKind::Struct(..) | ast::ExprKind::Tup(..) => (true, &**body), _ => (false, &**body), }; extend &= context.use_block_indent(); let comma = arm_comma(&context.config, body); let alt_block_sep = String::from("\n") + &shape.indent.block_only().to_string(context.config); let pat_width = extra_offset(&pats_str, shape); // Let's try and get the arm body on the same line as the condition. // 4 = ` => `.len() if shape.width > pat_width + comma.len() + 4 { let arm_shape = shape .offset_left(pat_width + 4) .unwrap() .sub_width(comma.len()) .unwrap(); let rewrite = nop_block_collapse(body.rewrite(context, arm_shape), arm_shape.width); let is_block = if let ast::ExprKind::Block(..) = body.node { true } else { false }; match rewrite { Some(ref body_str) if (!body_str.contains('\n') && body_str.len() <= arm_shape.width) || !context.config.wrap_match_arms() || (extend && first_line_width(body_str) <= arm_shape.width) || is_block => { let block_sep = match context.config.control_brace_style() { ControlBraceStyle::AlwaysNextLine if is_block => alt_block_sep.as_str(), _ => " ", }; return Some(format!( "{}{} =>{}{}{}", attr_str.trim_left(), pats_str, block_sep, body_str, comma )); } _ => {} } } // FIXME: we're doing a second rewrite of the expr; This may not be // necessary. 
let body_shape = try_opt!(shape.block_left(context.config.tab_spaces())); let next_line_body = try_opt!(nop_block_collapse( body.rewrite(context, body_shape), body_shape.width, )); let indent_str = shape .indent .block_indent(context.config) .to_string(context.config); let (body_prefix, body_suffix) = if context.config.wrap_match_arms() { if context.config.match_block_trailing_comma() { ("{", "},") } else { ("{", "}") } } else { ("", ",") }; let block_sep = match context.config.control_brace_style() { ControlBraceStyle::AlwaysNextLine => alt_block_sep + body_prefix + "\n", _ if body_prefix.is_empty() => "\n".to_owned(), _ => " ".to_owned() + body_prefix + "\n", }; if context.config.wrap_match_arms() { Some(format!( "{}{} =>{}{}{}\n{}{}", attr_str.trim_left(), pats_str, block_sep, indent_str, next_line_body, shape.indent.to_string(context.config), body_suffix )) } else { Some(format!( "{}{} =>{}{}{}{}", attr_str.trim_left(), pats_str, block_sep, indent_str, next_line_body, body_suffix )) } } } // A pattern is simple if it is very short or it is short-ish and just a path. // E.g. `Foo::Bar` is simple, but `Foo(..)` is not. fn pat_is_simple(pat_str: &str) -> bool { pat_str.len() <= 16 || (pat_str.len() <= 24 && pat_str.chars().all(|c| c.is_alphabetic() || c == ':')) } // The `if ...` guard on a match arm. fn rewrite_guard( context: &RewriteContext, guard: &Option<ptr::P<ast::Expr>>, shape: Shape, // The amount of space used up on this line for the pattern in // the arm (excludes offset). pattern_width: usize, ) -> Option<String> { if let Some(ref guard) = *guard { // First try to fit the guard string on the same line as the pattern. 
// 4 = ` if `, 5 = ` => {` if let Some(cond_shape) = shape .shrink_left(pattern_width + 4) .and_then(|s| s.sub_width(5)) { if let Some(cond_str) = guard .rewrite(context, cond_shape) .and_then(|s| s.rewrite(context, cond_shape)) { if !cond_str.contains('\n') { return Some(format!(" if {}", cond_str)); } } } // Not enough space to put the guard after the pattern, try a newline. // 3 == `if ` if let Some(cond_shape) = Shape::indented( shape.indent.block_indent(context.config) + 3, context.config, ).sub_width(3) { if let Some(cond_str) = guard.rewrite(context, cond_shape) { return Some(format!( "\n{}if {}", shape .indent .block_indent(context.config) .to_string(context.config), cond_str )); } } None } else { Some(String::new()) } } fn rewrite_pat_expr( context: &RewriteContext, pat: Option<&ast::Pat>, expr: &ast::Expr, matcher: &str, // Connecting piece between pattern and expression, // *without* trailing space. connector: &str, keyword: &str, shape: Shape, ) -> Option<String> { debug!("rewrite_pat_expr {:?} {:?} {:?}", shape, pat, expr); let mut pat_string = String::new(); let mut result = match pat { Some(pat) => { let matcher = if matcher.is_empty() { matcher.to_owned() } else { format!("{} ", matcher) }; let pat_shape = try_opt!(try_opt!(shape.offset_left(matcher.len())).sub_width(connector.len())); pat_string = try_opt!(pat.rewrite(context, pat_shape)); format!("{}{}{}", matcher, pat_string, connector) } None => String::new(), }; // Consider only the last line of the pat string. let extra_offset = extra_offset(&result, shape); // The expression may (partially) fit on the current line. 
if shape.width > extra_offset + 1 { let spacer = if pat.is_some() { " " } else { "" }; let expr_shape = try_opt!(shape.offset_left(extra_offset + spacer.len())); let expr_rewrite = expr.rewrite(context, expr_shape); if let Some(expr_string) = expr_rewrite { if pat.is_none() || pat_is_simple(&pat_string) || !expr_string.contains('\n') { result.push_str(spacer); result.push_str(&expr_string); return Some(result); } } } if pat.is_none() && keyword == "if" { return None; } let nested_indent = shape.indent.block_only().block_indent(context.config); // The expression won't fit on the current line, jump to next. result.push('\n'); result.push_str(&nested_indent.to_string(context.config)); let expr_rewrite = expr.rewrite(&context, Shape::indented(nested_indent, context.config)); result.push_str(&try_opt!(expr_rewrite)); Some(result) } fn rewrite_string_lit(context: &RewriteContext, span: Span, shape: Shape) -> Option<String> { let string_lit = context.snippet(span); if !context.config.format_strings() && !context.config.force_format_strings() { return Some(string_lit); } if !context.config.force_format_strings() && !string_requires_rewrite(context, span, &string_lit, shape) { return Some(string_lit); } let fmt = StringFormat { opener: "\"", closer: "\"", line_start: " ", line_end: "\\", shape: shape, trim_end: false, config: context.config, }; // Remove the quote characters. 
let str_lit = &string_lit[1..string_lit.len() - 1]; rewrite_string(str_lit, &fmt) } fn string_requires_rewrite( context: &RewriteContext, span: Span, string: &str, shape: Shape, ) -> bool { if context.codemap.lookup_char_pos(span.lo).col.0 != shape.indent.width() { return true; } for (i, line) in string.lines().enumerate() { if i == 0 { if line.len() > shape.width { return true; } } else { if line.len() > shape.width + shape.indent.width() { return true; } } } false } pub fn rewrite_call_with_binary_search<R>( context: &RewriteContext, callee: &R, args: &[&ast::Expr], span: Span, shape: Shape, ) -> Option<String> where R: Rewrite, { let closure = |callee_max_width| { // FIXME using byte lens instead of char lens (and probably all over the // place too) let callee_shape = Shape { width: callee_max_width, ..shape }; let callee_str = callee .rewrite(context, callee_shape) .ok_or(Ordering::Greater)?; rewrite_call_inner( context, &callee_str, args, span, shape, context.config.fn_call_width(), false, ) }; binary_search(1, shape.width, closure) } pub fn rewrite_call( context: &RewriteContext, callee: &str, args: &[ptr::P<ast::Expr>], span: Span, shape: Shape, ) -> Option<String> { rewrite_call_inner( context, &callee, &args.iter().map(|x| &**x).collect::<Vec<_>>(), span, shape, context.config.fn_call_width(), false, ).ok() } pub fn rewrite_call_inner<'a, T>( context: &RewriteContext, callee_str: &str, args: &[&T], span: Span, shape: Shape, args_max_width: usize, force_trailing_comma: bool, ) -> Result<String, Ordering> where T: Rewrite + Spanned + ToExpr + 'a, { // 2 = `( `, 1 = `(` let paren_overhead = if context.config.spaces_within_parens() { 2 } else { 1 }; let used_width = extra_offset(&callee_str, shape); let one_line_width = shape .width .checked_sub(used_width + 2 * paren_overhead) .ok_or(Ordering::Greater)?; let nested_shape = shape_from_fn_call_style( context, shape, used_width + 2 * paren_overhead, used_width + paren_overhead, ).ok_or(Ordering::Greater)?; let 
span_lo = context.codemap.span_after(span, "("); let args_span = mk_sp(span_lo, span.hi); let (extendable, list_str) = rewrite_call_args( context, args, args_span, nested_shape, one_line_width, args_max_width, force_trailing_comma, ).or_else(|| if context.use_block_indent() { rewrite_call_args( context, args, args_span, Shape::indented( shape.block().indent.block_indent(context.config), context.config, ), 0, 0, force_trailing_comma, ) } else { None }) .ok_or(Ordering::Less)?; if !context.use_block_indent() && need_block_indent(&list_str, nested_shape) && !extendable { let mut new_context = context.clone(); new_context.use_block = true; return rewrite_call_inner( &new_context, callee_str, args, span, shape, args_max_width, force_trailing_comma, ); } let args_shape = shape .sub_width(last_line_width(&callee_str)) .ok_or(Ordering::Less)?; Ok(format!( "{}{}", callee_str, wrap_args_with_parens( context, &list_str, extendable, args_shape, nested_shape, ) )) } fn need_block_indent(s: &str, shape: Shape) -> bool { s.lines().skip(1).any(|s| { s.find(|c| !char::is_whitespace(c)) .map_or(false, |w| w + 1 < shape.indent.width()) }) } fn rewrite_call_args<'a, T>( context: &RewriteContext, args: &[&T], span: Span, shape: Shape, one_line_width: usize, args_max_width: usize, force_trailing_comma: bool, ) -> Option<(bool, String)> where T: Rewrite + Spanned + ToExpr + 'a, { let mut item_context = context.clone(); item_context.inside_macro = false; let items = itemize_list( context.codemap, args.iter(), ")", |item| item.span().lo, |item| item.span().hi, |item| item.rewrite(&item_context, shape), span.lo, span.hi, ); let mut item_vec: Vec<_> = items.collect(); // Try letting the last argument overflow to the next line with block // indentation. If its first line fits on one line with the other arguments, // we format the function arguments horizontally. 
let tactic = try_overflow_last_arg( &item_context, &mut item_vec, &args[..], shape, one_line_width, args_max_width, ); let fmt = ListFormatting { tactic: tactic, separator: ",", trailing_separator: if force_trailing_comma { SeparatorTactic::Always } else if context.inside_macro || !context.use_block_indent() { SeparatorTactic::Never } else { context.config.trailing_comma() }, shape: shape, ends_with_newline: false, config: context.config, }; write_list(&item_vec, &fmt).map(|args_str| { (tactic != DefinitiveListTactic::Vertical, args_str) }) } fn try_overflow_last_arg<'a, T>( context: &RewriteContext, item_vec: &mut Vec<ListItem>, args: &[&T], shape: Shape, one_line_width: usize, args_max_width: usize, ) -> DefinitiveListTactic where T: Rewrite + Spanned + ToExpr + 'a, { let overflow_last = can_be_overflowed(&context, args); // Replace the last item with its first line to see if it fits with // first arguments. let (orig_last, placeholder) = if overflow_last { let mut context = context.clone(); if let Some(expr) = args[args.len() - 1].to_expr() { match expr.node { ast::ExprKind::MethodCall(..) => context.force_one_line_chain = true, _ => (), } } last_arg_shape(&context, &item_vec, shape, args_max_width) .map_or((None, None), |arg_shape| { rewrite_last_arg_with_overflow( &context, args[args.len() - 1], &mut item_vec[args.len() - 1], arg_shape, ) }) } else { (None, None) }; let tactic = definitive_tactic( &*item_vec, ListTactic::LimitedHorizontalVertical(args_max_width), one_line_width, ); // Replace the stub with the full overflowing last argument if the rewrite // succeeded and its first line fits with the other arguments. 
match (overflow_last, tactic, placeholder) { (true, DefinitiveListTactic::Horizontal, placeholder @ Some(..)) => { item_vec[args.len() - 1].item = placeholder; } (true, _, _) => { item_vec[args.len() - 1].item = orig_last; } (false, _, _) => {} } tactic } fn last_arg_shape( context: &RewriteContext, items: &Vec<ListItem>, shape: Shape, args_max_width: usize, ) -> Option<Shape> { let overhead = items.iter().rev().skip(1).fold(0, |acc, i| { acc + i.item.as_ref().map_or(0, |s| first_line_width(&s)) }); let max_width = min(args_max_width, shape.width); let arg_indent = if context.use_block_indent() { shape.block().indent.block_unindent(context.config) } else { shape.block().indent }; Some(Shape { width: try_opt!(max_width.checked_sub(overhead)), indent: arg_indent, offset: 0, }) } // Rewriting closure which is placed at the end of the function call's arg. // Returns `None` if the reformatted closure 'looks bad'. fn rewrite_last_closure( context: &RewriteContext, expr: &ast::Expr, shape: Shape, ) -> Option<String> { if let ast::ExprKind::Closure(capture, ref fn_decl, ref body, _) = expr.node { let body = match body.node { ast::ExprKind::Block(ref block) if block.stmts.len() == 1 => { stmt_expr(&block.stmts[0]).unwrap_or(body) } _ => body, }; let (prefix, extra_offset) = try_opt!(rewrite_closure_fn_decl( capture, fn_decl, body, expr.span, context, shape, )); // If the closure goes multi line before its body, do not overflow the closure. if prefix.contains('\n') { return None; } let body_shape = try_opt!(shape.offset_left(extra_offset)); // When overflowing the closure which consists of a single control flow expression, // force to use block if its condition uses multi line. if rewrite_cond(context, body, body_shape) .map(|cond| cond.contains('\n')) .unwrap_or(false) { return rewrite_closure_with_block(context, body_shape, &prefix, body); } // Seems fine, just format the closure in usual manner. 
return expr.rewrite(context, shape); } None } fn rewrite_last_arg_with_overflow<'a, T>( context: &RewriteContext, last_arg: &T, last_item: &mut ListItem, shape: Shape, ) -> (Option<String>, Option<String>) where T: Rewrite + Spanned + ToExpr + 'a, { let rewrite = if let Some(expr) = last_arg.to_expr() { match expr.node { // When overflowing the closure which consists of a single control flow expression, // force to use block if its condition uses multi line. ast::ExprKind::Closure(..) => rewrite_last_closure(context, expr, shape), _ => expr.rewrite(context, shape), } } else { last_arg.rewrite(context, shape) }; let orig_last = last_item.item.clone(); if let Some(rewrite) = rewrite { let rewrite_first_line = Some(rewrite[..first_line_width(&rewrite)].to_owned()); last_item.item = rewrite_first_line; (orig_last, Some(rewrite)) } else { (orig_last, None) } } fn can_be_overflowed<'a, T>(context: &RewriteContext, args: &[&T]) -> bool where T: Rewrite + Spanned + ToExpr + 'a, { args.last().map_or( false, |x| x.can_be_overflowed(context, args.len()), ) } pub fn can_be_overflowed_expr(context: &RewriteContext, expr: &ast::Expr, args_len: usize) -> bool { match expr.node { ast::ExprKind::Match(..) => { (context.use_block_indent() && args_len == 1) || (context.config.fn_call_style() == IndentStyle::Visual && args_len > 1) } ast::ExprKind::If(..) | ast::ExprKind::IfLet(..) | ast::ExprKind::ForLoop(..) | ast::ExprKind::Loop(..) | ast::ExprKind::While(..) | ast::ExprKind::WhileLet(..) => { context.config.combine_control_expr() && context.use_block_indent() && args_len == 1 } ast::ExprKind::Block(..) | ast::ExprKind::Closure(..) => { context.use_block_indent() || context.config.fn_call_style() == IndentStyle::Visual && args_len > 1 } ast::ExprKind::Call(..) | ast::ExprKind::MethodCall(..) | ast::ExprKind::Mac(..) | ast::ExprKind::Struct(..) => context.use_block_indent() && args_len == 1, ast::ExprKind::Tup(..) 
=> context.use_block_indent(), ast::ExprKind::AddrOf(_, ref expr) | ast::ExprKind::Box(ref expr) | ast::ExprKind::Try(ref expr) | ast::ExprKind::Unary(_, ref expr) | ast::ExprKind::Cast(ref expr, _) => can_be_overflowed_expr(context, expr, args_len), _ => false, } } fn paren_overhead(context: &RewriteContext) -> usize { if context.config.spaces_within_parens() { 4 } else { 2 } } pub fn wrap_args_with_parens( context: &RewriteContext, args_str: &str, is_extendable: bool, shape: Shape, nested_shape: Shape, ) -> String { if !context.use_block_indent() || (context.inside_macro && !args_str.contains('\n') && args_str.len() + paren_overhead(context) <= shape.width) || is_extendable { if context.config.spaces_within_parens() && args_str.len() > 0 { format!("( {} )", args_str) } else { format!("({})", args_str) } } else { format!( "(\n{}{}\n{})", nested_shape.indent.to_string(context.config), args_str, shape.block().indent.to_string(context.config) ) } } fn rewrite_paren(context: &RewriteContext, subexpr: &ast::Expr, shape: Shape) -> Option<String> { debug!("rewrite_paren, shape: {:?}", shape); let paren_overhead = paren_overhead(context); let sub_shape = try_opt!(shape.sub_width(paren_overhead / 2)).visual_indent(paren_overhead / 2); let paren_wrapper = |s: &str| if context.config.spaces_within_parens() && s.len() > 0 { format!("( {} )", s) } else { format!("({})", s) }; let subexpr_str = try_opt!(subexpr.rewrite(context, sub_shape)); debug!("rewrite_paren, subexpr_str: `{:?}`", subexpr_str); if subexpr_str.contains('\n') { Some(paren_wrapper(&subexpr_str)) } else { if subexpr_str.len() + paren_overhead <= shape.width { Some(paren_wrapper(&subexpr_str)) } else { let sub_shape = try_opt!(shape.offset_left(2)); let subexpr_str = try_opt!(subexpr.rewrite(context, sub_shape)); Some(paren_wrapper(&subexpr_str)) } } } fn rewrite_index( expr: &ast::Expr, index: &ast::Expr, context: &RewriteContext, shape: Shape, ) -> Option<String> { let expr_str = try_opt!(expr.rewrite(context, 
shape)); let (lbr, rbr) = if context.config.spaces_within_square_brackets() { ("[ ", " ]") } else { ("[", "]") }; let offset = expr_str.len() + lbr.len(); if let Some(index_shape) = shape.visual_indent(offset).sub_width(offset + rbr.len()) { if let Some(index_str) = index.rewrite(context, index_shape) { return Some(format!("{}{}{}{}", expr_str, lbr, index_str, rbr)); } } let indent = shape.indent.block_indent(&context.config); let indent = indent.to_string(&context.config); // FIXME this is not right, since we don't take into account that shape.width // might be reduced from max_width by something on the right. let budget = try_opt!(context.config.max_width().checked_sub( indent.len() + lbr.len() + rbr.len(), )); let index_str = try_opt!(index.rewrite(context, Shape::legacy(budget, shape.indent))); Some(format!( "{}\n{}{}{}{}", expr_str, indent, lbr, index_str, rbr )) } fn rewrite_struct_lit<'a>( context: &RewriteContext, path: &ast::Path, fields: &'a [ast::Field], base: Option<&'a ast::Expr>, span: Span, shape: Shape, ) -> Option<String> { debug!("rewrite_struct_lit: shape {:?}", shape); enum StructLitField<'a> { Regular(&'a ast::Field), Base(&'a ast::Expr), } // 2 = " {".len() let path_shape = try_opt!(shape.sub_width(2)); let path_str = try_opt!(rewrite_path( context, PathContext::Expr, None, path, path_shape, )); if fields.len() == 0 && base.is_none() { return Some(format!("{} {{}}", path_str)); } let field_iter = fields .into_iter() .map(StructLitField::Regular) .chain(base.into_iter().map(StructLitField::Base)); // Foo { a: Foo } - indent is +3, width is -5. 
let (h_shape, v_shape) = try_opt!(struct_lit_shape(shape, context, path_str.len() + 3, 2)); let span_lo = |item: &StructLitField| match *item { StructLitField::Regular(field) => field.span.lo, StructLitField::Base(expr) => { let last_field_hi = fields.last().map_or(span.lo, |field| field.span.hi); let snippet = context.snippet(mk_sp(last_field_hi, expr.span.lo)); let pos = snippet.find_uncommented("..").unwrap(); last_field_hi + BytePos(pos as u32) } }; let span_hi = |item: &StructLitField| match *item { StructLitField::Regular(field) => field.span.hi, StructLitField::Base(expr) => expr.span.hi, }; let rewrite = |item: &StructLitField| match *item { StructLitField::Regular(field) => { // The 1 taken from the v_budget is for the comma. rewrite_field(context, field, try_opt!(v_shape.sub_width(1))) } StructLitField::Base(expr) => { // 2 = .. expr.rewrite(context, try_opt!(v_shape.shrink_left(2))) .map(|s| format!("..{}", s)) } }; let items = itemize_list( context.codemap, field_iter, "}", span_lo, span_hi, rewrite, context.codemap.span_after(span, "{"), span.hi, ); let item_vec = items.collect::<Vec<_>>(); let tactic = struct_lit_tactic(h_shape, context, &item_vec); let nested_shape = shape_for_tactic(tactic, h_shape, v_shape); let fmt = struct_lit_formatting(nested_shape, tactic, context, base.is_some()); let fields_str = try_opt!(write_list(&item_vec, &fmt)); let fields_str = if context.config.struct_lit_style() == IndentStyle::Block && (fields_str.contains('\n') || context.config.struct_lit_multiline_style() == MultilineStyle::ForceMulti || fields_str.len() > h_shape.map(|s| s.width).unwrap_or(0)) { format!( "\n{}{}\n{}", v_shape.indent.to_string(context.config), fields_str, shape.indent.to_string(context.config) ) } else { // One liner or visual indent. format!(" {} ", fields_str) }; Some(format!("{} {{{}}}", path_str, fields_str)) // FIXME if context.config.struct_lit_style() == Visual, but we run out // of space, we should fall back to BlockIndent. 
// (tail of `rewrite_struct_lit`: closing brace continued from the previous line)
}

/// Returns the separator written between a struct-literal field name and its
/// value: `:` with spaces on either side as configured.
pub fn struct_lit_field_separator(config: &Config) -> &str {
    colon_spaces(
        config.space_before_struct_lit_field_colon(),
        config.space_after_struct_lit_field_colon(),
    )
}

/// Rewrites a single field of a struct literal.
///
/// Shorthand fields (`Foo { x }`) are emitted as just the name. Otherwise the
/// field becomes `attrs name<sep>expr`; if the value does not fit after the
/// name, it is retried on its own block-indented line. Returns `None` when no
/// formatting fits.
fn rewrite_field(context: &RewriteContext, field: &ast::Field, shape: Shape) -> Option<String> {
    let name = &field.ident.node.to_string();
    if field.is_shorthand {
        Some(name.to_string())
    } else {
        let separator = struct_lit_field_separator(context.config);
        // Width consumed on the line before the value starts.
        let overhead = name.len() + separator.len();
        let mut expr_shape = try_opt!(shape.sub_width(overhead));
        expr_shape.offset += overhead;
        let expr = field.expr.rewrite(context, expr_shape);
        let mut attrs_str = try_opt!((*field.attrs).rewrite(context, shape));
        if !attrs_str.is_empty() {
            attrs_str.push_str(&format!("\n{}", shape.indent.to_string(context.config)));
        };
        match expr {
            Some(e) => Some(format!("{}{}{}{}", attrs_str, name, separator, e)),
            None => {
                // The value did not fit on the same line; put it on the next,
                // block-indented line instead.
                let expr_offset = shape.indent.block_indent(context.config);
                let expr = field.expr.rewrite(
                    context,
                    Shape::indented(expr_offset, context.config),
                );
                expr.map(|s| {
                    format!(
                        "{}{}:\n{}{}",
                        attrs_str,
                        name,
                        expr_offset.to_string(&context.config),
                        s
                    )
                })
            }
        }
    }
}

/// Computes the shape available to a function call's argument list, honoring
/// the configured call style (block indent vs. visual indent).
fn shape_from_fn_call_style(
    context: &RewriteContext,
    shape: Shape,
    overhead: usize,
    offset: usize,
) -> Option<Shape> {
    if context.use_block_indent() {
        // 1 = ","
        shape
            .block()
            .block_indent(context.config.tab_spaces())
            .with_max_width(context.config)
            .sub_width(1)
    } else {
        shape.visual_indent(offset).sub_width(overhead)
    }
}

/// Rewrites a tuple expression using visual indentation (elements aligned
/// after the opening paren). Continues on the next source line.
fn rewrite_tuple_in_visual_indent_style<'a, T>(
    context: &RewriteContext,
    items: &[&T],
    span: Span,
    shape: Shape,
) -> Option<String>
where
    T: Rewrite + Spanned + ToExpr + 'a,
{
    let mut items = items.iter();
    // In case of length 1, need a trailing comma
    debug!("rewrite_tuple_in_visual_indent_style {:?}", shape);
    if items.len() == 1 {
        // 3 = "(" + ",)"
        let nested_shape = try_opt!(shape.sub_width(3)).visual_indent(1);
        return items.next().unwrap().rewrite(context, nested_shape).map(
            |s| {
                if
context.config.spaces_within_parens() { format!("( {}, )", s) } else { format!("({},)", s) } }, ); } let list_lo = context.codemap.span_after(span, "("); let nested_shape = try_opt!(shape.sub_width(2)).visual_indent(1); let items = itemize_list( context.codemap, items, ")", |item| item.span().lo, |item| item.span().hi, |item| item.rewrite(context, nested_shape), list_lo, span.hi - BytePos(1), ); let list_str = try_opt!(format_item_list(items, nested_shape, context.config)); if context.config.spaces_within_parens() && list_str.len() > 0 { Some(format!("( {} )", list_str)) } else { Some(format!("({})", list_str)) } } pub fn rewrite_tuple<'a, T>( context: &RewriteContext, items: &[&T], span: Span, shape: Shape, ) -> Option<String> where T: Rewrite + Spanned + ToExpr + 'a, { debug!("rewrite_tuple {:?}", shape); if context.use_block_indent() { // We use the same rule as funcation call for rewriting tuple. rewrite_call_inner( context, &String::new(), items, span, shape, context.config.fn_call_width(), items.len() == 1, ).ok() } else { rewrite_tuple_in_visual_indent_style(context, items, span, shape) } } pub fn rewrite_unary_prefix<R: Rewrite>( context: &RewriteContext, prefix: &str, rewrite: &R, shape: Shape, ) -> Option<String> { rewrite .rewrite(context, try_opt!(shape.offset_left(prefix.len()))) .map(|r| format!("{}{}", prefix, r)) } // FIXME: this is probably not correct for multi-line Rewrites. we should // subtract suffix.len() from the last line budget, not the first! pub fn rewrite_unary_suffix<R: Rewrite>( context: &RewriteContext, suffix: &str, rewrite: &R, shape: Shape, ) -> Option<String> { rewrite .rewrite(context, try_opt!(shape.sub_width(suffix.len()))) .map(|mut r| { r.push_str(suffix); r }) } fn rewrite_unary_op( context: &RewriteContext, op: &ast::UnOp, expr: &ast::Expr, shape: Shape, ) -> Option<String> { // For some reason, an UnOp is not spanned like BinOp! 
let operator_str = match *op { ast::UnOp::Deref => "*", ast::UnOp::Not => "!", ast::UnOp::Neg => "-", }; rewrite_unary_prefix(context, operator_str, expr, shape) } fn rewrite_assignment( context: &RewriteContext, lhs: &ast::Expr, rhs: &ast::Expr, op: Option<&ast::BinOp>, shape: Shape, ) -> Option<String> { let operator_str = match op { Some(op) => context.snippet(op.span), None => "=".to_owned(), }; // 1 = space between lhs and operator. let lhs_shape = try_opt!(shape.sub_width(operator_str.len() + 1)); let lhs_str = format!( "{} {}", try_opt!(lhs.rewrite(context, lhs_shape)), operator_str ); rewrite_assign_rhs(context, lhs_str, rhs, shape) } // The left hand side must contain everything up to, and including, the // assignment operator. pub fn rewrite_assign_rhs<S: Into<String>>( context: &RewriteContext, lhs: S, ex: &ast::Expr, shape: Shape, ) -> Option<String> { let mut result = lhs.into(); let last_line_width = last_line_width(&result) - if result.contains('\n') { shape.indent.width() } else { 0 }; // 1 = space between operator and rhs. let orig_shape = try_opt!(shape.block_indent(0).offset_left(last_line_width + 1)); let rhs = match ex.node { ast::ExprKind::Mac(ref mac) => { match rewrite_macro(mac, None, context, orig_shape, MacroPosition::Expression) { None if !context.snippet(ex.span).contains("\n") => { context.snippet(ex.span).rewrite(context, orig_shape) } rhs @ _ => rhs, } } _ => ex.rewrite(context, orig_shape), }; fn count_line_breaks(src: &str) -> usize { src.chars().filter(|&x| x == '\n').count() } match rhs { Some(ref new_str) if count_line_breaks(new_str) < 2 => { result.push(' '); result.push_str(new_str); } _ => { // Expression did not fit on the same line as the identifier or is // at least three lines big. Try splitting the line and see // if that works better. let new_shape = try_opt!(shape.block_left(context.config.tab_spaces())); let new_rhs = ex.rewrite(context, new_shape); // FIXME: DRY! 
// (tail of `rewrite_assign_rhs`, continued from the previous line: choose
// between the same-line rewrite `rhs` and the block-indented retry `new_rhs`)
match (rhs, new_rhs) {
    // Prefer the block-indented form when it saves more than one line break,
    // or when only it fits in the available width.
    (Some(ref orig_rhs), Some(ref replacement_rhs))
        if count_line_breaks(orig_rhs) > count_line_breaks(replacement_rhs) + 1 ||
            (orig_rhs.rewrite(context, shape).is_none() &&
                replacement_rhs.rewrite(context, new_shape).is_some()) =>
    {
        result.push_str(&format!("\n{}", new_shape.indent.to_string(context.config)));
        result.push_str(replacement_rhs);
    }
    // Only the next-line form succeeded.
    (None, Some(ref final_rhs)) => {
        result.push_str(&format!("\n{}", new_shape.indent.to_string(context.config)));
        result.push_str(final_rhs);
    }
    // Neither rewrite worked: give up on this expression.
    (None, None) => return None,
    // Keep the original same-line rewrite.
    (Some(ref orig_rhs), _) => {
        result.push(' ');
        result.push_str(orig_rhs);
    }
}
}
}
Some(result)
}

/// Rewrites an address-of expression: `&expr` or `&mut expr`.
fn rewrite_expr_addrof(
    context: &RewriteContext,
    mutability: ast::Mutability,
    expr: &ast::Expr,
    shape: Shape,
) -> Option<String> {
    let operator_str = match mutability {
        ast::Mutability::Immutable => "&",
        ast::Mutability::Mutable => "&mut ",
    };
    rewrite_unary_prefix(context, operator_str, expr, shape)
}

/// Abstraction over AST nodes that can appear in argument-list position
/// (expressions, types, tuple-pattern fields), letting the call/tuple
/// rewriting code treat them uniformly.
pub trait ToExpr {
    /// Returns the underlying expression, if this node is one.
    fn to_expr(&self) -> Option<&ast::Expr>;
    /// Whether this node, as the last of `len` arguments, may "overflow" onto
    /// following lines instead of forcing a vertical argument list.
    fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool;
}

impl ToExpr for ast::Expr {
    fn to_expr(&self) -> Option<&ast::Expr> {
        Some(self)
    }

    fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool {
        can_be_overflowed_expr(context, self, len)
    }
}

impl ToExpr for ast::Ty {
    // Types are never expressions.
    fn to_expr(&self) -> Option<&ast::Expr> {
        None
    }

    fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool {
        can_be_overflowed_type(context, self, len)
    }
}

impl<'a> ToExpr for TuplePatField<'a> {
    // Patterns are never expressions.
    fn to_expr(&self) -> Option<&ast::Expr> {
        None
    }

    fn can_be_overflowed(&self, context: &RewriteContext, len: usize) -> bool {
        can_be_overflowed_pat(context, self, len)
    }
}
use std::slice; use std::fmt; use std::ops::Index; #[derive(Debug,Clone,Copy,PartialEq)] pub enum YUVRange { Limited, Full } impl fmt::Display for YUVRange { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { YUVRange::Limited => write!(f, "Limited range"), YUVRange::Full => write!(f, "Full range") } } } #[derive(Debug,Clone,Copy,PartialEq)] pub enum YUVSystem { YCbCr(YUVRange), YCoCg, ICtCp } impl fmt::Display for YUVSystem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::YUVSystem::*; match *self { YCbCr(range) => write!(f, "YCbCr ({})", range), YCoCg => write!(f, "YCbCg"), ICtCp => write!(f, "ICtCp"), } } } #[derive(Debug, Clone,Copy,PartialEq)] pub enum TrichromaticEncodingSystem { RGB, YUV(YUVSystem), XYZ } impl fmt::Display for TrichromaticEncodingSystem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::TrichromaticEncodingSystem::*; match *self { YUV(system) => write!(f, "{}", system), RGB => write!(f, "RGB"), XYZ => write!(f, "XYZ"), } } } #[derive(Debug, Clone,Copy,PartialEq)] pub enum ColorModel { Trichromatic(TrichromaticEncodingSystem), CMYK, HSV, LAB, } impl fmt::Display for ColorModel { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ColorModel::*; match *self { Trichromatic(system) => write!(f, "{}", system), CMYK => write!(f, "CMYK"), HSV => write!(f, "HSV"), LAB => write!(f, "LAB"), } } } impl ColorModel { pub fn get_default_components(&self) -> usize { match *self { ColorModel::CMYK => 4, _ => 3, } } } #[derive(Clone,Copy,Debug,PartialEq)] pub struct Chromaton { h_ss: u8, v_ss: u8, packed: bool, depth: u8, shift: u8, comp_offs: u8, next_elem: u8, } fn align(v: usize, a: usize) -> usize { (v + a - 1) & !(a - 1) } impl Chromaton { pub fn get_subsampling(&self) -> (u8, u8) { (self.h_ss, self.v_ss) } pub fn is_packed(&self) -> bool { self.packed } pub fn get_depth(&self) -> u8 { self.depth } pub fn get_shift(&self) -> u8 { self.shift } pub fn get_offset(&self) -> u8 { self.comp_offs 
} pub fn get_step(&self) -> u8 { self.next_elem } pub fn get_width(&self, width: usize) -> usize { (width + ((1 << self.v_ss) - 1)) >> self.v_ss } pub fn get_height(&self, height: usize) -> usize { (height + ((1 << self.h_ss) - 1)) >> self.h_ss } pub fn get_linesize(&self, width: usize, alignment: usize) -> usize { let d = self.depth as usize; align((self.get_width(width) * d + d - 1) >> 3, alignment) } pub fn get_data_size(&self, width: usize, height: usize, align: usize) -> usize { let nh = (height + ((1 << self.v_ss) - 1)) >> self.v_ss; self.get_linesize(width, align) * nh } } impl fmt::Display for Chromaton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let pfmt = if self.packed { let mask = ((1 << self.depth) - 1) << self.shift; format!("packed(+{},{:X}, step {})", self.comp_offs, mask, self.next_elem) } else { format!("planar({},{})", self.comp_offs, self.next_elem) }; write!(f, "({}x{}, {})", self.h_ss, self.v_ss, pfmt) } } #[derive(Clone,Copy,PartialEq,Debug)] pub struct Formaton { model: ColorModel, components: u8, comp_info: [Option<Chromaton>; 5], elem_size: u8, be: bool, alpha: bool, palette: bool, } bitflags! 
{ pub flags Flags: u8 { const BE = 0x01, const ALPHA = 0x02, const PALETTE = 0x04, } } impl Formaton { pub fn new(model: ColorModel, components: &[Chromaton], flags: Flags, elem_size: u8) -> Self { let be = flags.contains(BE); let alpha = flags.contains(ALPHA); let palette = flags.contains(PALETTE); let mut c: [Option<Chromaton>; 5] = [None; 5]; if components.len() > 5 { panic!("too many components"); } for (i, v) in components.iter().enumerate() { c[i] = Some(*v); } Formaton { model: model, components: components.len() as u8, comp_info: c, elem_size: elem_size, be: be, alpha: alpha, palette: palette, } } pub fn get_model(&self) -> ColorModel { self.model } pub fn get_num_comp(&self) -> usize { self.components as usize } pub fn get_chromaton(&self, idx: usize) -> Option<Chromaton> { if idx < self.comp_info.len() { return self.comp_info[idx]; } None } pub fn is_be(&self) -> bool { self.be } pub fn has_alpha(&self) -> bool { self.alpha } pub fn is_paletted(&self) -> bool { self.palette } pub fn get_elem_size(&self) -> u8 { self.elem_size } pub fn iter<'a>(&'a self) -> slice::Iter<'a, Option<Chromaton>> { self.comp_info.iter() } } impl<'a> Index<usize> for &'a Formaton { type Output = Option<Chromaton>; fn index(&self, index: usize) -> &Self::Output { self.comp_info.index(index) } } impl<'a> IntoIterator for &'a Formaton { type Item = &'a Option<Chromaton>; type IntoIter = slice::Iter<'a, Option<Chromaton>>; fn into_iter(self) -> Self::IntoIter { self.comp_info.iter() } } impl fmt::Display for Formaton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let end = if self.be { "BE" } else { "LE" }; let palstr = if self.palette { "palette " } else { "" }; let astr = if self.alpha { "alpha " } else { "" }; let mut str = format!("Formaton for {} ({}{}elem {} size {}): ", self.model, palstr, astr, end, self.elem_size); for &i in self.into_iter() { if let Some(chr) = i { str = format!("{} {}", str, chr); } } write!(f, "[{}]", str) } } macro_rules! 
chromaton { ($hs: expr, $vs: expr, $pck: expr, $d: expr, $sh: expr, $co: expr, $ne: expr) => ({ Some(Chromaton { h_ss: $hs, v_ss: $vs, packed: $pck, depth: $d, shift: $sh, comp_offs: $co, next_elem: $ne }) }); (yuv8; $hs: expr, $vs: expr, $co: expr) => ({ chromaton!($hs, $vs, false, 8, 0, $co, 1) }); (packrgb; $d: expr, $s: expr, $co: expr, $ne: expr) => ({ chromaton!(0, 0, true, $d, $s, $co, $ne) }); (pal8; $co: expr) => ({ chromaton!(0, 0, true, 8, 0, $co, 3) }); } pub mod formats { use pixel::*; use self::ColorModel::*; use self::TrichromaticEncodingSystem::*; use self::YUVSystem::*; use self::YUVRange::*; pub const YUV420: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 1, 1, 1), chromaton!(yuv8; 1, 1, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const YUV410: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 2, 2, 1), chromaton!(yuv8; 2, 2, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const PAL8: &Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(pal8; 0), chromaton!(pal8; 1), chromaton!(pal8; 2), None, None], elem_size: 3, be: false, alpha: false, palette: true, }; pub const RGB565: &Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(packrgb; 5, 11, 0, 2), chromaton!(packrgb; 6, 5, 0, 2), chromaton!(packrgb; 5, 0, 0, 2), None, None], elem_size: 2, be: false, alpha: false, palette: false, }; pub const RGB24: &Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(packrgb; 8, 0, 2, 3), chromaton!(packrgb; 8, 0, 1, 3), chromaton!(packrgb; 8, 0, 0, 3), None, None], elem_size: 3, be: false, alpha: false, palette: false, }; } #[cfg(test)] mod test { mod formats { use super::super::*; #[test] fn fmt() { 
println!("formaton yuv- {}", formats::YUV420); println!("formaton pal- {}", formats::PAL8); println!("formaton rgb565- {}", formats::RGB565); } #[test] fn comparison() { use std::sync::Arc; let rcf = Arc::new(*formats::YUV420); let ref cf = formats::YUV420.clone(); if cf != formats::YUV420 { panic!("cf"); } if *rcf != *formats::YUV420 { panic!("rcf"); } } } } data: Add YUV411, YUV422 and YUV444 Formatons use std::slice; use std::fmt; use std::ops::Index; #[derive(Debug,Clone,Copy,PartialEq)] pub enum YUVRange { Limited, Full } impl fmt::Display for YUVRange { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { YUVRange::Limited => write!(f, "Limited range"), YUVRange::Full => write!(f, "Full range") } } } #[derive(Debug,Clone,Copy,PartialEq)] pub enum YUVSystem { YCbCr(YUVRange), YCoCg, ICtCp } impl fmt::Display for YUVSystem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::YUVSystem::*; match *self { YCbCr(range) => write!(f, "YCbCr ({})", range), YCoCg => write!(f, "YCbCg"), ICtCp => write!(f, "ICtCp"), } } } #[derive(Debug, Clone,Copy,PartialEq)] pub enum TrichromaticEncodingSystem { RGB, YUV(YUVSystem), XYZ } impl fmt::Display for TrichromaticEncodingSystem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::TrichromaticEncodingSystem::*; match *self { YUV(system) => write!(f, "{}", system), RGB => write!(f, "RGB"), XYZ => write!(f, "XYZ"), } } } #[derive(Debug, Clone,Copy,PartialEq)] pub enum ColorModel { Trichromatic(TrichromaticEncodingSystem), CMYK, HSV, LAB, } impl fmt::Display for ColorModel { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ColorModel::*; match *self { Trichromatic(system) => write!(f, "{}", system), CMYK => write!(f, "CMYK"), HSV => write!(f, "HSV"), LAB => write!(f, "LAB"), } } } impl ColorModel { pub fn get_default_components(&self) -> usize { match *self { ColorModel::CMYK => 4, _ => 3, } } } #[derive(Clone,Copy,Debug,PartialEq)] pub struct Chromaton { h_ss: u8, v_ss: 
u8, packed: bool, depth: u8, shift: u8, comp_offs: u8, next_elem: u8, } fn align(v: usize, a: usize) -> usize { (v + a - 1) & !(a - 1) } impl Chromaton { pub fn get_subsampling(&self) -> (u8, u8) { (self.h_ss, self.v_ss) } pub fn is_packed(&self) -> bool { self.packed } pub fn get_depth(&self) -> u8 { self.depth } pub fn get_shift(&self) -> u8 { self.shift } pub fn get_offset(&self) -> u8 { self.comp_offs } pub fn get_step(&self) -> u8 { self.next_elem } pub fn get_width(&self, width: usize) -> usize { (width + ((1 << self.v_ss) - 1)) >> self.v_ss } pub fn get_height(&self, height: usize) -> usize { (height + ((1 << self.h_ss) - 1)) >> self.h_ss } pub fn get_linesize(&self, width: usize, alignment: usize) -> usize { let d = self.depth as usize; align((self.get_width(width) * d + d - 1) >> 3, alignment) } pub fn get_data_size(&self, width: usize, height: usize, align: usize) -> usize { let nh = (height + ((1 << self.v_ss) - 1)) >> self.v_ss; self.get_linesize(width, align) * nh } } impl fmt::Display for Chromaton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let pfmt = if self.packed { let mask = ((1 << self.depth) - 1) << self.shift; format!("packed(+{},{:X}, step {})", self.comp_offs, mask, self.next_elem) } else { format!("planar({},{})", self.comp_offs, self.next_elem) }; write!(f, "({}x{}, {})", self.h_ss, self.v_ss, pfmt) } } #[derive(Clone,Copy,PartialEq,Debug)] pub struct Formaton { model: ColorModel, components: u8, comp_info: [Option<Chromaton>; 5], elem_size: u8, be: bool, alpha: bool, palette: bool, } bitflags! 
{ pub flags Flags: u8 { const BE = 0x01, const ALPHA = 0x02, const PALETTE = 0x04, } } impl Formaton { pub fn new(model: ColorModel, components: &[Chromaton], flags: Flags, elem_size: u8) -> Self { let be = flags.contains(BE); let alpha = flags.contains(ALPHA); let palette = flags.contains(PALETTE); let mut c: [Option<Chromaton>; 5] = [None; 5]; if components.len() > 5 { panic!("too many components"); } for (i, v) in components.iter().enumerate() { c[i] = Some(*v); } Formaton { model: model, components: components.len() as u8, comp_info: c, elem_size: elem_size, be: be, alpha: alpha, palette: palette, } } pub fn get_model(&self) -> ColorModel { self.model } pub fn get_num_comp(&self) -> usize { self.components as usize } pub fn get_chromaton(&self, idx: usize) -> Option<Chromaton> { if idx < self.comp_info.len() { return self.comp_info[idx]; } None } pub fn is_be(&self) -> bool { self.be } pub fn has_alpha(&self) -> bool { self.alpha } pub fn is_paletted(&self) -> bool { self.palette } pub fn get_elem_size(&self) -> u8 { self.elem_size } pub fn iter<'a>(&'a self) -> slice::Iter<'a, Option<Chromaton>> { self.comp_info.iter() } } impl<'a> Index<usize> for &'a Formaton { type Output = Option<Chromaton>; fn index(&self, index: usize) -> &Self::Output { self.comp_info.index(index) } } impl<'a> IntoIterator for &'a Formaton { type Item = &'a Option<Chromaton>; type IntoIter = slice::Iter<'a, Option<Chromaton>>; fn into_iter(self) -> Self::IntoIter { self.comp_info.iter() } } impl fmt::Display for Formaton { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let end = if self.be { "BE" } else { "LE" }; let palstr = if self.palette { "palette " } else { "" }; let astr = if self.alpha { "alpha " } else { "" }; let mut str = format!("Formaton for {} ({}{}elem {} size {}): ", self.model, palstr, astr, end, self.elem_size); for &i in self.into_iter() { if let Some(chr) = i { str = format!("{} {}", str, chr); } } write!(f, "[{}]", str) } } macro_rules! 
chromaton { ($hs: expr, $vs: expr, $pck: expr, $d: expr, $sh: expr, $co: expr, $ne: expr) => ({ Some(Chromaton { h_ss: $hs, v_ss: $vs, packed: $pck, depth: $d, shift: $sh, comp_offs: $co, next_elem: $ne }) }); (yuv8; $hs: expr, $vs: expr, $co: expr) => ({ chromaton!($hs, $vs, false, 8, 0, $co, 1) }); (packrgb; $d: expr, $s: expr, $co: expr, $ne: expr) => ({ chromaton!(0, 0, true, $d, $s, $co, $ne) }); (pal8; $co: expr) => ({ chromaton!(0, 0, true, 8, 0, $co, 3) }); } pub mod formats { use pixel::*; use self::ColorModel::*; use self::TrichromaticEncodingSystem::*; use self::YUVSystem::*; use self::YUVRange::*; pub const YUV444: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 0, 0, 1), chromaton!(yuv8; 0, 0, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const YUV422: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 0, 1, 1), chromaton!(yuv8; 0, 1, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const YUV420: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 1, 1, 1), chromaton!(yuv8; 1, 1, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const YUV411: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 2, 0, 1), chromaton!(yuv8; 2, 0, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const YUV410: &Formaton = &Formaton { model: Trichromatic(YUV(YCbCr(Limited))), components: 3, comp_info: [chromaton!(0, 0, false, 8, 0, 0, 1), chromaton!(yuv8; 2, 1, 1), chromaton!(yuv8; 2, 1, 2), None, None], elem_size: 0, be: false, alpha: false, palette: false, }; pub const PAL8: 
&Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(pal8; 0), chromaton!(pal8; 1), chromaton!(pal8; 2), None, None], elem_size: 3, be: false, alpha: false, palette: true, }; pub const RGB565: &Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(packrgb; 5, 11, 0, 2), chromaton!(packrgb; 6, 5, 0, 2), chromaton!(packrgb; 5, 0, 0, 2), None, None], elem_size: 2, be: false, alpha: false, palette: false, }; pub const RGB24: &Formaton = &Formaton { model: Trichromatic(RGB), components: 3, comp_info: [chromaton!(packrgb; 8, 0, 2, 3), chromaton!(packrgb; 8, 0, 1, 3), chromaton!(packrgb; 8, 0, 0, 3), None, None], elem_size: 3, be: false, alpha: false, palette: false, }; } #[cfg(test)] mod test { mod formats { use super::super::*; #[test] fn fmt() { println!("formaton yuv- {}", formats::YUV420); println!("formaton pal- {}", formats::PAL8); println!("formaton rgb565- {}", formats::RGB565); } #[test] fn comparison() { use std::sync::Arc; let rcf = Arc::new(*formats::YUV420); let ref cf = formats::YUV420.clone(); if cf != formats::YUV420 { panic!("cf"); } if *rcf != *formats::YUV420 { panic!("rcf"); } } } }
use std::io::{self, Read, Write}; use ::BufferRedirect; /// Hold output until dropped. On drop, the held output is sent to the stdout/stderr. /// /// Note: This will ignore IO errors when printing held output. pub struct Hold { buf_redir: Option<BufferRedirect>, is_stdout: bool, } impl Hold { /// Hold stderr output. pub fn stderr() -> io::Result<Hold> { Ok(Hold { buf_redir: Some(try!(BufferRedirect::stderr())), is_stdout: false, }) } /// Hold stdout output. pub fn stdout() -> io::Result<Hold> { Ok(Hold { buf_redir: Some(try!(BufferRedirect::stdout())), is_stdout: true, }) } } impl Drop for Hold { fn drop(&mut self) { fn read_into<R: Read, W: Write>(mut from: R, mut to: W) -> io::Result<()> { // TODO: use sendfile? let mut buf = [0u8; 4096]; loop { match from.read(&mut buf) { Ok(0) => return Ok(()), Ok(size) => try!(to.write_all(&buf[..size])), Err(e) => return Err(e), } } } let from = self.buf_redir.take().unwrap().into_inner(); // Ignore errors. if self.is_stdout { let stdout = io::stdout(); let _ = read_into(from, stdout.lock()); } else { let stderr = io::stderr(); let _ = read_into(from, stderr.lock()); } } } flush on drop in hold use std::io::{self, Read, Write}; use ::BufferRedirect; /// Hold output until dropped. On drop, the held output is sent to the stdout/stderr. /// /// Note: This will ignore IO errors when printing held output. pub struct Hold { buf_redir: Option<BufferRedirect>, is_stdout: bool, } impl Hold { /// Hold stderr output. pub fn stderr() -> io::Result<Hold> { Ok(Hold { buf_redir: Some(try!(BufferRedirect::stderr())), is_stdout: false, }) } /// Hold stdout output. pub fn stdout() -> io::Result<Hold> { Ok(Hold { buf_redir: Some(try!(BufferRedirect::stdout())), is_stdout: true, }) } } impl Drop for Hold { fn drop(&mut self) { fn read_into<R: Read, W: Write>(mut from: R, mut to: W) { // TODO: use sendfile? 
let mut buf = [0u8; 4096]; loop { // Ignore errors match from.read(&mut buf) { Ok(0) => break, Ok(size) => if let Err(_) = to.write_all(&buf[..size]) { break }, Err(_) => break, } } // Just in case... let _ = to.flush(); } let from = self.buf_redir.take().unwrap().into_inner(); // Ignore errors. if self.is_stdout { let stdout = io::stdout(); read_into(from, stdout.lock()); } else { let stderr = io::stderr(); read_into(from, stderr.lock()); } } }
//! Rendering highlighted code as HTML+CSS use std::fmt::Write; use parsing::{ScopeStackOp, BasicScopeStackOp, Scope, ScopeStack, SyntaxReference, ParseState, SyntaxSet, SCOPE_REPO}; use easy::{HighlightLines, HighlightFile}; use highlighting::{Color, FontStyle, Style, Theme}; use util::LinesWithEndings; use escape::Escape; use std::io::{self, BufRead}; use std::path::Path; /// Output HTML for a line of code with `<span>` elements using class names /// As this has to keep track of open and closed `<span>` tags, it is a `struct` /// with additional state. /// /// There is a `finalize()` function that has to be called in the end in order /// to close all open `<span>` tags. /// /// The lines returned don't include a newline at the end. /// # Example /// /// ``` /// use syntect::html::ClassedHTMLGenerator; /// use syntect::parsing::SyntaxSet; /// /// let current_code = r#" /// x <- 5 /// y <- 6 /// x + y /// "#.to_string(); /// /// let syntax_set = SyntaxSet::load_defaults_newlines(); /// let syntax = syntax_set.find_syntax_by_name("R").unwrap(); /// let mut html_generator = ClassedHTMLGenerator::new(&syntax, &syntax_set); /// for line in current_code.lines() { /// html_generator.parse_html_for_line(&line); /// } /// let output_html = html_generator.finalize(); /// ``` pub struct ClassedHTMLGenerator<'a> { syntax_set: &'a SyntaxSet, open_spans: isize, parse_state: ParseState, html: String } impl<'a> ClassedHTMLGenerator<'a> { pub fn new(syntax_reference: &'a SyntaxReference, syntax_set: &'a SyntaxSet) -> ClassedHTMLGenerator<'a> { let parse_state = ParseState::new(syntax_reference); let open_spans = 0; let html = String::new(); ClassedHTMLGenerator { syntax_set, open_spans, parse_state, html } } /// Parse the line of code and update the internal HTML buffer with tagged HTML pub fn parse_html_for_line(&mut self, line: &str) { let parsed_line = self.parse_state.parse_line(line, &self.syntax_set); let (formatted_line, delta) = tokens_to_classed_spans( line, 
parsed_line.as_slice(), ClassStyle::Spaced); self.open_spans += delta; self.html.push_str(formatted_line.as_str()); } /// Close all open `<span>` tags and return the finished HTML string pub fn finalize(mut self) -> String { for _ in 0..self.open_spans { self.html.push_str("</span>"); } self.html } } /// Only one style for now, I may add more class styles later. /// Just here so I don't have to change the API #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ClassStyle { /// The classes are the atoms of the scope separated by spaces /// (e.g `source.php` becomes `source php`). /// This isn't that fast since it has to use the scope repository /// to look up scope names. Spaced, } fn scope_to_classes(s: &mut String, scope: Scope, style: ClassStyle) { assert!(style == ClassStyle::Spaced); // TODO more styles let repo = SCOPE_REPO.lock().unwrap(); for i in 0..(scope.len()) { let atom = scope.atom_at(i as usize); let atom_s = repo.atom_str(atom); if i != 0 { s.push_str(" ") } s.push_str(atom_s); } } /// Convenience method that combines `start_highlighted_html_snippet`, `styled_line_to_highlighted_html` /// and `HighlightLines` from `syntect::easy` to create a full highlighted HTML snippet for /// a string (which can contain many lines). /// /// Note that the `syntax` passed in must be from a `SyntaxSet` compiled for newline characters. /// This is easy to get with `SyntaxSet::load_defaults_newlines()`. 
(Note: this was different before v3.0) pub fn highlighted_html_for_string(s: &str, ss: &SyntaxSet, syntax: &SyntaxReference, theme: &Theme) -> String { let mut highlighter = HighlightLines::new(syntax, theme); let (mut output, bg) = start_highlighted_html_snippet(theme); for line in LinesWithEndings::from(s) { let regions = highlighter.highlight(line, ss); append_highlighted_html_for_styled_line(&regions[..], IncludeBackground::IfDifferent(bg), &mut output); } output.push_str("</pre>\n"); output } /// Convenience method that combines `start_highlighted_html_snippet`, `styled_line_to_highlighted_html` /// and `HighlightFile` from `syntect::easy` to create a full highlighted HTML snippet for /// a file. /// /// Note that the `syntax` passed in must be from a `SyntaxSet` compiled for newline characters. /// This is easy to get with `SyntaxSet::load_defaults_newlines()`. (Note: this was different before v3.0) pub fn highlighted_html_for_file<P: AsRef<Path>>(path: P, ss: &SyntaxSet, theme: &Theme) -> io::Result<String> { let mut highlighter = HighlightFile::new(path, ss, theme)?; let (mut output, bg) = start_highlighted_html_snippet(theme); let mut line = String::new(); while highlighter.reader.read_line(&mut line)? > 0 { { let regions = highlighter.highlight_lines.highlight(&line, ss); append_highlighted_html_for_styled_line(&regions[..], IncludeBackground::IfDifferent(bg), &mut output); } line.clear(); } output.push_str("</pre>\n"); Ok(output) } /// Output HTML for a line of code with `<span>` elements /// specifying classes for each token. The span elements are nested /// like the scope stack and the scopes are mapped to classes based /// on the `ClassStyle` (see it's docs). /// /// See `ClassedHTMLGenerator` for a more convenient wrapper, this is the advanced /// version of the function that gives more control over the parsing flow. 
/// /// For this to work correctly you must concatenate all the lines in a `<pre>` /// tag since some span tags opened on a line may not be closed on that line /// and later lines may close tags from previous lines. /// /// Returns the HTML string and the number of `<span>` tags opened /// (negative for closed). So that you can emit the correct number of closing /// tags at the end. fn tokens_to_classed_spans(line: &str, ops: &[(usize, ScopeStackOp)], style: ClassStyle) -> (String, isize) { let mut s = String::with_capacity(line.len() + ops.len() * 8); // a guess let mut cur_index = 0; let mut stack = ScopeStack::new(); let mut span_delta = 0; for &(i, ref op) in ops { if i > cur_index { write!(s, "{}", Escape(&line[cur_index..i])).unwrap(); cur_index = i } stack.apply_with_hook(op, |basic_op, _| { match basic_op { BasicScopeStackOp::Push(scope) => { s.push_str("<span class=\""); scope_to_classes(&mut s, scope, style); s.push_str("\">"); span_delta += 1; } BasicScopeStackOp::Pop => { s.push_str("</span>"); span_delta -= 1; } } }); } write!(s, "{}", Escape(&line[cur_index..line.len()])).unwrap(); (s, span_delta) } #[deprecated(since="3.1.0", note="please use `tokens_to_classed_spans` instead")] pub fn tokens_to_classed_html(line: &str, ops: &[(usize, ScopeStackOp)], style: ClassStyle) -> String { tokens_to_classed_spans(line, ops, style).0 } /// Determines how background color attributes are generated #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum IncludeBackground { /// Don't include `background-color`, for performance or so that you can use your own background. 
No, /// Set background color attributes on every node Yes, /// Only set the `background-color` if it is different than the default (presumably set on a parent element) IfDifferent(Color), } fn write_css_color(s: &mut String, c: Color) { if c.a != 0xFF { write!(s,"#{:02x}{:02x}{:02x}{:02x}",c.r,c.g,c.b,c.a).unwrap(); } else { write!(s,"#{:02x}{:02x}{:02x}",c.r,c.g,c.b).unwrap(); } } /// Output HTML for a line of code with `<span>` elements using inline /// `style` attributes to set the correct font attributes. /// The `bg` attribute determines if the spans will have the `background-color` /// attribute set. See the `IncludeBackground` enum's docs. /// /// The lines returned don't include a newline at the end. /// # Examples /// /// ``` /// use syntect::easy::HighlightLines; /// use syntect::parsing::SyntaxSet; /// use syntect::highlighting::{ThemeSet, Style}; /// use syntect::html::{styled_line_to_highlighted_html, IncludeBackground}; /// /// // Load these once at the start of your program /// let ps = SyntaxSet::load_defaults_newlines(); /// let ts = ThemeSet::load_defaults(); /// /// let syntax = ps.find_syntax_by_name("Ruby").unwrap(); /// let mut h = HighlightLines::new(syntax, &ts.themes["base16-ocean.dark"]); /// let regions = h.highlight("5", &ps); /// let html = styled_line_to_highlighted_html(&regions[..], IncludeBackground::No); /// assert_eq!(html, "<span style=\"color:#d08770;\">5</span>"); /// ``` pub fn styled_line_to_highlighted_html(v: &[(Style, &str)], bg: IncludeBackground) -> String { let mut s: String = String::new(); append_highlighted_html_for_styled_line(v, bg, &mut s); s } /// Like `styled_line_to_highlighted_html` but appends to a `String` for increased efficiency. /// In fact `styled_line_to_highlighted_html` is just a wrapper around this function. 
pub fn append_highlighted_html_for_styled_line(v: &[(Style, &str)], bg: IncludeBackground, mut s: &mut String) { let mut prev_style: Option<&Style> = None; for &(ref style, text) in v.iter() { let unify_style = if let Some(ps) = prev_style { style == ps || (style.background == ps.background && text.trim().is_empty()) } else { false }; if unify_style { write!(s, "{}", Escape(text)).unwrap(); } else { if prev_style.is_some() { write!(s, "</span>").unwrap(); } prev_style = Some(style); write!(s, "<span style=\"").unwrap(); let include_bg = match bg { IncludeBackground::Yes => true, IncludeBackground::No => false, IncludeBackground::IfDifferent(c) => (style.background != c), }; if include_bg { write!(s, "background-color:").unwrap(); write_css_color(&mut s, style.background); write!(s, ";").unwrap(); } if style.font_style.contains(FontStyle::UNDERLINE) { write!(s, "text-decoration:underline;").unwrap(); } if style.font_style.contains(FontStyle::BOLD) { write!(s, "font-weight:bold;").unwrap(); } if style.font_style.contains(FontStyle::ITALIC) { write!(s, "font-style:italic;").unwrap(); } write!(s, "color:").unwrap(); write_css_color(&mut s, style.foreground); write!(s, ";\">{}", Escape(text)).unwrap(); } } if prev_style.is_some() { write!(s, "</span>").unwrap(); } } /// Returns a `<pre style="...">\n` tag with the correct background color for the given theme. /// This is for if you want to roll your own HTML output, you probably just want to use /// `highlighted_html_for_string`. /// /// If you don't care about the background color you can just prefix the lines from /// `styled_line_to_highlighted_html` with a `<pre>`. This is meant to be used with `IncludeBackground::IfDifferent`. /// As of `v3.0` this method also returns the background color to be passed to `IfDifferent`. 
///
/// You're responsible for creating the string `</pre>` to close this, I'm not gonna provide a
/// helper for that :-)
pub fn start_highlighted_html_snippet(t: &Theme) -> (String, Color) {
    // Fall back to white when the theme doesn't specify a background color.
    let c = t.settings.background.unwrap_or(Color::WHITE);
    (format!("<pre style=\"background-color:#{:02x}{:02x}{:02x};\">\n", c.r, c.g, c.b), c)
}

#[cfg(test)]
mod tests {
    use super::*;
    use parsing::{SyntaxSet, ParseState, ScopeStack, SyntaxSetBuilder};
    use highlighting::{ThemeSet, Style, Highlighter, HighlightIterator, HighlightState};

    // Compares both output modes (class-based and inline-style) against
    // checked-in HTML fixtures in ../testdata.
    #[test]
    fn tokens() {
        let ss = SyntaxSet::load_defaults_newlines();
        let syntax = ss.find_syntax_by_name("Markdown").unwrap();
        let mut state = ParseState::new(syntax);
        let line = "[w](t.co) *hi* **five**";
        let ops = state.parse_line(line, &ss);
        // use util::debug_print_ops;
        // debug_print_ops(line, &ops);
        let html = tokens_to_classed_html(line, &ops[..], ClassStyle::Spaced);
        println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test2.html").trim_right());

        let ts = ThemeSet::load_defaults();
        let highlighter = Highlighter::new(&ts.themes["InspiredGitHub"]);
        let mut highlight_state = HighlightState::new(&highlighter, ScopeStack::new());
        let iter = HighlightIterator::new(&mut highlight_state, &ops[..], line, &highlighter);
        let regions: Vec<(Style, &str)> = iter.collect();

        let html2 = styled_line_to_highlighted_html(&regions[..], IncludeBackground::Yes);
        println!("{}", html2);
        assert_eq!(html2, include_str!("../testdata/test1.html").trim_right());
    }

    // End-to-end: string and file convenience wrappers must agree with each
    // other and with their fixtures.
    #[test]
    fn strings() {
        let ss = SyntaxSet::load_defaults_newlines();
        let ts = ThemeSet::load_defaults();
        let s = include_str!("../testdata/highlight_test.erb");
        let syntax = ss.find_syntax_by_extension("erb").unwrap();
        let html = highlighted_html_for_string(s, &ss, syntax, &ts.themes["base16-ocean.dark"]);
        // println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test3.html"));
        let html2 = highlighted_html_for_file("testdata/highlight_test.erb", &ss, &ts.themes["base16-ocean.dark"])
            .unwrap();
        assert_eq!(html2, html);

        // YAML is a tricky syntax and InspiredGitHub is a fancy theme, this is basically an integration test
        let html3 = highlighted_html_for_file("testdata/Packages/Rust/Cargo.sublime-syntax", &ss, &ts.themes["InspiredGitHub"])
            .unwrap();
        println!("{}", html3);
        assert_eq!(html3, include_str!("../testdata/test4.html"));
    }

    #[test]
    fn tricky_test_syntax() {
        // This syntax I wrote tests edge cases of prototypes
        // I verified the output HTML against what ST3 does with the same syntax and file
        let mut builder = SyntaxSetBuilder::new();
        builder.add_from_folder("testdata", true).unwrap();
        let ss = builder.build();
        let ts = ThemeSet::load_defaults();
        let html = highlighted_html_for_file("testdata/testing-syntax.testsyntax", &ss, &ts.themes["base16-ocean.dark"])
            .unwrap();
        println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test5.html"));
    }

    // Exercises the stateful ClassedHTMLGenerator over a multi-token line.
    #[test]
    fn test_classed_html_generator() {
        let current_code = "x + y".to_string();
        let syntax_set = SyntaxSet::load_defaults_newlines();
        let syntax = syntax_set.find_syntax_by_name("R").unwrap();
        let mut html_generator = ClassedHTMLGenerator::new(&syntax, &syntax_set);
        for line in current_code.lines() {
            html_generator.parse_html_for_line(&line);
        }
        let html = html_generator.finalize();

        assert_eq!(html, r#"<span class="source r">x <span class="keyword operator arithmetic r">+</span> y</span>"#);
    }
}

Make tokens_to_classed_spans public like intended Fixes #240 //!
Rendering highlighted code as HTML+CSS
use std::fmt::Write;
use parsing::{ScopeStackOp, BasicScopeStackOp, Scope, ScopeStack, SyntaxReference, ParseState, SyntaxSet, SCOPE_REPO};
use easy::{HighlightLines, HighlightFile};
use highlighting::{Color, FontStyle, Style, Theme};
use util::LinesWithEndings;
use escape::Escape;
use std::io::{self, BufRead};
use std::path::Path;

/// Output HTML for a line of code with `<span>` elements using class names
/// As this has to keep track of open and closed `<span>` tags, it is a `struct`
/// with additional state.
///
/// There is a `finalize()` function that has to be called in the end in order
/// to close all open `<span>` tags.
///
/// The lines returned don't include a newline at the end.
/// # Example
///
/// ```
/// use syntect::html::ClassedHTMLGenerator;
/// use syntect::parsing::SyntaxSet;
///
/// let current_code = r#"
/// x <- 5
/// y <- 6
/// x + y
/// "#.to_string();
///
/// let syntax_set = SyntaxSet::load_defaults_newlines();
/// let syntax = syntax_set.find_syntax_by_name("R").unwrap();
/// let mut html_generator = ClassedHTMLGenerator::new(&syntax, &syntax_set);
/// for line in current_code.lines() {
///     html_generator.parse_html_for_line(&line);
/// }
/// let output_html = html_generator.finalize();
/// ```
pub struct ClassedHTMLGenerator<'a> {
    // Needed by the parser to resolve scopes on each line.
    syntax_set: &'a SyntaxSet,
    // Running count of `<span>`s opened but not yet closed across lines;
    // may be negative transiently if a line closes more than it opens.
    open_spans: isize,
    // Incremental parse state carried from one line to the next.
    parse_state: ParseState,
    // Accumulated output, returned by `finalize()`.
    html: String
}

impl<'a> ClassedHTMLGenerator<'a> {
    pub fn new(syntax_reference: &'a SyntaxReference, syntax_set: &'a SyntaxSet) -> ClassedHTMLGenerator<'a> {
        let parse_state = ParseState::new(syntax_reference);
        let open_spans = 0;
        let html = String::new();
        ClassedHTMLGenerator {
            syntax_set,
            open_spans,
            parse_state,
            html
        }
    }

    /// Parse the line of code and update the internal HTML buffer with tagged HTML
    pub fn parse_html_for_line(&mut self, line: &str) {
        let parsed_line = self.parse_state.parse_line(line, &self.syntax_set);
        let (formatted_line, delta) = tokens_to_classed_spans(
            line,
            parsed_line.as_slice(),
            ClassStyle::Spaced);
        // Remember how many spans this line left open (or closed) so that
        // `finalize` can balance them at the end.
        self.open_spans += delta;
        self.html.push_str(formatted_line.as_str());
    }

    /// Close all open `<span>` tags and return the finished HTML string
    pub fn finalize(mut self) -> String {
        for _ in 0..self.open_spans {
            self.html.push_str("</span>");
        }
        self.html
    }
}

/// Only one style for now, I may add more class styles later.
/// Just here so I don't have to change the API
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ClassStyle {
    /// The classes are the atoms of the scope separated by spaces
    /// (e.g `source.php` becomes `source php`).
    /// This isn't that fast since it has to use the scope repository
    /// to look up scope names.
    Spaced,
}

// Writes the space-separated class list for `scope` into `s`.
fn scope_to_classes(s: &mut String, scope: Scope, style: ClassStyle) {
    assert!(style == ClassStyle::Spaced); // TODO more styles
    let repo = SCOPE_REPO.lock().unwrap();
    for i in 0..(scope.len()) {
        let atom = scope.atom_at(i as usize);
        let atom_s = repo.atom_str(atom);
        if i != 0 {
            s.push_str(" ")
        }
        s.push_str(atom_s);
    }
}

/// Convenience method that combines `start_highlighted_html_snippet`, `styled_line_to_highlighted_html`
/// and `HighlightLines` from `syntect::easy` to create a full highlighted HTML snippet for
/// a string (which can contain many lines).
///
/// Note that the `syntax` passed in must be from a `SyntaxSet` compiled for newline characters.
/// This is easy to get with `SyntaxSet::load_defaults_newlines()`. (Note: this was different before v3.0)
pub fn highlighted_html_for_string(s: &str, ss: &SyntaxSet, syntax: &SyntaxReference, theme: &Theme) -> String {
    let mut highlighter = HighlightLines::new(syntax, theme);
    let (mut output, bg) = start_highlighted_html_snippet(theme);

    for line in LinesWithEndings::from(s) {
        let regions = highlighter.highlight(line, ss);
        append_highlighted_html_for_styled_line(&regions[..], IncludeBackground::IfDifferent(bg), &mut output);
    }
    output.push_str("</pre>\n");
    output
}

/// Convenience method that combines `start_highlighted_html_snippet`, `styled_line_to_highlighted_html`
/// and `HighlightFile` from `syntect::easy` to create a full highlighted HTML snippet for
/// a file.
///
/// Note that the `syntax` passed in must be from a `SyntaxSet` compiled for newline characters.
/// This is easy to get with `SyntaxSet::load_defaults_newlines()`. (Note: this was different before v3.0)
pub fn highlighted_html_for_file<P: AsRef<Path>>(path: P, ss: &SyntaxSet, theme: &Theme) -> io::Result<String> {
    let mut highlighter = HighlightFile::new(path, ss, theme)?;
    let (mut output, bg) = start_highlighted_html_snippet(theme);
    let mut line = String::new();
    while highlighter.reader.read_line(&mut line)? > 0 {
        {
            // Inner scope ends the borrow of `line` so it can be cleared below.
            let regions = highlighter.highlight_lines.highlight(&line, ss);
            append_highlighted_html_for_styled_line(&regions[..], IncludeBackground::IfDifferent(bg), &mut output);
        }
        line.clear();
    }
    output.push_str("</pre>\n");
    Ok(output)
}

/// Output HTML for a line of code with `<span>` elements
/// specifying classes for each token. The span elements are nested
/// like the scope stack and the scopes are mapped to classes based
/// on the `ClassStyle` (see it's docs).
///
/// See `ClassedHTMLGenerator` for a more convenient wrapper, this is the advanced
/// version of the function that gives more control over the parsing flow.
/// /// For this to work correctly you must concatenate all the lines in a `<pre>` /// tag since some span tags opened on a line may not be closed on that line /// and later lines may close tags from previous lines. /// /// Returns the HTML string and the number of `<span>` tags opened /// (negative for closed). So that you can emit the correct number of closing /// tags at the end. pub fn tokens_to_classed_spans(line: &str, ops: &[(usize, ScopeStackOp)], style: ClassStyle) -> (String, isize) { let mut s = String::with_capacity(line.len() + ops.len() * 8); // a guess let mut cur_index = 0; let mut stack = ScopeStack::new(); let mut span_delta = 0; for &(i, ref op) in ops { if i > cur_index { write!(s, "{}", Escape(&line[cur_index..i])).unwrap(); cur_index = i } stack.apply_with_hook(op, |basic_op, _| { match basic_op { BasicScopeStackOp::Push(scope) => { s.push_str("<span class=\""); scope_to_classes(&mut s, scope, style); s.push_str("\">"); span_delta += 1; } BasicScopeStackOp::Pop => { s.push_str("</span>"); span_delta -= 1; } } }); } write!(s, "{}", Escape(&line[cur_index..line.len()])).unwrap(); (s, span_delta) } #[deprecated(since="3.1.0", note="please use `tokens_to_classed_spans` instead")] pub fn tokens_to_classed_html(line: &str, ops: &[(usize, ScopeStackOp)], style: ClassStyle) -> String { tokens_to_classed_spans(line, ops, style).0 } /// Determines how background color attributes are generated #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum IncludeBackground { /// Don't include `background-color`, for performance or so that you can use your own background. 
No, /// Set background color attributes on every node Yes, /// Only set the `background-color` if it is different than the default (presumably set on a parent element) IfDifferent(Color), } fn write_css_color(s: &mut String, c: Color) { if c.a != 0xFF { write!(s,"#{:02x}{:02x}{:02x}{:02x}",c.r,c.g,c.b,c.a).unwrap(); } else { write!(s,"#{:02x}{:02x}{:02x}",c.r,c.g,c.b).unwrap(); } } /// Output HTML for a line of code with `<span>` elements using inline /// `style` attributes to set the correct font attributes. /// The `bg` attribute determines if the spans will have the `background-color` /// attribute set. See the `IncludeBackground` enum's docs. /// /// The lines returned don't include a newline at the end. /// # Examples /// /// ``` /// use syntect::easy::HighlightLines; /// use syntect::parsing::SyntaxSet; /// use syntect::highlighting::{ThemeSet, Style}; /// use syntect::html::{styled_line_to_highlighted_html, IncludeBackground}; /// /// // Load these once at the start of your program /// let ps = SyntaxSet::load_defaults_newlines(); /// let ts = ThemeSet::load_defaults(); /// /// let syntax = ps.find_syntax_by_name("Ruby").unwrap(); /// let mut h = HighlightLines::new(syntax, &ts.themes["base16-ocean.dark"]); /// let regions = h.highlight("5", &ps); /// let html = styled_line_to_highlighted_html(&regions[..], IncludeBackground::No); /// assert_eq!(html, "<span style=\"color:#d08770;\">5</span>"); /// ``` pub fn styled_line_to_highlighted_html(v: &[(Style, &str)], bg: IncludeBackground) -> String { let mut s: String = String::new(); append_highlighted_html_for_styled_line(v, bg, &mut s); s } /// Like `styled_line_to_highlighted_html` but appends to a `String` for increased efficiency. /// In fact `styled_line_to_highlighted_html` is just a wrapper around this function. 
pub fn append_highlighted_html_for_styled_line(v: &[(Style, &str)], bg: IncludeBackground, s: &mut String) {
    // Idiom fix: the parameter was bound `mut s: &mut String` solely so the body
    // could write `&mut s` (a `&mut &mut String` that only compiled via deref
    // coercion). Passing the reborrowed `s` directly is equivalent and cleaner.
    let mut prev_style: Option<&Style> = None;
    for &(ref style, text) in v.iter() {
        // Reuse the currently open span when the style is identical, or when the
        // text is pure whitespace and only the (invisible) foreground differs.
        let unify_style = if let Some(ps) = prev_style {
            style == ps || (style.background == ps.background && text.trim().is_empty())
        } else {
            false
        };
        if unify_style {
            write!(s, "{}", Escape(text)).unwrap();
        } else {
            // Close the previous span (if any) before opening a new one.
            if prev_style.is_some() {
                write!(s, "</span>").unwrap();
            }
            prev_style = Some(style);
            write!(s, "<span style=\"").unwrap();
            let include_bg = match bg {
                IncludeBackground::Yes => true,
                IncludeBackground::No => false,
                IncludeBackground::IfDifferent(c) => (style.background != c),
            };
            if include_bg {
                write!(s, "background-color:").unwrap();
                write_css_color(s, style.background);
                write!(s, ";").unwrap();
            }
            if style.font_style.contains(FontStyle::UNDERLINE) {
                write!(s, "text-decoration:underline;").unwrap();
            }
            if style.font_style.contains(FontStyle::BOLD) {
                write!(s, "font-weight:bold;").unwrap();
            }
            if style.font_style.contains(FontStyle::ITALIC) {
                write!(s, "font-style:italic;").unwrap();
            }
            write!(s, "color:").unwrap();
            write_css_color(s, style.foreground);
            write!(s, ";\">{}", Escape(text)).unwrap();
        }
    }
    // Balance the final open span, if any regions were emitted at all.
    if prev_style.is_some() {
        write!(s, "</span>").unwrap();
    }
}

/// Returns a `<pre style="...">\n` tag with the correct background color for the given theme.
/// This is for if you want to roll your own HTML output, you probably just want to use
/// `highlighted_html_for_string`.
///
/// If you don't care about the background color you can just prefix the lines from
/// `styled_line_to_highlighted_html` with a `<pre>`. This is meant to be used with `IncludeBackground::IfDifferent`.
/// As of `v3.0` this method also returns the background color to be passed to `IfDifferent`.
///
/// You're responsible for creating the string `</pre>` to close this, I'm not gonna provide a
/// helper for that :-)
pub fn start_highlighted_html_snippet(t: &Theme) -> (String, Color) {
    // Fall back to white when the theme doesn't specify a background color.
    let c = t.settings.background.unwrap_or(Color::WHITE);
    (format!("<pre style=\"background-color:#{:02x}{:02x}{:02x};\">\n", c.r, c.g, c.b), c)
}

#[cfg(test)]
mod tests {
    use super::*;
    use parsing::{SyntaxSet, ParseState, ScopeStack, SyntaxSetBuilder};
    use highlighting::{ThemeSet, Style, Highlighter, HighlightIterator, HighlightState};

    // Compares both output modes (class-based and inline-style) against
    // checked-in HTML fixtures in ../testdata.
    #[test]
    fn tokens() {
        let ss = SyntaxSet::load_defaults_newlines();
        let syntax = ss.find_syntax_by_name("Markdown").unwrap();
        let mut state = ParseState::new(syntax);
        let line = "[w](t.co) *hi* **five**";
        let ops = state.parse_line(line, &ss);
        // use util::debug_print_ops;
        // debug_print_ops(line, &ops);
        let html = tokens_to_classed_html(line, &ops[..], ClassStyle::Spaced);
        println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test2.html").trim_right());

        let ts = ThemeSet::load_defaults();
        let highlighter = Highlighter::new(&ts.themes["InspiredGitHub"]);
        let mut highlight_state = HighlightState::new(&highlighter, ScopeStack::new());
        let iter = HighlightIterator::new(&mut highlight_state, &ops[..], line, &highlighter);
        let regions: Vec<(Style, &str)> = iter.collect();

        let html2 = styled_line_to_highlighted_html(&regions[..], IncludeBackground::Yes);
        println!("{}", html2);
        assert_eq!(html2, include_str!("../testdata/test1.html").trim_right());
    }

    // End-to-end: string and file convenience wrappers must agree with each
    // other and with their fixtures.
    #[test]
    fn strings() {
        let ss = SyntaxSet::load_defaults_newlines();
        let ts = ThemeSet::load_defaults();
        let s = include_str!("../testdata/highlight_test.erb");
        let syntax = ss.find_syntax_by_extension("erb").unwrap();
        let html = highlighted_html_for_string(s, &ss, syntax, &ts.themes["base16-ocean.dark"]);
        // println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test3.html"));
        let html2 = highlighted_html_for_file("testdata/highlight_test.erb", &ss, &ts.themes["base16-ocean.dark"])
            .unwrap();
        assert_eq!(html2, html);

        // YAML is a tricky syntax and InspiredGitHub is a fancy theme, this is basically an integration test
        let html3 = highlighted_html_for_file("testdata/Packages/Rust/Cargo.sublime-syntax", &ss, &ts.themes["InspiredGitHub"])
            .unwrap();
        println!("{}", html3);
        assert_eq!(html3, include_str!("../testdata/test4.html"));
    }

    #[test]
    fn tricky_test_syntax() {
        // This syntax I wrote tests edge cases of prototypes
        // I verified the output HTML against what ST3 does with the same syntax and file
        let mut builder = SyntaxSetBuilder::new();
        builder.add_from_folder("testdata", true).unwrap();
        let ss = builder.build();
        let ts = ThemeSet::load_defaults();
        let html = highlighted_html_for_file("testdata/testing-syntax.testsyntax", &ss, &ts.themes["base16-ocean.dark"])
            .unwrap();
        println!("{}", html);
        assert_eq!(html, include_str!("../testdata/test5.html"));
    }

    // Exercises the stateful ClassedHTMLGenerator over a multi-token line.
    #[test]
    fn test_classed_html_generator() {
        let current_code = "x + y".to_string();
        let syntax_set = SyntaxSet::load_defaults_newlines();
        let syntax = syntax_set.find_syntax_by_name("R").unwrap();
        let mut html_generator = ClassedHTMLGenerator::new(&syntax, &syntax_set);
        for line in current_code.lines() {
            html_generator.parse_html_for_line(&line);
        }
        let html = html_generator.finalize();

        assert_eq!(html, r#"<span class="source r">x <span class="keyword operator arithmetic r">+</span> y</span>"#);
    }
}
use html5ever::interface::QualName; use html5ever::parse_document; use html5ever::rcdom::{Handle, NodeData, RcDom}; use html5ever::serialize::{serialize, SerializeOpts}; use html5ever::tendril::{format_tendril, TendrilSink}; use html5ever::tree_builder::{Attribute, TreeSink}; use html5ever::{local_name, namespace_url, ns}; use http::retrieve_asset; use js::attr_is_event_handler; use std::collections::HashMap; use std::default::Default; use utils::{data_to_dataurl, is_valid_url, resolve_css_imports, resolve_url, url_has_protocol}; lazy_static! { static ref EMPTY_STRING: String = String::new(); } const ICON_VALUES: [&str; 5] = [ "icon", "shortcut icon", "mask-icon", "apple-touch-icon", "fluid-icon", ]; const TRANSPARENT_PIXEL: &str = "data:image/png;base64,\ iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="; pub fn get_parent_node(node: &Handle) -> Handle { let parent = node.parent.take().clone(); parent.and_then(|node| node.upgrade()).unwrap() } pub fn get_node_name(node: &Handle) -> String { match &node.data { NodeData::Element { ref name, .. } => name.local.as_ref().to_string(), _ => EMPTY_STRING.clone(), } } pub fn is_icon(attr_value: &str) -> bool { ICON_VALUES.contains(&&*attr_value.to_lowercase()) } pub fn walk_and_embed_assets( cache: &mut HashMap<String, String>, url: &str, node: &Handle, opt_no_css: bool, opt_no_js: bool, opt_no_images: bool, opt_user_agent: &str, opt_silent: bool, opt_insecure: bool, opt_no_frames: bool, ) { match node.data { NodeData::Document => { // Dig deeper for child in node.children.borrow().iter() { walk_and_embed_assets( cache, &url, child, opt_no_css, opt_no_js, opt_no_images, opt_user_agent, opt_silent, opt_insecure, opt_no_frames, ); } } NodeData::Element { ref name, ref attrs, .. 
} => { let attrs_mut = &mut attrs.borrow_mut(); match name.local.as_ref() { "link" => { let mut link_type: &str = ""; for attr in attrs_mut.iter_mut() { if &attr.name.local == "rel" { if is_icon(&attr.value.to_string()) { link_type = "icon"; break; } else if attr.value.to_string() == "stylesheet" { link_type = "stylesheet"; break; } } } if link_type == "icon" { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { if opt_no_images { attr.value.clear(); } else { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (favicon_dataurl, _) = retrieve_asset( cache, &href_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(favicon_dataurl.as_str()); } } } } else if link_type == "stylesheet" { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { if opt_no_css { attr.value.clear(); } else { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (css_dataurl, _) = retrieve_asset( cache, &href_full_url, false, "text/css", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); let css_resolved = resolve_css_imports( cache, &css_dataurl, &href_full_url, opt_user_agent, opt_silent, opt_insecure, ); attr.value.push_slice(css_resolved.as_str()); } } } } else { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(&href_full_url.as_str()); } } } } "img" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { let value = attr.value.to_string(); // Ignore images with empty source if value == EMPTY_STRING.clone() { continue; } if opt_no_images { attr.value.clear(); 
attr.value.push_slice(TRANSPARENT_PIXEL); } else { let src_full_url: String = resolve_url(&url, &value).unwrap_or(EMPTY_STRING.clone()); let (img_dataurl, _) = retrieve_asset( cache, &src_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(img_dataurl.as_str()); } } } } "source" => { for attr in attrs_mut.iter_mut() { let attr_name: &str = &attr.name.local; if attr_name == "src" { let src_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(attr.value.to_string()); attr.value.clear(); attr.value.push_slice(src_full_url.as_str()); } else if attr_name == "srcset" { if get_node_name(&get_parent_node(&node)) == "picture" { if opt_no_images { attr.value.clear(); attr.value.push_slice(TRANSPARENT_PIXEL); } else { let srcset_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (source_dataurl, _) = retrieve_asset( cache, &srcset_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(source_dataurl.as_str()); } } } } } "a" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { // Don't touch email links or hrefs which begin with a hash sign if attr.value.starts_with('#') || url_has_protocol(&attr.value) { continue; } let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(href_full_url.as_str()); } } } "script" => { if opt_no_js { // Empty src and inner content of SCRIPT tags for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { attr.value.clear(); } } node.children.borrow_mut().clear(); } else { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { let src_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let 
(js_dataurl, _) = retrieve_asset( cache, &src_full_url, true, "application/javascript", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(js_dataurl.as_str()); } } } } "style" => { if opt_no_css { // Empty inner content of STYLE tags node.children.borrow_mut().clear(); } } "form" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "action" { // Modify action to be a full URL if !is_valid_url(&attr.value) { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(href_full_url.as_str()); } } } } "iframe" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { if opt_no_frames { // Empty the src attribute attr.value.clear(); continue; } let iframe_src: String = attr.value.to_string(); // Ignore iframes with empty source (they cause infinite loops) if iframe_src == EMPTY_STRING.clone() { continue; } let src_full_url: String = resolve_url(&url, &iframe_src).unwrap_or(EMPTY_STRING.clone()); let (iframe_data, iframe_final_url) = retrieve_asset( cache, &src_full_url, false, "text/html", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), src_full_url)); let dom = html_to_dom(&iframe_data); walk_and_embed_assets( cache, &iframe_final_url, &dom.document, opt_no_css, opt_no_js, opt_no_images, opt_user_agent, opt_silent, opt_insecure, opt_no_frames, ); let mut buf: Vec<u8> = Vec::new(); serialize(&mut buf, &dom.document, SerializeOpts::default()).unwrap(); let iframe_dataurl = data_to_dataurl("text/html", &buf); attr.value.clear(); attr.value.push_slice(iframe_dataurl.as_str()); } } } "video" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "poster" { let video_poster = attr.value.to_string(); // Skip posters with empty source if video_poster == EMPTY_STRING.clone() { continue; } if opt_no_images { attr.value.clear(); } else { let 
poster_full_url: String = resolve_url(&url, &video_poster) .unwrap_or(EMPTY_STRING.clone()); let (poster_dataurl, _) = retrieve_asset( cache, &poster_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((poster_full_url, EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(poster_dataurl.as_str()); } } } } _ => {} } if opt_no_css { // Get rid of style attributes let mut style_attr_indexes = Vec::new(); for (i, attr) in attrs_mut.iter_mut().enumerate() { if attr.name.local.to_lowercase() == "style" { style_attr_indexes.push(i); } } style_attr_indexes.reverse(); for attr_index in style_attr_indexes { attrs_mut.remove(attr_index); } } if opt_no_js { // Get rid of JS event attributes let mut js_attr_indexes = Vec::new(); for (i, attr) in attrs_mut.iter_mut().enumerate() { if attr_is_event_handler(&attr.name.local) { js_attr_indexes.push(i); } } js_attr_indexes.reverse(); for attr_index in js_attr_indexes { attrs_mut.remove(attr_index); } } // Dig deeper for child in node.children.borrow().iter() { walk_and_embed_assets( cache, &url, child, opt_no_css, opt_no_js, opt_no_images, opt_user_agent, opt_silent, opt_insecure, opt_no_frames, ); } } _ => { // Note: in case of opt_no_js being set to true, there's no need to worry about // getting rid of comments that may contain scripts, e.g. <!--[if IE]><script>... // since that's not part of W3C standard and therefore gets ignored // by browsers other than IE [5, 9] } } } pub fn html_to_dom(data: &str) -> html5ever::rcdom::RcDom { parse_document(RcDom::default(), Default::default()) .from_utf8() .read_from(&mut data.as_bytes()) .unwrap() } fn get_child_node_by_name(handle: &Handle, node_name: &str) -> Handle { let children = handle.children.borrow(); let matching_children = children.iter().find(|child| match child.data { NodeData::Element { ref name, .. 
} => &*name.local == node_name, _ => false, }); match matching_children { Some(node) => node.clone(), _ => { return handle.clone(); } } } pub fn stringify_document( handle: &Handle, opt_no_css: bool, opt_no_frames: bool, opt_no_js: bool, opt_no_images: bool, opt_isolate: bool, ) -> String { let mut buf: Vec<u8> = Vec::new(); serialize(&mut buf, handle, SerializeOpts::default()) .expect("unable to serialize DOM into buffer"); let mut result: String = String::from_utf8(buf).unwrap(); if opt_isolate || opt_no_css || opt_no_frames || opt_no_js || opt_no_images { let mut buf: Vec<u8> = Vec::new(); let mut dom = html_to_dom(&result); let doc = dom.get_document(); let html = get_child_node_by_name(&doc, "html"); let head = get_child_node_by_name(&html, "head"); let mut content_attr = EMPTY_STRING.clone(); if opt_isolate { content_attr += " default-src 'unsafe-inline' data:;"; } if opt_no_css { content_attr += " style-src 'none';"; } if opt_no_frames { content_attr += " frame-src 'none';child-src 'none';"; } if opt_no_js { content_attr += " script-src 'none';"; } if opt_no_images { content_attr += " img-src data:;"; } content_attr = content_attr.trim().to_string(); let meta = dom.create_element( QualName::new(None, ns!(), local_name!("meta")), vec![ Attribute { name: QualName::new(None, ns!(), local_name!("http-equiv")), value: format_tendril!("Content-Security-Policy"), }, Attribute { name: QualName::new(None, ns!(), local_name!("content")), value: format_tendril!("{}", content_attr), }, ], Default::default(), ); head.children.borrow_mut().reverse(); head.children.borrow_mut().push(meta.clone()); head.children.borrow_mut().reverse(); // Note: the CSP meta-tag has to be prepended, never appended, // since there already may be one defined in the document, // and browsers don't allow re-defining them (for obvious reasons) serialize(&mut buf, &doc, SerializeOpts::default()) .expect("unable to serialize DOM into buffer"); result = String::from_utf8(buf).unwrap(); // Note: we 
can't make it isolate the page right away since it may have no HEAD element, // ergo we have to serialize, parse DOM again, and then finally serialize the result } result } Fixed misleading variable name use html5ever::interface::QualName; use html5ever::parse_document; use html5ever::rcdom::{Handle, NodeData, RcDom}; use html5ever::serialize::{serialize, SerializeOpts}; use html5ever::tendril::{format_tendril, TendrilSink}; use html5ever::tree_builder::{Attribute, TreeSink}; use html5ever::{local_name, namespace_url, ns}; use http::retrieve_asset; use js::attr_is_event_handler; use std::collections::HashMap; use std::default::Default; use utils::{data_to_dataurl, is_valid_url, resolve_css_imports, resolve_url, url_has_protocol}; lazy_static! { static ref EMPTY_STRING: String = String::new(); } const ICON_VALUES: [&str; 5] = [ "icon", "shortcut icon", "mask-icon", "apple-touch-icon", "fluid-icon", ]; const TRANSPARENT_PIXEL: &str = "data:image/png;base64,\ iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="; pub fn get_parent_node(node: &Handle) -> Handle { let parent = node.parent.take().clone(); parent.and_then(|node| node.upgrade()).unwrap() } pub fn get_node_name(node: &Handle) -> String { match &node.data { NodeData::Element { ref name, .. } => name.local.as_ref().to_string(), _ => EMPTY_STRING.clone(), } } pub fn is_icon(attr_value: &str) -> bool { ICON_VALUES.contains(&&*attr_value.to_lowercase()) } pub fn walk_and_embed_assets( cache: &mut HashMap<String, String>, url: &str, node: &Handle, opt_no_css: bool, opt_no_js: bool, opt_no_images: bool, opt_user_agent: &str, opt_silent: bool, opt_insecure: bool, opt_no_frames: bool, ) { match node.data { NodeData::Document => { // Dig deeper for child in node.children.borrow().iter() { walk_and_embed_assets( cache, &url, child, opt_no_css, opt_no_js, opt_no_images, opt_user_agent, opt_silent, opt_insecure, opt_no_frames, ); } } NodeData::Element { ref name, ref attrs, .. 
} => { let attrs_mut = &mut attrs.borrow_mut(); match name.local.as_ref() { "link" => { let mut link_type: &str = ""; for attr in attrs_mut.iter_mut() { if &attr.name.local == "rel" { if is_icon(&attr.value.to_string()) { link_type = "icon"; break; } else if attr.value.to_string() == "stylesheet" { link_type = "stylesheet"; break; } } } if link_type == "icon" { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { if opt_no_images { attr.value.clear(); } else { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (favicon_dataurl, _) = retrieve_asset( cache, &href_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(favicon_dataurl.as_str()); } } } } else if link_type == "stylesheet" { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { if opt_no_css { attr.value.clear(); } else { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (css, _) = retrieve_asset( cache, &href_full_url, false, "text/css", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); let css_resolved = resolve_css_imports( cache, &css, &href_full_url, opt_user_agent, opt_silent, opt_insecure, ); attr.value.push_slice(css_resolved.as_str()); } } } } else { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(&href_full_url.as_str()); } } } } "img" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { let value = attr.value.to_string(); // Ignore images with empty source if value == EMPTY_STRING.clone() { continue; } if opt_no_images { attr.value.clear(); attr.value.push_slice(TRANSPARENT_PIXEL); } else { let 
src_full_url: String = resolve_url(&url, &value).unwrap_or(EMPTY_STRING.clone()); let (img_dataurl, _) = retrieve_asset( cache, &src_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(img_dataurl.as_str()); } } } } "source" => { for attr in attrs_mut.iter_mut() { let attr_name: &str = &attr.name.local; if attr_name == "src" { let src_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(attr.value.to_string()); attr.value.clear(); attr.value.push_slice(src_full_url.as_str()); } else if attr_name == "srcset" { if get_node_name(&get_parent_node(&node)) == "picture" { if opt_no_images { attr.value.clear(); attr.value.push_slice(TRANSPARENT_PIXEL); } else { let srcset_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (source_dataurl, _) = retrieve_asset( cache, &srcset_full_url, true, "", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(source_dataurl.as_str()); } } } } } "a" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "href" { // Don't touch email links or hrefs which begin with a hash sign if attr.value.starts_with('#') || url_has_protocol(&attr.value) { continue; } let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(href_full_url.as_str()); } } } "script" => { if opt_no_js { // Empty src and inner content of SCRIPT tags for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { attr.value.clear(); } } node.children.borrow_mut().clear(); } else { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { let src_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); let (js_dataurl, _) = retrieve_asset( cache, &src_full_url, 
true, "application/javascript", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), EMPTY_STRING.clone())); attr.value.clear(); attr.value.push_slice(js_dataurl.as_str()); } } } } "style" => { if opt_no_css { // Empty inner content of STYLE tags node.children.borrow_mut().clear(); } } "form" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "action" { // Modify action to be a full URL if !is_valid_url(&attr.value) { let href_full_url: String = resolve_url(&url, &attr.value.to_string()) .unwrap_or(EMPTY_STRING.clone()); attr.value.clear(); attr.value.push_slice(href_full_url.as_str()); } } } } "iframe" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "src" { if opt_no_frames { // Empty the src attribute attr.value.clear(); continue; } let iframe_src: String = attr.value.to_string(); // Ignore iframes with empty source (they cause infinite loops) if iframe_src == EMPTY_STRING.clone() { continue; } let src_full_url: String = resolve_url(&url, &iframe_src).unwrap_or(EMPTY_STRING.clone()); let (iframe_data, iframe_final_url) = retrieve_asset( cache, &src_full_url, false, "text/html", opt_user_agent, opt_silent, opt_insecure, ) .unwrap_or((EMPTY_STRING.clone(), src_full_url)); let dom = html_to_dom(&iframe_data); walk_and_embed_assets( cache, &iframe_final_url, &dom.document, opt_no_css, opt_no_js, opt_no_images, opt_user_agent, opt_silent, opt_insecure, opt_no_frames, ); let mut buf: Vec<u8> = Vec::new(); serialize(&mut buf, &dom.document, SerializeOpts::default()).unwrap(); let iframe_dataurl = data_to_dataurl("text/html", &buf); attr.value.clear(); attr.value.push_slice(iframe_dataurl.as_str()); } } } "video" => { for attr in attrs_mut.iter_mut() { if &attr.name.local == "poster" { let video_poster = attr.value.to_string(); // Skip posters with empty source if video_poster == EMPTY_STRING.clone() { continue; } if opt_no_images { attr.value.clear(); } else { let poster_full_url: String = resolve_url(&url, 
// NOTE(review): this span opens mid-way through `walk_and_embed_assets`,
// inside the handling of a <video> element's "poster" attribute.
&video_poster)
    .unwrap_or(EMPTY_STRING.clone());
// Fetch the poster image and embed it as a data URL; on failure fall back
// to the resolved absolute URL so the attribute remains usable.
let (poster_dataurl, _) = retrieve_asset(
    cache,
    &poster_full_url,
    true,
    "",
    opt_user_agent,
    opt_silent,
    opt_insecure,
)
.unwrap_or((poster_full_url, EMPTY_STRING.clone()));
attr.value.clear();
attr.value.push_slice(poster_dataurl.as_str());
}
}
}
}
_ => {}
}
if opt_no_css {
    // Get rid of style attributes
    let mut style_attr_indexes = Vec::new();
    for (i, attr) in attrs_mut.iter_mut().enumerate() {
        if attr.name.local.to_lowercase() == "style" {
            style_attr_indexes.push(i);
        }
    }
    // Remove back-to-front so earlier indexes stay valid after each removal.
    style_attr_indexes.reverse();
    for attr_index in style_attr_indexes {
        attrs_mut.remove(attr_index);
    }
}
if opt_no_js {
    // Get rid of JS event attributes
    let mut js_attr_indexes = Vec::new();
    for (i, attr) in attrs_mut.iter_mut().enumerate() {
        if attr_is_event_handler(&attr.name.local) {
            js_attr_indexes.push(i);
        }
    }
    // Same back-to-front removal as for style attributes above.
    js_attr_indexes.reverse();
    for attr_index in js_attr_indexes {
        attrs_mut.remove(attr_index);
    }
}
// Dig deeper
for child in node.children.borrow().iter() {
    walk_and_embed_assets(
        cache,
        &url,
        child,
        opt_no_css,
        opt_no_js,
        opt_no_images,
        opt_user_agent,
        opt_silent,
        opt_insecure,
        opt_no_frames,
    );
}
}
_ => {
    // Note: in case of opt_no_js being set to true, there's no need to worry about
    // getting rid of comments that may contain scripts, e.g. <!--[if IE]><script>...
    // since that's not part of W3C standard and therefore gets ignored
    // by browsers other than IE [5, 9]
}
}
}

/// Parses an HTML string into an html5ever `RcDom` tree.
///
/// Panics only if reading fails; the reader is an in-memory byte slice,
/// so in practice this does not occur.
pub fn html_to_dom(data: &str) -> html5ever::rcdom::RcDom {
    parse_document(RcDom::default(), Default::default())
        .from_utf8()
        .read_from(&mut data.as_bytes())
        .unwrap()
}

/// Returns the first direct child *element* of `handle` whose tag name equals
/// `node_name`. If no child matches, returns a clone of `handle` itself
/// (callers below rely on this fallback rather than an `Option`).
fn get_child_node_by_name(handle: &Handle, node_name: &str) -> Handle {
    let children = handle.children.borrow();
    // Only element nodes are considered; text/comment children never match.
    let matching_children = children.iter().find(|child| match child.data {
        NodeData::Element { ref name, .. } => &*name.local == node_name,
        _ => false,
    });
    match matching_children {
        Some(node) => node.clone(),
        _ => {
            return handle.clone();
        }
    }
}

/// Serializes the DOM under `handle` to an HTML string.
///
/// If any of the restriction flags is set, the serialized document is
/// re-parsed and a `Content-Security-Policy` <meta> tag enforcing those
/// restrictions is prepended to <head>, then the document is serialized
/// again.
///
/// * `opt_no_css`    - forbid styles via CSP (`style-src 'none'`)
/// * `opt_no_frames` - forbid frames/child contexts via CSP
/// * `opt_no_js`     - forbid scripts via CSP
/// * `opt_no_images` - restrict images to data: URLs via CSP
/// * `opt_isolate`   - restrict all sources to inline/data: content
pub fn stringify_document(
    handle: &Handle,
    opt_no_css: bool,
    opt_no_frames: bool,
    opt_no_js: bool,
    opt_no_images: bool,
    opt_isolate: bool,
) -> String {
    let mut buf: Vec<u8> = Vec::new();
    serialize(&mut buf, handle, SerializeOpts::default())
        .expect("unable to serialize DOM into buffer");
    let mut result: String = String::from_utf8(buf).unwrap();
    if opt_isolate || opt_no_css || opt_no_frames || opt_no_js || opt_no_images {
        let mut buf: Vec<u8> = Vec::new();
        let mut dom = html_to_dom(&result);
        let doc = dom.get_document();
        let html = get_child_node_by_name(&doc, "html");
        let head = get_child_node_by_name(&html, "head");
        // Build the CSP directive list from the enabled restrictions.
        let mut content_attr = EMPTY_STRING.clone();
        if opt_isolate {
            content_attr += " default-src 'unsafe-inline' data:;";
        }
        if opt_no_css {
            content_attr += " style-src 'none';";
        }
        if opt_no_frames {
            content_attr += " frame-src 'none';child-src 'none';";
        }
        if opt_no_js {
            content_attr += " script-src 'none';";
        }
        if opt_no_images {
            content_attr += " img-src data:;";
        }
        content_attr = content_attr.trim().to_string();
        let meta = dom.create_element(
            QualName::new(None, ns!(), local_name!("meta")),
            vec![
                Attribute {
                    name: QualName::new(None, ns!(), local_name!("http-equiv")),
                    value: format_tendril!("Content-Security-Policy"),
                },
                Attribute {
                    name: QualName::new(None, ns!(), local_name!("content")),
                    value: format_tendril!("{}", content_attr),
                },
            ],
            Default::default(),
        );
        // reverse + push + reverse == prepend: the meta tag must be the
        // first child of <head>.
        head.children.borrow_mut().reverse();
        head.children.borrow_mut().push(meta.clone());
        head.children.borrow_mut().reverse();
        // Note: the CSP meta-tag has to be prepended, never appended,
        // since there already may be one defined in the document,
        // and browsers don't allow re-defining them (for obvious reasons)
        serialize(&mut buf, &doc, SerializeOpts::default())
            .expect("unable to serialize DOM into buffer");
        result = String::from_utf8(buf).unwrap();
        // Note: we can't make it isolate the page right away since it may have no HEAD element,
        // ergo we have to serialize, parse DOM again, and then finally serialize the result
    }
    result
}
extern crate irc;

use std::default::Default;
use std::thread::spawn;

use irc::client::prelude::*;

/// Minimal IRC bot example: connects to a server, echoes every message to
/// stdout, quits when someone says "bye", and reconnects whenever the
/// message stream ends without a deliberate quit.
fn main() {
    let config = Config {
        nickname: Some(format!("pickles")),
        server: Some(format!("irc.fyrechat.net")),
        channels: Some(vec![format!("#vana")]),
        .. Default::default()
    };
    let server = IrcServer::from_config(config).unwrap();
    // FIXME: if set_keepalive is stabilized, this can be readded.
    // server.conn().set_keepalive(Some(5)).unwrap();
    let server = server.clone();
    let _ = spawn(move || {
        server.identify().unwrap();
        loop {
            let mut quit = false;
            for msg in server.iter() {
                match msg {
                    Ok(msg) => {
                        print!("{}", msg.into_string());
                        // NOTE(review): assumes `Message::into_string` takes `&self`
                        // in this crate version, otherwise `msg` would be moved here
                        // and unusable below — confirm against the irc crate docs.
                        match (&msg).into() {
                            Ok(Command::PRIVMSG(_, msg)) => if msg.contains("bye") {
                                server.send_quit("").unwrap()
                            },
                            // The server acknowledges our QUIT with an ERROR line
                            // containing "Quit"; treat that as a clean shutdown.
                            Ok(Command::ERROR(ref msg)) if msg.contains("Quit") => quit = true,
                            _ => (),
                        }
                    },
                    Err(_) => break,
                }
            }
            if quit { break }
            // Stream ended without a deliberate quit: reconnect and re-identify.
            server.reconnect().unwrap();
            server.identify().unwrap();
        }
    }).join();
}
Removed autoreconnect example because it now happens automatically.
// NOTE(review): this span contains TWO snapshots of the same example file,
// separated by the commit message "Add example for `projects::all`" — the
// second snapshot adds the `all().order_by(...)` listing call.
extern crate gitlab_api as gitlab;

use std::env;

#[macro_use]
extern crate log;
extern crate env_logger;

use gitlab::GitLab;
// use gitlab::Pagination;

/// Example: list archived/owned projects from a GitLab instance configured
/// via the GITLAB_HOSTNAME and GITLAB_TOKEN environment variables.
fn main() {
    env_logger::init().unwrap();
    info!("starting up");

    // Hostname is optional: fall back to gitlab.com with a warning.
    let hostname = match env::var("GITLAB_HOSTNAME") {
        Ok(val) => val,
        Err(_) => {
            let default = String::from("gitlab.com");
            println!("Please set environment variable 'GITLAB_HOSTNAME'. Using default '{}'.",
                     default);
            default
        }
    };

    // Token is mandatory: abort with instructions if missing.
    let token = match env::var("GITLAB_TOKEN") {
        Ok(val) => val,
        Err(_) => {
            panic!("Please set environment variable 'GITLAB_TOKEN'. Take it from \
                    http://{}/profile/account",
                   hostname);
        }
    };

    // // Projects
    // // List projects
    // let projects = gl.projects().list()
    // let projects = gl.projects().archived(...).visibility(...).order_by(...).sort(...).search(...).simple(...).list()
    // // List all projects (admin)
    // let projects = gl.projects().all().list()
    // let projects = gl.projects().all().archived(...).visibility(...).order_by(...).sort(...).search(...).list()
    // // Single project
    // let project = gl.projects().id(142).list()
    // // Search
    // let projects = gl.projects().search(...).list()
    // // Owned
    // let projects = gl.projects().owned().archived(...).visibility(...).order_by(...).sort(...).search(...).list()

    let gl = GitLab::new(&hostname, &token);

    // let projects = gl.projects().list();
    let projects = gl.projects().archived(false).list();
    println!("projects: {:?}", projects);

    let projects = gl.projects().owned().archived(false).list();
    println!("projects: {:?}", projects);

    // // for i in 1..82 {
    // //     gl.set_pagination(Pagination{page: i, per_page: 1});
    // //     println!("projects: {:?}", gl.projects_list().unwrap());
    // // }
    // // gl.set_pagination(Pagination {
    // //     page: 1,
    // //     per_page: 100,
    // // });
    // let projects = gl.projects_list(projects::Listing::new()).unwrap();
    // println!("projects: {:?}", projects);
    //
    // // FIXME: Project's members are private
    // // for project in projects {
    // //     println!("{:?}", project.path_with_namespace);
    // // }
    //
    // let projects = gl.projects_all(projects::all::Listing::new()).unwrap();
    // println!("projects: {:?}", projects);
    //
    // let listing = projects::id::Listing::new(projects::id::ListingId::Id(10));
    // let projects = gl.project_id(listing).unwrap();
    // println!("projects: {:?}", projects);
}
Add example for `projects::all`
extern crate gitlab_api as gitlab;

use std::env;

#[macro_use]
extern crate log;
extern crate env_logger;

use gitlab::GitLab;
// use gitlab::Pagination;

/// Second snapshot of the same example (post-commit): identical to the
/// first, plus an admin-wide `projects().all()` listing ordered by name.
fn main() {
    env_logger::init().unwrap();
    info!("starting up");

    let hostname = match env::var("GITLAB_HOSTNAME") {
        Ok(val) => val,
        Err(_) => {
            let default = String::from("gitlab.com");
            println!("Please set environment variable 'GITLAB_HOSTNAME'. Using default '{}'.",
                     default);
            default
        }
    };

    let token = match env::var("GITLAB_TOKEN") {
        Ok(val) => val,
        Err(_) => {
            panic!("Please set environment variable 'GITLAB_TOKEN'. Take it from \
                    http://{}/profile/account",
                   hostname);
        }
    };

    // // Projects
    // // List projects
    // let projects = gl.projects().list()
    // let projects = gl.projects().archived(...).visibility(...).order_by(...).sort(...).search(...).simple(...).list()
    // // List all projects (admin)
    // let projects = gl.projects().all().list()
    // let projects = gl.projects().all().archived(...).visibility(...).order_by(...).sort(...).search(...).list()
    // // Single project
    // let project = gl.projects().id(142).list()
    // // Search
    // let projects = gl.projects().search(...).list()
    // // Owned
    // let projects = gl.projects().owned().archived(...).visibility(...).order_by(...).sort(...).search(...).list()

    let gl = GitLab::new(&hostname, &token);

    // let projects = gl.projects().list();
    let projects = gl.projects().archived(false).list();
    println!("projects: {:?}", projects);

    let projects = gl.projects().owned().archived(false).list();
    println!("projects: {:?}", projects);

    // The line added by this commit: admin-wide listing ordered by name.
    let projects = gl.projects().all().order_by(gitlab::projects::ListingOrderBy::Name).list();
    println!("projects: {:?}", projects);

    // // for i in 1..82 {
    // //     gl.set_pagination(Pagination{page: i, per_page: 1});
    // //     println!("projects: {:?}", gl.projects_list().unwrap());
    // // }
    // // gl.set_pagination(Pagination {
    // //     page: 1,
    // //     per_page: 100,
    // // });
    // let projects = gl.projects_list(projects::Listing::new()).unwrap();
    // println!("projects: {:?}", projects);
    //
    // // FIXME: Project's members are private
    // // for project in projects {
    // //     println!("{:?}", project.path_with_namespace);
    // // }
    //
    // let projects = gl.projects_all(projects::all::Listing::new()).unwrap();
    // println!("projects: {:?}", projects);
    //
    // let listing = projects::id::Listing::new(projects::id::ListingId::Id(10));
    // let projects = gl.project_id(listing).unwrap();
    // println!("projects: {:?}", projects);
}
#![allow(unused_variables)] #![allow(dead_code)] /// Basic structure of a Monitor (aka Server App, aka wtttttt) /// /// Monitors are designed so that the FoxBox can offer a simple /// IFTTT-style Web UX to let users write their own scripts. More /// complex monitors can installed from the web from a master device /// (i.e. the user's cellphone or smart tv). use dependencies::{DeviceKind, InputCapability, OutputCapability, Device, Range, Watcher, Witness}; use std::time::Duration; use std::collections::HashMap; use std::sync::Arc; use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread; extern crate rustc_serialize; use self::rustc_serialize::json::Json; extern crate itertools; use self::itertools::Zip; /// A Monitor Application, i.e. an application (or a component of an /// application) executed on the server. /// /// Monitor applications are typically used for triggering an action /// in reaction to an event: changing temperature when night falls, /// ringing an alarm when a door is opened, etc. /// /// Monitor applications are installed from a paired device. They may /// either be part of a broader application (which can install them /// through a web/REST API) or live on their own. #[derive(Clone)] struct MonitorApp { metadata: (), // FIXME: Authorizations, author, description, update url, version, ... /// `true` if the app is on, `false` otherwise. is_active: bool, /// Monitor applications have sets of requirements (e.g. "I need a /// camera"), which are allocated to actual resources through the /// UX. Re-allocating resources may be requested by the user, the /// foxbox, or an application, e.g. when replacing a device or /// upgrading the app. /// /// The position in the vector is important, as it is used to /// represent the instances of resources in the script. /// /// FIXME: Turn this `Vec` (and others) into a data structure in /// which this indexing property is built-in. 
requirements: Vec<Named<Requirement>>, /// For each requirement, the resources actually allocated to /// match the requirements. This may be 1 or more individual /// devices. /// /// Allocations can be done in several ways: /// /// - when entering the script through a UX (either the script or /// the UX can suggest allocations); /// - when installing an application with a web front-end. allocations: Vec<Vec<Named<Arc<Device>>>>, code: Vec<Trigger>, is_running: bool, } struct ConditionState { are_conditions_met: bool } type InputState = Vec<Vec<Vec<Option<Json>>>>; struct MonitorTask { watcher: Watcher, // Invariant: len() is the same as that of app.code trigger_condition_state: Vec<ConditionState>, /// For each device set allocated, for each individual device /// allocated, for each input watched by the app, the latest state /// received. // Invariant: outer len() is the same as that of self.requirements. // Invariant: inner len() is the same as number of input devices bound // to the corresponding requirement. May be empty. input_state: InputState, witnesses: Vec<Witness>, tx: Sender<MonitorOp>, rx: Receiver<MonitorOp>, app: MonitorApp, } impl MonitorTask { fn new(app: MonitorApp) -> Self { // Initialize condition state. let mut trigger_condition_state = Vec::with_capacity(app.code.len()); for _ in &app.code { trigger_condition_state.push(ConditionState { are_conditions_met: false, }); } assert_eq!(trigger_condition_state.len(), app.code.len()); // Initialize input state. let mut full_input_state = Vec::with_capacity(app.requirements.len()); for (req, allocation) in app.requirements.iter().zip(&app.allocations) { // State for this allocation. A single allocation may map // to a number of inputs (e.g. "all fire alarms"). let mut allocation_state = Vec::with_capacity(allocation.len()); for individual_device in allocation { let mut individual_device_state = Vec::with_capacity(req.data.inputs.len()); for _ in 0 .. 
req.data.inputs.len() { individual_device_state.push(None); } allocation_state.push(individual_device_state); } assert_eq!(allocation_state.len(), allocation.len()); full_input_state.push(allocation_state); } assert_eq!(full_input_state.len(), app.requirements.len()); // Start watching let mut watcher = Watcher::new(); let mut witnesses = Vec::new(); let (tx, rx) = channel(); for (req, allocation, allocation_state, allocation_index) in Zip::new((&app.requirements, &app.allocations, &full_input_state, 0..)) { for (individual_device, individual_device_state, individual_device_index) in Zip::new((allocation, allocation_state, 0..)) { for (input, individual_input_state, individual_input_index) in Zip::new((&req.data.inputs, individual_device_state, 0..)) { // FIXME: We currently use `Range::any()` for simplicity. // However, in most cases, we should be able to look inside // the condition to build a better `Range`. witnesses.push( watcher.add( &individual_device.data, &input.data, &Range::any(), |data| { let _ = tx.send(MonitorOp::Update { data: data, allocation_index: allocation_index, individual_device_index: individual_device_index, individual_input_index: individual_input_index }); // FIXME: Find a better structure than sending indices. // Ignore errors. If the thread is shutting down, it's ok to lose messages. })); } } } MonitorTask { watcher: watcher, trigger_condition_state: trigger_condition_state, input_state: full_input_state, witnesses: witnesses, app: app, tx: tx, rx: rx, } } fn run(&mut self) { for msg in &self.rx { use self::MonitorOp::*; match msg { Update { data, allocation_index, individual_device_index, individual_input_index } => { // FIXME: Three raw indices make for a crappy data structure. // Update the state self.input_state[allocation_index][individual_device_index][individual_input_index] = Some(data); // Find out if we should execute triggers. 
// FIXME: We could optimize this by finding out which triggers // are tainted by the update and only rechecking these. for (trigger, trigger_condition_state) in Zip::new((&self.app.code, &mut self.trigger_condition_state)) { if trigger.condition.is_met(&self.input_state) { if !trigger_condition_state.are_conditions_met { // Conditions were not met, now they are, so it is // time to start executing. We copy the inputs // and dispatch to a background thread // FIXME: Handle cooldown. trigger_condition_state.are_conditions_met = true; let _ = self.tx.send(MonitorOp::Execute { state: self.input_state.clone(), commands: trigger.execute.clone() }); // Ignore errors. If the thread is shutting down, it's ok to lose messages. } } else { trigger_condition_state.are_conditions_met = false; } } }, Execute {..} => { panic!("Not implemented"); } Stop => { // Clean up watcher, stop the thread. self.witnesses.clear(); return; } } } } } enum MonitorOp { Update{data: Json, allocation_index: usize, individual_device_index: usize, individual_input_index: usize}, Execute{state: InputState, commands: Vec<Command>}, Stop } impl MonitorApp { pub fn start(&mut self) { if self.is_running { return; } self.is_running = true; let mut task = MonitorTask::new(self.clone()); thread::spawn(move || { task.run(); }); } } /// Data labelled with a user-readable name. #[derive(Clone)] struct Named<T> { /// User-readable name. name: String, data: T, } /// A resource needed by this application. Typically, a definition of /// device with some input our output capabilities. #[derive(Clone)] struct Requirement { /// The kind of resource, e.g. "a flashbulb". kind: DeviceKind, /// Input capabilities we need from the device, e.g. "the time of /// day", "the current temperature". inputs: Vec<Named<InputCapability>>, /// Output capabilities we need from the device, e.g. "play a /// sound", "set luminosity". outputs: Vec<Named<OutputCapability>>, /// Minimal number of resources required. 
If unspecified in the /// script, this is 1. min: u32, /// Maximal number of resources that may be handled. If /// unspecified in the script, this is the same as `min`. max: u32, // FIXME: We may need cooldown properties. } /// A single trigger, i.e. "when some condition becomes true, do /// something". #[derive(Clone)] struct Trigger { /// The condition in which to execute the trigger. condition: Disjunction, /// Stuff to do once `condition` is met. execute: Vec<Command>, /// Minimal duration between two executions of the trigger. If a /// duration was not picked by the developer, a reasonable default /// duration should be picked (e.g. 10 minutes). cooldown: Duration, } /// A disjunction (e.g. a "or") of conditions. /// /// # Example /// /// Door alarm #1 OR door alarm #2 #[derive(Clone)] struct Disjunction { /// The disjunction is true iff any of the following conjunctions is true. any: Vec<Conjunction> } impl Disjunction { fn is_met(&self, input_state: &InputState) -> bool { panic!("Not implemented"); } } /// A conjunction (e.g. a "and") of conditions. #[derive(Clone)] struct Conjunction { /// The conjunction is true iff all of the following expressions evaluate to true. all: Vec<Expression> } #[derive(Clone)] enum Value { Json(Json), Blob{data: Arc<Vec<u8>>, mime_type: String}, } /// An expression in the language. Note that expressions may contain /// inputs, which are typically asynchronous. Consequently, /// expressions are evaluated asynchronously. #[derive(Clone)] enum Expression { Const { value: Value, /// We are dealing with real-world values, so physical units /// will prevent real-world accidents. unit: (), // FIXME: An actual unit /// The source for this value. Used whenever we need to find /// out which sensor triggered the trigger (e.g. "where is the /// fire?"). sources: Vec<usize>, }, /// Dynamic values, including both actual sensors and higher-level values. 
/// /// # Example /// /// "Is there movement on motion detector" (may be true/false) /// /// # Example /// /// "Date of the latest motion on motion detector" (a Date) Input { /// A reference to the device used for input. /// This is an index in `requirements` and `allocations`. index: usize, // FIXME: We should use a custom type. /// A property to fetch (e.g. "luminosity" or "meta/latest-on"). property: InputCapability, }, /// Pure functions on values. The fact that they are pure /// functions is important, as it lets us find out automatically /// which subexpressions (including inputs) do not need to be /// re-evaluated. Function { function: Function, // FIXME: use a Box<> for now to avoid recursive type. arguments: Vec<Box<Expression>> }, } #[derive(Clone)] enum Function { // Operations on all values. InRange, OutOfRange, // Operations on strings Contains, NotContains, } /* enum Function { // Operations on all values. Equals, NotEquals, // Operations on numbers, dates, durations. GreaterEq, Greater, LowerEq, Lower, Plus, Minus, // Operations on strings Contains, NotContains, // Etc. FIXME: We'll need operations on dates, extracting name // from device, etc. } */ /// Stuff to actually do. In practice, this is always a REST call. // FIXME: Need to decide how much we wish to sandbox apps. #[derive(Clone)] struct Command { /// The resource to which this command applies, /// as an index in Trigger.requirements/allocations. destination: usize, // FIXME: Use custom type. action: OutputCapability, arguments: HashMap<String, Option<Expression>> } Got rid of most uses of ugly unyped indices #![allow(unused_variables)] #![allow(dead_code)] /// Basic structure of a Monitor (aka Server App, aka wtttttt) /// /// Monitors are designed so that the FoxBox can offer a simple /// IFTTT-style Web UX to let users write their own scripts. More /// complex monitors can installed from the web from a master device /// (i.e. the user's cellphone or smart tv). 
use dependencies::{DeviceKind, InputCapability, OutputCapability, Device, Range, Watcher, Witness};

use std::time::Duration;
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

extern crate rustc_serialize;
use self::rustc_serialize::json::Json;

extern crate itertools;
use self::itertools::Zip;

/// A Monitor Application, i.e. an application (or a component of an
/// application) executed on the server.
///
/// Monitor applications are typically used for triggering an action
/// in reaction to an event: changing temperature when night falls,
/// ringing an alarm when a door is opened, etc.
///
/// Monitor applications are installed from a paired device. They may
/// either be part of a broader application (which can install them
/// through a web/REST API) or live on their own.
#[derive(Clone)]
struct MonitorApp {
    metadata: (), // FIXME: Authorizations, author, description, update url, version, ...

    /// `true` if the app is on, `false` otherwise.
    is_active: bool,

    /// Monitor applications have sets of requirements (e.g. "I need a
    /// camera"), which are allocated to actual resources through the
    /// UX. Re-allocating resources may be requested by the user, the
    /// foxbox, or an application, e.g. when replacing a device or
    /// upgrading the app.
    ///
    /// The position in the vector is important, as it is used to
    /// represent the instances of resources in the script.
    ///
    /// FIXME: Turn this `Vec` (and others) into a data structure in
    /// which this indexing property is built-in.
    requirements: Vec<Named<Requirement>>,

    /// For each requirement, the resources actually allocated to
    /// match the requirements. This may be 1 or more individual
    /// devices.
    ///
    /// Allocations can be done in several ways:
    ///
    /// - when entering the script through a UX (either the script or
    ///   the UX can suggest allocations);
    /// - when installing an application with a web front-end.
    allocations: Vec<Vec<Named<Arc<Device>>>>,

    // The list of triggers that constitute the script itself.
    code: Vec<Trigger>,

    // `true` while a background `MonitorTask` spawned from this app is believed
    // to be running. NOTE(review): `start()` flips this on a *clone* boundary —
    // see the note on `start()` below.
    is_running: bool,
}

impl<'a> MonitorApp {

    /*
    Ideally, I'd like something along these lines. But, as rustc kindly pointed out to me, that's pretty much unsafe.

    fn iter_state_index(&'a self) -> Box<Iterator<Item=InputIndex> + 'a> {
        Box::new(
            Zip::new((&self.requirements, &self.allocations, 0..)).
                flat_map(|(req, allocation, allocation_index)| {
                    let allocation_index_clone = allocation_index.clone();
                    Zip::new((allocation, 0..)).
                        flat_map(|(individual_device, individual_device_index)| {
                            Zip::new((&req.data.inputs, 0..)).
                                map(|(_, individual_input_index)| {
                                    InputIndex {
                                        allocation: allocation_index_clone,
                                        device: individual_device_index,
                                        input: individual_input_index
                                    }
                                })
                        })
                }))
    }
    */

    /// Enumerate every (requirement, device, input) triple currently allocated,
    /// as a flat list of `InputIndex` coordinates.
    ///
    /// Eager (`Vec`) replacement for the lazy-iterator version commented out
    /// above, which did not borrow-check.
    fn iter_state_index(&'a self) -> Vec<InputIndex> {
        let mut vec = Vec::new();
        for (req, allocation, allocation_index) in Zip::new((&self.requirements, &self.allocations, 0..)) {
            for (individual_device, individual_device_index) in Zip::new((allocation, 0..)) {
                for (input, individual_input_index) in Zip::new((&req.data.inputs, 0..)) {
                    // `individual_device` and `input` are only used to drive the
                    // iteration; the coordinates alone identify them later.
                    vec.push(InputIndex {
                        allocation: allocation_index,
                        device: individual_device_index,
                        input: individual_input_index
                    })
                }
            }
        }
        vec
    }

    /// Resolve an `InputIndex` to the concrete device it points at.
    fn get_individual_device(&self, index: &InputIndex) -> Arc<Device> {
        self.allocations[index.allocation][index.device].data.clone()
    }

    /// Resolve an `InputIndex` to the input capability it points at.
    fn get_input(&self, index: &InputIndex) -> InputCapability {
        self.requirements[index.allocation].data.inputs[index.input].data.clone()
    }
}

/// Coordinates of one watched input: which requirement slot, which device
/// within that slot's allocation, and which input of that requirement.
/// Used to index both `MonitorApp` and `MonitorTaskState::input_state`.
struct InputIndex {
    allocation: usize,
    device: usize,
    input: usize,
}

/// Per-trigger latch: whether the trigger's condition was met on the previous
/// evaluation (used to fire only on the false -> true edge).
struct ConditionState {
    are_conditions_met: bool
}

/// Latest value seen for each watched input, indexed exactly like `InputIndex`:
/// `[allocation][device][input]`. `None` until a first update arrives.
type InputState = Vec<Vec<Vec<Option<Json>>>>;

/// Mutable state owned by the background task executing one `MonitorApp`.
struct MonitorTaskState {
    // Invariant: len() is the same as that of app.code
    trigger_condition_state: Vec<ConditionState>,

    /// For each device set allocated, for each individual device
    /// allocated, for each input watched by the app, the latest state
    /// received.
    // Invariant: outer len() is the same as that of self.requirements.
    // Invariant: inner len() is the same as number of input devices bound
    // to the corresponding requirement. May be empty.
    input_state: InputState,

    // Watch registrations; clearing this vector is how `run()` unsubscribes on Stop.
    witnesses: Vec<Witness>,

    // The task runs on its own clone of the app (see `MonitorApp::start`).
    app: MonitorApp,
}

impl MonitorTaskState {
    /// Record the latest value received for the input at `index`.
    fn set_state(&mut self, index: &InputIndex, value: Option<Json>) {
        self.input_state[index.allocation][index.device][index.input] = value;
    }
}

/// Channel endpoints for the task's message loop. The task keeps both ends:
/// `tx` is also cloned into watcher callbacks and used to self-post `Execute`.
struct MonitorComm {
    tx: Sender<MonitorOp>,
    rx: Receiver<MonitorOp>,
}

/// A running instance of a `MonitorApp`: its state plus its mailbox.
struct MonitorTask {
    state: MonitorTaskState,
    comm: MonitorComm,
}

impl MonitorTask {
    /// Build the task for `app`: size the per-trigger and per-input state
    /// tables, then register one watch per (requirement, device, input) so that
    /// device updates are forwarded to the task's channel as `MonitorOp::Update`.
    fn new(app: MonitorApp) -> Self {

        // Initialize condition state.
        let mut trigger_condition_state = Vec::with_capacity(app.code.len());
        for _ in &app.code {
            trigger_condition_state.push(ConditionState {
                are_conditions_met: false,
            });
        }
        assert_eq!(trigger_condition_state.len(), app.code.len());

        // Initialize input state.
        let mut full_input_state = Vec::with_capacity(app.requirements.len());
        for (req, allocation) in app.requirements.iter().zip(&app.allocations) {

            // State for this allocation. A single allocation may map
            // to a number of inputs (e.g. "all fire alarms").
            let mut allocation_state = Vec::with_capacity(allocation.len());
            for individual_device in allocation {
                let mut individual_device_state = Vec::with_capacity(req.data.inputs.len());
                for _ in 0 .. req.data.inputs.len() {
                    individual_device_state.push(None);
                }
                allocation_state.push(individual_device_state);
            }
            assert_eq!(allocation_state.len(), allocation.len());
            full_input_state.push(allocation_state);
        }
        assert_eq!(full_input_state.len(), app.requirements.len());

        // Start watching
        let mut watcher = Watcher::new();
        let mut witnesses = Vec::new();
        let (tx, rx) = channel();

        // NOTE(review): `tx` and `state_index` are captured by a closure created
        // inside a loop; with today's borrow rules each iteration would need its
        // own `tx.clone()` and a `move` closure. Presumably `Watcher::add` was
        // designed around this pre-1.0 idiom — confirm this still compiles.
        for state_index in app.iter_state_index() {
            // FIXME: We currently use `Range::any()` for simplicity.
            // However, in most cases, we should be able to look inside
            // the condition to build a better `Range`.
            witnesses.push(
                watcher.add(
                    &app.get_individual_device(&state_index), // FIXME: Implement
                    &app.get_input(&state_index), // FIXME: Implement
                    &Range::any(),
                    |data| {
                        let _ = tx.send(MonitorOp::Update {
                            data: data,
                            index: state_index,
                        });
                        // Ignore errors. If the thread is shutting down, it's ok to lose messages.
                    }));
        }

        /*
        for (req, allocation, allocation_state, allocation_index) in Zip::new((&app.requirements, &app.allocations, &full_input_state, 0..)) {
            for (individual_device, individual_device_state, individual_device_index) in Zip::new((allocation, allocation_state, 0..)) {
                for (input, individual_input_state, individual_input_index) in Zip::new((&req.data.inputs, individual_device_state, 0..)) {
                    // FIXME: We currently use `Range::any()` for simplicity.
                    // However, in most cases, we should be able to look inside
                    // the condition to build a better `Range`.
                    witnesses.push(
                        watcher.add(
                            &individual_device.data,
                            &input.data,
                            &Range::any(),
                            |data| {
                                let _ = tx.send(MonitorOp::Update {
                                    data: data,
                                    allocation_index: allocation_index,
                                    individual_device_index: individual_device_index,
                                    individual_input_index: individual_input_index
                                });
                                // FIXME: Find a better structure than sending indices.
                                // Ignore errors. If the thread is shutting down, it's ok to lose messages.
                            }));
                }
            }
        }
        */

        MonitorTask {
            state: MonitorTaskState {
                trigger_condition_state: trigger_condition_state,
                input_state: full_input_state,
                witnesses: witnesses,
                app: app,
            },
            comm: MonitorComm {
                tx: tx,
                rx: rx,
            }
        }
    }

    /// Message loop: blocks on the channel until it is closed or a `Stop`
    /// arrives. `Update` refreshes the input table and re-evaluates every
    /// trigger, firing `Execute` only on a false -> true condition edge.
    fn run(&mut self) {
        for msg in &self.comm.rx {
            use self::MonitorOp::*;
            match msg {
                Update { data, index, } => {
                    // Update the state
                    self.state.set_state(&index, Some(data));

                    // Find out if we should execute triggers.
                    // FIXME: We could optimize this by finding out which triggers
                    // are tainted by the update and only rechecking these.
                    for (trigger, trigger_condition_state) in Zip::new((&self.state.app.code, &mut self.state.trigger_condition_state)) {
                        if trigger.condition.is_met(&self.state.input_state) {
                            if !trigger_condition_state.are_conditions_met {
                                // Conditions were not met, now they are, so it is
                                // time to start executing. We copy the inputs
                                // and dispatch to a background thread

                                // FIXME: Handle cooldown.
                                trigger_condition_state.are_conditions_met = true;
                                // The task posts `Execute` to itself rather than
                                // running commands inline.
                                let _ = self.comm.tx.send(MonitorOp::Execute {
                                    state: self.state.input_state.clone(),
                                    commands: trigger.execute.clone()
                                });
                                // Ignore errors. If the thread is shutting down, it's ok to lose messages.
                            }
                        } else {
                            trigger_condition_state.are_conditions_met = false;
                        }
                    }
                },
                Execute {..} => {
                    panic!("Not implemented");
                }
                Stop => {
                    // Clean up watcher, stop the thread.
                    self.state.witnesses.clear();
                    return;
                }
            }
        }
    }
}

/// Messages understood by `MonitorTask::run`.
enum MonitorOp {
    Update{data: Json, index: InputIndex},
    Execute{state: InputState, commands: Vec<Command>},
    Stop
}

impl MonitorApp {
    /// Spawn the background task for this app, once.
    ///
    /// NOTE(review): the spawned task runs on `self.clone()`, so the task never
    /// observes later mutations of this instance, and nothing ever resets
    /// `is_running` when the task exits — TODO confirm this is intended.
    pub fn start(&mut self) {
        if self.is_running {
            return;
        }
        self.is_running = true;
        let mut task = MonitorTask::new(self.clone());
        thread::spawn(move || {
            task.run();
        });
    }
}

/// Data labelled with a user-readable name.
#[derive(Clone)]
struct Named<T> {
    /// User-readable name.
    name: String,
    data: T,
}

/// A resource needed by this application. Typically, a definition of
/// device with some input our output capabilities.
#[derive(Clone)]
struct Requirement {
    /// The kind of resource, e.g. "a flashbulb".
    kind: DeviceKind,

    /// Input capabilities we need from the device, e.g. "the time of
    /// day", "the current temperature".
    inputs: Vec<Named<InputCapability>>,

    /// Output capabilities we need from the device, e.g. "play a
    /// sound", "set luminosity".
    outputs: Vec<Named<OutputCapability>>,

    /// Minimal number of resources required. If unspecified in the
    /// script, this is 1.
    min: u32,

    /// Maximal number of resources that may be handled. If
    /// unspecified in the script, this is the same as `min`.
    max: u32,

    // FIXME: We may need cooldown properties.
}

/// A single trigger, i.e. "when some condition becomes true, do
/// something".
#[derive(Clone)]
struct Trigger {
    /// The condition in which to execute the trigger.
    condition: Disjunction,

    /// Stuff to do once `condition` is met.
    execute: Vec<Command>,

    /// Minimal duration between two executions of the trigger. If a
    /// duration was not picked by the developer, a reasonable default
    /// duration should be picked (e.g. 10 minutes).
    cooldown: Duration,
}

/// A disjunction (e.g. a "or") of conditions.
///
/// # Example
///
/// Door alarm #1 OR door alarm #2
#[derive(Clone)]
struct Disjunction {
    /// The disjunction is true iff any of the following conjunctions is true.
    any: Vec<Conjunction>
}

impl Disjunction {
    // Evaluation stub: always panics for now (see `MonitorTask::run`, which calls it).
    fn is_met(&self, input_state: &InputState) -> bool {
        panic!("Not implemented");
    }
}

/// A conjunction (e.g. a "and") of conditions.
#[derive(Clone)]
struct Conjunction {
    /// The conjunction is true iff all of the following expressions evaluate to true.
    all: Vec<Expression>
}

/// A literal value in the script language: either structured JSON or an
/// opaque binary blob with a MIME type.
#[derive(Clone)]
enum Value {
    Json(Json),
    Blob{data: Arc<Vec<u8>>, mime_type: String},
}

/// An expression in the language. Note that expressions may contain
/// inputs, which are typically asynchronous. Consequently,
/// expressions are evaluated asynchronously.
#[derive(Clone)]
enum Expression {
    Const {
        value: Value,

        /// We are dealing with real-world values, so physical units
        /// will prevent real-world accidents.
        unit: (), // FIXME: An actual unit

        /// The source for this value. Used whenever we need to find
        /// out which sensor triggered the trigger (e.g. "where is the
        /// fire?").
        sources: Vec<usize>,
    },

    /// Dynamic values, including both actual sensors and higher-level values.
    ///
    /// # Example
    ///
    /// "Is there movement on motion detector" (may be true/false)
    ///
    /// # Example
    ///
    /// "Date of the latest motion on motion detector" (a Date)
    Input {
        /// A reference to the device used for input.
        /// This is an index in `requirements` and `allocations`.
        index: usize, // FIXME: We should use a custom type.

        /// A property to fetch (e.g. "luminosity" or "meta/latest-on").
        property: InputCapability,
    },

    /// Pure functions on values. The fact that they are pure
    /// functions is important, as it lets us find out automatically
    /// which subexpressions (including inputs) do not need to be
    /// re-evaluated.
    Function {
        function: Function,
        // FIXME: use a Box<> for now to avoid recursive type.
        arguments: Vec<Box<Expression>>
    },
}

/// The (currently tiny) set of pure functions available to expressions.
#[derive(Clone)]
enum Function {
    // Operations on all values.
    InRange,
    OutOfRange,
    // Operations on strings
    Contains,
    NotContains,
}

/*
enum Function {
    // Operations on all values.
    Equals,
    NotEquals,

    // Operations on numbers, dates, durations.
    GreaterEq,
    Greater,
    LowerEq,
    Lower,
    Plus,
    Minus,

    // Operations on strings
    Contains,
    NotContains,

    // Etc. FIXME: We'll need operations on dates, extracting name
    // from device, etc.
}
*/

/// Stuff to actually do. In practice, this is always a REST call.
// FIXME: Need to decide how much we wish to sandbox apps.
#[derive(Clone)]
struct Command {
    /// The resource to which this command applies,
    /// as an index in Trigger.requirements/allocations.
    destination: usize, // FIXME: Use custom type.

    action: OutputCapability,

    arguments: HashMap<String, Option<Expression>>
}
// Copyright 2017 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Workaround: Clippy does not correctly handle borrowing checking rules for returned types.
#![cfg_attr(feature="cargo-clippy", allow(let_and_return))]

use futures::{self, Async, Future, Stream};

use std::ops::{AddAssign, Deref};
use std::sync::{Arc, Mutex};
use std::cell::{Ref, RefCell, RefMut};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::{BTreeMap, BinaryHeap, HashMap, HashSet, VecDeque};
use std::iter::FromIterator;

use exonum::node::{Configuration, ListenerConfig, NodeHandler, ServiceConfig, State, SystemStateProvider};
use exonum::blockchain::{Block, BlockProof, Blockchain, ConsensusConfig, GenesisConfig, Schema, Service, SharedNodeState, StoredConfiguration, TimeoutAdjusterConfig, Transaction, ValidatorKeys};
use exonum::storage::{MapProof, MemoryDB};
use exonum::messages::{Any, Connect, Message, RawMessage, RawTransaction, Status};
use exonum::crypto::{gen_keypair_from_seed, Hash, PublicKey, SecretKey, Seed};
#[cfg(test)]
use exonum::crypto::gen_keypair;
use exonum::helpers::{init_logger, Height, Milliseconds, Round, ValidatorId};
use exonum::events::{Event, EventHandler, NetworkEvent, NetworkRequest};
use exonum::events::handler::{NodeChannel, NodeReceiver, TimeoutRequest};
use exonum::events::network::NetworkConfiguration;

use timestamping::TimestampingService;
use config_updater::ConfigUpdateService;
use sandbox_tests_helper::VALIDATOR_0;

/// Clock shared between the sandbox and the node under test; tests advance it
/// manually via `Sandbox::add_time` instead of using wall-clock time.
pub type SharedTime = Arc<Mutex<SystemTime>>;

/// `SystemStateProvider` implementation that serves the sandbox's fake clock
/// and a fixed listen address to the node under test.
#[derive(Debug)]
pub struct SandboxSystemStateProvider {
    listen_address: SocketAddr,
    shared_time: SharedTime,
}

impl SystemStateProvider for SandboxSystemStateProvider {
    fn current_time(&self) -> SystemTime {
        *self.shared_time.lock().unwrap()
    }
    fn listen_address(&self) -> SocketAddr {
        self.listen_address
    }
}

/// Mutable interior of the sandbox: the node handler under test plus captured
/// outbound messages and pending timeouts.
#[derive(Debug)]
pub struct SandboxInner {
    pub time: SharedTime,
    pub handler: NodeHandler,
    // Outbound (addr, message) pairs captured instead of being sent on the network.
    pub sent: VecDeque<(SocketAddr, RawMessage)>,
    pub events: VecDeque<Event>,
    // Pending timeouts, ordered by `TimeoutRequest`'s own ordering.
    pub timers: BinaryHeap<TimeoutRequest>,
    pub events_receiver: NodeReceiver,
}

impl SandboxInner {
    /// Drain both request channels produced by the handler.
    pub fn process_events(&mut self) {
        self.process_network_requests();
        self.process_timeout_requests();
    }

    /// Feed one event to the node handler, then capture whatever requests it emitted.
    pub fn handle_event<E: Into<Event>>(&mut self, e: E) {
        self.handler.handle_event(e.into());
        self.process_events();
    }

    // Move send requests into `self.sent`; disconnect requests are ignored.
    // The `futures::lazy` + `wait()` wrapper runs the poll loop synchronously.
    fn process_network_requests(&mut self) {
        let network_getter = futures::lazy(|| -> Result<(), ()> {
            while let Async::Ready(Some(network)) = self.events_receiver.network.poll()? {
                debug!("{:?}", network);
                match network {
                    NetworkRequest::SendMessage(peer, msg) => self.sent.push_back((peer, msg)),
                    NetworkRequest::DisconnectWithPeer(_) => {}
                }
            }
            Ok(())
        });
        network_getter.wait().unwrap();
    }

    // Move timeout requests into the `timers` heap; they fire later from `add_time`.
    fn process_timeout_requests(&mut self) {
        let timeouts_getter = futures::lazy(|| -> Result<(), ()> {
            while let Async::Ready(Some(timeout)) = self.events_receiver.timeout.poll()? {
                debug!("{:?}", timeout);
                self.timers.push(timeout);
            }
            Ok(())
        });
        timeouts_getter.wait().unwrap();
    }
}

/// Deterministic single-node test harness: drives a `NodeHandler` with scripted
/// messages and a fake clock, and asserts on the messages the node sends back.
pub struct Sandbox {
    pub validators_map: HashMap<PublicKey, SecretKey>,
    pub services_map: HashMap<PublicKey, SecretKey>,
    inner: RefCell<SandboxInner>,
    addresses: Vec<SocketAddr>,
}

impl Sandbox {
    /// Exchange `Connect` messages with validators `start_index..end_index`,
    /// then assert the node sent nothing else.
    pub fn initialize(
        &self,
        connect_message_time: SystemTime,
        start_index: usize,
        end_index: usize,
    ) {
        let connect = Connect::new(
            &self.p(VALIDATOR_0),
            self.a(VALIDATOR_0),
            connect_message_time,
            self.s(VALIDATOR_0),
        );

        for validator in start_index..end_index {
            let validator = ValidatorId(validator as u16);
            self.recv(Connect::new(
                &self.p(validator),
                self.a(validator),
                self.time(),
                self.s(validator),
            ));
            self.send(self.a(validator), connect.clone());
        }

        self.check_unexpected_message()
    }

    /// Replace the address list and extend the key maps (used by tests that
    /// reconfigure the validator set).
    pub fn set_validators_map(
        &mut self,
        new_addresses_len: u8,
        validators: Vec<(PublicKey, SecretKey)>,
        services: Vec<(PublicKey, SecretKey)>,
    ) {
        self.addresses = (1..(new_addresses_len + 1) as u8)
            .map(gen_primitive_socket_addr)
            .collect::<Vec<_>>();
        self.validators_map.extend(validators);
        self.services_map.extend(services);
    }

    // Panics if the node produced any outbound message the test did not consume.
    fn check_unexpected_message(&self) {
        if let Some((addr, msg)) = self.inner.borrow_mut().sent.pop_front() {
            let any_msg = Any::from_raw(msg.clone()).expect("Send incorrect message");
            panic!("Send unexpected message {:?} to {}", any_msg, addr);
        }
    }

    pub fn tx_from_raw(&self, raw: RawTransaction) -> Option<Box<Transaction>> {
        self.blockchain_ref().tx_from_raw(raw)
    }

    /// Consensus public key of validator `id`.
    pub fn p(&self, id: ValidatorId) -> PublicKey {
        self.validators()[id.0 as usize]
    }

    /// Consensus secret key of validator `id`.
    pub fn s(&self, id: ValidatorId) -> &SecretKey {
        let p = self.p(id);
        &self.validators_map[&p]
    }

    pub fn service_public_key(&self, id: ValidatorId) -> PublicKey {
        let id: usize = id.into();
        self.nodes_keys()[id].service_key
    }

    pub fn service_secret_key(&self, id: ValidatorId) -> &SecretKey {
        let public_key = self.service_public_key(id);
        &self.services_map[&public_key]
    }

    /// Socket address of validator `id`.
    pub fn a(&self, id: ValidatorId) -> SocketAddr {
        let id: usize = id.into();
        self.addresses[id]
    }

    pub fn validators(&self) -> Vec<PublicKey> {
        self.cfg()
            .validator_keys
            .iter()
            .map(|x| x.consensus_key)
            .collect()
    }

    pub fn nodes_keys(&self) -> Vec<ValidatorKeys> {
        self.cfg().validator_keys
    }

    pub fn n_validators(&self) -> usize {
        self.validators().len()
    }

    /// Current value of the sandbox's fake clock.
    pub fn time(&self) -> SystemTime {
        let inner = self.inner.borrow();
        let time = *inner.time.lock().unwrap().deref();
        time
    }

    pub fn node_handler(&self) -> Ref<NodeHandler> {
        Ref::map(self.inner.borrow(), |inner| &inner.handler)
    }

    pub fn node_handler_mut(&self) -> RefMut<NodeHandler> {
        RefMut::map(self.inner.borrow_mut(), |inner| &mut inner.handler)
    }

    pub fn node_state(&self) -> Ref<State> {
        Ref::map(self.inner.borrow(), |inner| inner.handler.state())
    }

    pub fn blockchain_ref(&self) -> Ref<Blockchain> {
        Ref::map(self.inner.borrow(), |inner| &inner.handler.blockchain)
    }

    pub fn blockchain_mut(&self) -> RefMut<Blockchain> {
        RefMut::map(
            self.inner.borrow_mut(),
            |inner| &mut inner.handler.blockchain,
        )
    }

    /// Deliver `msg` to the node as if it arrived from the network.
    /// Panics first if an earlier outbound message went unconsumed.
    pub fn recv<T: Message>(&self, msg: T) {
        self.check_unexpected_message();
        // TODO Think about addresses.
        let dummy_addr = SocketAddr::from(([127, 0, 0, 1], 12_039));
        let event = NetworkEvent::MessageReceived(dummy_addr, msg.raw().clone());
        self.inner.borrow_mut().handle_event(event);
    }

    /// Assert that the node's next captured outbound message is exactly `msg`
    /// addressed to `addr`; panics otherwise or if nothing was sent.
    pub fn send<T: Message>(&self, addr: SocketAddr, msg: T) {
        let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap();
        let sended = self.inner.borrow_mut().sent.pop_front();
        if let Some((real_addr, real_msg)) = sended {
            let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message");
            if real_addr != addr || any_real_msg != any_expected_msg {
                panic!(
                    "Expected to send the message {:?} to {} instead sending {:?} to {}",
                    any_expected_msg,
                    addr,
                    any_real_msg,
                    real_addr
                )
            }
        } else {
            panic!(
                "Expected to send the message {:?} to {} but nothing happened",
                any_expected_msg,
                addr
            );
        }
    }

    /// Assert that `msg` was broadcast to every peer address (all but our own).
    pub fn broadcast<T: Message>(&self, msg: T) {
        self.broadcast_to_addrs(msg, self.addresses.iter().skip(1));
    }

    // TODO: add self-test for broadcasting?
    /// Assert that `msg` was sent exactly once to each address in `addresses`,
    /// in any order; panics on a wrong message, duplicate, or shortfall.
    pub fn broadcast_to_addrs<'a, T: Message, I>(&self, msg: T, addresses: I)
    where
        I: IntoIterator<Item = &'a SocketAddr>,
    {
        let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap();

        // If node is excluded from validators, then it still will broadcast messages.
        // So in that case we should not skip addresses and validators count.
        let mut expected_set: HashSet<_> = HashSet::from_iter(addresses);

        for _ in 0..expected_set.len() {
            let sended = self.inner.borrow_mut().sent.pop_front();
            if let Some((real_addr, real_msg)) = sended {
                let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message");
                if any_real_msg != any_expected_msg {
                    panic!(
                        "Expected to broadcast the message {:?} instead sending {:?} to {}",
                        any_expected_msg,
                        any_real_msg,
                        real_addr
                    )
                }
                if !expected_set.contains(&real_addr) {
                    panic!(
                        "Double send the same message {:?} to {:?} during broadcasting",
                        any_expected_msg,
                        real_addr
                    )
                } else {
                    expected_set.remove(&real_addr);
                }
            } else {
                panic!(
                    "Expected to broadcast the message {:?} but someone don't recieve \
                     messages: {:?}",
                    any_expected_msg,
                    expected_set
                );
            }
        }
    }

    pub fn check_broadcast_status(&self, height: Height, block_hash: &Hash) {
        self.broadcast(Status::new(
            &self.node_public_key(),
            height,
            block_hash,
            &self.node_secret_key(),
        ));
    }

    /// Advance the fake clock by `duration` and fire, in order, every pending
    /// timeout that became due.
    pub fn add_time(&self, duration: Duration) {
        self.check_unexpected_message();
        let now = {
            let inner = self.inner.borrow_mut();
            let mut time = inner.time.lock().unwrap();
            time.add_assign(duration);
            *time.deref()
        };
        // handle timeouts if occurs
        loop {
            // Pop inside a short borrow scope: `handle_event` below re-borrows `inner`.
            let timeout = {
                let timers = &mut self.inner.borrow_mut().timers;
                if let Some(TimeoutRequest(time, timeout)) = timers.pop() {
                    if time > now {
                        // Not due yet — put it back and stop.
                        timers.push(TimeoutRequest(time, timeout));
                        break;
                    } else {
                        timeout
                    }
                } else {
                    break;
                }
            };
            self.inner.borrow_mut().handle_event(timeout);
        }
    }

    pub fn is_leader(&self) -> bool {
        self.node_state().is_leader()
    }

    pub fn leader(&self, round: Round) -> ValidatorId {
        self.node_state().leader(round)
    }

    pub fn is_validator(&self) -> bool {
        self.node_state().is_validator()
    }

    pub fn last_block(&self) -> Block {
        self.blockchain_ref().last_block()
    }

    pub fn last_hash(&self) -> Hash {
        self.blockchain_ref().last_hash()
    }

    pub fn last_state_hash(&self) -> Hash {
        *self.last_block().state_hash()
    }

    /// Drop from `txs` any transaction that is duplicated within the input or
    /// already committed in the blockchain schema.
    pub fn filter_present_transactions<'a, I>(&self, txs: I) -> Vec<RawMessage>
    where
        I: IntoIterator<Item = &'a RawMessage>,
    {
        let mut unique_set: HashSet<Hash> = HashSet::new();
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        let schema_transactions = schema.transactions();
        txs.into_iter()
            .filter(|elem| {
                let hash_elem = elem.hash();
                if unique_set.contains(&hash_elem) {
                    return false;
                }
                unique_set.insert(hash_elem);
                if schema_transactions.contains(&hash_elem) {
                    return false;
                }
                true
            })
            .cloned()
            .collect()
    }

    /// Extract state_hash from fake block
    pub fn compute_state_hash<'a, I>(&self, txs: I) -> Hash
    where
        I: IntoIterator<Item = &'a RawTransaction>,
    {
        let blockchain = &self.blockchain_ref();
        let (hashes, tx_pool) = {
            let mut pool = BTreeMap::new();
            let mut hashes = Vec::new();
            for raw in txs {
                let tx = blockchain.tx_from_raw(raw.clone()).unwrap();
                let hash = tx.hash();
                hashes.push(hash);
                pool.insert(hash, tx);
            }
            (hashes, pool)
        };

        // Apply the patch to a throwaway fork; the real blockchain is untouched.
        let fork = {
            let mut fork = blockchain.fork();
            let (_, patch) = blockchain.create_patch(ValidatorId(0), self.current_height(), &hashes, &tx_pool);
            fork.merge(patch);
            fork
        };
        *Schema::new(&fork).last_block().unwrap().state_hash()
    }

    pub fn get_proof_to_service_table(&self, service_id: u16, table_idx: usize) -> MapProof<Hash> {
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        schema.get_proof_to_service_table(service_id, table_idx)
    }

    pub fn get_configs_root_hash(&self) -> Hash {
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        schema.configs().root_hash()
    }

    /// Currently active consensus configuration.
    pub fn cfg(&self) -> StoredConfiguration {
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        schema.actual_configuration()
    }

    pub fn following_cfg(&self) -> Option<StoredConfiguration> {
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        schema.following_configuration()
    }

    // Sandbox configs always use the `Constant` adjuster (see `sandbox_with_services`).
    pub fn propose_timeout(&self) -> Milliseconds {
        match self.cfg().consensus.timeout_adjuster {
            TimeoutAdjusterConfig::Constant { timeout } => timeout,
            _ => panic!("Unexpected timeout adjuster config type"),
        }
    }

    /// Byzantine quorum size for `num_validators` (strictly more than 2/3).
    pub fn majority_count(&self, num_validators: usize) -> usize {
        num_validators * 2 / 3 + 1
    }

    pub fn round_timeout(&self) -> Milliseconds {
        self.cfg().consensus.round_timeout
    }

    pub fn transactions_hashes(&self) -> Vec<Hash> {
        let node_state = self.node_state();
        let rlock = node_state.transactions().read().expect(
            "Expected read lock",
        );
        rlock.keys().cloned().collect()
    }

    pub fn current_round(&self) -> Round {
        self.node_state().round()
    }

    pub fn block_and_precommits(&self, height: Height) -> Option<BlockProof> {
        let snapshot = self.blockchain_ref().snapshot();
        let schema = Schema::new(&snapshot);
        schema.block_and_precommits(height)
    }

    pub fn current_height(&self) -> Height {
        self.node_state().height()
    }

    pub fn current_leader(&self) -> ValidatorId {
        self.node_state().leader(self.current_round())
    }

    /// Assert the node is at exactly this consensus height and round.
    // NOTE(review): `achual_height` is a typo for `actual_height` (local name only).
    pub fn assert_state(&self, expected_height: Height, expected_round: Round) {
        let state = self.node_state();

        let achual_height = state.height();
        let actual_round = state.round();
        assert_eq!(achual_height, expected_height);
        assert_eq!(actual_round, expected_round);
    }

    /// Assert the node's lock state (locked round and locked propose hash).
    pub fn assert_lock(&self, expected_round: Round, expected_hash: Option<Hash>) {
        let state = self.node_state();

        let actual_round = state.locked_round();
        let actual_hash = state.locked_propose();
        assert_eq!(actual_round, expected_round);
        assert_eq!(actual_hash, expected_hash);
    }

    fn node_public_key(&self) -> PublicKey {
        *self.node_state().consensus_public_key()
    }

    fn node_secret_key(&self) -> SecretKey {
        self.node_state().consensus_secret_key().clone()
    }
}

impl Drop for Sandbox {
    // On clean teardown, fail the test if the node left an unconsumed message.
    // Skipped while already panicking to avoid a double panic/abort.
    fn drop(&mut self) {
        if !::std::thread::panicking() {
            self.check_unexpected_message();
        }
    }
}

// Deterministic fake address for validator index `idx`: idx.idx.idx.idx:idx.
fn gen_primitive_socket_addr(idx: u8) -> SocketAddr {
    let addr = Ipv4Addr::new(idx, idx, idx, idx);
    SocketAddr::new(IpAddr::V4(addr), idx as u16)
}

/// Build a 4-validator sandbox running `services` on an in-memory DB, with a
/// fixed genesis, fixed seeded keys, and a fake clock, then perform the initial
/// `Connect` handshake with the three peer validators.
pub fn sandbox_with_services(services: Vec<Box<Service>>) -> Sandbox {
    // Fixed seeds keep every sandbox run byte-for-byte reproducible.
    let validators = vec![
        gen_keypair_from_seed(&Seed::new([12; 32])),
        gen_keypair_from_seed(&Seed::new([13; 32])),
        gen_keypair_from_seed(&Seed::new([16; 32])),
        gen_keypair_from_seed(&Seed::new([19; 32])),
    ];
    let service_keys = vec![
        gen_keypair_from_seed(&Seed::new([20; 32])),
        gen_keypair_from_seed(&Seed::new([21; 32])),
        gen_keypair_from_seed(&Seed::new([22; 32])),
        gen_keypair_from_seed(&Seed::new([23; 32])),
    ];

    let addresses: Vec<SocketAddr> = (1..5).map(gen_primitive_socket_addr).collect::<Vec<_>>();

    let db = Box::new(MemoryDB::new());
    let mut blockchain = Blockchain::new(db, services);

    let consensus = ConsensusConfig {
        round_timeout: 1000,
        status_timeout: 600_000,
        peers_timeout: 600_000,
        txs_block_limit: 1000,
        timeout_adjuster: TimeoutAdjusterConfig::Constant { timeout: 200 },
    };
    let genesis = GenesisConfig::new_with_consensus(
        consensus,
        validators.iter().zip(service_keys.iter()).map(|x| {
            ValidatorKeys {
                consensus_key: (x.0).0,
                service_key: (x.1).0,
            }
        }),
    );
    blockchain.create_genesis_block(genesis).unwrap();

    // This sandbox impersonates validator 0.
    let config = Configuration {
        listener: ListenerConfig {
            address: addresses[0],
            consensus_public_key: validators[0].0,
            consensus_secret_key: validators[0].1.clone(),
            whitelist: Default::default(),
        },
        service: ServiceConfig {
            service_public_key: service_keys[0].0,
            service_secret_key: service_keys[0].1.clone(),
        },
        network: NetworkConfiguration::default(),
        peer_discovery: Vec::new(),
        mempool: Default::default(),
    };

    // TODO use factory or other solution like set_handler or run
    let system_state = SandboxSystemStateProvider {
        listen_address: addresses[0],
        // Fixed epoch start so time-dependent behavior is reproducible.
        shared_time: SharedTime::new(Mutex::new(UNIX_EPOCH + Duration::new(1_486_720_340, 0))),
    };
    let shared_time = system_state.shared_time.clone();

    let channel = NodeChannel::new(64);
    let mut handler = NodeHandler::new(
        blockchain.clone(),
        addresses[0],
        channel.0,
        Box::new(system_state),
        config.clone(),
        SharedNodeState::new(5000),
    );
    handler.initialize();

    let inner = SandboxInner {
        sent: VecDeque::new(),
        events: VecDeque::new(),
        timers: BinaryHeap::new(),
        events_receiver: channel.1,
        handler,
        time: shared_time,
    };
    let sandbox = Sandbox {
        inner: RefCell::new(inner),
        validators_map: HashMap::from_iter(validators.clone()),
        services_map: HashMap::from_iter(service_keys),
        addresses: addresses,
    };

    // Handshake with validators 1..n (0 is ourselves).
    sandbox.initialize(sandbox.time(), 1, validators.len());
    // General assumption; necessary for correct work of consensus algorithm
    assert!(sandbox.propose_timeout() < sandbox.round_timeout());
    sandbox
}

/// Standard sandbox used by most tests: timestamping + config-update services.
pub fn timestamping_sandbox() -> Sandbox {
    let _ = init_logger();
    sandbox_with_services(vec![
        Box::new(TimestampingService::new()),
        Box::new(ConfigUpdateService::new()),
    ])
}

// Self-tests for the sandbox harness itself (message matching, time handling,
// and the unexpected-message check in `Drop`).
#[cfg(test)]
mod tests {
    use sandbox_tests_helper::{VALIDATOR_1, VALIDATOR_2, VALIDATOR_3, HEIGHT_ONE, ROUND_ONE, ROUND_TWO};
    use super::*;

    #[test]
    fn test_sandbox_init() {
        timestamping_sandbox();
    }

    #[test]
    fn test_sandbox_recv_and_send() {
        let s = timestamping_sandbox();
        let (public, secret) = gen_keypair();
        s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret));
        s.send(
            s.a(VALIDATOR_2),
            Connect::new(
                &s.p(VALIDATOR_0),
                s.a(VALIDATOR_0),
                s.time(),
                s.s(VALIDATOR_0),
            ),
        );
    }

    #[test]
    fn test_sandbox_assert_status() {
        // TODO: remove this?
        let s = timestamping_sandbox();
        s.assert_state(HEIGHT_ONE, ROUND_ONE);
        s.add_time(Duration::from_millis(999));
        s.assert_state(HEIGHT_ONE, ROUND_ONE);
        s.add_time(Duration::from_millis(1));
        s.assert_state(HEIGHT_ONE, ROUND_TWO);
    }

    #[test]
    #[should_panic(expected = "Expected to send the message")]
    fn test_sandbox_expected_to_send_but_nothing_happened() {
        let s = timestamping_sandbox();
        s.send(
            s.a(VALIDATOR_1),
            Connect::new(
                &s.p(VALIDATOR_0),
                s.a(VALIDATOR_0),
                s.time(),
                s.s(VALIDATOR_0),
            ),
        );
    }

    #[test]
    #[should_panic(expected = "Expected to send the message")]
    fn test_sandbox_expected_to_send_another_message() {
        let s = timestamping_sandbox();
        let (public, secret) = gen_keypair();
        s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret));
        s.send(
            s.a(VALIDATOR_1),
            Connect::new(
                &s.p(VALIDATOR_0),
                s.a(VALIDATOR_0),
                s.time(),
                s.s(VALIDATOR_0),
            ),
        );
    }

    #[test]
    #[should_panic(expected = "Send unexpected message")]
    fn test_sandbox_unexpected_message_when_drop() {
        // The panic is expected from Sandbox::drop at the end of this test.
        let s = timestamping_sandbox();
        let (public, secret) = gen_keypair();
        s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret));
    }

    #[test]
    #[should_panic(expected = "Send unexpected message")]
    fn test_sandbox_unexpected_message_when_handle_another_message() {
        let s = timestamping_sandbox();
        let (public, secret) = gen_keypair();
        s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret));
        s.recv(Connect::new(&public, s.a(VALIDATOR_3), s.time(), &secret));
        panic!("Oops! We don't catch unexpected message");
    }

    #[test]
    #[should_panic(expected = "Send unexpected message")]
    fn test_sandbox_unexpected_message_when_time_changed() {
        let s = timestamping_sandbox();
        let (public, secret) = gen_keypair();
        s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret));
        s.add_time(Duration::from_millis(1000));
        panic!("Oops! We don't catch unexpected message");
    }
}

// NOTE(review): everything from here to the end of this span is an extraction
// artifact — a commit-message line ("Fix formatting") fused between two
// snapshots of this same file, followed by the duplicated file header and
// import list (cut off mid-`use`). It is not valid Rust in the original either
// and is left byte-identical below; the duplicate snapshot should be removed
// from the dump, not compiled.

Fix formatting

// Copyright 2017 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Workaround: Clippy does not correctly handle borrowing checking rules for returned types.
#![cfg_attr(feature="cargo-clippy", allow(let_and_return))]

use futures::{self, Async, Future, Stream};

use std::ops::{AddAssign, Deref};
use std::sync::{Arc, Mutex};
use std::cell::{Ref, RefCell, RefMut};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::{BTreeMap, BinaryHeap, HashMap, HashSet, VecDeque};
use std::iter::FromIterator;

use exonum::node::{Configuration, ListenerConfig, NodeHandler, ServiceConfig, State, SystemStateProvider};
use exonum::blockchain::{Block, BlockProof, Blockchain, ConsensusConfig, GenesisConfig, Schema, Service, SharedNodeState, StoredConfiguration, TimeoutAdjusterConfig, Transaction, ValidatorKeys};
use exonum::storage::{MapProof, MemoryDB};
use exonum::messages::{Any, Connect, Message, RawMessage, RawTransaction, Status};
use exonum::crypto::{gen_keypair_from_seed, Hash, PublicKey, SecretKey, Seed};
#[cfg(test)]
use exonum::crypto::gen_keypair;
use exonum::helpers::{init_logger, Height, Milliseconds, Round, ValidatorId};
use exonum::events::{Event, EventHandler, NetworkEvent, NetworkRequest};
use exonum::events::handler::{NodeChannel, NodeReceiver, TimeoutRequest};
use exonum::events::network::NetworkConfiguration;

use
timestamping::TimestampingService; use config_updater::ConfigUpdateService; use sandbox_tests_helper::VALIDATOR_0; pub type SharedTime = Arc<Mutex<SystemTime>>; #[derive(Debug)] pub struct SandboxSystemStateProvider { listen_address: SocketAddr, shared_time: SharedTime, } impl SystemStateProvider for SandboxSystemStateProvider { fn current_time(&self) -> SystemTime { *self.shared_time.lock().unwrap() } fn listen_address(&self) -> SocketAddr { self.listen_address } } #[derive(Debug)] pub struct SandboxInner { pub time: SharedTime, pub handler: NodeHandler, pub sent: VecDeque<(SocketAddr, RawMessage)>, pub events: VecDeque<Event>, pub timers: BinaryHeap<TimeoutRequest>, pub events_receiver: NodeReceiver, } impl SandboxInner { pub fn process_events(&mut self) { self.process_network_requests(); self.process_timeout_requests(); } pub fn handle_event<E: Into<Event>>(&mut self, e: E) { self.handler.handle_event(e.into()); self.process_events(); } fn process_network_requests(&mut self) { let network_getter = futures::lazy(|| -> Result<(), ()> { while let Async::Ready(Some(network)) = self.events_receiver.network.poll()? { debug!("{:?}", network); match network { NetworkRequest::SendMessage(peer, msg) => self.sent.push_back((peer, msg)), NetworkRequest::DisconnectWithPeer(_) => {} } } Ok(()) }); network_getter.wait().unwrap(); } fn process_timeout_requests(&mut self) { let timeouts_getter = futures::lazy(|| -> Result<(), ()> { while let Async::Ready(Some(timeout)) = self.events_receiver.timeout.poll()? 
{ debug!("{:?}", timeout); self.timers.push(timeout); } Ok(()) }); timeouts_getter.wait().unwrap(); } } pub struct Sandbox { pub validators_map: HashMap<PublicKey, SecretKey>, pub services_map: HashMap<PublicKey, SecretKey>, inner: RefCell<SandboxInner>, addresses: Vec<SocketAddr>, } impl Sandbox { pub fn initialize( &self, connect_message_time: SystemTime, start_index: usize, end_index: usize, ) { let connect = Connect::new( &self.p(VALIDATOR_0), self.a(VALIDATOR_0), connect_message_time, self.s(VALIDATOR_0), ); for validator in start_index..end_index { let validator = ValidatorId(validator as u16); self.recv(Connect::new( &self.p(validator), self.a(validator), self.time(), self.s(validator), )); self.send(self.a(validator), connect.clone()); } self.check_unexpected_message() } pub fn set_validators_map( &mut self, new_addresses_len: u8, validators: Vec<(PublicKey, SecretKey)>, services: Vec<(PublicKey, SecretKey)>, ) { self.addresses = (1..(new_addresses_len + 1) as u8) .map(gen_primitive_socket_addr) .collect::<Vec<_>>(); self.validators_map.extend(validators); self.services_map.extend(services); } fn check_unexpected_message(&self) { if let Some((addr, msg)) = self.inner.borrow_mut().sent.pop_front() { let any_msg = Any::from_raw(msg.clone()).expect("Send incorrect message"); panic!("Send unexpected message {:?} to {}", any_msg, addr); } } pub fn tx_from_raw(&self, raw: RawTransaction) -> Option<Box<Transaction>> { self.blockchain_ref().tx_from_raw(raw) } pub fn p(&self, id: ValidatorId) -> PublicKey { self.validators()[id.0 as usize] } pub fn s(&self, id: ValidatorId) -> &SecretKey { let p = self.p(id); &self.validators_map[&p] } pub fn service_public_key(&self, id: ValidatorId) -> PublicKey { let id: usize = id.into(); self.nodes_keys()[id].service_key } pub fn service_secret_key(&self, id: ValidatorId) -> &SecretKey { let public_key = self.service_public_key(id); &self.services_map[&public_key] } pub fn a(&self, id: ValidatorId) -> SocketAddr { let id: usize 
= id.into(); self.addresses[id] } pub fn validators(&self) -> Vec<PublicKey> { self.cfg() .validator_keys .iter() .map(|x| x.consensus_key) .collect() } pub fn nodes_keys(&self) -> Vec<ValidatorKeys> { self.cfg().validator_keys } pub fn n_validators(&self) -> usize { self.validators().len() } pub fn time(&self) -> SystemTime { let inner = self.inner.borrow(); let time = *inner.time.lock().unwrap().deref(); time } pub fn node_handler(&self) -> Ref<NodeHandler> { Ref::map(self.inner.borrow(), |inner| &inner.handler) } pub fn node_handler_mut(&self) -> RefMut<NodeHandler> { RefMut::map(self.inner.borrow_mut(), |inner| &mut inner.handler) } pub fn node_state(&self) -> Ref<State> { Ref::map(self.inner.borrow(), |inner| inner.handler.state()) } pub fn blockchain_ref(&self) -> Ref<Blockchain> { Ref::map(self.inner.borrow(), |inner| &inner.handler.blockchain) } pub fn blockchain_mut(&self) -> RefMut<Blockchain> { RefMut::map( self.inner.borrow_mut(), |inner| &mut inner.handler.blockchain, ) } pub fn recv<T: Message>(&self, msg: T) { self.check_unexpected_message(); // TODO Think about addresses. 
let dummy_addr = SocketAddr::from(([127, 0, 0, 1], 12_039)); let event = NetworkEvent::MessageReceived(dummy_addr, msg.raw().clone()); self.inner.borrow_mut().handle_event(event); } pub fn send<T: Message>(&self, addr: SocketAddr, msg: T) { let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap(); let sended = self.inner.borrow_mut().sent.pop_front(); if let Some((real_addr, real_msg)) = sended { let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message"); if real_addr != addr || any_real_msg != any_expected_msg { panic!( "Expected to send the message {:?} to {} instead sending {:?} to {}", any_expected_msg, addr, any_real_msg, real_addr ) } } else { panic!( "Expected to send the message {:?} to {} but nothing happened", any_expected_msg, addr ); } } pub fn broadcast<T: Message>(&self, msg: T) { self.broadcast_to_addrs(msg, self.addresses.iter().skip(1)); } // TODO: add self-test for broadcasting? pub fn broadcast_to_addrs<'a, T: Message, I>(&self, msg: T, addresses: I) where I: IntoIterator<Item = &'a SocketAddr>, { let any_expected_msg = Any::from_raw(msg.raw().clone()).unwrap(); // If node is excluded from validators, then it still will broadcast messages. // So in that case we should not skip addresses and validators count. 
let mut expected_set: HashSet<_> = HashSet::from_iter(addresses); for _ in 0..expected_set.len() { let sended = self.inner.borrow_mut().sent.pop_front(); if let Some((real_addr, real_msg)) = sended { let any_real_msg = Any::from_raw(real_msg.clone()).expect("Send incorrect message"); if any_real_msg != any_expected_msg { panic!( "Expected to broadcast the message {:?} instead sending {:?} to {}", any_expected_msg, any_real_msg, real_addr ) } if !expected_set.contains(&real_addr) { panic!( "Double send the same message {:?} to {:?} during broadcasting", any_expected_msg, real_addr ) } else { expected_set.remove(&real_addr); } } else { panic!( "Expected to broadcast the message {:?} but someone don't recieve \ messages: {:?}", any_expected_msg, expected_set ); } } } pub fn check_broadcast_status(&self, height: Height, block_hash: &Hash) { self.broadcast(Status::new( &self.node_public_key(), height, block_hash, &self.node_secret_key(), )); } pub fn add_time(&self, duration: Duration) { self.check_unexpected_message(); let now = { let inner = self.inner.borrow_mut(); let mut time = inner.time.lock().unwrap(); time.add_assign(duration); *time.deref() }; // handle timeouts if occurs loop { let timeout = { let timers = &mut self.inner.borrow_mut().timers; if let Some(TimeoutRequest(time, timeout)) = timers.pop() { if time > now { timers.push(TimeoutRequest(time, timeout)); break; } else { timeout } } else { break; } }; self.inner.borrow_mut().handle_event(timeout); } } pub fn is_leader(&self) -> bool { self.node_state().is_leader() } pub fn leader(&self, round: Round) -> ValidatorId { self.node_state().leader(round) } pub fn is_validator(&self) -> bool { self.node_state().is_validator() } pub fn last_block(&self) -> Block { self.blockchain_ref().last_block() } pub fn last_hash(&self) -> Hash { self.blockchain_ref().last_hash() } pub fn last_state_hash(&self) -> Hash { *self.last_block().state_hash() } pub fn filter_present_transactions<'a, I>(&self, txs: I) -> 
Vec<RawMessage> where I: IntoIterator<Item = &'a RawMessage>, { let mut unique_set: HashSet<Hash> = HashSet::new(); let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); let schema_transactions = schema.transactions(); txs.into_iter() .filter(|elem| { let hash_elem = elem.hash(); if unique_set.contains(&hash_elem) { return false; } unique_set.insert(hash_elem); if schema_transactions.contains(&hash_elem) { return false; } true }) .cloned() .collect() } /// Extract state_hash from fake block pub fn compute_state_hash<'a, I>(&self, txs: I) -> Hash where I: IntoIterator<Item = &'a RawTransaction>, { let blockchain = &self.blockchain_ref(); let (hashes, tx_pool) = { let mut pool = BTreeMap::new(); let mut hashes = Vec::new(); for raw in txs { let tx = blockchain.tx_from_raw(raw.clone()).unwrap(); let hash = tx.hash(); hashes.push(hash); pool.insert(hash, tx); } (hashes, pool) }; let fork = { let mut fork = blockchain.fork(); let (_, patch) = blockchain.create_patch(ValidatorId(0), self.current_height(), &hashes, &tx_pool); fork.merge(patch); fork }; *Schema::new(&fork).last_block().unwrap().state_hash() } pub fn get_proof_to_service_table(&self, service_id: u16, table_idx: usize) -> MapProof<Hash> { let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); schema.get_proof_to_service_table(service_id, table_idx) } pub fn get_configs_root_hash(&self) -> Hash { let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); schema.configs().root_hash() } pub fn cfg(&self) -> StoredConfiguration { let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); schema.actual_configuration() } pub fn following_cfg(&self) -> Option<StoredConfiguration> { let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); schema.following_configuration() } pub fn propose_timeout(&self) -> Milliseconds { match self.cfg().consensus.timeout_adjuster { 
TimeoutAdjusterConfig::Constant { timeout } => timeout, _ => panic!("Unexpected timeout adjuster config type"), } } pub fn majority_count(&self, num_validators: usize) -> usize { num_validators * 2 / 3 + 1 } pub fn round_timeout(&self) -> Milliseconds { self.cfg().consensus.round_timeout } pub fn transactions_hashes(&self) -> Vec<Hash> { let node_state = self.node_state(); let rlock = node_state.transactions().read().expect( "Expected read lock", ); rlock.keys().cloned().collect() } pub fn current_round(&self) -> Round { self.node_state().round() } pub fn block_and_precommits(&self, height: Height) -> Option<BlockProof> { let snapshot = self.blockchain_ref().snapshot(); let schema = Schema::new(&snapshot); schema.block_and_precommits(height) } pub fn current_height(&self) -> Height { self.node_state().height() } pub fn current_leader(&self) -> ValidatorId { self.node_state().leader(self.current_round()) } pub fn assert_state(&self, expected_height: Height, expected_round: Round) { let state = self.node_state(); let achual_height = state.height(); let actual_round = state.round(); assert_eq!(achual_height, expected_height); assert_eq!(actual_round, expected_round); } pub fn assert_lock(&self, expected_round: Round, expected_hash: Option<Hash>) { let state = self.node_state(); let actual_round = state.locked_round(); let actual_hash = state.locked_propose(); assert_eq!(actual_round, expected_round); assert_eq!(actual_hash, expected_hash); } fn node_public_key(&self) -> PublicKey { *self.node_state().consensus_public_key() } fn node_secret_key(&self) -> SecretKey { self.node_state().consensus_secret_key().clone() } } impl Drop for Sandbox { fn drop(&mut self) { if !::std::thread::panicking() { self.check_unexpected_message(); } } } fn gen_primitive_socket_addr(idx: u8) -> SocketAddr { let addr = Ipv4Addr::new(idx, idx, idx, idx); SocketAddr::new(IpAddr::V4(addr), idx as u16) } pub fn sandbox_with_services(services: Vec<Box<Service>>) -> Sandbox { let validators = 
vec![ gen_keypair_from_seed(&Seed::new([12; 32])), gen_keypair_from_seed(&Seed::new([13; 32])), gen_keypair_from_seed(&Seed::new([16; 32])), gen_keypair_from_seed(&Seed::new([19; 32])), ]; let service_keys = vec![ gen_keypair_from_seed(&Seed::new([20; 32])), gen_keypair_from_seed(&Seed::new([21; 32])), gen_keypair_from_seed(&Seed::new([22; 32])), gen_keypair_from_seed(&Seed::new([23; 32])), ]; let addresses: Vec<SocketAddr> = (1..5).map(gen_primitive_socket_addr).collect::<Vec<_>>(); let db = Box::new(MemoryDB::new()); let mut blockchain = Blockchain::new(db, services); let consensus = ConsensusConfig { round_timeout: 1000, status_timeout: 600_000, peers_timeout: 600_000, txs_block_limit: 1000, timeout_adjuster: TimeoutAdjusterConfig::Constant { timeout: 200 }, }; let genesis = GenesisConfig::new_with_consensus( consensus, validators.iter().zip(service_keys.iter()).map(|x| { ValidatorKeys { consensus_key: (x.0).0, service_key: (x.1).0, } }), ); blockchain.create_genesis_block(genesis).unwrap(); let config = Configuration { listener: ListenerConfig { address: addresses[0], consensus_public_key: validators[0].0, consensus_secret_key: validators[0].1.clone(), whitelist: Default::default(), }, service: ServiceConfig { service_public_key: service_keys[0].0, service_secret_key: service_keys[0].1.clone(), }, network: NetworkConfiguration::default(), peer_discovery: Vec::new(), mempool: Default::default(), }; // TODO use factory or other solution like set_handler or run let system_state = SandboxSystemStateProvider { listen_address: addresses[0], shared_time: SharedTime::new(Mutex::new(UNIX_EPOCH + Duration::new(1_486_720_340, 0))), }; let shared_time = system_state.shared_time.clone(); let channel = NodeChannel::new(64); let mut handler = NodeHandler::new( blockchain.clone(), addresses[0], channel.0, Box::new(system_state), config.clone(), SharedNodeState::new(5000), ); handler.initialize(); let inner = SandboxInner { sent: VecDeque::new(), events: VecDeque::new(), 
timers: BinaryHeap::new(), events_receiver: channel.1, handler, time: shared_time, }; let sandbox = Sandbox { inner: RefCell::new(inner), validators_map: HashMap::from_iter(validators.clone()), services_map: HashMap::from_iter(service_keys), addresses: addresses, }; sandbox.initialize(sandbox.time(), 1, validators.len()); // General assumption; necessary for correct work of consensus algorithm assert!(sandbox.propose_timeout() < sandbox.round_timeout()); sandbox } pub fn timestamping_sandbox() -> Sandbox { let _ = init_logger(); sandbox_with_services(vec![ Box::new(TimestampingService::new()), Box::new(ConfigUpdateService::new()), ]) } #[cfg(test)] mod tests { use sandbox_tests_helper::{VALIDATOR_1, VALIDATOR_2, VALIDATOR_3, HEIGHT_ONE, ROUND_ONE, ROUND_TWO}; use super::*; #[test] fn test_sandbox_init() { timestamping_sandbox(); } #[test] fn test_sandbox_recv_and_send() { let s = timestamping_sandbox(); let (public, secret) = gen_keypair(); s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret)); s.send( s.a(VALIDATOR_2), Connect::new( &s.p(VALIDATOR_0), s.a(VALIDATOR_0), s.time(), s.s(VALIDATOR_0), ), ); } #[test] fn test_sandbox_assert_status() { // TODO: remove this? 
let s = timestamping_sandbox(); s.assert_state(HEIGHT_ONE, ROUND_ONE); s.add_time(Duration::from_millis(999)); s.assert_state(HEIGHT_ONE, ROUND_ONE); s.add_time(Duration::from_millis(1)); s.assert_state(HEIGHT_ONE, ROUND_TWO); } #[test] #[should_panic(expected = "Expected to send the message")] fn test_sandbox_expected_to_send_but_nothing_happened() { let s = timestamping_sandbox(); s.send( s.a(VALIDATOR_1), Connect::new( &s.p(VALIDATOR_0), s.a(VALIDATOR_0), s.time(), s.s(VALIDATOR_0), ), ); } #[test] #[should_panic(expected = "Expected to send the message")] fn test_sandbox_expected_to_send_another_message() { let s = timestamping_sandbox(); let (public, secret) = gen_keypair(); s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret)); s.send( s.a(VALIDATOR_1), Connect::new( &s.p(VALIDATOR_0), s.a(VALIDATOR_0), s.time(), s.s(VALIDATOR_0), ), ); } #[test] #[should_panic(expected = "Send unexpected message")] fn test_sandbox_unexpected_message_when_drop() { let s = timestamping_sandbox(); let (public, secret) = gen_keypair(); s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret)); } #[test] #[should_panic(expected = "Send unexpected message")] fn test_sandbox_unexpected_message_when_handle_another_message() { let s = timestamping_sandbox(); let (public, secret) = gen_keypair(); s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret)); s.recv(Connect::new(&public, s.a(VALIDATOR_3), s.time(), &secret)); panic!("Oops! We don't catch unexpected message"); } #[test] #[should_panic(expected = "Send unexpected message")] fn test_sandbox_unexpected_message_when_time_changed() { let s = timestamping_sandbox(); let (public, secret) = gen_keypair(); s.recv(Connect::new(&public, s.a(VALIDATOR_2), s.time(), &secret)); s.add_time(Duration::from_millis(1000)); panic!("Oops! We don't catch unexpected message"); } }
use serde::{de::Deserializer, ser::Serializer}; use serde::{Deserialize, Serialize}; use std::{fmt, ops::Deref, result::Result as StdResult, sync::Arc}; const CUTOFF: usize = std::mem::size_of::<&[u8]>() - 1; type Inner = [u8; CUTOFF]; /// A buffer that may either be inline or remote and protected /// by an Arc #[derive(Clone, Ord, Eq)] pub enum IVec { /// An inlined small value Inline(u8, Inner), /// A heap-allocated value protected by an Arc Remote { /// The value protected by an Arc buf: Arc<[u8]>, }, } impl Serialize for IVec { fn serialize<S: Serializer>( &self, serializer: S, ) -> StdResult<S::Ok, S::Error> { serde_bytes::serialize(self, serializer) } } impl<'de> Deserialize<'de> for IVec { fn deserialize<D: Deserializer<'de>>( deserializer: D, ) -> StdResult<Self, D::Error> { serde_bytes::deserialize(deserializer) } } impl IVec { pub(crate) fn new(v: &[u8]) -> IVec { if v.len() <= CUTOFF { let sz = v.len() as u8; let mut data: Inner = [0u8; CUTOFF]; unsafe { std::ptr::copy_nonoverlapping( v.as_ptr(), data.as_mut_ptr(), v.len(), ); } IVec::Inline(sz, data) } else { IVec::Remote { buf: v.into() } } } #[inline] pub(crate) fn size_in_bytes(&self) -> u64 { if let IVec::Inline(..) 
= self { std::mem::size_of::<IVec>() as u64 } else { let sz = std::mem::size_of::<IVec>() as u64; sz.saturating_add(self.len() as u64) } } } impl From<&[u8]> for IVec { fn from(v: &[u8]) -> IVec { IVec::new(v) } } impl From<&str> for IVec { fn from(v: &str) -> IVec { v.as_bytes().into() } } impl From<&IVec> for IVec { fn from(v: &IVec) -> IVec { v.clone() } } impl From<Vec<u8>> for IVec { fn from(v: Vec<u8>) -> IVec { if v.len() <= CUTOFF { IVec::new(&v) } else { IVec::Remote { // rely on the Arc From specialization // for Vec<[T]>, which may improve // over time for T's that are Copy buf: v.into(), } } } } impl Deref for IVec { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.as_ref() } } impl AsRef<[u8]> for IVec { #[inline] fn as_ref(&self) -> &[u8] { match self { IVec::Inline(sz, buf) => unsafe { buf.get_unchecked(..*sz as usize) }, IVec::Remote { buf } => buf, } } } impl PartialOrd for IVec { fn partial_cmp(&self, other: &IVec) -> Option<std::cmp::Ordering> { Some(self.as_ref().cmp(other.as_ref())) } } impl<T: AsRef<[u8]>> PartialEq<T> for IVec { fn eq(&self, other: &T) -> bool { self.as_ref() == other.as_ref() } } impl PartialEq<[u8]> for IVec { fn eq(&self, other: &[u8]) -> bool { self.as_ref() == other } } impl fmt::Debug for IVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_ref().fmt(f) } } #[test] fn ivec_usage() { let iv1: IVec = vec![1, 2, 3].into(); assert_eq!(iv1, vec![1, 2, 3]); let iv2 = IVec::new(&[4; 128]); assert_eq!(iv2, vec![4; 128]); } Add IVec conversion from Box<[u8]> and explicitly use it with serde_bytes use serde::{de::Deserializer, ser::Serializer}; use serde::{Deserialize, Serialize}; use std::{fmt, ops::Deref, result::Result as StdResult, sync::Arc}; const CUTOFF: usize = std::mem::size_of::<&[u8]>() - 1; type Inner = [u8; CUTOFF]; /// A buffer that may either be inline or remote and protected /// by an Arc #[derive(Clone, Ord, Eq)] pub enum IVec { /// An inlined small value Inline(u8, Inner), /// A 
heap-allocated value protected by an Arc Remote { /// The value protected by an Arc buf: Arc<[u8]>, }, } impl Serialize for IVec { fn serialize<S: Serializer>( &self, serializer: S, ) -> StdResult<S::Ok, S::Error> { serde_bytes::serialize(self.as_ref(), serializer) } } impl<'de> Deserialize<'de> for IVec { fn deserialize<D: Deserializer<'de>>( deserializer: D, ) -> StdResult<Self, D::Error> { let bytes: StdResult<Box<[u8]>, D::Error> = serde_bytes::deserialize(deserializer); bytes.map(IVec::from) } } impl IVec { pub(crate) fn new(v: &[u8]) -> IVec { if v.len() <= CUTOFF { let sz = v.len() as u8; let mut data: Inner = [0u8; CUTOFF]; unsafe { std::ptr::copy_nonoverlapping( v.as_ptr(), data.as_mut_ptr(), v.len(), ); } IVec::Inline(sz, data) } else { IVec::Remote { buf: v.into() } } } #[inline] pub(crate) fn size_in_bytes(&self) -> u64 { if let IVec::Inline(..) = self { std::mem::size_of::<IVec>() as u64 } else { let sz = std::mem::size_of::<IVec>() as u64; sz.saturating_add(self.len() as u64) } } } impl From<Box<[u8]>> for IVec { fn from(v: Box<[u8]>) -> IVec { if v.len() <= CUTOFF { IVec::new(&v) } else { IVec::Remote { // rely on the Arc From specialization // for Box<T>, which may improve // over time buf: v.into(), } } } } impl From<&[u8]> for IVec { fn from(v: &[u8]) -> IVec { IVec::new(v) } } impl From<&str> for IVec { fn from(v: &str) -> IVec { v.as_bytes().into() } } impl From<&IVec> for IVec { fn from(v: &IVec) -> IVec { v.clone() } } impl From<Vec<u8>> for IVec { fn from(v: Vec<u8>) -> IVec { v.into_boxed_slice().into() } } impl Deref for IVec { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.as_ref() } } impl AsRef<[u8]> for IVec { #[inline] fn as_ref(&self) -> &[u8] { match self { IVec::Inline(sz, buf) => unsafe { buf.get_unchecked(..*sz as usize) }, IVec::Remote { buf } => buf, } } } impl PartialOrd for IVec { fn partial_cmp(&self, other: &IVec) -> Option<std::cmp::Ordering> { Some(self.as_ref().cmp(other.as_ref())) } } impl<T: AsRef<[u8]>> 
PartialEq<T> for IVec { fn eq(&self, other: &T) -> bool { self.as_ref() == other.as_ref() } } impl PartialEq<[u8]> for IVec { fn eq(&self, other: &[u8]) -> bool { self.as_ref() == other } } impl fmt::Debug for IVec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_ref().fmt(f) } } #[test] fn ivec_usage() { let iv1: IVec = vec![1, 2, 3].into(); assert_eq!(iv1, vec![1, 2, 3]); let iv2 = IVec::new(&[4; 128]); assert_eq!(iv2, vec![4; 128]); }
use game::*; use behaviour::LeafResolution; use direction::Direction; use coord::{Coord, StraightLine}; pub fn player_input(input_source: InputSourceRef) -> BehaviourLeaf { BehaviourLeaf::new(move |input| { loop { if let Some(meta_action) = get_meta_action(input, input_source) { return LeafResolution::Yield(meta_action); } } }) } fn get_direction(map: &ControlMap, input_source: InputSourceRef) -> Option<Direction> { input_source.next_input().and_then(|event| { map.control(event).and_then(|control| { control_to_direction(control) }) }) } fn control_to_direction(control: Control) -> Option<Direction> { match control { Control::Direction(d) => Some(d), _ => None, } } fn aim(input: BehaviourInput, map: &ControlMap, input_source: InputSourceRef) -> Option<Coord> { let start = input.entity.position().unwrap(); let mut knowledge = input.entity.drawable_knowledge_borrow_mut().unwrap(); let level_knowledge = knowledge.level_mut_or_insert_size(input.level_id, input.spatial_hash.width(), input.spatial_hash.height()); let targets = level_knowledge.sort_targets(start); let mut target_idx = 0; let mut end = if !targets.is_empty() { targets[target_idx] } else { start }; let mut renderer = input.renderer.borrow_mut(); loop { let overlay = RenderOverlay { aim_line: Some(StraightLine::new(start, end)), }; renderer.draw_with_overlay(&overlay); if let Some(event) = input_source.next_input() { if let Some(control) = map.control(event) { if let Some(direction) = control_to_direction(control) { let next_end = end + direction.vector(); if renderer.contains_world_coord(next_end) { end = next_end; } } else if control == Control::NextTarget { if !targets.is_empty() { target_idx = (target_idx + 1) % targets.len(); end = targets[target_idx]; } } else if control == Control::PrevTarget { if !targets.is_empty() { target_idx = (target_idx + targets.len() - 1) % targets.len(); end = targets[target_idx]; } } else if control == Control::Fire { renderer.draw(); return Some(end); } else { break; } } } 
} renderer.draw(); None } fn get_meta_action(input: BehaviourInput, input_source: InputSourceRef) -> Option<MetaAction> { input_source.next_input().and_then(|event| { input.entity.control_map().and_then(|map| { map.control(event).and_then(|control| { match control { Control::Direction(d) => Some(MetaAction::ActionArgs(ActionArgs::Walk(input.entity.id(), d))), Control::Close => { get_direction(map, input_source).map(|d| MetaAction::ActionArgs(ActionArgs::Close(input.entity.id(), d))) } Control::Fire => { aim(input, map, input_source).map(|coord| { let delta = coord - input.entity.position().unwrap(); MetaAction::ActionArgs(ActionArgs::FireBullet(input.entity.id(), delta)) }) } Control::Wait => { Some(MetaAction::ActionArgs(ActionArgs::Null)) } Control::Quit => Some(MetaAction::External(External::Quit)), Control::NextTarget => None, Control::PrevTarget => None, } }) }) }) } Pressing an unbound key closes aim ui use game::*; use behaviour::LeafResolution; use direction::Direction; use coord::{Coord, StraightLine}; pub fn player_input(input_source: InputSourceRef) -> BehaviourLeaf { BehaviourLeaf::new(move |input| { loop { if let Some(meta_action) = get_meta_action(input, input_source) { return LeafResolution::Yield(meta_action); } } }) } fn get_direction(map: &ControlMap, input_source: InputSourceRef) -> Option<Direction> { input_source.next_input().and_then(|event| { map.control(event).and_then(|control| { control_to_direction(control) }) }) } fn control_to_direction(control: Control) -> Option<Direction> { match control { Control::Direction(d) => Some(d), _ => None, } } fn aim(input: BehaviourInput, map: &ControlMap, input_source: InputSourceRef) -> Option<Coord> { let start = input.entity.position().unwrap(); let mut knowledge = input.entity.drawable_knowledge_borrow_mut().unwrap(); let level_knowledge = knowledge.level_mut_or_insert_size(input.level_id, input.spatial_hash.width(), input.spatial_hash.height()); let targets = level_knowledge.sort_targets(start); let 
mut target_idx = 0; let mut end = if !targets.is_empty() { targets[target_idx] } else { start }; let mut renderer = input.renderer.borrow_mut(); loop { let overlay = RenderOverlay { aim_line: Some(StraightLine::new(start, end)), }; renderer.draw_with_overlay(&overlay); if let Some(event) = input_source.next_input() { if let Some(control) = map.control(event) { if let Some(direction) = control_to_direction(control) { let next_end = end + direction.vector(); if renderer.contains_world_coord(next_end) { end = next_end; } } else if control == Control::NextTarget { if !targets.is_empty() { target_idx = (target_idx + 1) % targets.len(); end = targets[target_idx]; } } else if control == Control::PrevTarget { if !targets.is_empty() { target_idx = (target_idx + targets.len() - 1) % targets.len(); end = targets[target_idx]; } } else if control == Control::Fire { renderer.draw(); return Some(end); } else { break; } } else { break; } } } renderer.draw(); None } fn get_meta_action(input: BehaviourInput, input_source: InputSourceRef) -> Option<MetaAction> { input_source.next_input().and_then(|event| { input.entity.control_map().and_then(|map| { map.control(event).and_then(|control| { match control { Control::Direction(d) => Some(MetaAction::ActionArgs(ActionArgs::Walk(input.entity.id(), d))), Control::Close => { get_direction(map, input_source).map(|d| MetaAction::ActionArgs(ActionArgs::Close(input.entity.id(), d))) } Control::Fire => { aim(input, map, input_source).map(|coord| { let delta = coord - input.entity.position().unwrap(); MetaAction::ActionArgs(ActionArgs::FireBullet(input.entity.id(), delta)) }) } Control::Wait => { Some(MetaAction::ActionArgs(ActionArgs::Null)) } Control::Quit => Some(MetaAction::External(External::Quit)), Control::NextTarget => None, Control::PrevTarget => None, } }) }) }) }
use innovation::{Innovation, InnovationRange}; use acyclic_network::{Network, NodeIndex}; pub use acyclic_network::NodeType; use traits::{Distance, Genotype}; use weight::Weight; use alignment_metric::AlignmentMetric; use std::collections::BTreeMap; use alignment::{Alignment, align_sorted_iterators}; use std::cmp; #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct AnyInnovation(usize); #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct NodeInnovation(usize); #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct LinkInnovation(usize); impl Innovation for AnyInnovation { } impl Innovation for NodeInnovation { } impl Innovation for LinkInnovation { } struct CombinedAlignmentMetric { node_metric: AlignmentMetric, link_metric: AlignmentMetric, } impl CombinedAlignmentMetric { fn new() -> Self { CombinedAlignmentMetric { node_metric: AlignmentMetric::new(), link_metric: AlignmentMetric::new(), } } } /// Genome representing a feed-forward (acyclic) network. /// /// Each node is uniquely identified by it's Innovation number. Each link is sorted according it's /// associated Innovation number. /// /// We have to keep both the `network` and the `node_innovation_map` in sync. That is, whenever we /// add or remove a node, we have to update both. #[derive(Clone, Debug)] pub struct Genome<NT: NodeType> { /// Represents the acyclic feed forward network. network: Network<NT, Weight, AnyInnovation>, /// Maps the external id (innovation number) which is globally allocated, to the internal /// network node index. node_innovation_map: BTreeMap<NodeInnovation, NodeIndex>, } impl<NT: NodeType> Genotype for Genome<NT> {} impl<NT: NodeType> Genome<NT> { fn new() -> Self { Genome { network: Network::new(), node_innovation_map: BTreeMap::new(), } } /// Counts the number of matching, disjoint and excess node innovation numbers between `self` /// and `other`. 
fn node_alignment_metric(&self, other: &Self) -> AlignmentMetric { let mut node_metric = AlignmentMetric::new(); node_metric.max_len = cmp::max(self.node_innovation_map.len(), other.node_innovation_map.len()); let left = self.node_innovation_map.keys(); let right = other.node_innovation_map.keys(); align_sorted_iterators(left, right, Ord::cmp, |alignment| { match alignment { Alignment::Match(_l, _r) => { node_metric.matching += 1; } ref align if align.is_disjoint() => { node_metric.disjoint += 1; } ref align if align.is_excess() => { node_metric.excess += 1; } _ => unreachable!() } }); node_metric } /// Determine the genetic compatibility between `self` and `other` in terms of matching, /// disjoint and excess genes (both node and link genes), as well as weight distance. fn combined_alignment_metric(&self, other: &Self) -> CombinedAlignmentMetric { let mut metric = CombinedAlignmentMetric::new(); metric.node_metric.max_len = cmp::max(self.network.node_count(), other.network.node_count()); metric.link_metric.max_len = cmp::max(self.network.link_count(), other.network.link_count()); let left = self.node_innovation_map.iter(); let right = other.node_innovation_map.iter(); let left_link_innov_range = self.link_innovation_range(); let right_link_innov_range = other.link_innovation_range(); let left_network = &self.network; let right_network = &other.network; align_sorted_iterators(left, right, |&(kl, _), &(kr, _)| Ord::cmp(kl, kr), |node_alignment| { match node_alignment { Alignment::Match((_, &left_node_index), (_, &right_node_index)) => { metric.node_metric.matching += 1; // Both nodes are topological identical. So the link innovations can // also match up. 
align_sorted_iterators(left_network.link_iter_for_node(left_node_index), right_network.link_iter_for_node(right_node_index), |&(_, left_link), &(_, right_link)| Ord::cmp(&left_link.external_link_id(), &right_link.external_link_id()), |link_alignment| { if let Alignment::Match((_, left_link), (_, right_link)) = link_alignment { // we have a link match! metric.link_metric.matching += 1; // add up the weight distance metric.link_metric.weight_distance += (left_link.weight().0 - right_link.weight().0).abs(); } else if link_alignment.is_disjoint() { // the link is locally disjoint (list of links of the node) metric.link_metric.disjoint += 1; } else if link_alignment.is_left() { let &(_, left_link) = link_alignment.get_left().unwrap(); if right_link_innov_range.contains(&LinkInnovation(left_link.external_link_id().0)) { metric.link_metric.disjoint += 1; } else { metric.link_metric.excess += 1; } } else if link_alignment.is_right() { let &(_, right_link) = link_alignment.get_right().unwrap(); if left_link_innov_range.contains(&LinkInnovation(right_link.external_link_id().0)) { metric.link_metric.disjoint += 1; } else { metric.link_metric.excess += 1; } } else { unreachable!(); } }); } // in general, if a node is disjoint (or excess), it's link innovations cannot match up! ref align_left if align_left.is_left() => { let &(_, &left_node_index) = align_left.get_left().unwrap(); // XXX: Optimize: once we hit an excess link id, all remaining ids are excess as well. 
for (_, left_link) in left_network.link_iter_for_node(left_node_index) { // check if link is disjoint or excess if right_link_innov_range.contains(&LinkInnovation(left_link.external_link_id().0)) { metric.link_metric.disjoint += 1; } else { metric.link_metric.excess += 1; } } if align_left.is_excess() { metric.node_metric.excess += 1; } else if align_left.is_disjoint() { metric.node_metric.disjoint += 1; } else { unreachable!(); } } ref align_right if align_right.is_right() => { let &(_, &right_node_index) = align_right.get_right().unwrap(); // XXX: Optimize: once we hit an excess link id, all remaining ids are excess as well. for (_, right_link) in right_network.link_iter_for_node(right_node_index) { // check if link is disjoint or excess if left_link_innov_range.contains(&LinkInnovation(right_link.external_link_id().0)) { metric.link_metric.disjoint += 1; } else { metric.link_metric.excess += 1; } } if align_right.is_excess() { metric.node_metric.excess += 1; } else if align_right.is_disjoint() { metric.node_metric.disjoint += 1; } else { unreachable!(); } } _ => { unreachable!() } } }); metric } /// Determine the genomes range of node innovations. If the genome /// contains no nodes, this will return `None`. Otherwise it will /// return the Some((min, max)). /// /// # Complexity /// /// This runs in O(log n). fn node_innovation_range(&self) -> InnovationRange<NodeInnovation> { let mut range = InnovationRange::empty(); if let Some(&min) = self.node_innovation_map.keys().min() { range.insert(min); } if let Some(&max) = self.node_innovation_map.keys().max() { range.insert(max); } return range; } /// Determine the link innovation range for that Genome. /// /// # Complexity /// /// O(n) where `n` is the number of nodes. 
fn link_innovation_range(&self) -> InnovationRange<LinkInnovation> { let mut range = InnovationRange::empty(); let network = &self.network; network.each_node_with_index(|_, node_idx| { if let Some(link) = network.first_link_of_node(node_idx) { range.insert(link.external_link_id()); } if let Some(link) = network.last_link_of_node(node_idx) { range.insert(link.external_link_id()); } }); range.map(|i| LinkInnovation(i.0)) } /// Add a link between `source_node` and `target_node`. Associates the new /// link with `link_innovation` and gives it `weight`. /// /// Does not check for cycles. Test for cycles before using this method! /// /// # Note /// /// Does not panic or abort if a link with the same link innovation is added. /// /// # Panics /// /// If one of `source_node` or `target_node` does not exist. /// /// If a link between these nodes already exists! /// /// # Complexity /// /// This runs in O(k) + O(log n), where `k` is the number of edges of `source_node`. /// This is because we keep the edges sorted. `n` is the number of nodes, because /// we have to lookup the internal node indices from the node innovations. fn add_link(&mut self, source_node: NodeInnovation, target_node: NodeInnovation, link_innovation: LinkInnovation, weight: Weight) { let source_node_index = self.node_innovation_map[&source_node]; let target_node_index = self.node_innovation_map[&target_node]; debug_assert!(!self.network.link_would_cycle(source_node_index, target_node_index)); debug_assert!(self.network.valid_link(source_node_index, target_node_index).is_ok()); let _link_index = self.network.add_link(source_node_index, target_node_index, weight, AnyInnovation(link_innovation.0)); } fn link_count(&self) -> usize { self.network.link_count() } /// Add a new node with external id `node_innovation` and of type `node_type` /// to the genome. /// /// # Panics /// /// Panics if a node with the same innovation already exists in the genome. 
fn add_node(&mut self, node_innovation: NodeInnovation, node_type: NT) { if self.node_innovation_map.contains_key(&node_innovation) { panic!("Duplicate node_innovation"); } let node_index = self.network.add_node(node_type, AnyInnovation(node_innovation.0)); self.node_innovation_map.insert(node_innovation, node_index); } fn node_count(&self) -> usize { assert!(self.node_innovation_map.len() == self.network.node_count()); return self.node_innovation_map.len(); } } /// This is used to weight a link AlignmentMetric. pub struct GenomeDistance { pub excess: f64, pub disjoint: f64, pub weight: f64, } impl<NT: NodeType> Distance<Genome<NT>> for GenomeDistance { fn distance(&self, genome_left: &Genome<NT>, genome_right: &Genome<NT>) -> f64 { let m = genome_left.combined_alignment_metric(genome_right).link_metric; if m.max_len == 0 { return 0.0; } self.excess * (m.excess as f64) / (m.max_len as f64) + self.disjoint * (m.disjoint as f64) / (m.max_len as f64) + self.weight * if m.matching > 0 { m.weight_distance / (m.matching as f64) } else { 0.0 } } } #[cfg(test)] mod tests { use super::{NodeType, Genome, NodeInnovation, LinkInnovation}; use weight::Weight; use innovation::InnovationRange; #[derive(Clone, Debug)] struct NT; impl NodeType for NT { fn accept_incoming_links(&self) -> bool { true } fn accept_outgoing_links(&self) -> bool { true } } #[test] fn test_add_node() { let mut genome = Genome::<NT>::new(); assert_eq!(0, genome.node_count()); genome.add_node(NodeInnovation(0), NT); assert_eq!(1, genome.node_count()); genome.add_node(NodeInnovation(1), NT); assert_eq!(2, genome.node_count()); } #[test] #[should_panic(expected = "Duplicate node_innovation")] fn test_add_duplicate_node() { let mut genome = Genome::<NT>::new(); genome.add_node(NodeInnovation(0), NT); genome.add_node(NodeInnovation(0), NT); } #[test] fn test_add_link() { let mut genome = Genome::<NT>::new(); let n0 = NodeInnovation(0); let n1 = NodeInnovation(1); let n2 = NodeInnovation(2); genome.add_node(n0, 
NT); genome.add_node(n1, NT); genome.add_node(n2, NT); assert_eq!(0, genome.link_count()); genome.add_link(n0, n1, LinkInnovation(0), Weight(0.0)); assert_eq!(1, genome.link_count()); genome.add_link(n0, n2, LinkInnovation(0), Weight(0.0)); assert_eq!(2, genome.link_count()); } #[test] fn test_link_innovation_range() { let mut genome = Genome::<NT>::new(); let n0 = NodeInnovation(0); let n1 = NodeInnovation(1); let n2 = NodeInnovation(2); genome.add_node(n0, NT); genome.add_node(n1, NT); genome.add_node(n2, NT); assert_eq!(InnovationRange::Empty, genome.link_innovation_range()); genome.add_link(n0, n1, LinkInnovation(5), Weight(0.0)); assert_eq!(InnovationRange::Single(LinkInnovation(5)), genome.link_innovation_range()); genome.add_link(n0, n2, LinkInnovation(1), Weight(0.0)); assert_eq!(InnovationRange::FromTo(LinkInnovation(1), LinkInnovation(5)), genome.link_innovation_range()); genome.add_link(n1, n2, LinkInnovation(99), Weight(0.0)); assert_eq!(InnovationRange::FromTo(LinkInnovation(1), LinkInnovation(99)), genome.link_innovation_range()); } #[test] fn test_node_innovation_range() { let mut genome = Genome::<NT>::new(); assert_eq!(InnovationRange::Empty, genome.node_innovation_range()); genome.add_node(NodeInnovation(5), NT); assert_eq!(InnovationRange::Single(NodeInnovation(5)), genome.node_innovation_range()); genome.add_node(NodeInnovation(7), NT); assert_eq!(InnovationRange::FromTo(NodeInnovation(5), NodeInnovation(7)), genome.node_innovation_range()); genome.add_node(NodeInnovation(6), NT); assert_eq!(InnovationRange::FromTo(NodeInnovation(5), NodeInnovation(7)), genome.node_innovation_range()); genome.add_node(NodeInnovation(4), NT); assert_eq!(InnovationRange::FromTo(NodeInnovation(4), NodeInnovation(7)), genome.node_innovation_range()); genome.add_node(NodeInnovation(1), NT); assert_eq!(InnovationRange::FromTo(NodeInnovation(1), NodeInnovation(7)), genome.node_innovation_range()); genome.add_node(NodeInnovation(1000), NT); 
assert_eq!(InnovationRange::FromTo(NodeInnovation(1), NodeInnovation(1000)), genome.node_innovation_range()); } #[test] fn test_node_align_metric() { let mut left = Genome::<NT>::new(); let mut right = Genome::<NT>::new(); let m = left.node_alignment_metric(&right); assert_eq!(0, m.max_len); assert_eq!(0, m.matching); assert_eq!(0, m.excess); assert_eq!(0, m.disjoint); assert_eq!(0.0, m.weight_distance); left.add_node(NodeInnovation(5), NT); let m = left.node_alignment_metric(&right); assert_eq!(1, m.max_len); assert_eq!(0, m.matching); assert_eq!(1, m.excess); assert_eq!(0, m.disjoint); assert_eq!(0.0, m.weight_distance); left.add_node(NodeInnovation(10), NT); let m = left.node_alignment_metric(&right); assert_eq!(2, m.max_len); assert_eq!(0, m.matching); assert_eq!(2, m.excess); assert_eq!(0, m.disjoint); assert_eq!(0.0, m.weight_distance); right.add_node(NodeInnovation(6), NT); let m = left.node_alignment_metric(&right); assert_eq!(2, m.max_len); assert_eq!(0, m.matching); assert_eq!(2, m.excess); assert_eq!(1, m.disjoint); assert_eq!(0.0, m.weight_distance); right.add_node(NodeInnovation(5), NT); let m = left.node_alignment_metric(&right); assert_eq!(2, m.max_len); assert_eq!(1, m.matching); assert_eq!(1, m.excess); assert_eq!(1, m.disjoint); assert_eq!(0.0, m.weight_distance); left.add_node(NodeInnovation(6), NT); let m = left.node_alignment_metric(&right); assert_eq!(3, m.max_len); assert_eq!(2, m.matching); assert_eq!(1, m.excess); assert_eq!(0, m.disjoint); assert_eq!(0.0, m.weight_distance); right.add_node(NodeInnovation(11), NT); let m = left.node_alignment_metric(&right); assert_eq!(3, m.max_len); assert_eq!(2, m.matching); assert_eq!(1, m.excess); assert_eq!(1, m.disjoint); assert_eq!(0.0, m.weight_distance); } #[test] fn test_combined_align_metric() { let mut left = Genome::<NT>::new(); let mut right = Genome::<NT>::new(); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); 
left.add_node(NodeInnovation(5), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); left.add_node(NodeInnovation(10), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); right.add_node(NodeInnovation(6), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); right.add_node(NodeInnovation(5), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); left.add_node(NodeInnovation(6), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); right.add_node(NodeInnovation(11), NT); assert_eq!(left.node_alignment_metric(&right), left.combined_alignment_metric(&right).node_metric); } } Add network method. Make some methods public use innovation::{Innovation, InnovationRange}; use acyclic_network::{Network, NodeIndex}; pub use acyclic_network::NodeType; use traits::{Distance, Genotype}; use weight::Weight; use alignment_metric::AlignmentMetric; use std::collections::BTreeMap; use alignment::{Alignment, align_sorted_iterators}; use std::cmp; #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct AnyInnovation(usize); #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct NodeInnovation(usize); #[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct LinkInnovation(usize); impl Innovation for AnyInnovation { } impl Innovation for NodeInnovation { } impl Innovation for LinkInnovation { } struct CombinedAlignmentMetric { node_metric: AlignmentMetric, link_metric: AlignmentMetric, } impl CombinedAlignmentMetric { fn new() -> Self { CombinedAlignmentMetric { node_metric: AlignmentMetric::new(), link_metric: AlignmentMetric::new(), } } } /// Genome representing a feed-forward (acyclic) network. /// /// Each node is uniquely identified by it's Innovation number. 
Each link is sorted according it's /// associated Innovation number. /// /// We have to keep both the `network` and the `node_innovation_map` in sync. That is, whenever we /// add or remove a node, we have to update both. #[derive(Clone, Debug)] pub struct Genome<NT: NodeType> { /// Represents the acyclic feed forward network. network: Network<NT, Weight, AnyInnovation>, /// Maps the external id (innovation number) which is globally allocated, to the internal /// network node index. node_innovation_map: BTreeMap<NodeInnovation, NodeIndex>, } impl<NT: NodeType> Genotype for Genome<NT> {} impl<NT: NodeType> Genome<NT> { fn new() -> Self { Genome { network: Network::new(), node_innovation_map: BTreeMap::new(), } } /// Counts the number of matching, disjoint and excess node innovation numbers between `self` /// and `other`. fn node_alignment_metric(&self, other: &Self) -> AlignmentMetric { let mut node_metric = AlignmentMetric::new(); node_metric.max_len = cmp::max(self.node_innovation_map.len(), other.node_innovation_map.len()); let left = self.node_innovation_map.keys(); let right = other.node_innovation_map.keys(); align_sorted_iterators(left, right, Ord::cmp, |alignment| { match alignment { Alignment::Match(_l, _r) => { node_metric.matching += 1; } ref align if align.is_disjoint() => { node_metric.disjoint += 1; } ref align if align.is_excess() => { node_metric.excess += 1; } _ => unreachable!() } }); node_metric } /// Determine the genetic compatibility between `self` and `other` in terms of matching, /// disjoint and excess genes (both node and link genes), as well as weight distance. 
fn combined_alignment_metric(&self, other: &Self) -> CombinedAlignmentMetric {
    let mut metric = CombinedAlignmentMetric::new();
    // `max_len` is the size of the larger genome; the distance measure uses
    // it as normalization denominator.
    metric.node_metric.max_len = cmp::max(self.network.node_count(), other.network.node_count());
    metric.link_metric.max_len = cmp::max(self.network.link_count(), other.network.link_count());

    // BTreeMap iteration is sorted by node innovation, as required by
    // `align_sorted_iterators`.
    let left = self.node_innovation_map.iter();
    let right = other.node_innovation_map.iter();

    // One-sided links are classified against the other genome's link
    // innovation range: inside the range => disjoint, outside => excess.
    let left_link_innov_range = self.link_innovation_range();
    let right_link_innov_range = other.link_innovation_range();

    let left_network = &self.network;
    let right_network = &other.network;

    align_sorted_iterators(left, right, |&(kl, _), &(kr, _)| Ord::cmp(kl, kr),
                           |node_alignment| {
        match node_alignment {
            Alignment::Match((_, &left_node_index), (_, &right_node_index)) => {
                metric.node_metric.matching += 1;

                // Both nodes are topological identical. So the link innovations can
                // also match up.
                align_sorted_iterators(left_network.link_iter_for_node(left_node_index),
                                       right_network.link_iter_for_node(right_node_index),
                                       |&(_, left_link), &(_, right_link)|
                                           Ord::cmp(&left_link.external_link_id(),
                                                    &right_link.external_link_id()),
                                       |link_alignment| {
                    if let Alignment::Match((_, left_link), (_, right_link)) = link_alignment {
                        // we have a link match!
                        metric.link_metric.matching += 1;
                        // add up the weight distance
                        metric.link_metric.weight_distance +=
                            (left_link.weight().0 - right_link.weight().0).abs();
                    } else if link_alignment.is_disjoint() {
                        // the link is locally disjoint (list of links of the node)
                        metric.link_metric.disjoint += 1;
                    } else if link_alignment.is_left() {
                        // Left-only link: disjoint if it falls inside the right
                        // genome's link innovation range, otherwise excess.
                        let &(_, left_link) = link_alignment.get_left().unwrap();
                        if right_link_innov_range.contains(&LinkInnovation(left_link.external_link_id().0)) {
                            metric.link_metric.disjoint += 1;
                        } else {
                            metric.link_metric.excess += 1;
                        }
                    } else if link_alignment.is_right() {
                        // Right-only link: same classification, mirrored.
                        let &(_, right_link) = link_alignment.get_right().unwrap();
                        if left_link_innov_range.contains(&LinkInnovation(right_link.external_link_id().0)) {
                            metric.link_metric.disjoint += 1;
                        } else {
                            metric.link_metric.excess += 1;
                        }
                    } else {
                        unreachable!();
                    }
                });
            }

            // in general, if a node is disjoint (or excess), its link innovations cannot match up!
            ref align_left if align_left.is_left() => {
                let &(_, &left_node_index) = align_left.get_left().unwrap();

                // XXX: Optimize: once we hit an excess link id, all remaining ids are excess as well.
                for (_, left_link) in left_network.link_iter_for_node(left_node_index) {
                    // check if link is disjoint or excess
                    if right_link_innov_range.contains(&LinkInnovation(left_link.external_link_id().0)) {
                        metric.link_metric.disjoint += 1;
                    } else {
                        metric.link_metric.excess += 1;
                    }
                }

                if align_left.is_excess() {
                    metric.node_metric.excess += 1;
                } else if align_left.is_disjoint() {
                    metric.node_metric.disjoint += 1;
                } else {
                    unreachable!();
                }
            }

            ref align_right if align_right.is_right() => {
                let &(_, &right_node_index) = align_right.get_right().unwrap();

                // XXX: Optimize: once we hit an excess link id, all remaining ids are excess as well.
                for (_, right_link) in right_network.link_iter_for_node(right_node_index) {
                    // check if link is disjoint or excess
                    if left_link_innov_range.contains(&LinkInnovation(right_link.external_link_id().0)) {
                        metric.link_metric.disjoint += 1;
                    } else {
                        metric.link_metric.excess += 1;
                    }
                }

                if align_right.is_excess() {
                    metric.node_metric.excess += 1;
                } else if align_right.is_disjoint() {
                    metric.node_metric.disjoint += 1;
                } else {
                    unreachable!();
                }
            }

            _ => {
                unreachable!()
            }
        }
    });

    metric
}

/// Determine the genome's range of node innovations.
///
/// If the genome contains no nodes, this returns `InnovationRange::Empty`.
/// Otherwise the returned range covers the minimal and maximal node
/// innovation.
///
/// # Complexity
///
/// Linear in the number of nodes: `keys().min()` and `keys().max()` each
/// scan the (sorted) key iterator.
fn node_innovation_range(&self) -> InnovationRange<NodeInnovation> {
    let mut range = InnovationRange::empty();
    if let Some(&min) = self.node_innovation_map.keys().min() {
        range.insert(min);
    }
    if let Some(&max) = self.node_innovation_map.keys().max() {
        range.insert(max);
    }
    return range;
}

/// Determine the link innovation range for that Genome.
///
/// Only the first and last link of every node are inspected, because the
/// links of a node are kept sorted by innovation number.
///
/// # Complexity
///
/// O(n) where `n` is the number of nodes.
fn link_innovation_range(&self) -> InnovationRange<LinkInnovation> {
    let mut range = InnovationRange::empty();
    let network = &self.network;
    network.each_node_with_index(|_, node_idx| {
        if let Some(link) = network.first_link_of_node(node_idx) {
            range.insert(link.external_link_id());
        }
        if let Some(link) = network.last_link_of_node(node_idx) {
            range.insert(link.external_link_id());
        }
    });
    // Internally links carry `AnyInnovation`s; expose them as `LinkInnovation`s.
    range.map(|i| LinkInnovation(i.0))
}

/// Returns a reference to the feed forward network.
pub fn network(&self) -> &Network<NT, Weight, AnyInnovation> {
    &self.network
}

/// Add a link between `source_node` and `target_node`. Associates the new
/// link with `link_innovation` and gives it `weight`.
///
/// Does not check for cycles. Test for cycles before using this method!
///
/// # Note
///
/// Does not panic or abort if a link with the same link innovation is added.
///
/// # Panics
///
/// If one of `source_node` or `target_node` does not exist.
///
/// If a link between these nodes already exists!
///
/// # Complexity
///
/// This runs in O(k) + O(log n), where `k` is the number of edges of `source_node`.
/// This is because we keep the edges sorted. `n` is the number of nodes, because
/// we have to lookup the internal node indices from the node innovations.
pub fn add_link(&mut self, source_node: NodeInnovation, target_node: NodeInnovation,
                link_innovation: LinkInnovation, weight: Weight) {
    let source_node_index = self.node_innovation_map[&source_node];
    let target_node_index = self.node_innovation_map[&target_node];
    // Cycle-freeness and link validity are only verified in debug builds.
    debug_assert!(!self.network.link_would_cycle(source_node_index, target_node_index));
    debug_assert!(self.network.valid_link(source_node_index, target_node_index).is_ok());
    let _link_index = self.network.add_link(source_node_index, target_node_index, weight,
                                            AnyInnovation(link_innovation.0));
}

/// Number of links currently present in the genome.
fn link_count(&self) -> usize {
    self.network.link_count()
}

/// Add a new node with external id `node_innovation` and of type `node_type`
/// to the genome.
///
/// # Panics
///
/// Panics if a node with the same innovation already exists in the genome.
pub fn add_node(&mut self, node_innovation: NodeInnovation, node_type: NT) {
    if self.node_innovation_map.contains_key(&node_innovation) {
        panic!("Duplicate node_innovation");
    }
    let node_index = self.network.add_node(node_type, AnyInnovation(node_innovation.0));
    self.node_innovation_map.insert(node_innovation, node_index);
}

/// Number of nodes in the genome.
///
/// The node innovation map and the network are kept in sync; this is
/// asserted here.
fn node_count(&self) -> usize {
    assert!(self.node_innovation_map.len() == self.network.node_count());
    return self.node_innovation_map.len();
}
}

/// This is used to weight a link AlignmentMetric.
pub struct GenomeDistance {
    pub excess: f64,
    pub disjoint: f64,
    pub weight: f64,
}

impl<NT: NodeType> Distance<Genome<NT>> for GenomeDistance {
    /// Weighted compatibility distance, computed from the link metric only.
    ///
    /// Excess and disjoint counts are normalized by the size of the larger
    /// genome; the weight term uses the mean weight distance over matching
    /// links (or zero if nothing matches).
    fn distance(&self, genome_left: &Genome<NT>, genome_right: &Genome<NT>) -> f64 {
        let link = genome_left.combined_alignment_metric(genome_right).link_metric;

        // Two genomes without any links are identical.
        if link.max_len == 0 {
            return 0.0;
        }

        let n = link.max_len as f64;
        let mean_weight_dist = if link.matching > 0 {
            link.weight_distance / (link.matching as f64)
        } else {
            0.0
        };

        self.excess * (link.excess as f64) / n
            + self.disjoint * (link.disjoint as f64) / n
            + self.weight * mean_weight_dist
    }
}

#[cfg(test)]
mod tests {
    use super::{NodeType, Genome, NodeInnovation, LinkInnovation};
    use weight::Weight;
    use innovation::InnovationRange;

    /// Trivial node type which accepts links in both directions.
    #[derive(Clone, Debug)]
    struct NT;
    impl NodeType for NT {
        fn accept_incoming_links(&self) -> bool {
            true
        }
        fn accept_outgoing_links(&self) -> bool {
            true
        }
    }

    #[test]
    fn test_add_node() {
        let mut g = Genome::<NT>::new();
        assert_eq!(g.node_count(), 0);
        g.add_node(NodeInnovation(0), NT);
        assert_eq!(g.node_count(), 1);
        g.add_node(NodeInnovation(1), NT);
        assert_eq!(g.node_count(), 2);
    }

    #[test]
    #[should_panic(expected = "Duplicate node_innovation")]
    fn test_add_duplicate_node() {
        let mut g = Genome::<NT>::new();
        g.add_node(NodeInnovation(0), NT);
        g.add_node(NodeInnovation(0), NT);
    }

    #[test]
    fn test_add_link() {
        let mut g = Genome::<NT>::new();
        let (n0, n1, n2) = (NodeInnovation(0), NodeInnovation(1), NodeInnovation(2));
        for &n in [n0, n1, n2].iter() {
            g.add_node(n, NT);
        }

        assert_eq!(g.link_count(), 0);
        g.add_link(n0, n1, LinkInnovation(0), Weight(0.0));
        assert_eq!(g.link_count(), 1);
        g.add_link(n0, n2, LinkInnovation(0), Weight(0.0));
        assert_eq!(g.link_count(), 2);
    }

    #[test]
    fn test_link_innovation_range() {
        let mut g = Genome::<NT>::new();
        let (n0, n1, n2) = (NodeInnovation(0), NodeInnovation(1), NodeInnovation(2));
        for &n in [n0, n1, n2].iter() {
            g.add_node(n, NT);
        }
        assert_eq!(g.link_innovation_range(), InnovationRange::Empty);

        g.add_link(n0, n1, LinkInnovation(5), Weight(0.0));
        assert_eq!(g.link_innovation_range(),
                   InnovationRange::Single(LinkInnovation(5)));

        g.add_link(n0, n2, LinkInnovation(1), Weight(0.0));
        assert_eq!(g.link_innovation_range(),
                   InnovationRange::FromTo(LinkInnovation(1), LinkInnovation(5)));

        g.add_link(n1, n2, LinkInnovation(99), Weight(0.0));
        assert_eq!(g.link_innovation_range(),
                   InnovationRange::FromTo(LinkInnovation(1), LinkInnovation(99)));
    }

    #[test]
    fn test_node_innovation_range() {
        let mut g = Genome::<NT>::new();
        assert_eq!(g.node_innovation_range(), InnovationRange::Empty);

        g.add_node(NodeInnovation(5), NT);
        assert_eq!(g.node_innovation_range(),
                   InnovationRange::Single(NodeInnovation(5)));

        // Each step adds one node and lists the expected (min, max) afterwards.
        let steps = [(7, 5, 7), (6, 5, 7), (4, 4, 7), (1, 1, 7), (1000, 1, 1000)];
        for &(added, lo, hi) in steps.iter() {
            g.add_node(NodeInnovation(added), NT);
            assert_eq!(g.node_innovation_range(),
                       InnovationRange::FromTo(NodeInnovation(lo), NodeInnovation(hi)));
        }
    }

    #[test]
    fn test_node_align_metric() {
        let mut a = Genome::<NT>::new();
        let mut b = Genome::<NT>::new();

        let m = a.node_alignment_metric(&b);
        assert_eq!((m.max_len, m.matching, m.excess, m.disjoint), (0, 0, 0, 0));
        assert_eq!(m.weight_distance, 0.0);

        // Each step adds a node to one side (`true` = left genome) and lists
        // the expected (max_len, matching, excess, disjoint) afterwards.
        let steps = [(true, 5, (1, 0, 1, 0)),
                     (true, 10, (2, 0, 2, 0)),
                     (false, 6, (2, 0, 2, 1)),
                     (false, 5, (2, 1, 1, 1)),
                     (true, 6, (3, 2, 1, 0)),
                     (false, 11, (3, 2, 1, 1))];
        for &(into_left, innovation, expected) in steps.iter() {
            if into_left {
                a.add_node(NodeInnovation(innovation), NT);
            } else {
                b.add_node(NodeInnovation(innovation), NT);
            }
            let m = a.node_alignment_metric(&b);
            assert_eq!((m.max_len, m.matching, m.excess, m.disjoint), expected);
            assert_eq!(m.weight_distance, 0.0);
        }
    }

    #[test]
    fn test_combined_align_metric() {
        let mut a = Genome::<NT>::new();
        let mut b = Genome::<NT>::new();

        // The node part of the combined metric must always agree with the
        // standalone node alignment metric.
        assert_eq!(a.node_alignment_metric(&b),
                   a.combined_alignment_metric(&b).node_metric);

        let steps = [(true, 5), (true, 10), (false, 6), (false, 5), (true, 6), (false, 11)];
        for &(into_left, innovation) in steps.iter() {
            if into_left {
                a.add_node(NodeInnovation(innovation), NT);
            } else {
                b.add_node(NodeInnovation(innovation), NT);
            }
            assert_eq!(a.node_alignment_metric(&b),
                       a.combined_alignment_metric(&b).node_metric);
        }
    }
}
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ <%namespace name="helpers" file="/helpers.mako.rs" /> <% from data import to_idl_name, SYSTEM_FONT_LONGHANDS %> use app_units::Au; use cssparser::{Parser, RGBA}; use euclid::{Point2D, Size2D}; #[cfg(feature = "gecko")] use gecko_bindings::bindings::RawServoAnimationValueMap; #[cfg(feature = "gecko")] use gecko_bindings::structs::RawGeckoGfxMatrix4x4; #[cfg(feature = "gecko")] use gecko_bindings::structs::nsCSSPropertyID; #[cfg(feature = "gecko")] use gecko_bindings::sugar::ownership::{HasFFI, HasSimpleFFI}; #[cfg(feature = "gecko")] use gecko_string_cache::Atom; use properties::{CSSWideKeyword, PropertyDeclaration}; use properties::longhands; use properties::longhands::background_size::computed_value::T as BackgroundSizeList; use properties::longhands::border_spacing::computed_value::T as BorderSpacing; use properties::longhands::font_weight::computed_value::T as FontWeight; use properties::longhands::font_stretch::computed_value::T as FontStretch; use properties::longhands::line_height::computed_value::T as LineHeight; use properties::longhands::transform::computed_value::ComputedMatrix; use properties::longhands::transform::computed_value::ComputedOperation as TransformOperation; use properties::longhands::transform::computed_value::T as TransformList; use properties::longhands::vertical_align::computed_value::T as VerticalAlign; use properties::longhands::visibility::computed_value::T as Visibility; #[cfg(feature = "gecko")] use properties::{PropertyId, PropertyDeclarationId, LonghandId}; #[cfg(feature = "gecko")] use properties::{ShorthandId}; use selectors::parser::SelectorParseError; use smallvec::SmallVec; use std::cmp; #[cfg(feature = "gecko")] use fnv::FnvHashMap; use style_traits::ParseError; use super::ComputedValues; #[cfg(any(feature = "gecko", feature 
= "testing"))] use values::Auto; use values::{CSSFloat, CustomIdent, Either}; use values::animated::{ToAnimatedValue, ToAnimatedZero}; use values::animated::effects::BoxShadowList as AnimatedBoxShadowList; use values::animated::effects::Filter as AnimatedFilter; use values::animated::effects::FilterList as AnimatedFilterList; use values::animated::effects::TextShadowList as AnimatedTextShadowList; use values::computed::{Angle, LengthOrPercentageOrAuto, LengthOrPercentageOrNone}; use values::computed::{BorderCornerRadius, ClipRect}; use values::computed::{CalcLengthOrPercentage, Color, Context, ComputedValueAsSpecified}; use values::computed::{LengthOrPercentage, MaxLength, MozLength, Percentage, ToComputedValue}; use values::computed::{NonNegativeAu, NonNegativeNumber, PositiveIntegerOrAuto}; use values::computed::length::{NonNegativeLengthOrAuto, NonNegativeLengthOrNormal}; use values::computed::length::NonNegativeLengthOrPercentage; use values::generics::{GreaterThanOrEqualToOne, NonNegative}; use values::generics::border::BorderCornerRadius as GenericBorderCornerRadius; use values::generics::effects::Filter; use values::generics::position as generic_position; use values::generics::svg::{SVGLength, SVGOpacity, SVGPaint, SVGPaintKind, SVGStrokeDashArray}; /// A trait used to implement various procedures used during animation. pub trait Animatable: Sized { /// Performs a weighted sum of this value and |other|. This is used for /// interpolation and addition of animation values. fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()>; /// [Interpolates][interpolation] a value with another for a given property. /// /// [interpolation]: https://w3c.github.io/web-animations/#animation-interpolation fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> { self.add_weighted(other, 1.0 - progress, progress) } /// Returns the [sum][animation-addition] of this value and |other|. 
/// /// [animation-addition]: https://w3c.github.io/web-animations/#animation-addition fn add(&self, other: &Self) -> Result<Self, ()> { self.add_weighted(other, 1.0, 1.0) } /// [Accumulates][animation-accumulation] this value onto itself (|count| - 1) times then /// accumulates |other| onto the result. /// If |count| is zero, the result will be |other|. /// /// [animation-accumulation]: https://w3c.github.io/web-animations/#animation-accumulation fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { self.add_weighted(other, count as f64, 1.0) } /// Compute distance between a value and another for a given property. fn compute_distance(&self, _other: &Self) -> Result<f64, ()> { Err(()) } /// In order to compute the Euclidean distance of a list or property value with multiple /// components, we need to compute squared distance for each element, so the vector can sum it /// and then get its squared root as the distance. fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_distance(other).map(|d| d * d) } } /// https://drafts.csswg.org/css-transitions/#animtype-repeatable-list pub trait RepeatableListAnimatable: Animatable {} /// A longhand property whose animation type is not "none". /// /// NOTE: This includes the 'display' property since it is animatable from SMIL even though it is /// not animatable from CSS animations or Web Animations. CSS transitions also does not allow /// animating 'display', but for CSS transitions we have the separate TransitionProperty type. #[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimatableLonghand { % for prop in data.longhands: % if prop.animatable: /// ${prop.name} ${prop.camel_case}, % endif % endfor } impl AnimatableLonghand { /// Returns true if this AnimatableLonghand is one of the discretely animatable properties. 
pub fn is_discrete(&self) -> bool { match *self { % for prop in data.longhands: % if prop.animation_value_type == "discrete": AnimatableLonghand::${prop.camel_case} => true, % endif % endfor _ => false } } /// Converts from an nsCSSPropertyID. Returns None if nsCSSPropertyID is not an animatable /// longhand in Servo. #[cfg(feature = "gecko")] pub fn from_nscsspropertyid(css_property: nsCSSPropertyID) -> Option<Self> { match css_property { % for prop in data.longhands: % if prop.animatable: ${helpers.to_nscsspropertyid(prop.ident)} => Some(AnimatableLonghand::${prop.camel_case}), % endif % endfor _ => None } } /// Converts from TransitionProperty. Returns None if the property is not an animatable /// longhand. pub fn from_transition_property(transition_property: &TransitionProperty) -> Option<Self> { match *transition_property { % for prop in data.longhands: % if prop.transitionable and prop.animatable: TransitionProperty::${prop.camel_case} => Some(AnimatableLonghand::${prop.camel_case}), % endif % endfor _ => None } } /// Get an animatable longhand property from a property declaration. pub fn from_declaration(declaration: &PropertyDeclaration) -> Option<Self> { use properties::LonghandId; match *declaration { % for prop in data.longhands: % if prop.animatable: PropertyDeclaration::${prop.camel_case}(..) => Some(AnimatableLonghand::${prop.camel_case}), % endif % endfor PropertyDeclaration::CSSWideKeyword(id, _) | PropertyDeclaration::WithVariables(id, _) => { match id { % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => Some(AnimatableLonghand::${prop.camel_case}), % endif % endfor _ => None, } }, _ => None, } } } /// Convert to nsCSSPropertyID. 
#[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl<'a> From< &'a AnimatableLonghand> for nsCSSPropertyID { fn from(property: &'a AnimatableLonghand) -> nsCSSPropertyID { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => ${helpers.to_nscsspropertyid(prop.ident)}, % endif % endfor } } } /// Convert to PropertyDeclarationId. #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl<'a> From<AnimatableLonghand> for PropertyDeclarationId<'a> { fn from(property: AnimatableLonghand) -> PropertyDeclarationId<'a> { match property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => PropertyDeclarationId::Longhand(LonghandId::${prop.camel_case}), % endif % endfor } } } /// Returns true if this nsCSSPropertyID is one of the animatable properties. #[cfg(feature = "gecko")] pub fn nscsspropertyid_is_animatable(property: nsCSSPropertyID) -> bool { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.animatable: ${helpers.to_nscsspropertyid(prop.ident)} => true, % endif % endfor _ => false } } /// A given transition property, that is either `All`, a transitionable longhand property, /// a shorthand with at least one transitionable longhand component, or an unsupported property. // NB: This needs to be here because it needs all the longhands generated // beforehand. #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, Eq, Hash, PartialEq, ToCss)] pub enum TransitionProperty { /// All, any transitionable property changing should generate a transition. All, % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: /// ${prop.name} ${prop.camel_case}, % endif % endfor /// Unrecognized property which could be any non-transitionable, custom property, or /// unknown property. 
Unsupported(CustomIdent) } no_viewport_percentage!(TransitionProperty); impl ComputedValueAsSpecified for TransitionProperty {} impl TransitionProperty { /// Iterates over each longhand property. pub fn each<F: FnMut(&TransitionProperty) -> ()>(mut cb: F) { % for prop in data.longhands: % if prop.transitionable: cb(&TransitionProperty::${prop.camel_case}); % endif % endfor } /// Iterates over every longhand property that is not TransitionProperty::All, stopping and /// returning true when the provided callback returns true for the first time. pub fn any<F: FnMut(&TransitionProperty) -> bool>(mut cb: F) -> bool { % for prop in data.longhands: % if prop.transitionable: if cb(&TransitionProperty::${prop.camel_case}) { return true; } % endif % endfor false } /// Parse a transition-property value. pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { let ident = input.expect_ident()?; let supported = match_ignore_ascii_case! { &ident, "all" => Ok(Some(TransitionProperty::All)), % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: "${prop.name}" => Ok(Some(TransitionProperty::${prop.camel_case})), % endif % endfor "none" => Err(()), _ => Ok(None), }; match supported { Ok(Some(property)) => Ok(property), Ok(None) => CustomIdent::from_ident(ident, &[]).map(TransitionProperty::Unsupported), Err(()) => Err(SelectorParseError::UnexpectedIdent(ident.clone()).into()), } } /// Return transitionable longhands of this shorthand TransitionProperty, except for "all". 
pub fn longhands(&self) -> &'static [TransitionProperty] { % for prop in data.shorthands_except_all(): % if prop.transitionable: static ${prop.ident.upper()}: &'static [TransitionProperty] = &[ % for sub in prop.sub_properties: % if sub.transitionable: TransitionProperty::${sub.camel_case}, % endif % endfor ]; % endif % endfor match *self { % for prop in data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => ${prop.ident.upper()}, % endif % endfor _ => panic!("Not allowed to call longhands() for this TransitionProperty") } } /// Returns true if this TransitionProperty is a shorthand. pub fn is_shorthand(&self) -> bool { match *self { % for prop in data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => true, % endif % endfor _ => false } } } /// Convert to nsCSSPropertyID. #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl<'a> From< &'a TransitionProperty> for nsCSSPropertyID { fn from(transition_property: &'a TransitionProperty) -> nsCSSPropertyID { match *transition_property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => ${helpers.to_nscsspropertyid(prop.ident)}, % endif % endfor TransitionProperty::All => nsCSSPropertyID::eCSSPropertyExtra_all_properties, _ => panic!("Unconvertable Servo transition property: {:?}", transition_property), } } } /// Convert nsCSSPropertyID to TransitionProperty #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl From<nsCSSPropertyID> for TransitionProperty { fn from(property: nsCSSPropertyID) -> TransitionProperty { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: ${helpers.to_nscsspropertyid(prop.ident)} => TransitionProperty::${prop.camel_case}, % else: ${helpers.to_nscsspropertyid(prop.ident)} => TransitionProperty::Unsupported(CustomIdent(Atom::from("${prop.ident}"))), % endif % 
endfor nsCSSPropertyID::eCSSPropertyExtra_all_properties => TransitionProperty::All, _ => panic!("Unconvertable nsCSSPropertyID: {:?}", property), } } } /// Returns true if this nsCSSPropertyID is one of the transitionable properties. #[cfg(feature = "gecko")] pub fn nscsspropertyid_is_transitionable(property: nsCSSPropertyID) -> bool { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: ${helpers.to_nscsspropertyid(prop.ident)} => true, % endif % endfor _ => false } } /// An animated property interpolation between two computed values for that /// property. #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimatedProperty { % for prop in data.longhands: % if prop.animatable: <% if prop.is_animatable_with_computed_value: value_type = "longhands::{}::computed_value::T".format(prop.ident) else: value_type = prop.animation_value_type %> /// ${prop.name} ${prop.camel_case}(${value_type}, ${value_type}), % endif % endfor } impl AnimatedProperty { /// Get the name of this property. pub fn name(&self) -> &'static str { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(..) => "${prop.name}", % endif % endfor } } /// Whether this interpolation does animate, that is, whether the start and /// end values are different. pub fn does_animate(&self) -> bool { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to, % endif % endfor } } /// Whether an animated property has the same end value as another. 
pub fn has_the_same_end_value_as(&self, other: &Self) -> bool { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimatedProperty::${prop.camel_case}(_, ref this_end_value), &AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => { this_end_value == other_end_value } % endif % endfor _ => false, } } /// Update `style` with the proper computed style corresponding to this /// animation at `progress`. pub fn update(&self, style: &mut ComputedValues, progress: f64) { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => { // https://w3c.github.io/web-animations/#discrete-animation-type % if prop.animation_value_type == "discrete": let value = if progress < 0.5 { from.clone() } else { to.clone() }; % else: let value = match from.interpolate(to, progress) { Ok(value) => value, Err(()) => return, }; % endif % if not prop.is_animatable_with_computed_value: let value: longhands::${prop.ident}::computed_value::T = ToAnimatedValue::from_animated_value(value); % endif style.mutate_${prop.style_struct.name_lower}().set_${prop.ident}(value); } % endif % endfor } } /// Get an animatable value from a transition-property, an old style, and a /// new style. 
pub fn from_animatable_longhand(property: &AnimatableLonghand, old_style: &ComputedValues, new_style: &ComputedValues) -> AnimatedProperty { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => { let old_computed = old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); let new_computed = new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); AnimatedProperty::${prop.camel_case}( % if prop.is_animatable_with_computed_value: old_computed, new_computed, % else: old_computed.to_animated_value(), new_computed.to_animated_value(), % endif ) } % endif % endfor } } } /// A collection of AnimationValue that were composed on an element. /// This HashMap stores the values that are the last AnimationValue to be /// composed for each TransitionProperty. #[cfg(feature = "gecko")] pub type AnimationValueMap = FnvHashMap<AnimatableLonghand, AnimationValue>; #[cfg(feature = "gecko")] unsafe impl HasFFI for AnimationValueMap { type FFIType = RawServoAnimationValueMap; } #[cfg(feature = "gecko")] unsafe impl HasSimpleFFI for AnimationValueMap {} /// An enum to represent a single computed value belonging to an animated /// property in order to be interpolated with another one. When interpolating, /// both values need to belong to the same property. /// /// This is different to AnimatedProperty in the sense that AnimatedProperty /// also knows the final value to be used during the animation. /// /// This is to be used in Gecko integration code. /// /// FIXME: We need to add a path for custom properties, but that's trivial after /// this (is a similar path to that of PropertyDeclaration). 
#[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimationValue { % for prop in data.longhands: % if prop.animatable: /// ${prop.name} % if prop.is_animatable_with_computed_value: ${prop.camel_case}(longhands::${prop.ident}::computed_value::T), % else: ${prop.camel_case}(${prop.animation_value_type}), % endif % endif % endfor } impl AnimationValue { /// "Uncompute" this animation value in order to be used inside the CSS /// cascade. pub fn uncompute(&self) -> PropertyDeclaration { use properties::longhands; match *self { % for prop in data.longhands: % if prop.animatable: AnimationValue::${prop.camel_case}(ref from) => { PropertyDeclaration::${prop.camel_case}( % if prop.boxed: Box::new( % endif longhands::${prop.ident}::SpecifiedValue::from_computed_value( % if prop.is_animatable_with_computed_value: from % else: &ToAnimatedValue::from_animated_value(from.clone()) % endif )) % if prop.boxed: ) % endif } % endif % endfor } } /// Construct an AnimationValue from a property declaration. pub fn from_declaration( decl: &PropertyDeclaration, context: &mut Context, initial: &ComputedValues ) -> Option<Self> { use properties::LonghandId; match *decl { % for prop in data.longhands: % if prop.animatable: PropertyDeclaration::${prop.camel_case}(ref val) => { % if prop.ident in SYSTEM_FONT_LONGHANDS and product == "gecko": if let Some(sf) = val.get_system() { longhands::system_font::resolve_system_font(sf, context); } % endif let computed = val.to_computed_value(context); Some(AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif )) }, % endif % endfor PropertyDeclaration::CSSWideKeyword(id, keyword) => { match id { // We put all the animatable properties first in the hopes // that it might increase match locality. 
% for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let computed = match keyword { % if not prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Initial => { let initial_struct = initial.get_${prop.style_struct.name_lower}(); initial_struct.clone_${prop.ident}() }, % if prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Inherit => { let inherit_struct = context.builder .get_parent_${prop.style_struct.name_lower}(); inherit_struct.clone_${prop.ident}() }, }; % if not prop.is_animatable_with_computed_value: let computed = computed.to_animated_value(); % endif Some(AnimationValue::${prop.camel_case}(computed)) }, % endif % endfor % for prop in data.longhands: % if not prop.animatable: LonghandId::${prop.camel_case} => None, % endif % endfor } }, PropertyDeclaration::WithVariables(id, ref unparsed) => { let custom_props = context.style().custom_properties(); let substituted = unparsed.substitute_variables(id, &custom_props, context.quirks_mode); AnimationValue::from_declaration(&substituted, context, initial) }, _ => None // non animatable properties will get included because of shorthands. ignore. } } /// Get an AnimationValue for an AnimatableLonghand from a given computed values. 
pub fn from_computed_values(property: &AnimatableLonghand, computed_values: &ComputedValues) -> Self { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => { let computed = computed_values .get_${prop.style_struct.ident.strip("_")}() .clone_${prop.ident}(); AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif ) } % endif % endfor } } } impl Animatable for AnimationValue { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { % if prop.animation_value_type == "discrete": if self_portion > other_portion { Ok(AnimationValue::${prop.camel_case}(from.clone())) } else { Ok(AnimationValue::${prop.camel_case}(to.clone())) } % else: from.add_weighted(to, self_portion, other_portion) .map(AnimationValue::${prop.camel_case}) % endif } % endif % endfor _ => { panic!("Expected weighted addition of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn add(&self, other: &Self) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": (&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.add(to).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected addition of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": 
(&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.accumulate(to, count).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected accumulation of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type != "discrete": (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.compute_distance(to) }, % else: (&AnimationValue::${prop.camel_case}(ref _from), &AnimationValue::${prop.camel_case}(ref _to)) => { Err(()) }, % endif % endif % endfor _ => { panic!("Expected compute_distance of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } } impl ToAnimatedZero for AnimationValue { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { % for prop in data.longhands: % if prop.animatable and prop.animation_value_type != "discrete": AnimationValue::${prop.camel_case}(ref base) => { Ok(AnimationValue::${prop.camel_case}(base.to_animated_zero()?)) }, % endif % endfor _ => Err(()), } } } impl RepeatableListAnimatable for LengthOrPercentage {} impl RepeatableListAnimatable for Either<f32, LengthOrPercentage> {} impl RepeatableListAnimatable for Either<NonNegativeNumber, NonNegativeLengthOrPercentage> {} macro_rules! repeated_vec_impl { ($($ty:ty),*) => { $(impl<T: RepeatableListAnimatable> Animatable for $ty { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { // If the length of either list is zero, the least common multiple is undefined. 
if self.is_empty() || other.is_empty() { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| { me.add_weighted(you, self_portion, other_portion) }).collect() } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { // If the length of either list is zero, the least common multiple is undefined. if cmp::min(self.len(), other.len()) < 1 { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| { me.compute_squared_distance(you) }).sum() } })* }; } repeated_vec_impl!(SmallVec<[T; 1]>, Vec<T>); /// https://drafts.csswg.org/css-transitions/#animtype-number impl Animatable for Au { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Au((self.0 as f64 * self_portion + other.0 as f64 * other_portion).round() as i32)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.0.compute_distance(&other.0) } } impl <T> Animatable for Option<T> where T: Animatable, { #[inline] fn add_weighted(&self, other: &Option<T>, self_portion: f64, other_portion: f64) -> Result<Option<T>, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { Ok(this.add_weighted(other, self_portion, other_portion).ok()) } (&None, &None) => Ok(None), _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { this.compute_distance(other) }, (&None, &None) => Ok(0.0), _ => Err(()), } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { this.compute_squared_distance(other) }, (&None, 
&None) => Ok(0.0),
            _ => Err(()),
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f32 {
    #[inline]
    fn add_weighted(&self, other: &f32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Interpolate in f64 to limit rounding error, then narrow back to f32.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion) as f32)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f64 {
    #[inline]
    fn add_weighted(&self, other: &f64, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(*self * self_portion + *other * other_portion)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs())
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-integer
impl Animatable for i32 {
    #[inline]
    fn add_weighted(&self, other: &i32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Integers interpolate in f64 and round to the nearest whole value.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion).round() as i32)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for Angle {
    #[inline]
    fn add_weighted(&self, other: &Angle, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // When both angles use the same unit, interpolate in that unit so the
        // serialized result keeps it; otherwise fall back to radians.
        // (`try!` predates `?` here; they are equivalent.)
        match (*self, *other) {
            % for angle_type in [ 'Degree', 'Gradian', 'Turn' ]:
            (Angle::${angle_type}(val1), Angle::${angle_type}(val2)) => {
                Ok(Angle::${angle_type}(
                    try!(val1.add_weighted(&val2, self_portion, other_portion))
                ))
            }
            % endfor
            _ => {
                self.radians()
                    .add_weighted(&other.radians(), self_portion, other_portion)
                    .map(Angle::from_radians)
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        // Use the formula for calculating the distance between angles defined in SVG:
        // https://www.w3.org/TR/SVG/animate.html#complexDistances
        Ok((self.radians64() - other.radians64()).abs())
    }
}

/// 
https://drafts.csswg.org/css-transitions/#animtype-percentage impl Animatable for Percentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Percentage((self.0 as f64 * self_portion + other.0 as f64 * other_portion) as f32)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { Ok((self.0 as f64 - other.0 as f64).abs()) } } impl ToAnimatedZero for Percentage { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(Percentage(0.)) } } /// https://drafts.csswg.org/css-transitions/#animtype-visibility impl Animatable for Visibility { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (Visibility::visible, _) => { Ok(if self_portion > 0.0 { *self } else { *other }) }, (_, Visibility::visible) => { Ok(if other_portion > 0.0 { *other } else { *self }) }, _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { if *self == *other { Ok(0.0) } else { Ok(1.0) } } } impl ToAnimatedZero for Visibility { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } impl<T: Animatable + Copy> Animatable for Size2D<T> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let width = self.width.add_weighted(&other.width, self_portion, other_portion)?; let height = self.height.add_weighted(&other.height, self_portion, other_portion)?; Ok(Size2D::new(width, height)) } } impl<T: Animatable + Copy> Animatable for Point2D<T> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let x = self.x.add_weighted(&other.x, self_portion, other_portion)?; let y = self.y.add_weighted(&other.y, self_portion, other_portion)?; Ok(Point2D::new(x, y)) } } impl Animatable for BorderCornerRadius { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) 
-> Result<Self, ()> { self.0.add_weighted(&other.0, self_portion, other_portion).map(GenericBorderCornerRadius) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.0.width.compute_squared_distance(&other.0.width)? + self.0.height.compute_squared_distance(&other.0.height)?) } } impl ToAnimatedZero for BorderCornerRadius { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-transitions/#animtype-length impl Animatable for VerticalAlign { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref this)), VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref other))) => { this.add_weighted(other, self_portion, other_portion).map(|value| { VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(value)) }) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (VerticalAlign::LengthOrPercentage(ref this), VerticalAlign::LengthOrPercentage(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } impl ToAnimatedZero for VerticalAlign { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for CalcLengthOrPercentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { fn add_weighted_half<T>(this: Option<T>, other: Option<T>, self_portion: f64, other_portion: f64) -> Result<Option<T>, ()> where T: Default + Animatable, { match (this, other) { (None, None) => Ok(None), (this, other) => { let this = this.unwrap_or(T::default()); let other = other.unwrap_or(T::default()); 
this.add_weighted(&other, self_portion, other_portion).map(Some) } } } let length = self.unclamped_length().add_weighted(&other.unclamped_length(), self_portion, other_portion)?; let percentage = add_weighted_half(self.percentage, other.percentage, self_portion, other_portion)?; Ok(CalcLengthOrPercentage::with_clamping_mode(length, percentage, self.clamping_mode)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let length_diff = (self.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (self.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Length) } (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Percentage) } (this, other) => { // Special handling for zero values since these should not require calc(). if this.is_definitely_zero() { return other.add_weighted(&other, 0., other_portion) } else if other.is_definitely_zero() { return this.add_weighted(self, self_portion, 0.) 
} let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.add_weighted(&other, self_portion, other_portion) .map(LengthOrPercentage::Calc) } } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } } } impl ToAnimatedZero for LengthOrPercentage { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(LengthOrPercentage::zero()) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrAuto { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) 
.map(LengthOrPercentageOrAuto::Length) } (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrAuto::Percentage) } (LengthOrPercentageOrAuto::Auto, LengthOrPercentageOrAuto::Auto) => { Ok(LengthOrPercentageOrAuto::Auto) } (this, other) => { let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrAuto::Calc(result)), _ => Err(()), } } } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); if let (Some(this), Some(other)) = (this, other) { let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = 
(this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } else { Err(()) } } } } } impl ToAnimatedZero for LengthOrPercentageOrAuto { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { LengthOrPercentageOrAuto::Length(_) | LengthOrPercentageOrAuto::Percentage(_) | LengthOrPercentageOrAuto::Calc(_) => { Ok(LengthOrPercentageOrAuto::Length(Au(0))) }, LengthOrPercentageOrAuto::Auto => Err(()), } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrNone { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Length) } (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Percentage) } (LengthOrPercentageOrNone::None, LengthOrPercentageOrNone::None) => { Ok(LengthOrPercentageOrNone::None) } (this, other) => { let this = <Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrNone::Calc(result)), _ => Err(()), } }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this = 
<Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); this.compute_distance(&other) }, } } } impl ToAnimatedZero for LengthOrPercentageOrNone { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { LengthOrPercentageOrNone::Length(_) | LengthOrPercentageOrNone::Percentage(_) | LengthOrPercentageOrNone::Calc(_) => { Ok(LengthOrPercentageOrNone::Length(Au(0))) }, LengthOrPercentageOrNone::None => Err(()), } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MozLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MozLength::LengthOrPercentageOrAuto) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } impl ToAnimatedZero for MozLength { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { MozLength::LengthOrPercentageOrAuto(ref length) => { Ok(MozLength::LengthOrPercentageOrAuto(length.to_animated_zero()?)) }, _ => Err(()) } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MaxLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MaxLength::LengthOrPercentageOrNone(ref this), MaxLength::LengthOrPercentageOrNone(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MaxLength::LengthOrPercentageOrNone) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { 
(MaxLength::LengthOrPercentageOrNone(ref this),
             MaxLength::LengthOrPercentageOrNone(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(()),
        }
    }
}

impl ToAnimatedZero for MaxLength {
    // max-width/max-height have no meaningful additive zero, so accumulation
    // of this value is rejected.
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// http://dev.w3.org/csswg/css-transitions/#animtype-font-weight
impl Animatable for FontWeight {
    /// Weighted sum of two font weights, computed relative to `normal` (400),
    /// clamped into the valid [100, 900] range and snapped to the nearest
    /// multiple of 100.
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        let a = self.0 as f64;
        let b = other.0 as f64;
        const NORMAL: f64 = 400.;
        let weight = (a - NORMAL) * self_portion + (b - NORMAL) * other_portion + NORMAL;
        // Clamp into [100, 900] *before* rounding to a multiple of 100.
        // NOTE: the order matters — the previous `weight.min(100.).max(900.)`
        // collapsed every interpolated weight to 900.
        let weight = (weight.max(100.).min(900.) / 100.).round() * 100.;
        Ok(FontWeight(weight as u16))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        // Distance is simply the absolute difference of the numeric weights.
        let a = self.0 as f64;
        let b = other.0 as f64;
        a.compute_distance(&b)
    }
}

impl ToAnimatedZero for FontWeight {
    // The additive identity for font-weight is `normal` (400).
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Ok(FontWeight::normal()) }
}

/// https://drafts.csswg.org/css-fonts/#font-stretch-prop
impl Animatable for FontStretch {
    /// Interpolate the keyword values on the ordered 1.0..=9.0 scale
    /// (see `From<FontStretch> for f64`), relative to `normal`.
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()>
    {
        let from = f64::from(*self);
        let to = f64::from(*other);
        // FIXME: When `const fn` is available in release rust, make |normal|, below, const.
        let normal = f64::from(FontStretch::normal);
        let result = (from - normal) * self_portion + (to - normal) * other_portion + normal;
        Ok(result.into())
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        let from = f64::from(*self);
        let to = f64::from(*other);
        from.compute_distance(&to)
    }
}

impl ToAnimatedZero for FontStretch {
    // font-stretch has no additive zero; accumulation is rejected.
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// We should treat font stretch as real number in order to interpolate this property.
/// https://drafts.csswg.org/css-fonts-3/#font-stretch-animation
///
/// Maps the nine font-stretch keywords onto the real numbers 1.0..=9.0 so the
/// property can be interpolated numerically (`normal` maps to 5.0).
impl From<FontStretch> for f64 {
    fn from(stretch: FontStretch) -> f64 {
        use self::FontStretch::*;
        match stretch {
            ultra_condensed => 1.0,
            extra_condensed => 2.0,
            condensed => 3.0,
            semi_condensed => 4.0,
            normal => 5.0,
            semi_expanded => 6.0,
            expanded => 7.0,
            extra_expanded => 8.0,
            ultra_expanded => 9.0,
        }
    }
}

/// Inverse of the mapping above: round the interpolated real number to the
/// nearest keyword, clamping the index into 1.0..=9.0.
impl Into<FontStretch> for f64 {
    fn into(self) -> FontStretch {
        use properties::longhands::font_stretch::computed_value::T::*;
        // `+ 0.5` then `floor` rounds to the nearest integer index.
        let index = (self + 0.5).floor().min(9.0).max(1.0);
        static FONT_STRETCH_ENUM_MAP: [FontStretch; 9] =
            [ ultra_condensed, extra_condensed, condensed, semi_condensed, normal,
              semi_expanded, expanded, extra_expanded, ultra_expanded ];
        FONT_STRETCH_ENUM_MAP[(index - 1.0) as usize]
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-simple-list
impl<H: Animatable, V: Animatable> Animatable for generic_position::Position<H, V> {
    /// Interpolate each axis independently.
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(generic_position::Position {
            horizontal: self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?,
            vertical: self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?,
        })
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    // Euclidean distance over the two axes.
    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok(self.horizontal.compute_squared_distance(&other.horizontal)? +
           self.vertical.compute_squared_distance(&other.vertical)?)
    }
}

impl<H, V> ToAnimatedZero for generic_position::Position<H, V>
    where H: ToAnimatedZero, V: ToAnimatedZero
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(generic_position::Position {
            horizontal: self.horizontal.to_animated_zero()?,
            vertical: self.vertical.to_animated_zero()?,
        })
    }
}

impl<H, V> RepeatableListAnimatable for generic_position::Position<H, V>
    where H: RepeatableListAnimatable, V: RepeatableListAnimatable {}

/// https://drafts.csswg.org/css-transitions/#animtype-rect
impl Animatable for ClipRect {
    /// Interpolate each of the four edges independently.
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(ClipRect {
            top: self.top.add_weighted(&other.top, self_portion, other_portion)?,
            right: self.right.add_weighted(&other.right, self_portion, other_portion)?,
            bottom: self.bottom.add_weighted(&other.bottom, self_portion, other_portion)?,
            left: self.left.add_weighted(&other.left, self_portion, other_portion)?,
        })
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    // Squared Euclidean distance over the four edge distances.
    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        let list = [
            self.top.compute_distance(&other.top)?,
            self.right.compute_distance(&other.right)?,
            self.bottom.compute_distance(&other.bottom)?,
            self.left.compute_distance(&other.left)?
        ];
        Ok(list.iter().fold(0.0f64, |sum, diff| sum + diff * diff))
    }
}

impl ToAnimatedZero for ClipRect {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// Check if it's possible to do a direct numerical interpolation
/// between these two transform lists.
/// http://dev.w3.org/csswg/css-transforms/#transform-transform-animation
///
/// Returns true when the two lists are the same length and pair up
/// operation-by-operation with matching primitive types, so each pair can be
/// interpolated directly without falling back to matrix interpolation.
fn can_interpolate_list(from_list: &[TransformOperation],
                        to_list: &[TransformOperation]) -> bool {
    // Lists must be equal length
    if from_list.len() != to_list.len() {
        return false;
    }

    // Each transform operation must match primitive type in other list
    for (from, to) in from_list.iter().zip(to_list) {
        match (from, to) {
            (&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) |
            (&TransformOperation::Skew(..), &TransformOperation::Skew(..)) |
            (&TransformOperation::Translate(..), &TransformOperation::Translate(..)) |
            (&TransformOperation::Scale(..), &TransformOperation::Scale(..)) |
            (&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) |
            (&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => {}
            _ => {
                return false;
            }
        }
    }

    true
}

/// Build an equivalent 'identity transform function list' based
/// on an existing transform list.
/// http://dev.w3.org/csswg/css-transforms/#none-transform-animation
///
/// Each operation is replaced by the identity of its own kind (zero
/// translation/skew/rotation angle, scale of 1, identity matrix), so a
/// `none` endpoint can be interpolated against a real transform list.
fn build_identity_transform_list(list: &[TransformOperation]) -> Vec<TransformOperation> {
    let mut result = vec!();

    for operation in list {
        match *operation {
            TransformOperation::Matrix(..) => {
                let identity = ComputedMatrix::identity();
                result.push(TransformOperation::Matrix(identity));
            }
            // NOTE(review): `-moz-transform` matrices with percentages get no
            // identity entry at all here — they are simply skipped.
            TransformOperation::MatrixWithPercents(..) => {}
            TransformOperation::Skew(..) => {
                result.push(TransformOperation::Skew(Angle::zero(), Angle::zero()))
            }
            TransformOperation::Translate(..) => {
                result.push(TransformOperation::Translate(LengthOrPercentage::zero(),
                                                          LengthOrPercentage::zero(),
                                                          Au(0)));
            }
            TransformOperation::Scale(..) => {
                result.push(TransformOperation::Scale(1.0, 1.0, 1.0));
            }
            TransformOperation::Rotate(..) => {
                result.push(TransformOperation::Rotate(0.0, 0.0, 1.0, Angle::zero()));
            }
            TransformOperation::Perspective(..) |
            TransformOperation::AccumulateMatrix { .. } |
            TransformOperation::InterpolateMatrix { .. } => {
                // Perspective: We convert a perspective function into an equivalent
                // ComputedMatrix, and then decompose/interpolate/recompose these matrices.
                // AccumulateMatrix/InterpolateMatrix: We do interpolation on
                // AccumulateMatrix/InterpolateMatrix by reading it as a ComputedMatrix
                // (with layout information), and then do matrix interpolation.
                //
                // Therefore, we use an identity matrix to represent the identity transform list.
                // http://dev.w3.org/csswg/css-transforms/#identity-transform-function
                let identity = ComputedMatrix::identity();
                result.push(TransformOperation::Matrix(identity));
            }
        }
    }

    result
}

/// A wrapper for calling add_weighted that interpolates the distance of the two values from
/// an initial_value and uses that to produce an interpolated value.
/// This is used for values such as 'scale' where the initial value is 1 and where if we interpolate
/// the absolute values, we will produce odd results for accumulation.
///
/// Algebraically: result = (a - init) * a_portion + (b - init) * b_portion + init,
/// expressed entirely through `add_weighted` calls so it works for any Animatable T.
fn add_weighted_with_initial_val<T: Animatable>(a: &T,
                                                b: &T,
                                                a_portion: f64,
                                                b_portion: f64,
                                                initial_val: &T) -> Result<T, ()> {
    let a = a.add_weighted(&initial_val, 1.0, -1.0)?;      // a - init
    let b = b.add_weighted(&initial_val, 1.0, -1.0)?;      // b - init
    let result = a.add_weighted(&b, a_portion, b_portion)?; // weighted sum
    result.add_weighted(&initial_val, 1.0, 1.0)             // + init
}

/// Add two transform lists.
/// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
///
/// Interpolates two transform lists pairwise when their shapes match
/// (see `can_interpolate_list`); otherwise defers to a lazily-resolved
/// `InterpolateMatrix` operation carrying both lists and the progress.
fn add_weighted_transform_lists(from_list: &[TransformOperation],
                                to_list: &[TransformOperation],
                                self_portion: f64,
                                other_portion: f64) -> TransformList {
    let mut result = vec![];

    if can_interpolate_list(from_list, to_list) {
        for (from, to) in from_list.iter().zip(to_list) {
            match (from, to) {
                (&TransformOperation::Matrix(from),
                 &TransformOperation::Matrix(_to)) => {
                    let sum = from.add_weighted(&_to, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                (&TransformOperation::MatrixWithPercents(_),
                 &TransformOperation::MatrixWithPercents(_)) => {
                    // We don't add_weighted `-moz-transform` matrices yet.
                    // They contain percentage values.
                    {}
                }
                (&TransformOperation::Skew(fx, fy),
                 &TransformOperation::Skew(tx, ty)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Skew(ix, iy));
                }
                (&TransformOperation::Translate(fx, fy, fz),
                 &TransformOperation::Translate(tx, ty, tz)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    let iz = fz.add_weighted(&tz, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Translate(ix, iy, iz));
                }
                (&TransformOperation::Scale(fx, fy, fz),
                 &TransformOperation::Scale(tx, ty, tz)) => {
                    // Scale interpolates relative to 1.0, not 0, hence the
                    // initial-value wrapper.
                    let ix = add_weighted_with_initial_val(&fx, &tx, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iy = add_weighted_with_initial_val(&fy, &ty, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iz = add_weighted_with_initial_val(&fz, &tz, self_portion,
                                                           other_portion, &1.0).unwrap();
                    result.push(TransformOperation::Scale(ix, iy, iz));
                }
                (&TransformOperation::Rotate(fx, fy, fz, fa),
                 &TransformOperation::Rotate(tx, ty, tz, ta)) => {
                    // Normalize both rotation axes; if they coincide we can
                    // interpolate the angle directly, otherwise fall back to
                    // matrix interpolation of the two rotations.
                    let norm_f = ((fx * fx) + (fy * fy) + (fz * fz)).sqrt();
                    let norm_t = ((tx * tx) + (ty * ty) + (tz * tz)).sqrt();
                    let (fx, fy, fz) = (fx / norm_f, fy / norm_f, fz / norm_f);
                    let (tx, ty, tz) = (tx / norm_t, ty / norm_t, tz / norm_t);
                    if fx == tx && fy == ty && fz == tz {
                        let ia = fa.add_weighted(&ta, self_portion, other_portion).unwrap();
                        result.push(TransformOperation::Rotate(fx, fy, fz, ia));
                    } else {
                        let matrix_f = rotate_to_matrix(fx, fy, fz, fa);
                        let matrix_t = rotate_to_matrix(tx, ty, tz, ta);
                        let sum = matrix_f.add_weighted(&matrix_t, self_portion, other_portion)
                                          .unwrap();
                        result.push(TransformOperation::Matrix(sum));
                    }
                }
                (&TransformOperation::Perspective(fd),
                 &TransformOperation::Perspective(_td)) => {
                    // perspective(d) is equivalent to a matrix with m43 = -1/d;
                    // interpolate the two equivalent matrices.
                    let mut fd_matrix = ComputedMatrix::identity();
                    let mut td_matrix = ComputedMatrix::identity();
                    fd_matrix.m43 = -1. / fd.to_f32_px();
                    td_matrix.m43 = -1. / _td.to_f32_px();
                    let sum = fd_matrix.add_weighted(&td_matrix, self_portion, other_portion)
                                       .unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                _ => {
                    // This should be unreachable due to the can_interpolate_list() call.
                    unreachable!();
                }
            }
        }
    } else {
        // Mismatched lists: defer to matrix interpolation at a later stage,
        // when layout information is available.
        let from_transform_list = TransformList(Some(from_list.to_vec()));
        let to_transform_list = TransformList(Some(to_list.to_vec()));
        result.push(
            TransformOperation::InterpolateMatrix { from_list: from_transform_list,
                                                    to_list: to_transform_list,
                                                    progress: Percentage(other_portion as f32) });
    }

    TransformList(Some(result))
}

/// https://www.w3.org/TR/css-transforms-1/#Rotate3dDefined
///
/// Builds the 4x4 matrix for rotate3d(x, y, z, a) per the spec formula.
/// Assumes (x, y, z) is already a unit vector — callers normalize first.
fn rotate_to_matrix(x: f32, y: f32, z: f32, a: Angle) -> ComputedMatrix {
    let half_rad = a.radians() / 2.0;
    let sc = (half_rad).sin() * (half_rad).cos();
    let sq = (half_rad).sin().powi(2);

    ComputedMatrix {
        m11: 1.0 - 2.0 * (y * y + z * z) * sq,
        m12: 2.0 * (x * y * sq + z * sc),
        m13: 2.0 * (x * z * sq - y * sc),
        m14: 0.0,

        m21: 2.0 * (x * y * sq - z * sc),
        m22: 1.0 - 2.0 * (x * x + z * z) * sq,
        m23: 2.0 * (y * z * sq + x * sc),
        m24: 0.0,

        m31: 2.0 * (x * z * sq + y * sc),
        m32: 2.0 * (y * z * sq - x * sc),
        m33: 1.0 - 2.0 * (x * x + y * y) * sq,
        m34: 0.0,

        m41: 0.0,
        m42: 0.0,
        m43: 0.0,
        m44: 1.0
    }
}

/// A 2d matrix for interpolation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
pub struct InnerMatrix2D {
    pub m11: CSSFloat, pub m12: CSSFloat,
    pub m21: CSSFloat, pub m22: CSSFloat,
}

/// A 2d translation function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate2D(f32, f32);

/// A 2d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale2D(f32, f32);

/// A decomposed 2d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed2D {
    /// The translation function.
    pub translate: Translate2D,
    /// The scale function.
    pub scale: Scale2D,
    /// The rotation angle.
    pub angle: f32,
    /// The inner matrix.
    pub matrix: InnerMatrix2D,
}

impl Animatable for InnerMatrix2D {
    // The diagonal entries interpolate relative to 1.0 (identity), the
    // off-diagonal entries relative to 0.0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(InnerMatrix2D {
            m11: add_weighted_with_initial_val(&self.m11, &other.m11,
                                               self_portion, other_portion, &1.0)?,
            m12: self.m12.add_weighted(&other.m12, self_portion, other_portion)?,
            m21: self.m21.add_weighted(&other.m21, self_portion, other_portion)?,
            m22: add_weighted_with_initial_val(&self.m22, &other.m22,
                                               self_portion, other_portion, &1.0)?,
        })
    }
}

impl Animatable for Translate2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Translate2D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale2D {
    // Scale interpolates relative to 1.0 (identity scale).
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Scale2D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed2D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // If x-axis of one is flipped, and y-axis of the other,
        // convert to an unflipped rotation.
        let mut scale = self.scale;
        let mut angle = self.angle;
        let mut other_angle = other.angle;
        if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) {
            scale.0 = -scale.0;
            scale.1 = -scale.1;
            angle += if angle < 0.0 {180.} else {-180.};
        }

        // Don't rotate the long way around.
        if angle == 0.0 {
            angle = 360.
        }
        if other_angle == 0.0 {
            other_angle = 360.
        }

        // Keep the two angles within 180 degrees of each other so the
        // interpolation takes the short arc.
        if (angle - other_angle).abs() > 180. {
            if angle > other_angle {
                angle -= 360.
            }
            else {
                other_angle -= 360.
            }
        }

        // Interpolate all values.
        let translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        let scale = scale.add_weighted(&other.scale, self_portion, other_portion)?;
        let angle = angle.add_weighted(&other_angle, self_portion, other_portion)?;
        let matrix = self.matrix.add_weighted(&other.matrix, self_portion, other_portion)?;

        Ok(MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: matrix,
        })
    }
}

impl Animatable for ComputedMatrix {
    // Decompose both matrices (2D or 3D as appropriate), interpolate the
    // decompositions, then recompose.  If a 3D decomposition fails, fall
    // back to a discrete flip at 50% (pick whichever side has the larger
    // portion).
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        if self.is_3d() || other.is_3d() {
            let decomposed_from = decompose_3d_matrix(*self);
            let decomposed_to = decompose_3d_matrix(*other);
            match (decomposed_from, decomposed_to) {
                (Ok(from), Ok(to)) => {
                    let sum = from.add_weighted(&to, self_portion, other_portion)?;
                    Ok(ComputedMatrix::from(sum))
                },
                _ => {
                    let result = if self_portion > other_portion {*self} else {*other};
                    Ok(result)
                }
            }
        } else {
            let decomposed_from = MatrixDecomposed2D::from(*self);
            let decomposed_to = MatrixDecomposed2D::from(*other);
            let sum = decomposed_from.add_weighted(&decomposed_to, self_portion, other_portion)?;
            Ok(ComputedMatrix::from(sum))
        }
    }
}

impl From<ComputedMatrix> for MatrixDecomposed2D {
    /// Decompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix
    fn from(matrix: ComputedMatrix) -> MatrixDecomposed2D {
        let mut row0x = matrix.m11;
        let mut row0y = matrix.m12;
        let mut row1x = matrix.m21;
        let mut row1y = matrix.m22;

        let translate = Translate2D(matrix.m41, matrix.m42);
        let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(),
                                (row1x * row1x + row1y * row1y).sqrt());

        // If determinant is negative, one axis was flipped.
        let determinant = row0x * row1y - row0y * row1x;
        if determinant < 0. {
            if row0x < row1y {
                scale.0 = -scale.0;
            } else {
                scale.1 = -scale.1;
            }
        }

        // Renormalize matrix to remove scale.
        if scale.0 != 0.0 {
            row0x *= 1. / scale.0;
            row0y *= 1. / scale.0;
        }
        if scale.1 != 0.0 {
            row1x *= 1. / scale.1;
            row1y *= 1. / scale.1;
        }

        // Compute rotation and renormalize matrix.
        let mut angle = row0y.atan2(row0x);
        if angle != 0.0 {
            // Rotate(-angle) = [cos(angle), sin(angle), -sin(angle), cos(angle)]
            //                = [row0x, -row0y, row0y, row0x]
            // Thanks to the normalization above.
            let sn = -row0y;
            let cs = row0x;
            let m11 = row0x;
            let m12 = row0y;
            let m21 = row1x;
            let m22 = row1y;
            row0x = cs * m11 + sn * m21;
            row0y = cs * m12 + sn * m22;
            row1x = -sn * m11 + cs * m21;
            row1y = -sn * m12 + cs * m22;
        }

        let m = InnerMatrix2D {
            m11: row0x, m12: row0y,
            m21: row1x, m22: row1y,
        };

        // Convert into degrees because our rotation functions expect it.
        angle = angle.to_degrees();
        MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: m,
        }
    }
}

impl From<MatrixDecomposed2D> for ComputedMatrix {
    /// Recompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix
    fn from(decomposed: MatrixDecomposed2D) -> ComputedMatrix {
        let mut computed_matrix = ComputedMatrix::identity();
        computed_matrix.m11 = decomposed.matrix.m11;
        computed_matrix.m12 = decomposed.matrix.m12;
        computed_matrix.m21 = decomposed.matrix.m21;
        computed_matrix.m22 = decomposed.matrix.m22;

        // Translate matrix.
        computed_matrix.m41 = decomposed.translate.0;
        computed_matrix.m42 = decomposed.translate.1;

        // Rotate matrix.
        let angle = decomposed.angle.to_radians();
        let cos_angle = angle.cos();
        let sin_angle = angle.sin();

        let mut rotate_matrix = ComputedMatrix::identity();
        rotate_matrix.m11 = cos_angle;
        rotate_matrix.m12 = sin_angle;
        rotate_matrix.m21 = -sin_angle;
        rotate_matrix.m22 = cos_angle;

        // Multiplication of computed_matrix and rotate_matrix
        computed_matrix = multiply(rotate_matrix, computed_matrix);

        // Scale matrix.
        computed_matrix.m11 *= decomposed.scale.0;
        computed_matrix.m12 *= decomposed.scale.0;
        computed_matrix.m21 *= decomposed.scale.1;
        computed_matrix.m22 *= decomposed.scale.1;
        computed_matrix
    }
}

#[cfg(feature = "gecko")]
impl<'a> From< &'a RawGeckoGfxMatrix4x4> for ComputedMatrix {
    // Gecko stores the matrix as a flat row-major array of 16 floats.
    fn from(m: &'a RawGeckoGfxMatrix4x4) -> ComputedMatrix {
        ComputedMatrix {
            m11: m[0],  m12: m[1],  m13: m[2],  m14: m[3],
            m21: m[4],  m22: m[5],  m23: m[6],  m24: m[7],
            m31: m[8],  m32: m[9],  m33: m[10], m34: m[11],
            m41: m[12], m42: m[13], m43: m[14], m44: m[15],
        }
    }
}

#[cfg(feature = "gecko")]
impl From<ComputedMatrix> for RawGeckoGfxMatrix4x4 {
    fn from(matrix: ComputedMatrix) -> RawGeckoGfxMatrix4x4 {
        [ matrix.m11, matrix.m12, matrix.m13, matrix.m14,
          matrix.m21, matrix.m22, matrix.m23, matrix.m24,
          matrix.m31, matrix.m32, matrix.m33, matrix.m34,
          matrix.m41, matrix.m42, matrix.m43, matrix.m44 ]
    }
}

/// A 3d translation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate3D(f32, f32, f32);

/// A 3d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale3D(f32, f32, f32);

/// A 3d skew function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Skew(f32, f32, f32);

/// A 3d perspective transformation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Perspective(f32, f32, f32, f32);

/// A quaternion used to represent a rotation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Quaternion(f32, f32, f32, f32);

/// A decomposed 3d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed3D {
    /// A translation function.
    pub translate: Translate3D,
    /// A scale function.
    pub scale: Scale3D,
    /// The skew component of the transformation.
    pub skew: Skew,
    /// The perspective component of the transformation.
    pub perspective: Perspective,
    /// The quaternion used to represent the rotation.
    pub quaternion: Quaternion,
}

/// Decompose a 3D matrix.
/// https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix
///
/// Returns Err(()) when the matrix is singular (m44 == 0 or the upper 3x3
/// component has no inverse), in which case interpolation falls back to a
/// discrete flip (see `ComputedMatrix::add_weighted`).
fn decompose_3d_matrix(mut matrix: ComputedMatrix) -> Result<MatrixDecomposed3D, ()> {
    // Normalize the matrix.
    if matrix.m44 == 0.0 {
        return Err(());
    }

    let scaling_factor = matrix.m44;
    % for i in range(1, 5):
        % for j in range(1, 5):
            matrix.m${i}${j} /= scaling_factor;
        % endfor
    % endfor

    // perspective_matrix is used to solve for perspective, but it also provides
    // an easy way to test for singularity of the upper 3x3 component.
    let mut perspective_matrix = matrix;
    % for i in range(1, 4):
        perspective_matrix.m${i}4 = 0.0;
    % endfor
    perspective_matrix.m44 = 1.0;

    if perspective_matrix.determinant() == 0.0 {
        return Err(());
    }

    // First, isolate perspective.
    let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 {
        let right_hand_side: [f32; 4] = [
            matrix.m14,
            matrix.m24,
            matrix.m34,
            matrix.m44
        ];

        // Solve for the perspective component by multiplying the right-hand
        // side by the inverse-transpose of perspective_matrix.
        perspective_matrix = perspective_matrix.inverse().unwrap();

        // Transpose perspective_matrix
        perspective_matrix = ComputedMatrix {
            % for i in range(1, 5):
                % for j in range(1, 5):
                    m${i}${j}: perspective_matrix.m${j}${i},
                % endfor
            % endfor
        };

        // Multiply right_hand_side with perspective_matrix
        let mut tmp: [f32; 4] = [0.0; 4];
        % for i in range(1, 5):
            tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) +
                            (right_hand_side[1] * perspective_matrix.m2${i}) +
                            (right_hand_side[2] * perspective_matrix.m3${i}) +
                            (right_hand_side[3] * perspective_matrix.m4${i});
        % endfor

        Perspective(tmp[0], tmp[1], tmp[2], tmp[3])
    } else {
        // No perspective.
        Perspective(0.0, 0.0, 0.0, 1.0)
    };

    // Next take care of translation
    let translate = Translate3D (
        matrix.m41,
        matrix.m42,
        matrix.m43
    );

    // Now get scale and shear. 'row' is a 3 element array of 3 component vectors
    let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3];
    % for i in range(1, 4):
        row[${i - 1}][0] = matrix.m${i}1;
        row[${i - 1}][1] = matrix.m${i}2;
        row[${i - 1}][2] = matrix.m${i}3;
    % endfor

    // Compute X scale factor and normalize first row.
    let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt();
    let mut scale = Scale3D(row0len, 0.0, 0.0);
    row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len];

    // Compute XY shear factor and make 2nd row orthogonal to 1st.
    let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0);
    row[1] = combine(row[1], row[0], 1.0, -skew.0);

    // Now, compute Y scale and normalize 2nd row.
    let row1len = (row[1][0] * row[1][0] + row[1][1] * row[1][1] + row[1][2] * row[1][2]).sqrt();
    scale.1 = row1len;
    row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len];
    skew.0 /= scale.1;

    // Compute XZ and YZ shears, orthogonalize 3rd row
    skew.1 = dot(row[0], row[2]);
    row[2] = combine(row[2], row[0], 1.0, -skew.1);
    skew.2 = dot(row[1], row[2]);
    row[2] = combine(row[2], row[1], 1.0, -skew.2);

    // Next, get Z scale and normalize 3rd row.
    let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt();
    scale.2 = row2len;
    row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len];
    skew.1 /= scale.2;
    skew.2 /= scale.2;

    // At this point, the matrix (in rows) is orthonormal.
    // Check for a coordinate system flip. If the determinant
    // is -1, then negate the matrix and the scaling factors.
    let pdum3 = cross(row[1], row[2]);
    if dot(row[0], pdum3) < 0.0 {
        % for i in range(3):
            scale.${i} *= -1.0;
            row[${i}][0] *= -1.0;
            row[${i}][1] *= -1.0;
            row[${i}][2] *= -1.0;
        % endfor
    }

    // Now, get the rotations out
    let mut quaternion = Quaternion (
        0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0)).sqrt()
    );

    // Fix the signs of the quaternion components from the off-diagonal
    // elements of the rotation matrix.
    if row[2][1] > row[1][2] {
        quaternion.0 = -quaternion.0
    }
    if row[0][2] > row[2][0] {
        quaternion.1 = -quaternion.1
    }
    if row[1][0] > row[0][1] {
        quaternion.2 = -quaternion.2
    }

    Ok(MatrixDecomposed3D {
        translate: translate,
        scale: scale,
        skew: skew,
        perspective: perspective,
        quaternion: quaternion
    })
}

// Combine 2 point.
fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] {
    [
        (ascl * a[0]) + (bscl * b[0]),
        (ascl * a[1]) + (bscl * b[1]),
        (ascl * a[2]) + (bscl * b[2])
    ]
}

// Dot product.
fn dot(a: [f32; 3], b: [f32; 3]) -> f32 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

// Cross product.
fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] {
    [
        row1[1] * row2[2] - row1[2] * row2[1],
        row1[2] * row2[0] - row1[0] * row2[2],
        row1[0] * row2[1] - row1[1] * row2[0]
    ]
}

impl Animatable for Translate3D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Translate3D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale3D {
    // Scale interpolates relative to 1.0 (identity scale).
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Scale3D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.2, &other.2, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for Skew {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Skew(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Perspective {
    // The final (w) component interpolates relative to 1.0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Perspective(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
            add_weighted_with_initial_val(&self.3, &other.3, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed3D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        use std::f64;

        debug_assert!((self_portion + other_portion - 1.0f64).abs() <= f64::EPSILON ||
                      other_portion == 1.0f64 || other_portion == 0.0f64,
                      "add_weighted should only be used for interpolating or accumulating transforms");

        let mut sum = *self;

        // Add translate, scale, skew and perspective components.
        sum.translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        sum.scale = self.scale.add_weighted(&other.scale, self_portion, other_portion)?;
        sum.skew = self.skew.add_weighted(&other.skew, self_portion, other_portion)?;
        sum.perspective = self.perspective.add_weighted(&other.perspective, self_portion, other_portion)?;

        // Add quaternions using spherical linear interpolation (Slerp).
        //
        // We take a specialized code path for accumulation (where other_portion is 1)
        if other_portion == 1.0 {
            if self_portion == 0.0 {
                return Ok(*other)
            }

            let clamped_w = self.quaternion.3.min(1.0).max(-1.0);

            // Determine the scale factor.
            let mut theta = clamped_w.acos();
            let mut scale = if theta == 0.0 { 0.0 } else { 1.0 / theta.sin() };
            theta *= self_portion as f32;
            scale *= theta.sin();

            // Scale the self matrix by self_portion.
            let mut scaled_self = *self;
            % for i in range(3):
                scaled_self.quaternion.${i} *= scale;
            % endfor
            scaled_self.quaternion.3 = theta.cos();

            // Multiply scaled-self by other (Hamilton quaternion product).
            let a = &scaled_self.quaternion;
            let b = &other.quaternion;
            sum.quaternion = Quaternion(
                a.3 * b.0 + a.0 * b.3 + a.1 * b.2 - a.2 * b.1,
                a.3 * b.1 - a.0 * b.2 + a.1 * b.3 + a.2 * b.0,
                a.3 * b.2 + a.0 * b.1 - a.1 * b.0 + a.2 * b.3,
                a.3 * b.3 - a.0 * b.0 - a.1 * b.1 - a.2 * b.2,
            );
        } else {
            // Standard slerp path: the interpolation parameter comes from the
            // angle between the two quaternions.
            let mut product = self.quaternion.0 * other.quaternion.0 +
                              self.quaternion.1 * other.quaternion.1 +
                              self.quaternion.2 * other.quaternion.2 +
                              self.quaternion.3 * other.quaternion.3;

            // Clamp product to -1.0 <= product <= 1.0
            product = product.min(1.0);
            product = product.max(-1.0);

            // Identical rotations: nothing to slerp.
            if product == 1.0 {
                return Ok(sum);
            }

            let theta = product.acos();
            let w = (other_portion as f32 * theta).sin() * 1.0 / (1.0 - product * product).sqrt();

            let mut a = *self;
            let mut b = *other;
            % for i in range(4):
                a.quaternion.${i} *= (other_portion as f32 * theta).cos() - product * w;
                b.quaternion.${i} *= w;
                sum.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i};
            % endfor
        }

        Ok(sum)
    }
}

impl From<MatrixDecomposed3D> for ComputedMatrix {
    /// Recompose a 3D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix
    fn from(decomposed: MatrixDecomposed3D) -> ComputedMatrix {
        let mut matrix = ComputedMatrix::identity();

        // Apply perspective
        % for i in range(1, 5):
            matrix.m${i}4 = decomposed.perspective.${i - 1};
        % endfor

        // Apply translation
        % for i in range(1, 4):
            % for j in range(1, 4):
                matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i};
            % endfor
        % endfor

        // Apply rotation
        let x = decomposed.quaternion.0;
        let y = decomposed.quaternion.1;
        let z = decomposed.quaternion.2;
        let w = decomposed.quaternion.3;

        // Construct a composite rotation matrix from the quaternion values
        // rotationMatrix is a identity 4x4 matrix initially
        let mut rotation_matrix = ComputedMatrix::identity();
        rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z);
        rotation_matrix.m12 = 2.0 * (x * y + z * w);
        rotation_matrix.m13 = 2.0 * (x * z - y * w);
        rotation_matrix.m21 = 2.0 * (x * y - z * w);
        rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z);
        rotation_matrix.m23 = 2.0 * (y * z + x * w);
        rotation_matrix.m31 = 2.0 * (x * z + y * w);
        rotation_matrix.m32 = 2.0 * (y * z - x * w);
        rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y);

        matrix = multiply(rotation_matrix, matrix);

        // Apply skew; `temp` is reset field-by-field between the three shear
        // applications (ZY, then ZX, then XY).
        let mut temp = ComputedMatrix::identity();
        if decomposed.skew.2 != 0.0 {
            temp.m32 = decomposed.skew.2;
            matrix = multiply(temp, matrix);
        }

        if decomposed.skew.1 != 0.0 {
            temp.m32 = 0.0;
            temp.m31 = decomposed.skew.1;
            matrix = multiply(temp, matrix);
        }

        if decomposed.skew.0 != 0.0 {
            temp.m31 = 0.0;
            temp.m21 = decomposed.skew.0;
            matrix = multiply(temp, matrix);
        }

        // Apply scale
        % for i in range(1, 4):
            % for j in range(1, 4):
                matrix.m${i}${j} *= decomposed.scale.${i - 1};
            % endfor
        % endfor

        matrix
    }
}

// Multiplication of two 4x4 matrices.
// Row-by-column product `a * b`; the result is written into a copy of `a`
// so the originals stay untouched.
fn multiply(a: ComputedMatrix, b: ComputedMatrix) -> ComputedMatrix {
    let mut a_clone = a;
    % for i in range(1, 5):
        % for j in range(1, 5):
            a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) +
                               (a.m${i}2 * b.m2${j}) +
                               (a.m${i}3 * b.m3${j}) +
                               (a.m${i}4 * b.m4${j});
        % endfor
    % endfor
    a_clone
}

impl ComputedMatrix {
    // True when any entry differs from the 2D-affine pattern (third
    // row/column not identity, or perspective components non-zero).
    fn is_3d(&self) -> bool {
        self.m13 != 0.0 || self.m14 != 0.0 ||
        self.m23 != 0.0 || self.m24 != 0.0 ||
        self.m31 != 0.0 || self.m32 != 0.0 ||
        self.m33 != 1.0 || self.m34 != 0.0 ||
        self.m43 != 0.0 || self.m44 != 1.0
    }

    // Fully expanded 4x4 determinant (24 signed products).
    fn determinant(&self) -> CSSFloat {
        self.m14 * self.m23 * self.m32 * self.m41 -
        self.m13 * self.m24 * self.m32 * self.m41 -
        self.m14 * self.m22 * self.m33 * self.m41 +
        self.m12 * self.m24 * self.m33 * self.m41 +
        self.m13 * self.m22 * self.m34 * self.m41 -
        self.m12 * self.m23 * self.m34 * self.m41 -
        self.m14 * self.m23 * self.m31 * self.m42 +
        self.m13 * self.m24 * self.m31 * self.m42 +
        self.m14 * self.m21 * self.m33 * self.m42 -
        self.m11 * self.m24 * self.m33 * self.m42 -
        self.m13 * self.m21 * self.m34 * self.m42 +
        self.m11 * self.m23 * self.m34 * self.m42 +
        self.m14 * self.m22 * self.m31 * self.m43 -
        self.m12 * self.m24 * self.m31 * self.m43 -
        self.m14 * self.m21 * self.m32 * self.m43 +
        self.m11 * self.m24 * self.m32 * self.m43 +
        self.m12 * self.m21 * self.m34 * self.m43 -
        self.m11 * self.m22 * self.m34 * self.m43 -
        self.m13 * self.m22 * self.m31 * self.m44 +
        self.m12 * self.m23 * self.m31 * self.m44 +
        self.m13 * self.m21 * self.m32 * self.m44 -
        self.m11 * self.m23 * self.m32 * self.m44 -
        self.m12 * self.m21 * self.m33 * self.m44 +
        self.m11 * self.m22 * self.m33 * self.m44
    }

    // Inverse via the adjugate (cofactor) matrix scaled by 1/det.
    // Returns None for a singular matrix (det == 0).
    fn inverse(&self) -> Option<ComputedMatrix> {
        let mut det = self.determinant();

        if det == 0.0 {
            return None;
        }

        det = 1.0 / det;
        let x = ComputedMatrix {
            m11: det *
            (self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 +
             self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 -
             self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44),
            m12: det *
            (self.m14*self.m33*self.m42 - self.m13*self.m34*self.m42 -
             self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 +
             self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44),
            m13: det *
            (self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 +
             self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 -
             self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44),
            m14: det *
            (self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 -
             self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 +
             self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34),
            m21: det *
            (self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 -
             self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 +
             self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44),
            m22: det *
            (self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 +
             self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 -
             self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44),
            m23: det *
            (self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 -
             self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 +
             self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44),
            m24: det *
            (self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 +
             self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 -
             self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34),
            m31: det *
            (self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 +
             self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 -
             self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44),
            m32: det *
            (self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 -
             self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 +
             self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44),
            m33: det *
            (self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 +
             self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 -
             self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44),
            m34: det *
            (self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 -
             self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 +
             self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34),
            m41: det *
            (self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 -
             self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 +
             self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43),
            m42: det *
            (self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 +
             self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 -
             self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43),
            m43: det *
            (self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 -
             self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 +
             self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43),
            m44: det *
            (self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 +
             self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 -
             self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33),
        };

        Some(x)
    }
}

/// https://drafts.csswg.org/css-transforms/#interpolation-of-transforms
impl Animatable for TransformList {
    #[inline]
    fn add_weighted(&self, other: &TransformList, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
        let result = match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                // Two lists of transforms
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&Some(ref from_list), &None) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                let to_list = build_identity_transform_list(from_list);
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&None, &Some(ref to_list)) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                let from_list = build_identity_transform_list(to_list);
                add_weighted_transform_lists(&from_list, to_list, self_portion, other_portion)
            }
            _ => {
                // http://dev.w3.org/csswg/css-transforms/#none-none-animation
                TransformList(None)
            }
        };

        Ok(result)
    }

    fn add(&self, other: &Self) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                // Addition of transform lists is simple concatenation.
                Ok(TransformList(Some([&from_list[..], &to_list[..]].concat())))
            }
            (&Some(_), &None) => {
                Ok(self.clone())
            }
            (&None, &Some(_)) => {
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }

    #[inline]
    fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                if can_interpolate_list(from_list, to_list) {
                    // Pairwise-compatible lists: accumulation is a weighted
                    // sum with weight |count| on |self|.
                    Ok(add_weighted_transform_lists(from_list, &to_list, count as f64, 1.0))
                } else {
                    // Mismatched lists fall back to an AccumulateMatrix
                    // operation; count is saturated into i32 range.
                    use std::i32;
                    let result = vec![TransformOperation::AccumulateMatrix {
                        from_list: self.clone(),
                        to_list: other.clone(),
                        count: cmp::min(count, i32::MAX as u64) as i32
                    }];
                    Ok(TransformList(Some(result)))
                }
            }
            (&Some(ref from_list), &None) => {
                Ok(add_weighted_transform_lists(from_list, from_list, count as f64, 0.0))
            }
            (&None, &Some(_)) => {
                // If |self| is 'none' then we are calculating:
                //
                //    none * |count| + |other|
                //    = none + |other|
                //    = |other|
                //
                // Hence the result is just |other|.
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }
}

impl ToAnimatedZero for TransformList {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(TransformList(None))
    }
}

impl<T, U> Animatable for Either<T, U>
        where T: Animatable + Copy, U: Animatable + Copy,
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            (Either::First(ref this), Either::First(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::First)
            },
            (Either::Second(ref this), Either::Second(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::Second)
            },
            _ => {
                // Mismatched variants interpolate discretely: the side with
                // the larger portion wins.
                let result = if self_portion > other_portion {*self} else {*other};
                Ok(result)
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(())
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_squared_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_squared_distance(other)
            },
            _ => Err(())
        }
    }
}

impl<A, B> ToAnimatedZero for Either<A, B>
    where A: ToAnimatedZero, B: ToAnimatedZero,
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            Either::First(ref first) => {
                Ok(Either::First(first.to_animated_zero()?))
            },
            Either::Second(ref second) => {
                Ok(Either::Second(second.to_animated_zero()?))
            },
        }
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// Unlike RGBA, each component value may exceed the range [0.0, 1.0].
pub struct IntermediateRGBA {
    /// The red component.
    pub red: f32,
    /// The green component.
    pub green: f32,
    /// The blue component.
    pub blue: f32,
    /// The alpha component.
    pub alpha: f32,
}

impl IntermediateRGBA {
    /// Returns a transparent color.
    #[inline]
    pub fn transparent() -> Self {
        Self::new(0., 0., 0., 0.)
    }

    /// Returns a new color.
    #[inline]
    pub fn new(red: f32, green: f32, blue: f32, alpha: f32) -> Self {
        IntermediateRGBA { red: red, green: green, blue: blue, alpha: alpha }
    }
}

impl ToAnimatedValue for RGBA {
    type AnimatedValue = IntermediateRGBA;

    #[inline]
    fn to_animated_value(self) -> Self::AnimatedValue {
        IntermediateRGBA::new(
            self.red_f32(),
            self.green_f32(),
            self.blue_f32(),
            self.alpha_f32(),
        )
    }

    #[inline]
    fn from_animated_value(animated: Self::AnimatedValue) -> Self {
        // RGBA::from_floats clamps each component values.
        RGBA::from_floats(
            animated.red,
            animated.green,
            animated.blue,
            animated.alpha,
        )
    }
}

/// Unlike Animatable for RGBA we don't clamp any component values.
impl Animatable for IntermediateRGBA {
    #[inline]
    fn add_weighted(&self, other: &IntermediateRGBA, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Interpolation is done on premultiplied components (component *
        // alpha), then un-premultiplied by the interpolated alpha.
        let mut alpha = self.alpha.add_weighted(&other.alpha, self_portion, other_portion)?;
        if alpha <= 0. {
            // Ideally we should return a color value whose alpha component
            // alone is 0, but this is what current Gecko does.
            Ok(IntermediateRGBA::transparent())
        } else {
            alpha = alpha.min(1.);
            let red = (self.red * self.alpha).add_weighted(
                &(other.red * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let green = (self.green * self.alpha).add_weighted(
                &(other.green * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let blue = (self.blue * self.alpha).add_weighted(
                &(other.blue * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            Ok(IntermediateRGBA::new(red, green, blue, alpha))
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Euclidean distance over (alpha, premultiplied r/g/b).
        let start = [ self.alpha,
                      self.red * self.alpha,
                      self.green * self.alpha,
                      self.blue * self.alpha ];
        let end = [ other.alpha,
                    other.red * other.alpha,
                    other.green * other.alpha,
                    other.blue * other.alpha ];
        let diff = start.iter().zip(&end)
                        .fold(0.0f64, |n, (&a, &b)| {
                            let diff = (a - b) as f64;
                            n + diff * diff
                        });
        Ok(diff)
    }
}

impl ToAnimatedZero for IntermediateRGBA {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(IntermediateRGBA::transparent())
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
pub struct IntermediateColor {
    // Numeric part of the color.
    color: IntermediateRGBA,
    // How much of the color comes from currentcolor: 0 = fully numeric,
    // 1 = fully currentcolor; values in between arise during interpolation.
    foreground_ratio: f32,
}

impl IntermediateColor {
    fn currentcolor() -> Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 1.,
        }
    }

    /// Returns a transparent intermediate color.
    pub fn transparent() -> Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 0.,
        }
    }

    fn is_currentcolor(&self) -> bool {
        self.foreground_ratio >= 1.
    }

    fn is_numeric(&self) -> bool {
        self.foreground_ratio <= 0.
    }

    // The numeric color weighted down by the portion attributed to
    // currentcolor (only the alpha channel is affected).
    fn effective_intermediate_rgba(&self) -> IntermediateRGBA {
        IntermediateRGBA {
            alpha: self.color.alpha * (1. - self.foreground_ratio),
            .. self.color
        }
    }
}

impl ToAnimatedValue for Color {
    type AnimatedValue = IntermediateColor;

    #[inline]
    fn to_animated_value(self) -> Self::AnimatedValue {
        IntermediateColor {
            color: self.color.to_animated_value(),
            // Computed ratio is stored as a u8; convert to [0, 1] here.
            foreground_ratio: self.foreground_ratio as f32 * (1. / 255.),
        }
    }

    #[inline]
    fn from_animated_value(animated: Self::AnimatedValue) -> Self {
        Color {
            color: RGBA::from_animated_value(animated.color),
            foreground_ratio: (animated.foreground_ratio * 255.).round() as u8,
        }
    }
}

impl Animatable for IntermediateColor {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Common cases are interpolating between two numeric colors,
        // two currentcolors, and a numeric color and a currentcolor.
        //
        // Note: this algorithm assumes self_portion + other_portion
        // equals to one, so it may be broken for additive operation.
        // To properly support additive color interpolation, we would
        // need two ratio fields in computed color types.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(IntermediateColor::currentcolor())
            } else {
                Ok(IntermediateColor {
                    color: self.color.add_weighted(&other.color, self_portion, other_portion)?,
                    foreground_ratio: self.foreground_ratio,
                })
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            Ok(IntermediateColor {
                color: other.color,
                foreground_ratio: self_portion as f32,
            })
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(IntermediateColor {
                color: self.color,
                foreground_ratio: other_portion as f32,
            })
        } else {
            // For interpolating between two complex colors, we need to
            // generate colors with effective alpha value.
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let color = self_color.add_weighted(&other_color, self_portion, other_portion)?;
            // Then we compute the final foreground ratio, and derive
            // the final alpha value from the effective alpha value.
            let foreground_ratio = self.foreground_ratio
                .add_weighted(&other.foreground_ratio, self_portion, other_portion)?;
            let alpha = color.alpha / (1. - foreground_ratio);
            Ok(IntermediateColor {
                color: IntermediateRGBA {
                    alpha: alpha,
                    .. color
                },
                foreground_ratio: foreground_ratio,
            })
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // All comments in add_weighted also applies here.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(0.)
            } else {
                self.color.compute_squared_distance(&other.color)
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            // The +1 accounts for the unit distance in the ratio dimension.
            Ok(IntermediateRGBA::transparent().compute_squared_distance(&other.color)? + 1.)
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(self.color.compute_squared_distance(&IntermediateRGBA::transparent())? + 1.)
        } else {
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let dist = self_color.compute_squared_distance(&other_color)?;
            let ratio_diff = (self.foreground_ratio - other.foreground_ratio) as f64;
            Ok(dist + ratio_diff * ratio_diff)
        }
    }
}

impl ToAnimatedZero for IntermediateColor {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// Animatable SVGPaint
pub type IntermediateSVGPaint = SVGPaint<IntermediateRGBA>;

/// Animatable SVGPaintKind
pub type IntermediateSVGPaintKind = SVGPaintKind<IntermediateRGBA>;

impl Animatable for IntermediateSVGPaint {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(IntermediateSVGPaint {
            kind: self.kind.add_weighted(&other.kind, self_portion, other_portion)?,
            fallback: self.fallback.add_weighted(&other.fallback, self_portion, other_portion)?,
        })
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok(self.kind.compute_squared_distance(&other.kind)? +
           self.fallback.compute_squared_distance(&other.fallback)?)
    }
}

impl ToAnimatedZero for IntermediateSVGPaint {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(IntermediateSVGPaint {
            kind: self.kind.to_animated_zero()?,
            fallback: self.fallback.and_then(|v| v.to_animated_zero().ok()),
        })
    }
}

impl Animatable for IntermediateSVGPaintKind {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                Ok(SVGPaintKind::Color(self_color.add_weighted(other_color, self_portion, other_portion)?))
            }
            // FIXME context values should be interpolable with colors
            // Gecko doesn't implement this behavior either.
            // Matching non-color variants interpolate to themselves; any
            // other combination is non-interpolable.
            (&SVGPaintKind::None, &SVGPaintKind::None) => Ok(SVGPaintKind::None),
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) => Ok(SVGPaintKind::ContextFill),
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke) => Ok(SVGPaintKind::ContextStroke),
            _ => Err(())
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                self_color.compute_distance(other_color)
            }
            (&SVGPaintKind::None, &SVGPaintKind::None) |
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) |
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke)=> Ok(0.0),
            _ => Err(())
        }
    }
}

impl ToAnimatedZero for IntermediateSVGPaintKind {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            SVGPaintKind::Color(ref color) => {
                Ok(SVGPaintKind::Color(color.to_animated_zero()?))
            },
            SVGPaintKind::None |
            SVGPaintKind::ContextFill |
            SVGPaintKind::ContextStroke => Ok(self.clone()),
            _ => Err(()),
        }
    }
}

impl<LengthType> Animatable for SVGLength<LengthType>
        where LengthType: Animatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGLength::Length(ref this), &SVGLength::Length(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(SVGLength::Length)
            }
            _ => {
                // Discrete fallback: the side with the larger portion wins.
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGLength::Length(ref this), &SVGLength::Length(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<LengthType> ToAnimatedZero for SVGLength<LengthType>
        where LengthType : ToAnimatedZero
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGLength::Length(ref length) => length.to_animated_zero().map(SVGLength::Length),
            &SVGLength::ContextValue => Ok(SVGLength::ContextValue),
        }
    }
}

impl<LengthType> Animatable for SVGStrokeDashArray<LengthType>
    where LengthType : RepeatableListAnimatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGStrokeDashArray::Values(ref this), &SVGStrokeDashArray::Values(ref other))=> {
                this.add_weighted(other, self_portion, other_portion)
                    .map(SVGStrokeDashArray::Values)
            }
            _ => {
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGStrokeDashArray::Values(ref this), &SVGStrokeDashArray::Values(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<LengthType> ToAnimatedZero for SVGStrokeDashArray<LengthType>
    where LengthType : ToAnimatedZero + Clone
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGStrokeDashArray::Values(ref values) => {
                // Zero every entry; fail if any entry has no zero value.
                values.iter().map(ToAnimatedZero::to_animated_zero)
                      .collect::<Result<Vec<_>, ()>>().map(SVGStrokeDashArray::Values)
            }
            &SVGStrokeDashArray::ContextValue => Ok(SVGStrokeDashArray::ContextValue),
        }
    }
}

impl<OpacityType> Animatable for SVGOpacity<OpacityType>
    where OpacityType: Animatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGOpacity::Opacity(ref this), &SVGOpacity::Opacity(ref other)) => {
                this.add_weighted(other, self_portion, other_portion).map(SVGOpacity::Opacity)
            }
            _ => {
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGOpacity::Opacity(ref this), &SVGOpacity::Opacity(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<OpacityType> ToAnimatedZero for SVGOpacity<OpacityType>
    where OpacityType: ToAnimatedZero + Clone
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGOpacity::Opacity(ref opacity) =>
                opacity.to_animated_zero().map(SVGOpacity::Opacity),
            other => Ok(other.clone()),
        }
    }
}

<%
    FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale',
                         'HueRotate', 'Invert', 'Opacity', 'Saturate',
                         'Sepia' ]
%>

/// https://drafts.fxtf.org/filters/#animation-of-filters
fn add_weighted_filter_function_impl(from: &AnimatedFilter,
                                     to: &AnimatedFilter,
                                     self_portion: f64,
                                     other_portion: f64)
                                     -> Result<AnimatedFilter, ()> {
    match (from, to) {
        % for func in [ 'Blur', 'HueRotate' ]:
            (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
                Ok(Filter::${func}(from_value.add_weighted(
                    &to_value,
                    self_portion,
                    other_portion,
                )?))
           },
        % endfor
        // Per the spec, these filters interpolate against an initial (lacuna)
        // value of 0 when one side is missing.
        % for func in [ 'Grayscale', 'Invert', 'Sepia' ]:
            (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
                Ok(Filter::${func}(add_weighted_with_initial_val(
                    &from_value,
                    &to_value,
                    self_portion,
                    other_portion,
                    &NonNegative::<CSSFloat>(0.0),
                )?))
            },
        % endfor
        // ... and these against an initial value of 1.
        % for func in [ 'Brightness', 'Contrast', 'Opacity', 'Saturate' ]:
            (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
                Ok(Filter::${func}(add_weighted_with_initial_val(
                    &from_value,
                    &to_value,
                    self_portion,
                    other_portion,
                    &NonNegative::<CSSFloat>(1.0),
                )?))
                },
        % endfor
        % if product == "gecko":
        (&Filter::DropShadow(ref from_value), &Filter::DropShadow(ref to_value)) => {
            Ok(Filter::DropShadow(from_value.add_weighted(
                &to_value,
                self_portion,
                other_portion,
            )?))
        },
        (&Filter::Url(_), &Filter::Url(_)) => {
            Err(())
        },
        % endif
        _ => {
            // If different filter functions are specified, we will need to
            // interpolate as discrete.
            Err(())
        },
    }
}

/// https://drafts.fxtf.org/filters/#animation-of-filters
fn add_weighted_filter_function(from: Option<<&AnimatedFilter>,
                                to: Option<<&AnimatedFilter>,
                                self_portion: f64,
                                other_portion: f64) -> Result<AnimatedFilter, ()> {
    match (from, to) {
        (Some(f), Some(t)) => {
            add_weighted_filter_function_impl(f, t, self_portion, other_portion)
        },
        // A missing side is treated as the other side interpolated against
        // its initial (lacuna) value, which the impl handles via a zero
        // weight on the second portion.
        (Some(f), None) => {
            add_weighted_filter_function_impl(f, f, self_portion, 0.0)
        },
        (None, Some(t)) => {
            add_weighted_filter_function_impl(t, t, other_portion, 0.0)
        },
        _ => { Err(()) }
    }
}

fn compute_filter_square_distance(from: &AnimatedFilter,
                                  to: &AnimatedFilter)
                                  -> Result<f64, ()> {
    match (from, to) {
        % for func in FILTER_FUNCTIONS :
            (&Filter::${func}(f),
             &Filter::${func}(t)) => {
                Ok(try!(f.compute_squared_distance(&t)))
            },
        % endfor
        % if product == "gecko":
            (&Filter::DropShadow(ref f), &Filter::DropShadow(ref t)) => {
                Ok(try!(f.compute_squared_distance(&t)))
            },
        % endif
        _ => {
            Err(())
        }
    }
}

impl Animatable for AnimatedFilterList {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Walk both lists in lock-step; when one runs out, keep pairing the
        // remaining entries of the longer list against None.
        let mut filters = vec![];
        let mut from_iter = self.0.iter();
        let mut to_iter = other.0.iter();

        let mut from = from_iter.next();
        let mut to = to_iter.next();
        while from.is_some() || to.is_some() {
            filters.push(try!(add_weighted_filter_function(from,
                                                           to,
                                                           self_portion,
                                                           other_portion)));
            if from.is_some() {
                from = from_iter.next();
            }
            if to.is_some() {
                to = to_iter.next();
            }
        }

        Ok(AnimatedFilterList(filters))
    }

    fn add(&self, other: &Self) -> Result<Self, ()> {
        // Addition of filter lists is concatenation.
        Ok(AnimatedFilterList(self.0.iter().chain(other.0.iter()).cloned().collect()))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        use itertools::{EitherOrBoth, Itertools};

        let mut square_distance: f64 = 0.0;
        for it in self.0.iter().zip_longest(other.0.iter()) {
            square_distance += match it {
                EitherOrBoth::Both(from, to) => {
                    compute_filter_square_distance(&from, &to)?
                },
                EitherOrBoth::Left(list) | EitherOrBoth::Right(list)=> {
                    // Unpaired entry: measure against its lacuna value,
                    // obtained by interpolating the entry with itself at
                    // zero weight.
                    let none = add_weighted_filter_function(Some(list), Some(list), 0.0, 0.0)?;
                    compute_filter_square_distance(&none, &list)?
                },
            };
        }
        Ok(square_distance)
    }
}

/// A comparator to sort PropertyIds such that longhands are sorted before shorthands,
/// shorthands with fewer components are sorted before shorthands with more components,
/// and otherwise shorthands are sorted by IDL name as defined by [Web Animations][property-order].
///
/// Using this allows us to prioritize values specified by longhands (or smaller
/// shorthand subsets) when longhands and shorthands are both specified on the one keyframe.
///
/// Example orderings that result from this:
///
///   margin-left, margin
///
/// and:
///
///   border-top-color, border-color, border-top, border
///
/// [property-order] https://w3c.github.io/web-animations/#calculating-computed-keyframes
#[cfg(feature = "gecko")]
pub fn compare_property_priority(a: &PropertyId, b: &PropertyId) -> cmp::Ordering {
    match (a.as_shorthand(), b.as_shorthand()) {
        // Within shorthands, sort by the number of subproperties, then by IDL name.
        (Ok(a), Ok(b)) => {
            let subprop_count_a = a.longhands().len();
            let subprop_count_b = b.longhands().len();
            subprop_count_a.cmp(&subprop_count_b).then_with(
                || get_idl_name_sort_order(&a).cmp(&get_idl_name_sort_order(&b)))
        },

        // Longhands go before shorthands.
        (Ok(_), Err(_)) => cmp::Ordering::Greater,
        (Err(_), Ok(_)) => cmp::Ordering::Less,

        // Both are longhands or custom properties in which case they don't overlap and should
        // sort equally.
        _ => cmp::Ordering::Equal,
    }
}

#[cfg(feature = "gecko")]
fn get_idl_name_sort_order(shorthand: &ShorthandId) -> u32 {
<%
# Sort by IDL name.
sorted_shorthands = sorted(data.shorthands, key=lambda p: to_idl_name(p.ident)) # Annotate with sorted position sorted_shorthands = [(p, position) for position, p in enumerate(sorted_shorthands)] %> match *shorthand { % for property, position in sorted_shorthands: ShorthandId::${property.camel_case} => ${position}, % endfor } } impl<T> Animatable for NonNegative<T> where T: Animatable + Clone { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { self.0.add_weighted(&other.0, self_portion, other_portion).map(NonNegative::<T>) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.0.compute_distance(&other.0) } } impl<T> ToAnimatedZero for NonNegative<T> where T: ToAnimatedZero { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { self.0.to_animated_zero().map(NonNegative::<T>) } } impl<T> Animatable for GreaterThanOrEqualToOne<T> where T: Animatable + Clone { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { self.0.add_weighted(&other.0, self_portion, other_portion).map(GreaterThanOrEqualToOne::<T>) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.0.compute_distance(&other.0) } } impl<T> ToAnimatedZero for GreaterThanOrEqualToOne<T> where T: ToAnimatedZero { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { self.0.to_animated_zero().map(GreaterThanOrEqualToOne::<T>) } } Auto merge of #18007 - BorisChiou:stylo/animation/interpolation/font_weight, r=xidorn stylo: Fix the computation of the interpolation of FontWeight. Interpolated result of FontWeight is wrong because clamping code is incorrect. --- - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors - [X] These changes fix [Bug 1387948](https://bugzilla.mozilla.org/show_bug.cgi?id=1387948). 
- [X] These changes do not require tests because Gecko has tests /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ <%namespace name="helpers" file="/helpers.mako.rs" /> <% from data import to_idl_name, SYSTEM_FONT_LONGHANDS %> use app_units::Au; use cssparser::{Parser, RGBA}; use euclid::{Point2D, Size2D}; #[cfg(feature = "gecko")] use gecko_bindings::bindings::RawServoAnimationValueMap; #[cfg(feature = "gecko")] use gecko_bindings::structs::RawGeckoGfxMatrix4x4; #[cfg(feature = "gecko")] use gecko_bindings::structs::nsCSSPropertyID; #[cfg(feature = "gecko")] use gecko_bindings::sugar::ownership::{HasFFI, HasSimpleFFI}; #[cfg(feature = "gecko")] use gecko_string_cache::Atom; use properties::{CSSWideKeyword, PropertyDeclaration}; use properties::longhands; use properties::longhands::background_size::computed_value::T as BackgroundSizeList; use properties::longhands::border_spacing::computed_value::T as BorderSpacing; use properties::longhands::font_weight::computed_value::T as FontWeight; use properties::longhands::font_stretch::computed_value::T as FontStretch; use properties::longhands::line_height::computed_value::T as LineHeight; use properties::longhands::transform::computed_value::ComputedMatrix; use properties::longhands::transform::computed_value::ComputedOperation as TransformOperation; use properties::longhands::transform::computed_value::T as TransformList; use properties::longhands::vertical_align::computed_value::T as VerticalAlign; use properties::longhands::visibility::computed_value::T as Visibility; #[cfg(feature = "gecko")] use properties::{PropertyId, PropertyDeclarationId, LonghandId}; #[cfg(feature = "gecko")] use properties::{ShorthandId}; use selectors::parser::SelectorParseError; use smallvec::SmallVec; use std::cmp; #[cfg(feature = "gecko")] use fnv::FnvHashMap; use style_traits::ParseError; 
use super::ComputedValues; #[cfg(any(feature = "gecko", feature = "testing"))] use values::Auto; use values::{CSSFloat, CustomIdent, Either}; use values::animated::{ToAnimatedValue, ToAnimatedZero}; use values::animated::effects::BoxShadowList as AnimatedBoxShadowList; use values::animated::effects::Filter as AnimatedFilter; use values::animated::effects::FilterList as AnimatedFilterList; use values::animated::effects::TextShadowList as AnimatedTextShadowList; use values::computed::{Angle, LengthOrPercentageOrAuto, LengthOrPercentageOrNone}; use values::computed::{BorderCornerRadius, ClipRect}; use values::computed::{CalcLengthOrPercentage, Color, Context, ComputedValueAsSpecified}; use values::computed::{LengthOrPercentage, MaxLength, MozLength, Percentage, ToComputedValue}; use values::computed::{NonNegativeAu, NonNegativeNumber, PositiveIntegerOrAuto}; use values::computed::length::{NonNegativeLengthOrAuto, NonNegativeLengthOrNormal}; use values::computed::length::NonNegativeLengthOrPercentage; use values::generics::{GreaterThanOrEqualToOne, NonNegative}; use values::generics::border::BorderCornerRadius as GenericBorderCornerRadius; use values::generics::effects::Filter; use values::generics::position as generic_position; use values::generics::svg::{SVGLength, SVGOpacity, SVGPaint, SVGPaintKind, SVGStrokeDashArray}; /// A trait used to implement various procedures used during animation. pub trait Animatable: Sized { /// Performs a weighted sum of this value and |other|. This is used for /// interpolation and addition of animation values. fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()>; /// [Interpolates][interpolation] a value with another for a given property. 
/// /// [interpolation]: https://w3c.github.io/web-animations/#animation-interpolation fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> { self.add_weighted(other, 1.0 - progress, progress) } /// Returns the [sum][animation-addition] of this value and |other|. /// /// [animation-addition]: https://w3c.github.io/web-animations/#animation-addition fn add(&self, other: &Self) -> Result<Self, ()> { self.add_weighted(other, 1.0, 1.0) } /// [Accumulates][animation-accumulation] this value onto itself (|count| - 1) times then /// accumulates |other| onto the result. /// If |count| is zero, the result will be |other|. /// /// [animation-accumulation]: https://w3c.github.io/web-animations/#animation-accumulation fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { self.add_weighted(other, count as f64, 1.0) } /// Compute distance between a value and another for a given property. fn compute_distance(&self, _other: &Self) -> Result<f64, ()> { Err(()) } /// In order to compute the Euclidean distance of a list or property value with multiple /// components, we need to compute squared distance for each element, so the vector can sum it /// and then get its squared root as the distance. fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_distance(other).map(|d| d * d) } } /// https://drafts.csswg.org/css-transitions/#animtype-repeatable-list pub trait RepeatableListAnimatable: Animatable {} /// A longhand property whose animation type is not "none". /// /// NOTE: This includes the 'display' property since it is animatable from SMIL even though it is /// not animatable from CSS animations or Web Animations. CSS transitions also does not allow /// animating 'display', but for CSS transitions we have the separate TransitionProperty type. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatableLonghand {
    % for prop in data.longhands:
        % if prop.animatable:
            /// ${prop.name}
            ${prop.camel_case},
        % endif
    % endfor
}

impl AnimatableLonghand {
    /// Returns true if this AnimatableLonghand is one of the discretely animatable properties.
    pub fn is_discrete(&self) -> bool {
        match *self {
            % for prop in data.longhands:
                % if prop.animation_value_type == "discrete":
                    AnimatableLonghand::${prop.camel_case} => true,
                % endif
            % endfor
            _ => false
        }
    }

    /// Converts from an nsCSSPropertyID. Returns None if nsCSSPropertyID is not an animatable
    /// longhand in Servo.
    #[cfg(feature = "gecko")]
    pub fn from_nscsspropertyid(css_property: nsCSSPropertyID) -> Option<Self> {
        match css_property {
            % for prop in data.longhands:
                % if prop.animatable:
                    ${helpers.to_nscsspropertyid(prop.ident)}
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            _ => None
        }
    }

    /// Converts from TransitionProperty. Returns None if the property is not an animatable
    /// longhand.
    ///
    /// Only variants that are both transitionable and animatable are matched;
    /// shorthands and `All` fall through to None.
    pub fn from_transition_property(transition_property: &TransitionProperty) -> Option<Self> {
        match *transition_property {
            % for prop in data.longhands:
                % if prop.transitionable and prop.animatable:
                    TransitionProperty::${prop.camel_case}
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            _ => None
        }
    }

    /// Get an animatable longhand property from a property declaration.
    pub fn from_declaration(declaration: &PropertyDeclaration) -> Option<Self> {
        use properties::LonghandId;
        match *declaration {
            % for prop in data.longhands:
                % if prop.animatable:
                    PropertyDeclaration::${prop.camel_case}(..)
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            // CSS-wide keywords and variable references carry the longhand id
            // rather than a concrete value; map the id the same way.
            PropertyDeclaration::CSSWideKeyword(id, _) |
            PropertyDeclaration::WithVariables(id, _) => {
                match id {
                    % for prop in data.longhands:
                        % if prop.animatable:
                            LonghandId::${prop.camel_case} =>
                                Some(AnimatableLonghand::${prop.camel_case}),
                        % endif
                    % endfor
                    _ => None,
                }
            },
            _ => None,
        }
    }
}

/// Convert to nsCSSPropertyID.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From< &'a AnimatableLonghand> for nsCSSPropertyID {
    fn from(property: &'a AnimatableLonghand) -> nsCSSPropertyID {
        // Exhaustive: every AnimatableLonghand variant has a Gecko property id.
        match *property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case}
                        => ${helpers.to_nscsspropertyid(prop.ident)},
                % endif
            % endfor
        }
    }
}

/// Convert to PropertyDeclarationId.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From<AnimatableLonghand> for PropertyDeclarationId<'a> {
    fn from(property: AnimatableLonghand) -> PropertyDeclarationId<'a> {
        match property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case}
                        => PropertyDeclarationId::Longhand(LonghandId::${prop.camel_case}),
                % endif
            % endfor
        }
    }
}

/// Returns true if this nsCSSPropertyID is one of the animatable properties.
///
/// Unlike `AnimatableLonghand::from_nscsspropertyid`, this also accepts
/// animatable shorthands (everything except `all`).
#[cfg(feature = "gecko")]
pub fn nscsspropertyid_is_animatable(property: nsCSSPropertyID) -> bool {
    match property {
        % for prop in data.longhands + data.shorthands_except_all():
            % if prop.animatable:
                ${helpers.to_nscsspropertyid(prop.ident)} => true,
            % endif
        % endfor
        _ => false
    }
}

/// A given transition property, that is either `All`, a transitionable longhand property,
/// a shorthand with at least one transitionable longhand component, or an unsupported property.
// NB: This needs to be here because it needs all the longhands generated
// beforehand.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[derive(Clone, Debug, Eq, Hash, PartialEq, ToCss)] pub enum TransitionProperty { /// All, any transitionable property changing should generate a transition. All, % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: /// ${prop.name} ${prop.camel_case}, % endif % endfor /// Unrecognized property which could be any non-transitionable, custom property, or /// unknown property. Unsupported(CustomIdent) } no_viewport_percentage!(TransitionProperty); impl ComputedValueAsSpecified for TransitionProperty {} impl TransitionProperty { /// Iterates over each longhand property. pub fn each<F: FnMut(&TransitionProperty) -> ()>(mut cb: F) { % for prop in data.longhands: % if prop.transitionable: cb(&TransitionProperty::${prop.camel_case}); % endif % endfor } /// Iterates over every longhand property that is not TransitionProperty::All, stopping and /// returning true when the provided callback returns true for the first time. pub fn any<F: FnMut(&TransitionProperty) -> bool>(mut cb: F) -> bool { % for prop in data.longhands: % if prop.transitionable: if cb(&TransitionProperty::${prop.camel_case}) { return true; } % endif % endfor false } /// Parse a transition-property value. pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { let ident = input.expect_ident()?; let supported = match_ignore_ascii_case! { &ident, "all" => Ok(Some(TransitionProperty::All)), % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: "${prop.name}" => Ok(Some(TransitionProperty::${prop.camel_case})), % endif % endfor "none" => Err(()), _ => Ok(None), }; match supported { Ok(Some(property)) => Ok(property), Ok(None) => CustomIdent::from_ident(ident, &[]).map(TransitionProperty::Unsupported), Err(()) => Err(SelectorParseError::UnexpectedIdent(ident.clone()).into()), } } /// Return transitionable longhands of this shorthand TransitionProperty, except for "all". 
pub fn longhands(&self) -> &'static [TransitionProperty] { % for prop in data.shorthands_except_all(): % if prop.transitionable: static ${prop.ident.upper()}: &'static [TransitionProperty] = &[ % for sub in prop.sub_properties: % if sub.transitionable: TransitionProperty::${sub.camel_case}, % endif % endfor ]; % endif % endfor match *self { % for prop in data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => ${prop.ident.upper()}, % endif % endfor _ => panic!("Not allowed to call longhands() for this TransitionProperty") } } /// Returns true if this TransitionProperty is a shorthand. pub fn is_shorthand(&self) -> bool { match *self { % for prop in data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => true, % endif % endfor _ => false } } } /// Convert to nsCSSPropertyID. #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl<'a> From< &'a TransitionProperty> for nsCSSPropertyID { fn from(transition_property: &'a TransitionProperty) -> nsCSSPropertyID { match *transition_property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: TransitionProperty::${prop.camel_case} => ${helpers.to_nscsspropertyid(prop.ident)}, % endif % endfor TransitionProperty::All => nsCSSPropertyID::eCSSPropertyExtra_all_properties, _ => panic!("Unconvertable Servo transition property: {:?}", transition_property), } } } /// Convert nsCSSPropertyID to TransitionProperty #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl From<nsCSSPropertyID> for TransitionProperty { fn from(property: nsCSSPropertyID) -> TransitionProperty { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: ${helpers.to_nscsspropertyid(prop.ident)} => TransitionProperty::${prop.camel_case}, % else: ${helpers.to_nscsspropertyid(prop.ident)} => TransitionProperty::Unsupported(CustomIdent(Atom::from("${prop.ident}"))), % endif % 
endfor nsCSSPropertyID::eCSSPropertyExtra_all_properties => TransitionProperty::All, _ => panic!("Unconvertable nsCSSPropertyID: {:?}", property), } } } /// Returns true if this nsCSSPropertyID is one of the transitionable properties. #[cfg(feature = "gecko")] pub fn nscsspropertyid_is_transitionable(property: nsCSSPropertyID) -> bool { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: ${helpers.to_nscsspropertyid(prop.ident)} => true, % endif % endfor _ => false } } /// An animated property interpolation between two computed values for that /// property. #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimatedProperty { % for prop in data.longhands: % if prop.animatable: <% if prop.is_animatable_with_computed_value: value_type = "longhands::{}::computed_value::T".format(prop.ident) else: value_type = prop.animation_value_type %> /// ${prop.name} ${prop.camel_case}(${value_type}, ${value_type}), % endif % endfor } impl AnimatedProperty { /// Get the name of this property. pub fn name(&self) -> &'static str { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(..) => "${prop.name}", % endif % endfor } } /// Whether this interpolation does animate, that is, whether the start and /// end values are different. pub fn does_animate(&self) -> bool { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to, % endif % endfor } } /// Whether an animated property has the same end value as another. 
pub fn has_the_same_end_value_as(&self, other: &Self) -> bool { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimatedProperty::${prop.camel_case}(_, ref this_end_value), &AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => { this_end_value == other_end_value } % endif % endfor _ => false, } } /// Update `style` with the proper computed style corresponding to this /// animation at `progress`. pub fn update(&self, style: &mut ComputedValues, progress: f64) { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => { // https://w3c.github.io/web-animations/#discrete-animation-type % if prop.animation_value_type == "discrete": let value = if progress < 0.5 { from.clone() } else { to.clone() }; % else: let value = match from.interpolate(to, progress) { Ok(value) => value, Err(()) => return, }; % endif % if not prop.is_animatable_with_computed_value: let value: longhands::${prop.ident}::computed_value::T = ToAnimatedValue::from_animated_value(value); % endif style.mutate_${prop.style_struct.name_lower}().set_${prop.ident}(value); } % endif % endfor } } /// Get an animatable value from a transition-property, an old style, and a /// new style. 
pub fn from_animatable_longhand(property: &AnimatableLonghand, old_style: &ComputedValues, new_style: &ComputedValues) -> AnimatedProperty { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => { let old_computed = old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); let new_computed = new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); AnimatedProperty::${prop.camel_case}( % if prop.is_animatable_with_computed_value: old_computed, new_computed, % else: old_computed.to_animated_value(), new_computed.to_animated_value(), % endif ) } % endif % endfor } } } /// A collection of AnimationValue that were composed on an element. /// This HashMap stores the values that are the last AnimationValue to be /// composed for each TransitionProperty. #[cfg(feature = "gecko")] pub type AnimationValueMap = FnvHashMap<AnimatableLonghand, AnimationValue>; #[cfg(feature = "gecko")] unsafe impl HasFFI for AnimationValueMap { type FFIType = RawServoAnimationValueMap; } #[cfg(feature = "gecko")] unsafe impl HasSimpleFFI for AnimationValueMap {} /// An enum to represent a single computed value belonging to an animated /// property in order to be interpolated with another one. When interpolating, /// both values need to belong to the same property. /// /// This is different to AnimatedProperty in the sense that AnimatedProperty /// also knows the final value to be used during the animation. /// /// This is to be used in Gecko integration code. /// /// FIXME: We need to add a path for custom properties, but that's trivial after /// this (is a similar path to that of PropertyDeclaration). 
#[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimationValue { % for prop in data.longhands: % if prop.animatable: /// ${prop.name} % if prop.is_animatable_with_computed_value: ${prop.camel_case}(longhands::${prop.ident}::computed_value::T), % else: ${prop.camel_case}(${prop.animation_value_type}), % endif % endif % endfor } impl AnimationValue { /// "Uncompute" this animation value in order to be used inside the CSS /// cascade. pub fn uncompute(&self) -> PropertyDeclaration { use properties::longhands; match *self { % for prop in data.longhands: % if prop.animatable: AnimationValue::${prop.camel_case}(ref from) => { PropertyDeclaration::${prop.camel_case}( % if prop.boxed: Box::new( % endif longhands::${prop.ident}::SpecifiedValue::from_computed_value( % if prop.is_animatable_with_computed_value: from % else: &ToAnimatedValue::from_animated_value(from.clone()) % endif )) % if prop.boxed: ) % endif } % endif % endfor } } /// Construct an AnimationValue from a property declaration. pub fn from_declaration( decl: &PropertyDeclaration, context: &mut Context, initial: &ComputedValues ) -> Option<Self> { use properties::LonghandId; match *decl { % for prop in data.longhands: % if prop.animatable: PropertyDeclaration::${prop.camel_case}(ref val) => { % if prop.ident in SYSTEM_FONT_LONGHANDS and product == "gecko": if let Some(sf) = val.get_system() { longhands::system_font::resolve_system_font(sf, context); } % endif let computed = val.to_computed_value(context); Some(AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif )) }, % endif % endfor PropertyDeclaration::CSSWideKeyword(id, keyword) => { match id { // We put all the animatable properties first in the hopes // that it might increase match locality. 
% for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let computed = match keyword { % if not prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Initial => { let initial_struct = initial.get_${prop.style_struct.name_lower}(); initial_struct.clone_${prop.ident}() }, % if prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Inherit => { let inherit_struct = context.builder .get_parent_${prop.style_struct.name_lower}(); inherit_struct.clone_${prop.ident}() }, }; % if not prop.is_animatable_with_computed_value: let computed = computed.to_animated_value(); % endif Some(AnimationValue::${prop.camel_case}(computed)) }, % endif % endfor % for prop in data.longhands: % if not prop.animatable: LonghandId::${prop.camel_case} => None, % endif % endfor } }, PropertyDeclaration::WithVariables(id, ref unparsed) => { let custom_props = context.style().custom_properties(); let substituted = unparsed.substitute_variables(id, &custom_props, context.quirks_mode); AnimationValue::from_declaration(&substituted, context, initial) }, _ => None // non animatable properties will get included because of shorthands. ignore. } } /// Get an AnimationValue for an AnimatableLonghand from a given computed values. 
pub fn from_computed_values(property: &AnimatableLonghand, computed_values: &ComputedValues) -> Self { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => { let computed = computed_values .get_${prop.style_struct.ident.strip("_")}() .clone_${prop.ident}(); AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif ) } % endif % endfor } } } impl Animatable for AnimationValue { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { % if prop.animation_value_type == "discrete": if self_portion > other_portion { Ok(AnimationValue::${prop.camel_case}(from.clone())) } else { Ok(AnimationValue::${prop.camel_case}(to.clone())) } % else: from.add_weighted(to, self_portion, other_portion) .map(AnimationValue::${prop.camel_case}) % endif } % endif % endfor _ => { panic!("Expected weighted addition of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn add(&self, other: &Self) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": (&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.add(to).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected addition of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": 
(&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.accumulate(to, count).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected accumulation of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type != "discrete": (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.compute_distance(to) }, % else: (&AnimationValue::${prop.camel_case}(ref _from), &AnimationValue::${prop.camel_case}(ref _to)) => { Err(()) }, % endif % endif % endfor _ => { panic!("Expected compute_distance of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } } impl ToAnimatedZero for AnimationValue { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { % for prop in data.longhands: % if prop.animatable and prop.animation_value_type != "discrete": AnimationValue::${prop.camel_case}(ref base) => { Ok(AnimationValue::${prop.camel_case}(base.to_animated_zero()?)) }, % endif % endfor _ => Err(()), } } } impl RepeatableListAnimatable for LengthOrPercentage {} impl RepeatableListAnimatable for Either<f32, LengthOrPercentage> {} impl RepeatableListAnimatable for Either<NonNegativeNumber, NonNegativeLengthOrPercentage> {} macro_rules! repeated_vec_impl { ($($ty:ty),*) => { $(impl<T: RepeatableListAnimatable> Animatable for $ty { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { // If the length of either list is zero, the least common multiple is undefined. 
if self.is_empty() || other.is_empty() { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| { me.add_weighted(you, self_portion, other_portion) }).collect() } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { // If the length of either list is zero, the least common multiple is undefined. if cmp::min(self.len(), other.len()) < 1 { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| { me.compute_squared_distance(you) }).sum() } })* }; } repeated_vec_impl!(SmallVec<[T; 1]>, Vec<T>); /// https://drafts.csswg.org/css-transitions/#animtype-number impl Animatable for Au { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Au((self.0 as f64 * self_portion + other.0 as f64 * other_portion).round() as i32)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.0.compute_distance(&other.0) } } impl <T> Animatable for Option<T> where T: Animatable, { #[inline] fn add_weighted(&self, other: &Option<T>, self_portion: f64, other_portion: f64) -> Result<Option<T>, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { Ok(this.add_weighted(other, self_portion, other_portion).ok()) } (&None, &None) => Ok(None), _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { this.compute_distance(other) }, (&None, &None) => Ok(0.0), _ => Err(()), } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&Some(ref this), &Some(ref other)) => { this.compute_squared_distance(other) }, (&None, 
&None) => Ok(0.0), _ => Err(()), } } } /// https://drafts.csswg.org/css-transitions/#animtype-number impl Animatable for f32 { #[inline] fn add_weighted(&self, other: &f32, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok((*self as f64 * self_portion + *other as f64 * other_portion) as f32) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { Ok((*self - *other).abs() as f64) } } /// https://drafts.csswg.org/css-transitions/#animtype-number impl Animatable for f64 { #[inline] fn add_weighted(&self, other: &f64, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(*self * self_portion + *other * other_portion) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { Ok((*self - *other).abs()) } } /// https://drafts.csswg.org/css-transitions/#animtype-integer impl Animatable for i32 { #[inline] fn add_weighted(&self, other: &i32, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok((*self as f64 * self_portion + *other as f64 * other_portion).round() as i32) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { Ok((*self - *other).abs() as f64) } } /// https://drafts.csswg.org/css-transitions/#animtype-number impl Animatable for Angle { #[inline] fn add_weighted(&self, other: &Angle, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { % for angle_type in [ 'Degree', 'Gradian', 'Turn' ]: (Angle::${angle_type}(val1), Angle::${angle_type}(val2)) => { Ok(Angle::${angle_type}( try!(val1.add_weighted(&val2, self_portion, other_portion)) )) } % endfor _ => { self.radians() .add_weighted(&other.radians(), self_portion, other_portion) .map(Angle::from_radians) } } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { // Use the formula for calculating the distance between angles defined in SVG: // https://www.w3.org/TR/SVG/animate.html#complexDistances Ok((self.radians64() - other.radians64()).abs()) } } /// 
https://drafts.csswg.org/css-transitions/#animtype-percentage impl Animatable for Percentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Percentage((self.0 as f64 * self_portion + other.0 as f64 * other_portion) as f32)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { Ok((self.0 as f64 - other.0 as f64).abs()) } } impl ToAnimatedZero for Percentage { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(Percentage(0.)) } } /// https://drafts.csswg.org/css-transitions/#animtype-visibility impl Animatable for Visibility { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (Visibility::visible, _) => { Ok(if self_portion > 0.0 { *self } else { *other }) }, (_, Visibility::visible) => { Ok(if other_portion > 0.0 { *other } else { *self }) }, _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { if *self == *other { Ok(0.0) } else { Ok(1.0) } } } impl ToAnimatedZero for Visibility { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } impl<T: Animatable + Copy> Animatable for Size2D<T> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let width = self.width.add_weighted(&other.width, self_portion, other_portion)?; let height = self.height.add_weighted(&other.height, self_portion, other_portion)?; Ok(Size2D::new(width, height)) } } impl<T: Animatable + Copy> Animatable for Point2D<T> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let x = self.x.add_weighted(&other.x, self_portion, other_portion)?; let y = self.y.add_weighted(&other.y, self_portion, other_portion)?; Ok(Point2D::new(x, y)) } } impl Animatable for BorderCornerRadius { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) 
-> Result<Self, ()> { self.0.add_weighted(&other.0, self_portion, other_portion).map(GenericBorderCornerRadius) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.0.width.compute_squared_distance(&other.0.width)? + self.0.height.compute_squared_distance(&other.0.height)?) } } impl ToAnimatedZero for BorderCornerRadius { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-transitions/#animtype-length impl Animatable for VerticalAlign { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref this)), VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref other))) => { this.add_weighted(other, self_portion, other_portion).map(|value| { VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(value)) }) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (VerticalAlign::LengthOrPercentage(ref this), VerticalAlign::LengthOrPercentage(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } impl ToAnimatedZero for VerticalAlign { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for CalcLengthOrPercentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { fn add_weighted_half<T>(this: Option<T>, other: Option<T>, self_portion: f64, other_portion: f64) -> Result<Option<T>, ()> where T: Default + Animatable, { match (this, other) { (None, None) => Ok(None), (this, other) => { let this = this.unwrap_or(T::default()); let other = other.unwrap_or(T::default()); 
this.add_weighted(&other, self_portion, other_portion).map(Some) } } } let length = self.unclamped_length().add_weighted(&other.unclamped_length(), self_portion, other_portion)?; let percentage = add_weighted_half(self.percentage, other.percentage, self_portion, other_portion)?; Ok(CalcLengthOrPercentage::with_clamping_mode(length, percentage, self.clamping_mode)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let length_diff = (self.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (self.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Length) } (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Percentage) } (this, other) => { // Special handling for zero values since these should not require calc(). if this.is_definitely_zero() { return other.add_weighted(&other, 0., other_portion) } else if other.is_definitely_zero() { return this.add_weighted(self, self_portion, 0.) 
} let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.add_weighted(&other, self_portion, other_portion) .map(LengthOrPercentage::Calc) } } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } } } impl ToAnimatedZero for LengthOrPercentage { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(LengthOrPercentage::zero()) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrAuto { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) 
                    .map(LengthOrPercentageOrAuto::Length)
            }
            (LengthOrPercentageOrAuto::Percentage(ref this),
             LengthOrPercentageOrAuto::Percentage(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentageOrAuto::Percentage)
            }
            (LengthOrPercentageOrAuto::Auto, LengthOrPercentageOrAuto::Auto) => {
                Ok(LengthOrPercentageOrAuto::Auto)
            }
            (this, other) => {
                // Mixed variants: promote to Option<calc()>. `auto` maps to
                // None, which makes the interpolation below fail.
                let this: Option<CalcLengthOrPercentage> = From::from(this);
                let other: Option<CalcLengthOrPercentage> = From::from(other);
                match this.add_weighted(&other, self_portion, other_portion) {
                    Ok(Some(result)) => Ok(LengthOrPercentageOrAuto::Calc(result)),
                    _ => Err(()),
                }
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (LengthOrPercentageOrAuto::Length(ref this),
             LengthOrPercentageOrAuto::Length(ref other)) => {
                this.compute_distance(other)
            },
            (LengthOrPercentageOrAuto::Percentage(ref this),
             LengthOrPercentageOrAuto::Percentage(ref other)) => {
                this.compute_distance(other)
            },
            (this, other) => {
                // If one of the element is Auto, Option<> will be None, and the returned distance is Err(())
                let this: Option<CalcLengthOrPercentage> = From::from(this);
                let other: Option<CalcLengthOrPercentage> = From::from(other);
                this.compute_distance(&other)
            }
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (LengthOrPercentageOrAuto::Length(ref this),
             LengthOrPercentageOrAuto::Length(ref other)) => {
                let diff = (this.0 - other.0) as f64;
                Ok(diff * diff)
            },
            (LengthOrPercentageOrAuto::Percentage(ref this),
             LengthOrPercentageOrAuto::Percentage(ref other)) => {
                let diff = this.0 as f64 - other.0 as f64;
                Ok(diff * diff)
            },
            (this, other) => {
                // Mixed variants: squared distance in the calc() domain;
                // `auto` on either side yields Err(()).
                let this: Option<CalcLengthOrPercentage> = From::from(this);
                let other: Option<CalcLengthOrPercentage> = From::from(other);
                if let (Some(this), Some(other)) = (this, other) {
                    let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64;
                    let percentage_diff = (this.percentage() - other.percentage()) as f64;
                    Ok(length_diff * length_diff + percentage_diff * percentage_diff)
                } else {
                    Err(())
                }
            }
        }
    }
}

impl ToAnimatedZero for LengthOrPercentageOrAuto {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            LengthOrPercentageOrAuto::Length(_) |
            LengthOrPercentageOrAuto::Percentage(_) |
            LengthOrPercentageOrAuto::Calc(_) => {
                Ok(LengthOrPercentageOrAuto::Length(Au(0)))
            },
            // `auto` has no meaningful zero value.
            LengthOrPercentageOrAuto::Auto => Err(()),
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for LengthOrPercentageOrNone {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        match (*self, *other) {
            (LengthOrPercentageOrNone::Length(ref this),
             LengthOrPercentageOrNone::Length(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentageOrNone::Length)
            }
            (LengthOrPercentageOrNone::Percentage(ref this),
             LengthOrPercentageOrNone::Percentage(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentageOrNone::Percentage)
            }
            (LengthOrPercentageOrNone::None, LengthOrPercentageOrNone::None) => {
                Ok(LengthOrPercentageOrNone::None)
            }
            (this, other) => {
                // Mixed variants: `none` maps to None in the Option<calc()>
                // domain and makes the interpolation fail.
                let this = <Option<CalcLengthOrPercentage>>::from(this);
                let other = <Option<CalcLengthOrPercentage>>::from(other);
                match this.add_weighted(&other, self_portion, other_portion) {
                    Ok(Some(result)) => Ok(LengthOrPercentageOrNone::Calc(result)),
                    _ => Err(()),
                }
            },
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (LengthOrPercentageOrNone::Length(ref this),
             LengthOrPercentageOrNone::Length(ref other)) => {
                this.compute_distance(other)
            },
            (LengthOrPercentageOrNone::Percentage(ref this),
             LengthOrPercentageOrNone::Percentage(ref other)) => {
                this.compute_distance(other)
            },
            (this, other) => {
                // If one of the element is Auto, Option<> will be None, and the returned distance is Err(())
                let this =
                    <Option<CalcLengthOrPercentage>>::from(this);
                let other =
                    <Option<CalcLengthOrPercentage>>::from(other);
                this.compute_distance(&other)
            },
        }
    }
}

impl ToAnimatedZero for LengthOrPercentageOrNone {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            LengthOrPercentageOrNone::Length(_) |
            LengthOrPercentageOrNone::Percentage(_) |
            LengthOrPercentageOrNone::Calc(_) => {
                Ok(LengthOrPercentageOrNone::Length(Au(0)))
            },
            // `none` has no meaningful zero value.
            LengthOrPercentageOrNone::None => Err(()),
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for MozLength {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        match (*self, *other) {
            (MozLength::LengthOrPercentageOrAuto(ref this),
             MozLength::LengthOrPercentageOrAuto(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(MozLength::LengthOrPercentageOrAuto)
            }
            // Other (keyword) variants are not interpolable.
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (MozLength::LengthOrPercentageOrAuto(ref this),
             MozLength::LengthOrPercentageOrAuto(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(()),
        }
    }
}

impl ToAnimatedZero for MozLength {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            MozLength::LengthOrPercentageOrAuto(ref length) => {
                Ok(MozLength::LengthOrPercentageOrAuto(length.to_animated_zero()?))
            },
            _ => Err(())
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for MaxLength {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        match (*self, *other) {
            (MaxLength::LengthOrPercentageOrNone(ref this),
             MaxLength::LengthOrPercentageOrNone(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(MaxLength::LengthOrPercentageOrNone)
            }
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (MaxLength::LengthOrPercentageOrNone(ref this),
             MaxLength::LengthOrPercentageOrNone(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(()),
        }
    }
}

impl ToAnimatedZero for MaxLength {
    // max-width/max-height have no sensible animated zero.
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// http://dev.w3.org/csswg/css-transitions/#animtype-font-weight
impl Animatable for FontWeight {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        let a = self.0 as f64;
        let b = other.0 as f64;
        const NORMAL: f64 = 400.;
        // Interpolate relative to `normal` (400), then clamp to [100, 900]
        // and round to the nearest multiple of 100.
        let weight = (a - NORMAL) * self_portion + (b - NORMAL) * other_portion + NORMAL;
        let weight = (weight.max(100.).min(900.) / 100.).round() * 100.;
        Ok(FontWeight(weight as u16))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        let a = self.0 as f64;
        let b = other.0 as f64;
        a.compute_distance(&b)
    }
}

impl ToAnimatedZero for FontWeight {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Ok(FontWeight::normal()) }
}

/// https://drafts.csswg.org/css-fonts/#font-stretch-prop
impl Animatable for FontStretch {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Interpolate in the 1.0..9.0 numeric domain (see the
        // From<FontStretch> for f64 impl), relative to `normal`.
        let from = f64::from(*self);
        let to = f64::from(*other);
        // FIXME: When `const fn` is available in release rust, make |normal|, below, const.
        let normal = f64::from(FontStretch::normal);
        let result = (from - normal) * self_portion + (to - normal) * other_portion + normal;
        Ok(result.into())
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        let from = f64::from(*self);
        let to = f64::from(*other);
        from.compute_distance(&to)
    }
}

impl ToAnimatedZero for FontStretch {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// We should treat font stretch as real number in order to interpolate this property.
/// https://drafts.csswg.org/css-fonts-3/#font-stretch-animation impl From<FontStretch> for f64 { fn from(stretch: FontStretch) -> f64 { use self::FontStretch::*; match stretch { ultra_condensed => 1.0, extra_condensed => 2.0, condensed => 3.0, semi_condensed => 4.0, normal => 5.0, semi_expanded => 6.0, expanded => 7.0, extra_expanded => 8.0, ultra_expanded => 9.0, } } } impl Into<FontStretch> for f64 { fn into(self) -> FontStretch { use properties::longhands::font_stretch::computed_value::T::*; let index = (self + 0.5).floor().min(9.0).max(1.0); static FONT_STRETCH_ENUM_MAP: [FontStretch; 9] = [ ultra_condensed, extra_condensed, condensed, semi_condensed, normal, semi_expanded, expanded, extra_expanded, ultra_expanded ]; FONT_STRETCH_ENUM_MAP[(index - 1.0) as usize] } } /// https://drafts.csswg.org/css-transitions/#animtype-simple-list impl<H: Animatable, V: Animatable> Animatable for generic_position::Position<H, V> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(generic_position::Position { horizontal: self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?, vertical: self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?, }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.horizontal.compute_squared_distance(&other.horizontal)? + self.vertical.compute_squared_distance(&other.vertical)?) 
} } impl<H, V> ToAnimatedZero for generic_position::Position<H, V> where H: ToAnimatedZero, V: ToAnimatedZero, { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(generic_position::Position { horizontal: self.horizontal.to_animated_zero()?, vertical: self.vertical.to_animated_zero()?, }) } } impl<H, V> RepeatableListAnimatable for generic_position::Position<H, V> where H: RepeatableListAnimatable, V: RepeatableListAnimatable {} /// https://drafts.csswg.org/css-transitions/#animtype-rect impl Animatable for ClipRect { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(ClipRect { top: self.top.add_weighted(&other.top, self_portion, other_portion)?, right: self.right.add_weighted(&other.right, self_portion, other_portion)?, bottom: self.bottom.add_weighted(&other.bottom, self_portion, other_portion)?, left: self.left.add_weighted(&other.left, self_portion, other_portion)?, }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let list = [ self.top.compute_distance(&other.top)?, self.right.compute_distance(&other.right)?, self.bottom.compute_distance(&other.bottom)?, self.left.compute_distance(&other.left)? ]; Ok(list.iter().fold(0.0f64, |sum, diff| sum + diff * diff)) } } impl ToAnimatedZero for ClipRect { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// Check if it's possible to do a direct numerical interpolation /// between these two transform lists. 
/// http://dev.w3.org/csswg/css-transforms/#transform-transform-animation fn can_interpolate_list(from_list: &[TransformOperation], to_list: &[TransformOperation]) -> bool { // Lists must be equal length if from_list.len() != to_list.len() { return false; } // Each transform operation must match primitive type in other list for (from, to) in from_list.iter().zip(to_list) { match (from, to) { (&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) | (&TransformOperation::Skew(..), &TransformOperation::Skew(..)) | (&TransformOperation::Translate(..), &TransformOperation::Translate(..)) | (&TransformOperation::Scale(..), &TransformOperation::Scale(..)) | (&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) | (&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => {} _ => { return false; } } } true } /// Build an equivalent 'identity transform function list' based /// on an existing transform list. /// http://dev.w3.org/csswg/css-transforms/#none-transform-animation fn build_identity_transform_list(list: &[TransformOperation]) -> Vec<TransformOperation> { let mut result = vec!(); for operation in list { match *operation { TransformOperation::Matrix(..) => { let identity = ComputedMatrix::identity(); result.push(TransformOperation::Matrix(identity)); } TransformOperation::MatrixWithPercents(..) => {} TransformOperation::Skew(..) => { result.push(TransformOperation::Skew(Angle::zero(), Angle::zero())) } TransformOperation::Translate(..) => { result.push(TransformOperation::Translate(LengthOrPercentage::zero(), LengthOrPercentage::zero(), Au(0))); } TransformOperation::Scale(..) => { result.push(TransformOperation::Scale(1.0, 1.0, 1.0)); } TransformOperation::Rotate(..) => { result.push(TransformOperation::Rotate(0.0, 0.0, 1.0, Angle::zero())); } TransformOperation::Perspective(..) | TransformOperation::AccumulateMatrix { .. } | TransformOperation::InterpolateMatrix { .. 
} => { // Perspective: We convert a perspective function into an equivalent // ComputedMatrix, and then decompose/interpolate/recompose these matrices. // AccumulateMatrix/InterpolateMatrix: We do interpolation on // AccumulateMatrix/InterpolateMatrix by reading it as a ComputedMatrix // (with layout information), and then do matrix interpolation. // // Therefore, we use an identity matrix to represent the identity transform list. // http://dev.w3.org/csswg/css-transforms/#identity-transform-function let identity = ComputedMatrix::identity(); result.push(TransformOperation::Matrix(identity)); } } } result } /// A wrapper for calling add_weighted that interpolates the distance of the two values from /// an initial_value and uses that to produce an interpolated value. /// This is used for values such as 'scale' where the initial value is 1 and where if we interpolate /// the absolute values, we will produce odd results for accumulation. fn add_weighted_with_initial_val<T: Animatable>(a: &T, b: &T, a_portion: f64, b_portion: f64, initial_val: &T) -> Result<T, ()> { let a = a.add_weighted(&initial_val, 1.0, -1.0)?; let b = b.add_weighted(&initial_val, 1.0, -1.0)?; let result = a.add_weighted(&b, a_portion, b_portion)?; result.add_weighted(&initial_val, 1.0, 1.0) } /// Add two transform lists. 
/// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
fn add_weighted_transform_lists(from_list: &[TransformOperation],
                                to_list: &[TransformOperation],
                                self_portion: f64,
                                other_portion: f64) -> TransformList {
    let mut result = vec![];

    if can_interpolate_list(from_list, to_list) {
        // Lists match pairwise: interpolate each pair of operations.
        for (from, to) in from_list.iter().zip(to_list) {
            match (from, to) {
                (&TransformOperation::Matrix(from),
                 &TransformOperation::Matrix(_to)) => {
                    let sum = from.add_weighted(&_to, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                (&TransformOperation::MatrixWithPercents(_),
                 &TransformOperation::MatrixWithPercents(_)) => {
                    // We don't add_weighted `-moz-transform` matrices yet.
                    // They contain percentage values.
                    {}
                }
                (&TransformOperation::Skew(fx, fy),
                 &TransformOperation::Skew(tx, ty)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Skew(ix, iy));
                }
                (&TransformOperation::Translate(fx, fy, fz),
                 &TransformOperation::Translate(tx, ty, tz)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    let iz = fz.add_weighted(&tz, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Translate(ix, iy, iz));
                }
                (&TransformOperation::Scale(fx, fy, fz),
                 &TransformOperation::Scale(tx, ty, tz)) => {
                    // Scale interpolates relative to an initial value of 1.
                    let ix = add_weighted_with_initial_val(&fx, &tx, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iy = add_weighted_with_initial_val(&fy, &ty, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iz = add_weighted_with_initial_val(&fz, &tz, self_portion,
                                                           other_portion, &1.0).unwrap();
                    result.push(TransformOperation::Scale(ix, iy, iz));
                }
                (&TransformOperation::Rotate(fx, fy, fz, fa),
                 &TransformOperation::Rotate(tx, ty, tz, ta)) => {
                    // Normalize both rotation axes.
                    let norm_f = ((fx * fx) + (fy * fy) + (fz * fz)).sqrt();
                    let norm_t = ((tx * tx) + (ty * ty) + (tz * tz)).sqrt();
                    let (fx, fy, fz) = (fx / norm_f, fy / norm_f, fz / norm_f);
                    let (tx, ty, tz) = (tx / norm_t, ty / norm_t, tz / norm_t);
                    if fx == tx && fy == ty && fz == tz {
                        // Same axis: interpolate the angle directly.
                        let ia = fa.add_weighted(&ta, self_portion, other_portion).unwrap();
                        result.push(TransformOperation::Rotate(fx, fy, fz, ia));
                    } else {
                        // Different axes: fall back to matrix interpolation.
                        let matrix_f = rotate_to_matrix(fx, fy, fz, fa);
                        let matrix_t = rotate_to_matrix(tx, ty, tz, ta);
                        let sum = matrix_f.add_weighted(&matrix_t, self_portion, other_portion)
                                          .unwrap();

                        result.push(TransformOperation::Matrix(sum));
                    }
                }
                (&TransformOperation::Perspective(fd),
                 &TransformOperation::Perspective(_td)) => {
                    // Convert each perspective(d) into the equivalent matrix
                    // (m43 = -1/d) and interpolate those.
                    let mut fd_matrix = ComputedMatrix::identity();
                    let mut td_matrix = ComputedMatrix::identity();
                    fd_matrix.m43 = -1. / fd.to_f32_px();
                    td_matrix.m43 = -1. / _td.to_f32_px();
                    let sum = fd_matrix.add_weighted(&td_matrix, self_portion, other_portion)
                                       .unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                _ => {
                    // This should be unreachable due to the can_interpolate_list() call.
                    unreachable!();
                }
            }
        }
    } else {
        // Lists don't match: defer to a matrix interpolation performed later
        // with layout information available.
        let from_transform_list = TransformList(Some(from_list.to_vec()));
        let to_transform_list = TransformList(Some(to_list.to_vec()));
        result.push(
            TransformOperation::InterpolateMatrix { from_list: from_transform_list,
                                                    to_list: to_transform_list,
                                                    progress: Percentage(other_portion as f32) });
    }

    TransformList(Some(result))
}

/// https://www.w3.org/TR/css-transforms-1/#Rotate3dDefined
fn rotate_to_matrix(x: f32, y: f32, z: f32, a: Angle) -> ComputedMatrix {
    // Build the rotate3d(x, y, z, a) matrix from the half-angle terms, as
    // defined by the spec.
    let half_rad = a.radians() / 2.0;
    let sc = (half_rad).sin() * (half_rad).cos();
    let sq = (half_rad).sin().powi(2);

    ComputedMatrix {
        m11: 1.0 - 2.0 * (y * y + z * z) * sq,
        m12: 2.0 * (x * y * sq + z * sc),
        m13: 2.0 * (x * z * sq - y * sc),
        m14: 0.0,

        m21: 2.0 * (x * y * sq - z * sc),
        m22: 1.0 - 2.0 * (x * x + z * z) * sq,
        m23: 2.0 * (y * z * sq + x * sc),
        m24: 0.0,

        m31: 2.0 * (x * z * sq + y * sc),
        m32: 2.0 * (y * z * sq - x * sc),
        m33: 1.0 - 2.0 * (x * x + y * y) * sq,
        m34: 0.0,

        m41: 0.0,
        m42: 0.0,
        m43: 0.0,
        m44: 1.0
    }
}

/// A 2d matrix for interpolation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
pub struct InnerMatrix2D {
    pub m11: CSSFloat, pub m12: CSSFloat,
    pub m21: CSSFloat, pub m22: CSSFloat,
}

/// A 2d translation function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate2D(f32, f32);

/// A 2d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale2D(f32, f32);

/// A decomposed 2d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed2D {
    /// The translation function.
    pub translate: Translate2D,
    /// The scale function.
    pub scale: Scale2D,
    /// The rotation angle.
    pub angle: f32,
    /// The inner matrix.
    pub matrix: InnerMatrix2D,
}

impl Animatable for InnerMatrix2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Diagonal entries interpolate relative to an initial value of 1
        // (identity); off-diagonal entries relative to 0.
        Ok(InnerMatrix2D {
            m11: add_weighted_with_initial_val(&self.m11, &other.m11,
                                               self_portion, other_portion, &1.0)?,
            m12: self.m12.add_weighted(&other.m12, self_portion, other_portion)?,
            m21: self.m21.add_weighted(&other.m21, self_portion, other_portion)?,
            m22: add_weighted_with_initial_val(&self.m22, &other.m22,
                                               self_portion, other_portion, &1.0)?,
        })
    }
}

impl Animatable for Translate2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        Ok(Translate2D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Scale factors interpolate relative to an initial value of 1.
        Ok(Scale2D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed2D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // If x-axis of one is flipped, and y-axis of the other,
        // convert to an unflipped rotation.
        let mut scale = self.scale;
        let mut angle = self.angle;
        let mut other_angle = other.angle;
        if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) {
            scale.0 = -scale.0;
            scale.1 = -scale.1;
            angle += if angle < 0.0 {180.} else {-180.};
        }

        // Don't rotate the long way around.
        if angle == 0.0 {
            angle = 360.
        }
        if other_angle == 0.0 {
            other_angle = 360.
        }

        if (angle - other_angle).abs() > 180. {
            if angle > other_angle {
                angle -= 360.
            }
            else{
                other_angle -= 360.
            }
        }

        // Interpolate all values.
        let translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        let scale = scale.add_weighted(&other.scale, self_portion, other_portion)?;
        let angle = angle.add_weighted(&other_angle, self_portion, other_portion)?;
        let matrix = self.matrix.add_weighted(&other.matrix, self_portion, other_portion)?;

        Ok(MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: matrix,
        })
    }
}

impl Animatable for ComputedMatrix {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        if self.is_3d() || other.is_3d() {
            // 3D path: decompose, interpolate the decompositions, recompose.
            let decomposed_from = decompose_3d_matrix(*self);
            let decomposed_to = decompose_3d_matrix(*other);
            match (decomposed_from, decomposed_to) {
                (Ok(from), Ok(to)) => {
                    let sum = from.add_weighted(&to, self_portion, other_portion)?;
                    Ok(ComputedMatrix::from(sum))
                },
                _ => {
                    // If either matrix is singular, fall back to the matrix
                    // with the larger weight.
                    let result = if self_portion > other_portion {*self} else {*other};
                    Ok(result)
                }
            }
        } else {
            // 2D path: decomposition cannot fail.
            let decomposed_from = MatrixDecomposed2D::from(*self);
            let decomposed_to = MatrixDecomposed2D::from(*other);
            let sum = decomposed_from.add_weighted(&decomposed_to, self_portion, other_portion)?;
            Ok(ComputedMatrix::from(sum))
        }
    }
}

impl From<ComputedMatrix> for MatrixDecomposed2D {
    /// Decompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix
    fn from(matrix: ComputedMatrix) -> MatrixDecomposed2D {
        let mut row0x = matrix.m11;
        let mut row0y = matrix.m12;
        let mut row1x = matrix.m21;
        let mut row1y = matrix.m22;

        let translate = Translate2D(matrix.m41, matrix.m42);
        let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(),
                                (row1x * row1x + row1y * row1y).sqrt());

        // If determinant is negative, one axis was flipped.
        let determinant = row0x * row1y - row0y * row1x;
        if determinant < 0. {
            if row0x < row1y {
                scale.0 = -scale.0;
            } else {
                scale.1 = -scale.1;
            }
        }

        // Renormalize matrix to remove scale.
        if scale.0 != 0.0 {
            row0x *= 1. / scale.0;
            row0y *= 1. / scale.0;
        }
        if scale.1 != 0.0 {
            row1x *= 1. / scale.1;
            row1y *= 1. / scale.1;
        }

        // Compute rotation and renormalize matrix.
        let mut angle = row0y.atan2(row0x);
        if angle != 0.0 {
            // Rotate the matrix by -angle to remove the rotation component.
            let sn = -row0y;
            let cs = row0x;
            let m11 = row0x;
            let m12 = row0y;
            let m21 = row1x;
            let m22 = row1y;
            row0x = cs * m11 + sn * m21;
            row0y = cs * m12 + sn * m22;
            row1x = -sn * m11 + cs * m21;
            row1y = -sn * m12 + cs * m22;
        }

        let m = InnerMatrix2D {
            m11: row0x, m12: row0y,
            m21: row1x, m22: row1y,
        };

        // Convert into degrees because our rotation functions expect it.
        angle = angle.to_degrees();
        MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: m,
        }
    }
}

impl From<MatrixDecomposed2D> for ComputedMatrix {
    /// Recompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix
    fn from(decomposed: MatrixDecomposed2D) -> ComputedMatrix {
        let mut computed_matrix = ComputedMatrix::identity();
        computed_matrix.m11 = decomposed.matrix.m11;
        computed_matrix.m12 = decomposed.matrix.m12;
        computed_matrix.m21 = decomposed.matrix.m21;
        computed_matrix.m22 = decomposed.matrix.m22;

        // Translate matrix.
        computed_matrix.m41 = decomposed.translate.0;
        computed_matrix.m42 = decomposed.translate.1;

        // Rotate matrix.
        let angle = decomposed.angle.to_radians();
        let cos_angle = angle.cos();
        let sin_angle = angle.sin();

        let mut rotate_matrix = ComputedMatrix::identity();
        rotate_matrix.m11 = cos_angle;
        rotate_matrix.m12 = sin_angle;
        rotate_matrix.m21 = -sin_angle;
        rotate_matrix.m22 = cos_angle;

        // Multiplication of computed_matrix and rotate_matrix
        computed_matrix = multiply(rotate_matrix, computed_matrix);

        // Scale matrix.
computed_matrix.m11 *= decomposed.scale.0; computed_matrix.m12 *= decomposed.scale.0; computed_matrix.m21 *= decomposed.scale.1; computed_matrix.m22 *= decomposed.scale.1; computed_matrix } } #[cfg(feature = "gecko")] impl<'a> From< &'a RawGeckoGfxMatrix4x4> for ComputedMatrix { fn from(m: &'a RawGeckoGfxMatrix4x4) -> ComputedMatrix { ComputedMatrix { m11: m[0], m12: m[1], m13: m[2], m14: m[3], m21: m[4], m22: m[5], m23: m[6], m24: m[7], m31: m[8], m32: m[9], m33: m[10], m34: m[11], m41: m[12], m42: m[13], m43: m[14], m44: m[15], } } } #[cfg(feature = "gecko")] impl From<ComputedMatrix> for RawGeckoGfxMatrix4x4 { fn from(matrix: ComputedMatrix) -> RawGeckoGfxMatrix4x4 { [ matrix.m11, matrix.m12, matrix.m13, matrix.m14, matrix.m21, matrix.m22, matrix.m23, matrix.m24, matrix.m31, matrix.m32, matrix.m33, matrix.m34, matrix.m41, matrix.m42, matrix.m43, matrix.m44 ] } } /// A 3d translation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Translate3D(f32, f32, f32); /// A 3d scale function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Scale3D(f32, f32, f32); /// A 3d skew function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Skew(f32, f32, f32); /// A 3d perspective transformation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Perspective(f32, f32, f32, f32); /// A quaternion used to represent a rotation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Quaternion(f32, f32, f32, f32); /// A decomposed 3d matrix. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct MatrixDecomposed3D { /// A translation function. pub translate: Translate3D, /// A scale function. pub scale: Scale3D, /// The skew component of the transformation. pub skew: Skew, /// The perspective component of the transformation. 
    pub perspective: Perspective,
    /// The quaternion used to represent the rotation.
    pub quaternion: Quaternion,
}

/// Decompose a 3D matrix.
/// https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix
fn decompose_3d_matrix(mut matrix: ComputedMatrix) -> Result<MatrixDecomposed3D, ()> {
    // Normalize the matrix.
    if matrix.m44 == 0.0 {
        return Err(());
    }

    let scaling_factor = matrix.m44;
    % for i in range(1, 5):
        % for j in range(1, 5):
            matrix.m${i}${j} /= scaling_factor;
        % endfor
    % endfor

    // perspective_matrix is used to solve for perspective, but it also provides
    // an easy way to test for singularity of the upper 3x3 component.
    let mut perspective_matrix = matrix;

    % for i in range(1, 4):
        perspective_matrix.m${i}4 = 0.0;
    % endfor
    perspective_matrix.m44 = 1.0;

    if perspective_matrix.determinant() == 0.0 {
        return Err(());
    }

    // First, isolate perspective.
    let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 {
        let right_hand_side: [f32; 4] = [
            matrix.m14,
            matrix.m24,
            matrix.m34,
            matrix.m44
        ];

        // Solve for the perspective component by multiplying the fourth
        // column with the inverse-transpose of perspective_matrix.
        perspective_matrix = perspective_matrix.inverse().unwrap();

        // Transpose perspective_matrix
        perspective_matrix = ComputedMatrix {
            % for i in range(1, 5):
                % for j in range(1, 5):
                    m${i}${j}: perspective_matrix.m${j}${i},
                % endfor
            % endfor
        };

        // Multiply right_hand_side with perspective_matrix
        let mut tmp: [f32; 4] = [0.0; 4];
        % for i in range(1, 5):
            tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) +
                            (right_hand_side[1] * perspective_matrix.m2${i}) +
                            (right_hand_side[2] * perspective_matrix.m3${i}) +
                            (right_hand_side[3] * perspective_matrix.m4${i});
        % endfor

        Perspective(tmp[0], tmp[1], tmp[2], tmp[3])
    } else {
        // No perspective.
        Perspective(0.0, 0.0, 0.0, 1.0)
    };

    // Next take care of translation
    let translate = Translate3D (
        matrix.m41,
        matrix.m42,
        matrix.m43
    );

    // Now get scale and shear. 'row' is a 3 element array of 3 component vectors
    let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3];
    % for i in range(1, 4):
        row[${i - 1}][0] = matrix.m${i}1;
        row[${i - 1}][1] = matrix.m${i}2;
        row[${i - 1}][2] = matrix.m${i}3;
    % endfor

    // Compute X scale factor and normalize first row.
    let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt();
    let mut scale = Scale3D(row0len, 0.0, 0.0);
    row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len];

    // Compute XY shear factor and make 2nd row orthogonal to 1st.
    let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0);
    row[1] = combine(row[1], row[0], 1.0, -skew.0);

    // Now, compute Y scale and normalize 2nd row.
    let row1len = (row[1][0] * row[1][0] + row[1][1] * row[1][1] + row[1][2] * row[1][2]).sqrt();
    scale.1 = row1len;
    row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len];
    skew.0 /= scale.1;

    // Compute XZ and YZ shears, orthogonalize 3rd row
    skew.1 = dot(row[0], row[2]);
    row[2] = combine(row[2], row[0], 1.0, -skew.1);
    skew.2 = dot(row[1], row[2]);
    row[2] = combine(row[2], row[1], 1.0, -skew.2);

    // Next, get Z scale and normalize 3rd row.
    let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt();
    scale.2 = row2len;
    row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len];
    skew.1 /= scale.2;
    skew.2 /= scale.2;

    // At this point, the matrix (in rows) is orthonormal.
    // Check for a coordinate system flip. If the determinant
    // is -1, then negate the matrix and the scaling factors.
    let pdum3 = cross(row[1], row[2]);
    if dot(row[0], pdum3) < 0.0 {
        % for i in range(3):
            scale.${i} *= -1.0;
            row[${i}][0] *= -1.0;
            row[${i}][1] *= -1.0;
            row[${i}][2] *= -1.0;
        % endfor
    }

    // Now, get the rotations out
    let mut quaternion = Quaternion (
        0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0)).sqrt(),
        0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0)).sqrt()
    );

    // Fix the quaternion component signs from the off-diagonal entries.
    if row[2][1] > row[1][2] {
        quaternion.0 = -quaternion.0
    }
    if row[0][2] > row[2][0] {
        quaternion.1 = -quaternion.1
    }
    if row[1][0] > row[0][1] {
        quaternion.2 = -quaternion.2
    }

    Ok(MatrixDecomposed3D {
        translate: translate,
        scale: scale,
        skew: skew,
        perspective: perspective,
        quaternion: quaternion
    })
}

// Combine 2 point.
fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] {
    [
        (ascl * a[0]) + (bscl * b[0]),
        (ascl * a[1]) + (bscl * b[1]),
        (ascl * a[2]) + (bscl * b[2])
    ]
}

// Dot product.
fn dot(a: [f32; 3], b: [f32; 3]) -> f32 {
    a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
}

// Cross product.
// Cross product of two 3-vectors.
fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] {
    [
        row1[1] * row2[2] - row1[2] * row2[1],
        row1[2] * row2[0] - row1[0] * row2[2],
        row1[0] * row2[1] - row1[1] * row2[0]
    ]
}

impl Animatable for Translate3D {
    // Translations interpolate component-wise around an identity of 0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Translate3D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale3D {
    // Scales interpolate around an identity value of 1.0 (not 0.0), hence
    // add_weighted_with_initial_val.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Scale3D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.2, &other.2, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for Skew {
    // Skew factors interpolate component-wise around an identity of 0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Skew(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Perspective {
    // The last component (the m44 slot) has an identity of 1.0; the first
    // three have an identity of 0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Perspective(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
            add_weighted_with_initial_val(&self.3, &other.3, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed3D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        use std::f64;

        debug_assert!((self_portion + other_portion - 1.0f64).abs() <= f64::EPSILON ||
                      other_portion == 1.0f64 || other_portion == 0.0f64,
                      "add_weighted should only be used for interpolating or accumulating transforms");

        let mut sum = *self;

        // Add translate, scale, skew and perspective components.
        sum.translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        sum.scale = self.scale.add_weighted(&other.scale, self_portion, other_portion)?;
        sum.skew = self.skew.add_weighted(&other.skew, self_portion, other_portion)?;
        sum.perspective = self.perspective.add_weighted(&other.perspective, self_portion, other_portion)?;

        // Add quaternions using spherical linear interpolation (Slerp).
        //
        // We take a specialized code path for accumulation (where other_portion is 1)
        if other_portion == 1.0 {
            if self_portion == 0.0 {
                return Ok(*other)
            }

            let clamped_w = self.quaternion.3.min(1.0).max(-1.0);

            // Determine the scale factor.
            let mut theta = clamped_w.acos();
            let mut scale = if theta == 0.0 { 0.0 } else { 1.0 / theta.sin() };
            theta *= self_portion as f32;
            scale *= theta.sin();

            // Scale the self matrix by self_portion.
            let mut scaled_self = *self;
            % for i in range(3):
            scaled_self.quaternion.${i} *= scale;
            % endfor
            scaled_self.quaternion.3 = theta.cos();

            // Multiply scaled-self by other (quaternion product a * b).
            let a = &scaled_self.quaternion;
            let b = &other.quaternion;
            sum.quaternion = Quaternion(
                a.3 * b.0 + a.0 * b.3 + a.1 * b.2 - a.2 * b.1,
                a.3 * b.1 - a.0 * b.2 + a.1 * b.3 + a.2 * b.0,
                a.3 * b.2 + a.0 * b.1 - a.1 * b.0 + a.2 * b.3,
                a.3 * b.3 - a.0 * b.0 - a.1 * b.1 - a.2 * b.2,
            );
        } else {
            // Dot product of the two quaternions: the cosine of the angle
            // between them.
            let mut product = self.quaternion.0 * other.quaternion.0 +
                self.quaternion.1 * other.quaternion.1 +
                self.quaternion.2 * other.quaternion.2 +
                self.quaternion.3 * other.quaternion.3;

            // Clamp product to -1.0 <= product <= 1.0
            product = product.min(1.0);
            product = product.max(-1.0);

            // Identical orientations; no slerp needed (and the 1/sqrt(1 - p^2)
            // below would divide by zero).
            if product == 1.0 {
                return Ok(sum);
            }

            let theta = product.acos();
            let w = (other_portion as f32 * theta).sin() * 1.0 / (1.0 - product * product).sqrt();

            let mut a = *self;
            let mut b = *other;
            % for i in range(4):
            a.quaternion.${i} *= (other_portion as f32 * theta).cos() - product * w;
            b.quaternion.${i} *= w;
            sum.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i};
            % endfor
        }

        Ok(sum)
    }
}

impl From<MatrixDecomposed3D> for ComputedMatrix {
    /// Recompose a 3D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix
    fn from(decomposed: MatrixDecomposed3D) -> ComputedMatrix {
        let mut matrix = ComputedMatrix::identity();

        // Apply perspective
        % for i in range(1, 5):
        matrix.m${i}4 = decomposed.perspective.${i - 1};
        % endfor

        // Apply translation
        % for i in range(1, 4):
        % for j in range(1, 4):
        matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i};
        % endfor
        % endfor

        // Apply rotation
        let x = decomposed.quaternion.0;
        let y = decomposed.quaternion.1;
        let z = decomposed.quaternion.2;
        let w = decomposed.quaternion.3;

        // Construct a composite rotation matrix from the quaternion values
        // rotationMatrix is a identity 4x4 matrix initially
        let mut rotation_matrix = ComputedMatrix::identity();
        rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z);
        rotation_matrix.m12 = 2.0 * (x * y + z * w);
        rotation_matrix.m13 = 2.0 * (x * z - y * w);
        rotation_matrix.m21 = 2.0 * (x * y - z * w);
        rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z);
        rotation_matrix.m23 = 2.0 * (y * z + x * w);
        rotation_matrix.m31 = 2.0 * (x * z + y * w);
        rotation_matrix.m32 = 2.0 * (y * z - x * w);
        rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y);

        matrix = multiply(rotation_matrix, matrix);

        // Apply skew. `temp` is reused for each shear factor, so the slot set
        // by the previous branch is zeroed before setting the next one.
        let mut temp = ComputedMatrix::identity();
        if decomposed.skew.2 != 0.0 {
            temp.m32 = decomposed.skew.2;
            matrix = multiply(temp, matrix);
        }

        if decomposed.skew.1 != 0.0 {
            temp.m32 = 0.0;
            temp.m31 = decomposed.skew.1;
            matrix = multiply(temp, matrix);
        }

        if decomposed.skew.0 != 0.0 {
            temp.m31 = 0.0;
            temp.m21 = decomposed.skew.0;
            matrix = multiply(temp, matrix);
        }

        // Apply scale
        % for i in range(1, 4):
        % for j in range(1, 4):
        matrix.m${i}${j} *= decomposed.scale.${i - 1};
        % endfor
        % endfor

        matrix
    }
}

// Multiplication of two 4x4 matrices.
// Returns a * b. The Mako loops expand into 16 field assignments; a_clone is
// written while the original `a` is still read, so no entry is clobbered
// before it is used.
fn multiply(a: ComputedMatrix, b: ComputedMatrix) -> ComputedMatrix {
    let mut a_clone = a;
    % for i in range(1, 5):
    % for j in range(1, 5):
    a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) +
                        (a.m${i}2 * b.m2${j}) +
                        (a.m${i}3 * b.m3${j}) +
                        (a.m${i}4 * b.m4${j});
    % endfor
    % endfor
    a_clone
}

impl ComputedMatrix {
    // A matrix is "3D" if any entry differs from the 2D-affine pattern
    // (third row/column and perspective slots at their identity values).
    fn is_3d(&self) -> bool {
        self.m13 != 0.0 || self.m14 != 0.0 ||
        self.m23 != 0.0 || self.m24 != 0.0 ||
        self.m31 != 0.0 || self.m32 != 0.0 || self.m33 != 1.0 || self.m34 != 0.0 ||
        self.m43 != 0.0 || self.m44 != 1.0
    }

    // Full 4x4 determinant, expanded as 24 signed products.
    fn determinant(&self) -> CSSFloat {
        self.m14 * self.m23 * self.m32 * self.m41 -
        self.m13 * self.m24 * self.m32 * self.m41 -
        self.m14 * self.m22 * self.m33 * self.m41 +
        self.m12 * self.m24 * self.m33 * self.m41 +
        self.m13 * self.m22 * self.m34 * self.m41 -
        self.m12 * self.m23 * self.m34 * self.m41 -
        self.m14 * self.m23 * self.m31 * self.m42 +
        self.m13 * self.m24 * self.m31 * self.m42 +
        self.m14 * self.m21 * self.m33 * self.m42 -
        self.m11 * self.m24 * self.m33 * self.m42 -
        self.m13 * self.m21 * self.m34 * self.m42 +
        self.m11 * self.m23 * self.m34 * self.m42 +
        self.m14 * self.m22 * self.m31 * self.m43 -
        self.m12 * self.m24 * self.m31 * self.m43 -
        self.m14 * self.m21 * self.m32 * self.m43 +
        self.m11 * self.m24 * self.m32 * self.m43 +
        self.m12 * self.m21 * self.m34 * self.m43 -
        self.m11 * self.m22 * self.m34 * self.m43 -
        self.m13 * self.m22 * self.m31 * self.m44 +
        self.m12 * self.m23 * self.m31 * self.m44 +
        self.m13 * self.m21 * self.m32 * self.m44 -
        self.m11 * self.m23 * self.m32 * self.m44 -
        self.m12 * self.m21 * self.m33 * self.m44 +
        self.m11 * self.m22 * self.m33 * self.m44
    }

    // Inverse via the adjugate (cofactor) matrix scaled by 1/det.
    // Returns None for a singular matrix (det == 0).
    fn inverse(&self) -> Option<ComputedMatrix> {
        let mut det = self.determinant();

        if det == 0.0 {
            return None;
        }

        det = 1.0 / det;
        let x = ComputedMatrix {
            m11: det *
            (self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 +
             self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 -
             self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44),
            m12: det *
            (self.m14*self.m33*self.m42 - self.m13*self.m34*self.m42 -
             self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 +
             self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44),
            m13: det *
            (self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 +
             self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 -
             self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44),
            m14: det *
            (self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 -
             self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 +
             self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34),
            m21: det *
            (self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 -
             self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 +
             self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44),
            m22: det *
            (self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 +
             self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 -
             self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44),
            m23: det *
            (self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 -
             self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 +
             self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44),
            m24: det *
            (self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 +
             self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 -
             self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34),
            m31: det *
            (self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 +
             self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 -
             self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44),
            m32: det *
            (self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 -
             self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 +
             self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44),
            m33: det *
            (self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 +
             self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 -
             self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44),
            m34: det *
            (self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 -
             self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 +
             self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34),
            m41: det *
            (self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 -
             self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 +
             self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43),
            m42: det *
            (self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 +
             self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 -
             self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43),
            m43: det *
            (self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 -
             self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 +
             self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43),
            m44: det *
            (self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 +
             self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 -
             self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33),
        };

        Some(x)
    }
}

/// https://drafts.csswg.org/css-transforms/#interpolation-of-transforms
impl Animatable for TransformList {
    #[inline]
    fn add_weighted(&self, other: &TransformList, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
        let result = match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                // Two lists of transforms
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&Some(ref from_list), &None) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                // Interpolate against a matching list of identity transforms.
                let to_list = build_identity_transform_list(from_list);
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&None, &Some(ref to_list)) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                let from_list = build_identity_transform_list(to_list);
                add_weighted_transform_lists(&from_list, to_list, self_portion, other_portion)
            }
            _ => {
                // http://dev.w3.org/csswg/css-transforms/#none-none-animation
                TransformList(None)
            }
        };

        Ok(result)
    }

    // Addition concatenates the two lists (per css-transforms addition).
    fn add(&self, other: &Self) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                Ok(TransformList(Some([&from_list[..], &to_list[..]].concat())))
            }
            (&Some(_), &None) => {
                Ok(self.clone())
            }
            (&None, &Some(_)) => {
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }

    #[inline]
    fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                if can_interpolate_list(from_list, to_list) {
                    // Matched lists accumulate pairwise.
                    Ok(add_weighted_transform_lists(from_list, &to_list, count as f64, 1.0))
                } else {
                    // Mismatched lists: defer to an AccumulateMatrix operation,
                    // clamping count into i32 range.
                    use std::i32;
                    let result = vec![TransformOperation::AccumulateMatrix {
                        from_list: self.clone(),
                        to_list: other.clone(),
                        count: cmp::min(count, i32::MAX as u64) as i32
                    }];
                    Ok(TransformList(Some(result)))
                }
            }
            (&Some(ref from_list), &None) => {
                Ok(add_weighted_transform_lists(from_list, from_list, count as f64, 0.0))
            }
            (&None, &Some(_)) => {
                // If |self| is 'none' then we are calculating:
                //
                //    none * |count| + |other|
                //    = none + |other|
                //    = |other|
                //
                // Hence the result is just |other|.
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }
}

impl ToAnimatedZero for TransformList {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(TransformList(None))
    }
}

impl<T, U> Animatable for Either<T, U>
        where T: Animatable + Copy, U: Animatable + Copy,
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        match (*self, *other) {
            (Either::First(ref this), Either::First(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::First)
            },
            (Either::Second(ref this), Either::Second(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::Second)
            },
            _ => {
                // Mismatched variants fall back to discrete interpolation:
                // pick whichever side carries the larger portion.
                let result = if self_portion > other_portion {*self} else {*other};
                Ok(result)
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(())
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_squared_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_squared_distance(other)
            },
            _ => Err(())
        }
    }
}

impl<A, B> ToAnimatedZero for Either<A, B>
    where A: ToAnimatedZero, B: ToAnimatedZero,
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            Either::First(ref first) => {
                Ok(Either::First(first.to_animated_zero()?))
            },
            Either::Second(ref second) => {
                Ok(Either::Second(second.to_animated_zero()?))
            },
        }
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// Unlike RGBA, each component value may exceed the range [0.0, 1.0].
pub struct IntermediateRGBA {
    /// The red component.
    pub red: f32,
    /// The green component.
    pub green: f32,
    /// The blue component.
    pub blue: f32,
    /// The alpha component.
    pub alpha: f32,
}

impl IntermediateRGBA {
    /// Returns a transparent color.
    #[inline]
    pub fn transparent() -> Self {
        Self::new(0., 0., 0., 0.)
    }

    /// Returns a new color.
    #[inline]
    pub fn new(red: f32, green: f32, blue: f32, alpha: f32) -> Self {
        IntermediateRGBA { red: red, green: green, blue: blue, alpha: alpha }
    }
}

impl ToAnimatedValue for RGBA {
    type AnimatedValue = IntermediateRGBA;

    // Widen u8-backed components to f32 for animation.
    #[inline]
    fn to_animated_value(self) -> Self::AnimatedValue {
        IntermediateRGBA::new(
            self.red_f32(),
            self.green_f32(),
            self.blue_f32(),
            self.alpha_f32(),
        )
    }

    #[inline]
    fn from_animated_value(animated: Self::AnimatedValue) -> Self {
        // RGBA::from_floats clamps each component values.
        RGBA::from_floats(
            animated.red,
            animated.green,
            animated.blue,
            animated.alpha,
        )
    }
}

/// Unlike Animatable for RGBA we don't clamp any component values.
impl Animatable for IntermediateRGBA {
    // Interpolation happens in premultiplied-alpha space: each channel is
    // multiplied by its alpha before weighting, then divided back out.
    #[inline]
    fn add_weighted(&self, other: &IntermediateRGBA, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        let mut alpha = self.alpha.add_weighted(&other.alpha, self_portion, other_portion)?;
        if alpha <= 0. {
            // Ideally we should return color value that only alpha component is
            // 0, but this is what current gecko does.
            Ok(IntermediateRGBA::transparent())
        } else {
            alpha = alpha.min(1.);
            let red = (self.red * self.alpha).add_weighted(
                &(other.red * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let green = (self.green * self.alpha).add_weighted(
                &(other.green * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let blue = (self.blue * self.alpha).add_weighted(
                &(other.blue * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            Ok(IntermediateRGBA::new(red, green, blue, alpha))
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    // Squared Euclidean distance over (alpha, premultiplied r, g, b).
    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        let start = [ self.alpha,
                      self.red * self.alpha,
                      self.green * self.alpha,
                      self.blue * self.alpha ];
        let end = [ other.alpha,
                    other.red * other.alpha,
                    other.green * other.alpha,
                    other.blue * other.alpha ];
        let diff = start.iter().zip(&end)
                        .fold(0.0f64, |n, (&a, &b)| {
                            let diff = (a - b) as f64;
                            n + diff * diff
                        });
        Ok(diff)
    }
}

impl ToAnimatedZero for IntermediateRGBA {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(IntermediateRGBA::transparent())
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
/// A color blended between a numeric color and `currentcolor`.
/// `foreground_ratio` is the weight of `currentcolor` in [0, 1].
pub struct IntermediateColor {
    color: IntermediateRGBA,
    foreground_ratio: f32,
}

impl IntermediateColor {
    // Pure `currentcolor` (ratio 1, numeric part unused/transparent).
    fn currentcolor() -> Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 1.,
        }
    }

    /// Returns a transparent intermediate color.
    pub fn transparent() -> Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 0.,
        }
    }

    fn is_currentcolor(&self) -> bool {
        self.foreground_ratio >= 1.
    }

    fn is_numeric(&self) -> bool {
        self.foreground_ratio <= 0.
    }

    // The numeric part with its alpha scaled down by the complement of the
    // currentcolor weight.
    fn effective_intermediate_rgba(&self) -> IntermediateRGBA {
        IntermediateRGBA {
            alpha: self.color.alpha * (1. - self.foreground_ratio),
            .. self.color
        }
    }
}

impl ToAnimatedValue for Color {
    type AnimatedValue = IntermediateColor;

    // The computed foreground_ratio is stored as a u8; scale into [0, 1].
    #[inline]
    fn to_animated_value(self) -> Self::AnimatedValue {
        IntermediateColor {
            color: self.color.to_animated_value(),
            foreground_ratio: self.foreground_ratio as f32 * (1. / 255.),
        }
    }

    #[inline]
    fn from_animated_value(animated: Self::AnimatedValue) -> Self {
        Color {
            color: RGBA::from_animated_value(animated.color),
            foreground_ratio: (animated.foreground_ratio * 255.).round() as u8,
        }
    }
}

impl Animatable for IntermediateColor {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Common cases are interpolating between two numeric colors,
        // two currentcolors, and a numeric color and a currentcolor.
        //
        // Note: this algorithm assumes self_portion + other_portion
        // equals to one, so it may be broken for additive operation.
        // To properly support additive color interpolation, we would
        // need two ratio fields in computed color types.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(IntermediateColor::currentcolor())
            } else {
                Ok(IntermediateColor {
                    color: self.color.add_weighted(&other.color, self_portion, other_portion)?,
                    foreground_ratio: self.foreground_ratio,
                })
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            // currentcolor -> numeric: keep the numeric color, animate only
            // the ratio (relies on the portions summing to one; see above).
            Ok(IntermediateColor {
                color: other.color,
                foreground_ratio: self_portion as f32,
            })
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(IntermediateColor {
                color: self.color,
                foreground_ratio: other_portion as f32,
            })
        } else {
            // For interpolating between two complex colors, we need to
            // generate colors with effective alpha value.
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let color = self_color.add_weighted(&other_color, self_portion, other_portion)?;
            // Then we compute the final foreground ratio, and derive
            // the final alpha value from the effective alpha value.
            // NOTE(review): if foreground_ratio interpolates to exactly 1 this
            // divides by zero — presumably unreachable on this branch since
            // neither endpoint is a pure currentcolor; confirm.
            let foreground_ratio = self.foreground_ratio
                .add_weighted(&other.foreground_ratio, self_portion, other_portion)?;
            let alpha = color.alpha / (1. - foreground_ratio);
            Ok(IntermediateColor {
                color: IntermediateRGBA {
                    alpha: alpha,
                    .. color
                },
                foreground_ratio: foreground_ratio,
            })
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // All comments in add_weighted also applies here.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(0.)
            } else {
                self.color.compute_squared_distance(&other.color)
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            // The `+ 1.` accounts for the unit distance in the ratio axis.
            Ok(IntermediateRGBA::transparent().compute_squared_distance(&other.color)? + 1.)
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(self.color.compute_squared_distance(&IntermediateRGBA::transparent())? + 1.)
        } else {
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let dist = self_color.compute_squared_distance(&other_color)?;
            let ratio_diff = (self.foreground_ratio - other.foreground_ratio) as f64;
            Ok(dist + ratio_diff * ratio_diff)
        }
    }
}

impl ToAnimatedZero for IntermediateColor {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}

/// Animatable SVGPaint
pub type IntermediateSVGPaint = SVGPaint<IntermediateRGBA>;

/// Animatable SVGPaintKind
pub type IntermediateSVGPaintKind = SVGPaintKind<IntermediateRGBA>;

impl Animatable for IntermediateSVGPaint {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(IntermediateSVGPaint {
            kind: self.kind.add_weighted(&other.kind, self_portion, other_portion)?,
            fallback: self.fallback.add_weighted(&other.fallback, self_portion, other_portion)?,
        })
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok(self.kind.compute_squared_distance(&other.kind)? +
            self.fallback.compute_squared_distance(&other.fallback)?)
    }
}

impl ToAnimatedZero for IntermediateSVGPaint {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        Ok(IntermediateSVGPaint {
            kind: self.kind.to_animated_zero()?,
            // A fallback that cannot be zeroed is dropped rather than erroring.
            fallback: self.fallback.and_then(|v| v.to_animated_zero().ok()),
        })
    }
}

impl Animatable for IntermediateSVGPaintKind {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                Ok(SVGPaintKind::Color(self_color.add_weighted(other_color, self_portion, other_portion)?))
            }
            // FIXME context values should be interpolable with colors
            // Gecko doesn't implement this behavior either.
            // Matching non-color variants interpolate to themselves; any other
            // combination is non-interpolable.
            (&SVGPaintKind::None, &SVGPaintKind::None) => Ok(SVGPaintKind::None),
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) => Ok(SVGPaintKind::ContextFill),
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke) => Ok(SVGPaintKind::ContextStroke),
            _ => Err(())
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                self_color.compute_distance(other_color)
            }
            (&SVGPaintKind::None, &SVGPaintKind::None) |
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) |
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke)=> Ok(0.0),
            _ => Err(())
        }
    }
}

impl ToAnimatedZero for IntermediateSVGPaintKind {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match *self {
            SVGPaintKind::Color(ref color) => {
                Ok(SVGPaintKind::Color(color.to_animated_zero()?))
            },
            SVGPaintKind::None |
            SVGPaintKind::ContextFill |
            SVGPaintKind::ContextStroke => Ok(self.clone()),
            // NOTE(review): catch-all after all visible variants — presumably
            // covers cfg-gated variants of SVGPaintKind; confirm.
            _ => Err(()),
        }
    }
}

impl<LengthType> Animatable for SVGLength<LengthType>
        where LengthType: Animatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGLength::Length(ref this), &SVGLength::Length(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(SVGLength::Length)
            }
            _ => {
                // Mixed Length/ContextValue: discrete fallback — keep the side
                // with the larger portion.
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGLength::Length(ref this), &SVGLength::Length(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<LengthType> ToAnimatedZero for SVGLength<LengthType> where LengthType : ToAnimatedZero {
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGLength::Length(ref length) => length.to_animated_zero().map(SVGLength::Length),
            &SVGLength::ContextValue => Ok(SVGLength::ContextValue),
        }
    }
}

impl<LengthType> Animatable for SVGStrokeDashArray<LengthType>
        where LengthType : RepeatableListAnimatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGStrokeDashArray::Values(ref this), &SVGStrokeDashArray::Values(ref other))=> {
                // Repeatable-list semantics: shorter list repeats to match.
                this.add_weighted(other, self_portion, other_portion)
                    .map(SVGStrokeDashArray::Values)
            }
            _ => {
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGStrokeDashArray::Values(ref this), &SVGStrokeDashArray::Values(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<LengthType> ToAnimatedZero for SVGStrokeDashArray<LengthType>
        where LengthType : ToAnimatedZero + Clone
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGStrokeDashArray::Values(ref values) => {
                // Zero every entry; any failure poisons the whole list.
                values.iter().map(ToAnimatedZero::to_animated_zero)
                      .collect::<Result<Vec<_>, ()>>().map(SVGStrokeDashArray::Values)
            }
            &SVGStrokeDashArray::ContextValue => Ok(SVGStrokeDashArray::ContextValue),
        }
    }
}

impl<OpacityType> Animatable for SVGOpacity<OpacityType>
        where OpacityType: Animatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGOpacity::Opacity(ref this), &SVGOpacity::Opacity(ref other)) => {
                this.add_weighted(other, self_portion, other_portion).map(SVGOpacity::Opacity)
            }
            _ => {
                Ok(if self_portion > other_portion { self.clone() } else { other.clone() })
            }
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGOpacity::Opacity(ref this), &SVGOpacity::Opacity(ref other)) => {
                this.compute_distance(other)
            }
            _ => Err(())
        }
    }
}

impl<OpacityType> ToAnimatedZero for SVGOpacity<OpacityType>
        where OpacityType: ToAnimatedZero + Clone
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        match self {
            &SVGOpacity::Opacity(ref opacity) =>
                opacity.to_animated_zero().map(SVGOpacity::Opacity),
            other => Ok(other.clone()),
        }
    }
}

<%
    FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale',
                         'HueRotate', 'Invert', 'Opacity', 'Saturate',
                         'Sepia' ]
%>

/// https://drafts.fxtf.org/filters/#animation-of-filters
/// Interpolates two filter functions of the same kind; mismatched kinds
/// return Err (handled discretely by the caller).
fn add_weighted_filter_function_impl(from: &AnimatedFilter,
                                     to: &AnimatedFilter,
                                     self_portion: f64,
                                     other_portion: f64)
                                     -> Result<AnimatedFilter, ()> {
    match (from, to) {
        // Functions whose identity is their natural zero.
        % for func in [ 'Blur', 'HueRotate' ]:
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(from_value.add_weighted(
                &to_value,
                self_portion,
                other_portion,
            )?))
       },
        % endfor
        // Functions with an initial (lacuna) value of 0.
        % for func in [ 'Grayscale', 'Invert', 'Sepia' ]:
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(add_weighted_with_initial_val(
                &from_value,
                &to_value,
                self_portion,
                other_portion,
                &NonNegative::<CSSFloat>(0.0),
            )?))
        },
        % endfor
        // Functions with an initial (lacuna) value of 1.
        % for func in [ 'Brightness', 'Contrast', 'Opacity', 'Saturate' ]:
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(add_weighted_with_initial_val(
                &from_value,
                &to_value,
                self_portion,
                other_portion,
                &NonNegative::<CSSFloat>(1.0),
            )?))
        },
        % endfor
        % if product == "gecko":
        (&Filter::DropShadow(ref from_value), &Filter::DropShadow(ref to_value)) => {
            Ok(Filter::DropShadow(from_value.add_weighted(
                &to_value,
                self_portion,
                other_portion,
            )?))
        },
        (&Filter::Url(_), &Filter::Url(_)) => {
            Err(())
        },
        % endif
        _ => {
            // If different filter functions are specified,
            // we will need to interpolate as discrete.
            Err(())
        },
    }
}

/// https://drafts.fxtf.org/filters/#animation-of-filters
/// A missing end is treated as that function's identity by interpolating the
/// present function against itself with a zero portion.
/// NOTE(review): the doubled `<` in `Option<<&...>` looks like a Mako escaping
/// quirk of this template; confirm before normalizing it.
fn add_weighted_filter_function(from: Option<<&AnimatedFilter>,
                                to: Option<<&AnimatedFilter>,
                                self_portion: f64,
                                other_portion: f64) -> Result<AnimatedFilter, ()> {
    match (from, to) {
        (Some(f), Some(t)) => {
            add_weighted_filter_function_impl(f, t, self_portion, other_portion)
        },
        (Some(f), None) => {
            add_weighted_filter_function_impl(f, f, self_portion, 0.0)
        },
        (None, Some(t)) => {
            add_weighted_filter_function_impl(t, t, other_portion, 0.0)
        },
        _ => { Err(()) }
    }
}

// Squared distance between two filter functions of the same kind; mismatched
// kinds are non-interpolable (Err).
fn compute_filter_square_distance(from: &AnimatedFilter,
                                  to: &AnimatedFilter)
                                  -> Result<f64, ()> {
    match (from, to) {
        % for func in FILTER_FUNCTIONS :
        (&Filter::${func}(f), &Filter::${func}(t)) => {
            Ok(try!(f.compute_squared_distance(&t)))
        },
        % endfor
        % if product == "gecko":
        (&Filter::DropShadow(ref f), &Filter::DropShadow(ref t)) => {
            Ok(try!(f.compute_squared_distance(&t)))
        },
        % endif
        _ => {
            Err(())
        }
    }
}

impl Animatable for AnimatedFilterList {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Walk both lists in lockstep; when one runs out, keep feeding None so
        // the shorter list is padded with per-function identity values.
        let mut filters = vec![];
        let mut from_iter = self.0.iter();
        let mut to_iter = other.0.iter();

        let mut from = from_iter.next();
        let mut to = to_iter.next();
        while from.is_some() || to.is_some() {
            filters.push(try!(add_weighted_filter_function(from,
                                                           to,
                                                           self_portion,
                                                           other_portion)));
            if from.is_some() {
                from = from_iter.next();
            }
            if to.is_some() {
                to = to_iter.next();
            }
        }

        Ok(AnimatedFilterList(filters))
    }

    // Addition is list concatenation.
    fn add(&self, other: &Self) -> Result<Self, ()> {
        Ok(AnimatedFilterList(self.0.iter().chain(other.0.iter()).cloned().collect()))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        use itertools::{EitherOrBoth, Itertools};

        let mut square_distance: f64 = 0.0;
        for it in self.0.iter().zip_longest(other.0.iter()) {
            square_distance += match it {
                EitherOrBoth::Both(from, to) => {
                    compute_filter_square_distance(&from, &to)?
                },
                // Unpaired entry: measure against that function's identity
                // (obtained by zero-weighting the function against itself).
                EitherOrBoth::Left(list) | EitherOrBoth::Right(list)=> {
                    let none = add_weighted_filter_function(Some(list), Some(list), 0.0, 0.0)?;
                    compute_filter_square_distance(&none, &list)?
                },
            };
        }
        Ok(square_distance)
    }
}

/// A comparator to sort PropertyIds such that longhands are sorted before shorthands,
/// shorthands with fewer components are sorted before shorthands with more components,
/// and otherwise shorthands are sorted by IDL name as defined by [Web Animations][property-order].
///
/// Using this allows us to prioritize values specified by longhands (or smaller
/// shorthand subsets) when longhands and shorthands are both specified on the one keyframe.
///
/// Example orderings that result from this:
///
///   margin-left, margin
///
/// and:
///
///   border-top-color, border-color, border-top, border
///
/// [property-order] https://w3c.github.io/web-animations/#calculating-computed-keyframes
#[cfg(feature = "gecko")]
pub fn compare_property_priority(a: &PropertyId, b: &PropertyId) -> cmp::Ordering {
    match (a.as_shorthand(), b.as_shorthand()) {
        // Within shorthands, sort by the number of subproperties, then by IDL name.
        (Ok(a), Ok(b)) => {
            let subprop_count_a = a.longhands().len();
            let subprop_count_b = b.longhands().len();
            subprop_count_a.cmp(&subprop_count_b).then_with(
                || get_idl_name_sort_order(&a).cmp(&get_idl_name_sort_order(&b)))
        },

        // Longhands go before shorthands.
        (Ok(_), Err(_)) => cmp::Ordering::Greater,
        (Err(_), Ok(_)) => cmp::Ordering::Less,

        // Both are longhands or custom properties in which case they don't
        // overlap and should sort equally.
        _ => cmp::Ordering::Equal,
    }
}

// Returns the rank of `shorthand` in the IDL-name-sorted list of shorthands.
// The table is computed at template-expansion time by the Mako block below.
#[cfg(feature = "gecko")]
fn get_idl_name_sort_order(shorthand: &ShorthandId) -> u32 {
<%
# Sort by IDL name.
sorted_shorthands = sorted(data.shorthands, key=lambda p: to_idl_name(p.ident))

# Annotate with sorted position
sorted_shorthands = [(p, position) for position, p in enumerate(sorted_shorthands)]
%>
    match *shorthand {
        % for property, position in sorted_shorthands:
        ShorthandId::${property.camel_case} => ${position},
        % endfor
    }
}

impl<T> Animatable for NonNegative<T>
    where T: Animatable + Clone
{
    // Delegate to the inner value and re-wrap; the NonNegative invariant is
    // assumed to be maintained by the inner impl.
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(NonNegative::<T>)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }
}

impl<T> ToAnimatedZero for NonNegative<T>
    where T: ToAnimatedZero
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        self.0.to_animated_zero().map(NonNegative::<T>)
    }
}

impl<T> Animatable for GreaterThanOrEqualToOne<T>
    where T: Animatable + Clone
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(GreaterThanOrEqualToOne::<T>)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }
}

impl<T> ToAnimatedZero for GreaterThanOrEqualToOne<T>
    where T: ToAnimatedZero
{
    #[inline]
    fn to_animated_zero(&self) -> Result<Self, ()> {
        self.0.to_animated_zero().map(GreaterThanOrEqualToOne::<T>)
    }
}
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ <%namespace name="helpers" file="/helpers.mako.rs" /> <% from data import SYSTEM_FONT_LONGHANDS %> use app_units::Au; use cssparser::{Parser, RGBA}; use euclid::{Point2D, Size2D}; #[cfg(feature = "gecko")] use gecko_bindings::bindings::RawServoAnimationValueMap; #[cfg(feature = "gecko")] use gecko_bindings::structs::RawGeckoGfxMatrix4x4; #[cfg(feature = "gecko")] use gecko_bindings::structs::nsCSSPropertyID; #[cfg(feature = "gecko")] use gecko_bindings::sugar::ownership::{HasFFI, HasSimpleFFI}; #[cfg(feature = "gecko")] use gecko_string_cache::Atom; use properties::{CSSWideKeyword, PropertyDeclaration}; use properties::longhands; use properties::longhands::background_size::computed_value::T as BackgroundSizeList; use properties::longhands::font_weight::computed_value::T as FontWeight; use properties::longhands::font_stretch::computed_value::T as FontStretch; use properties::longhands::transform::computed_value::ComputedMatrix; use properties::longhands::transform::computed_value::ComputedOperation as TransformOperation; use properties::longhands::transform::computed_value::T as TransformList; use properties::longhands::vertical_align::computed_value::T as VerticalAlign; use properties::longhands::visibility::computed_value::T as Visibility; #[cfg(feature = "gecko")] use properties::{PropertyDeclarationId, LonghandId}; use selectors::parser::SelectorParseError; use smallvec::SmallVec; use std::cmp; #[cfg(feature = "gecko")] use fnv::FnvHashMap; use style_traits::ParseError; use super::ComputedValues; use values::{Auto, CSSFloat, CustomIdent, Either}; use values::animated::effects::BoxShadowList as AnimatedBoxShadowList; use values::animated::effects::Filter as AnimatedFilter; use values::animated::effects::FilterList as AnimatedFilterList; use 
values::animated::effects::TextShadowList as AnimatedTextShadowList;
use values::computed::{Angle, LengthOrPercentageOrAuto, LengthOrPercentageOrNone};
use values::computed::{BorderCornerRadius, ClipRect};
use values::computed::{CalcLengthOrPercentage, Color, Context, ComputedValueAsSpecified};
use values::computed::{LengthOrPercentage, MaxLength, MozLength, ToComputedValue};
use values::generics::{SVGPaint, SVGPaintKind};
use values::generics::border::BorderCornerRadius as GenericBorderCornerRadius;
use values::generics::effects::Filter;
use values::generics::position as generic_position;
use values::specified::length::Percentage;

/// A longhand property whose animation type is not "none".
///
/// NOTE: This includes the 'display' property since it is animatable from SMIL even though it is
/// not animatable from CSS animations or Web Animations. CSS transitions also does not allow
/// animating 'display', but for CSS transitions we have the separate TransitionProperty type.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatableLonghand {
    % for prop in data.longhands:
    % if prop.animatable:
    /// ${prop.name}
    ${prop.camel_case},
    % endif
    % endfor
}

impl AnimatableLonghand {
    /// Returns true if this AnimatableLonghand is one of the discretely animatable properties.
    pub fn is_discrete(&self) -> bool {
        match *self {
            % for prop in data.longhands:
            % if prop.animation_value_type == "discrete":
            AnimatableLonghand::${prop.camel_case} => true,
            % endif
            % endfor
            _ => false
        }
    }

    /// Converts from an nsCSSPropertyID. Returns None if nsCSSPropertyID is not an animatable
    /// longhand in Servo.
    #[cfg(feature = "gecko")]
    pub fn from_nscsspropertyid(css_property: nsCSSPropertyID) -> Option<Self> {
        match css_property {
            % for prop in data.longhands:
            % if prop.animatable:
            ${helpers.to_nscsspropertyid(prop.ident)}
                => Some(AnimatableLonghand::${prop.camel_case}),
            % endif
            % endfor
            _ => None
        }
    }

    /// Converts from TransitionProperty. Returns None if the property is not an animatable
    /// longhand.
    pub fn from_transition_property(transition_property: &TransitionProperty) -> Option<Self> {
        match *transition_property {
            % for prop in data.longhands:
            % if prop.transitionable and prop.animatable:
            TransitionProperty::${prop.camel_case}
                => Some(AnimatableLonghand::${prop.camel_case}),
            % endif
            % endfor
            _ => None
        }
    }

    /// Get an animatable longhand property from a property declaration.
    pub fn from_declaration(declaration: &PropertyDeclaration) -> Option<Self> {
        use properties::LonghandId;
        match *declaration {
            % for prop in data.longhands:
            % if prop.animatable:
            PropertyDeclaration::${prop.camel_case}(..)
                => Some(AnimatableLonghand::${prop.camel_case}),
            % endif
            % endfor
            // CSS-wide keyword and variable declarations carry the longhand id
            // they were declared for; resolve through that id.
            PropertyDeclaration::CSSWideKeyword(id, _) |
            PropertyDeclaration::WithVariables(id, _) => {
                match id {
                    % for prop in data.longhands:
                    % if prop.animatable:
                    LonghandId::${prop.camel_case} =>
                        Some(AnimatableLonghand::${prop.camel_case}),
                    % endif
                    % endfor
                    _ => None,
                }
            },
            _ => None,
        }
    }
}

/// Convert to nsCSSPropertyID.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From< &'a AnimatableLonghand> for nsCSSPropertyID {
    fn from(property: &'a AnimatableLonghand) -> nsCSSPropertyID {
        match *property {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatableLonghand::${prop.camel_case}
                => ${helpers.to_nscsspropertyid(prop.ident)},
            % endif
            % endfor
        }
    }
}

/// Convert to PropertyDeclarationId.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From<AnimatableLonghand> for PropertyDeclarationId<'a> {
    fn from(property: AnimatableLonghand) -> PropertyDeclarationId<'a> {
        match property {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatableLonghand::${prop.camel_case}
                => PropertyDeclarationId::Longhand(LonghandId::${prop.camel_case}),
            % endif
            % endfor
        }
    }
}

/// Returns true if this nsCSSPropertyID is one of the animatable properties.
#[cfg(feature = "gecko")]
pub fn nscsspropertyid_is_animatable(property: nsCSSPropertyID) -> bool {
    match property {
        % for prop in data.longhands + data.shorthands_except_all():
        % if prop.animatable:
        ${helpers.to_nscsspropertyid(prop.ident)} => true,
        % endif
        % endfor
        _ => false
    }
}

/// A given transition property, that is either `All`, a transitionable longhand property,
/// a shorthand with at least one transitionable longhand component, or an unsupported property.
// NB: This needs to be here because it needs all the longhands generated
// beforehand.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, Hash, PartialEq, ToCss)]
pub enum TransitionProperty {
    /// All, any transitionable property changing should generate a transition.
    All,
    % for prop in data.longhands + data.shorthands_except_all():
    % if prop.transitionable:
    /// ${prop.name}
    ${prop.camel_case},
    % endif
    % endfor
    /// Unrecognized property which could be any non-transitionable, custom property, or
    /// unknown property.
    Unsupported(CustomIdent)
}

no_viewport_percentage!(TransitionProperty);

impl ComputedValueAsSpecified for TransitionProperty {}

impl TransitionProperty {
    /// Iterates over each longhand property.
    pub fn each<F: FnMut(&TransitionProperty) -> ()>(mut cb: F) {
        % for prop in data.longhands:
        % if prop.transitionable:
        cb(&TransitionProperty::${prop.camel_case});
        % endif
        % endfor
    }

    /// Iterates over every longhand property that is not TransitionProperty::All, stopping and
    /// returning true when the provided callback returns true for the first time.
    pub fn any<F: FnMut(&TransitionProperty) -> bool>(mut cb: F) -> bool {
        % for prop in data.longhands:
        % if prop.transitionable:
        if cb(&TransitionProperty::${prop.camel_case}) {
            return true;
        }
        % endif
        % endfor
        false
    }

    /// Parse a transition-property value.
    pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        let ident = input.expect_ident()?;
        let supported = match_ignore_ascii_case! { &ident,
            "all" => Ok(Some(TransitionProperty::All)),
            % for prop in data.longhands + data.shorthands_except_all():
            % if prop.transitionable:
            "${prop.name}" => Ok(Some(TransitionProperty::${prop.camel_case})),
            % endif
            % endfor
            // "none" is an explicit parse error here, not merely an unknown ident.
            "none" => Err(()),
            _ => Ok(None),
        };

        match supported {
            Ok(Some(property)) => Ok(property),
            // Syntactically valid but unrecognized idents are preserved as Unsupported.
            Ok(None) => CustomIdent::from_ident(ident, &[]).map(TransitionProperty::Unsupported),
            Err(()) => Err(SelectorParseError::UnexpectedIdent(ident).into()),
        }
    }

    /// Return transitionable longhands of this shorthand TransitionProperty, except for "all".
    pub fn longhands(&self) -> &'static [TransitionProperty] {
        % for prop in data.shorthands_except_all():
        % if prop.transitionable:
        static ${prop.ident.upper()}: &'static [TransitionProperty] = &[
            % for sub in prop.sub_properties:
            % if sub.transitionable:
            TransitionProperty::${sub.camel_case},
            % endif
            % endfor
        ];
        % endif
        % endfor
        match *self {
            % for prop in data.shorthands_except_all():
            % if prop.transitionable:
            TransitionProperty::${prop.camel_case} => ${prop.ident.upper()},
            % endif
            % endfor
            _ => panic!("Not allowed to call longhands() for this TransitionProperty")
        }
    }

    /// Returns true if this TransitionProperty is a shorthand.
    pub fn is_shorthand(&self) -> bool {
        match *self {
            % for prop in data.shorthands_except_all():
            % if prop.transitionable:
            TransitionProperty::${prop.camel_case} => true,
            % endif
            % endfor
            _ => false
        }
    }
}

/// Convert to nsCSSPropertyID.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From< &'a TransitionProperty> for nsCSSPropertyID {
    fn from(transition_property: &'a TransitionProperty) -> nsCSSPropertyID {
        match *transition_property {
            % for prop in data.longhands + data.shorthands_except_all():
            % if prop.transitionable:
            TransitionProperty::${prop.camel_case}
                => ${helpers.to_nscsspropertyid(prop.ident)},
            % endif
            % endfor
            TransitionProperty::All => nsCSSPropertyID::eCSSPropertyExtra_all_properties,
            // Unsupported has no Gecko counterpart; converting it is a caller bug.
            _ => panic!("Unconvertable Servo transition property: {:?}", transition_property),
        }
    }
}

/// Convert nsCSSPropertyID to TransitionProperty
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl From<nsCSSPropertyID> for TransitionProperty {
    fn from(property: nsCSSPropertyID) -> TransitionProperty {
        match property {
            % for prop in data.longhands + data.shorthands_except_all():
            % if prop.transitionable:
            ${helpers.to_nscsspropertyid(prop.ident)}
                => TransitionProperty::${prop.camel_case},
            % else:
            // Known but non-transitionable properties round-trip as Unsupported.
            ${helpers.to_nscsspropertyid(prop.ident)}
                => TransitionProperty::Unsupported(CustomIdent(Atom::from("${prop.ident}"))),
            % endif
            % endfor
            nsCSSPropertyID::eCSSPropertyExtra_all_properties => TransitionProperty::All,
            _ => panic!("Unconvertable nsCSSPropertyID: {:?}", property),
        }
    }
}

/// Returns true if this nsCSSPropertyID is one of the transitionable properties.
#[cfg(feature = "gecko")]
pub fn nscsspropertyid_is_transitionable(property: nsCSSPropertyID) -> bool {
    match property {
        % for prop in data.longhands + data.shorthands_except_all():
        % if prop.transitionable:
        ${helpers.to_nscsspropertyid(prop.ident)} => true,
        % endif
        % endfor
        _ => false
    }
}

/// An animated property interpolation between two computed values for that
/// property.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatedProperty {
    % for prop in data.longhands:
    % if prop.animatable:
    <%
        if prop.is_animatable_with_computed_value:
            value_type = "longhands::{}::computed_value::T".format(prop.ident)
        else:
            value_type = prop.animation_value_type
    %>
    /// ${prop.name}
    ${prop.camel_case}(${value_type}, ${value_type}),
    % endif
    % endfor
}

impl AnimatedProperty {
    /// Get the name of this property.
    pub fn name(&self) -> &'static str {
        match *self {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatedProperty::${prop.camel_case}(..) => "${prop.name}",
            % endif
            % endfor
        }
    }

    /// Whether this interpolation does animate, that is, whether the start and
    /// end values are different.
    pub fn does_animate(&self) -> bool {
        match *self {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to,
            % endif
            % endfor
        }
    }

    /// Whether an animated property has the same end value as another.
    pub fn has_the_same_end_value_as(&self, other: &Self) -> bool {
        match (self, other) {
            % for prop in data.longhands:
            % if prop.animatable:
            (&AnimatedProperty::${prop.camel_case}(_, ref this_end_value),
             &AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => {
                this_end_value == other_end_value
            }
            % endif
            % endfor
            _ => false,
        }
    }

    /// Update `style` with the proper computed style corresponding to this
    /// animation at `progress`.
    pub fn update(&self, style: &mut ComputedValues, progress: f64) {
        match *self {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatedProperty::${prop.camel_case}(ref from, ref to) => {
                // https://w3c.github.io/web-animations/#discrete-animation-type
                % if prop.animation_value_type == "discrete":
                // Discrete values flip at the midpoint rather than interpolating.
                let value = if progress < 0.5 { from.clone() } else { to.clone() };
                % else:
                let value = match from.interpolate(to, progress) {
                    Ok(value) => value,
                    // Non-interpolable pair: leave the style untouched.
                    Err(()) => return,
                };
                % endif
                % if not prop.is_animatable_with_computed_value:
                let value: longhands::${prop.ident}::computed_value::T = value.into();
                % endif
                style.mutate_${prop.style_struct.ident.strip("_")}().set_${prop.ident}(value);
            }
            % endif
            % endfor
        }
    }

    /// Get an animatable value from a transition-property, an old style, and a
    /// new style.
    pub fn from_animatable_longhand(property: &AnimatableLonghand,
                                    old_style: &ComputedValues,
                                    new_style: &ComputedValues)
                                    -> AnimatedProperty {
        match *property {
            % for prop in data.longhands:
            % if prop.animatable:
            AnimatableLonghand::${prop.camel_case} => {
                AnimatedProperty::${prop.camel_case}(
                    old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}().into(),
                    new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}().into())
            }
            % endif
            % endfor
        }
    }
}

/// A collection of AnimationValue that were composed on an element.
/// This HashMap stores the values that are the last AnimationValue to be
/// composed for each TransitionProperty.
#[cfg(feature = "gecko")]
pub type AnimationValueMap = FnvHashMap<AnimatableLonghand, AnimationValue>;
#[cfg(feature = "gecko")]
unsafe impl HasFFI for AnimationValueMap {
    type FFIType = RawServoAnimationValueMap;
}
#[cfg(feature = "gecko")]
unsafe impl HasSimpleFFI for AnimationValueMap {}

/// An enum to represent a single computed value belonging to an animated
/// property in order to be interpolated with another one. When interpolating,
/// both values need to belong to the same property.
/// /// This is different to AnimatedProperty in the sense that AnimatedProperty /// also knows the final value to be used during the animation. /// /// This is to be used in Gecko integration code. /// /// FIXME: We need to add a path for custom properties, but that's trivial after /// this (is a similar path to that of PropertyDeclaration). #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub enum AnimationValue { % for prop in data.longhands: % if prop.animatable: /// ${prop.name} % if prop.is_animatable_with_computed_value: ${prop.camel_case}(longhands::${prop.ident}::computed_value::T), % else: ${prop.camel_case}(${prop.animation_value_type}), % endif % endif % endfor } impl AnimationValue { /// "Uncompute" this animation value in order to be used inside the CSS /// cascade. pub fn uncompute(&self) -> PropertyDeclaration { use properties::longhands; match *self { % for prop in data.longhands: % if prop.animatable: AnimationValue::${prop.camel_case}(ref from) => { PropertyDeclaration::${prop.camel_case}( % if prop.boxed: Box::new( % endif longhands::${prop.ident}::SpecifiedValue::from_computed_value( % if prop.is_animatable_with_computed_value: from % else: &from.clone().into() % endif )) % if prop.boxed: ) % endif } % endif % endfor } } /// Construct an AnimationValue from a property declaration pub fn from_declaration(decl: &PropertyDeclaration, context: &mut Context, initial: &ComputedValues) -> Option<Self> { use error_reporting::create_error_reporter; use properties::LonghandId; use properties::DeclaredValue; match *decl { % for prop in data.longhands: % if prop.animatable: PropertyDeclaration::${prop.camel_case}(ref val) => { % if prop.ident in SYSTEM_FONT_LONGHANDS and product == "gecko": if let Some(sf) = val.get_system() { longhands::system_font::resolve_system_font(sf, context); } % endif Some(AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: val.to_computed_value(context) % else: 
From::from(val.to_computed_value(context)) % endif )) }, % endif % endfor PropertyDeclaration::CSSWideKeyword(id, keyword) => { match id { // We put all the animatable properties first in the hopes // that it might increase match locality. % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let computed = match keyword { % if not prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Initial => { let initial_struct = initial.get_${prop.style_struct.name_lower}(); initial_struct.clone_${prop.ident}() }, % if prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Inherit => { let inherit_struct = context.inherited_style .get_${prop.style_struct.name_lower}(); inherit_struct.clone_${prop.ident}() }, }; % if not prop.is_animatable_with_computed_value: let computed = From::from(computed); % endif Some(AnimationValue::${prop.camel_case}(computed)) }, % endif % endfor % for prop in data.longhands: % if not prop.animatable: LonghandId::${prop.camel_case} => None, % endif % endfor } }, PropertyDeclaration::WithVariables(id, ref variables) => { let custom_props = context.style().custom_properties(); let reporter = create_error_reporter(); match id { % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let mut result = None; let quirks_mode = context.quirks_mode; ::properties::substitute_variables_${prop.ident}_slow( &variables.css, variables.first_token_type, &variables.url_data, variables.from_shorthand, &custom_props, &mut |v| { let declaration = match *v { DeclaredValue::Value(value) => { PropertyDeclaration::${prop.camel_case}(value.clone()) }, DeclaredValue::CSSWideKeyword(keyword) => { PropertyDeclaration::CSSWideKeyword(id, keyword) }, DeclaredValue::WithVariables(_) => unreachable!(), }; result = AnimationValue::from_declaration(&declaration, context, initial); }, &reporter, quirks_mode); result }, % else: LonghandId::${prop.camel_case} => None, % endif 
% endfor } }, _ => None // non animatable properties will get included because of shorthands. ignore. } } /// Get an AnimationValue for an AnimatableLonghand from a given computed values. pub fn from_computed_values(property: &AnimatableLonghand, computed_values: &ComputedValues) -> Self { match *property { % for prop in data.longhands: % if prop.animatable: AnimatableLonghand::${prop.camel_case} => { AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed_values.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}()) % else: From::from(computed_values.get_${prop.style_struct.ident.strip("_")}() .clone_${prop.ident}())) % endif } % endif % endfor } } } impl Animatable for AnimationValue { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { % if prop.animation_value_type == "discrete": if self_portion > other_portion { Ok(AnimationValue::${prop.camel_case}(from.clone())) } else { Ok(AnimationValue::${prop.camel_case}(to.clone())) } % else: from.add_weighted(to, self_portion, other_portion) .map(AnimationValue::${prop.camel_case}) % endif } % endif % endfor _ => { panic!("Expected weighted addition of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn add(&self, other: &Self) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": (&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.add(to).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected addition of computed values of the same \ property, got: {:?}, 
{:?}", self, other); } } } fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": (&AnimationValue::${prop.camel_case}(_), &AnimationValue::${prop.camel_case}(_)) => { Err(()) } % else: (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.accumulate(to, count).map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor _ => { panic!("Expected accumulation of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } fn get_zero_value(&self) -> Option<Self> { match self { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type == "discrete": &AnimationValue::${prop.camel_case}(_) => { None } % else: &AnimationValue::${prop.camel_case}(ref base) => { base.get_zero_value().map(AnimationValue::${prop.camel_case}) } % endif % endif % endfor } } fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type != "discrete": (&AnimationValue::${prop.camel_case}(ref from), &AnimationValue::${prop.camel_case}(ref to)) => { from.compute_distance(to) }, % else: (&AnimationValue::${prop.camel_case}(ref _from), &AnimationValue::${prop.camel_case}(ref _to)) => { Err(()) }, % endif % endif % endfor _ => { panic!("Expected compute_distance of computed values of the same \ property, got: {:?}, {:?}", self, other); } } } } /// A trait used to implement various procedures used during animation. pub trait Animatable: Sized { /// Performs a weighted sum of this value and |other|. This is used for /// interpolation and addition of animation values. fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()>; /// [Interpolates][interpolation] a value with another for a given property. 
/// /// [interpolation]: https://w3c.github.io/web-animations/#animation-interpolation fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> { self.add_weighted(other, 1.0 - progress, progress) } /// Returns the [sum][animation-addition] of this value and |other|. /// /// [animation-addition]: https://w3c.github.io/web-animations/#animation-addition fn add(&self, other: &Self) -> Result<Self, ()> { self.add_weighted(other, 1.0, 1.0) } /// [Accumulates][animation-accumulation] this value onto itself (|count| - 1) times then /// accumulates |other| onto the result. /// If |count| is zero, the result will be |other|. /// /// [animation-accumulation]: https://w3c.github.io/web-animations/#animation-accumulation fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { self.add_weighted(other, count as f64, 1.0) } /// Returns a value that, when added with an underlying value, will produce the underlying /// value. This is used for SMIL animation's "by-animation" where SMIL first interpolates from /// the zero value to the 'by' value, and then adds the result to the underlying value. /// /// This is not the necessarily the same as the initial value of a property. For example, the /// initial value of 'stroke-width' is 1, but the zero value is 0, since adding 1 to the /// underlying value will not produce the underlying value. fn get_zero_value(&self) -> Option<Self> { None } /// Compute distance between a value and another for a given property. fn compute_distance(&self, _other: &Self) -> Result<f64, ()> { Err(()) } /// In order to compute the Euclidean distance of a list or property value with multiple /// components, we need to compute squared distance for each element, so the vector can sum it /// and then get its squared root as the distance. 
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_distance(other).map(|d| d * d)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-repeatable-list
pub trait RepeatableListAnimatable: Animatable {}

impl RepeatableListAnimatable for LengthOrPercentage {}
impl RepeatableListAnimatable for Either<f32, LengthOrPercentage> {}

// Implements Animatable for list containers of repeatable-list values.
// Per the repeatable-list rules, the two lists are cycled up to the least
// common multiple of their lengths before pairwise interpolation.
macro_rules! repeated_vec_impl {
    ($($ty:ty),*) => {
        $(impl<T: RepeatableListAnimatable> Animatable for $ty {
            fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
                -> Result<Self, ()> {
                use num_integer::lcm;
                let len = lcm(self.len(), other.len());
                self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| {
                    me.add_weighted(you, self_portion, other_portion)
                }).collect()
            }

            #[inline]
            fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
                self.compute_squared_distance(other).map(|sd| sd.sqrt())
            }

            #[inline]
            fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
                use num_integer::lcm;
                let len = lcm(self.len(), other.len());
                self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| {
                    me.compute_squared_distance(you)
                }).sum()
            }
        })*
    };
}

repeated_vec_impl!(SmallVec<[T; 1]>, Vec<T>);

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for Au {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Weighted sum in f64, rounded back to app units.
        Ok(Au((self.0 as f64 * self_portion + other.0 as f64 * other_portion).round() as i32))
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(Au(0)) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }
}

impl <T> Animatable for Option<T>
    where T: Animatable,
{
    #[inline]
    fn add_weighted(&self, other: &Option<T>, self_portion: f64, other_portion: f64)
        -> Result<Option<T>, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                // NOTE: an inner Err is flattened to Ok(None) here via `.ok()`.
                Ok(this.add_weighted(other, self_portion, other_portion).ok())
            }
            (&None, &None) => Ok(None),
            // Mismatched presence is not animatable.
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                this.compute_distance(other)
            },
            (&None, &None) => Ok(0.0),
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                this.compute_squared_distance(other)
            },
            (&None, &None) => Ok(0.0),
            _ => Err(()),
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f32 {
    #[inline]
    fn add_weighted(&self, other: &f32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Accumulate in f64 to limit rounding error, then narrow back.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion) as f32)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0.) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f64 {
    #[inline]
    fn add_weighted(&self, other: &f64, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(*self * self_portion + *other * other_portion)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0.)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs())
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-integer
impl Animatable for i32 {
    #[inline]
    fn add_weighted(&self, other: &i32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Integers interpolate in f64 and round to the nearest integer.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion).round() as i32)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for Angle {
    #[inline]
    fn add_weighted(&self, other: &Angle, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            % for angle_type in [ 'Degree', 'Gradian', 'Turn' ]:
            (Angle::${angle_type}(val1), Angle::${angle_type}(val2)) => {
                // Same unit on both sides: interpolate in that unit directly.
                Ok(Angle::${angle_type}(
                    try!(val1.add_weighted(&val2, self_portion, other_portion))
                ))
            }
            % endfor
            _ => {
                // Mixed units: fall back to interpolating in radians.
                self.radians()
                    .add_weighted(&other.radians(), self_portion, other_portion)
                    .map(Angle::from_radians)
            }
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-percentage
impl Animatable for Percentage {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Percentage((self.0 as f64 * self_portion + other.0 as f64 * other_portion) as f32))
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(Percentage(0.)) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((self.0 as f64 - other.0 as f64).abs())
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-visibility
impl Animatable for Visibility {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // `visible` wins whenever its side carries positive weight; a pair of
        // non-visible values does not interpolate.
        match (*self, *other) {
            (Visibility::visible, _) => {
                Ok(if self_portion > 0.0 { *self } else { *other })
            },
            (_, Visibility::visible) => {
                Ok(if other_portion > 0.0 { *other } else { *self })
            },
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        // Discrete metric: equal values are distance 0, anything else 1.
        if *self == *other { Ok(0.0) } else { Ok(1.0) }
    }
}

impl<T: Animatable + Copy> Animatable for Size2D<T> {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        let width = self.width.add_weighted(&other.width, self_portion, other_portion)?;
        let height = self.height.add_weighted(&other.height, self_portion, other_portion)?;
        Ok(Size2D::new(width, height))
    }
}

impl<T: Animatable + Copy> Animatable for Point2D<T> {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        let x = self.x.add_weighted(&other.x, self_portion, other_portion)?;
        let y = self.y.add_weighted(&other.y, self_portion, other_portion)?;
        Ok(Point2D::new(x, y))
    }
}

impl Animatable for BorderCornerRadius {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(GenericBorderCornerRadius)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Euclidean metric over the two corner axes.
        Ok(self.0.width.compute_squared_distance(&other.0.width)? +
           self.0.height.compute_squared_distance(&other.0.height)?)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-length
impl Animatable for VerticalAlign {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Only length-valued vertical-align pairs are interpolable here;
        // keyword values (and percentage pairs) fall through to Err.
        match (*self, *other) {
            (VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref this)),
             VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref other))) => {
                this.add_weighted(other, self_portion, other_portion).map(|value| {
                    VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(value))
                })
            }
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (VerticalAlign::LengthOrPercentage(ref this),
             VerticalAlign::LengthOrPercentage(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(()),
        }
    }
}

impl Animatable for BackgroundSizeList {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(BackgroundSizeList)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_squared_distance(&other.0)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for CalcLengthOrPercentage {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Interpolates one optional calc() component; a side that is missing
        // while the other is present is treated as the type's default (zero).
        fn add_weighted_half<T>(this: Option<T>,
                                other: Option<T>,
                                self_portion: f64,
                                other_portion: f64)
                                -> Result<Option<T>, ()>
            where T: Default + Animatable,
        {
            match (this, other) {
                (None, None) => Ok(None),
                (this, other) => {
                    let this = this.unwrap_or(T::default());
                    let other = other.unwrap_or(T::default());
                    this.add_weighted(&other, self_portion, other_portion).map(Some)
                }
            }
        }

        let length = self.unclamped_length().add_weighted(&other.unclamped_length(),
                                                          self_portion, other_portion)?;
        let percentage =
add_weighted_half(self.percentage, other.percentage, self_portion, other_portion)?; Ok(CalcLengthOrPercentage::with_clamping_mode(length, percentage, self.clamping_mode)) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let length_diff = (self.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (self.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentage { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Length) } (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentage::Percentage) } (this, other) => { // Special handling for zero values since these should not require calc(). if this.is_definitely_zero() { return other.add_weighted(&other, 0., other_portion) } else if other.is_definitely_zero() { return this.add_weighted(self, self_portion, 0.) 
} let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.add_weighted(&other, self_portion, other_portion) .map(LengthOrPercentage::Calc) } } } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(LengthOrPercentage::zero()) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentage::Length(ref this), LengthOrPercentage::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentage::Percentage(ref this), LengthOrPercentage::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: CalcLengthOrPercentage = From::from(this); let other: CalcLengthOrPercentage = From::from(other); let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrAuto { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrAuto::Length) } 
(LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrAuto::Percentage) } (LengthOrPercentageOrAuto::Auto, LengthOrPercentageOrAuto::Auto) => { Ok(LengthOrPercentageOrAuto::Auto) } (this, other) => { let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrAuto::Calc(result)), _ => Err(()), } } } } #[inline] fn get_zero_value(&self) -> Option<Self> { match *self { LengthOrPercentageOrAuto::Length(_) | LengthOrPercentageOrAuto::Percentage(_) | LengthOrPercentageOrAuto::Calc(_) => { Some(LengthOrPercentageOrAuto::Length(Au(0))) }, LengthOrPercentageOrAuto::Auto => { None }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: Option<CalcLengthOrPercentage> = 
From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); if let (Some(this), Some(other)) = (this, other) { let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } else { Err(()) } } } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrNone { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Length) } (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Percentage) } (LengthOrPercentageOrNone::None, LengthOrPercentageOrNone::None) => { Ok(LengthOrPercentageOrNone::None) } (this, other) => { let this = <Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrNone::Calc(result)), _ => Err(()), } }, } } #[inline] fn get_zero_value(&self) -> Option<Self> { match *self { LengthOrPercentageOrNone::Length(_) | LengthOrPercentageOrNone::Percentage(_) | LengthOrPercentageOrNone::Calc(_) => { Some(LengthOrPercentageOrNone::Length(Au(0))) }, LengthOrPercentageOrNone::None => { None }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref 
other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this = <Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); this.compute_distance(&other) }, } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MozLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MozLength::LengthOrPercentageOrAuto) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MaxLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MaxLength::LengthOrPercentageOrNone(ref this), MaxLength::LengthOrPercentageOrNone(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MaxLength::LengthOrPercentageOrNone) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (MaxLength::LengthOrPercentageOrNone(ref this), MaxLength::LengthOrPercentageOrNone(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } /// http://dev.w3.org/csswg/css-transitions/#animtype-font-weight impl Animatable for FontWeight { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let a = (*self as u32) as f64; let b = (*other as u32) as f64; const NORMAL: f64 = 400.; let 
weight = (a - NORMAL) * self_portion + (b - NORMAL) * other_portion + NORMAL; Ok(if weight < 150. { FontWeight::Weight100 } else if weight < 250. { FontWeight::Weight200 } else if weight < 350. { FontWeight::Weight300 } else if weight < 450. { FontWeight::Weight400 } else if weight < 550. { FontWeight::Weight500 } else if weight < 650. { FontWeight::Weight600 } else if weight < 750. { FontWeight::Weight700 } else if weight < 850. { FontWeight::Weight800 } else { FontWeight::Weight900 }) } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(FontWeight::Weight400) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { let a = (*self as u32) as f64; let b = (*other as u32) as f64; a.compute_distance(&b) } } /// https://drafts.csswg.org/css-fonts/#font-stretch-prop impl Animatable for FontStretch { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let from = f64::from(*self); let to = f64::from(*other); // FIXME: When `const fn` is available in release rust, make |normal|, below, const. let normal = f64::from(FontStretch::normal); let result = (from - normal) * self_portion + (to - normal) * other_portion + normal; Ok(result.into()) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { let from = f64::from(*self); let to = f64::from(*other); from.compute_distance(&to) } } /// We should treat font stretch as real number in order to interpolate this property. 
/// https://drafts.csswg.org/css-fonts-3/#font-stretch-animation impl From<FontStretch> for f64 { fn from(stretch: FontStretch) -> f64 { use self::FontStretch::*; match stretch { ultra_condensed => 1.0, extra_condensed => 2.0, condensed => 3.0, semi_condensed => 4.0, normal => 5.0, semi_expanded => 6.0, expanded => 7.0, extra_expanded => 8.0, ultra_expanded => 9.0, } } } impl Into<FontStretch> for f64 { fn into(self) -> FontStretch { use properties::longhands::font_stretch::computed_value::T::*; let index = (self + 0.5).floor().min(9.0).max(1.0); static FONT_STRETCH_ENUM_MAP: [FontStretch; 9] = [ ultra_condensed, extra_condensed, condensed, semi_condensed, normal, semi_expanded, expanded, extra_expanded, ultra_expanded ]; FONT_STRETCH_ENUM_MAP[(index - 1.0) as usize] } } // Like std::macros::try!, but for Option<>. macro_rules! option_try { ($e:expr) => (match $e { Some(e) => e, None => return None }) } /// https://drafts.csswg.org/css-transitions/#animtype-simple-list impl<H: Animatable, V: Animatable> Animatable for generic_position::Position<H, V> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(generic_position::Position { horizontal: self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?, vertical: self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?, }) } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(generic_position::Position { horizontal: option_try!(self.horizontal.get_zero_value()), vertical: option_try!(self.vertical.get_zero_value()), }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.horizontal.compute_squared_distance(&other.horizontal)? + self.vertical.compute_squared_distance(&other.vertical)?) 
} } impl<H, V> RepeatableListAnimatable for generic_position::Position<H, V> where H: RepeatableListAnimatable, V: RepeatableListAnimatable {} /// https://drafts.csswg.org/css-transitions/#animtype-rect impl Animatable for ClipRect { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(ClipRect { top: self.top.add_weighted(&other.top, self_portion, other_portion)?, right: self.right.add_weighted(&other.right, self_portion, other_portion)?, bottom: self.bottom.add_weighted(&other.bottom, self_portion, other_portion)?, left: self.left.add_weighted(&other.left, self_portion, other_portion)?, }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let list = [ self.top.compute_distance(&other.top)?, self.right.compute_distance(&other.right)?, self.bottom.compute_distance(&other.bottom)?, self.left.compute_distance(&other.left)? ]; Ok(list.iter().fold(0.0f64, |sum, diff| sum + diff * diff)) } } /// Check if it's possible to do a direct numerical interpolation /// between these two transform lists. 
/// http://dev.w3.org/csswg/css-transforms/#transform-transform-animation
//
// Two lists can be interpolated pairwise only when they have the same length
// and every pair of operations shares the same primitive type.
fn can_interpolate_list(from_list: &[TransformOperation],
                        to_list: &[TransformOperation]) -> bool {
    // Lists must be equal length
    if from_list.len() != to_list.len() {
        return false;
    }

    // Each transform operation must match primitive type in other list
    for (from, to) in from_list.iter().zip(to_list) {
        match (from, to) {
            (&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) |
            (&TransformOperation::Skew(..), &TransformOperation::Skew(..)) |
            (&TransformOperation::Translate(..), &TransformOperation::Translate(..)) |
            (&TransformOperation::Scale(..), &TransformOperation::Scale(..)) |
            (&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) |
            (&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => {}
            _ => {
                return false;
            }
        }
    }

    true
}

/// Build an equivalent 'identity transform function list' based
/// on an existing transform list.
/// http://dev.w3.org/csswg/css-transforms/#none-transform-animation
//
// Each operation maps to the identity of its own primitive (zero translation,
// unit scale, zero-angle skew/rotation, identity matrix).
fn build_identity_transform_list(list: &[TransformOperation]) -> Vec<TransformOperation> {
    let mut result = vec!();

    for operation in list {
        match *operation {
            TransformOperation::Matrix(..) => {
                let identity = ComputedMatrix::identity();
                result.push(TransformOperation::Matrix(identity));
            }
            // NOTE(review): this arm pushes nothing, so the "identity" list can be
            // shorter than `list` when it contains MatrixWithPercents — confirm intended.
            TransformOperation::MatrixWithPercents(..) => {}
            TransformOperation::Skew(..) => {
                result.push(TransformOperation::Skew(Angle::zero(), Angle::zero()))
            }
            TransformOperation::Translate(..) => {
                result.push(TransformOperation::Translate(LengthOrPercentage::zero(),
                                                          LengthOrPercentage::zero(),
                                                          Au(0)));
            }
            TransformOperation::Scale(..) => {
                // The identity scale is 1, not 0.
                result.push(TransformOperation::Scale(1.0, 1.0, 1.0));
            }
            TransformOperation::Rotate(..) => {
                result.push(TransformOperation::Rotate(0.0, 0.0, 1.0, Angle::zero()));
            }
            TransformOperation::Perspective(..) |
            TransformOperation::AccumulateMatrix { .. } => {
                // Perspective: We convert a perspective function into an equivalent
                // ComputedMatrix, and then decompose/interpolate/recompose these matrices.
                // AccumulateMatrix: We do interpolation on AccumulateMatrix by reading it as a
                // ComputedMatrix (with layout information), and then do matrix interpolation.
                //
                // Therefore, we use an identity matrix to represent the identity transform list.
                // http://dev.w3.org/csswg/css-transforms/#identity-transform-function
                let identity = ComputedMatrix::identity();
                result.push(TransformOperation::Matrix(identity));
            }
            TransformOperation::InterpolateMatrix { .. } => {
                panic!("Building the identity matrix for InterpolateMatrix is not supported");
            }
        }
    }

    result
}

/// A wrapper for calling add_weighted that interpolates the distance of the two values from
/// an initial_value and uses that to produce an interpolated value.
/// This is used for values such as 'scale' where the initial value is 1 and where if we interpolate
/// the absolute values, we will produce odd results for accumulation.
//
// Computes: ((a - initial) * a_portion + (b - initial) * b_portion) + initial,
// expressed entirely through `add_weighted` so it works for any Animatable T.
fn add_weighted_with_initial_val<T: Animatable>(a: &T,
                                                b: &T,
                                                a_portion: f64,
                                                b_portion: f64,
                                                initial_val: &T) -> Result<T, ()> {
    // a - initial_val (weights 1.0 and -1.0)
    let a = a.add_weighted(&initial_val, 1.0, -1.0)?;
    // b - initial_val
    let b = b.add_weighted(&initial_val, 1.0, -1.0)?;
    // Weighted sum of the two offsets.
    let result = a.add_weighted(&b, a_portion, b_portion)?;
    // Add initial_val back in.
    result.add_weighted(&initial_val, 1.0, 1.0)
}

/// Add two transform lists.
/// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
//
// If the two lists pair up (see can_interpolate_list), interpolate them
// operation-by-operation; otherwise fall back to an InterpolateMatrix
// placeholder that is resolved later with layout information.
fn add_weighted_transform_lists(from_list: &[TransformOperation],
                                to_list: &[TransformOperation],
                                self_portion: f64,
                                other_portion: f64) -> TransformList {
    let mut result = vec![];

    if can_interpolate_list(from_list, to_list) {
        for (from, to) in from_list.iter().zip(to_list) {
            match (from, to) {
                (&TransformOperation::Matrix(from),
                 &TransformOperation::Matrix(_to)) => {
                    let sum = from.add_weighted(&_to, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                (&TransformOperation::MatrixWithPercents(_),
                 &TransformOperation::MatrixWithPercents(_)) => {
                    // We don't add_weighted `-moz-transform` matrices yet.
                    // They contain percentage values.
                    {}
                }
                (&TransformOperation::Skew(fx, fy),
                 &TransformOperation::Skew(tx, ty)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Skew(ix, iy));
                }
                (&TransformOperation::Translate(fx, fy, fz),
                 &TransformOperation::Translate(tx, ty, tz)) => {
                    let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap();
                    let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap();
                    let iz = fz.add_weighted(&tz, self_portion, other_portion).unwrap();
                    result.push(TransformOperation::Translate(ix, iy, iz));
                }
                (&TransformOperation::Scale(fx, fy, fz),
                 &TransformOperation::Scale(tx, ty, tz)) => {
                    // Scale interpolates about its identity value 1, not 0 — see
                    // add_weighted_with_initial_val.
                    let ix = add_weighted_with_initial_val(&fx, &tx, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iy = add_weighted_with_initial_val(&fy, &ty, self_portion,
                                                           other_portion, &1.0).unwrap();
                    let iz = add_weighted_with_initial_val(&fz, &tz, self_portion,
                                                           other_portion, &1.0).unwrap();
                    result.push(TransformOperation::Scale(ix, iy, iz));
                }
                (&TransformOperation::Rotate(fx, fy, fz, fa),
                 &TransformOperation::Rotate(tx, ty, tz, ta)) => {
                    // Normalize both rotation axes before comparing them.
                    let norm_f = ((fx * fx) + (fy * fy) + (fz * fz)).sqrt();
                    let norm_t = ((tx * tx) + (ty * ty) + (tz * tz)).sqrt();
                    let (fx, fy, fz) = (fx / norm_f, fy / norm_f, fz / norm_f);
                    let (tx, ty, tz) = (tx / norm_t, ty / norm_t, tz / norm_t);
                    if fx == tx && fy == ty && fz == tz {
                        // Same axis: interpolate the angle directly.
                        let ia = fa.add_weighted(&ta, self_portion, other_portion).unwrap();
                        result.push(TransformOperation::Rotate(fx, fy, fz, ia));
                    } else {
                        // Different axes: convert to matrices and interpolate those.
                        let matrix_f = rotate_to_matrix(fx, fy, fz, fa);
                        let matrix_t = rotate_to_matrix(tx, ty, tz, ta);
                        let sum = matrix_f.add_weighted(&matrix_t, self_portion, other_portion)
                                          .unwrap();
                        result.push(TransformOperation::Matrix(sum));
                    }
                }
                (&TransformOperation::Perspective(fd),
                 &TransformOperation::Perspective(_td)) => {
                    // perspective(d) is equivalent to a matrix with m43 = -1/d.
                    let mut fd_matrix = ComputedMatrix::identity();
                    let mut td_matrix = ComputedMatrix::identity();
                    fd_matrix.m43 = -1. / fd.to_f32_px();
                    td_matrix.m43 = -1. / _td.to_f32_px();
                    let sum = fd_matrix.add_weighted(&td_matrix, self_portion, other_portion)
                                       .unwrap();
                    result.push(TransformOperation::Matrix(sum));
                }
                _ => {
                    // This should be unreachable due to the can_interpolate_list() call.
                    unreachable!();
                }
            }
        }
    } else {
        use values::specified::Percentage;
        let from_transform_list = TransformList(Some(from_list.to_vec()));
        let to_transform_list = TransformList(Some(to_list.to_vec()));
        result.push(
            TransformOperation::InterpolateMatrix { from_list: from_transform_list,
                                                    to_list: to_transform_list,
                                                    progress: Percentage(other_portion as f32) });
    }

    TransformList(Some(result))
}

/// https://drafts.csswg.org/css-transforms/#Rotate3dDefined
//
// Expands rotate3d(x, y, z, a) into its 4x4 matrix form; (x, y, z) is assumed
// to be a unit vector (callers normalize before calling).
fn rotate_to_matrix(x: f32, y: f32, z: f32, a: Angle) -> ComputedMatrix {
    let half_rad = a.radians() / 2.0;
    let sc = (half_rad).sin() * (half_rad).cos();
    let sq = (half_rad).sin().powi(2);

    ComputedMatrix {
        m11: 1.0 - 2.0 * (y * y + z * z) * sq,
        m12: 2.0 * (x * y * sq - z * sc),
        m13: 2.0 * (x * z * sq + y * sc),
        m14: 0.0,

        m21: 2.0 * (x * y * sq + z * sc),
        m22: 1.0 - 2.0 * (x * x + z * z) * sq,
        m23: 2.0 * (y * z * sq - x * sc),
        m24: 0.0,

        m31: 2.0 * (x * z * sq - y * sc),
        m32: 2.0 * (y * z * sq + x * sc),
        m33: 1.0 - 2.0 * (x * x + y * y) * sq,
        m34: 0.0,

        m41: 0.0,
        m42: 0.0,
        m43: 0.0,
        m44: 1.0
    }
}

/// A 2d matrix for interpolation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
pub struct InnerMatrix2D {
    pub m11: CSSFloat, pub m12: CSSFloat,
    pub m21: CSSFloat, pub m22: CSSFloat,
}

/// A 2d translation function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate2D(f32, f32);

/// A 2d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale2D(f32, f32);

/// A decomposed 2d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed2D {
    /// The translation function.
    pub translate: Translate2D,
    /// The scale function.
    pub scale: Scale2D,
    /// The rotation angle.
    pub angle: f32,
    /// The inner matrix.
pub matrix: InnerMatrix2D,
}

impl Animatable for InnerMatrix2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(InnerMatrix2D {
            // Diagonal entries interpolate about the identity value 1.0.
            m11: add_weighted_with_initial_val(&self.m11, &other.m11,
                                               self_portion, other_portion, &1.0)?,
            m12: self.m12.add_weighted(&other.m12, self_portion, other_portion)?,
            m21: self.m21.add_weighted(&other.m21, self_portion, other_portion)?,
            m22: add_weighted_with_initial_val(&self.m22, &other.m22,
                                               self_portion, other_portion, &1.0)?,
        })
    }
}

impl Animatable for Translate2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Translate2D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale2D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Scale factors interpolate about the identity value 1.0.
        Ok(Scale2D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed2D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // If x-axis of one is flipped, and y-axis of the other,
        // convert to an unflipped rotation.
        let mut scale = self.scale;
        let mut angle = self.angle;
        let mut other_angle = other.angle;
        if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) {
            scale.0 = -scale.0;
            scale.1 = -scale.1;
            angle += if angle < 0.0 {180.} else {-180.};
        }

        // Don't rotate the long way around.
        if angle == 0.0 {
            angle = 360.
        }
        if other_angle == 0.0 {
            other_angle = 360.
        }

        // Keep the two angles within 180 degrees of each other so interpolation
        // takes the short arc.
        if (angle - other_angle).abs() > 180. {
            if angle > other_angle {
                angle -= 360.
            }
            else{
                other_angle -= 360.
            }
        }

        // Interpolate all values.
        let translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        let scale = scale.add_weighted(&other.scale, self_portion, other_portion)?;
        let angle = angle.add_weighted(&other_angle, self_portion, other_portion)?;
        let matrix = self.matrix.add_weighted(&other.matrix, self_portion, other_portion)?;

        Ok(MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: matrix,
        })
    }
}

impl Animatable for ComputedMatrix {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        if self.is_3d() || other.is_3d() {
            // 3D path: decompose, interpolate components, recompose.
            let decomposed_from = decompose_3d_matrix(*self);
            let decomposed_to = decompose_3d_matrix(*other);
            match (decomposed_from, decomposed_to) {
                (Ok(from), Ok(to)) => {
                    let sum = from.add_weighted(&to, self_portion, other_portion)?;
                    Ok(ComputedMatrix::from(sum))
                },
                _ => {
                    // A matrix failed to decompose: snap to whichever endpoint
                    // carries the larger weight.
                    let result = if self_portion > other_portion {*self} else {*other};
                    Ok(result)
                }
            }
        } else {
            // 2D path: decomposition cannot fail.
            let decomposed_from = MatrixDecomposed2D::from(*self);
            let decomposed_to = MatrixDecomposed2D::from(*other);
            let sum = decomposed_from.add_weighted(&decomposed_to, self_portion, other_portion)?;
            Ok(ComputedMatrix::from(sum))
        }
    }
}

impl From<ComputedMatrix> for MatrixDecomposed2D {
    /// Decompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix
    fn from(matrix: ComputedMatrix) -> MatrixDecomposed2D {
        let mut row0x = matrix.m11;
        let mut row0y = matrix.m12;
        let mut row1x = matrix.m21;
        let mut row1y = matrix.m22;

        let translate = Translate2D(matrix.m41, matrix.m42);
        let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(),
                                (row1x * row1x + row1y * row1y).sqrt());

        // If determinant is negative, one axis was flipped.
        let determinant = row0x * row1y - row0y * row1x;
        if determinant < 0. {
            if row0x < row1y {
                scale.0 = -scale.0;
            } else {
                scale.1 = -scale.1;
            }
        }

        // Renormalize matrix to remove scale.
        if scale.0 != 0.0 {
            row0x *= 1. / scale.0;
            row0y *= 1. / scale.0;
        }
        if scale.1 != 0.0 {
            row1x *= 1. / scale.1;
            row1y *= 1. / scale.1;
        }

        // Compute rotation and renormalize matrix.
        let mut angle = row0y.atan2(row0x);
        if angle != 0.0 {
            // Rotate the rows back by -angle to factor the rotation out.
            let sn = -row0y;
            let cs = row0x;
            let m11 = row0x;
            let m12 = row0y;
            let m21 = row1x;
            let m22 = row1y;
            row0x = cs * m11 + sn * m21;
            row0y = cs * m12 + sn * m22;
            row1x = -sn * m11 + cs * m21;
            row1y = -sn * m12 + cs * m22;
        }

        let m = InnerMatrix2D {
            m11: row0x, m12: row0y,
            m21: row1x, m22: row1y,
        };

        // Convert into degrees because our rotation functions expect it.
        angle = angle.to_degrees();
        MatrixDecomposed2D {
            translate: translate,
            scale: scale,
            angle: angle,
            matrix: m,
        }
    }
}

impl From<MatrixDecomposed2D> for ComputedMatrix {
    /// Recompose a 2D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix
    fn from(decomposed: MatrixDecomposed2D) -> ComputedMatrix {
        let mut computed_matrix = ComputedMatrix::identity();
        computed_matrix.m11 = decomposed.matrix.m11;
        computed_matrix.m12 = decomposed.matrix.m12;
        computed_matrix.m21 = decomposed.matrix.m21;
        computed_matrix.m22 = decomposed.matrix.m22;

        // Translate matrix.
        computed_matrix.m41 = decomposed.translate.0;
        computed_matrix.m42 = decomposed.translate.1;

        // Rotate matrix.
        let angle = decomposed.angle.to_radians();
        let cos_angle = angle.cos();
        let sin_angle = angle.sin();

        let mut rotate_matrix = ComputedMatrix::identity();
        rotate_matrix.m11 = cos_angle;
        rotate_matrix.m12 = sin_angle;
        rotate_matrix.m21 = -sin_angle;
        rotate_matrix.m22 = cos_angle;

        // Multiplication of computed_matrix and rotate_matrix
        computed_matrix = multiply(rotate_matrix, computed_matrix);

        // Scale matrix.
        computed_matrix.m11 *= decomposed.scale.0;
        computed_matrix.m12 *= decomposed.scale.0;
        computed_matrix.m21 *= decomposed.scale.1;
        computed_matrix.m22 *= decomposed.scale.1;
        computed_matrix
    }
}

#[cfg(feature = "gecko")]
impl<'a> From< &'a RawGeckoGfxMatrix4x4> for ComputedMatrix {
    fn from(m: &'a RawGeckoGfxMatrix4x4) -> ComputedMatrix {
        // Gecko stores matrices row-major as a flat [f32; 16].
        ComputedMatrix {
            m11: m[0],  m12: m[1],  m13: m[2],  m14: m[3],
            m21: m[4],  m22: m[5],  m23: m[6],  m24: m[7],
            m31: m[8],  m32: m[9],  m33: m[10], m34: m[11],
            m41: m[12], m42: m[13], m43: m[14], m44: m[15],
        }
    }
}

#[cfg(feature = "gecko")]
impl From<ComputedMatrix> for RawGeckoGfxMatrix4x4 {
    fn from(matrix: ComputedMatrix) -> RawGeckoGfxMatrix4x4 {
        [ matrix.m11, matrix.m12, matrix.m13, matrix.m14,
          matrix.m21, matrix.m22, matrix.m23, matrix.m24,
          matrix.m31, matrix.m32, matrix.m33, matrix.m34,
          matrix.m41, matrix.m42, matrix.m43, matrix.m44 ]
    }
}

/// A 3d translation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Translate3D(f32, f32, f32);

/// A 3d scale function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Scale3D(f32, f32, f32);

/// A 3d skew function.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Skew(f32, f32, f32);

/// A 3d perspective transformation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Perspective(f32, f32, f32, f32);

/// A quaternion used to represent a rotation.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Quaternion(f32, f32, f32, f32);

/// A decomposed 3d matrix.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct MatrixDecomposed3D {
    /// A translation function.
    pub translate: Translate3D,
    /// A scale function.
    pub scale: Scale3D,
    /// The skew component of the transformation.
    pub skew: Skew,
    /// The perspective component of the transformation.
pub perspective: Perspective, /// The quaternion used to represent the rotation. pub quaternion: Quaternion, } /// Decompose a 3D matrix. /// https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix fn decompose_3d_matrix(mut matrix: ComputedMatrix) -> Result<MatrixDecomposed3D, ()> { // Normalize the matrix. if matrix.m44 == 0.0 { return Err(()); } let scaling_factor = matrix.m44; % for i in range(1, 5): % for j in range(1, 5): matrix.m${i}${j} /= scaling_factor; % endfor % endfor // perspective_matrix is used to solve for perspective, but it also provides // an easy way to test for singularity of the upper 3x3 component. let mut perspective_matrix = matrix; % for i in range(1, 4): perspective_matrix.m${i}4 = 0.0; % endfor perspective_matrix.m44 = 1.0; if perspective_matrix.determinant() == 0.0 { return Err(()); } // First, isolate perspective. let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 { let right_hand_side: [f32; 4] = [ matrix.m14, matrix.m24, matrix.m34, matrix.m44 ]; perspective_matrix = perspective_matrix.inverse().unwrap(); // Transpose perspective_matrix perspective_matrix = ComputedMatrix { % for i in range(1, 5): % for j in range(1, 5): m${i}${j}: perspective_matrix.m${j}${i}, % endfor % endfor }; // Multiply right_hand_side with perspective_matrix let mut tmp: [f32; 4] = [0.0; 4]; % for i in range(1, 5): tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) + (right_hand_side[1] * perspective_matrix.m2${i}) + (right_hand_side[2] * perspective_matrix.m3${i}) + (right_hand_side[3] * perspective_matrix.m4${i}); % endfor Perspective(tmp[0], tmp[1], tmp[2], tmp[3]) } else { Perspective(0.0, 0.0, 0.0, 1.0) }; // Next take care of translation let translate = Translate3D ( matrix.m41, matrix.m42, matrix.m43 ); // Now get scale and shear. 
'row' is a 3 element array of 3 component vectors let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3]; % for i in range(1, 4): row[${i - 1}][0] = matrix.m${i}1; row[${i - 1}][1] = matrix.m${i}2; row[${i - 1}][2] = matrix.m${i}3; % endfor // Compute X scale factor and normalize first row. let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt(); let mut scale = Scale3D(row0len, 0.0, 0.0); row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len]; // Compute XY shear factor and make 2nd row orthogonal to 1st. let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0); row[1] = combine(row[1], row[0], 1.0, -skew.0); // Now, compute Y scale and normalize 2nd row. let row1len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt(); scale.1 = row1len; row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len]; skew.0 /= scale.1; // Compute XZ and YZ shears, orthogonalize 3rd row skew.1 = dot(row[0], row[2]); row[2] = combine(row[2], row[0], 1.0, -skew.1); skew.2 = dot(row[1], row[2]); row[2] = combine(row[2], row[1], 1.0, -skew.2); // Next, get Z scale and normalize 3rd row. let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt(); scale.2 = row2len; row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len]; skew.1 /= scale.2; skew.2 /= scale.2; // At this point, the matrix (in rows) is orthonormal. // Check for a coordinate system flip. If the determinant // is -1, then negate the matrix and the scaling factors. 
let pdum3 = cross(row[1], row[2]); if dot(row[0], pdum3) < 0.0 { % for i in range(3): scale.${i} *= -1.0; row[${i}][0] *= -1.0; row[${i}][1] *= -1.0; row[${i}][2] *= -1.0; % endfor } // Now, get the rotations out let mut quaternion = Quaternion ( 0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0)).sqrt() ); if row[2][1] > row[1][2] { quaternion.0 = -quaternion.0 } if row[0][2] > row[2][0] { quaternion.1 = -quaternion.1 } if row[1][0] > row[0][1] { quaternion.2 = -quaternion.2 } Ok(MatrixDecomposed3D { translate: translate, scale: scale, skew: skew, perspective: perspective, quaternion: quaternion }) } // Combine 2 point. fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] { [ (ascl * a[0]) + (bscl * b[0]), (ascl * a[1]) + (bscl * b[1]), (ascl * a[2]) + (bscl * b[2]) ] } // Dot product. fn dot(a: [f32; 3], b: [f32; 3]) -> f32 { a[0] * b[0] + a[1] * b[1] + a[2] * b[2] } // Cross product. 
fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] {
    [
        row1[1] * row2[2] - row1[2] * row2[1],
        row1[2] * row2[0] - row1[0] * row2[2],
        row1[0] * row2[1] - row1[1] * row2[0]
    ]
}

// Component-wise weighted addition of the translation component.
impl Animatable for Translate3D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Translate3D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

// Scale interpolates around an initial value of 1.0 (the identity scale),
// not 0.0, hence add_weighted_with_initial_val.
impl Animatable for Scale3D {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Scale3D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.2, &other.2, self_portion, other_portion, &1.0)?,
        ))
    }
}

// Skew components interpolate linearly around 0.
impl Animatable for Skew {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Skew(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

// The fourth (w) component of perspective has an identity value of 1.0,
// so it uses add_weighted_with_initial_val; the first three are linear.
impl Animatable for Perspective {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Perspective(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
            add_weighted_with_initial_val(&self.3, &other.3, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed3D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Interpolation requires the portions to sum to 1; accumulation is
        // signalled by other_portion == 1.0 (see the specialized path below).
        assert!(self_portion + other_portion == 1.0f64 || other_portion == 1.0f64,
                "add_weighted should only be used for interpolating or accumulating transforms");

        let mut sum = *self;

        // Add translate, scale, skew and perspective components.
        sum.translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        sum.scale = self.scale.add_weighted(&other.scale, self_portion, other_portion)?;
        sum.skew = self.skew.add_weighted(&other.skew, self_portion, other_portion)?;
        sum.perspective = self.perspective.add_weighted(&other.perspective, self_portion, other_portion)?;

        // Add quaternions using spherical linear interpolation (Slerp).
        //
        // We take a specialized code path for accumulation (where other_portion is 1)
        if other_portion == 1.0 {
            if self_portion == 0.0 {
                // Accumulating zero copies of self: the result is just other.
                return Ok(*other)
            }

            // Clamp w to [-1, 1] so acos below stays in its domain.
            let clamped_w = self.quaternion.3.min(1.0).max(-1.0);

            // Determine the scale factor.
            let mut theta = clamped_w.acos();
            let mut scale = if theta == 0.0 { 0.0 } else { 1.0 / theta.sin() };
            theta *= self_portion as f32;
            scale *= theta.sin();

            // Scale the self matrix by self_portion.
            let mut scaled_self = *self;
            % for i in range(3):
                scaled_self.quaternion.${i} *= scale;
            % endfor
            scaled_self.quaternion.3 = theta.cos();

            // Multiply scaled-self by other (Hamilton quaternion product).
            let a = &scaled_self.quaternion;
            let b = &other.quaternion;
            sum.quaternion = Quaternion(
                a.3 * b.0 + a.0 * b.3 + a.1 * b.2 - a.2 * b.1,
                a.3 * b.1 - a.0 * b.2 + a.1 * b.3 + a.2 * b.0,
                a.3 * b.2 + a.0 * b.1 - a.1 * b.0 + a.2 * b.3,
                a.3 * b.3 - a.0 * b.0 - a.1 * b.1 - a.2 * b.2,
            );
        } else {
            // Regular slerp: product is the cosine of the angle between the
            // two quaternions.
            let mut product = self.quaternion.0 * other.quaternion.0 +
                              self.quaternion.1 * other.quaternion.1 +
                              self.quaternion.2 * other.quaternion.2 +
                              self.quaternion.3 * other.quaternion.3;

            // Clamp product to -1.0 <= product <= 1.0
            product = product.min(1.0);
            product = product.max(-1.0);

            if product == 1.0 {
                // Quaternions are identical; the component-wise sum computed
                // above (equal to either endpoint) is already correct.
                return Ok(sum);
            }

            let theta = product.acos();
            // w = sin(t * theta) / sin(theta); sqrt(1 - product^2) == sin(theta).
            let w = (other_portion as f32 * theta).sin() * 1.0 / (1.0 - product * product).sqrt();

            let mut a = *self;
            let mut b = *other;
            % for i in range(4):
                // cos(t*theta) - product * w  ==  sin((1 - t) * theta) / sin(theta),
                // the standard slerp coefficient for the first endpoint.
                a.quaternion.${i} *= (other_portion as f32 * theta).cos() - product * w;
                b.quaternion.${i} *= w;
                sum.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i};
            % endfor
        }

        Ok(sum)
    }
}

impl From<MatrixDecomposed3D> for ComputedMatrix {
    /// Recompose a 3D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix
    fn from(decomposed: MatrixDecomposed3D) -> ComputedMatrix {
        let mut matrix = ComputedMatrix::identity();

        // Apply perspective
        % for i in range(1, 5):
            matrix.m${i}4 = decomposed.perspective.${i - 1};
        % endfor

        // Apply translation
        % for i in range(1, 4):
            % for j in range(1, 4):
                matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i};
            % endfor
        % endfor

        // Apply rotation
        let x = decomposed.quaternion.0;
        let y = decomposed.quaternion.1;
        let z = decomposed.quaternion.2;
        let w = decomposed.quaternion.3;

        // Construct a composite rotation matrix from the quaternion values
        // rotationMatrix is a identity 4x4 matrix initially
        let mut rotation_matrix = ComputedMatrix::identity();
        rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z);
        rotation_matrix.m12 = 2.0 * (x * y + z * w);
        rotation_matrix.m13 = 2.0 * (x * z - y * w);
        rotation_matrix.m21 = 2.0 * (x * y - z * w);
        rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z);
        rotation_matrix.m23 = 2.0 * (y * z + x * w);
        rotation_matrix.m31 = 2.0 * (x * z + y * w);
        rotation_matrix.m32 = 2.0 * (y * z - x * w);
        rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y);

        matrix = multiply(rotation_matrix, matrix);

        // Apply skew. `temp` is reused for each shear matrix; the entry set
        // by the previous step is zeroed before the next is written.
        let mut temp = ComputedMatrix::identity();
        if decomposed.skew.2 != 0.0 {
            temp.m32 = decomposed.skew.2;
            matrix = multiply(matrix, temp);
        }
        if decomposed.skew.1 != 0.0 {
            temp.m32 = 0.0;
            temp.m31 = decomposed.skew.1;
            matrix = multiply(matrix, temp);
        }
        if decomposed.skew.0 != 0.0 {
            temp.m31 = 0.0;
            temp.m21 = decomposed.skew.0;
            matrix = multiply(matrix, temp);
        }

        // Apply scale
        % for i in range(1, 4):
            % for j in range(1, 4):
                matrix.m${i}${j} *= decomposed.scale.${i - 1};
            % endfor
        % endfor

        matrix
    }
}

// Multiplication of two 4x4 matrices.
fn multiply(a: ComputedMatrix, b: ComputedMatrix) -> ComputedMatrix { let mut a_clone = a; % for i in range(1, 5): % for j in range(1, 5): a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) + (a.m${i}2 * b.m2${j}) + (a.m${i}3 * b.m3${j}) + (a.m${i}4 * b.m4${j}); % endfor % endfor a_clone } impl ComputedMatrix { fn is_3d(&self) -> bool { self.m13 != 0.0 || self.m14 != 0.0 || self.m23 != 0.0 || self.m24 != 0.0 || self.m31 != 0.0 || self.m32 != 0.0 || self.m33 != 1.0 || self.m34 != 0.0 || self.m43 != 0.0 || self.m44 != 1.0 } fn determinant(&self) -> CSSFloat { self.m14 * self.m23 * self.m32 * self.m41 - self.m13 * self.m24 * self.m32 * self.m41 - self.m14 * self.m22 * self.m33 * self.m41 + self.m12 * self.m24 * self.m33 * self.m41 + self.m13 * self.m22 * self.m34 * self.m41 - self.m12 * self.m23 * self.m34 * self.m41 - self.m14 * self.m23 * self.m31 * self.m42 + self.m13 * self.m24 * self.m31 * self.m42 + self.m14 * self.m21 * self.m33 * self.m42 - self.m11 * self.m24 * self.m33 * self.m42 - self.m13 * self.m21 * self.m34 * self.m42 + self.m11 * self.m23 * self.m34 * self.m42 + self.m14 * self.m22 * self.m31 * self.m43 - self.m12 * self.m24 * self.m31 * self.m43 - self.m14 * self.m21 * self.m32 * self.m43 + self.m11 * self.m24 * self.m32 * self.m43 + self.m12 * self.m21 * self.m34 * self.m43 - self.m11 * self.m22 * self.m34 * self.m43 - self.m13 * self.m22 * self.m31 * self.m44 + self.m12 * self.m23 * self.m31 * self.m44 + self.m13 * self.m21 * self.m32 * self.m44 - self.m11 * self.m23 * self.m32 * self.m44 - self.m12 * self.m21 * self.m33 * self.m44 + self.m11 * self.m22 * self.m33 * self.m44 } fn inverse(&self) -> Option<ComputedMatrix> { let mut det = self.determinant(); if det == 0.0 { return None; } det = 1.0 / det; let x = ComputedMatrix { m11: det * (self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 + self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 - self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44), m12: det * (self.m14*self.m33*self.m42 - 
self.m13*self.m34*self.m42 - self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 + self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44), m13: det * (self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 + self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 - self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44), m14: det * (self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 - self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 + self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34), m21: det * (self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 - self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 + self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44), m22: det * (self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 + self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 - self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44), m23: det * (self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 - self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 + self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44), m24: det * (self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 + self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 - self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34), m31: det * (self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 + self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 - self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44), m32: det * (self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 - self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 + self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44), m33: det * (self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 + self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 - self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44), m34: det * (self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 - self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 + self.m12*self.m21*self.m34 - 
self.m11*self.m22*self.m34), m41: det * (self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 - self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 + self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43), m42: det * (self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 + self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 - self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43), m43: det * (self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 - self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 + self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43), m44: det * (self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 + self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 - self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33), }; Some(x) } } /// https://drafts.csswg.org/css-transforms/#interpolation-of-transforms impl Animatable for TransformList { #[inline] fn add_weighted(&self, other: &TransformList, self_portion: f64, other_portion: f64) -> Result<Self, ()> { // http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms let result = match (&self.0, &other.0) { (&Some(ref from_list), &Some(ref to_list)) => { // Two lists of transforms add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion) } (&Some(ref from_list), &None) => { // http://dev.w3.org/csswg/css-transforms/#none-transform-animation let to_list = build_identity_transform_list(from_list); add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion) } (&None, &Some(ref to_list)) => { // http://dev.w3.org/csswg/css-transforms/#none-transform-animation let from_list = build_identity_transform_list(to_list); add_weighted_transform_lists(&from_list, to_list, self_portion, other_portion) } _ => { // http://dev.w3.org/csswg/css-transforms/#none-none-animation TransformList(None) } }; Ok(result) } fn add(&self, other: &Self) -> Result<Self, ()> { match (&self.0, &other.0) { (&Some(ref from_list), &Some(ref to_list)) => { 
Ok(TransformList(Some([&from_list[..], &to_list[..]].concat()))) } (&Some(_), &None) => { Ok(self.clone()) } (&None, &Some(_)) => { Ok(other.clone()) } _ => { Ok(TransformList(None)) } } } #[inline] fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> { match (&self.0, &other.0) { (&Some(ref from_list), &Some(ref to_list)) => { if can_interpolate_list(from_list, to_list) { Ok(add_weighted_transform_lists(from_list, &to_list, count as f64, 1.0)) } else { use std::i32; let result = vec![TransformOperation::AccumulateMatrix { from_list: self.clone(), to_list: other.clone(), count: cmp::min(count, i32::MAX as u64) as i32 }]; Ok(TransformList(Some(result))) } } (&Some(ref from_list), &None) => { Ok(add_weighted_transform_lists(from_list, from_list, count as f64, 0.0)) } (&None, &Some(_)) => { // If |self| is 'none' then we are calculating: // // none * |count| + |other| // = none + |other| // = |other| // // Hence the result is just |other|. Ok(other.clone()) } _ => { Ok(TransformList(None)) } } } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(TransformList(None)) } } impl<T, U> Animatable for Either<T, U> where T: Animatable + Copy, U: Animatable + Copy, { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (Either::First(ref this), Either::First(ref other)) => { this.add_weighted(&other, self_portion, other_portion).map(Either::First) }, (Either::Second(ref this), Either::Second(ref other)) => { this.add_weighted(&other, self_portion, other_portion).map(Either::Second) }, _ => { let result = if self_portion > other_portion {*self} else {*other}; Ok(result) } } } #[inline] fn get_zero_value(&self) -> Option<Self> { match *self { Either::First(ref this) => { this.get_zero_value().map(Either::First) }, Either::Second(ref this) => { this.get_zero_value().map(Either::Second) }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, 
other) { (&Either::First(ref this), &Either::First(ref other)) => { this.compute_distance(other) }, (&Either::Second(ref this), &Either::Second(ref other)) => { this.compute_distance(other) }, _ => Err(()) } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&Either::First(ref this), &Either::First(ref other)) => { this.compute_squared_distance(other) }, (&Either::Second(ref this), &Either::Second(ref other)) => { this.compute_squared_distance(other) }, _ => Err(()) } } } impl From<IntermediateRGBA> for RGBA { fn from(extended_rgba: IntermediateRGBA) -> RGBA { // RGBA::from_floats clamps each component values. RGBA::from_floats(extended_rgba.red, extended_rgba.green, extended_rgba.blue, extended_rgba.alpha) } } impl From<RGBA> for IntermediateRGBA { fn from(rgba: RGBA) -> IntermediateRGBA { IntermediateRGBA::new(rgba.red_f32(), rgba.green_f32(), rgba.blue_f32(), rgba.alpha_f32()) } } #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] /// Unlike RGBA, each component value may exceed the range [0.0, 1.0]. pub struct IntermediateRGBA { /// The red component. pub red: f32, /// The green component. pub green: f32, /// The blue component. pub blue: f32, /// The alpha component. pub alpha: f32, } impl IntermediateRGBA { /// Returns a transparent color. #[inline] pub fn transparent() -> Self { Self::new(0., 0., 0., 0.) } /// Returns a new color. #[inline] pub fn new(red: f32, green: f32, blue: f32, alpha: f32) -> Self { IntermediateRGBA { red: red, green: green, blue: blue, alpha: alpha } } } /// Unlike Animatable for RGBA we don't clamp any component values. impl Animatable for IntermediateRGBA { #[inline] fn add_weighted(&self, other: &IntermediateRGBA, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let mut alpha = self.alpha.add_weighted(&other.alpha, self_portion, other_portion)?; if alpha <= 0. 
{ // Ideally we should return color value that only alpha component is // 0, but this is what current gecko does. Ok(IntermediateRGBA::transparent()) } else { alpha = alpha.min(1.); let red = (self.red * self.alpha).add_weighted( &(other.red * other.alpha), self_portion, other_portion )? * 1. / alpha; let green = (self.green * self.alpha).add_weighted( &(other.green * other.alpha), self_portion, other_portion )? * 1. / alpha; let blue = (self.blue * self.alpha).add_weighted( &(other.blue * other.alpha), self_portion, other_portion )? * 1. / alpha; Ok(IntermediateRGBA::new(red, green, blue, alpha)) } } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(IntermediateRGBA::transparent()) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let start = [ self.alpha, self.red * self.alpha, self.green * self.alpha, self.blue * self.alpha ]; let end = [ other.alpha, other.red * other.alpha, other.green * other.alpha, other.blue * other.alpha ]; let diff = start.iter().zip(&end) .fold(0.0f64, |n, (&a, &b)| { let diff = (a - b) as f64; n + diff * diff }); Ok(diff) } } impl From<Either<Color, Auto>> for Either<IntermediateColor, Auto> { fn from(from: Either<Color, Auto>) -> Either<IntermediateColor, Auto> { match from { Either::First(from) => Either::First(from.into()), Either::Second(Auto) => Either::Second(Auto), } } } impl From<Either<IntermediateColor, Auto>> for Either<Color, Auto> { fn from(from: Either<IntermediateColor, Auto>) -> Either<Color, Auto> { match from { Either::First(from) => Either::First(from.into()), Either::Second(Auto) => Either::Second(Auto), } } } #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[allow(missing_docs)] pub struct IntermediateColor { color: IntermediateRGBA, foreground_ratio: f32, } impl IntermediateColor { fn currentcolor() -> 
Self { IntermediateColor { color: IntermediateRGBA::transparent(), foreground_ratio: 1., } } /// Returns a transparent intermediate color. pub fn transparent() -> Self { IntermediateColor { color: IntermediateRGBA::transparent(), foreground_ratio: 0., } } fn is_currentcolor(&self) -> bool { self.foreground_ratio >= 1. } fn is_numeric(&self) -> bool { self.foreground_ratio <= 0. } fn effective_intermediate_rgba(&self) -> IntermediateRGBA { IntermediateRGBA { alpha: self.color.alpha * (1. - self.foreground_ratio), .. self.color } } } impl Animatable for IntermediateColor { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { // Common cases are interpolating between two numeric colors, // two currentcolors, and a numeric color and a currentcolor. // // Note: this algorithm assumes self_portion + other_portion // equals to one, so it may be broken for additive operation. // To properly support additive color interpolation, we would // need two ratio fields in computed color types. if self.foreground_ratio == other.foreground_ratio { if self.is_currentcolor() { Ok(IntermediateColor::currentcolor()) } else { Ok(IntermediateColor { color: self.color.add_weighted(&other.color, self_portion, other_portion)?, foreground_ratio: self.foreground_ratio, }) } } else if self.is_currentcolor() && other.is_numeric() { Ok(IntermediateColor { color: other.color, foreground_ratio: self_portion as f32, }) } else if self.is_numeric() && other.is_currentcolor() { Ok(IntermediateColor { color: self.color, foreground_ratio: other_portion as f32, }) } else { // For interpolating between two complex colors, we need to // generate colors with effective alpha value. 
let self_color = self.effective_intermediate_rgba(); let other_color = other.effective_intermediate_rgba(); let color = self_color.add_weighted(&other_color, self_portion, other_portion)?; // Then we compute the final foreground ratio, and derive // the final alpha value from the effective alpha value. let foreground_ratio = self.foreground_ratio .add_weighted(&other.foreground_ratio, self_portion, other_portion)?; let alpha = color.alpha / (1. - foreground_ratio); Ok(IntermediateColor { color: IntermediateRGBA { alpha: alpha, .. color }, foreground_ratio: foreground_ratio, }) } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { // All comments in add_weighted also applies here. if self.foreground_ratio == other.foreground_ratio { if self.is_currentcolor() { Ok(0.) } else { self.color.compute_squared_distance(&other.color) } } else if self.is_currentcolor() && other.is_numeric() { Ok(IntermediateRGBA::transparent().compute_squared_distance(&other.color)? + 1.) } else if self.is_numeric() && other.is_currentcolor() { Ok(self.color.compute_squared_distance(&IntermediateRGBA::transparent())? + 1.) } else { let self_color = self.effective_intermediate_rgba(); let other_color = other.effective_intermediate_rgba(); let dist = self_color.compute_squared_distance(&other_color)?; let ratio_diff = (self.foreground_ratio - other.foreground_ratio) as f64; Ok(dist + ratio_diff * ratio_diff) } } } impl From<Color> for IntermediateColor { fn from(color: Color) -> IntermediateColor { IntermediateColor { color: color.color.into(), foreground_ratio: color.foreground_ratio as f32 * (1. 
/ 255.), } } } impl From<IntermediateColor> for Color { fn from(color: IntermediateColor) -> Color { Color { color: color.color.into(), foreground_ratio: (color.foreground_ratio * 255.).round() as u8, } } } /// Animatable SVGPaint pub type IntermediateSVGPaint = SVGPaint<IntermediateRGBA>; /// Animatable SVGPaintKind pub type IntermediateSVGPaintKind = SVGPaintKind<IntermediateRGBA>; impl From<::values::computed::SVGPaint> for IntermediateSVGPaint { fn from(paint: ::values::computed::SVGPaint) -> IntermediateSVGPaint { paint.convert(|color| (*color).into()) } } impl From<IntermediateSVGPaint> for ::values::computed::SVGPaint { fn from(paint: IntermediateSVGPaint) -> ::values::computed::SVGPaint { paint.convert(|color| (*color).into()) } } impl Animatable for IntermediateSVGPaint { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(IntermediateSVGPaint { kind: self.kind.add_weighted(&other.kind, self_portion, other_portion)?, fallback: self.fallback.add_weighted(&other.fallback, self_portion, other_portion)?, }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sq| sq.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.kind.compute_squared_distance(&other.kind)? + self.fallback.compute_squared_distance(&other.fallback)?) 
} #[inline] fn get_zero_value(&self) -> Option<Self> { Some(IntermediateSVGPaint { kind: option_try!(self.kind.get_zero_value()), fallback: self.fallback.and_then(|v| v.get_zero_value()), }) } } impl Animatable for IntermediateSVGPaintKind { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (self, other) { (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => { Ok(SVGPaintKind::Color(self_color.add_weighted(other_color, self_portion, other_portion)?)) } // FIXME context values should be interpolable with colors // Gecko doesn't implement this behavior either. (&SVGPaintKind::None, &SVGPaintKind::None) => Ok(SVGPaintKind::None), (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) => Ok(SVGPaintKind::ContextFill), (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke) => Ok(SVGPaintKind::ContextStroke), _ => Err(()) } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (self, other) { (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => { self_color.compute_distance(other_color) } (&SVGPaintKind::None, &SVGPaintKind::None) | (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) | (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke)=> Ok(0.0), _ => Err(()) } } #[inline] fn get_zero_value(&self) -> Option<Self> { match self { &SVGPaintKind::Color(ref color) => color.get_zero_value() .map(SVGPaintKind::Color), &SVGPaintKind::None | &SVGPaintKind::ContextFill | &SVGPaintKind::ContextStroke => Some(self.clone()), _ => None, } } } <% FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale', 'HueRotate', 'Invert', 'Opacity', 'Saturate', 'Sepia' ] %> /// https://drafts.fxtf.org/filters/#animation-of-filters fn add_weighted_filter_function_impl(from: &AnimatedFilter, to: &AnimatedFilter, self_portion: f64, other_portion: f64) -> Result<AnimatedFilter, ()> { match (from, to) { % for func in [ 'Blur', 
'HueRotate' ]: (&Filter::${func}(from_value), &Filter::${func}(to_value)) => { Ok(Filter::${func}(from_value.add_weighted( &to_value, self_portion, other_portion, )?)) }, % endfor % for func in [ 'Grayscale', 'Invert', 'Sepia' ]: (&Filter::${func}(from_value), &Filter::${func}(to_value)) => { Ok(Filter::${func}(add_weighted_with_initial_val( &from_value, &to_value, self_portion, other_portion, &0.0, )?)) }, % endfor % for func in [ 'Brightness', 'Contrast', 'Opacity', 'Saturate' ]: (&Filter::${func}(from_value), &Filter::${func}(to_value)) => { Ok(Filter::${func}(add_weighted_with_initial_val( &from_value, &to_value, self_portion, other_portion, &1.0, )?)) }, % endfor % if product == "gecko": (&Filter::DropShadow(ref from_value), &Filter::DropShadow(ref to_value)) => { Ok(Filter::DropShadow(from_value.add_weighted( &to_value, self_portion, other_portion, )?)) }, (&Filter::Url(_), &Filter::Url(_)) => { Err(()) }, % endif _ => { // If specified the different filter functions, // we will need to interpolate as discreate. 
Err(()) }, } } /// https://drafts.fxtf.org/filters/#animation-of-filters fn add_weighted_filter_function(from: Option<<&AnimatedFilter>, to: Option<<&AnimatedFilter>, self_portion: f64, other_portion: f64) -> Result<AnimatedFilter, ()> { match (from, to) { (Some(f), Some(t)) => { add_weighted_filter_function_impl(f, t, self_portion, other_portion) }, (Some(f), None) => { add_weighted_filter_function_impl(f, f, self_portion, 0.0) }, (None, Some(t)) => { add_weighted_filter_function_impl(t, t, other_portion, 0.0) }, _ => { Err(()) } } } fn compute_filter_square_distance(from: &AnimatedFilter, to: &AnimatedFilter) -> Result<f64, ()> { match (from, to) { % for func in FILTER_FUNCTIONS : (&Filter::${func}(f), &Filter::${func}(t)) => { Ok(try!(f.compute_squared_distance(&t))) }, % endfor % if product == "gecko": (&Filter::DropShadow(ref f), &Filter::DropShadow(ref t)) => { Ok(try!(f.compute_squared_distance(&t))) }, % endif _ => { Err(()) } } } impl Animatable for AnimatedFilterList { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let mut filters = vec![]; let mut from_iter = self.0.iter(); let mut to_iter = other.0.iter(); let mut from = from_iter.next(); let mut to = to_iter.next(); while from.is_some() || to.is_some() { filters.push(try!(add_weighted_filter_function(from, to, self_portion, other_portion))); if from.is_some() { from = from_iter.next(); } if to.is_some() { to = to_iter.next(); } } Ok(AnimatedFilterList(filters)) } fn add(&self, other: &Self) -> Result<Self, ()> { Ok(AnimatedFilterList(self.0.iter().chain(other.0.iter()).cloned().collect())) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let mut square_distance: f64 = 0.0; let mut from_iter = self.0.iter(); let mut to_iter = other.0.iter(); let mut from = from_iter.next(); let mut 
to = to_iter.next(); while from.is_some() || to.is_some() { let current_square_distance: f64 ; if from.is_none() { let none = try!(add_weighted_filter_function(to, to, 0.0, 0.0)); current_square_distance = compute_filter_square_distance(&none, &(to.unwrap())).unwrap(); to = to_iter.next(); } else if to.is_none() { let none = try!(add_weighted_filter_function(from, from, 0.0, 0.0)); current_square_distance = compute_filter_square_distance(&none, &(from.unwrap())).unwrap(); from = from_iter.next(); } else { current_square_distance = compute_filter_square_distance(&(from.unwrap()), &(to.unwrap())).unwrap(); from = from_iter.next(); to = to_iter.next(); } square_distance += current_square_distance; } Ok(square_distance.sqrt()) } } Auto merge of #17553 - BorisChiou:stylo/animation/interpolatematrix_none, r=birtles stylo: Bug 1375812 - Build an identity matrix for InterpolateMatrix. We have to build an identity matrix while add_weighted() between InterpolateMatrix and none transform in some cases, e.g. trigger a transition from a mid-point of another transition to none. --- - [X] `./mach build -d` does not report any errors - [X] `./mach test-tidy` does not report any errors - [X] These changes fix [Bug 1375812](https://bugzilla.mozilla.org/show_bug.cgi?id=1375812). - [X] These changes do not require tests because we have tests in Gecko already. <!-- Reviewable:start --> --- This change is [<img src="https://reviewable.io/review_button.svg" height="34" align="absmiddle" alt="Reviewable"/>](https://reviewable.io/reviews/servo/servo/17553) <!-- Reviewable:end --> /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ <%namespace name="helpers" file="/helpers.mako.rs" /> <% from data import SYSTEM_FONT_LONGHANDS %> use app_units::Au; use cssparser::{Parser, RGBA}; use euclid::{Point2D, Size2D}; #[cfg(feature = "gecko")] use gecko_bindings::bindings::RawServoAnimationValueMap; #[cfg(feature = "gecko")] use gecko_bindings::structs::RawGeckoGfxMatrix4x4; #[cfg(feature = "gecko")] use gecko_bindings::structs::nsCSSPropertyID; #[cfg(feature = "gecko")] use gecko_bindings::sugar::ownership::{HasFFI, HasSimpleFFI}; #[cfg(feature = "gecko")] use gecko_string_cache::Atom; use properties::{CSSWideKeyword, PropertyDeclaration}; use properties::longhands; use properties::longhands::background_size::computed_value::T as BackgroundSizeList; use properties::longhands::font_weight::computed_value::T as FontWeight; use properties::longhands::font_stretch::computed_value::T as FontStretch; use properties::longhands::transform::computed_value::ComputedMatrix; use properties::longhands::transform::computed_value::ComputedOperation as TransformOperation; use properties::longhands::transform::computed_value::T as TransformList; use properties::longhands::vertical_align::computed_value::T as VerticalAlign; use properties::longhands::visibility::computed_value::T as Visibility; #[cfg(feature = "gecko")] use properties::{PropertyDeclarationId, LonghandId}; use selectors::parser::SelectorParseError; use smallvec::SmallVec; use std::cmp; #[cfg(feature = "gecko")] use fnv::FnvHashMap; use style_traits::ParseError; use super::ComputedValues; use values::{Auto, CSSFloat, CustomIdent, Either}; use values::animated::effects::BoxShadowList as AnimatedBoxShadowList; use values::animated::effects::Filter as AnimatedFilter; use values::animated::effects::FilterList as AnimatedFilterList; use values::animated::effects::TextShadowList as AnimatedTextShadowList; use values::computed::{Angle, LengthOrPercentageOrAuto, LengthOrPercentageOrNone}; use values::computed::{BorderCornerRadius, ClipRect}; use 
values::computed::{CalcLengthOrPercentage, Color, Context, ComputedValueAsSpecified};
use values::computed::{LengthOrPercentage, MaxLength, MozLength, ToComputedValue};
use values::generics::{SVGPaint, SVGPaintKind};
use values::generics::border::BorderCornerRadius as GenericBorderCornerRadius;
use values::generics::effects::Filter;
use values::generics::position as generic_position;
use values::specified::length::Percentage;

/// A longhand property whose animation type is not "none".
///
/// NOTE: This includes the 'display' property since it is animatable from SMIL even though it is
/// not animatable from CSS animations or Web Animations. CSS transitions also do not allow
/// animating 'display', but for CSS transitions we have the separate TransitionProperty type.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatableLonghand {
    % for prop in data.longhands:
        % if prop.animatable:
            /// ${prop.name}
            ${prop.camel_case},
        % endif
    % endfor
}

impl AnimatableLonghand {
    /// Returns true if this AnimatableLonghand is one of the discretely animatable properties.
    pub fn is_discrete(&self) -> bool {
        match *self {
            // Only properties whose animation_value_type is "discrete" match here.
            % for prop in data.longhands:
                % if prop.animation_value_type == "discrete":
                    AnimatableLonghand::${prop.camel_case} => true,
                % endif
            % endfor
            _ => false
        }
    }

    /// Converts from an nsCSSPropertyID. Returns None if nsCSSPropertyID is not an animatable
    /// longhand in Servo.
    #[cfg(feature = "gecko")]
    pub fn from_nscsspropertyid(css_property: nsCSSPropertyID) -> Option<Self> {
        match css_property {
            % for prop in data.longhands:
                % if prop.animatable:
                    ${helpers.to_nscsspropertyid(prop.ident)}
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            _ => None
        }
    }

    /// Converts from TransitionProperty. Returns None if the property is not an animatable
    /// longhand.
    pub fn from_transition_property(transition_property: &TransitionProperty) -> Option<Self> {
        match *transition_property {
            // A property must be both transitionable and animatable to convert here.
            % for prop in data.longhands:
                % if prop.transitionable and prop.animatable:
                    TransitionProperty::${prop.camel_case}
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            _ => None
        }
    }

    /// Get an animatable longhand property from a property declaration.
    pub fn from_declaration(declaration: &PropertyDeclaration) -> Option<Self> {
        use properties::LonghandId;
        match *declaration {
            % for prop in data.longhands:
                % if prop.animatable:
                    PropertyDeclaration::${prop.camel_case}(..)
                        => Some(AnimatableLonghand::${prop.camel_case}),
                % endif
            % endfor
            // CSS-wide keywords and variable references carry a LonghandId
            // rather than a concrete value, so match on the id instead.
            PropertyDeclaration::CSSWideKeyword(id, _) |
            PropertyDeclaration::WithVariables(id, _) => {
                match id {
                    % for prop in data.longhands:
                        % if prop.animatable:
                            LonghandId::${prop.camel_case}
                                => Some(AnimatableLonghand::${prop.camel_case}),
                        % endif
                    % endfor
                    _ => None,
                }
            },
            _ => None,
        }
    }
}

/// Convert to nsCSSPropertyID.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From< &'a AnimatableLonghand> for nsCSSPropertyID {
    fn from(property: &'a AnimatableLonghand) -> nsCSSPropertyID {
        match *property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case}
                        => ${helpers.to_nscsspropertyid(prop.ident)},
                % endif
            % endfor
        }
    }
}

/// Convert to PropertyDeclarationId.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From<AnimatableLonghand> for PropertyDeclarationId<'a> {
    fn from(property: AnimatableLonghand) -> PropertyDeclarationId<'a> {
        match property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case}
                        => PropertyDeclarationId::Longhand(LonghandId::${prop.camel_case}),
                % endif
            % endfor
        }
    }
}

/// Returns true if this nsCSSPropertyID is one of the animatable properties.
#[cfg(feature = "gecko")]
pub fn nscsspropertyid_is_animatable(property: nsCSSPropertyID) -> bool {
    match property {
        % for prop in data.longhands + data.shorthands_except_all():
            % if prop.animatable:
                ${helpers.to_nscsspropertyid(prop.ident)} => true,
            % endif
        % endfor
        _ => false
    }
}

/// A given transition property, that is either `All`, a transitionable longhand property,
/// a shorthand with at least one transitionable longhand component, or an unsupported property.
// NB: This needs to be here because it needs all the longhands generated
// beforehand.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Debug, Eq, Hash, PartialEq, ToCss)]
pub enum TransitionProperty {
    /// All, any transitionable property changing should generate a transition.
    All,
    % for prop in data.longhands + data.shorthands_except_all():
        % if prop.transitionable:
            /// ${prop.name}
            ${prop.camel_case},
        % endif
    % endfor
    /// Unrecognized property which could be any non-transitionable, custom property, or
    /// unknown property.
    Unsupported(CustomIdent)
}

no_viewport_percentage!(TransitionProperty);

impl ComputedValueAsSpecified for TransitionProperty {}

impl TransitionProperty {
    /// Iterates over each longhand property.
    pub fn each<F: FnMut(&TransitionProperty) -> ()>(mut cb: F) {
        % for prop in data.longhands:
            % if prop.transitionable:
                cb(&TransitionProperty::${prop.camel_case});
            % endif
        % endfor
    }

    /// Iterates over every longhand property that is not TransitionProperty::All, stopping and
    /// returning true when the provided callback returns true for the first time.
    pub fn any<F: FnMut(&TransitionProperty) -> bool>(mut cb: F) -> bool {
        % for prop in data.longhands:
            % if prop.transitionable:
                if cb(&TransitionProperty::${prop.camel_case}) {
                    return true;
                }
            % endif
        % endfor
        false
    }

    /// Parse a transition-property value.
    pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> {
        let ident = input.expect_ident()?;
        let supported = match_ignore_ascii_case! { &ident,
            "all" => Ok(Some(TransitionProperty::All)),
            % for prop in data.longhands + data.shorthands_except_all():
                % if prop.transitionable:
                    "${prop.name}" => Ok(Some(TransitionProperty::${prop.camel_case})),
                % endif
            % endfor
            // "none" is rejected outright; other unknown idents fall through to
            // the Unsupported(CustomIdent) path below.
            "none" => Err(()),
            _ => Ok(None),
        };

        match supported {
            Ok(Some(property)) => Ok(property),
            Ok(None) => CustomIdent::from_ident(ident, &[]).map(TransitionProperty::Unsupported),
            Err(()) => Err(SelectorParseError::UnexpectedIdent(ident).into()),
        }
    }

    /// Return transitionable longhands of this shorthand TransitionProperty, except for "all".
    ///
    /// Panics when called on a non-shorthand variant.
    pub fn longhands(&self) -> &'static [TransitionProperty] {
        // One static slice of sub-properties is generated per transitionable shorthand.
        % for prop in data.shorthands_except_all():
            % if prop.transitionable:
                static ${prop.ident.upper()}: &'static [TransitionProperty] = &[
                    % for sub in prop.sub_properties:
                        % if sub.transitionable:
                            TransitionProperty::${sub.camel_case},
                        % endif
                    % endfor
                ];
            % endif
        % endfor
        match *self {
            % for prop in data.shorthands_except_all():
                % if prop.transitionable:
                    TransitionProperty::${prop.camel_case} => ${prop.ident.upper()},
                % endif
            % endfor
            _ => panic!("Not allowed to call longhands() for this TransitionProperty")
        }
    }

    /// Returns true if this TransitionProperty is a shorthand.
    pub fn is_shorthand(&self) -> bool {
        match *self {
            % for prop in data.shorthands_except_all():
                % if prop.transitionable:
                    TransitionProperty::${prop.camel_case} => true,
                % endif
            % endfor
            _ => false
        }
    }
}

/// Convert to nsCSSPropertyID.
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl<'a> From< &'a TransitionProperty> for nsCSSPropertyID {
    fn from(transition_property: &'a TransitionProperty) -> nsCSSPropertyID {
        match *transition_property {
            % for prop in data.longhands + data.shorthands_except_all():
                % if prop.transitionable:
                    TransitionProperty::${prop.camel_case}
                        => ${helpers.to_nscsspropertyid(prop.ident)},
                % endif
            % endfor
            TransitionProperty::All => nsCSSPropertyID::eCSSPropertyExtra_all_properties,
            // Unsupported / non-transitionable variants have no Gecko id.
            _ => panic!("Unconvertable Servo transition property: {:?}",
                        transition_property),
        }
    }
}

/// Convert nsCSSPropertyID to TransitionProperty
#[cfg(feature = "gecko")]
#[allow(non_upper_case_globals)]
impl From<nsCSSPropertyID> for TransitionProperty {
    fn from(property: nsCSSPropertyID) -> TransitionProperty {
        match property {
            % for prop in data.longhands + data.shorthands_except_all():
                % if prop.transitionable:
                    ${helpers.to_nscsspropertyid(prop.ident)}
                        => TransitionProperty::${prop.camel_case},
                % else:
                    // Known-but-non-transitionable properties map to Unsupported
                    // carrying the property name as a CustomIdent.
                    ${helpers.to_nscsspropertyid(prop.ident)}
                        => TransitionProperty::Unsupported(CustomIdent(Atom::from("${prop.ident}"))),
                % endif
            % endfor
            nsCSSPropertyID::eCSSPropertyExtra_all_properties => TransitionProperty::All,
            _ => panic!("Unconvertable nsCSSPropertyID: {:?}", property),
        }
    }
}

/// Returns true if this nsCSSPropertyID is one of the transitionable properties.
#[cfg(feature = "gecko")]
pub fn nscsspropertyid_is_transitionable(property: nsCSSPropertyID) -> bool {
    match property {
        % for prop in data.longhands + data.shorthands_except_all():
            % if prop.transitionable:
                ${helpers.to_nscsspropertyid(prop.ident)} => true,
            % endif
        % endfor
        _ => false
    }
}

/// An animated property interpolation between two computed values for that
/// property.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimatedProperty {
    % for prop in data.longhands:
        % if prop.animatable:
            <%
                if prop.is_animatable_with_computed_value:
                    value_type = "longhands::{}::computed_value::T".format(prop.ident)
                else:
                    value_type = prop.animation_value_type
            %>
            /// ${prop.name}
            ${prop.camel_case}(${value_type}, ${value_type}),
        % endif
    % endfor
}

impl AnimatedProperty {
    /// Get the name of this property.
    pub fn name(&self) -> &'static str {
        match *self {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatedProperty::${prop.camel_case}(..) => "${prop.name}",
                % endif
            % endfor
        }
    }

    /// Whether this interpolation does animate, that is, whether the start and
    /// end values are different.
    pub fn does_animate(&self) -> bool {
        match *self {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to,
                % endif
            % endfor
        }
    }

    /// Whether an animated property has the same end value as another.
    pub fn has_the_same_end_value_as(&self, other: &Self) -> bool {
        match (self, other) {
            % for prop in data.longhands:
                % if prop.animatable:
                    (&AnimatedProperty::${prop.camel_case}(_, ref this_end_value),
                     &AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => {
                        this_end_value == other_end_value
                    }
                % endif
            % endfor
            // Mismatched variants (different properties) never share an end value.
            _ => false,
        }
    }

    /// Update `style` with the proper computed style corresponding to this
    /// animation at `progress`.
    pub fn update(&self, style: &mut ComputedValues, progress: f64) {
        match *self {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatedProperty::${prop.camel_case}(ref from, ref to) => {
                        // https://w3c.github.io/web-animations/#discrete-animation-type
                        % if prop.animation_value_type == "discrete":
                            let value = if progress < 0.5 { from.clone() } else { to.clone() };
                        % else:
                            // Interpolation failure leaves the style untouched.
                            let value = match from.interpolate(to, progress) {
                                Ok(value) => value,
                                Err(()) => return,
                            };
                        % endif
                        % if not prop.is_animatable_with_computed_value:
                            let value: longhands::${prop.ident}::computed_value::T = value.into();
                        % endif
                        style.mutate_${prop.style_struct.ident.strip("_")}().set_${prop.ident}(value);
                    }
                % endif
            % endfor
        }
    }

    /// Get an animatable value from a transition-property, an old style, and a
    /// new style.
    pub fn from_animatable_longhand(property: &AnimatableLonghand,
                                    old_style: &ComputedValues,
                                    new_style: &ComputedValues)
                                    -> AnimatedProperty {
        match *property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case} => {
                        AnimatedProperty::${prop.camel_case}(
                            old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}().into(),
                            new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}().into())
                    }
                % endif
            % endfor
        }
    }
}

/// A collection of AnimationValue that were composed on an element.
/// This HashMap stores the values that are the last AnimationValue to be
/// composed for each TransitionProperty.
#[cfg(feature = "gecko")]
pub type AnimationValueMap = FnvHashMap<AnimatableLonghand, AnimationValue>;
#[cfg(feature = "gecko")]
unsafe impl HasFFI for AnimationValueMap {
    type FFIType = RawServoAnimationValueMap;
}
#[cfg(feature = "gecko")]
unsafe impl HasSimpleFFI for AnimationValueMap {}

/// An enum to represent a single computed value belonging to an animated
/// property in order to be interpolated with another one. When interpolating,
/// both values need to belong to the same property.
///
/// This is different to AnimatedProperty in the sense that AnimatedProperty
/// also knows the final value to be used during the animation.
///
/// This is to be used in Gecko integration code.
///
/// FIXME: We need to add a path for custom properties, but that's trivial after
/// this (is a similar path to that of PropertyDeclaration).
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum AnimationValue {
    % for prop in data.longhands:
        % if prop.animatable:
            /// ${prop.name}
            % if prop.is_animatable_with_computed_value:
                ${prop.camel_case}(longhands::${prop.ident}::computed_value::T),
            % else:
                ${prop.camel_case}(${prop.animation_value_type}),
            % endif
        % endif
    % endfor
}

impl AnimationValue {
    /// "Uncompute" this animation value in order to be used inside the CSS
    /// cascade.
    pub fn uncompute(&self) -> PropertyDeclaration {
        use properties::longhands;
        match *self {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimationValue::${prop.camel_case}(ref from) => {
                        PropertyDeclaration::${prop.camel_case}(
                            % if prop.boxed:
                            Box::new(
                            % endif
                                longhands::${prop.ident}::SpecifiedValue::from_computed_value(
                                % if prop.is_animatable_with_computed_value:
                                    from
                                % else:
                                    // Animation value type differs from the computed value
                                    // type; convert back before uncomputing.
                                    &from.clone().into()
                                % endif
                                ))
                            % if prop.boxed:
                            )
                            % endif
                    }
                % endif
            % endfor
        }
    }

    /// Construct an AnimationValue from a property declaration
    pub fn from_declaration(decl: &PropertyDeclaration, context: &mut Context,
                            initial: &ComputedValues) -> Option<Self> {
        use error_reporting::create_error_reporter;
        use properties::LonghandId;
        use properties::DeclaredValue;
        match *decl {
            % for prop in data.longhands:
                % if prop.animatable:
                    PropertyDeclaration::${prop.camel_case}(ref val) => {
                        % if prop.ident in SYSTEM_FONT_LONGHANDS and product == "gecko":
                            // System fonts must be resolved before computing the value.
                            if let Some(sf) = val.get_system() {
                                longhands::system_font::resolve_system_font(sf, context);
                            }
                        % endif
                        Some(AnimationValue::${prop.camel_case}(
                        % if prop.is_animatable_with_computed_value:
                            val.to_computed_value(context)
                        % else:
                            From::from(val.to_computed_value(context))
                        % endif
                        ))
                    },
                % endif
            % endfor
            PropertyDeclaration::CSSWideKeyword(id, keyword) => {
                match id {
                    // We put all the animatable properties first in the hopes
                    // that it might increase match locality.
                    % for prop in data.longhands:
                    % if prop.animatable:
                    LonghandId::${prop.camel_case} => {
                        // 'unset' behaves as 'initial' for reset properties and
                        // as 'inherit' for inherited ones.
                        let computed = match keyword {
                            % if not prop.style_struct.inherited:
                                CSSWideKeyword::Unset |
                            % endif
                            CSSWideKeyword::Initial => {
                                let initial_struct = initial.get_${prop.style_struct.name_lower}();
                                initial_struct.clone_${prop.ident}()
                            },
                            % if prop.style_struct.inherited:
                                CSSWideKeyword::Unset |
                            % endif
                            CSSWideKeyword::Inherit => {
                                let inherit_struct = context.inherited_style
                                                            .get_${prop.style_struct.name_lower}();
                                inherit_struct.clone_${prop.ident}()
                            },
                        };
                        % if not prop.is_animatable_with_computed_value:
                            let computed = From::from(computed);
                        % endif
                        Some(AnimationValue::${prop.camel_case}(computed))
                    },
                    % endif
                    % endfor
                    % for prop in data.longhands:
                    % if not prop.animatable:
                    LonghandId::${prop.camel_case} => None,
                    % endif
                    % endfor
                }
            },
            PropertyDeclaration::WithVariables(id, ref variables) => {
                let custom_props = context.style().custom_properties();
                let reporter = create_error_reporter();
                match id {
                    % for prop in data.longhands:
                    % if prop.animatable:
                    LonghandId::${prop.camel_case} => {
                        let mut result = None;
                        let quirks_mode = context.quirks_mode;
                        // Substitute the variable references, then recurse on the
                        // resulting concrete declaration.
                        ::properties::substitute_variables_${prop.ident}_slow(
                            &variables.css,
                            variables.first_token_type,
                            &variables.url_data,
                            variables.from_shorthand,
                            &custom_props,
                            &mut |v| {
                                let declaration = match *v {
                                    DeclaredValue::Value(value) => {
                                        PropertyDeclaration::${prop.camel_case}(value.clone())
                                    },
                                    DeclaredValue::CSSWideKeyword(keyword) => {
                                        PropertyDeclaration::CSSWideKeyword(id, keyword)
                                    },
                                    DeclaredValue::WithVariables(_) => unreachable!(),
                                };
                                result = AnimationValue::from_declaration(&declaration, context, initial);
                            },
                            &reporter,
                            quirks_mode);
                        result
                    },
                    % else:
                    LonghandId::${prop.camel_case} => None,
                    % endif
                    % endfor
                }
            },
            _ => None // non animatable properties will get included because of shorthands. ignore.
        }
    }

    /// Get an AnimationValue for an AnimatableLonghand from a given computed values.
    pub fn from_computed_values(property: &AnimatableLonghand,
                                computed_values: &ComputedValues)
                                -> Self {
        match *property {
            % for prop in data.longhands:
                % if prop.animatable:
                    AnimatableLonghand::${prop.camel_case} => {
                        AnimationValue::${prop.camel_case}(
                        % if prop.is_animatable_with_computed_value:
                            computed_values.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}())
                        % else:
                            From::from(computed_values.get_${prop.style_struct.ident.strip("_")}()
                                                      .clone_${prop.ident}()))
                        % endif
                    }
                % endif
            % endfor
        }
    }
}

impl Animatable for AnimationValue {
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        match (self, other) {
            % for prop in data.longhands:
                % if prop.animatable:
                    (&AnimationValue::${prop.camel_case}(ref from),
                     &AnimationValue::${prop.camel_case}(ref to)) => {
                        % if prop.animation_value_type == "discrete":
                            // Discrete animation: take whichever endpoint has the
                            // larger portion.
                            if self_portion > other_portion {
                                Ok(AnimationValue::${prop.camel_case}(from.clone()))
                            } else {
                                Ok(AnimationValue::${prop.camel_case}(to.clone()))
                            }
                        % else:
                            from.add_weighted(to, self_portion, other_portion)
                                .map(AnimationValue::${prop.camel_case})
                        % endif
                    }
                % endif
            % endfor
            _ => {
                panic!("Expected weighted addition of computed values of the same \
                        property, got: {:?}, {:?}", self, other);
            }
        }
    }

    fn add(&self, other: &Self) -> Result<Self, ()> {
        match (self, other) {
            % for prop in data.longhands:
                % if prop.animatable:
                    % if prop.animation_value_type == "discrete":
                        // Discrete values are not additive.
                        (&AnimationValue::${prop.camel_case}(_),
                         &AnimationValue::${prop.camel_case}(_)) => {
                            Err(())
                        }
                    % else:
                        (&AnimationValue::${prop.camel_case}(ref from),
                         &AnimationValue::${prop.camel_case}(ref to)) => {
                            from.add(to).map(AnimationValue::${prop.camel_case})
                        }
                    % endif
                % endif
            % endfor
            _ => {
                panic!("Expected addition of computed values of the same \
                        property, got: {:?}, {:?}", self, other);
            }
        }
    }

    fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> {
        match (self, other) {
            % for prop in data.longhands:
                % if prop.animatable:
                    % if prop.animation_value_type == "discrete":
                        // Discrete values do not accumulate.
                        (&AnimationValue::${prop.camel_case}(_),
                         &AnimationValue::${prop.camel_case}(_)) => {
                            Err(())
                        }
                    % else:
                        (&AnimationValue::${prop.camel_case}(ref from),
                         &AnimationValue::${prop.camel_case}(ref to)) => {
                            from.accumulate(to, count).map(AnimationValue::${prop.camel_case})
                        }
                    % endif
                % endif
            % endfor
            _ => {
                panic!("Expected accumulation of computed values of the same \
                        property, got: {:?}, {:?}", self, other);
            }
        }
    }

    fn get_zero_value(&self) -> Option<Self> {
        match self {
            % for prop in data.longhands:
                % if prop.animatable:
                    % if prop.animation_value_type == "discrete":
                        // Discrete values have no additive zero.
                        &AnimationValue::${prop.camel_case}(_) => {
                            None
                        }
                    % else:
                        &AnimationValue::${prop.camel_case}(ref base) => {
                            base.get_zero_value().map(AnimationValue::${prop.camel_case})
                        }
                    % endif
                % endif
            % endfor
        }
    }

    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            % for prop in data.longhands:
                % if prop.animatable:
                    % if prop.animation_value_type != "discrete":
                        (&AnimationValue::${prop.camel_case}(ref from),
                         &AnimationValue::${prop.camel_case}(ref to)) => {
                            from.compute_distance(to)
                        },
                    % else:
                        // Distance is not defined for discrete values.
                        (&AnimationValue::${prop.camel_case}(ref _from),
                         &AnimationValue::${prop.camel_case}(ref _to)) => {
                            Err(())
                        },
                    % endif
                % endif
            % endfor
            _ => {
                panic!("Expected compute_distance of computed values of the same \
                        property, got: {:?}, {:?}", self, other);
            }
        }
    }
}

/// A trait used to implement various procedures used during animation.
pub trait Animatable: Sized {
    /// Performs a weighted sum of this value and |other|. This is used for
    /// interpolation and addition of animation values.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()>;

    /// [Interpolates][interpolation] a value with another for a given property.
    ///
    /// [interpolation]: https://w3c.github.io/web-animations/#animation-interpolation
    fn interpolate(&self, other: &Self, progress: f64) -> Result<Self, ()> {
        self.add_weighted(other, 1.0 - progress, progress)
    }

    /// Returns the [sum][animation-addition] of this value and |other|.
    ///
    /// [animation-addition]: https://w3c.github.io/web-animations/#animation-addition
    fn add(&self, other: &Self) -> Result<Self, ()> {
        self.add_weighted(other, 1.0, 1.0)
    }

    /// [Accumulates][animation-accumulation] this value onto itself (|count| - 1) times then
    /// accumulates |other| onto the result.
    /// If |count| is zero, the result will be |other|.
    ///
    /// [animation-accumulation]: https://w3c.github.io/web-animations/#animation-accumulation
    fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> {
        self.add_weighted(other, count as f64, 1.0)
    }

    /// Returns a value that, when added with an underlying value, will produce the underlying
    /// value. This is used for SMIL animation's "by-animation" where SMIL first interpolates from
    /// the zero value to the 'by' value, and then adds the result to the underlying value.
    ///
    /// This is not the necessarily the same as the initial value of a property. For example, the
    /// initial value of 'stroke-width' is 1, but the zero value is 0, since adding 1 to the
    /// underlying value will not produce the underlying value.
    fn get_zero_value(&self) -> Option<Self> { None }

    /// Compute distance between a value and another for a given property.
    fn compute_distance(&self, _other: &Self) -> Result<f64, ()>  { Err(()) }

    /// In order to compute the Euclidean distance of a list or property value with multiple
    /// components, we need to compute squared distance for each element, so the vector can sum it
    /// and then get its squared root as the distance.
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_distance(other).map(|d| d * d)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-repeatable-list
pub trait RepeatableListAnimatable: Animatable {}

impl RepeatableListAnimatable for LengthOrPercentage {}
impl RepeatableListAnimatable for Either<f32, LengthOrPercentage> {}

// Animates two lists pairwise by repeating each to the least common multiple
// of their lengths, per the repeatable-list animation type.
macro_rules! repeated_vec_impl {
    ($($ty:ty),*) => {
        $(impl<T: RepeatableListAnimatable> Animatable for $ty {
            fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
                -> Result<Self, ()> {
                use num_integer::lcm;
                let len = lcm(self.len(), other.len());
                self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| {
                    me.add_weighted(you, self_portion, other_portion)
                }).collect()
            }

            #[inline]
            fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
                self.compute_squared_distance(other).map(|sd| sd.sqrt())
            }

            #[inline]
            fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
                use num_integer::lcm;
                let len = lcm(self.len(), other.len());
                self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(me, you)| {
                    me.compute_squared_distance(you)
                }).sum()
            }
        })*
    };
}

repeated_vec_impl!(SmallVec<[T; 1]>, Vec<T>);

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for Au {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Round to the nearest app unit after weighting in f64.
        Ok(Au((self.0 as f64 * self_portion + other.0 as f64 * other_portion).round() as i32))
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(Au(0)) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }
}

// Two Some values animate component-wise; two Nones are a no-op; a
// Some/None mismatch is an error.
impl <T> Animatable for Option<T>
    where T: Animatable,
{
    #[inline]
    fn add_weighted(&self, other: &Option<T>, self_portion: f64, other_portion: f64)
        -> Result<Option<T>, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                Ok(this.add_weighted(other, self_portion, other_portion).ok())
            }
            (&None, &None) => Ok(None),
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                this.compute_distance(other)
            },
            (&None, &None) => Ok(0.0),
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Some(ref this), &Some(ref other)) => {
                this.compute_squared_distance(other)
            },
            (&None, &None) => Ok(0.0),
            _ => Err(()),
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f32 {
    #[inline]
    fn add_weighted(&self, other: &f32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Weight in f64 to reduce rounding error, then narrow back to f32.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion) as f32)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0.) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for f64 {
    #[inline]
    fn add_weighted(&self, other: &f64, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(*self * self_portion + *other * other_portion)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0.)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs())
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-integer
impl Animatable for i32 {
    #[inline]
    fn add_weighted(&self, other: &i32, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Weight in f64, then round to the nearest integer.
        Ok((*self as f64 * self_portion + *other as f64 * other_portion).round() as i32)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(0) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((*self - *other).abs() as f64)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-number
impl Animatable for Angle {
    #[inline]
    fn add_weighted(&self, other: &Angle, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            // Same-unit angles are weighted directly, preserving the unit.
            % for angle_type in [ 'Degree', 'Gradian', 'Turn' ]:
            (Angle::${angle_type}(val1), Angle::${angle_type}(val2)) => {
                Ok(Angle::${angle_type}(
                    try!(val1.add_weighted(&val2, self_portion, other_portion))
                ))
            }
            % endfor
            // Mixed units fall back to radians.
            _ => {
                self.radians()
                    .add_weighted(&other.radians(), self_portion, other_portion)
                    .map(Angle::from_radians)
            }
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-percentage
impl Animatable for Percentage {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Percentage((self.0 as f64 * self_portion + other.0 as f64 * other_portion) as f32))
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(Percentage(0.)) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        Ok((self.0 as f64 - other.0 as f64).abs())
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-visibility
impl Animatable for Visibility {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // 'visible' wins whenever it has any positive weight; two
        // non-visible values cannot be interpolated.
        match (*self, *other) {
            (Visibility::visible, _) => {
                Ok(if self_portion > 0.0 { *self } else { *other })
            },
            (_, Visibility::visible) => {
                Ok(if other_portion > 0.0 { *other } else { *self
                })
            },
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        if *self == *other {
            Ok(0.0)
        } else {
            Ok(1.0)
        }
    }
}

impl<T: Animatable + Copy> Animatable for Size2D<T> {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        let width = self.width.add_weighted(&other.width, self_portion, other_portion)?;
        let height = self.height.add_weighted(&other.height, self_portion, other_portion)?;

        Ok(Size2D::new(width, height))
    }
}

impl<T: Animatable + Copy> Animatable for Point2D<T> {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        let x = self.x.add_weighted(&other.x, self_portion, other_portion)?;
        let y = self.y.add_weighted(&other.y, self_portion, other_portion)?;

        Ok(Point2D::new(x, y))
    }
}

impl Animatable for BorderCornerRadius {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(GenericBorderCornerRadius)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Euclidean distance over the width/height components.
        Ok(self.0.width.compute_squared_distance(&other.0.width)? +
           self.0.height.compute_squared_distance(&other.0.height)?)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-length
impl Animatable for VerticalAlign {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Only length-to-length interpolation is supported; keyword values
        // and percentages are not interpolated here.
        match (*self, *other) {
            (VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref this)),
             VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(ref other))) => {
                this.add_weighted(other, self_portion, other_portion).map(|value| {
                    VerticalAlign::LengthOrPercentage(LengthOrPercentage::Length(value))
                })
            }
            _ => Err(()),
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (VerticalAlign::LengthOrPercentage(ref this),
             VerticalAlign::LengthOrPercentage(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(()),
        }
    }
}

impl Animatable for BackgroundSizeList {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        self.0.add_weighted(&other.0, self_portion, other_portion).map(BackgroundSizeList)
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_distance(&other.0)
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        self.0.compute_squared_distance(&other.0)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for CalcLengthOrPercentage {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Weights the optional percentage halves; a missing side is treated
        // as T::default() unless both sides are missing.
        fn add_weighted_half<T>(this: Option<T>,
                                other: Option<T>,
                                self_portion: f64,
                                other_portion: f64)
                                -> Result<Option<T>, ()>
            where T: Default + Animatable,
        {
            match (this, other) {
                (None, None) => Ok(None),
                (this, other) => {
                    let this = this.unwrap_or(T::default());
                    let other = other.unwrap_or(T::default());
                    this.add_weighted(&other, self_portion, other_portion).map(Some)
                }
            }
        }

        let length = self.unclamped_length().add_weighted(&other.unclamped_length(), self_portion, other_portion)?;
        let percentage =
            add_weighted_half(self.percentage, other.percentage, self_portion, other_portion)?;
        Ok(CalcLengthOrPercentage::with_clamping_mode(length, percentage, self.clamping_mode))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Treat (length, percentage) as a 2D vector and use its squared norm.
        let length_diff = (self.unclamped_length().0 - other.unclamped_length().0) as f64;
        let percentage_diff = (self.percentage() - other.percentage()) as f64;
        Ok(length_diff * length_diff + percentage_diff * percentage_diff)
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for LengthOrPercentage {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            (LengthOrPercentage::Length(ref this),
             LengthOrPercentage::Length(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentage::Length)
            }
            (LengthOrPercentage::Percentage(ref this),
             LengthOrPercentage::Percentage(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentage::Percentage)
            }
            (this, other) => {
                // Special handling for zero values since these should not require calc().
                if this.is_definitely_zero() {
                    return other.add_weighted(&other, 0., other_portion)
                } else if other.is_definitely_zero() {
                    return this.add_weighted(self, self_portion, 0.)
                }

                // Mixed length/percentage: promote both sides to calc() form.
                let this: CalcLengthOrPercentage = From::from(this);
                let other: CalcLengthOrPercentage = From::from(other);
                this.add_weighted(&other, self_portion, other_portion)
                    .map(LengthOrPercentage::Calc)
            }
        }
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> { Some(LengthOrPercentage::zero()) }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (LengthOrPercentage::Length(ref this),
             LengthOrPercentage::Length(ref other)) => {
                this.compute_distance(other)
            },
            (LengthOrPercentage::Percentage(ref this),
             LengthOrPercentage::Percentage(ref other)) => {
                this.compute_distance(other)
            },
            (this, other) => {
                let this: CalcLengthOrPercentage = From::from(this);
                let other: CalcLengthOrPercentage = From::from(other);
                this.compute_distance(&other)
            }
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (*self, *other) {
            (LengthOrPercentage::Length(ref this),
             LengthOrPercentage::Length(ref other)) => {
                let diff = (this.0 - other.0) as f64;
                Ok(diff * diff)
            },
            (LengthOrPercentage::Percentage(ref this),
             LengthOrPercentage::Percentage(ref other)) => {
                let diff = this.0 as f64 - other.0 as f64;
                Ok(diff * diff)
            },
            (this, other) => {
                let this: CalcLengthOrPercentage = From::from(this);
                let other: CalcLengthOrPercentage = From::from(other);
                let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64;
                let percentage_diff = (this.percentage() - other.percentage()) as f64;
                Ok(length_diff * length_diff + percentage_diff * percentage_diff)
            }
        }
    }
}

/// https://drafts.csswg.org/css-transitions/#animtype-lpcalc
impl Animatable for LengthOrPercentageOrAuto {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            (LengthOrPercentageOrAuto::Length(ref this),
             LengthOrPercentageOrAuto::Length(ref other)) => {
                this.add_weighted(other, self_portion, other_portion)
                    .map(LengthOrPercentageOrAuto::Length)
            }
(LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrAuto::Percentage) } (LengthOrPercentageOrAuto::Auto, LengthOrPercentageOrAuto::Auto) => { Ok(LengthOrPercentageOrAuto::Auto) } (this, other) => { let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrAuto::Calc(result)), _ => Err(()), } } } } #[inline] fn get_zero_value(&self) -> Option<Self> { match *self { LengthOrPercentageOrAuto::Length(_) | LengthOrPercentageOrAuto::Percentage(_) | LengthOrPercentageOrAuto::Calc(_) => { Some(LengthOrPercentageOrAuto::Length(Au(0))) }, LengthOrPercentageOrAuto::Auto => { None }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this: Option<CalcLengthOrPercentage> = From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); this.compute_distance(&other) } } } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrAuto::Length(ref this), LengthOrPercentageOrAuto::Length(ref other)) => { let diff = (this.0 - other.0) as f64; Ok(diff * diff) }, (LengthOrPercentageOrAuto::Percentage(ref this), LengthOrPercentageOrAuto::Percentage(ref other)) => { let diff = this.0 as f64 - other.0 as f64; Ok(diff * diff) }, (this, other) => { let this: Option<CalcLengthOrPercentage> = 
From::from(this); let other: Option<CalcLengthOrPercentage> = From::from(other); if let (Some(this), Some(other)) = (this, other) { let length_diff = (this.unclamped_length().0 - other.unclamped_length().0) as f64; let percentage_diff = (this.percentage() - other.percentage()) as f64; Ok(length_diff * length_diff + percentage_diff * percentage_diff) } else { Err(()) } } } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for LengthOrPercentageOrNone { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Length) } (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(LengthOrPercentageOrNone::Percentage) } (LengthOrPercentageOrNone::None, LengthOrPercentageOrNone::None) => { Ok(LengthOrPercentageOrNone::None) } (this, other) => { let this = <Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); match this.add_weighted(&other, self_portion, other_portion) { Ok(Some(result)) => Ok(LengthOrPercentageOrNone::Calc(result)), _ => Err(()), } }, } } #[inline] fn get_zero_value(&self) -> Option<Self> { match *self { LengthOrPercentageOrNone::Length(_) | LengthOrPercentageOrNone::Percentage(_) | LengthOrPercentageOrNone::Calc(_) => { Some(LengthOrPercentageOrNone::Length(Au(0))) }, LengthOrPercentageOrNone::None => { None }, } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (LengthOrPercentageOrNone::Length(ref this), LengthOrPercentageOrNone::Length(ref other)) => { this.compute_distance(other) }, (LengthOrPercentageOrNone::Percentage(ref this), LengthOrPercentageOrNone::Percentage(ref 
other)) => { this.compute_distance(other) }, (this, other) => { // If one of the element is Auto, Option<> will be None, and the returned distance is Err(()) let this = <Option<CalcLengthOrPercentage>>::from(this); let other = <Option<CalcLengthOrPercentage>>::from(other); this.compute_distance(&other) }, } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MozLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MozLength::LengthOrPercentageOrAuto) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (MozLength::LengthOrPercentageOrAuto(ref this), MozLength::LengthOrPercentageOrAuto(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } /// https://drafts.csswg.org/css-transitions/#animtype-lpcalc impl Animatable for MaxLength { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { match (*self, *other) { (MaxLength::LengthOrPercentageOrNone(ref this), MaxLength::LengthOrPercentageOrNone(ref other)) => { this.add_weighted(other, self_portion, other_portion) .map(MaxLength::LengthOrPercentageOrNone) } _ => Err(()), } } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { match (*self, *other) { (MaxLength::LengthOrPercentageOrNone(ref this), MaxLength::LengthOrPercentageOrNone(ref other)) => { this.compute_distance(other) }, _ => Err(()), } } } /// http://dev.w3.org/csswg/css-transitions/#animtype-font-weight impl Animatable for FontWeight { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let a = (*self as u32) as f64; let b = (*other as u32) as f64; const NORMAL: f64 = 400.; let 
weight = (a - NORMAL) * self_portion + (b - NORMAL) * other_portion + NORMAL; Ok(if weight < 150. { FontWeight::Weight100 } else if weight < 250. { FontWeight::Weight200 } else if weight < 350. { FontWeight::Weight300 } else if weight < 450. { FontWeight::Weight400 } else if weight < 550. { FontWeight::Weight500 } else if weight < 650. { FontWeight::Weight600 } else if weight < 750. { FontWeight::Weight700 } else if weight < 850. { FontWeight::Weight800 } else { FontWeight::Weight900 }) } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(FontWeight::Weight400) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { let a = (*self as u32) as f64; let b = (*other as u32) as f64; a.compute_distance(&b) } } /// https://drafts.csswg.org/css-fonts/#font-stretch-prop impl Animatable for FontStretch { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { let from = f64::from(*self); let to = f64::from(*other); // FIXME: When `const fn` is available in release rust, make |normal|, below, const. let normal = f64::from(FontStretch::normal); let result = (from - normal) * self_portion + (to - normal) * other_portion + normal; Ok(result.into()) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { let from = f64::from(*self); let to = f64::from(*other); from.compute_distance(&to) } } /// We should treat font stretch as real number in order to interpolate this property. 
/// https://drafts.csswg.org/css-fonts-3/#font-stretch-animation impl From<FontStretch> for f64 { fn from(stretch: FontStretch) -> f64 { use self::FontStretch::*; match stretch { ultra_condensed => 1.0, extra_condensed => 2.0, condensed => 3.0, semi_condensed => 4.0, normal => 5.0, semi_expanded => 6.0, expanded => 7.0, extra_expanded => 8.0, ultra_expanded => 9.0, } } } impl Into<FontStretch> for f64 { fn into(self) -> FontStretch { use properties::longhands::font_stretch::computed_value::T::*; let index = (self + 0.5).floor().min(9.0).max(1.0); static FONT_STRETCH_ENUM_MAP: [FontStretch; 9] = [ ultra_condensed, extra_condensed, condensed, semi_condensed, normal, semi_expanded, expanded, extra_expanded, ultra_expanded ]; FONT_STRETCH_ENUM_MAP[(index - 1.0) as usize] } } // Like std::macros::try!, but for Option<>. macro_rules! option_try { ($e:expr) => (match $e { Some(e) => e, None => return None }) } /// https://drafts.csswg.org/css-transitions/#animtype-simple-list impl<H: Animatable, V: Animatable> Animatable for generic_position::Position<H, V> { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(generic_position::Position { horizontal: self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?, vertical: self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?, }) } #[inline] fn get_zero_value(&self) -> Option<Self> { Some(generic_position::Position { horizontal: option_try!(self.horizontal.get_zero_value()), vertical: option_try!(self.vertical.get_zero_value()), }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { Ok(self.horizontal.compute_squared_distance(&other.horizontal)? + self.vertical.compute_squared_distance(&other.vertical)?) 
} } impl<H, V> RepeatableListAnimatable for generic_position::Position<H, V> where H: RepeatableListAnimatable, V: RepeatableListAnimatable {} /// https://drafts.csswg.org/css-transitions/#animtype-rect impl Animatable for ClipRect { #[inline] fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(ClipRect { top: self.top.add_weighted(&other.top, self_portion, other_portion)?, right: self.right.add_weighted(&other.right, self_portion, other_portion)?, bottom: self.bottom.add_weighted(&other.bottom, self_portion, other_portion)?, left: self.left.add_weighted(&other.left, self_portion, other_portion)?, }) } #[inline] fn compute_distance(&self, other: &Self) -> Result<f64, ()> { self.compute_squared_distance(other).map(|sd| sd.sqrt()) } #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> { let list = [ self.top.compute_distance(&other.top)?, self.right.compute_distance(&other.right)?, self.bottom.compute_distance(&other.bottom)?, self.left.compute_distance(&other.left)? ]; Ok(list.iter().fold(0.0f64, |sum, diff| sum + diff * diff)) } } /// Check if it's possible to do a direct numerical interpolation /// between these two transform lists. 
/// http://dev.w3.org/csswg/css-transforms/#transform-transform-animation fn can_interpolate_list(from_list: &[TransformOperation], to_list: &[TransformOperation]) -> bool { // Lists must be equal length if from_list.len() != to_list.len() { return false; } // Each transform operation must match primitive type in other list for (from, to) in from_list.iter().zip(to_list) { match (from, to) { (&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) | (&TransformOperation::Skew(..), &TransformOperation::Skew(..)) | (&TransformOperation::Translate(..), &TransformOperation::Translate(..)) | (&TransformOperation::Scale(..), &TransformOperation::Scale(..)) | (&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) | (&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => {} _ => { return false; } } } true } /// Build an equivalent 'identity transform function list' based /// on an existing transform list. /// http://dev.w3.org/csswg/css-transforms/#none-transform-animation fn build_identity_transform_list(list: &[TransformOperation]) -> Vec<TransformOperation> { let mut result = vec!(); for operation in list { match *operation { TransformOperation::Matrix(..) => { let identity = ComputedMatrix::identity(); result.push(TransformOperation::Matrix(identity)); } TransformOperation::MatrixWithPercents(..) => {} TransformOperation::Skew(..) => { result.push(TransformOperation::Skew(Angle::zero(), Angle::zero())) } TransformOperation::Translate(..) => { result.push(TransformOperation::Translate(LengthOrPercentage::zero(), LengthOrPercentage::zero(), Au(0))); } TransformOperation::Scale(..) => { result.push(TransformOperation::Scale(1.0, 1.0, 1.0)); } TransformOperation::Rotate(..) => { result.push(TransformOperation::Rotate(0.0, 0.0, 1.0, Angle::zero())); } TransformOperation::Perspective(..) | TransformOperation::AccumulateMatrix { .. } | TransformOperation::InterpolateMatrix { .. 
} => { // Perspective: We convert a perspective function into an equivalent // ComputedMatrix, and then decompose/interpolate/recompose these matrices. // AccumulateMatrix/InterpolateMatrix: We do interpolation on // AccumulateMatrix/InterpolateMatrix by reading it as a ComputedMatrix // (with layout information), and then do matrix interpolation. // // Therefore, we use an identity matrix to represent the identity transform list. // http://dev.w3.org/csswg/css-transforms/#identity-transform-function let identity = ComputedMatrix::identity(); result.push(TransformOperation::Matrix(identity)); } } } result } /// A wrapper for calling add_weighted that interpolates the distance of the two values from /// an initial_value and uses that to produce an interpolated value. /// This is used for values such as 'scale' where the initial value is 1 and where if we interpolate /// the absolute values, we will produce odd results for accumulation. fn add_weighted_with_initial_val<T: Animatable>(a: &T, b: &T, a_portion: f64, b_portion: f64, initial_val: &T) -> Result<T, ()> { let a = a.add_weighted(&initial_val, 1.0, -1.0)?; let b = b.add_weighted(&initial_val, 1.0, -1.0)?; let result = a.add_weighted(&b, a_portion, b_portion)?; result.add_weighted(&initial_val, 1.0, 1.0) } /// Add two transform lists. 
/// http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms fn add_weighted_transform_lists(from_list: &[TransformOperation], to_list: &[TransformOperation], self_portion: f64, other_portion: f64) -> TransformList { let mut result = vec![]; if can_interpolate_list(from_list, to_list) { for (from, to) in from_list.iter().zip(to_list) { match (from, to) { (&TransformOperation::Matrix(from), &TransformOperation::Matrix(_to)) => { let sum = from.add_weighted(&_to, self_portion, other_portion).unwrap(); result.push(TransformOperation::Matrix(sum)); } (&TransformOperation::MatrixWithPercents(_), &TransformOperation::MatrixWithPercents(_)) => { // We don't add_weighted `-moz-transform` matrices yet. // They contain percentage values. {} } (&TransformOperation::Skew(fx, fy), &TransformOperation::Skew(tx, ty)) => { let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap(); let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap(); result.push(TransformOperation::Skew(ix, iy)); } (&TransformOperation::Translate(fx, fy, fz), &TransformOperation::Translate(tx, ty, tz)) => { let ix = fx.add_weighted(&tx, self_portion, other_portion).unwrap(); let iy = fy.add_weighted(&ty, self_portion, other_portion).unwrap(); let iz = fz.add_weighted(&tz, self_portion, other_portion).unwrap(); result.push(TransformOperation::Translate(ix, iy, iz)); } (&TransformOperation::Scale(fx, fy, fz), &TransformOperation::Scale(tx, ty, tz)) => { let ix = add_weighted_with_initial_val(&fx, &tx, self_portion, other_portion, &1.0).unwrap(); let iy = add_weighted_with_initial_val(&fy, &ty, self_portion, other_portion, &1.0).unwrap(); let iz = add_weighted_with_initial_val(&fz, &tz, self_portion, other_portion, &1.0).unwrap(); result.push(TransformOperation::Scale(ix, iy, iz)); } (&TransformOperation::Rotate(fx, fy, fz, fa), &TransformOperation::Rotate(tx, ty, tz, ta)) => { let norm_f = ((fx * fx) + (fy * fy) + (fz * fz)).sqrt(); let norm_t = ((tx * tx) + (ty * ty) + (tz * 
tz)).sqrt(); let (fx, fy, fz) = (fx / norm_f, fy / norm_f, fz / norm_f); let (tx, ty, tz) = (tx / norm_t, ty / norm_t, tz / norm_t); if fx == tx && fy == ty && fz == tz { let ia = fa.add_weighted(&ta, self_portion, other_portion).unwrap(); result.push(TransformOperation::Rotate(fx, fy, fz, ia)); } else { let matrix_f = rotate_to_matrix(fx, fy, fz, fa); let matrix_t = rotate_to_matrix(tx, ty, tz, ta); let sum = matrix_f.add_weighted(&matrix_t, self_portion, other_portion) .unwrap(); result.push(TransformOperation::Matrix(sum)); } } (&TransformOperation::Perspective(fd), &TransformOperation::Perspective(_td)) => { let mut fd_matrix = ComputedMatrix::identity(); let mut td_matrix = ComputedMatrix::identity(); fd_matrix.m43 = -1. / fd.to_f32_px(); td_matrix.m43 = -1. / _td.to_f32_px(); let sum = fd_matrix.add_weighted(&td_matrix, self_portion, other_portion) .unwrap(); result.push(TransformOperation::Matrix(sum)); } _ => { // This should be unreachable due to the can_interpolate_list() call. 
unreachable!(); } } } } else { use values::specified::Percentage; let from_transform_list = TransformList(Some(from_list.to_vec())); let to_transform_list = TransformList(Some(to_list.to_vec())); result.push( TransformOperation::InterpolateMatrix { from_list: from_transform_list, to_list: to_transform_list, progress: Percentage(other_portion as f32) }); } TransformList(Some(result)) } /// https://drafts.csswg.org/css-transforms/#Rotate3dDefined fn rotate_to_matrix(x: f32, y: f32, z: f32, a: Angle) -> ComputedMatrix { let half_rad = a.radians() / 2.0; let sc = (half_rad).sin() * (half_rad).cos(); let sq = (half_rad).sin().powi(2); ComputedMatrix { m11: 1.0 - 2.0 * (y * y + z * z) * sq, m12: 2.0 * (x * y * sq - z * sc), m13: 2.0 * (x * z * sq + y * sc), m14: 0.0, m21: 2.0 * (x * y * sq + z * sc), m22: 1.0 - 2.0 * (x * x + z * z) * sq, m23: 2.0 * (y * z * sq - x * sc), m24: 0.0, m31: 2.0 * (x * z * sq - y * sc), m32: 2.0 * (y * z * sq + x * sc), m33: 1.0 - 2.0 * (x * x + y * y) * sq, m34: 0.0, m41: 0.0, m42: 0.0, m43: 0.0, m44: 1.0 } } /// A 2d matrix for interpolation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] #[allow(missing_docs)] pub struct InnerMatrix2D { pub m11: CSSFloat, pub m12: CSSFloat, pub m21: CSSFloat, pub m22: CSSFloat, } /// A 2d translation function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Translate2D(f32, f32); /// A 2d scale function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Scale2D(f32, f32); /// A decomposed 2d matrix. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct MatrixDecomposed2D { /// The translation function. pub translate: Translate2D, /// The scale function. pub scale: Scale2D, /// The rotation angle. pub angle: f32, /// The inner matrix. 
pub matrix: InnerMatrix2D, } impl Animatable for InnerMatrix2D { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(InnerMatrix2D { m11: add_weighted_with_initial_val(&self.m11, &other.m11, self_portion, other_portion, &1.0)?, m12: self.m12.add_weighted(&other.m12, self_portion, other_portion)?, m21: self.m21.add_weighted(&other.m21, self_portion, other_portion)?, m22: add_weighted_with_initial_val(&self.m22, &other.m22, self_portion, other_portion, &1.0)?, }) } } impl Animatable for Translate2D { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Translate2D( self.0.add_weighted(&other.0, self_portion, other_portion)?, self.1.add_weighted(&other.1, self_portion, other_portion)?, )) } } impl Animatable for Scale2D { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { Ok(Scale2D( add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?, add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?, )) } } impl Animatable for MatrixDecomposed2D { /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { // If x-axis of one is flipped, and y-axis of the other, // convert to an unflipped rotation. let mut scale = self.scale; let mut angle = self.angle; let mut other_angle = other.angle; if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) { scale.0 = -scale.0; scale.1 = -scale.1; angle += if angle < 0.0 {180.} else {-180.}; } // Don't rotate the long way around. if angle == 0.0 { angle = 360. } if other_angle == 0.0 { other_angle = 360. } if (angle - other_angle).abs() > 180. { if angle > other_angle { angle -= 360. } else{ other_angle -= 360. } } // Interpolate all values. 
let translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?; let scale = scale.add_weighted(&other.scale, self_portion, other_portion)?; let angle = angle.add_weighted(&other_angle, self_portion, other_portion)?; let matrix = self.matrix.add_weighted(&other.matrix, self_portion, other_portion)?; Ok(MatrixDecomposed2D { translate: translate, scale: scale, angle: angle, matrix: matrix, }) } } impl Animatable for ComputedMatrix { fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> { if self.is_3d() || other.is_3d() { let decomposed_from = decompose_3d_matrix(*self); let decomposed_to = decompose_3d_matrix(*other); match (decomposed_from, decomposed_to) { (Ok(from), Ok(to)) => { let sum = from.add_weighted(&to, self_portion, other_portion)?; Ok(ComputedMatrix::from(sum)) }, _ => { let result = if self_portion > other_portion {*self} else {*other}; Ok(result) } } } else { let decomposed_from = MatrixDecomposed2D::from(*self); let decomposed_to = MatrixDecomposed2D::from(*other); let sum = decomposed_from.add_weighted(&decomposed_to, self_portion, other_portion)?; Ok(ComputedMatrix::from(sum)) } } } impl From<ComputedMatrix> for MatrixDecomposed2D { /// Decompose a 2D matrix. /// https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix fn from(matrix: ComputedMatrix) -> MatrixDecomposed2D { let mut row0x = matrix.m11; let mut row0y = matrix.m12; let mut row1x = matrix.m21; let mut row1y = matrix.m22; let translate = Translate2D(matrix.m41, matrix.m42); let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(), (row1x * row1x + row1y * row1y).sqrt()); // If determinant is negative, one axis was flipped. let determinant = row0x * row1y - row0y * row1x; if determinant < 0. { if row0x < row1y { scale.0 = -scale.0; } else { scale.1 = -scale.1; } } // Renormalize matrix to remove scale. if scale.0 != 0.0 { row0x *= 1. / scale.0; row0y *= 1. / scale.0; } if scale.1 != 0.0 { row1x *= 1. 
/ scale.1; row1y *= 1. / scale.1; } // Compute rotation and renormalize matrix. let mut angle = row0y.atan2(row0x); if angle != 0.0 { let sn = -row0y; let cs = row0x; let m11 = row0x; let m12 = row0y; let m21 = row1x; let m22 = row1y; row0x = cs * m11 + sn * m21; row0y = cs * m12 + sn * m22; row1x = -sn * m11 + cs * m21; row1y = -sn * m12 + cs * m22; } let m = InnerMatrix2D { m11: row0x, m12: row0y, m21: row1x, m22: row1y, }; // Convert into degrees because our rotation functions expect it. angle = angle.to_degrees(); MatrixDecomposed2D { translate: translate, scale: scale, angle: angle, matrix: m, } } } impl From<MatrixDecomposed2D> for ComputedMatrix { /// Recompose a 2D matrix. /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix fn from(decomposed: MatrixDecomposed2D) -> ComputedMatrix { let mut computed_matrix = ComputedMatrix::identity(); computed_matrix.m11 = decomposed.matrix.m11; computed_matrix.m12 = decomposed.matrix.m12; computed_matrix.m21 = decomposed.matrix.m21; computed_matrix.m22 = decomposed.matrix.m22; // Translate matrix. computed_matrix.m41 = decomposed.translate.0; computed_matrix.m42 = decomposed.translate.1; // Rotate matrix. let angle = decomposed.angle.to_radians(); let cos_angle = angle.cos(); let sin_angle = angle.sin(); let mut rotate_matrix = ComputedMatrix::identity(); rotate_matrix.m11 = cos_angle; rotate_matrix.m12 = sin_angle; rotate_matrix.m21 = -sin_angle; rotate_matrix.m22 = cos_angle; // Multiplication of computed_matrix and rotate_matrix computed_matrix = multiply(rotate_matrix, computed_matrix); // Scale matrix. 
computed_matrix.m11 *= decomposed.scale.0; computed_matrix.m12 *= decomposed.scale.0; computed_matrix.m21 *= decomposed.scale.1; computed_matrix.m22 *= decomposed.scale.1; computed_matrix } } #[cfg(feature = "gecko")] impl<'a> From< &'a RawGeckoGfxMatrix4x4> for ComputedMatrix { fn from(m: &'a RawGeckoGfxMatrix4x4) -> ComputedMatrix { ComputedMatrix { m11: m[0], m12: m[1], m13: m[2], m14: m[3], m21: m[4], m22: m[5], m23: m[6], m24: m[7], m31: m[8], m32: m[9], m33: m[10], m34: m[11], m41: m[12], m42: m[13], m43: m[14], m44: m[15], } } } #[cfg(feature = "gecko")] impl From<ComputedMatrix> for RawGeckoGfxMatrix4x4 { fn from(matrix: ComputedMatrix) -> RawGeckoGfxMatrix4x4 { [ matrix.m11, matrix.m12, matrix.m13, matrix.m14, matrix.m21, matrix.m22, matrix.m23, matrix.m24, matrix.m31, matrix.m32, matrix.m33, matrix.m34, matrix.m41, matrix.m42, matrix.m43, matrix.m44 ] } } /// A 3d translation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Translate3D(f32, f32, f32); /// A 3d scale function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Scale3D(f32, f32, f32); /// A 3d skew function. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Skew(f32, f32, f32); /// A 3d perspective transformation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Perspective(f32, f32, f32, f32); /// A quaternion used to represent a rotation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct Quaternion(f32, f32, f32, f32); /// A decomposed 3d matrix. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(HeapSizeOf))] pub struct MatrixDecomposed3D { /// A translation function. pub translate: Translate3D, /// A scale function. pub scale: Scale3D, /// The skew component of the transformation. pub skew: Skew, /// The perspective component of the transformation. 
pub perspective: Perspective, /// The quaternion used to represent the rotation. pub quaternion: Quaternion, } /// Decompose a 3D matrix. /// https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix fn decompose_3d_matrix(mut matrix: ComputedMatrix) -> Result<MatrixDecomposed3D, ()> { // Normalize the matrix. if matrix.m44 == 0.0 { return Err(()); } let scaling_factor = matrix.m44; % for i in range(1, 5): % for j in range(1, 5): matrix.m${i}${j} /= scaling_factor; % endfor % endfor // perspective_matrix is used to solve for perspective, but it also provides // an easy way to test for singularity of the upper 3x3 component. let mut perspective_matrix = matrix; % for i in range(1, 4): perspective_matrix.m${i}4 = 0.0; % endfor perspective_matrix.m44 = 1.0; if perspective_matrix.determinant() == 0.0 { return Err(()); } // First, isolate perspective. let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 { let right_hand_side: [f32; 4] = [ matrix.m14, matrix.m24, matrix.m34, matrix.m44 ]; perspective_matrix = perspective_matrix.inverse().unwrap(); // Transpose perspective_matrix perspective_matrix = ComputedMatrix { % for i in range(1, 5): % for j in range(1, 5): m${i}${j}: perspective_matrix.m${j}${i}, % endfor % endfor }; // Multiply right_hand_side with perspective_matrix let mut tmp: [f32; 4] = [0.0; 4]; % for i in range(1, 5): tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) + (right_hand_side[1] * perspective_matrix.m2${i}) + (right_hand_side[2] * perspective_matrix.m3${i}) + (right_hand_side[3] * perspective_matrix.m4${i}); % endfor Perspective(tmp[0], tmp[1], tmp[2], tmp[3]) } else { Perspective(0.0, 0.0, 0.0, 1.0) }; // Next take care of translation let translate = Translate3D ( matrix.m41, matrix.m42, matrix.m43 ); // Now get scale and shear. 
'row' is a 3 element array of 3 component vectors let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3]; % for i in range(1, 4): row[${i - 1}][0] = matrix.m${i}1; row[${i - 1}][1] = matrix.m${i}2; row[${i - 1}][2] = matrix.m${i}3; % endfor // Compute X scale factor and normalize first row. let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt(); let mut scale = Scale3D(row0len, 0.0, 0.0); row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len]; // Compute XY shear factor and make 2nd row orthogonal to 1st. let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0); row[1] = combine(row[1], row[0], 1.0, -skew.0); // Now, compute Y scale and normalize 2nd row. let row1len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt(); scale.1 = row1len; row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len]; skew.0 /= scale.1; // Compute XZ and YZ shears, orthogonalize 3rd row skew.1 = dot(row[0], row[2]); row[2] = combine(row[2], row[0], 1.0, -skew.1); skew.2 = dot(row[1], row[2]); row[2] = combine(row[2], row[1], 1.0, -skew.2); // Next, get Z scale and normalize 3rd row. let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt(); scale.2 = row2len; row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len]; skew.1 /= scale.2; skew.2 /= scale.2; // At this point, the matrix (in rows) is orthonormal. // Check for a coordinate system flip. If the determinant // is -1, then negate the matrix and the scaling factors. 
let pdum3 = cross(row[1], row[2]); if dot(row[0], pdum3) < 0.0 { % for i in range(3): scale.${i} *= -1.0; row[${i}][0] *= -1.0; row[${i}][1] *= -1.0; row[${i}][2] *= -1.0; % endfor } // Now, get the rotations out let mut quaternion = Quaternion ( 0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0)).sqrt(), 0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0)).sqrt() ); if row[2][1] > row[1][2] { quaternion.0 = -quaternion.0 } if row[0][2] > row[2][0] { quaternion.1 = -quaternion.1 } if row[1][0] > row[0][1] { quaternion.2 = -quaternion.2 } Ok(MatrixDecomposed3D { translate: translate, scale: scale, skew: skew, perspective: perspective, quaternion: quaternion }) } // Combine 2 point. fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] { [ (ascl * a[0]) + (bscl * b[0]), (ascl * a[1]) + (bscl * b[1]), (ascl * a[2]) + (bscl * b[2]) ] } // Dot product. fn dot(a: [f32; 3], b: [f32; 3]) -> f32 { a[0] * b[0] + a[1] * b[1] + a[2] * b[2] } // Cross product. 
fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] {
    [
        row1[1] * row2[2] - row1[2] * row2[1],
        row1[2] * row2[0] - row1[0] * row2[2],
        row1[0] * row2[1] - row1[1] * row2[0]
    ]
}

impl Animatable for Translate3D {
    /// Componentwise weighted sum; the identity for translation is 0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Translate3D(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Scale3D {
    /// Componentwise weighted sum around the scale identity value 1.0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Scale3D(
            add_weighted_with_initial_val(&self.0, &other.0, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.1, &other.1, self_portion, other_portion, &1.0)?,
            add_weighted_with_initial_val(&self.2, &other.2, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for Skew {
    /// Componentwise weighted sum; the identity for skew is 0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Skew(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
        ))
    }
}

impl Animatable for Perspective {
    /// Componentwise weighted sum; the last (m44-like) component interpolates
    /// around its identity value 1.0.
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(Perspective(
            self.0.add_weighted(&other.0, self_portion, other_portion)?,
            self.1.add_weighted(&other.1, self_portion, other_portion)?,
            self.2.add_weighted(&other.2, self_portion, other_portion)?,
            add_weighted_with_initial_val(&self.3, &other.3, self_portion, other_portion, &1.0)?,
        ))
    }
}

impl Animatable for MatrixDecomposed3D {
    /// https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        assert!(self_portion + other_portion == 1.0f64 || other_portion == 1.0f64,
                "add_weighted should only be used for interpolating or accumulating transforms");

        let mut sum = *self;

        // Add translate, scale, skew and perspective components.
        sum.translate = self.translate.add_weighted(&other.translate, self_portion, other_portion)?;
        sum.scale = self.scale.add_weighted(&other.scale, self_portion, other_portion)?;
        sum.skew = self.skew.add_weighted(&other.skew, self_portion, other_portion)?;
        sum.perspective = self.perspective.add_weighted(&other.perspective, self_portion, other_portion)?;

        // Add quaternions using spherical linear interpolation (Slerp).
        //
        // We take a specialized code path for accumulation (where other_portion is 1)
        if other_portion == 1.0 {
            if self_portion == 0.0 {
                return Ok(*other)
            }

            // Clamp w into acos' domain to guard against floating-point drift.
            let clamped_w = self.quaternion.3.min(1.0).max(-1.0);

            // Determine the scale factor.
            let mut theta = clamped_w.acos();
            let mut scale = if theta == 0.0 { 0.0 } else { 1.0 / theta.sin() };
            theta *= self_portion as f32;
            scale *= theta.sin();

            // Scale the self matrix by self_portion.
            let mut scaled_self = *self;
            % for i in range(3):
            scaled_self.quaternion.${i} *= scale;
            % endfor
            scaled_self.quaternion.3 = theta.cos();

            // Multiply scaled-self by other.
            // Hamilton product of the two quaternions.
            let a = &scaled_self.quaternion;
            let b = &other.quaternion;
            sum.quaternion = Quaternion(
                a.3 * b.0 + a.0 * b.3 + a.1 * b.2 - a.2 * b.1,
                a.3 * b.1 - a.0 * b.2 + a.1 * b.3 + a.2 * b.0,
                a.3 * b.2 + a.0 * b.1 - a.1 * b.0 + a.2 * b.3,
                a.3 * b.3 - a.0 * b.0 - a.1 * b.1 - a.2 * b.2,
            );
        } else {
            // Interpolation path: slerp between the two quaternions.
            // `product` is the cosine of the angle between them.
            let mut product = self.quaternion.0 * other.quaternion.0 +
                              self.quaternion.1 * other.quaternion.1 +
                              self.quaternion.2 * other.quaternion.2 +
                              self.quaternion.3 * other.quaternion.3;

            // Clamp product to -1.0 <= product <= 1.0
            product = product.min(1.0);
            product = product.max(-1.0);

            // Identical orientations: nothing to rotate between.
            if product == 1.0 {
                return Ok(sum);
            }

            let theta = product.acos();
            // sin(theta) == sqrt(1 - cos^2 theta), so w is sin(t*theta)/sin(theta).
            let w = (other_portion as f32 * theta).sin() * 1.0 / (1.0 - product * product).sqrt();

            let mut a = *self;
            let mut b = *other;
            % for i in range(4):
            // cos(t*theta) - cos(theta)*w == sin((1-t)*theta)/sin(theta),
            // the standard slerp weight for the first endpoint.
            a.quaternion.${i} *= (other_portion as f32 * theta).cos() - product * w;
            b.quaternion.${i} *= w;
            sum.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i};
            % endfor
        }

        Ok(sum)
    }
}

impl From<MatrixDecomposed3D> for ComputedMatrix {
    /// Recompose a 3D matrix.
    /// https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix
    fn from(decomposed: MatrixDecomposed3D) -> ComputedMatrix {
        let mut matrix = ComputedMatrix::identity();

        // Apply perspective
        % for i in range(1, 5):
        matrix.m${i}4 = decomposed.perspective.${i - 1};
        % endfor

        // Apply translation
        % for i in range(1, 4):
        % for j in range(1, 4):
        matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i};
        % endfor
        % endfor

        // Apply rotation
        let x = decomposed.quaternion.0;
        let y = decomposed.quaternion.1;
        let z = decomposed.quaternion.2;
        let w = decomposed.quaternion.3;

        // Construct a composite rotation matrix from the quaternion values
        // rotationMatrix is a identity 4x4 matrix initially
        let mut rotation_matrix = ComputedMatrix::identity();
        rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z);
        rotation_matrix.m12 = 2.0 * (x * y + z * w);
        rotation_matrix.m13 = 2.0 * (x * z - y * w);
        rotation_matrix.m21 = 2.0 * (x * y - z * w);
        rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z);
        rotation_matrix.m23 = 2.0 * (y * z + x * w);
        rotation_matrix.m31 = 2.0 * (x * z + y * w);
        rotation_matrix.m32 = 2.0 * (y * z - x * w);
        rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y);

        matrix = multiply(rotation_matrix, matrix);

        // Apply skew
        // `temp` is reused for each shear factor; the previously-set entry is
        // zeroed before the next multiply so each factor is applied alone.
        let mut temp = ComputedMatrix::identity();
        if decomposed.skew.2 != 0.0 {
            temp.m32 = decomposed.skew.2;
            matrix = multiply(matrix, temp);
        }
        if decomposed.skew.1 != 0.0 {
            temp.m32 = 0.0;
            temp.m31 = decomposed.skew.1;
            matrix = multiply(matrix, temp);
        }
        if decomposed.skew.0 != 0.0 {
            temp.m31 = 0.0;
            temp.m21 = decomposed.skew.0;
            matrix = multiply(matrix, temp);
        }

        // Apply scale
        % for i in range(1, 4):
        % for j in range(1, 4):
        matrix.m${i}${j} *= decomposed.scale.${i - 1};
        % endfor
        % endfor

        matrix
    }
}

// Multiplication of two 4x4 matrices.
fn multiply(a: ComputedMatrix, b: ComputedMatrix) -> ComputedMatrix {
    // Row-by-column product, unrolled over all 16 entries by the template.
    let mut a_clone = a;
    % for i in range(1, 5):
    % for j in range(1, 5):
    a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) +
                        (a.m${i}2 * b.m2${j}) +
                        (a.m${i}3 * b.m3${j}) +
                        (a.m${i}4 * b.m4${j});
    % endfor
    % endfor
    a_clone
}

impl ComputedMatrix {
    /// True if any entry outside the 2D-affine part (upper-left 2x2 plus the
    /// m41/m42 translation) differs from the 4x4 identity.
    fn is_3d(&self) -> bool {
        self.m13 != 0.0 || self.m14 != 0.0 ||
        self.m23 != 0.0 || self.m24 != 0.0 ||
        self.m31 != 0.0 || self.m32 != 0.0 || self.m33 != 1.0 || self.m34 != 0.0 ||
        self.m43 != 0.0 || self.m44 != 1.0
    }

    /// Determinant of the full 4x4 matrix, written out as the 24-term
    /// cofactor expansion.
    fn determinant(&self) -> CSSFloat {
        self.m14 * self.m23 * self.m32 * self.m41 -
        self.m13 * self.m24 * self.m32 * self.m41 -
        self.m14 * self.m22 * self.m33 * self.m41 +
        self.m12 * self.m24 * self.m33 * self.m41 +
        self.m13 * self.m22 * self.m34 * self.m41 -
        self.m12 * self.m23 * self.m34 * self.m41 -
        self.m14 * self.m23 * self.m31 * self.m42 +
        self.m13 * self.m24 * self.m31 * self.m42 +
        self.m14 * self.m21 * self.m33 * self.m42 -
        self.m11 * self.m24 * self.m33 * self.m42 -
        self.m13 * self.m21 * self.m34 * self.m42 +
        self.m11 * self.m23 * self.m34 * self.m42 +
        self.m14 * self.m22 * self.m31 * self.m43 -
        self.m12 * self.m24 * self.m31 * self.m43 -
        self.m14 * self.m21 * self.m32 * self.m43 +
        self.m11 * self.m24 * self.m32 * self.m43 +
        self.m12 * self.m21 * self.m34 * self.m43 -
        self.m11 * self.m22 * self.m34 * self.m43 -
        self.m13 * self.m22 * self.m31 * self.m44 +
        self.m12 * self.m23 * self.m31 * self.m44 +
        self.m13 * self.m21 * self.m32 * self.m44 -
        self.m11 * self.m23 * self.m32 * self.m44 -
        self.m12 * self.m21 * self.m33 * self.m44 +
        self.m11 * self.m22 * self.m33 * self.m44
    }

    /// Inverse via the adjugate: each entry is a signed 3x3 cofactor scaled
    /// by 1/det. Returns None when the matrix is singular (det == 0).
    fn inverse(&self) -> Option<ComputedMatrix> {
        let mut det = self.determinant();

        if det == 0.0 {
            return None;
        }

        det = 1.0 / det;
        let x = ComputedMatrix {
            m11: det *
            (self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 +
             self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 -
             self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44),
            m12: det *
            (self.m14*self.m33*self.m42 -
             self.m13*self.m34*self.m42 - self.m14*self.m32*self.m43 +
             self.m12*self.m34*self.m43 + self.m13*self.m32*self.m44 -
             self.m12*self.m33*self.m44),
            m13: det *
            (self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 +
             self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 -
             self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44),
            m14: det *
            (self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 -
             self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 +
             self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34),
            m21: det *
            (self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 -
             self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 +
             self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44),
            m22: det *
            (self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 +
             self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 -
             self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44),
            m23: det *
            (self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 -
             self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 +
             self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44),
            m24: det *
            (self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 +
             self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 -
             self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34),
            m31: det *
            (self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 +
             self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 -
             self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44),
            m32: det *
            (self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 -
             self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 +
             self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44),
            m33: det *
            (self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 +
             self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 -
             self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44),
            m34: det *
            (self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 -
             self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 +
             self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34),
            m41: det *
            (self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 -
             self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 +
             self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43),
            m42: det *
            (self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 +
             self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 -
             self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43),
            m43: det *
            (self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 -
             self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 +
             self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43),
            m44: det *
            (self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 +
             self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 -
             self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33),
        };

        Some(x)
    }
}

/// https://drafts.csswg.org/css-transforms/#interpolation-of-transforms
impl Animatable for TransformList {
    #[inline]
    fn add_weighted(&self, other: &TransformList, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms
        let result = match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                // Two lists of transforms
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&Some(ref from_list), &None) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                // A 'none' side is replaced by a matching list of identity
                // transforms so the lists interpolate pairwise.
                let to_list = build_identity_transform_list(from_list);
                add_weighted_transform_lists(from_list, &to_list, self_portion, other_portion)
            }
            (&None, &Some(ref to_list)) => {
                // http://dev.w3.org/csswg/css-transforms/#none-transform-animation
                let from_list = build_identity_transform_list(to_list);
                add_weighted_transform_lists(&from_list, to_list, self_portion, other_portion)
            }
            _ => {
                // http://dev.w3.org/csswg/css-transforms/#none-none-animation
                TransformList(None)
            }
        };

        Ok(result)
    }

    /// Addition of two transform lists is concatenation.
    fn add(&self, other: &Self) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                // Concatenate the two lists.
                Ok(TransformList(Some([&from_list[..], &to_list[..]].concat())))
            }
            (&Some(_), &None) => {
                Ok(self.clone())
            }
            (&None, &Some(_)) => {
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }

    #[inline]
    fn accumulate(&self, other: &Self, count: u64) -> Result<Self, ()> {
        match (&self.0, &other.0) {
            (&Some(ref from_list), &Some(ref to_list)) => {
                if can_interpolate_list(from_list, to_list) {
                    // Matching lists accumulate as |self| * count + |other|.
                    Ok(add_weighted_transform_lists(from_list, &to_list, count as f64, 1.0))
                } else {
                    use std::i32;
                    // Mismatched lists: defer to an AccumulateMatrix operation,
                    // clamping |count| to the range representable by an i32.
                    let result = vec![TransformOperation::AccumulateMatrix {
                        from_list: self.clone(),
                        to_list: other.clone(),
                        count: cmp::min(count, i32::MAX as u64) as i32
                    }];
                    Ok(TransformList(Some(result)))
                }
            }
            (&Some(ref from_list), &None) => {
                Ok(add_weighted_transform_lists(from_list, from_list, count as f64, 0.0))
            }
            (&None, &Some(_)) => {
                // If |self| is 'none' then we are calculating:
                //
                //    none * |count| + |other|
                //    = none + |other|
                //    = |other|
                //
                // Hence the result is just |other|.
                Ok(other.clone())
            }
            _ => {
                Ok(TransformList(None))
            }
        }
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> {
        Some(TransformList(None))
    }
}

impl<T, U> Animatable for Either<T, U>
        where T: Animatable + Copy, U: Animatable + Copy,
{
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (*self, *other) {
            (Either::First(ref this), Either::First(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::First)
            },
            (Either::Second(ref this), Either::Second(ref other)) => {
                this.add_weighted(&other, self_portion, other_portion).map(Either::Second)
            },
            _ => {
                // Mismatched variants cannot be interpolated; fall back to
                // the discrete behavior of picking the larger portion's value.
                let result = if self_portion > other_portion {*self} else {*other};
                Ok(result)
            }
        }
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> {
        match *self {
            Either::First(ref this) => {
                this.get_zero_value().map(Either::First)
            },
            Either::Second(ref this) => {
                this.get_zero_value().map(Either::Second)
            },
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self,
              other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_distance(other)
            },
            _ => Err(())
        }
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&Either::First(ref this), &Either::First(ref other)) => {
                this.compute_squared_distance(other)
            },
            (&Either::Second(ref this), &Either::Second(ref other)) => {
                this.compute_squared_distance(other)
            },
            _ => Err(())
        }
    }
}

impl From<IntermediateRGBA> for RGBA {
    fn from(extended_rgba: IntermediateRGBA) -> RGBA {
        // RGBA::from_floats clamps each component values.
        RGBA::from_floats(extended_rgba.red,
                          extended_rgba.green,
                          extended_rgba.blue,
                          extended_rgba.alpha)
    }
}

impl From<RGBA> for IntermediateRGBA {
    fn from(rgba: RGBA) -> IntermediateRGBA {
        IntermediateRGBA::new(rgba.red_f32(),
                              rgba.green_f32(),
                              rgba.blue_f32(),
                              rgba.alpha_f32())
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
/// Unlike RGBA, each component value may exceed the range [0.0, 1.0].
pub struct IntermediateRGBA {
    /// The red component.
    pub red: f32,
    /// The green component.
    pub green: f32,
    /// The blue component.
    pub blue: f32,
    /// The alpha component.
    pub alpha: f32,
}

impl IntermediateRGBA {
    /// Returns a transparent color.
    #[inline]
    pub fn transparent() -> Self {
        Self::new(0., 0., 0., 0.)
    }

    /// Returns a new color.
    #[inline]
    pub fn new(red: f32, green: f32, blue: f32, alpha: f32) -> Self {
        IntermediateRGBA { red: red, green: green, blue: blue, alpha: alpha }
    }
}

/// Unlike Animatable for RGBA we don't clamp any component values.
impl Animatable for IntermediateRGBA {
    #[inline]
    fn add_weighted(&self, other: &IntermediateRGBA, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        // Color channels are interpolated premultiplied (see below), so the
        // weighted alpha is computed first.
        let mut alpha = self.alpha.add_weighted(&other.alpha, self_portion, other_portion)?;
        if alpha <= 0.
        {
            // Ideally we should return color value that only alpha component is
            // 0, but this is what current gecko does.
            Ok(IntermediateRGBA::transparent())
        } else {
            alpha = alpha.min(1.);
            // Interpolate premultiplied channels, then divide by the
            // interpolated alpha to get back to straight (unmultiplied) form.
            let red = (self.red * self.alpha).add_weighted(
                &(other.red * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let green = (self.green * self.alpha).add_weighted(
                &(other.green * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            let blue = (self.blue * self.alpha).add_weighted(
                &(other.blue * other.alpha), self_portion, other_portion
            )? * 1. / alpha;
            Ok(IntermediateRGBA::new(red, green, blue, alpha))
        }
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> {
        Some(IntermediateRGBA::transparent())
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Distance is measured in premultiplied (alpha-weighted) space,
        // with alpha itself as a fourth coordinate.
        let start = [ self.alpha,
                      self.red * self.alpha,
                      self.green * self.alpha,
                      self.blue * self.alpha ];
        let end = [ other.alpha,
                    other.red * other.alpha,
                    other.green * other.alpha,
                    other.blue * other.alpha ];
        let diff = start.iter().zip(&end)
                        .fold(0.0f64, |n, (&a, &b)| {
                            let diff = (a - b) as f64;
                            n + diff * diff
                        });
        Ok(diff)
    }
}

impl From<Either<Color, Auto>> for Either<IntermediateColor, Auto> {
    fn from(from: Either<Color, Auto>) -> Either<IntermediateColor, Auto> {
        match from {
            Either::First(from) => Either::First(from.into()),
            Either::Second(Auto) => Either::Second(Auto),
        }
    }
}

impl From<Either<IntermediateColor, Auto>> for Either<Color, Auto> {
    fn from(from: Either<IntermediateColor, Auto>) -> Either<Color, Auto> {
        match from {
            Either::First(from) => Either::First(from.into()),
            Either::Second(Auto) => Either::Second(Auto),
        }
    }
}

#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[allow(missing_docs)]
/// A color partway between a numeric color and `currentcolor`;
/// `foreground_ratio` is the weight of the `currentcolor` part.
pub struct IntermediateColor {
    color: IntermediateRGBA,
    foreground_ratio: f32,
}

impl IntermediateColor {
    fn currentcolor() ->
    Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 1.,
        }
    }

    /// Returns a transparent intermediate color.
    pub fn transparent() -> Self {
        IntermediateColor {
            color: IntermediateRGBA::transparent(),
            foreground_ratio: 0.,
        }
    }

    // Fully `currentcolor` once the ratio reaches 1.
    fn is_currentcolor(&self) -> bool {
        self.foreground_ratio >= 1.
    }

    // Fully numeric when the ratio is 0.
    fn is_numeric(&self) -> bool {
        self.foreground_ratio <= 0.
    }

    /// The numeric part with its alpha scaled by the non-foreground weight.
    fn effective_intermediate_rgba(&self) -> IntermediateRGBA {
        IntermediateRGBA {
            alpha: self.color.alpha * (1. - self.foreground_ratio),
            .. self.color
        }
    }
}

impl Animatable for IntermediateColor {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        // Common cases are interpolating between two numeric colors,
        // two currentcolors, and a numeric color and a currentcolor.
        //
        // Note: this algorithm assumes self_portion + other_portion
        // equals to one, so it may be broken for additive operation.
        // To properly support additive color interpolation, we would
        // need two ratio fields in computed color types.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(IntermediateColor::currentcolor())
            } else {
                Ok(IntermediateColor {
                    color: self.color.add_weighted(&other.color, self_portion, other_portion)?,
                    foreground_ratio: self.foreground_ratio,
                })
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            Ok(IntermediateColor {
                color: other.color,
                foreground_ratio: self_portion as f32,
            })
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(IntermediateColor {
                color: self.color,
                foreground_ratio: other_portion as f32,
            })
        } else {
            // For interpolating between two complex colors, we need to
            // generate colors with effective alpha value.
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let color = self_color.add_weighted(&other_color, self_portion, other_portion)?;
            // Then we compute the final foreground ratio, and derive
            // the final alpha value from the effective alpha value.
            let foreground_ratio = self.foreground_ratio
                .add_weighted(&other.foreground_ratio, self_portion, other_portion)?;
            let alpha = color.alpha / (1. - foreground_ratio);
            Ok(IntermediateColor {
                color: IntermediateRGBA {
                    alpha: alpha,
                    .. color
                },
                foreground_ratio: foreground_ratio,
            })
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // All comments in add_weighted also applies here.
        if self.foreground_ratio == other.foreground_ratio {
            if self.is_currentcolor() {
                Ok(0.)
            } else {
                self.color.compute_squared_distance(&other.color)
            }
        } else if self.is_currentcolor() && other.is_numeric() {
            // A unit penalty accounts for the currentcolor/numeric mismatch.
            Ok(IntermediateRGBA::transparent().compute_squared_distance(&other.color)? + 1.)
        } else if self.is_numeric() && other.is_currentcolor() {
            Ok(self.color.compute_squared_distance(&IntermediateRGBA::transparent())? + 1.)
        } else {
            let self_color = self.effective_intermediate_rgba();
            let other_color = other.effective_intermediate_rgba();
            let dist = self_color.compute_squared_distance(&other_color)?;
            let ratio_diff = (self.foreground_ratio - other.foreground_ratio) as f64;
            Ok(dist + ratio_diff * ratio_diff)
        }
    }
}

impl From<Color> for IntermediateColor {
    fn from(color: Color) -> IntermediateColor {
        IntermediateColor {
            color: color.color.into(),
            // Map the stored u8 ratio into [0, 1].
            foreground_ratio: color.foreground_ratio as f32 * (1.
            / 255.),
        }
    }
}

impl From<IntermediateColor> for Color {
    fn from(color: IntermediateColor) -> Color {
        Color {
            color: color.color.into(),
            // Back to a u8 ratio, rounding to nearest.
            foreground_ratio: (color.foreground_ratio * 255.).round() as u8,
        }
    }
}

/// Animatable SVGPaint
pub type IntermediateSVGPaint = SVGPaint<IntermediateRGBA>;

/// Animatable SVGPaintKind
pub type IntermediateSVGPaintKind = SVGPaintKind<IntermediateRGBA>;

impl From<::values::computed::SVGPaint> for IntermediateSVGPaint {
    fn from(paint: ::values::computed::SVGPaint) -> IntermediateSVGPaint {
        paint.convert(|color| (*color).into())
    }
}

impl From<IntermediateSVGPaint> for ::values::computed::SVGPaint {
    fn from(paint: IntermediateSVGPaint) -> ::values::computed::SVGPaint {
        paint.convert(|color| (*color).into())
    }
}

impl Animatable for IntermediateSVGPaint {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        Ok(IntermediateSVGPaint {
            kind: self.kind.add_weighted(&other.kind, self_portion, other_portion)?,
            fallback: self.fallback.add_weighted(&other.fallback, self_portion, other_portion)?,
        })
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sq| sq.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        // Both the paint kind and the fallback contribute to the distance.
        Ok(self.kind.compute_squared_distance(&other.kind)? +
           self.fallback.compute_squared_distance(&other.fallback)?)
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> {
        Some(IntermediateSVGPaint {
            kind: option_try!(self.kind.get_zero_value()),
            fallback: self.fallback.and_then(|v| v.get_zero_value()),
        })
    }
}

impl Animatable for IntermediateSVGPaintKind {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                Ok(SVGPaintKind::Color(self_color.add_weighted(other_color, self_portion, other_portion)?))
            }
            // FIXME context values should be interpolable with colors
            // Gecko doesn't implement this behavior either.
            (&SVGPaintKind::None, &SVGPaintKind::None) => Ok(SVGPaintKind::None),
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) => Ok(SVGPaintKind::ContextFill),
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke) => Ok(SVGPaintKind::ContextStroke),
            _ => Err(())
        }
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        match (self, other) {
            (&SVGPaintKind::Color(ref self_color), &SVGPaintKind::Color(ref other_color)) => {
                self_color.compute_distance(other_color)
            }
            (&SVGPaintKind::None, &SVGPaintKind::None) |
            (&SVGPaintKind::ContextFill, &SVGPaintKind::ContextFill) |
            (&SVGPaintKind::ContextStroke, &SVGPaintKind::ContextStroke)=> Ok(0.0),
            _ => Err(())
        }
    }

    #[inline]
    fn get_zero_value(&self) -> Option<Self> {
        match self {
            &SVGPaintKind::Color(ref color) => color.get_zero_value()
                                                    .map(SVGPaintKind::Color),
            &SVGPaintKind::None |
            &SVGPaintKind::ContextFill |
            &SVGPaintKind::ContextStroke => Some(self.clone()),
            // NOTE(review): the arms above appear to cover every variant, so
            // this catch-all looks unreachable — confirm against the
            // definition of SVGPaintKind.
            _ => None,
        }
    }
}

<% FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale',
                        'HueRotate', 'Invert', 'Opacity', 'Saturate',
                        'Sepia' ] %>

/// https://drafts.fxtf.org/filters/#animation-of-filters
fn add_weighted_filter_function_impl(from: &AnimatedFilter,
                                     to: &AnimatedFilter,
                                     self_portion: f64,
                                     other_portion: f64)
                                     -> Result<AnimatedFilter, ()> {
    match (from, to) {
        % for func in [ 'Blur', 'HueRotate' ]:
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(from_value.add_weighted(
                &to_value,
                self_portion,
                other_portion,
            )?))
        },
        % endfor
        % for func in [ 'Grayscale', 'Invert', 'Sepia' ]:
        // These filter functions fall back to 0 when one side is missing.
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(add_weighted_with_initial_val(
                &from_value,
                &to_value,
                self_portion,
                other_portion,
                &0.0,
            )?))
        },
        % endfor
        % for func in [ 'Brightness', 'Contrast', 'Opacity', 'Saturate' ]:
        // These filter functions have 1 (identity) as their initial value.
        (&Filter::${func}(from_value), &Filter::${func}(to_value)) => {
            Ok(Filter::${func}(add_weighted_with_initial_val(
                &from_value,
                &to_value,
                self_portion,
                other_portion,
                &1.0,
            )?))
        },
        % endfor
        % if product == "gecko":
        (&Filter::DropShadow(ref from_value), &Filter::DropShadow(ref to_value)) => {
            Ok(Filter::DropShadow(from_value.add_weighted(
                &to_value,
                self_portion,
                other_portion,
            )?))
        },
        (&Filter::Url(_), &Filter::Url(_)) => {
            Err(())
        },
        % endif
        _ => {
            // If specified the different filter functions,
            // we will need to interpolate as discrete.
            Err(())
        },
    }
}

/// https://drafts.fxtf.org/filters/#animation-of-filters
fn add_weighted_filter_function(from: Option<<&AnimatedFilter>,
                                to: Option<<&AnimatedFilter>,
                                self_portion: f64,
                                other_portion: f64) -> Result<AnimatedFilter, ()> {
    match (from, to) {
        (Some(f), Some(t)) => {
            add_weighted_filter_function_impl(f, t, self_portion, other_portion)
        },
        (Some(f), None) => {
            // One-sided: weight the present filter against itself with a
            // zero portion for the missing side.
            add_weighted_filter_function_impl(f, f, self_portion, 0.0)
        },
        (None, Some(t)) => {
            add_weighted_filter_function_impl(t, t, other_portion, 0.0)
        },
        _ => { Err(()) }
    }
}

/// Squared distance between two filter functions of the same kind;
/// mismatched kinds yield Err.
fn compute_filter_square_distance(from: &AnimatedFilter,
                                  to: &AnimatedFilter) -> Result<f64, ()> {
    match (from, to) {
        % for func in FILTER_FUNCTIONS :
        (&Filter::${func}(f), &Filter::${func}(t)) => {
            Ok(try!(f.compute_squared_distance(&t)))
        },
        % endfor
        % if product == "gecko":
        (&Filter::DropShadow(ref f), &Filter::DropShadow(ref t)) => {
            Ok(try!(f.compute_squared_distance(&t)))
        },
        % endif
        _ => {
            Err(())
        }
    }
}

impl Animatable for AnimatedFilterList {
    #[inline]
    fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64)
        -> Result<Self, ()> {
        let mut filters = vec![];
        let mut from_iter = self.0.iter();
        let mut to_iter = other.0.iter();

        // Walk both lists in lockstep; when one side runs out,
        // add_weighted_filter_function fills in from the remaining side.
        let mut from = from_iter.next();
        let mut to = to_iter.next();
        while from.is_some() || to.is_some() {
            filters.push(try!(add_weighted_filter_function(from, to, self_portion, other_portion)));
            if from.is_some() {
                from = from_iter.next();
            }
            if to.is_some() {
                to = to_iter.next();
            }
        }

        Ok(AnimatedFilterList(filters))
    }

    /// Addition of two filter lists is concatenation.
    fn add(&self, other: &Self) -> Result<Self, ()> {
        Ok(AnimatedFilterList(self.0.iter().chain(other.0.iter()).cloned().collect()))
    }

    #[inline]
    fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
        self.compute_squared_distance(other).map(|sd| sd.sqrt())
    }

    #[inline]
    fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
        let mut square_distance: f64 = 0.0;
        let mut from_iter = self.0.iter();
        let mut to_iter = other.0.iter();

        let mut from = from_iter.next();
        let mut
to = to_iter.next(); while from.is_some() || to.is_some() { let current_square_distance: f64 ; if from.is_none() { let none = try!(add_weighted_filter_function(to, to, 0.0, 0.0)); current_square_distance = compute_filter_square_distance(&none, &(to.unwrap())).unwrap(); to = to_iter.next(); } else if to.is_none() { let none = try!(add_weighted_filter_function(from, from, 0.0, 0.0)); current_square_distance = compute_filter_square_distance(&none, &(from.unwrap())).unwrap(); from = from_iter.next(); } else { current_square_distance = compute_filter_square_distance(&(from.unwrap()), &(to.unwrap())).unwrap(); from = from_iter.next(); to = to_iter.next(); } square_distance += current_square_distance; } Ok(square_distance.sqrt()) } }
#![crate_name = "notedigest"]

//! Application that converts handwritten notes into organized html pages.

extern crate image;
extern crate uuid;

use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use std::io;
use std::io::Write;
use std::io::Read;
use std::io::BufReader;
use std::io::BufRead;
use std::fs::File;
use std::fs::OpenOptions;

use image::GenericImage;
use uuid::Uuid;

/// The location where a list of already imported files may be found
const IMPORTED: &'static str = "./.imported";
/// The location where the organized notes should be written to
const OUT_PATH: &'static str = "Documents/Notebook/";
/// Minimum value for a channel to be considered on
const MIN_THRESH: u8 = 120;
/// Maximum value for a channel to be considered off
const MAX_THRESH: u8 = 126;
/// Minimum width to height ratio for an object to be considered a line
const LINE_RATIO: f32 = 7.0;
/// Defines red channel index
const RED: u8 = 0;
/// Defines green channel index
const GREEN: u8 = 1;
/// Defines blue channel index
const BLUE: u8 = 2;

// Should be a trait. I am not sure how to implement one for only
// Vec<Vec<bool>> and not Vec<T>.
/**
Inserts boolean into `Vec <Vec <bool>>` at specified point.
If the point does not exist the vector is expanded.

# Arguments

* `y` - A 64 bit integer with the row to insert relative to the top left corner.
* `x` - A 64 bit integer with the column to insert relative to the top left corner.
* `value` - The boolean value you wish to insert
* `img` - The 2d boolean vector.
Must be `&mut` */ fn boundless_insert( y: i64, x: i64, value: bool, img: &mut Vec<Vec <bool>> ) { let mut tx = x; let mut ty = y; for i in 0..img.len(){ let mut ttx = x; while ttx < 0 { img[i].insert(0, false); ttx += 1; } while img[i].len() <= ttx as usize { img[i].push(false); } tx = ttx } let mut row:Vec <bool> = Vec::new(); for _ in 0..img[0].len() { row.push(false); } while ty < 0 { img.insert(0, row.clone()); ty += 1; } while img.len() <= ty as usize { img.push(row.clone()) } img[ty as usize][tx as usize] = value; } #[derive(Debug, Clone, PartialEq)] /// Monochrome image fragment struct ImgBlob { /// Is it a line? /// 0: object, 1: line blob_type: u8, /// Top left coordinate. (collum first then row) top_left: [usize; 2], /// Bottom right coordinate. (collum first then row) bottom_right: [usize; 2], /// 2d array of booleans representing monochrome image fragment bitmap: Vec <Vec <bool>> } impl ImgBlob { /// Checks using a floodfill if an image blob can be started at a coordinate. /// If one can be found it returns it. 
    /// Flood-fills from (x, y), marking visited pixels in `claim` and
    /// accumulating the connected region into a growing `bitmap`.
    /// Regions smaller than the size thresholds below are rejected.
    fn from_top_left( x: usize, y: usize, claim: &mut Vec <Vec <bool>>, img: &Vec <Vec <bool>> ) -> Option<ImgBlob>{
        let mut left = x;
        let mut top = y; //pretty sure that this is not supposed to change and can be eliminated
        let mut bitmap: Vec <Vec <bool>> = Vec::new();
        // NOTE(review): `queue.remove(0)` below makes this O(n) per pop;
        // a VecDeque would make the flood fill O(1) per pop.
        let mut queue: Vec <[usize; 2]> = Vec::new();
        claim[y][x] = true;
        queue.push([y+1, x]);
        queue.push([y, x+1]);
        bitmap.push(vec![true]);
        while queue.len() > 0 {
            let tempx = queue[0][1];
            let tempy = queue[0][0];
            // The `> 0` bounds also keep the `- 1` neighbor offsets below
            // from underflowing usize; pixels in row/column 0 are skipped.
            if (tempy < img.len()) && (tempx < img[0].len()) && (tempy > 0) && (tempx > 0) {
                if img[tempy][tempx] && !(claim[tempy][tempx]){
                    // Record the pixel relative to the current blob origin;
                    // boundless_insert grows the bitmap for negative offsets.
                    boundless_insert( (tempy as i64)-(top as i64), (tempx as i64)-(left as i64), true, &mut bitmap );
                    if tempx < left { left = tempx; }
                    if tempy < top { top = tempy; }
                    claim[tempy][tempx] = true;
                    // Enqueue the 8-connected neighbors (upper row only when
                    // we are strictly below the current top).
                    if tempy > top {
                        queue.push([tempy-1, tempx-1]);
                        queue.push([tempy-1, tempx]);
                        queue.push([tempy-1, tempx+1]);
                    }
                    queue.push([tempy, tempx-1]);
                    queue.push([tempy+1, tempx-1]);
                    queue.push([tempy+1, tempx]);
                    queue.push([tempy+1, tempx+1]);
                    queue.push([tempy, tempx+1]);
                }
            }
            queue.remove(0);
        }
        // Reject blobs that are too small to be meaningful content.
        if ( bitmap[0].len() + bitmap.len() > 8) && (bitmap.len() > 4) && (bitmap[0].len() > 4 ) {
            Some(ImgBlob {
                // Wide-and-short blobs past LINE_RATIO (and at least 60px
                // wide) are classified as lines (1); everything else is an
                // object (0).
                blob_type: if (bitmap[0].len() as f32 / bitmap.len() as f32) > LINE_RATIO && bitmap[0].len() > 60 {1} else {0},
                top_left: [left, top],
                bottom_right: [left+bitmap[0].len(), top+bitmap.len()],
                bitmap: bitmap
            })
        } else {
            None
        }
    }

    /// An empty placeholder blob.
    fn new() -> ImgBlob {
        ImgBlob {
            bitmap: Vec::new(),
            blob_type: 0,
            bottom_right: [0, 0],
            top_left: [0, 0]
        }
    }
}

#[derive(Clone)]
/// A group of blob objects along with the color information for the blobs
struct Clump {
    /// What color is it? Uses color constants
    ctype: u8,
    /// Blob objects
    blobs: Vec <ImgBlob>
}

impl Clump {
    /// Add an `ImgBlob` object to an array of clumps.
    /// If the blob is not of the same type as the current clump create a new clump.
    /// Otherwise add it to the current clump.
fn clump_update(blob:ImgBlob, t: u8, clumps: &mut Vec <Clump>){
    // Extend the last clump when the color/type matches, else open a new one.
    let clen = clumps.len();
    if clen > 0 {
        if t == clumps[clen-1].ctype {
            clumps[clen-1].blobs.push(blob);
        } else {
            clumps.push( Clump { ctype: t, blobs: vec![blob] } );
        }
    } else {
        clumps.push( Clump { ctype: t, blobs: vec![blob] } );
    }
}
}

/// Representation of a page. Holds clumps and original page dimensions.
struct Page {
    /// Vector of `Clump` objects
    clumps: Vec <Clump>,
    /// Dimensions of original page
    dimensions: [u32; 2]
}

impl Page {
    // Returns 0/1/2 for whichever of a/b/c is smallest — i.e. highest on the
    // page, since a smaller row index is nearer the top.  Ties prefer the
    // earlier argument.
    fn get_highest(a:usize, b:usize, c:usize) -> u8 {
        if a <= b && a <= c { 0 }
        else if a > b && b <= c { 1 }
        else { 2 }
    }

    /// Convert three channels of `ImgBlob` vectors to clumps in `Page` objects.
    fn from_blobs( mut rblobs: Vec <ImgBlob>, mut gblobs: Vec <ImgBlob>, mut bblobs: Vec <ImgBlob>, dimensions: [u32; 2] ) -> Page {
        print!("\r◔: Clustering objects                ");
        std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
        let mut clumps = Vec::new();
        let mut rpos;
        let mut gpos;
        let mut bpos;
        // Merge the three channel lists in top-to-bottom order; exhausted
        // channels report usize::max_value() so they always lose the compare.
        while rblobs.len() + gblobs.len() + bblobs.len() > 0 {
            if rblobs.len() > 0 { rpos = rblobs[0].top_left[1]; } else { rpos = <usize>::max_value(); }
            if gblobs.len() > 0 { gpos = gblobs[0].top_left[1]; } else { gpos = <usize>::max_value(); }
            if bblobs.len() > 0 { bpos = bblobs[0].top_left[1]; } else { bpos = <usize>::max_value(); }
            match Page::get_highest(rpos, gpos, bpos) {
                0 => Clump::clump_update(rblobs.remove(0), 0, &mut clumps),
                1 => Clump::clump_update(gblobs.remove(0), 1, &mut clumps),
                2 => Clump::clump_update(bblobs.remove(0), 2, &mut clumps),
                _ => panic!("Invalid clump type(>2)"),
            };
        }
        Page { clumps: clumps, dimensions: dimensions }
    }

    /// Create a `Page` object from a file path.
fn from_path(path: String) -> Page {
    // Thresholds one channel of `rgbimg` into `thresh` (selected channel
    // strong, the other two weak) and appends every blob found to `blobs`.
    fn thresh_and_blob( rgbimg: &image::RgbImage, channel: u8, claimed: &mut Vec <Vec <bool>>, thresh: &mut Vec <Vec <bool>>, blobs: &mut Vec <ImgBlob> ) {
        match channel {
            RED => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] = if (pixel[0] > MIN_THRESH) && (pixel[1] <= MAX_THRESH) && (pixel[2] <= MAX_THRESH) {true} else {false};
                }
            },
            GREEN => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] = if (pixel[1] > MIN_THRESH) && (pixel[0] <= MAX_THRESH) && (pixel[2] <= MAX_THRESH) {true} else {false};
                }
            },
            BLUE => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] = if (pixel[2] > MIN_THRESH) && (pixel[1] <= MAX_THRESH) && (pixel[0] <= MAX_THRESH) {true} else {false};
                }
            },
            _ => panic!("Invalid color")
        }
        // Scan the thresholded plane and flood-fill every unclaimed set pixel.
        for y in 0..thresh.len() {
            for x in 0..thresh[0].len() {
                if thresh[y][x] && !claimed[y][x] {
                    match ImgBlob::from_top_left(x, y, claimed, &thresh){
                        Some(o) => blobs.push(o),
                        None => {},
                    }
                }
            }
        }
    }
    let mut claimed: Vec <Vec <bool>> = Vec::new();
    let mut thresh: Vec <Vec <bool>> = Vec::new();
    let mut rblobs: Vec <ImgBlob> = Vec::new();
    let mut gblobs: Vec <ImgBlob> = Vec::new();
    let mut bblobs: Vec <ImgBlob> = Vec::new();
    let img = image::open(&Path::new(&path)).unwrap();
    //img = img.adjust_contrast(-22f32);
    // Pre-size the claim/threshold planes to the image dimensions.
    let mut row:Vec <bool> = Vec::new();
    for _ in 0..img.width() { row.push(false); }
    for _ in 0..img.height() {
        claimed.push(row.clone());
        thresh.push(row.clone());
    }
    let mut rgbimg = img.to_rgb();
    // Posterize to pure R/G/B/white before thresholding (see `Filter`).
    rgbimg.filter();
    thresh_and_blob( &rgbimg, RED, &mut claimed, &mut thresh, &mut rblobs );
    thresh_and_blob( &rgbimg, GREEN, &mut claimed, &mut thresh, &mut gblobs );
    thresh_and_blob( &rgbimg, BLUE, &mut claimed, &mut thresh, &mut bblobs );
    Page::from_blobs( rblobs, gblobs, bblobs, [img.width(), img.height()] )
}
}

#[derive(Clone)]
/// Representation of a heading.
struct Heading {
    id: Uuid,
    number: u8, // Heading number.
subject: Content
}

impl Heading {
    // Fresh heading: new id, number 0, empty subject.
    fn new() -> Heading {
        Heading { id: Uuid::new_v4(), number:0, subject: Content::empty() }
    }
}

/// Definition or important idea
#[derive(Clone)]
struct Idea {
    id: Uuid,
    top_pix: u32,
    top_precent: f64,
    left_pix: u32,
    left_precent: f64,
    width_pix: u32,
    width_precent: f64,
    height_pix: u32,
    height_precent: f64,
    subject: Content, // Just the header
    extension: Content // just the content
}

impl Idea {
    fn new() -> Idea {
        Idea {
            id: Uuid::new_v4(),
            top_pix: 0,
            top_precent: 0.0,
            left_pix: 0,
            left_precent: 0.0,
            width_pix: 0,
            width_precent: 0.0,
            height_pix: 0,
            height_precent: 0.0,
            subject: Content::empty(), // Just the header
            extension: Content::empty() // just the content
        }
    }

    // Recompute this idea's bounding box as the union of the `subject` and
    // `extension` boxes, then derive the percent fields from page dims `dim`.
    fn update_size_pos(&mut self, dim: [u32; 2]) {
        if self.subject.top_pix < self.extension.top_pix {
            self.top_pix = self.subject.top_pix;
        } else {
            self.top_pix = self.extension.top_pix;
        }
        if self.subject.left_pix < self.extension.left_pix {
            self.left_pix = self.subject.left_pix;
        } else {
            self.left_pix = self.extension.left_pix;
        }
        if self.subject.top_pix + self.subject.height_pix > self.extension.top_pix + self.extension.height_pix {
            self.height_pix = ( self.subject.top_pix + self.subject.height_pix ) - self.top_pix;
        } else {
            self.height_pix = ( self.extension.top_pix + self.extension.height_pix ) - self.top_pix;
        }
        if self.subject.left_pix + self.subject.width_pix > self.extension.left_pix + self.extension.width_pix {
            self.width_pix = ( self.subject.left_pix + self.subject.width_pix ) - self.left_pix;
        } else {
            self.width_pix = ( self.extension.left_pix + self.extension.width_pix ) - self.left_pix;
        }
        self.left_precent = (self.left_pix as f64) / (dim[0] as f64);
        self.top_precent = (self.top_pix as f64) / (dim[1] as f64);
        self.width_precent = (self.width_pix as f64) / (dim[0] as f64);
        self.height_precent = (self.height_pix as f64) / (dim[1] as f64);
    }
}

/// Content cluster
#[derive(Clone)]
struct Content {
    id: Uuid,
    top_pix: u32,
    // Top offset including previous pages; workaround to fix an index out of
    // bounds error caused when writing the image out.
    top_total_pix: u32,
    top_precent: f64,
    left_pix: u32,
    left_precent: f64,
    width_pix: u32,
    width_precent: f64,
    height_pix: u32,
    height_precent: f64,
    blobs: Vec <ImgBlob>
}

impl Content {
    // Recompute the pixel bounding box of `blobs` plus percent fields.
    // `cur_height` is the cumulative height of previously processed pages.
    fn update_size_pos(&mut self, dim: [u32; 2], cur_height: i64) {
        let mut top: i64 = (<u32>::max_value()) as i64;
        let mut left: u32 = <u32>::max_value();
        let mut bottom: u32 = 0;
        let mut right: u32 = 0;
        assert!( &self.blobs.len() > &0, "Output Generation: Error empty content object" );
        for b in &self.blobs {
            if b.top_left[1] < top as usize { top = b.top_left[1] as i64; }
            if b.top_left[0] < left as usize { left = b.top_left[0] as u32; }
            if b.bottom_right[1] > bottom as usize { bottom = b.bottom_right[1] as u32; }
            if b.bottom_right[0] > right as usize { right = b.bottom_right[0] as u32; }
        }
        self.top_pix = top as u32;
        self.top_total_pix = (top + cur_height) as u32;
        // The height percents should not be final and should probably not be set here
        self.top_precent = (self.top_total_pix as f64) / ((dim[1] as i64 + cur_height) as f64);
        self.left_pix = left;
        self.left_precent = (left as f64) / (dim[0] as f64);
        self.width_pix = right - left;
        self.width_precent = (self.width_pix as f64) / (dim[0] as f64);
        self.height_pix = (bottom as i64 - top) as u32;
        self.height_precent = (self.height_pix as f64) / ((dim[1] as i64 + cur_height) as f64);
    }

    // Re-derive the vertical percent fields against the running total height.
    fn update_top(&mut self, cur_height: &i64) {
        self.top_precent = (self.top_total_pix as f64) / (*cur_height as f64);
        self.height_precent = (self.height_pix as f64) / (*cur_height as f64);
    }

    fn new(blobs: Vec <ImgBlob>, dim: [u32; 2], cur_height: i64) -> Content {
        let mut out = Content {
            id: Uuid::new_v4(),
            top_pix: 0u32,
            top_total_pix: 0u32,
            top_precent: 0f64,
            left_pix: 0u32,
            left_precent: 0f64,
            width_pix: 0u32,
            width_precent: 0f64,
            height_pix: 0u32,
            height_precent: 0f64,
            blobs: blobs.clone() // NOTE(review): could move `blobs` instead of cloning
        };
        out.update_size_pos(dim, cur_height);
        out
    }

    // Content with no blobs and zeroed geometry.
    fn empty() -> Content {
        Content {
            id: Uuid::new_v4(),
            top_pix: 0u32,
            top_total_pix: 0u32,
top_precent: 0f64,
            left_pix: 0u32,
            left_precent: 0f64,
            width_pix: 0u32,
            width_precent: 0f64,
            height_pix: 0u32,
            height_precent: 0f64,
            blobs: Vec::new()
        }
    }

    // Render this content's blobs into a grayscale+alpha image buffer,
    // positioned relative to the content's own bounding box.
    fn to_image(&self) -> image::ImageBuffer<image::LumaA<u8>, Vec<u8>> {
        let mut imgbuf = image::ImageBuffer::<image::LumaA<u8>, Vec<u8>>::new( self.width_pix as u32, self.height_pix as u32 );
        for b in &self.blobs {
            // Blob offset relative to the content origin.
            let xoff = (b.top_left[0] as u32) - self.left_pix;
            let yoff = (b.top_left[1] as u32) - self.top_pix;
            let mut y: usize = 0;
            while y < b.bitmap.len() {
                let mut x: usize = 0;
                while x < b.bitmap[y].len() {
                    if b.bitmap[y][x] {
                        // Opaque black pixel; untouched pixels stay transparent.
                        imgbuf.put_pixel( (x as u32)+xoff, (y as u32)+yoff, image::LumaA([0, 255]) );
                    }
                    x += 1;
                }
                y += 1;
            }
        }
        imgbuf
    }
}

/// Objects holding `Heading`, `Idea`, and `Content` objects
#[derive(Clone)]
struct Chapter {
    id: Uuid,
    heading: Heading,
    sub_headings: Vec <Heading>,
    ideas: Vec <Idea>,
    content: Vec <Content>,
    writeable: bool,
    height_precent: f64,
    cur_height: i64 // use of i64 rather than u64 allows future negative starting value to componsate against mid page start
}

impl Chapter {
    /// Create a `Chapter` object
    fn new() -> Chapter {
        Chapter {
            id: Uuid::new_v4(),
            heading: Heading::new(),
            sub_headings: Vec::new(),
            ideas: Vec::new(),
            content: Vec::new(),
            writeable: false,
            height_precent: 0.0,
            cur_height: 0
        }
    }

    /// Blanks a `Chapter` object.
    /// Used to avoid scope problems that would arise due to initializing a new object in a subroutine.
fn blank(&mut self) {
    self.id = Uuid::new_v4();
    self.heading = Heading::new();
    self.sub_headings = Vec::new();
    self.ideas = Vec::new();
    self.content = Vec::new();
    self.writeable = false;
    self.height_precent = 0.0;
    self.cur_height = 0;
}

/// Adds heading to table of contents
fn add_to_toc(head: &Heading, parent: PathBuf, pid: Uuid) {
    let mut f = File::open(parent.join("index.html"))
        .expect(
            // This should never happen as we have already verified the file exists
            "Output Generation: could not find created table of contents"
        );
    let mut contents = String::new();
    f.read_to_string(&mut contents)
        .expect("Output Generation: error reading created table of contents");
    // Splice the new chapter link in front of the placeholder comment, and
    // re-emit the placeholder so the next chapter can do the same.
    contents = contents.replace(
        "<!-- NEXT CHAPTER -->",
        &format!(
            "<a href=\"{}/index.html\" class=\"head tc h1\"><img src=\"{}/img/t{}.png\"/></a><br/>\n\t\t\t<!-- NEXT CHAPTER -->",
            pid.simple().to_string(),
            pid.simple().to_string(),
            head.id.simple().to_string()
        )
    );
    let mut file = File::create(parent.join("index.html"))
        .expect( "Output Generation: error recreating table of contents" );
    writeln!(file, "{}", contents)
        .expect( "Output Generation: error rewriting table of contents" );
}

/// Writes a chapter object out
fn add_chapter(&mut self) {
    // Output root: $HOME/OUT_PATH.
    fn assemble_path() -> PathBuf {
        let dir: PathBuf;
        match env::home_dir() {
            Some(path) => dir = path,
            None => panic!( "Output Generation: system lacks valid home directory" ),
        }
        dir.as_path().join(Path::new(OUT_PATH))
    }
    // One-time creation of the output root and its static assets.
    fn setup_dirs(comp_out: &PathBuf) {
        fs::create_dir_all(comp_out).expect( "Output Generation: error creating root path" );
        let mut file = File::create( comp_out.join("index.html") ).expect( "Output Generation: error creating root index" );
        // NOTE(review): the template is passed as the writeln! FORMAT string
        // (no "{}"), so any literal `{` inside it would panic — confirm the
        // templates contain no braces.
        writeln!(file, include_str!("templates/table/index.html"))
            .expect( "Output Generation: error writing to root index" );
        file = File::create( comp_out.join("static.css") ).expect( "Output Generation: error creating root style" );
        writeln!(file, "{}", include_str!("templates/table/static.css"))
            .expect( "Output Generation: error writing to root style" );
        file = File::create( comp_out.join("hue.svg") ).expect( "Output Generation: error creating root color profile" );
        writeln!(file, include_str!("templates/table/hue.svg")).expect( "Output Generation: error writing to root color profile" );
        file = File::create( comp_out.join("fullscreen-op.svg") ).expect( "Output Generation: error creating root fullscreen" );
        writeln!(file, include_str!("templates/table/fullscreen-op.svg"))
            .expect( "Output Generation: error writing to root fullscreen" );
        file = File::create( comp_out.join("util.js") ).expect( "Output Generation: error creating root utilities" );
        writeln!(file, "{}", include_str!("templates/table/util.js"))
            .expect( "Output Generation: error writing to root utilities" );
    }
    let comp_out = assemble_path();
    if !Path::new(&( comp_out.join("index.html") )).exists() {
        setup_dirs(&comp_out);
    }
    // Per-chapter directory tree: <root>/<chapter-uuid>/img/.
    let ch_path = comp_out.join(&self.id.simple().to_string());
    fs::create_dir(&ch_path)
        .expect("Output Generation: error creating chapter path");
    fs::create_dir( ch_path.join("img") ).expect("Output Generation: error creating chapter image path");
    // `out` accumulates the chapter HTML, `gencss` the generated stylesheet.
    let mut out = String::from( include_str!("template_fragments/chapter/index.html0") );
    let mut gencss = String::from( include_str!("template_fragments/chapter/gen.css1") );
    // Chapter title image + its <img> tag and absolute-position CSS rule.
    let ref mut fout = File::create(
        ch_path.join( "img/t".to_string()+ &self.heading.id.simple().to_string()+ &".png".to_string() )
    ).unwrap();
    out += &(
        "<img class=\"head h1\" id=\"".to_string()+
        &self.heading.id.simple().to_string()+
        &"\" src=\"img/t".to_string()+
        &self.heading.id.simple().to_string()+
        &".png\"></img>".to_string()
    );
    self.heading.subject.update_top(&self.cur_height);
    gencss += &(
        "#t".to_string()+
        &self.heading.id.simple().to_string()+
        &"{\n\ttop:".to_string()+
        &(self.heading.subject.top_precent*(100 as f64)).to_string()+
        &"%;\n\tleft:".to_string()+
        &(self.heading.subject.left_precent*(100 as f64)).to_string()+
        &"%;\n\twidth:".to_string()+
        &(self.heading.subject.width_precent*(100 as f64)).to_string()+
        &"%;\nposition:absolute;\n}\n".to_string()
    );
    Chapter::add_to_toc( &self.heading, comp_out, self.id );
    out += include_str!("template_fragments/chapter/index.html1");
    let _ = image::ImageLumaA8( self.heading.subject.to_image() ).save(fout, image::PNG);
    // Sub-headings: one PNG + <img> + CSS rule each.
    for mut head in &mut self.sub_headings {
        let ref mut fout = File::create(
            ch_path.join( "img/h".to_string()+ &head.id.simple().to_string()+ &".png".to_string() )
        ).unwrap();
        let _ = image::ImageLumaA8( head.subject.to_image() ).save(fout, image::PNG);
        // NOTE(review): there is no space between id="..." and src= in the
        // generated HTML below — browsers tolerate it, but confirm.
        out += &(
            "<img class=\"head\" id=\"h".to_string()+
            &head.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/h".to_string()+
            &head.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        head.subject.update_top(&self.cur_height);
        gencss += &(
            "#h".to_string()+
            &head.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(head.subject.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(head.subject.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(head.subject.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    // Plain content blocks.
    for mut cont in &mut self.content {
        let ref mut fout = File::create(
            ch_path.join( "img/c".to_string()+ &cont.id.simple().to_string()+ &".png".to_string() )
        ).unwrap();
        let _ = image::ImageLumaA8( cont.to_image() ).save(fout, image::PNG);
        out += &(
            "<img class=\"cont\" id=\"c".to_string()+
            &cont.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/c".to_string()+
            &cont.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        cont.update_top(&self.cur_height);
        gencss += &(
            "#c".to_string()+
            &cont.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(cont.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(cont.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(cont.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    // Ideas/definitions: a header image (dh…) and a body image (dc…) each.
    for mut idea in &mut self.ideas {
        let ref mut hout = File::create(
            ch_path.join( "img/dh".to_string()+ &idea.id.simple().to_string()+ &".png".to_string() )
        ).unwrap();
        let _ = image::ImageLumaA8( idea.subject.to_image() ).save(hout, image::PNG);
        out += &(
            "<img class=\"defi h2\" id=\"dh".to_string()+
            &idea.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/dh".to_string()+
            &idea.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        idea.subject.update_top(&self.cur_height);
        gencss += &(
            "#dh".to_string()+
            &idea.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(idea.subject.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(idea.subject.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(idea.subject.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
        let ref mut cout = File::create(
            ch_path.join( "img/dc".to_string()+ &idea.id.simple().to_string()+ &".png".to_string() )
        ).unwrap();
        let _ = image::ImageLumaA8( idea.extension.to_image() ).save(cout, image::PNG);
        out += &(
            "<img class=\"defi\" id=\"dc".to_string()+
            &idea.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/dc".to_string()+
            &idea.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        idea.extension.update_top(&self.cur_height);
        gencss += &(
            "#dc".to_string()+
            &idea.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(idea.extension.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(idea.extension.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(idea.extension.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    out += include_str!("template_fragments/chapter/index.html2");
    // Prepend the page padding and the static prologue to the generated CSS.
    gencss =
        "\tpadding-bottom:".to_string()+
        &(self.height_precent*(100 as f64)).to_string()+
        &"%;\n".to_string()+
        &gencss ;
    gencss = String::from( include_str!("template_fragments/chapter/gen.css0") ) + &gencss;
    let ref mut file = File::create( ch_path.join("index.html") ).unwrap();
    writeln!(file, "{}", out)
        .expect("Chapter output: error creating index");
    let ref mut file_gencss = File::create( ch_path.join("gen.css") ).unwrap();
    writeln!(file_gencss, "{}", gencss)
        .expect("Chapter output: error creating index"); // NOTE(review): message duplicated from the index write above
    let ref mut file_scss = File::create( ch_path.join("static.css") ).unwrap();
    writeln!( file_scss, "{}", include_str!("templates/chapter/static.css") ).expect("Chapter output: error creating static CSS");
    let ref mut file_fscr = File::create( ch_path.join("fullscreen-op.svg") ).unwrap();
    writeln!( file_fscr, "{}", include_str!("templates/chapter/fullscreen-op.svg") ).expect("Chapter output: error creating fullscreen");
    let ref mut file_hue = File::create( ch_path.join("hue.svg") ).unwrap();
    writeln!( file_hue, "{}", include_str!("templates/chapter/hue.svg") ).expect("Chapter output: error creating hue");
    let ref mut file_util = File::create( ch_path.join("util.js") ).unwrap();
    writeln!( file_util, "{}", include_str!("templates/chapter/util.js") ).expect("Chapter output: error creating util");
}
}

/// Get a vector of image paths to import from a user.
fn get_images() -> Vec <String> {
    /// Get vector containing already imported images from Imported.
fn get_imported_images() -> Vec <String> { if Path::new(IMPORTED).exists() { let mut list: Vec <String> = Vec::new(); let f = (File::open(IMPORTED)).unwrap(); let file = BufReader::new(&f); for line in file.lines() { let templ = line.unwrap(); list.push(templ); } list } else { Vec::new() } } /// Get user selections as a `Vec <String>` fn parse_input( uin: String, mpaths: Vec <String>, new: &mut Vec <String> ) -> Vec <String> { let mut selected: Vec <String> = Vec::new(); let stringified: Vec <String> = uin.split(' ') .map(|x| x.to_string()).collect(); for sel in stringified { if sel == "+" { selected.append(new); } else if sel.to_string().contains("-") { let numbers: Vec <String> = uin.split('-') .map(|x| x.to_string()).collect(); let start = numbers[0].parse::<usize>().unwrap(); let end = numbers[1].parse::<usize>().unwrap(); for i in start..(end+1) { selected.push(mpaths[i].clone()); } } else { let i = sel.to_string().parse::<usize>().unwrap(); selected.push(mpaths[i].clone()); } } selected } let paths = fs::read_dir("./").unwrap(); let mut mpaths: Vec <String> = Vec::new(); let mut new: Vec <String> = Vec::new(); let imported: Vec <String> = get_imported_images(); for p in paths { let path = p.unwrap().path(); if !(path.extension() == None) { //The next line needs to be cleaned up. It is written like this to appease the borrow checker if path.extension().unwrap() == "png" || path.extension().unwrap() == "jpg" || path.extension().unwrap() == "bpm" || path.extension().unwrap() == "gif" { mpaths.push(path.into_os_string().into_string().unwrap());//ugly hack but as_path().to_string() does not work } } } mpaths.sort(); let mut fiter:usize = 0; for p in &mpaths { if !imported.contains(p) { print!("+"); new.push(p.clone());//cannot pass borrowed var w/o cloning } println!(" {}: {}", fiter, p); fiter += 1; } println!("Enter an number to select an image to import. "); println!("Enter 5-6 to import images 5 through 6. 
"); println!("Enter + to import the images you have not imported. (These images are indicated in the list by + signs)"); println!("Use spaces to seperate multiple selections. "); print!("select: "); std::io::stdout().flush().ok().expect("Could not flush STDOUT!"); let mut uin = String::new(); io::stdin().read_line(&mut uin).ok().expect("Error reading line"); uin.pop(); parse_input(uin, mpaths, &mut new) } /// Add content objects to chapter or destroy them because they lack a chapter. fn add_content( clump: Clump, page: &Page, chapter: &mut Chapter, destroyed: &mut usize, started: bool ) { if started { chapter.content.push(Content::new(clump.blobs, page.dimensions, chapter.cur_height)); } else { *destroyed += clump.blobs.len(); } } /// Add definition objects to chapter or destroy them because they lack a chapter. fn add_definition( clump: Clump, page: &Page, chapter: &mut Chapter, destroyed: &mut usize, started: bool ) { fn is_underlined(blob: ImgBlob, line: &ImgBlob) -> bool { ((blob.top_left[0] as i64 - line.top_left[0] as i64) > -50) && // Make -50 proportional ((line.bottom_right[0] as i64 - blob.bottom_right[0] as i64) > -50) && (blob.bottom_right[1] < line.bottom_right[1]) } if started { let mut line: ImgBlob = ImgBlob::new(); let mut name: Vec<ImgBlob> = Vec::new(); let mut cont: Vec<ImgBlob> = Vec::new(); for i in 0..clump.blobs.len() { if clump.blobs[i].blob_type == 1 { line = clump.blobs[i].clone(); break; } } for i in 0..clump.blobs.len() { if clump.blobs[i] != line { if is_underlined(clump.blobs[i].clone(), &line) { name.push(clump.blobs[i].clone()); } else { cont.push(clump.blobs[i].clone()); } } } let mut idea = Idea::new(); idea.subject = Content::new(name, page.dimensions, chapter.cur_height); idea.extension = Content::new(cont, page.dimensions, chapter.cur_height); idea.update_size_pos(page.dimensions); chapter.ideas.push(idea); } else { *destroyed += clump.blobs.len(); } } trait Sub { fn sub(self, other: [usize;2]) -> [usize; 2]; } ///Difference 
between 2D usize array impl Sub for [usize; 2] { fn sub(self, other: [usize;2]) -> [usize; 2] { if (self[0] > other[0]) && (self[1] > other[1]) { [self[0]-other[0], self[1]-other[1]] } else if (self[0] > other[0]) && (self[1] < other[1]) { [self[0]-other[0], other[1]-self[1]] } else if (self[0] < other[0]) && (self[1] > other[1]) { [other[0]-self[0], self[1]-other[1]] } else{ [other[0]-self[0], other[1]-self[1]] } } } trait Filter { fn filter(&mut self); } impl Filter for image::RgbImage { fn filter(&mut self) { for (_, _, pixel) in self.enumerate_pixels_mut() { if pixel[0] > pixel[1] && pixel[0] > pixel[2] { let avg = ((pixel[1] as u16+pixel[2] as u16)/2u16) as u8; if pixel[0] - avg > 60 { pixel[0] = 255u8; pixel[1] = 0u8; pixel[2] = 0u8; } else { pixel[0] = 255u8; pixel[1] = 255u8; pixel[2] = 255u8; } } else if pixel[1] > pixel[0] && pixel[1] > pixel[2] { let avg = ((pixel[0] as u16+pixel[2] as u16)/2u16) as u8; if pixel[1] - avg > 60 { pixel[0] = 0u8; pixel[1] = 255u8; pixel[2] = 0u8; } else { pixel[0] = 255u8; pixel[1] = 255u8; pixel[2] = 255u8; } } else if pixel[2] > pixel[0] && pixel[2] > pixel[1] { let avg = ((pixel[0] as u16+pixel[1] as u16)/2u16) as u8; if pixel[2] - avg > 60 { pixel[0] = 0u8; pixel[1] = 0u8; pixel[2] = 255u8; } else { pixel[0] = 255u8; pixel[1] = 255u8; pixel[2] = 255u8; } } else { pixel[0] = 255u8; pixel[1] = 255u8; pixel[2] = 255u8; } } } } /// Create new chapter, add heading objects to chapter, or destroy them because they lack a chapter. 
fn add_heading( clump: Clump, page: &Page, chapter: &mut Chapter, destroyed: &mut usize, created: &mut usize, started: &mut bool ) {
    let mut i: usize = 0;
    // State machine: -1 = nothing collected yet, 0 = collecting heading
    // blobs, 1 = one underline seen (a second close one means "chapter").
    let mut linemode: i8 = -1;
    let mut past = [0usize; 2];
    let mut head: Heading = Heading::new();
    while i < clump.blobs.len() {
        let blob = clump.blobs[i].clone();
        if blob.blob_type == 1 { // TODO: reduce cyclomatic complexity
            if linemode==1 {
                // 1/17 of width and 1/22 height off acceptable
                let diff = blob.top_left.sub(past);
                if (diff[0] as f32) < 1f32/4f32*(page.dimensions[0] as f32) && (diff[1] as f32) < 1f32/20f32*(page.dimensions[1] as f32) {
                    // Double underline => chapter heading: flush the current
                    // chapter (if any) and start a fresh one.
                    if *started {
                        chapter.cur_height += page.dimensions[1] as i64;
                        (chapter.clone()).add_chapter();
                        *created += 1;
                    }
                    chapter.blank();
                    head.number = 1;
                    head.subject.update_size_pos(page.dimensions, chapter.cur_height);
                    chapter.heading = head.clone();
                    head = Heading::new();
                    chapter.height_precent += (page.dimensions[1] as f64)/ (page.dimensions[0] as f64);
                    *started = true;
                    linemode = -1;
                } else {
                    *destroyed += 1;
                }
            } else if linemode == 0 {
                // First underline under collected blobs: tentative sub-heading.
                head.number = 2;
                linemode = 1;
                past = blob.top_left;
            } else {
                // Underline with nothing above it — orphan.
                *destroyed += 1;
            }
        } else {
            if linemode < 1 {
                if linemode == -1 { linemode = 0; }
                head.subject.blobs.push(blob);
            } else {
                // Non-line blob after a single underline closes a sub-heading.
                assert!( head.number == 2, "Found heading.number of {}. Expected 2", head.number );
                if *started {
                    head.subject.update_size_pos(page.dimensions, chapter.cur_height);
                    chapter.sub_headings.push(head.clone());
                    linemode = 0;
                    head = Heading::new();
                    head.subject.blobs.push(blob);
                } else {
                    *destroyed += head.subject.blobs.len() + 1;
                }
            }
        }
        i += 1;
    }
    // Flush a trailing heading left open at the end of the clump.
    if linemode != -1 {
        assert!( head.number != 1, "Found heading.number of 1. Expected 2 or 3" );
        if *started {
            head.subject.update_size_pos(page.dimensions, chapter.cur_height);
            chapter.sub_headings.push(head);
        } else {
            *destroyed += head.subject.blobs.len();
        }
    }
}

/// Entry point to the program
fn main() {
    //iterate through images pulling out clumps
    //iterate through pages parsing clumps and creating chapters
    let selected = get_images();
    let mut pages: Vec <Page> = Vec::new();
    print!("○: Identifying objects");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    for img in selected {
        pages.push(Page::from_path(img.clone()));
        // Record the import so the file is not offered again next run.
        if !fs::metadata(IMPORTED).is_ok() {
            File::create(IMPORTED).unwrap();
        }
        let mut file = OpenOptions::new()
            .write(true)
            .append(true)
            .open(IMPORTED)
            .unwrap();
        let _ = writeln!(file, "{}", img); // TODO: Warn the user about errors here
    }
    print!("\r◑: Dividing by chapter");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    let mut chapter: Chapter = Chapter::new();
    let mut started = false;
    let mut created_chapters = 0;
    let mut destroyed: usize = 0;
    for mut p in pages {
        chapter.height_precent += (p.dimensions[1] as f64)/(p.dimensions[0] as f64);
        let mut i: usize = 0;
        while i < p.clumps.len() {
            // Dispatch each clump by its color channel.
            match p.clumps[i].ctype {
                RED => add_heading( // Heading(s) of some type
                    p.clumps[i].clone(), &p, &mut chapter, &mut destroyed, &mut created_chapters, &mut started
                ),
                GREEN => add_definition( // Defintions(s) of some type
                    p.clumps[i].clone(), &p, &mut chapter, &mut destroyed, started
                ),
                BLUE => add_content( // Content
                    p.clumps[i].clone(), &p, &mut chapter, &mut destroyed, started
                ),
                _ => panic!("Invalid Content")
            };
            i += 1;
        }
        chapter.cur_height += p.dimensions[1] as i64;
    }
    // Flush the final chapter if one was started.
    if chapter.heading.subject.blobs.len() > 0 {
        chapter.add_chapter();
        created_chapters += 1;
    }
    print!("\r◕: Writing               ");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    println!("\r●: Done                 ");
    println!( "{} chapters added. 
{} orphaned objects destroyed", created_chapters, destroyed ); } Various defintion fixes prevent green notebook edges from creating empty defintion objects and assertionerrors allow defintions to be created without an extension (I am pretty sure these cannot be written out just yet) #![crate_name = "notedigest"] /// Application that converts handwritten notes into organized html pages. extern crate image; extern crate uuid; use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::io; use std::io::Write; use std::io::Read; use std::io::BufReader; use std::io::BufRead; use std::fs::File; use std::fs::OpenOptions; use image::GenericImage; use uuid::Uuid; /// The location where a list of already imported files may be found const IMPORTED: &'static str = "./.imported"; /// The location where the organized notes should be written to const OUT_PATH: &'static str = "Documents/Notebook/"; /// Minimum value for a channel to be considered on const MIN_THRESH: u8 = 120; /// Maximum value for a channel to be considered off const MAX_THRESH: u8 = 126; /// Minimum width to heigth ratio for object to be considered a line const LINE_RATIO: f32 = 7.0; /// Defines red channel index const RED: u8 = 0; /// Defines green channel index const GREEN: u8 = 1; /// Defines blue channel index const BLUE: u8 = 2; //should be a trait. I am not sure how to impliment one for only Vec <Vec <bool>> and not Vec <T> /** Inserts boolean into `Vec <Vec <bool>>` at specified point. If the point does not exist the vector is expanded. # Arguments * `y` - A 64 bit integer with the row to insert relative to the top right corner. * `x` - A 64 bit integer with the collum to insert relative to the top right corner. * `value` - The boolean value you wish to insert * `img` - The 2d boolean vector. 
Must be `&mut` */
fn boundless_insert( y: i64, x: i64, value: bool, img: &mut Vec<Vec <bool>> ) {
    let mut tx = x;
    let mut ty = y;
    // Column phase: grow/shift every row so column `x` exists; `tx` becomes
    // the clamped (non-negative) column index of the target cell.
    for i in 0..img.len(){
        let mut ttx = x;
        while ttx < 0 {
            img[i].insert(0, false);
            ttx += 1;
        }
        while img[i].len() <= ttx as usize {
            img[i].push(false);
        }
        tx = ttx
    }
    // Blank row used when growing vertically (panics if `img` is empty).
    let mut row:Vec <bool> = Vec::new();
    for _ in 0..img[0].len() {
        row.push(false);
    }
    // Row phase: prepend rows for negative `y`, append until `ty` exists.
    while ty < 0 {
        img.insert(0, row.clone());
        ty += 1;
    }
    while img.len() <= ty as usize {
        img.push(row.clone())
    }
    img[ty as usize][tx as usize] = value;
}

#[derive(Debug, Clone, PartialEq)]
/// Monochrome image fragment
struct ImgBlob {
    /// Is it a line?
    /// 0: object, 1: line
    blob_type: u8,
    /// Top left coordinate. (column first then row)
    top_left: [usize; 2],
    /// Bottom right coordinate. (column first then row)
    bottom_right: [usize; 2],
    /// 2d array of booleans representing the monochrome image fragment
    bitmap: Vec <Vec <bool>>
}

impl ImgBlob {
    /// Checks using a floodfill if an image blob can be started at a coordinate.
    /// If one can be found it returns it.
fn from_top_left( x: usize, y: usize, claim: &mut Vec <Vec <bool>>, img: &Vec <Vec <bool>> ) -> Option<ImgBlob>{
    // BFS flood fill from (x, y) over set pixels of `img`; visited pixels are
    // marked in `claim`, the region is accumulated into a local bitmap.
    let mut left = x;
    let mut top = y; //pretty sure that this is not supposed to change and can be eliminated
    let mut bitmap: Vec <Vec <bool>> = Vec::new();
    // Work list of [row, col] candidates (Vec used as a FIFO queue).
    let mut queue: Vec <[usize; 2]> = Vec::new();
    claim[y][x] = true;
    queue.push([y+1, x]);
    queue.push([y, x+1]);
    bitmap.push(vec![true]);
    while queue.len() > 0 {
        let tempx = queue[0][1];
        let tempy = queue[0][0];
        // Bounds guard; row/column 0 skipped so `-1` pushes cannot underflow.
        if (tempy < img.len()) && (tempx < img[0].len()) && (tempy > 0) && (tempx > 0) {
            if img[tempy][tempx] && !(claim[tempy][tempx]){
                // Record relative to the running corner; negative offsets
                // make `boundless_insert` grow the bitmap up/left.
                boundless_insert( (tempy as i64)-(top as i64), (tempx as i64)-(left as i64), true, &mut bitmap );
                if tempx < left { left = tempx; }
                if tempy < top { top = tempy; }
                claim[tempy][tempx] = true;
                if tempy > top {
                    queue.push([tempy-1, tempx-1]);
                    queue.push([tempy-1, tempx]);
                    queue.push([tempy-1, tempx+1]);
                }
                queue.push([tempy, tempx-1]);
                queue.push([tempy+1, tempx-1]);
                queue.push([tempy+1, tempx]);
                queue.push([tempy+1, tempx+1]);
                queue.push([tempy, tempx+1]);
            }
        }
        queue.remove(0); // NOTE(review): O(n) front pop; VecDeque would be cheaper.
    }
    // Reject tiny regions.
    if ( bitmap[0].len() + bitmap.len() > 8) && (bitmap.len() > 4) && (bitmap[0].len() > 4 ) {
        Some(ImgBlob {
            // Wide, short regions past LINE_RATIO (and > 60 px wide) are lines.
            blob_type: if (bitmap[0].len() as f32 / bitmap.len() as f32) > LINE_RATIO && bitmap[0].len() > 60 {1} else {0},
            top_left: [left, top],
            bottom_right: [left+bitmap[0].len(), top+bitmap.len()],
            bitmap: bitmap
        })
    } else {
        None
    }
}

// Empty placeholder blob: zeroed coordinates, no bitmap.
fn new() -> ImgBlob {
    ImgBlob { bitmap: Vec::new(), blob_type: 0, bottom_right: [0, 0], top_left: [0, 0] }
}
}

#[derive(Clone)]
/// A group of blob objects along with the color information for the blobs
struct Clump {
    /// What color is it? Uses color constants
    ctype: u8,
    /// Blob objects
    blobs: Vec <ImgBlob>
}

impl Clump {
    /// Add an `ImgBlob` object to an array of clumps.
    /// If the blob is not of the same type as the current clump create a new clump.
    /// Otherwise add it to the current clump.
fn clump_update(blob:ImgBlob, t: u8, clumps: &mut Vec <Clump>){
    // Extend the last clump when the type matches, else open a new clump.
    let clen = clumps.len();
    if clen > 0 {
        if t == clumps[clen-1].ctype {
            clumps[clen-1].blobs.push(blob);
        } else {
            clumps.push( Clump { ctype: t, blobs: vec![blob] } );
        }
    } else {
        clumps.push( Clump { ctype: t, blobs: vec![blob] } );
    }
}
}

/// Representation of a page. Holds clumps and original page dimensions.
struct Page {
    /// Vector of `Clump` objects
    clumps: Vec <Clump>,
    /// Dimensions of original page
    dimensions: [u32; 2]
}

impl Page {
    // Returns 0/1/2 for whichever of a/b/c is smallest (highest on the page);
    // ties prefer the earlier argument.
    fn get_highest(a:usize, b:usize, c:usize) -> u8 {
        if a <= b && a <= c { 0 }
        else if a > b && b <= c { 1 }
        else { 2 }
    }

    /// Convert three channels of `ImgBlob` vectors to clumps in `Page` objects.
    fn from_blobs( mut rblobs: Vec <ImgBlob>, mut gblobs: Vec <ImgBlob>, mut bblobs: Vec <ImgBlob>, dimensions: [u32; 2] ) -> Page {
        print!("\r◔: Clustering objects                ");
        std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
        let mut clumps = Vec::new();
        let mut rpos;
        let mut gpos;
        let mut bpos;
        // Merge the channel lists top-to-bottom; exhausted channels report
        // usize::max_value() so they always lose the comparison.
        while rblobs.len() + gblobs.len() + bblobs.len() > 0 {
            if rblobs.len() > 0 { rpos = rblobs[0].top_left[1]; } else { rpos = <usize>::max_value(); }
            if gblobs.len() > 0 { gpos = gblobs[0].top_left[1]; } else { gpos = <usize>::max_value(); }
            if bblobs.len() > 0 { bpos = bblobs[0].top_left[1]; } else { bpos = <usize>::max_value(); }
            match Page::get_highest(rpos, gpos, bpos) {
                0 => Clump::clump_update(rblobs.remove(0), 0, &mut clumps),
                1 => Clump::clump_update(gblobs.remove(0), 1, &mut clumps),
                2 => Clump::clump_update(bblobs.remove(0), 2, &mut clumps),
                _ => panic!("Invalid clump type(>2)"),
            };
        }
        Page { clumps: clumps, dimensions: dimensions }
    }

    /// Create a `Page` object from a file path.
/// Load the image at `path`, threshold it into red/green/blue masks, and
/// extract each channel's connected regions into a new `Page`.
fn from_path(path: String) -> Page {
    /// Threshold `rgbimg` on one color channel into `thresh`, then scan the
    /// mask and collect every unclaimed connected region as an `ImgBlob`.
    /// `claimed` is shared across all three channel passes, so a pixel claimed
    /// by an earlier channel can never seed a blob for a later one.
    fn thresh_and_blob(
        rgbimg: &image::RgbImage,
        channel: u8,
        claimed: &mut Vec<Vec<bool>>,
        thresh: &mut Vec<Vec<bool>>,
        blobs: &mut Vec<ImgBlob>
    ) {
        // A pixel passes when the selected channel is bright and the other
        // two are dark (the image has been snapped to pure colors by filter()).
        match channel {
            RED => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] =
                        if (pixel[0] > MIN_THRESH) &&
                        (pixel[1] <= MAX_THRESH) &&
                        (pixel[2] <= MAX_THRESH) {true} else {false};
                }
            },
            GREEN => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] =
                        if (pixel[1] > MIN_THRESH) &&
                        (pixel[0] <= MAX_THRESH) &&
                        (pixel[2] <= MAX_THRESH) {true} else {false};
                }
            },
            BLUE => {
                for (x, y, pixel) in rgbimg.enumerate_pixels() {
                    thresh[y as usize][x as usize] =
                        if (pixel[2] > MIN_THRESH) &&
                        (pixel[1] <= MAX_THRESH) &&
                        (pixel[0] <= MAX_THRESH) {true} else {false};
                }
            },
            _ => panic!("Invalid color")
        }
        // Seed a flood fill at every set, unclaimed pixel (row-major scan),
        // so each connected region is extracted exactly once.
        for y in 0..thresh.len() {
            for x in 0..thresh[0].len() {
                if thresh[y][x] && !claimed[y][x] {
                    match ImgBlob::from_top_left(x, y, claimed, &thresh) {
                        Some(o) => blobs.push(o),
                        None => {},
                    }
                }
            }
        }
    }
    let mut claimed: Vec<Vec<bool>> = Vec::new();
    let mut thresh: Vec<Vec<bool>> = Vec::new();
    let mut rblobs: Vec<ImgBlob> = Vec::new();
    let mut gblobs: Vec<ImgBlob> = Vec::new();
    let mut bblobs: Vec<ImgBlob> = Vec::new();
    let img = image::open(&Path::new(&path)).unwrap();
    //img = img.adjust_contrast(-22f32);
    // Pre-size both masks to the image dimensions, all false.
    let mut row: Vec<bool> = Vec::new();
    for _ in 0..img.width() {
        row.push(false);
    }
    for _ in 0..img.height() {
        claimed.push(row.clone());
        thresh.push(row.clone());
    }
    let mut rgbimg = img.to_rgb();
    // Custom Filter impl (below): snaps every pixel to pure R/G/B or white.
    rgbimg.filter();
    thresh_and_blob(
        &rgbimg,
        RED,
        &mut claimed,
        &mut thresh,
        &mut rblobs
    );
    thresh_and_blob(
        &rgbimg,
        GREEN,
        &mut claimed,
        &mut thresh,
        &mut gblobs
    );
    thresh_and_blob(
        &rgbimg,
        BLUE,
        &mut claimed,
        &mut thresh,
        &mut bblobs
    );
    Page::from_blobs(
        rblobs,
        gblobs,
        bblobs,
        [img.width(), img.height()]
    )
}
}

#[derive(Clone)]
/// Representation of a heading.
struct Heading {
    id: Uuid,
    // Heading level: add_heading sets 1 for a chapter heading (double
    // underline) and 2 for a sub-heading (single underline).
    number: u8,
    /// Heading number.
    subject: Content
}

impl Heading {
    /// A fresh heading with a new UUID, level 0 and empty content.
    fn new() -> Heading {
        Heading {
            id: Uuid::new_v4(),
            number: 0,
            subject: Content::empty()
        }
    }
}

/// Definition or important idea
#[derive(Clone)]
struct Idea {
    id: Uuid,
    top_pix: u32,
    top_precent: f64,
    left_pix: u32,
    left_precent: f64,
    width_pix: u32,
    width_precent: f64,
    height_pix: u32,
    height_precent: f64,
    subject: Content, // Just the header
    extension: Content // just the content
}

impl Idea {
    /// A fresh idea with a new UUID and zeroed geometry.
    fn new() -> Idea {
        Idea {
            id: Uuid::new_v4(),
            top_pix: 0,
            top_precent: 0.0,
            left_pix: 0,
            left_precent: 0.0,
            width_pix: 0,
            width_precent: 0.0,
            height_pix: 0,
            height_precent: 0.0,
            subject: Content::empty(), // Just the header
            extension: Content::empty() // just the content
        }
    }
    /// Recompute this idea's bounding box as the union of the subject and
    /// extension boxes, then derive the percent fields from page dims `dim`.
    fn update_size_pos(&mut self, dim: [u32; 2]) {
        // top = min(subject.top, extension.top)
        if self.subject.top_pix < self.extension.top_pix {
            self.top_pix = self.subject.top_pix;
        } else {
            self.top_pix = self.extension.top_pix;
        }
        // left = min(subject.left, extension.left)
        if self.subject.left_pix < self.extension.left_pix {
            self.left_pix = self.subject.left_pix;
        } else {
            self.left_pix = self.extension.left_pix;
        }
        // height reaches down to whichever box ends lower.
        if self.subject.top_pix + self.subject.height_pix >
        self.extension.top_pix + self.extension.height_pix {
            self.height_pix = (
                self.subject.top_pix +
                self.subject.height_pix
            ) - self.top_pix;
        } else {
            self.height_pix = (
                self.extension.top_pix +
                self.extension.height_pix
            ) - self.top_pix;
        }
        // width reaches right to whichever box ends further right.
        if self.subject.left_pix + self.subject.width_pix >
        self.extension.left_pix + self.extension.width_pix {
            self.width_pix = (
                self.subject.left_pix +
                self.subject.width_pix
            ) - self.left_pix;
        } else {
            self.width_pix = (
                self.extension.left_pix +
                self.extension.width_pix
            ) - self.left_pix;
        }
        self.left_precent = (self.left_pix as f64) / (dim[0] as f64);
        self.top_precent = (self.top_pix as f64) / (dim[1] as f64);
        self.width_precent = (self.width_pix as f64) / (dim[0] as f64);
        self.height_precent = (self.height_pix as f64) / (dim[1] as f64);
    }
}

/// Content cluster
#[derive(Clone)]
struct Content {
    id: Uuid,
    top_pix: u32,
    top_total_pix: u32, // workaround to fix index out of bounds error caused when writing image out
    top_precent: f64,
    left_pix: u32,
    left_precent: f64,
    width_pix: u32,
    width_precent: f64,
    height_pix: u32,
    height_precent: f64,
    blobs: Vec<ImgBlob>
}

impl Content {
    /// Recompute the bounding box of all blobs in this content cluster.
    /// `cur_height` is the accumulated height (px) of all previous pages, so
    /// `top_total_pix` is the top edge in whole-document coordinates.
    fn update_size_pos(&mut self, dim: [u32; 2], cur_height: i64) {
        let mut top: i64 = (<u32>::max_value()) as i64;
        let mut left: u32 = <u32>::max_value();
        let mut bottom: u32 = 0;
        let mut right: u32 = 0;
        assert!(
            &self.blobs.len() > &0,
            "Output Generation: Error empty content object"
        );
        for b in &self.blobs {
            if b.top_left[1] < top as usize {
                top = b.top_left[1] as i64;
            }
            if b.top_left[0] < left as usize {
                left = b.top_left[0] as u32;
            }
            if b.bottom_right[1] > bottom as usize {
                bottom = b.bottom_right[1] as u32;
            }
            if b.bottom_right[0] > right as usize {
                right = b.bottom_right[0] as u32;
            }
        }
        self.top_pix = top as u32;
        self.top_total_pix = (top + cur_height) as u32;
        // The height percents should not be final and should probably not be set here
        self.top_precent = (self.top_total_pix as f64) /
            ((dim[1] as i64 + cur_height) as f64);
        self.left_pix = left;
        self.left_precent = (left as f64) / (dim[0] as f64);
        self.width_pix = right - left;
        self.width_precent = (self.width_pix as f64) / (dim[0] as f64);
        self.height_pix = (bottom as i64 - top) as u32;
        self.height_precent = (self.height_pix as f64) /
            ((dim[1] as i64 + cur_height) as f64);
    }
    /// Re-derive the vertical percent fields once the final document height
    /// (`cur_height`, px) is known.
    fn update_top(&mut self, cur_height: &i64) {
        self.top_precent = (self.top_total_pix as f64) / (*cur_height as f64);
        self.height_precent = (self.height_pix as f64) / (*cur_height as f64);
    }
    /// Build a content cluster from `blobs` and immediately size it.
    fn new(blobs: Vec<ImgBlob>, dim: [u32; 2], cur_height: i64) -> Content {
        let mut out = Content {
            id: Uuid::new_v4(),
            top_pix: 0u32,
            top_total_pix: 0u32,
            top_precent: 0f64,
            left_pix: 0u32,
            left_precent: 0f64,
            width_pix: 0u32,
            width_precent: 0f64,
            height_pix: 0u32,
            height_precent: 0f64,
            blobs: blobs.clone()
        };
        out.update_size_pos(dim, cur_height);
        out
    }
    /// An empty, zero-sized content cluster.
    fn empty() -> Content {
        Content {
            id: Uuid::new_v4(),
            top_pix: 0u32,
            top_total_pix: 0u32,
            top_precent: 0f64,
            left_pix: 0u32,
            left_precent: 0f64,
            width_pix: 0u32,
            width_precent: 0f64,
            height_pix: 0u32,
            height_precent: 0f64,
            blobs: Vec::new()
        }
    }
    /// Rasterize this cluster into a grayscale+alpha image: set bitmap bits
    /// become opaque black pixels, everything else stays transparent.
    fn to_image(&self) -> image::ImageBuffer<image::LumaA<u8>, Vec<u8>> {
        let mut imgbuf = image::ImageBuffer::<image::LumaA<u8>, Vec<u8>>::new(
            self.width_pix as u32,
            self.height_pix as u32
        );
        for b in &self.blobs {
            // Blob coordinates are page-relative; shift into cluster space.
            let xoff = (b.top_left[0] as u32) - self.left_pix;
            let yoff = (b.top_left[1] as u32) - self.top_pix;
            let mut y: usize = 0;
            while y < b.bitmap.len() {
                let mut x: usize = 0;
                while x < b.bitmap[y].len() {
                    if b.bitmap[y][x] {
                        imgbuf.put_pixel(
                            (x as u32) + xoff,
                            (y as u32) + yoff,
                            image::LumaA([0, 255])
                        );
                    }
                    x += 1;
                }
                y += 1;
            }
        }
        imgbuf
    }
}

/// Objects holding `Heading`, `Idea`, and `Content` objects
#[derive(Clone)]
struct Chapter {
    id: Uuid,
    heading: Heading,
    sub_headings: Vec<Heading>,
    ideas: Vec<Idea>,
    content: Vec<Content>,
    writeable: bool,
    height_precent: f64,
    cur_height: i64 // use of i64 rather than u64 allows future negative starting value to componsate against mid page start
}

impl Chapter {
    /// Create a `Chapter` object
    fn new() -> Chapter {
        Chapter {
            id: Uuid::new_v4(),
            heading: Heading::new(),
            sub_headings: Vec::new(),
            ideas: Vec::new(),
            content: Vec::new(),
            writeable: false,
            height_precent: 0.0,
            cur_height: 0
        }
    }
    /// Blanks a `Chapter` object.
    /// Used to avoid scope problems that would arise due to initializing a new object in a subroutine.
/// Reset every field in place (fresh UUID included), equivalent to
/// `*self = Chapter::new()`.
fn blank(&mut self) {
    self.id = Uuid::new_v4();
    self.heading = Heading::new();
    self.sub_headings = Vec::new();
    self.ideas = Vec::new();
    self.content = Vec::new();
    self.writeable = false;
    self.height_precent = 0.0;
    self.cur_height = 0;
}
/// Adds heading to table of contents
/// Rewrites `parent/index.html`, splicing a link for chapter `pid` in front
/// of the `<!-- NEXT CHAPTER -->` placeholder (which is re-emitted so the
/// next chapter can be appended the same way).
fn add_to_toc(head: &Heading, parent: PathBuf, pid: Uuid) {
    let mut f = File::open(parent.join("index.html"))
        .expect(
            // This should never happen as we have already verified the file exists
            "Output Generation: could not find created table of contents"
        );
    let mut contents = String::new();
    f.read_to_string(&mut contents)
        .expect("Output Generation: error reading created table of contents");
    contents = contents.replace(
        "<!-- NEXT CHAPTER -->",
        &format!(
            "<a href=\"{}/index.html\" class=\"head tc h1\"><img src=\"{}/img/t{}.png\"/></a><br/>\n\t\t\t<!-- NEXT CHAPTER -->",
            pid.simple().to_string(),
            pid.simple().to_string(),
            head.id.simple().to_string()
        )
    );
    let mut file = File::create(parent.join("index.html"))
        .expect(
            "Output Generation: error recreating table of contents"
        );
    writeln!(file, "{}", contents)
        .expect(
            "Output Generation: error rewriting table of contents"
        );
}
/// Writes a chapter object out
/// Renders this chapter to `$HOME/OUT_PATH/<chapter-uuid>/`: one PNG per
/// heading/content/idea, an index.html referencing them, and a generated
/// gen.css with absolute percent positions.
fn add_chapter(&mut self) {
    // Root output directory: $HOME joined with OUT_PATH.
    fn assemble_path() -> PathBuf {
        let dir: PathBuf;
        match env::home_dir() {
            Some(path) => dir = path,
            None => panic!(
                "Output Generation: system lacks valid home directory"
            ),
        }
        dir.as_path().join(Path::new(OUT_PATH))
    }
    // First-run setup: create the root dir and copy the static table-of-
    // contents template assets into it.
    fn setup_dirs(comp_out: &PathBuf) {
        fs::create_dir_all(comp_out).expect(
            "Output Generation: error creating root path"
        );
        let mut file = File::create(
            comp_out.join("index.html")
        ).expect(
            "Output Generation: error creating root index"
        );
        // NOTE(review): the template is passed as the writeln! format string
        // here (no "{}") — any literal braces in it would be interpreted.
        writeln!(file, include_str!("templates/table/index.html"))
            .expect(
                "Output Generation: error writing to root index"
            );
        file = File::create(
            comp_out.join("static.css")
        ).expect(
            "Output Generation: error creating root style"
        );
        writeln!(file, "{}", include_str!("templates/table/static.css"))
            .expect(
                "Output Generation: error writing to root style"
            );
        file = File::create(
            comp_out.join("hue.svg")
        ).expect(
            "Output Generation: error creating root color profile"
        );
        writeln!(file, include_str!("templates/table/hue.svg")).expect(
            "Output Generation: error writing to root color profile"
        );
        file = File::create(
            comp_out.join("fullscreen-op.svg")
        ).expect(
            "Output Generation: error creating root fullscreen"
        );
        writeln!(file, include_str!("templates/table/fullscreen-op.svg"))
            .expect(
                "Output Generation: error writing to root fullscreen"
            );
        file = File::create(
            comp_out.join("util.js")
        ).expect(
            "Output Generation: error creating root utilities"
        );
        writeln!(file, "{}", include_str!("templates/table/util.js"))
            .expect(
                "Output Generation: error writing to root utilities"
            );
    }
    let comp_out = assemble_path();
    if !Path::new(&(
        comp_out.join("index.html")
    )).exists() {
        setup_dirs(&comp_out);
    }
    let ch_path = comp_out.join(&self.id.simple().to_string());
    fs::create_dir(&ch_path)
        .expect("Output Generation: error creating chapter path");
    fs::create_dir(
        ch_path.join("img")
    ).expect("Output Generation: error creating chapter image path");
    // `out` accumulates the chapter HTML; `gencss` the generated stylesheet.
    let mut out = String::from(
        include_str!("template_fragments/chapter/index.html0")
    );
    let mut gencss = String::from(
        include_str!("template_fragments/chapter/gen.css1")
    );
    // Chapter title image (t<uuid>.png) plus its <img> tag and CSS placement.
    let ref mut fout = File::create(
        ch_path.join(
            "img/t".to_string()+
            &self.heading.id.simple().to_string()+
            &".png".to_string()
        )
    ).unwrap();
    out += &(
        "<img class=\"head h1\" id=\"".to_string()+
        &self.heading.id.simple().to_string()+
        &"\" src=\"img/t".to_string()+
        &self.heading.id.simple().to_string()+
        &".png\"></img>".to_string()
    );
    self.heading.subject.update_top(&self.cur_height);
    gencss += &(
        "#t".to_string()+
        &self.heading.id.simple().to_string()+
        &"{\n\ttop:".to_string()+
        &(self.heading.subject.top_precent*(100 as f64)).to_string()+
        &"%;\n\tleft:".to_string()+
        &(self.heading.subject.left_precent*(100 as f64)).to_string()+
        &"%;\n\twidth:".to_string()+
        &(self.heading.subject.width_precent*(100 as f64)).to_string()+
        &"%;\nposition:absolute;\n}\n".to_string()
    );
    Chapter::add_to_toc(
        &self.heading,
        comp_out,
        self.id
    );
    out += include_str!("template_fragments/chapter/index.html1");
    let _ = image::ImageLumaA8(
        self.heading.subject.to_image()
    ).save(fout, image::PNG);
    // Sub-headings: h<uuid>.png each.
    for mut head in &mut self.sub_headings {
        let ref mut fout = File::create(
            ch_path.join(
                "img/h".to_string()+
                &head.id.simple().to_string()+
                &".png".to_string()
            )
        ).unwrap();
        let _ = image::ImageLumaA8(
            head.subject.to_image()
        ).save(fout, image::PNG);
        // NOTE(review): no space between the closing quote of id and `src=`
        // here (unlike the h1 tag above) — emits `id="h…"src="…"`.
        out += &(
            "<img class=\"head\" id=\"h".to_string()+
            &head.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/h".to_string()+
            &head.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        head.subject.update_top(&self.cur_height);
        gencss += &(
            "#h".to_string()+
            &head.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(head.subject.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(head.subject.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(head.subject.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    // Plain content clusters: c<uuid>.png each.
    for mut cont in &mut self.content {
        let ref mut fout = File::create(
            ch_path.join(
                "img/c".to_string()+
                &cont.id.simple().to_string()+
                &".png".to_string()
            )
        ).unwrap();
        let _ = image::ImageLumaA8(
            cont.to_image()
        ).save(fout, image::PNG);
        out += &(
            "<img class=\"cont\" id=\"c".to_string()+
            &cont.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/c".to_string()+
            &cont.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        cont.update_top(&self.cur_height);
        gencss += &(
            "#c".to_string()+
            &cont.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(cont.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(cont.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(cont.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    // Ideas/definitions: header dh<uuid>.png and body dc<uuid>.png.
    for mut idea in &mut self.ideas {
        let ref mut hout = File::create(
            ch_path.join(
                "img/dh".to_string()+
                &idea.id.simple().to_string()+
                &".png".to_string()
            )
        ).unwrap();
        let _ = image::ImageLumaA8(
            idea.subject.to_image()
        ).save(hout, image::PNG);
        out += &(
            "<img class=\"defi h2\" id=\"dh".to_string()+
            &idea.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/dh".to_string()+
            &idea.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        idea.subject.update_top(&self.cur_height);
        gencss += &(
            "#dh".to_string()+
            &idea.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(idea.subject.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(idea.subject.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(idea.subject.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
        let ref mut cout = File::create(
            ch_path.join(
                "img/dc".to_string()+
                &idea.id.simple().to_string()+
                &".png".to_string()
            )
        ).unwrap();
        let _ = image::ImageLumaA8(
            idea.extension.to_image()
        ).save(cout, image::PNG);
        out += &(
            "<img class=\"defi\" id=\"dc".to_string()+
            &idea.id.simple().to_string()+
            &"\"".to_string()+
            &"src=\"img/dc".to_string()+
            &idea.id.simple().to_string()+
            &".png\"></img>".to_string()
        );
        idea.extension.update_top(&self.cur_height);
        gencss += &(
            "#dc".to_string()+
            &idea.id.simple().to_string()+
            &"{\n\ttop:".to_string()+
            &(idea.extension.top_precent*(100 as f64)).to_string()+
            &"%;\n\tleft:".to_string()+
            &(idea.extension.left_precent*(100 as f64)).to_string()+
            &"%;\n\twidth:".to_string()+
            &(idea.extension.width_precent*(100 as f64)).to_string()+
            &"%;\nposition:absolute;\n}\n".to_string()
        );
    }
    out += include_str!("template_fragments/chapter/index.html2");
    // Prepend the page-height padding, then the static gen.css header.
    gencss =
        "\tpadding-bottom:".to_string()+
        &(self.height_precent*(100 as f64)).to_string()+
        &"%;\n".to_string()+
        &gencss
    ;
    gencss = String::from(
        include_str!("template_fragments/chapter/gen.css0")
    ) + &gencss;
    let ref mut file = File::create(
        ch_path.join("index.html")
    ).unwrap();
    writeln!(file, "{}", out)
        .expect("Chapter output: error creating index");
    let ref mut file_gencss = File::create(
        ch_path.join("gen.css")
    ).unwrap();
    writeln!(file_gencss, "{}", gencss)
        .expect("Chapter output: error creating index");
    let ref mut file_scss = File::create(
        ch_path.join("static.css")
    ).unwrap();
    writeln!(
        file_scss,
        "{}",
        include_str!("templates/chapter/static.css")
    ).expect("Chapter output: error creating static CSS");
    let ref mut file_fscr = File::create(
        ch_path.join("fullscreen-op.svg")
    ).unwrap();
    writeln!(
        file_fscr,
        "{}",
        include_str!("templates/chapter/fullscreen-op.svg")
    ).expect("Chapter output: error creating fullscreen");
    let ref mut file_hue = File::create(
        ch_path.join("hue.svg")
    ).unwrap();
    writeln!(
        file_hue,
        "{}",
        include_str!("templates/chapter/hue.svg")
    ).expect("Chapter output: error creating hue");
    let ref mut file_util = File::create(
        ch_path.join("util.js")
    ).unwrap();
    writeln!(
        file_util,
        "{}",
        include_str!("templates/chapter/util.js")
    ).expect("Chapter output: error creating util");
}
}

/// Get a vector of image paths to import from a user.
fn get_images() -> Vec<String> {
    /// Get vector containing already imported images from Imported.
    fn get_imported_images() -> Vec<String> {
        if Path::new(IMPORTED).exists() {
            let mut list: Vec<String> = Vec::new();
            let f = (File::open(IMPORTED)).unwrap();
            let file = BufReader::new(&f);
            for line in file.lines() {
                let templ = line.unwrap();
                list.push(templ);
            }
            list
        } else {
            // No import ledger yet — nothing has been imported.
            Vec::new()
        }
    }
    /// Get user selections as a `Vec <String>`
    /// Accepts space-separated tokens: "N" (single index), "A-B" (inclusive
    /// range) or "+" (all not-yet-imported images).
    fn parse_input(
        uin: String,
        mpaths: Vec<String>,
        new: &mut Vec<String>
    ) -> Vec<String> {
        let mut selected: Vec<String> = Vec::new();
        let stringified: Vec<String> = uin.split(' ')
            .map(|x| x.to_string()).collect();
        for sel in stringified {
            if sel == "+" {
                selected.append(new);
            } else if sel.to_string().contains("-") {
                // NOTE(review): this splits the whole input `uin` rather than
                // the current token `sel`; with multiple range tokens the
                // parsed bounds look wrong — verify intended behavior.
                let numbers: Vec<String> = uin.split('-')
                    .map(|x| x.to_string()).collect();
                let start = numbers[0].parse::<usize>().unwrap();
                let end = numbers[1].parse::<usize>().unwrap();
                for i in start..(end+1) {
                    selected.push(mpaths[i].clone());
                }
            } else {
                let i = sel.to_string().parse::<usize>().unwrap();
                selected.push(mpaths[i].clone());
            }
        }
        selected
    }
    let paths = fs::read_dir("./").unwrap();
    let mut mpaths: Vec<String> = Vec::new();
    let mut new: Vec<String> = Vec::new();
    let imported: Vec<String> = get_imported_images();
    // Collect image files from the current directory.
    for p in paths {
        let path = p.unwrap().path();
        if !(path.extension() == None) {
            //The next line needs to be cleaned up. It is written like this to appease the borrow checker
            // NOTE(review): "bpm" is likely a typo for "bmp".
            if path.extension().unwrap() == "png" ||
            path.extension().unwrap() == "jpg" ||
            path.extension().unwrap() == "bpm" ||
            path.extension().unwrap() == "gif" {
                mpaths.push(path.into_os_string().into_string().unwrap());//ugly hack but as_path().to_string() does not work
            }
        }
    }
    mpaths.sort();
    // Print the menu; a leading "+" marks not-yet-imported images.
    let mut fiter: usize = 0;
    for p in &mpaths {
        if !imported.contains(p) {
            print!("+");
            new.push(p.clone());//cannot pass borrowed var w/o cloning
        }
        println!(" {}: {}", fiter, p);
        fiter += 1;
    }
    println!("Enter an number to select an image to import. ");
    println!("Enter 5-6 to import images 5 through 6. ");
    println!("Enter + to import the images you have not imported. (These images are indicated in the list by + signs)");
    println!("Use spaces to seperate multiple selections. ");
    print!("select: ");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    let mut uin = String::new();
    io::stdin().read_line(&mut uin).ok().expect("Error reading line");
    // Drop the trailing newline from read_line.
    uin.pop();
    parse_input(uin, mpaths, &mut new)
}

/// Add content objects to chapter or destroy them because they lack a chapter.
fn add_content(
    clump: Clump,
    page: &Page,
    chapter: &mut Chapter,
    destroyed: &mut usize,
    started: bool
) {
    if started {
        chapter.content.push(Content::new(clump.blobs, page.dimensions, chapter.cur_height));
    } else {
        // No chapter has been started yet; count the blobs as orphaned.
        *destroyed += clump.blobs.len();
    }
}

/// Add definition objects to chapter or destroy them because they lack a chapter.
/// A definition clump is expected to contain one underline blob (blob_type 1);
/// blobs sitting on the underline become the term (`subject`), the rest the
/// definition body (`extension`).
fn add_definition(
    clump: Clump,
    page: &Page,
    chapter: &mut Chapter,
    destroyed: &mut usize,
    started: bool
) {
    // True when `blob` sits horizontally within (a 50 px tolerance of) the
    // underline and above its bottom edge.
    fn is_underlined(blob: ImgBlob, line: &ImgBlob) -> bool {
        ((blob.top_left[0] as i64 - line.top_left[0] as i64) > -50) && // Make -50 proportional
        ((line.bottom_right[0] as i64 - blob.bottom_right[0] as i64) > -50) &&
        (blob.bottom_right[1] < line.bottom_right[1])
    }
    if started {
        let mut line: ImgBlob = ImgBlob::new();
        let mut name: Vec<ImgBlob> = Vec::new();
        let mut cont: Vec<ImgBlob> = Vec::new();
        // Find the first line-type blob; it is the underline.
        for i in 0..clump.blobs.len() {
            if clump.blobs[i].blob_type == 1 {
                line = clump.blobs[i].clone();
                break;
            }
        }
        // bottom_right stays [0,0] when no line was found — not a definition.
        if line.bottom_right[1] == 0 {
            *destroyed += clump.blobs.len();
            return;
        }
        // Split the remaining blobs into term vs. body.
        for i in 0..clump.blobs.len() {
            if clump.blobs[i] != line {
                if is_underlined(clump.blobs[i].clone(), &line) {
                    name.push(clump.blobs[i].clone());
                } else {
                    cont.push(clump.blobs[i].clone());
                }
            }
        }
        let mut idea = Idea::new();
        if name.len() == 0 {
            // Underline with no term above it — discard the whole clump.
            *destroyed += clump.blobs.len();
            return;
        }
        idea.subject = Content::new(name, page.dimensions, chapter.cur_height);
        if cont.len() > 0 {
            idea.extension = Content::new(cont, page.dimensions, chapter.cur_height);
            idea.update_size_pos(page.dimensions); // probably unused
        }
        chapter.ideas.push(idea);
    } else {
        *destroyed += clump.blobs.len();
    }
}

trait Sub {
    fn sub(self, other: [usize; 2]) -> [usize; 2];
}

///Difference between 2D usize array
/// Component-wise absolute difference, computed branch-by-branch to stay
/// within unsigned arithmetic (usize cannot go negative).
impl Sub for [usize; 2] {
    fn sub(self, other: [usize; 2]) -> [usize; 2] {
        if (self[0] > other[0]) && (self[1] > other[1]) {
            [self[0]-other[0], self[1]-other[1]]
        } else if (self[0] > other[0]) && (self[1] < other[1]) {
            [self[0]-other[0], other[1]-self[1]]
        } else if (self[0] < other[0]) && (self[1] > other[1]) {
            [other[0]-self[0], self[1]-other[1]]
        } else {
            [other[0]-self[0], other[1]-self[1]]
        }
    }
}

trait Filter {
    fn filter(&mut self);
}

/// Posterize the image in place: a pixel whose dominant channel exceeds the
/// average of the other two by more than 60 becomes pure red/green/blue;
/// everything else becomes white.
impl Filter for image::RgbImage {
    fn filter(&mut self) {
        for (_, _, pixel) in self.enumerate_pixels_mut() {
            if pixel[0] > pixel[1] && pixel[0] > pixel[2] {
                let avg = ((pixel[1] as u16+pixel[2] as u16)/2u16) as u8;
                if pixel[0] - avg > 60 {
                    pixel[0] = 255u8;
                    pixel[1] = 0u8;
                    pixel[2] = 0u8;
                } else {
                    pixel[0] = 255u8;
                    pixel[1] = 255u8;
                    pixel[2] = 255u8;
                }
            } else if pixel[1] > pixel[0] && pixel[1] > pixel[2] {
                let avg = ((pixel[0] as u16+pixel[2] as u16)/2u16) as u8;
                if pixel[1] - avg > 60 {
                    pixel[0] = 0u8;
                    pixel[1] = 255u8;
                    pixel[2] = 0u8;
                } else {
                    pixel[0] = 255u8;
                    pixel[1] = 255u8;
                    pixel[2] = 255u8;
                }
            } else if pixel[2] > pixel[0] && pixel[2] > pixel[1] {
                let avg = ((pixel[0] as u16+pixel[1] as u16)/2u16) as u8;
                if pixel[2] - avg > 60 {
                    pixel[0] = 0u8;
                    pixel[1] = 0u8;
                    pixel[2] = 255u8;
                } else {
                    pixel[0] = 255u8;
                    pixel[1] = 255u8;
                    pixel[2] = 255u8;
                }
            } else {
                pixel[0] = 255u8;
                pixel[1] = 255u8;
                pixel[2] = 255u8;
            }
        }
    }
}

/// Create new chapter, add heading objects to chapter, or destroy them because they lack a chapter.
/// State machine over a red clump's blobs.
/// `linemode` tracks progress: -1 = nothing collected yet; 0 = collecting
/// heading text, no underline seen; 1 = one underline seen (heading is at
/// least a sub-heading). A second underline close to the first promotes the
/// heading to a chapter heading (number 1) and starts a new chapter.
fn add_heading(
    clump: Clump,
    page: &Page,
    chapter: &mut Chapter,
    destroyed: &mut usize,
    created: &mut usize,
    started: &mut bool
) {
    let mut i: usize = 0;
    let mut linemode: i8 = -1;
    // Top-left of the most recent underline, for proximity comparison.
    let mut past = [0usize; 2];
    let mut head: Heading = Heading::new();
    while i < clump.blobs.len() {
        let blob = clump.blobs[i].clone();
        if blob.blob_type == 1 {
            // TODO: reduce cyclomatic complexity
            if linemode==1 {
                // 1/17 of width and 1/22 height off acceptable
                let diff = blob.top_left.sub(past);
                // Second underline close enough to the first => double
                // underline => chapter heading.
                if (diff[0] as f32) < 1f32/4f32*(page.dimensions[0] as f32) &&
                (diff[1] as f32) < 1f32/20f32*(page.dimensions[1] as f32) {
                    if *started {
                        // Flush the chapter in progress before starting anew.
                        chapter.cur_height += page.dimensions[1] as i64;
                        (chapter.clone()).add_chapter();
                        *created += 1;
                    }
                    chapter.blank();
                    head.number = 1;
                    head.subject.update_size_pos(page.dimensions, chapter.cur_height);
                    chapter.heading = head.clone();
                    head = Heading::new();
                    chapter.height_precent += (page.dimensions[1] as f64)/
                        (page.dimensions[0] as f64);
                    *started = true;
                    linemode = -1;
                } else {
                    // Stray second line too far from the first.
                    *destroyed += 1;
                }
            } else if linemode == 0 {
                // First underline under collected text: tentatively a
                // sub-heading.
                head.number = 2;
                linemode = 1;
                past = blob.top_left;
            } else {
                // Underline with no preceding text — orphaned.
                *destroyed += 1;
            }
        } else {
            if linemode < 1 {
                if linemode == -1 {
                    linemode = 0;
                }
                head.subject.blobs.push(blob);
            } else {
                // Text after a single underline closes the sub-heading and
                // starts collecting the next one.
                assert!(
                    head.number == 2,
                    "Found heading.number of {}. Expected 2",
                    head.number
                );
                if *started {
                    head.subject.update_size_pos(page.dimensions, chapter.cur_height);
                    chapter.sub_headings.push(head.clone());
                    linemode = 0;
                    head = Heading::new();
                    head.subject.blobs.push(blob);
                } else {
                    *destroyed += head.subject.blobs.len() + 1;
                }
            }
        }
        i += 1;
    }
    // Flush a trailing heading left open at the end of the clump.
    if linemode != -1 {
        assert!(
            head.number != 1,
            "Found heading.number of 1. Expected 2 or 3"
        );
        if *started {
            head.subject.update_size_pos(page.dimensions, chapter.cur_height);
            chapter.sub_headings.push(head);
        } else {
            *destroyed += head.subject.blobs.len();
        }
    }
}

/// Entry point to the program
/// Imports the user-selected images, extracts color clumps per page, routes
/// each clump to heading/definition/content handling, and writes out the
/// final chapter.
fn main() {
    //iterate through images pulling out clumps
    //iterate through pages parsing clumps and creating chapters
    let selected = get_images();
    let mut pages: Vec<Page> = Vec::new();
    print!("○: Identifying objects");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    for img in selected {
        pages.push(Page::from_path(img.clone()));
        // Append the path to the import ledger, creating it on first use.
        if !fs::metadata(IMPORTED).is_ok() {
            File::create(IMPORTED).unwrap();
        }
        let mut file = OpenOptions::new()
            .write(true)
            .append(true)
            .open(IMPORTED)
            .unwrap();
        let _ = writeln!(file, "{}", img); // TODO: Warn the user about errors here
    }
    print!("\r◑: Dividing by chapter");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    let mut chapter: Chapter = Chapter::new();
    let mut started = false;
    let mut created_chapters = 0;
    let mut destroyed: usize = 0;
    for mut p in pages {
        chapter.height_precent += (p.dimensions[1] as f64)/(p.dimensions[0] as f64);
        let mut i: usize = 0;
        while i < p.clumps.len() {
            // Clump color decides its role: red = heading, green =
            // definition, blue = plain content.
            match p.clumps[i].ctype {
                RED => add_heading( // Heading(s) of some type
                    p.clumps[i].clone(),
                    &p,
                    &mut chapter,
                    &mut destroyed,
                    &mut created_chapters,
                    &mut started
                ),
                GREEN => add_definition( // Defintions(s) of some type
                    p.clumps[i].clone(),
                    &p,
                    &mut chapter,
                    &mut destroyed,
                    started
                ),
                BLUE => add_content( // Content
                    p.clumps[i].clone(),
                    &p,
                    &mut chapter,
                    &mut destroyed,
                    started
                ),
                _ => panic!("Invalid Content")
            };
            i += 1;
        }
        chapter.cur_height += p.dimensions[1] as i64;
    }
    // Flush the final, still-open chapter if it has a heading.
    if chapter.heading.subject.blobs.len() > 0 {
        chapter.add_chapter();
        created_chapters += 1;
    }
    print!("\r◕: Writing ");
    std::io::stdout().flush().ok().expect("Could not flush STDOUT!");
    println!("\r●: Done ");
    println!(
        "{} chapters added. {} orphaned objects destroyed",
        created_chapters,
        destroyed
    );
}
// NOTE(review): this region contains TWO revisions of the same program,
// separated below by a stray VCS commit message ("Changed folder filter
// logic"); only the directory-name filter in walk_dirs differs between them.
// TODO: Fix unwraps
extern crate git2;
extern crate term_painter;
use std::error::Error;
use std::io;
use std::env;
use std::path::{Path, PathBuf};
use term_painter::{ToStyle, Color};

/// Walk every directory under the current working directory and print the
/// git working-tree status of each repository found.
fn main() {
    let working_dir = match env::current_dir() {
        Ok(path) => path,
        Err(err) => {
            println!("Error getting working directory: {}", err.description());
            return;
        }
    };
    println!("{:?}", walk_dirs(&working_dir));
}

/// Iterative depth-first walk using an explicit stack. A child directory is
/// only pushed for further descent when git_changes fails on it (i.e. it is
/// not itself a git repository); unreadable directories are skipped silently.
fn walk_dirs(path: &Path) -> io::Result<()> {
    let mut pending: Vec<PathBuf> = Vec::new();
    loop {
        let current_dir = if pending.len() == 0 {
            path.to_path_buf()
        } else {
            pending.pop().unwrap()
        };
        let read_result = current_dir.read_dir();
        if read_result.is_ok() {
            let iter = read_result.unwrap()
                .filter(|x| x.is_ok())
                .map(|x| x.unwrap())
                .filter(|x| match x.file_type() {
                    Ok(t) => t.is_dir(),
                    Err(_) => false,
                })
                // Skip .git directories and Windows-style "$..." folders.
                .filter(|x| match x.path().file_name() {
                    Some(name) => name.ne(".git") &&
                        !name.to_str().unwrap().starts_with("$"),
                    None => false,
                });
            for entry in iter {
                if git_changes(&entry.path()).is_err() {
                    pending.push(entry.path().to_path_buf());
                }
            }
        }
        if pending.len() == 0 {
            break;
        }
    }
    Ok(())
}

// Single-variant error: "could not open as a git repository".
enum GitError {
    OpenRepo,
}

impl From<git2::Error> for GitError {
    fn from(_: git2::Error) -> Self {
        GitError::OpenRepo
    }
}

/// Print the working-tree status entries of the repository at `path`.
/// Returns Err(GitError::OpenRepo) when `path` is not a repository; prints
/// nothing when the working tree is clean.
fn git_changes(path: &Path) -> Result<(), GitError> {
    let repo = git2::Repository::open(path)?;
    let mut opts = git2::StatusOptions::new();
    opts.include_ignored(false)
        .include_untracked(true)
        .recurse_untracked_dirs(true)
        .include_unreadable_as_untracked(true)
        .disable_pathspec_match(true)
        .exclude_submodules(true);
    let statuses = repo.statuses(Some(&mut opts)).unwrap();
    let mut statuses_iter = statuses.iter()
        .filter(|x| {
            if x.status() != git2::STATUS_WT_DELETED {
                return true;
            }
            // For some reason, some files with the deleted
            // status actually still exist, so ignore these.
            let mut del_path = path.to_path_buf();
            del_path.push(x.path().unwrap());
            !del_path.exists()
        })
        .peekable();
    // Clean tree: print nothing.
    if statuses_iter.peek().is_none() {
        return Ok(());
    }
    println!("{}", path.to_str().unwrap());
    for entry in statuses_iter {
        let pre = match entry.status() {
            git2::STATUS_WT_DELETED => Color::BrightRed.paint(" Deleted"),
            git2::STATUS_WT_MODIFIED => Color::BrightCyan.paint(" Modified"),
            git2::STATUS_WT_NEW => Color::BrightGreen.paint(" New"),
            git2::STATUS_WT_RENAMED => Color::BrightCyan.paint(" Renamed"),
            git2::STATUS_WT_TYPECHANGE => Color::BrightCyan.paint("Typechanged"),
            _ => Color::BrightMagenta.paint(" Unknown"),
        };
        println!(" {} {}", pre, entry.path().unwrap());
    }
    Ok(())
}
Changed folder filter logic
// TODO: Fix unwraps
extern crate git2;
extern crate term_painter;
use std::error::Error;
use std::io;
use std::env;
use std::path::{Path, PathBuf};
use term_painter::{ToStyle, Color};

/// Walk every directory under the current working directory and print the
/// git working-tree status of each repository found.
fn main() {
    let working_dir = match env::current_dir() {
        Ok(path) => path,
        Err(err) => {
            println!("Error getting working directory: {}", err.description());
            return;
        }
    };
    println!("{:?}", walk_dirs(&working_dir));
}

/// Iterative depth-first walk using an explicit stack; descends only into
/// directories that are not themselves git repositories.
/// Revised filter: skips ALL dot-directories (not just .git) plus "$..." ones.
fn walk_dirs(path: &Path) -> io::Result<()> {
    let mut pending: Vec<PathBuf> = Vec::new();
    loop {
        let current_dir = if pending.len() == 0 {
            path.to_path_buf()
        } else {
            pending.pop().unwrap()
        };
        let read_result = current_dir.read_dir();
        if read_result.is_ok() {
            let iter = read_result.unwrap()
                .filter(|x| x.is_ok())
                .map(|x| x.unwrap())
                .filter(|x| match x.file_type() {
                    Ok(t) => t.is_dir(),
                    Err(_) => false,
                })
                .filter(|x| match x.path().file_name() {
                    Some(name) => {
                        let name_str = name.to_str().unwrap();
                        !name_str.starts_with(".") &&
                        !name_str.starts_with("$")
                    }
                    None => false,
                });
            for entry in iter {
                if git_changes(&entry.path()).is_err() {
                    pending.push(entry.path().to_path_buf());
                }
            }
        }
        if pending.len() == 0 {
            break;
        }
    }
    Ok(())
}

// Single-variant error: "could not open as a git repository".
enum GitError {
    OpenRepo,
}

impl From<git2::Error> for GitError {
    fn from(_: git2::Error) -> Self {
        GitError::OpenRepo
    }
}

/// Print the working-tree status entries of the repository at `path`.
/// Returns Err(GitError::OpenRepo) when `path` is not a repository; prints
/// nothing when the working tree is clean.
fn git_changes(path: &Path) -> Result<(), GitError> {
    let repo = git2::Repository::open(path)?;
    let mut opts = git2::StatusOptions::new();
    opts.include_ignored(false)
        .include_untracked(true)
        .recurse_untracked_dirs(true)
        .include_unreadable_as_untracked(true)
        .disable_pathspec_match(true)
        .exclude_submodules(true);
    let statuses = repo.statuses(Some(&mut opts)).unwrap();
    let mut statuses_iter = statuses.iter()
        .filter(|x| {
            if x.status() != git2::STATUS_WT_DELETED {
                return true;
            }
            // For some reason, some files with the deleted
            // status actually still exist, so ignore these.
            let mut del_path = path.to_path_buf();
            del_path.push(x.path().unwrap());
            !del_path.exists()
        })
        .peekable();
    // Clean tree: print nothing.
    if statuses_iter.peek().is_none() {
        return Ok(());
    }
    println!("{}", path.to_str().unwrap());
    for entry in statuses_iter {
        let pre = match entry.status() {
            git2::STATUS_WT_DELETED => Color::BrightRed.paint(" Deleted"),
            git2::STATUS_WT_MODIFIED => Color::BrightCyan.paint(" Modified"),
            git2::STATUS_WT_NEW => Color::BrightGreen.paint(" New"),
            git2::STATUS_WT_RENAMED => Color::BrightCyan.paint(" Renamed"),
            git2::STATUS_WT_TYPECHANGE => Color::BrightCyan.paint("Typechanged"),
            _ => Color::BrightMagenta.paint(" Unknown"),
        };
        println!(" {} {}", pre, entry.path().unwrap());
    }
    Ok(())
}
use std::env;
use std::ffi::OsString;
use std::fs::File;
use std::io::{self, BufRead, Write};
use std::path::PathBuf;
use std::process::{self, Command, Stdio};

extern crate isatty;
use isatty::{stderr_isatty, stdout_isatty};

extern crate tempfile;

/// Entry point: run the expansion and exit with its status code.
fn main() {
    let result = cargo_expand_or_run_nightly();
    process::exit(match result {
        Ok(code) => code,
        Err(err) => {
            let _ = writeln!(&mut io::stderr(), "{}", err);
            1
        }
    });
}

/// Expand in-process when the toolchain may be nightly; otherwise re-invoke
/// `cargo +nightly expand`, guarded by an env var so we cannot recurse.
fn cargo_expand_or_run_nightly() -> io::Result<i32> {
    const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
    let maybe_nightly = !definitely_not_nightly();
    if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
        return cargo_expand();
    }
    let mut nightly = Command::new("cargo");
    nightly.arg("+nightly");
    nightly.arg("expand");
    nightly.args(env::args_os().skip(1));
    // Hopefully prevent infinite re-run loop.
    nightly.env(NO_RUN_NIGHTLY, "");
    let status = nightly.status()?;
    Ok(match status.code() {
        Some(code) => code,
        // Killed by a signal: map success/failure to 0/1.
        None => if status.success() { 0 } else { 1 },
    })
}

/// Best-effort check of `cargo --version`; any failure conservatively
/// returns false ("might be nightly").
fn definitely_not_nightly() -> bool {
    let mut cmd = Command::new(cargo_binary());
    cmd.arg("--version");
    let output = match cmd.output() {
        Ok(output) => output,
        Err(_) => return false,
    };
    let version = match String::from_utf8(output.stdout) {
        Ok(version) => version,
        Err(_) => return false,
    };
    version.starts_with("cargo 1") && !version.contains("nightly")
}

/// The cargo executable to invoke: $CARGO if set, else "cargo" from PATH.
fn cargo_binary() -> OsString {
    env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}

/// Run `cargo rustc -- --pretty=expanded`, optionally piping the result
/// through rustfmt and/or pygmentize when those tools are available.
fn cargo_expand() -> io::Result<i32> {
    let args: Vec<_> = env::args_os().collect();
    let which_rustfmt = which(&["rustfmt"]);
    // Only colorize when writing to a terminal and not told --color=never.
    let which_pygmentize = if !color_never(&args) && stdout_isatty() {
        which(&["pygmentize", "-l", "rust"])
    } else {
        None
    };
    // A temp dir is only needed when post-processing the expanded output.
    let outdir = if which_rustfmt.is_some() || which_pygmentize.is_some() {
        let mut builder = tempfile::Builder::new();
        builder.prefix("cargo-expand");
        Some(builder.tempdir().expect("failed to create tmp file"))
    } else {
        None
    };
    let outfile = outdir.as_ref().map(|dir| dir.path().join("expanded"));
    // Run cargo
    let mut cmd = Command::new(cargo_binary());
    cmd.args(&wrap_args(args.clone(), outfile.as_ref()));
    let code = filter_err(&mut cmd, ignore_cargo_err)?;
    if code != 0 {
        return Ok(code);
    }
    // Run rustfmt
    if let Some(fmt) = which_rustfmt {
        // Ignore any errors.
        let _status = Command::new(fmt)
            .arg(outfile.as_ref().unwrap())
            .stderr(Stdio::null())
            .status();
    }
    // Run pygmentize
    if let Some(pyg) = which_pygmentize {
        let _status = Command::new(pyg)
            .args(&["-l", "rust", "-O", "encoding=utf8"])
            .arg(outfile.as_ref().unwrap())
            .status();
    } else if let Some(outfile) = outfile {
        // Cat outfile if rustfmt was used.
        let mut reader = File::open(outfile)?;
        io::copy(&mut reader, &mut io::stdout())?;
    }
    Ok(0)
}

// Based on https://github.com/rsolomo/cargo-check
/// Rewrite our CLI args into a `cargo rustc` invocation: copy pre-`--` args,
/// ensure a --color setting, then append `-o <outfile>` and the pretty-print
/// flags after `--`.
fn wrap_args<I>(it: I, outfile: Option<&PathBuf>) -> Vec<OsString>
where
    I: IntoIterator<Item = OsString>,
{
    let mut args = vec!["rustc".into()];
    let mut ends_with_test = false;
    let mut ends_with_example = false;
    let mut has_color = false;
    // Skip argv[0] and the "expand" subcommand itself.
    let mut it = it.into_iter().skip(2);
    for arg in &mut it {
        if arg == *"--" {
            break;
        }
        ends_with_test = arg == *"--test";
        ends_with_example = arg == *"--example";
        has_color |= arg.to_str().unwrap_or("").starts_with("--color");
        args.push(arg.into());
    }
    if ends_with_test {
        // Expand the `test.rs` test by default.
        args.push("test".into());
    }
    if ends_with_example {
        // Expand the `example.rs` example by default.
        args.push("example".into());
    }
    if !has_color {
        let color = stderr_isatty();
        let setting = if color { "always" } else { "never" };
        args.push(format!("--color={}", setting).into());
    }
    args.push("--".into());
    if let Some(path) = outfile {
        args.push("-o".into());
        args.push(path.into());
    }
    args.push("-Zunstable-options".into());
    args.push("--pretty=expanded".into());
    // Pass through everything the user placed after `--`.
    args.extend(it);
    args
}

/// True when args contain `--color never` (two tokens) or `--color=never`.
fn color_never(args: &Vec<OsString>) -> bool {
    args.windows(2).any(|pair| pair[0] == *"--color" && pair[1] == *"never")
        || args.iter().any(|arg| *arg == *"--color=never")
}

/// Probe for an external tool by actually spawning it (stdio nulled);
/// `$TOOLNAME` (uppercased) overrides the lookup, empty meaning "disabled".
/// Returns the command name on success, None when unavailable.
fn which(cmd: &[&str]) -> Option<OsString> {
    if env::args_os().find(|arg| arg == "--help").is_some() {
        return None;
    }
    if let Some(which) = env::var_os(&cmd[0].to_uppercase()) {
        return if which.is_empty() { None } else { Some(which) };
    }
    let spawn = Command::new(cmd[0])
        .args(&cmd[1..])
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn();
    let mut child = match spawn {
        Ok(child) => child,
        Err(_) => {
            return None;
        }
    };
    let exit = match child.wait() {
        Ok(exit) => exit,
        Err(_) => {
            return None;
        }
    };
    if exit.success() {
        Some(cmd[0].into())
    } else {
        None
    }
}

/// Spawn `cmd` with stderr piped, forwarding each stderr line unless the
/// `ignore` predicate suppresses it; returns the child's exit code (1 when
/// the process was killed by a signal).
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
    let mut child = cmd.stderr(Stdio::piped()).spawn()?;
    let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
    let mut line = String::new();
    while let Ok(n) = stderr.read_line(&mut line) {
        if n == 0 {
            break;
        }
        if !ignore(&line) {
            let _ = write!(&mut io::stderr(), "{}", line);
        }
        line.clear();
    }
    let code = child.wait()?.code().unwrap_or(1);
    Ok(code)
}

/// True for blank lines and for known-noise cargo/rustc warnings triggered by
/// forcing a single `-o` output. (NOTE: source is truncated here mid-list.)
fn ignore_cargo_err(line: &str) -> bool {
    if line.trim().is_empty() {
        return true;
    }
    let blacklist = [
        "ignoring specified output filename because multiple outputs were \
         requested",
        "ignoring specified output filename for 'link' output because multiple \
         outputs were requested",
        "ignoring --out-dir flag due to -o flag",
        "ignoring -C extra-filename flag due to -o flag",
        "due to multiple output types requested, 
the explicitly specified \ output file name will be adapted for each output type",
    ];

    for s in &blacklist {
        if line.contains(s) {
            return true;
        }
    }

    false
}
// NOTE(review): the next line is a stray commit-message fragment left by
// whatever concatenated the two revisions of this file; it is not Rust code.
Use --profile=check to skip codegen
// NOTE(review): below is the SECOND revision of the same main.rs. Its only
// code difference from the first revision is the added "--profile=check"
// argument in wrap_args. Tokens are unchanged; only formatting and comments
// were added.

use std::env;
use std::ffi::OsString;
use std::fs::File;
use std::io::{self, BufRead, Write};
use std::path::PathBuf;
use std::process::{self, Command, Stdio};

extern crate isatty;
use isatty::{stderr_isatty, stdout_isatty};

extern crate tempfile;

/// Entry point: run the expansion and turn the result into a process exit
/// code, printing any I/O error to stderr first.
fn main() {
    let result = cargo_expand_or_run_nightly();
    process::exit(match result {
        Ok(code) => code,
        Err(err) => {
            let _ = writeln!(&mut io::stderr(), "{}", err);
            1
        }
    });
}

/// Run the expansion directly when the current toolchain may already be
/// nightly; otherwise re-invoke ourselves as `cargo +nightly expand`.
/// The env var acts as a guard so the re-invocation cannot recurse forever.
fn cargo_expand_or_run_nightly() -> io::Result<i32> {
    const NO_RUN_NIGHTLY: &str = "CARGO_EXPAND_NO_RUN_NIGHTLY";
    let maybe_nightly = !definitely_not_nightly();
    if maybe_nightly || env::var_os(NO_RUN_NIGHTLY).is_some() {
        return cargo_expand();
    }

    let mut nightly = Command::new("cargo");
    nightly.arg("+nightly");
    nightly.arg("expand");
    nightly.args(env::args_os().skip(1));
    // Hopefully prevent infinite re-run loop.
    nightly.env(NO_RUN_NIGHTLY, "");

    let status = nightly.status()?;
    Ok(match status.code() {
        Some(code) => code,
        // No exit code means the child was killed by a signal.
        None => if status.success() { 0 } else { 1 },
    })
}

/// Best-effort probe of `cargo --version`: returns true only when the output
/// clearly names a stable release ("cargo 1..." without "nightly"). Any
/// failure to run or decode the command counts as "maybe nightly" (false).
fn definitely_not_nightly() -> bool {
    let mut cmd = Command::new(cargo_binary());
    cmd.arg("--version");

    let output = match cmd.output() {
        Ok(output) => output,
        Err(_) => return false,
    };

    let version = match String::from_utf8(output.stdout) {
        Ok(version) => version,
        Err(_) => return false,
    };

    version.starts_with("cargo 1") && !version.contains("nightly")
}

/// The cargo executable to invoke: honor the CARGO env var (set by cargo for
/// subcommands), falling back to plain "cargo" on PATH.
fn cargo_binary() -> OsString {
    env::var_os("CARGO").unwrap_or_else(|| "cargo".to_owned().into())
}

/// Run `cargo rustc` with `--pretty=expanded`, then optionally post-process
/// the result with rustfmt and/or pygmentize. Returns cargo's exit code.
fn cargo_expand() -> io::Result<i32> {
    let args: Vec<_> = env::args_os().collect();

    let which_rustfmt = which(&["rustfmt"]);
    // Colorize only when stdout is a TTY and the user did not say otherwise.
    let which_pygmentize = if !color_never(&args) && stdout_isatty() {
        which(&["pygmentize", "-l", "rust"])
    } else {
        None
    };

    // The expansion is redirected to a temp file only when some post-processor
    // will read it back; otherwise rustc writes straight to stdout.
    let outdir = if which_rustfmt.is_some() || which_pygmentize.is_some() {
        let mut builder = tempfile::Builder::new();
        builder.prefix("cargo-expand");
        Some(builder.tempdir().expect("failed to create tmp file"))
    } else {
        None
    };
    let outfile = outdir.as_ref().map(|dir| dir.path().join("expanded"));

    // Run cargo
    let mut cmd = Command::new(cargo_binary());
    cmd.args(&wrap_args(args.clone(), outfile.as_ref()));
    let code = filter_err(&mut cmd, ignore_cargo_err)?;
    if code != 0 {
        return Ok(code);
    }

    // Run rustfmt
    if let Some(fmt) = which_rustfmt {
        // Ignore any errors.
        let _status = Command::new(fmt)
            .arg(outfile.as_ref().unwrap())
            .stderr(Stdio::null())
            .status();
    }

    // Run pygmentize
    if let Some(pyg) = which_pygmentize {
        let _status = Command::new(pyg)
            .args(&["-l", "rust", "-O", "encoding=utf8"])
            .arg(outfile.as_ref().unwrap())
            .status();
    } else if let Some(outfile) = outfile {
        // Cat outfile if rustfmt was used.
        let mut reader = File::open(outfile)?;
        io::copy(&mut reader, &mut io::stdout())?;
    }

    Ok(0)
}

// Based on https://github.com/rsolomo/cargo-check
/// Translate our command line into a `cargo rustc` argument vector that
/// pretty-prints the expanded crate, optionally redirecting into `outfile`.
/// This revision adds `--profile=check` so cargo skips codegen for the
/// dependencies (expansion only needs metadata).
fn wrap_args<I>(it: I, outfile: Option<&PathBuf>) -> Vec<OsString>
where
    I: IntoIterator<Item = OsString>,
{
    let mut args = vec!["rustc".into(), "--profile=check".into()];
    let mut ends_with_test = false;
    let mut ends_with_example = false;
    let mut has_color = false;

    // Skip argv[0] and the "expand" subcommand name, then forward user args
    // up to (but not including) a bare "--" separator.
    let mut it = it.into_iter().skip(2);
    for arg in &mut it {
        if arg == *"--" {
            break;
        }
        // Track whether the LAST forwarded arg was --test / --example so a
        // default target name can be appended below.
        ends_with_test = arg == *"--test";
        ends_with_example = arg == *"--example";
        has_color |= arg.to_str().unwrap_or("").starts_with("--color");
        args.push(arg.into());
    }

    if ends_with_test {
        // Expand the `test.rs` test by default.
        args.push("test".into());
    }
    if ends_with_example {
        // Expand the `example.rs` example by default.
        args.push("example".into());
    }

    if !has_color {
        // Default cargo's color setting from whether stderr is a TTY.
        let color = stderr_isatty();
        let setting = if color { "always" } else { "never" };
        args.push(format!("--color={}", setting).into());
    }

    // Everything after "--" goes to rustc itself.
    args.push("--".into());
    if let Some(path) = outfile {
        args.push("-o".into());
        args.push(path.into());
    }
    args.push("-Zunstable-options".into());
    args.push("--pretty=expanded".into());
    args.extend(it);
    args
}

/// True when the user explicitly disabled color, either as the two-arg form
/// `--color never` or the one-arg form `--color=never`.
fn color_never(args: &Vec<OsString>) -> bool {
    args.windows(2).any(|pair| pair[0] == *"--color" && pair[1] == *"never")
        || args.iter().any(|arg| *arg == *"--color=never")
}

/// Probe for an external helper tool. An env-var override named after the
/// tool (e.g. RUSTFMT, PYGMENTIZE) wins — empty value disables the tool.
/// Otherwise spawn it once with null stdio and report it only if it exits
/// successfully. Skipped entirely when the user asked for --help.
fn which(cmd: &[&str]) -> Option<OsString> {
    if env::args_os().find(|arg| arg == "--help").is_some() {
        return None;
    }
    if let Some(which) = env::var_os(&cmd[0].to_uppercase()) {
        return if which.is_empty() { None } else { Some(which) };
    }
    let spawn = Command::new(cmd[0])
        .args(&cmd[1..])
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn();
    let mut child = match spawn {
        Ok(child) => child,
        Err(_) => {
            return None;
        }
    };
    let exit = match child.wait() {
        Ok(exit) => exit,
        Err(_) => {
            return None;
        }
    };
    if exit.success() { Some(cmd[0].into()) } else { None }
}

/// Run `cmd`, echoing its stderr lines except those that `ignore` filters
/// out. Returns the child's exit code (1 when killed by a signal).
fn filter_err(cmd: &mut Command, ignore: fn(&str) -> bool) -> io::Result<i32> {
    let mut child = cmd.stderr(Stdio::piped()).spawn()?;
    let mut stderr = io::BufReader::new(child.stderr.take().unwrap());
    // Reuse one buffer across lines; read_line returns 0 at EOF.
    let mut line = String::new();
    while let Ok(n) = stderr.read_line(&mut line) {
        if n == 0 {
            break;
        }
        if !ignore(&line) {
            let _ = write!(&mut io::stderr(), "{}", line);
        }
        line.clear();
    }
    let code = child.wait()?.code().unwrap_or(1);
    Ok(code)
}

/// Filter for rustc/cargo noise caused by redirecting the expansion with -o.
/// Blank lines and known warning messages are suppressed.
fn ignore_cargo_err(line: &str) -> bool {
    if line.trim().is_empty() {
        return true;
    }

    let blacklist = [
        "ignoring specified output filename because multiple outputs were \ requested",
        "ignoring specified output filename for 'link' output because multiple \ outputs were requested",
        "ignoring --out-dir flag due to -o flag",
        "ignoring -C extra-filename flag due to -o flag",
        "due to multiple output types requested, the explicitly specified \ output file name will be adapted for each output type",
    ];

    for s in &blacklist {
        if line.contains(s) {
            return true;
        }
    }

    false
}